Compare commits

...

583 Commits

Author SHA1 Message Date
nikhilmantri0902
1948cdfaa4 chore: added for comparison across group by and merges debugging quantile exact 2025-08-19 01:16:24 +05:30
nikhilmantri0902
2fb7fe49ef chore: added for comparison across group by and merges debugging quantile exact 2025-08-19 00:56:43 +05:30
nikhilmantri0902
c4762045a6 chore: added debug functionality 2025-08-18 15:34:20 +05:30
nikhilmantri0902
1541734542 feat: match query building semantics for tags of 0 length 2025-08-18 15:26:12 +05:30
nikhilmantri0902
46e5b407f7 fix: added necessary 0 numCalls handling 2025-08-18 13:53:59 +05:30
nikhilmantri0902
f2c3946101 fix: added necessary 0 numCalls handling 2025-08-18 13:52:55 +05:30
nikhilmantri0902
4dca46de40 chore: added debugging funcs 2025-08-18 13:24:28 +05:30
nikhilmantri0902
6f420abe27 chore: removed comparison block 2025-08-18 13:09:12 +05:30
nikhilmantri0902
1d9b457af6 chore: removed comparison block 2025-08-18 12:40:51 +05:30
nikhilmantri0902
d437998750 chore: tuple issue fixed 2025-08-18 12:13:08 +05:30
nikhilmantri0902
e02d0cdd98 chore: tuple issue fixed 2025-08-18 11:44:45 +05:30
nikhilmantri0902
1ad4a6699a chore: added debug logs 2025-08-18 11:24:29 +05:30
nikhilmantri0902
00ae45022b fix: added filtering based on both name and serviceName pairs 2025-08-18 11:20:08 +05:30
nikhilmantri0902
6f4a965c6d fix: added filtering based on both name and serviceName pairs 2025-08-18 11:19:01 +05:30
nikhilmantri0902
4c29b03577 chore: added logs for debugging 2025-08-15 14:15:20 +05:30
nikhilmantri0902
ea1409bc4f fix: added query optimization 2025-08-14 20:12:48 +05:30
Abhi kumar
0e3ac2a179 fix: added loading indicators in traces pages when running query (#8782) 2025-08-14 13:53:39 +05:30
Amlan Kumar Nandy
249f8be845 fix: resolve infinite loading issue in metric view in messaging queues (#8779) 2025-08-14 04:16:39 +00:00
primus-bot[bot]
9c952942ad chore(release): bump to v0.92.1 (#8780)
Co-authored-by: primus-bot[bot] <171087277+primus-bot[bot]@users.noreply.github.com>
2025-08-13 15:10:08 +05:30
Nityananda Gohain
dac46d82ff fix: check ch version (#8778)
Check the ClickHouse version before the setting `secondary_indices_enable_bulk_filtering` is used.
2025-08-13 14:57:25 +05:30
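A minimal Go sketch of the version guard described in the commit above; the threshold, helper names, and settings list are illustrative assumptions, not the repository's actual implementation:

```go
package main

import (
	"fmt"
	"strconv"
	"strings"
)

// versionAtLeast reports whether a ClickHouse version string such as
// "25.5.1.2782" is at least major.minor; parsing is intentionally lenient.
func versionAtLeast(version string, major, minor int) bool {
	parts := strings.Split(version, ".")
	if len(parts) < 2 {
		return false
	}
	maj, err1 := strconv.Atoi(parts[0])
	mnr, err2 := strconv.Atoi(parts[1])
	if err1 != nil || err2 != nil {
		return false
	}
	return maj > major || (maj == major && mnr >= minor)
}

func main() {
	serverVersion := "25.5.1.2782" // e.g. the result of SELECT version()
	settings := []string{"max_execution_time = 60"}
	// Only request the bulk-filtering setting on servers new enough to
	// understand it; the 25.5 threshold here is illustrative only.
	if versionAtLeast(serverVersion, 25, 5) {
		settings = append(settings, "secondary_indices_enable_bulk_filtering = 1")
	}
	fmt.Println(strings.Join(settings, ", "))
}
```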
primus-bot[bot]
802ce6de01 chore(release): bump to v0.92.0 (#8776)
#### Summary
 - Release SigNoz v0.92.0
 - Bump SigNoz OTel Collector to v0.129.0
2025-08-13 12:17:43 +05:30
dependabot[bot]
6853f0c99d chore(deps): bump urllib3 from 2.4.0 to 2.5.0 in /tests/integration (#8296) 2025-08-13 04:58:39 +00:00
Srikanth Chekuri
3f8a2870e4 fix: key CONTAINS value doesn't work for numeric values (#8768) 2025-08-13 09:59:28 +05:30
Srikanth Chekuri
5fa70ea802 chore: use *_keys tables instead of tag_attributes_v2 for suggestions (#8753) 2025-08-12 18:10:35 +05:30
Yunus M
3a952fa330 fix: pass metric name to get value suggestions api (#8671)
* fix: pass metric name to get value suggestions api

* feat: add source to get value suggestions
2025-08-11 08:10:31 +00:00
Yunus M
6d97db1d9d fix: use localstorage value to avoid waiting for pref api to set the toggle state, add shortcut (#8751) 2025-08-11 10:26:27 +05:30
Shaheer Kochai
5412e7f70b feat: show count in span details drawer tabs (#8702)
* feat: show event count in Events tab of SpanDetailsDrawer

* feat: add count badges to all SpanDetailsDrawer tabs
2025-08-10 05:39:20 +00:00
aniketio-ctrl
8e5cb9046d fix(alert): added querier v5 in test notify (#8749) 2025-08-08 18:01:23 +05:30
Srikanth Chekuri
760eabb2dc chore: do not return err for meter source temporality (#8750) 2025-08-08 17:39:06 +05:30
Srikanth Chekuri
35ddaaa2fc chore: add env to override logs keys table name (#8748) 2025-08-08 11:34:09 +00:00
nikhilmantri0902
a51ee66c02 Improvement: Added Otel-collector setup for local dev environment (#8701)
* feat(devenv): add otel-collector support for local development

- Add .devenv/docker/otel-collector/ with compose.yaml and config
- Add devenv-otel-collector and devenv-up targets to Makefile
- Update development.md with otel-collector setup instructions
- Add README.md with usage documentation for otel-collector setup

This enables developers to run the complete SigNoz stack locally,
including the OpenTelemetry Collector for receiving telemetry data
on ports 4317 (gRPC) and 4318 (HTTP).

* docs: improve collector setup wordings

* chore: fixed comment and service name

* chore: docker service name updated otel-collector -> signoz-otel-collector
2025-08-08 16:54:05 +05:30
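As a hedged illustration only: once the local dev collector from this commit is running, an instrumented Go app can point its OTLP exporter at the gRPC endpoint on port 4317. This is generic OpenTelemetry Go SDK usage, not code from the PR:

```go
package main

import (
	"context"
	"log"

	"go.opentelemetry.io/otel"
	"go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc"
	sdktrace "go.opentelemetry.io/otel/sdk/trace"
)

func main() {
	ctx := context.Background()

	// Export spans to the local dev collector (OTLP gRPC on 4317).
	exporter, err := otlptracegrpc.New(ctx,
		otlptracegrpc.WithEndpoint("localhost:4317"),
		otlptracegrpc.WithInsecure(),
	)
	if err != nil {
		log.Fatal(err)
	}

	tp := sdktrace.NewTracerProvider(sdktrace.WithBatcher(exporter))
	defer func() { _ = tp.Shutdown(ctx) }()
	otel.SetTracerProvider(tp)

	// Emit a test span so something shows up in SigNoz.
	_, span := tp.Tracer("devenv-check").Start(ctx, "hello-signoz")
	span.End()
}
```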
Yunus M
75d189162b feat: migrate old saved columns keys to name (#8747) 2025-08-08 14:41:34 +05:30
Yunus M
932918e3a4 feat: meter explorer (#8741)
* feat: meter explorer

* feat: meter explorer

* fix: remove meter as data source

* fix: change meter-explorer to meter - quick filter

* chore: delete test file

* fix: failing test cases
2025-08-08 12:03:26 +05:30
Vibhu Pandey
aa3bc16dcb test(integration): bump requests to 2.32.4 (#8743) 2025-08-08 00:25:38 +05:30
Yunus M
b5098e00a3 fix: logs explorer - should have at least 1 column, discard empty key columns (#8740) 2025-08-07 20:17:34 +05:30
Abhi kumar
20dc561bfe fix: added fix for query becoming empty on time change (#8739) 2025-08-07 19:42:07 +05:30
Nityananda Gohain
99bbb87738 chore: add option to ignore data skipping indices (#8738)
* chore: add option to ignore data skipping indices

* fix: update example
2025-08-07 13:21:17 +00:00
Vikrant Gupta
f1ce93171c feat(telemetrymeter): add support for telemetry meter (#8667)
* feat(telemetry/meter): added base setup for telemetry meter signal

* feat(telemetry/meter): added metadata setup for meter

* feat(telemetry/meter): fix stmnt builder tests

* feat(telemetry/meter): test query range API fixes

* feat(telemetry/meter): improve error messages

* feat(telemetrymeter): step interval improvements

* feat(telemetrymeter): metadata changes and aggregate attribute changes

* feat(telemetrymeter): metadata changes and aggregate attribute changes

* feat(telemetrymeter): deprecate the signal and use aggregation instead

* feat(telemetrymeter): deprecate the signal and use aggregation instead

* feat(telemetrymeter): deprecate the signal and use aggregation instead

* feat(telemetrymeter): cleanup the types

* feat(telemetrymeter): introduce source for query

* feat(telemetrymeter): better naming for source in metadata

* feat(telemetrymeter): added quick filters for meter explorer

* feat(telemetrymeter): incorporate the new changes to stmnt builder

* feat(telemetrymeter): add the statement builder for the ranged cache queries

* feat(telemetrymeter): use meter aggregate keys

* feat(telemetrymeter): use meter aggregate keys

* feat(telemetrymeter): remove meter from complete bools

* feat(telemetrymeter): remove meter from complete bools

* feat(telemetrymeter): update the quick filters to use meter
2025-08-07 16:50:37 +05:30
Srikanth Chekuri
92794389d6 fix: limit keys for empty search key (#8728) 2025-08-07 00:34:44 +05:30
Srikanth Chekuri
bd02848623 chore: add sql migration for dashboards, alerts, and saved views (#8642)
## 📄 Summary

To reliably migrate the alerts and dashboards, we need access to the telemetrystore to fetch some metadata, and while doing the migration I need to log some information so issues can be fixed later.

Key changes:
- Modified the migration to include telemetrystore and a logging provider (open to using a standard logger instead)
- To avoid the previous issues with imported dashboards failing during migration, I've ensured that imported JSON files are automatically transformed when migration is active
- Implemented detailed logic to handle dashboard migration cleanly and prevent unnecessary errors
- Separated the core migration logic from SQL migration code, as users from the dot metrics migration requested shareable code snippets for local migrations. This modular approach allows others to easily reuse the migration functionality.

Known: I didn't register the migration yet in this PR, and will not merge this yet, so please review with that in mind.
2025-08-06 23:05:39 +05:30
Abhi kumar
b5016b061b fix: added fix for key suggestions (#8727) 2025-08-06 11:48:43 +00:00
Abhi kumar
c308e8668c fix: added fix for query addon lightmode ui (#8725) 2025-08-06 16:21:35 +05:30
SagarRajput-7
41ee4176ad fix: fixed metric aggregation and value retention inconsistency in edit mode (#8718) 2025-08-06 13:55:16 +05:30
Abhi kumar
994663110d fix: added fix for query suggestions position (#8719)
* fix: added fix for query suggestions position

* chore: added box-shadows in the dropdowns
2025-08-06 12:48:07 +05:30
Abhi kumar
3a2eab2019 fixes: includes fixes required in the new QB (#8675)
* fix: removed unused code for querycontext (#8674)

* Update frontend/src/utils/queryValidationUtils.ts

Co-authored-by: ellipsis-dev[bot] <65095814+ellipsis-dev[bot]@users.noreply.github.com>

* feat: added tooltips in metric aggregations

* feat: enabled legend enhancement for explorer pages and alert page

* feat: updated the error state in explorer pages with new APIError

* fix: cloned panel query shows previous query (#8681)

* fix: cloned panel query shows previous query

* chore: removed comments

* chore: added null check

* fix: added fix for auto run query in dashboard panel + trace view issue

---------

Co-authored-by: Abhi Kumar <abhikumar@Mac.lan>

* feat: added new SubstituteVars api and enable v5 for creating new alerts (#8683)

* feat: added new SubstituteVars api and enable v5 for creating new alerts

* feat: add warning notification for query response

* feat: fixed failing test case

* fix: metric histogram UI config state in edit mode

* fix: fixed table columns getting duplicate data (#8685)

* fix: added fix for conversion of QB function to filter expression. (#8684)

* fix: added fix for QB filters for functions

* chore: minor fix

---------

Co-authored-by: SagarRajput-7 <162284829+SagarRajput-7@users.noreply.github.com>

* feat: query builder fixes and enhancement (#8692)

* feat: legend format fixes around single and multiple aggregation

* feat: fixed table unit and metric units

* feat: add fallbacks to columnWidth and columnUnits for old-dashboards

* feat: fixed metric edit issue and having filter suggestion duplications

* feat: fix and cleanup functions across product for v5

* chore: add tooltips with links to documentation (#8676)

* fix: added fix for query validation and empty query error (#8694)

* fix: added fix for selected columns being empty in logs explorer (#8709)

* feat: added columnUnit changes for old dashboard migrations (#8706)

* fix: fixed keyfetching logic (#8712)

* chore: lint fix

* fix: fixed logs explorer test

* feat: fix type checks

---------

Co-authored-by: ellipsis-dev[bot] <65095814+ellipsis-dev[bot]@users.noreply.github.com>
Co-authored-by: SagarRajput-7 <sagar@signoz.io>
Co-authored-by: Abhi Kumar <abhikumar@Mac.lan>
Co-authored-by: SagarRajput-7 <162284829+SagarRajput-7@users.noreply.github.com>
Co-authored-by: Srikanth Chekuri <srikanth.chekuri92@gmail.com>
2025-08-06 00:16:20 +05:30
SagarRajput-7
01202b5800 feat: created new error content plugin for QB v5 (#8700)
* feat: created new error content plugin for QB v5

* feat: added warning popover content for QB v5 feature

* feat: icon change for warning

* feat: added warning to QB v5 components

* feat: fixed type error

* feat: fix test cases

---------

Co-authored-by: Srikanth Chekuri <srikanth.chekuri92@gmail.com>
2025-08-05 23:45:39 +05:30
Amlan Kumar Nandy
2901e052ae chore: improve metrics explorer empty state (#8711) 2025-08-05 20:45:21 +07:00
Amlan Kumar Nandy
372372694e chore: update home page task tracker for metrics explorer (#8631) 2025-08-05 11:09:31 +05:30
Shaheer Kochai
8e5b1be106 fix: improve the UX of trying to create a funnel with an existing name (#8693) 2025-08-05 09:29:52 +04:30
Amlan Kumar Nandy
301d9ca4dd fix: disabling alert from overview page doesn't work (#8640) 2025-08-05 02:40:19 +00:00
Srikanth Chekuri
f350b0e2f0 chore: add endpoint to replace variables (#8689) 2025-08-04 21:02:54 +05:30
nikhilmantri0902
090538f11f docs: add step to cd into frontend directory before yarn commands (#8696) 2025-08-04 08:41:19 +00:00
Shaheer Kochai
db13f85a3c fix: fix the issue of stale pipeline processor state while switching between different processors (#8661) 2025-08-04 05:58:51 +00:00
Vibhu Pandey
3d874c22b0 chore(resp): add omitempty to timestamp (#8688)
For the requestType: Trace, we don't care about the timestamp in the rawRow. 

- Handling Zero timestamp values in the rawData response
- simplify RawRow `map[string]*any` to `map[string]any` and eliminate unnecessary pointer indirection.
2025-08-03 17:18:51 +05:30
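A minimal Go sketch of the two response changes described above (`omitempty` on the timestamp and `map[string]any` instead of `map[string]*any`); the type and field names are hypothetical stand-ins for the actual response types:

```go
package main

import (
	"encoding/json"
	"fmt"
)

// RawRow is a sketch: values are held directly as `any` rather than `*any`,
// removing the pointer indirection, and a zero Timestamp is dropped from the
// JSON output via omitempty (useful for requestType: Trace responses).
type RawRow struct {
	Timestamp int64          `json:"timestamp,omitempty"`
	Data      map[string]any `json:"data"`
}

func main() {
	row := RawRow{Data: map[string]any{"service.name": "frontend", "duration_nano": 1200}}
	out, _ := json.Marshal(row)
	fmt.Println(string(out)) // {"data":{...}} — no zero timestamp emitted
}
```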
Vibhu Pandey
e68ce11183 test(integration): add traces integration tests (#8666)
- add integration tests for traces listing page
2025-07-31 15:42:34 +05:30
Yunus M
587b0ef6c4 Sign In / Sign Up UI updates (#8562)
* feat: update login page UI

* feat: update login and signup page UI

* chore: font optimization

* feat: update test cases for login

* feat: address review comments

* feat: remove playwright report file

* fix: fix the failing login test

---------

Co-authored-by: ahmadshaheer <ashaheerki@gmail.com>
2025-07-31 13:33:42 +05:30
Yunus M
bb6c366031 feat: new query builder (#8466) 2025-07-31 12:16:55 +05:30
Srikanth Chekuri
5c1f070d8f chore: use new querier for v5 versioned alerts (#8650) 2025-07-30 19:25:27 +05:30
Vishal Sharma
1ce150d4b0 chore: add unsupported saml list (#8665) 2025-07-30 17:08:27 +05:30
Abhi kumar
f6d96c2118 fix: minor fix for changelog popup to show only when preference is available (#8663) 2025-07-30 15:15:13 +05:30
Yunus M
ff3235bd02 fix: replace column resize with grab icon as the functionality is of reorder (#8662) 2025-07-30 12:19:36 +05:30
Nityananda Gohain
8e9a1b34cb fix: use correct column names (#8659) 2025-07-30 10:32:58 +05:30
SagarRajput-7
3d80a03f8a fix: uPlot logarithmic scale range error (#8637)
* fix: uPlot logarithmic scale range error

* fix: removed sanitization of data
2025-07-30 10:41:12 +07:00
Harshit Raj Sinha
1c650c3c23 fix: adding missing flags to run go-run-community (#8653)
The change adds missing flags required during execution of `make go-run-community`. Currently, running the command causes the error "unknown flag: --config", as mentioned in issue #8623.
2025-07-29 23:37:32 +05:30
Ankit Nayan
6b1d62ba8f feat: enable creating more than 3 steps in trace funnels + enable latency pointer (#8628)
* feat: enable creating more than 3 steps in trace funnels

- Remove 3-step limitation from FunnelContext.tsx addNewStep function
- Remove UI restriction in StepsContent.tsx to always show "Add Funnel Step" button
- Allow unlimited funnel steps while maintaining proper step_order indexing
- Step indexing continues to work correctly for API calls (1-based indexing)

🤖 Generated with [Claude Code](https://claude.ai/code)

Co-Authored-By: Claude <noreply@anthropic.com>

* feat: enable latency pointer configuration with smart defaults (#8629)

- Enable latency pointer dropdown UI in funnel step configuration
- Set step 1 default to 'Start of span' and all other steps to 'End of span'
- Add permission-based controls for latency pointer selection

🤖 Generated with [Claude Code](https://claude.ai/code)

Co-authored-by: Ankit Nayan <ankitnayan@Ankits-MacBook-Pro.local>
Co-authored-by: Claude <noreply@anthropic.com>

---------

Co-authored-by: Ankit Nayan <ankitnayan@Ankits-MacBook-Pro.local>
Co-authored-by: Claude <noreply@anthropic.com>
2025-07-29 22:20:40 +05:30
Ankit Nayan
3c53ba308f fix: prevent creation of funnels with duplicate names (#8633)
* fix: prevent creation of funnels with duplicate names

- Fixed Update method to validate duplicate names before updating
- Added proper duplicate name validation that excludes the current funnel being updated
- Fixed incorrect error wrapping in Update method that was marking all errors as "already exists"
- Fixed typo in error message ("funnelr" -> "funnel")
- Added comprehensive tests for duplicate name validation in both Create and Update operations
- Used internal errors package for consistent error handling

The funnel API now properly prevents creating or updating funnels with duplicate names
within the same organization, resolving issues where duplicate funnels could be created
but would fail during retrieval.

🤖 Generated with [Claude Code](https://claude.ai/code)

Co-Authored-By: Claude <noreply@anthropic.com>

* fix: returning error instance

* fix: implement database transactions for funnel creation and updates

- Wrap check-and-create operations in Bun transactions to prevent race conditions
- Apply transaction pattern to both Create() and Update() methods
- Ensures atomic operations when checking for duplicate funnel names
- Prevents concurrent requests from creating duplicate funnels
- Follows existing transaction patterns from user store implementation

Addresses PR feedback for race condition prevention

---------

Co-authored-by: Ankit Nayan <ankitnayan@Ankits-MacBook-Pro.local>
Co-authored-by: Claude <noreply@anthropic.com>
Co-authored-by: Shaheer Kochai <ashaheerki@gmail.com>
2025-07-29 22:04:06 +05:30
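A rough Go sketch of the transaction-wrapped check-and-create pattern this commit describes, using Bun's `RunInTx`; the model, table, and function names are illustrative assumptions, not the repository's actual store code:

```go
package funnelstore

import (
	"context"
	"errors"

	"github.com/uptrace/bun"
)

type Funnel struct {
	bun.BaseModel `bun:"table:trace_funnel"`

	ID    string `bun:"id,pk"`
	OrgID string `bun:"org_id"`
	Name  string `bun:"name"`
}

var ErrAlreadyExists = errors.New("a funnel with this name already exists")

// Create checks for a duplicate name and inserts the funnel inside a single
// transaction, so concurrent requests cannot both pass the duplicate check.
func Create(ctx context.Context, db *bun.DB, funnel *Funnel) error {
	return db.RunInTx(ctx, nil, func(ctx context.Context, tx bun.Tx) error {
		exists, err := tx.NewSelect().
			Model((*Funnel)(nil)).
			Where("org_id = ?", funnel.OrgID).
			Where("lower(name) = lower(?)", funnel.Name). // match after lowercase conversion
			Exists(ctx)
		if err != nil {
			return err
		}
		if exists {
			return ErrAlreadyExists
		}
		_, err = tx.NewInsert().Model(funnel).Exec(ctx)
		return err
	})
}
```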
Ankit Nayan
f2abddd2ed feat: refactor tracefunnel to support dynamic multi-step funnels (#8627)
* feat: refactor tracefunnel to support dynamic multi-step funnels

Replace hardcoded 2-step and 3-step funnel functions with dynamic
implementations that support unlimited steps. Add comprehensive tests
for multi-step funnel functionality while maintaining backward
compatibility.

Key changes:
- Add dynamic query builders for n-step funnels
- Update all query functions to use new builders
- Remove old hardcoded functions
- Add tests for 1-6 step funnels
- Maintain temporal ordering logic

🤖 Generated with [Claude Code](https://claude.ai/code)

Co-Authored-By: Claude <noreply@anthropic.com>

* feat: add duration calculation for latency_pointer='end' in funnel qu… (#8632)

* feat: add duration calculation for latency_pointer='end' in funnel queries

- Updated BuildFunnelOverviewQuery and BuildFunnelStepOverviewQuery to calculate end time
  when latency_pointer is 'end'
- Modified BuildFunnelTopSlowTracesQuery and BuildFunnelTopSlowErrorTracesQuery to support
  latency pointer parameters
- Added comprehensive tests for latency pointer functionality in
  clickhouse_queries_latency_test.go
- When latency_pointer is 'end', the query now adds span duration to timestamp for
  accurate latency calculations

🤖 Generated with [Claude Code](https://claude.ai/code)

Co-Authored-By: Claude <noreply@anthropic.com>

* do matching after lowercase conversion

Co-authored-by: ellipsis-dev[bot] <65095814+ellipsis-dev[bot]@users.noreply.github.com>

---------

Co-authored-by: Ankit Nayan <ankitnayan@Ankits-MacBook-Pro.local>
Co-authored-by: Claude <noreply@anthropic.com>
Co-authored-by: ellipsis-dev[bot] <65095814+ellipsis-dev[bot]@users.noreply.github.com>

* fix: apply remaining changes from PR #8615 for ClickHouse 25.5 compatibility

- Updated BuildTracesFilter to BuildTracesFilterQuery with false parameter in query.go
- Updated test files to expect resource_string_service$$name instead of serviceName
- Fixed function reference in query_test.go

These changes complete the ClickHouse 25.5 compatibility updates while maintaining
the dynamic multi-step funnel functionality.

* fix: replace durationNano with duration_nano for ClickHouse compatibility

- Updated all SQL queries in clickhouse_queries.go to use duration_nano column name
- Updated test expectations in clickhouse_queries_latency_test.go
- Ensures consistency with ClickHouse snake_case column naming convention

* refactor: code formatting and add TODO comment

- Remove trailing whitespace in query.go
- Add TODO comment for GetErroredTraces function regarding product improvement
- Add newline at end of file for proper formatting

---------

Co-authored-by: Ankit Nayan <ankitnayan@Ankits-MacBook-Pro.local>
Co-authored-by: Claude <noreply@anthropic.com>
Co-authored-by: ellipsis-dev[bot] <65095814+ellipsis-dev[bot]@users.noreply.github.com>
2025-07-29 16:18:15 +00:00
Shaheer Kochai
f53a13e7fa chore: write test for recalculating timestamps on clicking stage and run in logs explorer (#8581) 2025-07-29 15:51:54 +00:00
Shaheer Kochai
b69ac637c3 fix: fix the pipeline processor re-ordering issue (#8646) 2025-07-29 15:38:19 +00:00
Vibhu Pandey
a3c039006f chore(goreleaser): fix main path (#8654)
#### Chores

- Fix main path in goreleaser
2025-07-29 13:31:08 +00:00
Shaheer Kochai
2141b1b90a feat: enhance logs explorer chart to display full selected time window (#8446)
* feat: enhance logs explorer chart to display full selected time window

* fix: don't show tooltip in logs chart on empty hover areas + lint fix

---------

Co-authored-by: Nityananda Gohain <nityanandagohain@gmail.com>
2025-07-29 12:30:34 +00:00
aniketio-ctrl
771ba45d01 Chore/added_ilike: added ilike and notIlike filter operator (#8595)
* chore(added-ilike): added ilike operator in qbv5

* chore(added-ilike): added test cases
2025-07-29 11:58:25 +00:00
Nageshbansal
7df5c33ce9 chore: adds advocate.md for community advocate program (#8648) 2025-07-29 11:42:00 +00:00
Vibhu Pandey
537c95e05a test(integration): add or filter with resource attributes (#8651) 2025-07-29 16:33:11 +05:30
Srikanth Chekuri
7d9e0523c9 chore: add anomaly to v5 response (#8643) 2025-07-29 10:00:28 +00:00
aniketio-ctrl
360285ef33 fix(added-backticks): added backticks for hyphen (#8644)
* fix(added-backticks): added backticks for hyphen also

* Update pkg/query-service/utils/format.go

Co-authored-by: ellipsis-dev[bot] <65095814+ellipsis-dev[bot]@users.noreply.github.com>

---------

Co-authored-by: ellipsis-dev[bot] <65095814+ellipsis-dev[bot]@users.noreply.github.com>
2025-07-29 09:49:28 +00:00
Vibhu Pandey
c17241272f test(integration): add integration tests for logs (#8619) 2025-07-29 14:44:16 +05:30
Yunus M
fa936a7e0d fix: open log details on clicking on empty column cell too (#8618) 2025-07-29 14:06:13 +05:30
SagarRajput-7
498d398ea3 chore: resolved conflicting npm-form-data version requirement (#8645) 2025-07-29 11:25:01 +05:30
Srikanth Chekuri
8b2ed674a4 chore: add alert link visitor for expression link (#8641) 2025-07-28 17:44:58 +00:00
Srikanth Chekuri
9a06603ff3 chore: add event for query range v5 (#8639)
I am also using the referrer to derive the information, as it requires some development effort to have the frontend send this information.
2025-07-28 17:35:44 +00:00
Abhi kumar
4888491a79 fix: added fix for changelog modal auto popup for new users (#8634)
* fix: added fix for changelog modal auto popup for new users

* fix: removed custom logic for diff calculation

* chore: minor PR reviews
2025-07-28 21:59:37 +05:30
Srikanth Chekuri
7c9f05c2cc chore: order time series result set (#8638)
## 📄 Summary

- Fix the order by for the time series result
- Add the statement builder for the trace query (it was supposed to be replaced by new development, but that never happened, so we continue with the old table)
- Removed `pkg/types/telemetrytypes/virtualfield.go`; it is not currently used anywhere but was causing a circular import. It will be re-introduced later.
2025-07-28 21:42:56 +05:30
Amlan Kumar Nandy
160802fe11 fix: prefill alert conditions only if no conditions are already set (#8636) 2025-07-28 14:30:57 +00:00
Nityananda Gohain
86057cad9f fix: changes in code to support ch 25.5 (#8615)
* fix: changes in code to support ch 25.5

* fix: address comments

* fix: make changes in funnels
2025-07-28 19:49:52 +05:30
Abhi kumar
210393e281 Fix: Minor UI fixes in changelog + sidebar (#8625)
* fix: removed feature count from changelog modal

* fix: sidenav width fix

* chore: added arrows in external link

* fix: refetch condition fix

* fix: added changes for not showing changelogmodal to restricted users

* chore: minor changes
2025-07-27 18:30:02 +05:30
Yunus M
e96ed433fe chore: update e2e github action and playwright config (#8624)
* chore: update e2e github action and playwright config

* chore: update e2e github action and playwright config
2025-07-27 16:59:17 +05:30
Yunus M
52636284fc feat: update env variable names (#8621) 2025-07-26 11:46:44 +00:00
Yunus M
2639f975ee Update run-e2e.yaml (#8620) 2025-07-26 11:07:31 +00:00
Yunus M
f9db796489 feat: settings e2e sanity test cases (#8613)
* feat: generate e2e test plan and tests using playwright mcp

* feat: settings e2e sanity test cases

* feat: playwright prettier

* feat: update the gitignore file temporarily exclude test plans

* chore: remove e2e test-plan directories from git tracking and update .gitignore

* chore: remove playwright github action from frontend repo

* chore: update base url in playwright config

* chore: add github action to run playwright tests

* chore: wrap env variables in quotes and enable uploading test results always

* chore: update github action

* chore: update github action

* Update run-e2e.yaml

* Update run-e2e.yaml

* feat: update github action
2025-07-26 16:08:47 +05:30
Vibhu Pandey
65018abc4a fix(community): fix injection of alertmanager (#8612) 2025-07-25 13:58:56 +05:30
SagarRajput-7
43706f877a chore: upgraded typescript-plugin-css-modules package to latest and remove stylus resolution (#8611) 2025-07-25 12:41:21 +05:30
Yunus M
d7fdbcd90d feat: open entities in new tab on ctrl / cmd + click (#8607)
* feat: open entities in new tab on ctrl / cmd + click

* feat: open entities in new tab on ctrl / cmd + click
2025-07-25 11:08:12 +05:30
Amlan Kumar Nandy
db0f362482 chore: fix sentry issues in alerts and infra monitoring (#8566) 2025-07-24 14:30:47 +00:00
Yunus M
db440a6eb4 feat: alert module ui improvements (#8572)
* feat: alert module ui improvements

* feat: update something went wrong page

* feat: remove safe navigate hook usage in ErrorBoundaryFallback
2025-07-24 13:40:48 +00:00
Yunus M
76b58b7317 feat: show success message with note to notify user on role change (#8491) 2025-07-24 19:01:17 +05:30
primus-bot[bot]
bba3e95914 chore(release): bump to v0.91.0 (#8596)
Co-authored-by: primus-bot[bot] <171087277+primus-bot[bot]@users.noreply.github.com>
2025-07-24 12:20:10 +05:30
Amlan Kumar Nandy
78df27a140 chore: pre-populate the alert condition based on the selected reduce to and thresholds (#8352) 2025-07-24 06:03:00 +00:00
Aditya Singh
b40fda02cf fix: add safeguard for large log body entries (#8560)
* fix: add safeguard for large log body entries

* feat: added test cases for getSanitizedLogBody

* feat: minor refactor

* feat: revert try catch

* feat: minor refactor

* feat: minor refactor

---------

Co-authored-by: Aditya Singh <adityasingh@Adityas-MacBook-Pro.local>
2025-07-23 13:18:04 +00:00
Nityananda Gohain
c9e8114b5e fix: don't fetch all attributes during build and run query (#8589) 2025-07-23 10:00:37 +00:00
aniketio-ctrl
d712dc1f28 chore(dot-metrics): added log line to check for those queries who are still using normalized metrics (#8518)
* chore(dot-metrics): added log line to check for those queries who are still using normalized metrics

* chore(dot-metrics): added log line to check for those queries who are still using normalized metrics

* chore(dot-metrics): added log line to check for those queries who are still using normalized metrics

* chore(dot-metrics): added log line to check for those queries who are still using normalized metrics

* chore(dot-metrics): added log line to check for those queries who are still using normalized metrics

* chore(dot-metrics): added array metrics search

* chore(dot-metrics): removed regex query and added a simpler metrics in query

* chore(dot-metrics): removed regex query and added a simpler metrics in query

---------

Co-authored-by: Srikanth Chekuri <srikanth.chekuri92@gmail.com>
2025-07-23 14:57:31 +05:30
Nityananda Gohain
7cdff13343 fix: update how index filters are built for resource table (#8561)
* fix: update how index filters are built for resource table

* fix: add fix to new qb
2025-07-23 09:05:15 +00:00
SagarRajput-7
08db2febe1 fix: fixed stylus vulnerability by using the npm provided - 0.0.1-security version (#8592) 2025-07-23 14:25:49 +05:30
Yunus M
a576982497 feat: update app loading screen and add system theme option (#8567)
* feat: update app loading screen and add system theme option

* feat: update test case
2025-07-22 20:13:07 +00:00
Alpcan Yıldız
55eadf914b fix: prevent 1Password extension from interfering with time inputs (#8486)
- Add data-1p-ignore attribute to CustomTimePicker input
- Add data-1p-ignore attribute to RangePickerModal DatePicker
- Add data-1p-ignore attribute to TimezonePicker search input

This prevents 1Password extension from automatically opening when
users interact with time-related input fields, improving UX.

Fixes #8485
2025-07-22 19:34:34 +00:00
Shaheer Kochai
b91407416b feat: add support for single step funnels while creating from span details (#8492)
* feat: add support for single step funnels while creating from span details

* fix: fix the UI for loading state
2025-07-22 13:13:09 +00:00
aniketio-ctrl
24d6d83575 fix(prom-dup-labels): added fingerprint in prom labels (#8563)
* fix(prom-dup-labels): added fingerprint in prom labels

* fix(prom-dup-labels): removed fingerprint labels from result series

* fix(prom-dup-labels): removed fingerprint labels from result series

* fix(prom-dup-labels): removed fingerprint labels from result series

* fix(prom-dup-labels): removed fingerprint labels from result series

* fix(prom-dup-labels): added test cases

* Update pkg/prometheus/clickhouseprometheus/client_query_test.go

Co-authored-by: ellipsis-dev[bot] <65095814+ellipsis-dev[bot]@users.noreply.github.com>

* fix(prom-dup-labels): added test cases

---------

Co-authored-by: ellipsis-dev[bot] <65095814+ellipsis-dev[bot]@users.noreply.github.com>
2025-07-22 10:39:50 +00:00
Sahil Khan
fe95ee716a fix: added attribute check for log details filtering (#8427)
* fix: added attribute check for log details filtering

* chore: added unit tests

* chore: add the missing args to onClickHandler to fix the failing build

---------

Co-authored-by: ahmadshaheer <ashaheerki@gmail.com>
2025-07-22 09:21:07 +00:00
Vibhu Pandey
b053ce23cd ci(prereleaser): remove cron scheduler (#8584) 2025-07-22 09:03:18 +00:00
Amlan Kumar Nandy
57febd2f52 fix: navigating from infra monitoring to logs with open in explorer has missing filters (#8570) 2025-07-22 15:51:48 +07:00
Abhi kumar
ba6a1c594b fix: use only needed resource attributes in context filter (#8568)
* chore: use only needed resource attributes in context filter

* chore: moved regex to constants
2025-07-21 18:37:32 +05:30
Amlan Kumar Nandy
6afdecbd0f chore: add unit tests for metric details drawer (#8515) 2025-07-21 04:45:56 +00:00
Shaheer Kochai
41661a5e28 feat: add support for entrypoint spans toggle in top operations table (#8175)
* feat: add support for entrypoint spans toggle in top operations table

* fix: write tests for entry point toggle

* chore: entry point -> entrypoint

* fix: add info icon and tooltip for entrypoint spans toggle

* fix: fix the copy and link for entrypoint toggle in top operations

* chore: update the tooltip text

* Update frontend/src/container/MetricsApplication/TopOperationsTable.tsx

Co-authored-by: ellipsis-dev[bot] <65095814+ellipsis-dev[bot]@users.noreply.github.com>

* chore: fix the failing build

* chore: update the entry point spans docs link

---------

Co-authored-by: ellipsis-dev[bot] <65095814+ellipsis-dev[bot]@users.noreply.github.com>
2025-07-20 11:20:13 +00:00
Nageshbansal
507dc86af2 Remove docker downloads badge (#8556) 2025-07-19 03:04:38 +00:00
Srikanth Chekuri
ff3bb04655 chore: support legacy cols usage and address several gaps (#8552) 2025-07-18 18:37:57 +05:30
Vibhu Pandey
31c4f800fc feat: add more codeowners (#8558) 2025-07-18 12:05:31 +00:00
aniketio-ctrl
51c2bbcd4b fix(dedup-prom): added check for duplicated samples data (#8502)
* fix(dedup-prom): added check for duplicated samples data

* fix(dedup-prom): added test cases for duplicated samples data

* fix(dedup-prom): added test cases for duplicated samples data
2025-07-18 08:57:00 +00:00
scout9ll
5610cb1f81 fix: retain compositeQuery during pagination and field filtering on exceptions page (#8300) 2025-07-17 15:28:28 +00:00
Yunus M
478d28eda1 feat(license): show refetch payment status button to reconcile payments (#8551) 2025-07-17 20:00:33 +05:30
Vibhu Pandey
ebb2f1fd63 feat(cmd): add cmd package (#8535) 2025-07-17 10:38:31 +00:00
Shaheer Kochai
629e502703 feat: add support for role based access in trace funnels flows (#8481)
* feat: add support for role based access in trace funnels flows

* chore: fix the failing build
2025-07-17 05:47:54 +00:00
primus-bot[bot]
cf4e44d341 chore(release): bump to v0.90.1 (#8546)
Co-authored-by: primus-bot[bot] <171087277+primus-bot[bot]@users.noreply.github.com>
2025-07-16 16:47:39 +05:30
Abhi kumar
7ce1a1cbca fix: added fix for changelog refetching multiple times on tab change (#8545) 2025-07-16 16:35:25 +05:30
Priyanshu Shrivastava
e2253ec7c0 chore(release): bump SigNoz to v0.90.0, OTel Collector to v0.128.1 (#8543) 2025-07-16 12:42:59 +05:30
Yunus M
86be2869a9 fix: light mode for channels create and edit (#8542) 2025-07-16 05:03:09 +00:00
Chitransh
9ec503e302 feat: added new datasources (#8536) 2025-07-16 04:53:22 +00:00
Srikanth Chekuri
77ee201bb7 chore: remove migration notification (#8537) 2025-07-16 10:04:07 +05:30
Nageshbansal
168a7baf6c feat(statsreporter): add ecs platform detection (#8529) 2025-07-15 13:25:30 +00:00
Piyush Singariya
c36c492877 chore: update in agent version (#8457) 2025-07-15 13:01:21 +00:00
Shaheer Kochai
15332b90c1 fix: fix the layout shift issues in logs explorer column view (#8434)
* fix: fix the layout shift issue in logs list column view

* chore: overall improvements
2025-07-15 11:45:44 +00:00
Abhi kumar
53b71d7062 fix: revalidate changelog on tab visibility change (#8532)
* fix: added changes for revalidation changelog on tab change

* chore: removed four hours diff change
2025-07-15 15:17:14 +05:30
Abhi kumar
5e0bf930d6 chore: added new frontend maintainers (#8530) 2025-07-15 12:38:53 +05:30
Piyush Singariya
d6eed8e79d feat: JSON Flattening in logs pipelines (#8227)
* feat: introducing JSON Flattening

* fix: removed bug and tested

* test: removed testing test

* feat: additional severity levels, and some clearing

* chore: minor changes

* test: added tests for processJSONParser

* test: added check for OnError

* fix: review from ellipsis

* fix: variablise max flattening depth

* Update pkg/query-service/app/logparsingpipeline/pipelineBuilder.go

Co-authored-by: ellipsis-dev[bot] <65095814+ellipsis-dev[bot]@users.noreply.github.com>

* Update pkg/errors/errors.go

Co-authored-by: ellipsis-dev[bot] <65095814+ellipsis-dev[bot]@users.noreply.github.com>

* fix: quoted JSON strings fix

* test: updating otel collector for testing

* test: update collector's reference

* chore: change with new error package

* chore: set flattening depth equal to 1

* fix: fallback for depth

* fix: change in errors package

* fix: tests

* fix: test

* chore: update collector version

* fix: go.sum

---------

Co-authored-by: ellipsis-dev[bot] <65095814+ellipsis-dev[bot]@users.noreply.github.com>
Co-authored-by: Nityananda Gohain <nityanandagohain@gmail.com>
2025-07-14 18:48:01 +05:30
Abhi kumar
6137740907 fix: changelog ui fixes (#8526)
* chore: removed redundant scrollbar when space is available

* chore: sidenav version pill can be clicked when changelog is available

* fix: removed upgrade cta for cloud users

* chore: minor ui fix
2025-07-14 17:44:08 +05:30
Nityananda Gohain
1aa6c98822 fix: check if it's castable to string (#8517) 2025-07-14 10:00:15 +00:00
Yunus M
8cf511cdb9 feat: update data on skip in onboarding (#8508) 2025-07-14 13:50:09 +05:30
Aditya Singh
a7ce6da7d1 Show new error component when saving pipeline fails (#8511)
* fix: show new error component when saving pipeline fails

* fix: minor refactor

---------

Co-authored-by: Aditya Singh <adityasingh@Adityas-MacBook-Pro.local>
2025-07-14 13:13:39 +05:30
Shaheer Kochai
ddb08b3883 feat: add support for JSON flattening in pipeline processor create and edit (#8331)
* feat: add support for JSON flattening in pipeline processor create and edit

* chore: add fallback value for mapping state

* feat: improve the UI of json flattening form and show path_prefix if enable_path is true

* fix: improve the state management

* chore: get enablePaths state using useWatch

* chore: json flattening tests

* chore: improve the test descriptions

* fix: update snapshot and adjust the existing failing test

* chore: overall improvements

* fix: update the JSON flattening keys map

* fix: destroy the new processor modal on closing to fix the unintended persistent mapping state

* chore: log event on saving pipeline if json_parser processors get modified

* chore: fix the alignment of json flattening switch

* chore: overall improvement

* refactor: improve the mapping comparison by using lodash's isEqual

* chore: update the snapshot

* refactor: improve the pipeline json_parser processor filtering logic

---------

Co-authored-by: Aditya Singh <adityasinghssj1@gmail.com>
Co-authored-by: Nityananda Gohain <nityanandagohain@gmail.com>
2025-07-14 06:04:04 +00:00
Srikanth Chekuri
893b11c4a0 chore: queries variable replacement and escape $ in mat col (#8514) 2025-07-12 16:47:59 +05:30
Abhi kumar
b7025af703 Feat: show changelog in the sidebar + cloud users (#8503)
* feat: added changes for fetching the current version changelog

* chore: added changes for showing changelog in sidebar

* test: update ChangelogModal tests to use ChangelogSchema and mock data

* chore: update changelog properties in app context mock

* chore: removed changes for current version

* chore: changed fetch key for fetching changelog by version

* chore: added changes related to fetching tenant-specific changelog

* chore: fixed failing test for changelogRenderer

* chore: remove what's new no feature available

* chore: added changes for fetching changelog for specific tenant types

* test: fixed test for changelogRenderer

* Feat: changes for showing changelog to cloud users (#8512)

* feat: added backend changes for storing seen changelog version

* feat: added changes for showing changelog to cloud users

* chore: added useeffect cleanup

* test: fixed test for changelogModal

* chore: minor changes in changelogmodal

* chore: minor PR review fixes
2025-07-11 22:28:09 +05:30
Nityananda Gohain
552d44d208 chore: send email on role update (#8489)
* chore: send email on role update

* fix: minor changes

* fix: update template

* fix: minor changes

* fix: return updated user
2025-07-10 15:17:04 +00:00
SagarRajput-7
497315579f chore: added got at 11.8.5 patch to fix image-webpack-loader vulnerability (#8500) 2025-07-10 20:22:39 +05:30
SagarRajput-7
bfaac15ccb chore: replace image-webpack-loader (deprecated) with image-minimizer-webpack-plugin (#8498)
* chore: replace image-webpack-loader (deprecated) with image-minimizer-webpack-plugin

* chore: used sharp

* chore: remove got resolution
2025-07-10 19:17:02 +05:30
SagarRajput-7
5e18be6a23 chore: added got at 11.8.5 patch to fix image-webpack-loader vulnerability (#8493) 2025-07-10 16:07:10 +05:30
Yunus M
1793706f87 feat: show ingestion keys to self hosted users (#8490) 2025-07-10 14:51:53 +05:30
aniketio-ctrl
da2a3c738a fix(aws-elastic-cache): corrected variable query for elastic cache (#8487)
* fix(aws-elastic-cache): corrected variable query for elastic cache overview.json

* fix(aws-elastic-cache): corrected variable query for elastic cache overview.json

---------

Co-authored-by: Piyush Singariya <piyushsingariya@gmail.com>
2025-07-09 10:21:15 +00:00
primus-bot[bot]
d17dab9a1d chore(release): bump to v0.89.0 (#8482) 2025-07-09 12:06:47 +05:30
Srikanth Chekuri
88b75d4e72 fix(apdex): use right metric name for metadata (#8463) 2025-07-09 09:08:40 +05:30
Sahil Khan
6327ab5ec6 fix: allowed user to select text in json body field in log details (#8450) 2025-07-08 21:28:05 +05:30
Sahil Khan
5b09490ad7 fix: trace details v2 ui bugs (#8448) 2025-07-08 13:51:40 +00:00
Nageshbansal
b50127b567 feat(statsreporter): add railway platform detection (#8467) 2025-07-08 13:01:21 +00:00
Vishal Sharma
ba2ed3ad22 chore: only log telemetry query for explorer, rule and dashboard pages (#8464)
* chore: only log telemetry query for explorer, rule and dashboard pages

* chore: add dashboard and rule properties for no telemetry result
2025-07-08 11:32:46 +00:00
0xflotus
eb3dfbf63b docs: fixed small typo error (#8458)
Co-authored-by: Vibhu Pandey <vibhupandey28@gmail.com>
2025-07-08 16:46:43 +05:30
Nageshbansal
c3e048470d fix: add DOT_METRICS_ENABLED and remove clickhousemetricswrite (#8461) 2025-07-08 15:36:32 +05:30
Vibhu Pandey
4563ff0e62 fix(users): skip sending email if frontend base url is not set (#8459)
skip sending email if frontend base url is not set
2025-07-08 01:47:37 +05:30
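A tiny Go sketch of the guard this commit describes — skip the email entirely when no frontend base URL is configured; the function and invite path here are hypothetical, not SigNoz's actual API:

```go
package main

import "fmt"

// sendInviteEmail is a sketch: when the frontend base URL is not configured,
// there is no valid link to embed, so the email is skipped rather than sent
// with a broken URL. Names here are illustrative only.
func sendInviteEmail(frontendBaseURL, token string, send func(link string) error) error {
	if frontendBaseURL == "" {
		return nil // base URL unset: skip sending instead of emailing a broken link
	}
	return send(frontendBaseURL + "/accept-invite?token=" + token)
}

func main() {
	_ = sendInviteEmail("", "abc123", func(link string) error {
		fmt.Println("would send email with", link)
		return nil
	}) // prints nothing: email skipped because the base URL is empty
}
```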
Vibhu Pandey
c9e48b6de9 feat(sqlschema): add sqlschema (#8384)
## 📄 Summary

- add sqlschema package
- add unique index on email,org_id in users and user_invite
2025-07-08 00:21:26 +05:30
Amlan Kumar Nandy
06ef9ff384 fix: resolve ui full reload on auto-refresh (#8383) 2025-07-07 16:51:06 +00:00
Amlan Kumar Nandy
26d55875f5 chore: fix metrics explorer events (#8411) 2025-07-07 16:34:17 +00:00
Srikanth Chekuri
b1864ee328 chore: use {k8s.pod/k8s.node/container}.cpu.usage metric for metadata and CPU usage charts (#8398) 2025-07-07 11:25:20 +00:00
Amlan Kumar Nandy
8b62c8dced chore: fix regex issue in route tab (#8440) 2025-07-07 16:55:23 +07:00
aniketio-ctrl
273452352d chore(2354): added preloaded metrics metadata at first api call (#8229)
* chore(2354): added preloaded metrics metadata at first api call
2025-07-06 17:09:29 +05:30
Vibhu Pandey
8274ebfe37 fix(memorycache): add a cloneable interface (#8414) 2025-07-05 19:08:23 +05:30
Abhi kumar
7d5e14abb6 fix: simplify changelog fetching logic and enhance version display interactivity (#8432) 2025-07-05 13:49:09 +05:30
primus-bot[bot]
7c17ac42b1 chore(release): bump to v0.88.1 (#8436) 2025-07-04 16:54:57 +05:30
Aditya Singh
74ee7bb2c7 fix: fix setting nav items from multiple places (#8435)
Co-authored-by: Aditya Singh <adityasingh@Adityas-MacBook-Pro.local>
2025-07-04 15:44:53 +05:30
Sahil Khan
2f5640b2e6 fix: used a new classname for banner container; earlier one was in ad block list (#8433)
* fix: used a new classname for the banner container; the earlier one was in an ad block list

* chore: minor comment added

* chore: minor comment added
2025-07-04 08:20:32 +00:00
Aditya Singh
121debcecc Fix drag handle obstruction on dashboard and service details page (#8428)
* fix: fix overlapping pylon support btn on dashboard and services details page

* fix: minor refactor

---------

Co-authored-by: Aditya Singh <adityasingh@Adityas-MacBook-Pro.local>
2025-07-04 12:12:30 +05:30
Sahil Khan
ff13504a74 fix: performance optimizations in log details view json field rendering (#8324)
* fix: log details filters use data types from log data response as primary data type

* chore: added test cases

* test: add comprehensive unit tests for chooseAutocompleteFromCustomValue function

* fix: added datatypes to util and test cases

* fix: added new tests

* fix: performance optimizations in log details view json field rendering

* fix: fixed import failing tests

* fix: added default html rendering field in body field in log details

* fix: fixed eslint errors

* chore: moved hook to a new file and renamed a state
2025-07-04 04:58:26 +00:00
dependabot[bot]
d4e373443b chore(deps): bump github.com/go-viper/mapstructure/v2 (#8379)
Bumps [github.com/go-viper/mapstructure/v2](https://github.com/go-viper/mapstructure) from 2.2.1 to 2.3.0.
- [Release notes](https://github.com/go-viper/mapstructure/releases)
- [Changelog](https://github.com/go-viper/mapstructure/blob/main/CHANGELOG.md)
- [Commits](https://github.com/go-viper/mapstructure/compare/v2.2.1...v2.3.0)

---
updated-dependencies:
- dependency-name: github.com/go-viper/mapstructure/v2
  dependency-version: 2.3.0
  dependency-type: direct:production
...

Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
Co-authored-by: Srikanth Chekuri <srikanth.chekuri92@gmail.com>
2025-07-04 02:40:14 +00:00
Vibhu Pandey
3ccf822d67 fix(statsreporter): add unix timestamps for last observed time (#8426) 2025-07-03 14:25:12 +00:00
Abhi kumar
0e270e6f51 fix: update media styling for changelog renderer and adjust video class (#8425) 2025-07-03 18:35:11 +05:30
Vishal Sharma
749df2a979 fix: nil pointer exception when result[0].Table is nil (#8424)
* fix: nil pointer exception when result[0].Table is nil

* fix: decrease complexity

---------

Co-authored-by: grandwizard28 <vibhupandey28@gmail.com>
2025-07-03 12:13:51 +00:00
Sahil Khan
9ee5d5d599 fix: no logs in list view issue - added logs datasource for formatting and column options in the useoptionsmenu consumption (#8421)
* fix: added logs datasource for formatting and column options in the useoptionsmenu consumption

* fix: changed data source to logs in context log renderer from metrics
2025-07-03 14:32:19 +05:30
Amlan Kumar Nandy
4940dfd46f Revert "chore: fix regex issue (#8393)" (#8418)
This reverts commit 53c58b9983.
2025-07-03 06:04:51 +00:00
Aditya Singh
79a31cc205 Sentry issues (#8264)
* fix: fix breaking json parsing

* fix: sentry fix

* fix: fix sentry edge case

* test: update test for useUrlQueryData

* test: minor fix

---------

Co-authored-by: Aditya Singh <adityasingh@Adityas-MacBook-Pro.local>
2025-07-03 05:33:11 +00:00
Srikanth Chekuri
5102cf2b7b fix: remove deprecated telemetry::metrics::address from config (#8412) 2025-07-02 11:58:40 +00:00
Vishal Sharma
9ec5594648 fix: telemetry query events (#8388)
* fix: telemetry query events

* chore: reduced cyclomatic complexity

* chore: nit
2025-07-02 08:22:54 +00:00
Shaheer Kochai
b6c2ebd6d7 feat: trace to logs custom empty state UI (#8381)
* feat: display custom empty message if no logs on navigating from trace to logs

* chore: write tests for logs explorer normal and custom empty state

* feat: build the custom empty logs UI based on the updated designs

* feat: clear the filters and run stage query on clicking clear filters in logs custom empty state

* fix: update the failing test to match the logs custom empty state

* chore: remove the unnecessary onClick for documentation links

* refactor: overall improvements

* refactor: move the empty logs list config to util

* chore: update the documentation links + remove the explicit height from resources card

* refactor: reuse the EmptyLogsListConfig type in EmptyLogsSearch

* test: update LogsExplorerList tests to reflect changes in documentation links

---------

Co-authored-by: Vishal Sharma <makeavish786@gmail.com>
2025-07-02 07:30:17 +00:00
primus-bot[bot]
9a3a8c8305 chore(release): bump SigNoz to v0.88.0, OTel Collector to v0.128.0 (#8410)
* chore(release): bump to v0.88.0

* chore: bump to v0.111.28

* chore: bump to v0.111.28

---------

Co-authored-by: primus-bot[bot] <171087277+primus-bot[bot]@users.noreply.github.com>
Co-authored-by: grandwizard28 <vibhupandey28@gmail.com>
2025-07-02 12:19:54 +05:30
Nageshbansal
2ac45b0174 feat: adds support for Hetzner and coolify deployment platform in statsreporter (#8409) 2025-07-02 06:30:32 +00:00
Srikanth Chekuri
2a53918ebd chore: make queries compatible with 24.1 and fix string json query (#8391) 2025-07-02 05:09:16 +00:00
Shaheer Kochai
9daefeb881 fix: override the stagedQuery orderBy and send order by timestamp in traces view of traces explorer (#8390)
* fix: override the stagedQuery orderBy and send order by timestamp in traces view of traces explorer

* chore: write test for sending order by timestamp in the traces view of traces explorer

* refactor: refactor the query transformer to accept partial query object and override fields
2025-07-01 14:14:07 +00:00
Shaheer Kochai
526cf01cb7 fix: fix the issue of traces filters getting duplicated on switching between the span scopes (#8389)
* fix: fix the issue of changing span scope duplicating filters

* chore: write test for duplicate filters issue on changing the span scope
2025-07-01 07:53:33 +00:00
Amlan Kumar Nandy
cd4766ec2b fix: correct step numbering for non-metric based alerts (#8367) 2025-07-01 05:29:24 +00:00
Amlan Kumar Nandy
2196b58d36 fix: correct query data for cluster details metric view in infra monitoring (#8380) 2025-07-01 05:12:11 +00:00
Amlan Kumar Nandy
53c58b9983 chore: fix regex issue (#8393) 2025-07-01 11:32:07 +07:00
Piyush Singariya
d174038dce fix: panic after connecting to collector (#8344) 2025-06-26 16:34:49 +05:30
Srikanth Chekuri
78d09e2940 chore: log the request and expected response payload (#8341) 2025-06-26 09:40:31 +00:00
Srikanth Chekuri
6cb7f152e1 chore: bump opamp-go version (#8310) 2025-06-26 15:01:17 +05:30
Amlan Kumar Nandy
f6730d3d09 chore: update memory usage field in hosts list to exclude cached memory (#8173) 2025-06-26 13:36:16 +07:00
Nityananda Gohain
899a6ab70a fix: fetch only required traces fields (#8351)
* fix: fetch only required traces fields

* fix: remove only logs case in field name

* fix: add extra if condition for logs json field names

* fix: tests
2025-06-25 18:29:22 +05:30
Amlan Kumar Nandy
a4b852bb99 chore: fix environment filter in infra monitoring (#8357) 2025-06-25 10:46:05 +00:00
Vishal Sharma
92cd108c0d doc: update docker metrics doc link (#8358) 2025-06-25 15:55:12 +05:30
SagarRajput-7
34c116fc7e fix: fixed stepInterval not getting updated in the request payload for Bar (#8350)
* fix: fixed stepInterval not getting updated in the request payload for Bar

* fix: added test case
2025-06-25 11:47:59 +05:30
Vibhu Pandey
250646a354 feat(telemetry): remove telemetry (#8326) 2025-06-24 15:59:23 +00:00
Shaheer Kochai
00191d5774 fix: show status message in trace details v2 drawer (#8346) 2025-06-24 15:36:36 +00:00
Sahil Khan
525a0d7a1a fix: back button issue in trace details page (#8347) 2025-06-24 15:27:24 +00:00
Sahil Khan
564edc7430 fix: added network call on search in explorercolumnsrenderer with debounce (#8325) 2025-06-24 15:16:53 +00:00
Amlan Kumar Nandy
78f396b94a chore: add environment filter in infra monitoring (#8309) 2025-06-24 13:25:34 +00:00
Sahil Khan
9e53c150b8 fix: added missing context provider (#8342) 2025-06-24 18:00:47 +05:30
Shaheer Kochai
f80a6c3014 feat: add support for expandable popover for stack message and body in trace details page (#8330)
* feat: add support for expandable popover for stack message and body in trace details page

* refactor: overall improvements + refactor ExpandableAttribute
2025-06-24 11:52:33 +00:00
Vishal Sharma
1eff6d82c9 fix: color code expiry dates in ingestion key settings (#8323) 2025-06-24 13:00:30 +05:30
Shaheer Kochai
f138eff26c fix: fix the flickering in logs explorer table view (#8304) 2025-06-24 08:31:13 +04:30
Vishal Sharma
50f3fc0ff9 Chore/update request texts and integrations (#8305)
* chore: update request dashboard and integrations text

* chore: hide tab/table when data is not available

* chore: add new template text

* fix: test case
2025-06-23 14:24:47 +00:00
Yunus M
ebcb172614 feat(user-profile): update organisation onboarding questions (#8206)
* feat: update org onboarding questions

* feat: type updates

* chore(user-profile): update the onboarding questions

---------

Co-authored-by: Vikrant Gupta <vikrant@signoz.io>
2025-06-23 13:27:05 +00:00
Shaheer Kochai
133c0deaa8 fix: prevent sending order by id with traces query (#8250)
* fix: prevent sending order by id with traces query

* test: write tests for preventing sending order by id with traces query
2025-06-23 12:34:59 +00:00
Shaheer Kochai
35e8165463 fix: recalculate the query_range start and end timestamps for logs explorer chart and list queries (#8277)
* fix: recalculate start and end time only for relative time ranges

* fix: don't dispatch UpdateTimeInterval on initial load

* fix: change list query to state instead of memo

* fix: fix the failing test

* chore: improvement to the test

---------

Co-authored-by: Aditya Singh <adityasingh@Adityas-MacBook-Pro.local>
2025-06-23 09:06:46 +00:00
Srikanth Chekuri
6d009c6607 chore: recognize variable in expression (#8328) 2025-06-23 08:30:50 +00:00
Amlan Kumar Nandy
f0994e52c0 chore: alerts fixes and improvements (#8327) 2025-06-23 07:08:17 +00:00
Srikanth Chekuri
7f5b388722 chore: add time range optimization for trace id search (#8317) 2025-06-23 04:09:19 +00:00
Sahil Khan
b11a4c0c21 fix: log details filters use data types from log data response as primary data type (#8278)
* fix: log details filters use data types from log data response as primary data type

* chore: added test cases

* test: add comprehensive unit tests for chooseAutocompleteFromCustomValue function

* fix: added datatypes to util and test cases

* fix: added new tests
2025-06-22 10:58:43 +00:00
Srikanth Chekuri
bbb21f608f chore: more validation, zero values and enfore max step interval (#8319) 2025-06-21 17:49:33 +05:30
Nityananda Gohain
50a5b88708 fix: fetch only required log fields (#8299)
* fix: fetch only required log fields

* fix: update old endpoints

* fix: remove old code
2025-06-21 04:37:57 +00:00
Vibhu Pandey
5601c0886d chore(signoz): deprecate all flags (#8308)
Deprecate all flags

- Use querier.config.fluxInterval in lieu of passing `--flux-interval` and `--flux-interval-for-trace-detail`
- Remove `--gateway-url`
- Use telemetrystore.clickhouse.cluster in lieu of passing `--cluster` or `--cluster-name`
- Add an `unparam` check in the linter. Updated some functions across the querier codebase to be compatible with this linter.
- Remove prometheus config from docker builds.
2025-06-21 00:55:38 +05:30
Srikanth Chekuri
5b342b9b5d chore: handle nan/inf in response (#8318) 2025-06-20 22:26:25 +05:30
Vishal Sharma
7ec59c3c77 chore: move posthog and appcues ids to userId and orgId (#8316) 2025-06-20 13:21:59 +00:00
Nityananda Gohain
a12990f0bd fix: update trace panel query (#8315) 2025-06-20 11:18:17 +00:00
Yunus M
1ee1ca7951 fix: update app layout height based on banners visible (#8307)
* fix: update app layout height based on banners visible

* fix: show banners only in logged in state

---------

Co-authored-by: Vishal Sharma <makeavish786@gmail.com>
2025-06-20 11:08:30 +05:30
Abhi kumar
3b1bf34d3e feat(changelog): show changelogs for newer versions available (#8270)
* feat(changelog): add getChangelogByVersion API and related types

* feat(changelog): implement ChangelogModal and ChangelogRenderer components with styles

* test(dateUtils): add unit tests for formatDate utility

* chore(changelog): fixed pr review changes

* style(ChangelogRenderer): format SCSS for improved readability

* feat(SideNav): integrate ChangelogModal and manage its visibility state

* feat(changelog): refactor changelog handling and integrate into app state

* test(ChangelogModal): add unit tests for scroll functionality and data rendering

* test(ChangelogRenderer): add unit tests for rendering changelog details

* test(ChangelogModal, ChangelogRenderer): refactor tests

* fix(applayout): not fetching changelog for cloud users

* fix(ChangelogModal): update footer to display feature count dynamically

* fix(ChangelogModal): update link for workspace migration to point to releases page

* feat(ChangelogModal): enhance footer layout and update link behavior

* test(ChangelogModal): update link for workspace migration to point to releases page

* refactor(AppContext): migrate changelog state management to context and update related components

* feat(test-utils): add changelog state and updateChangelog mock to app context

* test(changelogModal): fixed test by adding mock for useAppContext

* fix: added PR review fixes

* Fixed css variable name in ChangelogModal.styles.scss

Co-authored-by: ellipsis-dev[bot] <65095814+ellipsis-dev[bot]@users.noreply.github.com>

* fix(style): added light mode support for changelog modal

* Fixed heading color token

Co-authored-by: ellipsis-dev[bot] <65095814+ellipsis-dev[bot]@users.noreply.github.com>

* fix: remove debug log for isLatestVersion in AppLayout

---------

Co-authored-by: ellipsis-dev[bot] <65095814+ellipsis-dev[bot]@users.noreply.github.com>
2025-06-20 10:55:52 +05:30
Vibhu Pandey
fbcff29fae chore(sqlstore): remove sqlx (#8306)
## 📄 Summary

remove sqlx
2025-06-20 00:34:54 +05:30
Yunus M
81fcca3bd3 fix: use pathname to get channel id while saving (#8303) 2025-06-19 14:57:32 +00:00
Yunus M
4f7d84aa37 fix: use pathname to get channel id (#8298) 2025-06-19 19:28:47 +05:30
Abhi kumar
8f8dedb8b3 fix(sidebar): added fix for routes not highlighting, minor gitter fix (#8297)
* fix(sidebar): added fix for routes not highlighting, minor gitter fix

* chore(routes): tsc fix

* fix(private): added check in private route for routes with no role

* fix(private): minor fix in condition

* chore: added roles in empty routes
2025-06-19 16:17:54 +05:30
Ankit Nayan
3f65229506 fix: tracefunnel analytics duration fixes + 2-step funnel fixes (#8294) 2025-06-19 06:19:31 +00:00
Srikanth Chekuri
f006260719 chore: find contradictory condition keys in expression (#8238) 2025-06-19 05:40:50 +00:00
Piyush Singariya
3fc8f6c353 fix: JSON Query parse string int value (#8292)
* fix: json query parse string int

* chore: minor

* Update pkg/query-service/app/logs/v3/enrich_query_test.go

Co-authored-by: ellipsis-dev[bot] <65095814+ellipsis-dev[bot]@users.noreply.github.com>

---------

Co-authored-by: ellipsis-dev[bot] <65095814+ellipsis-dev[bot]@users.noreply.github.com>
2025-06-18 16:14:23 +00:00
Yunus M
e02ae9a5c4 feat: show billing, settings to admin when workspace is blocked (#8291)
* feat: show billing, settings to admin when workspace is blocked

* feat: enable keyboard shortcuts page for all

* feat: remove duplicated option
2025-06-18 20:43:30 +05:30
Nityananda Gohain
1989d07e52 fix: delete existing agents in migration (#8289) 2025-06-18 18:06:36 +05:30
Shaheer Kochai
78194ae955 chore: remove dev env check (#7994)
Co-authored-by: Shivanshu Raj Shrivastava <shivanshu1333@gmail.com>
2025-06-18 07:45:54 +00:00
Shivanshu Raj Shrivastava
da1b6d1ed0 feat: adds a final part of trace funnel feature (analytics APIs, and analytics queries) implementation (#8129)
* feat: trace funnel queries

Signed-off-by: Shivanshu Raj Shrivastava <shivanshu1333@gmail.com>

* fix: update access

Signed-off-by: Shivanshu Raj Shrivastava <shivanshu1333@gmail.com>

* fix: fix queries

Signed-off-by: Shivanshu Raj Shrivastava <shivanshu1333@gmail.com>

* fix: minor fix in handler

Signed-off-by: Shivanshu Raj Shrivastava <shivanshu1333@gmail.com>

* fix: update clauses

Signed-off-by: Shivanshu Raj Shrivastava <shivanshu1333@gmail.com>

* fix: update step overview queries

Signed-off-by: Shivanshu Raj Shrivastava <shivanshu1333@gmail.com>

* fix: add new api endpoints for analytics (#8253)

Signed-off-by: Shivanshu Raj Shrivastava <shivanshu1333@gmail.com>

* fixing steps and funnel (#8283)

* add todo: remove identical function

---------

Signed-off-by: Shivanshu Raj Shrivastava <shivanshu1333@gmail.com>
Co-authored-by: Ankit Nayan <ankit@signoz.io>
2025-06-18 07:40:20 +00:00
Amlan Kumar Nandy
d3c76ae8be chore: update alert details error state (#8246) 2025-06-18 07:20:07 +00:00
Shaheer Kochai
bed3dbc698 chore: funnel run and save flow changes (#8231)
* feat: while the funnel steps are invalid, handle auto save in local storage

* chore: handle lightmode style in 'add span to funnel' modal

* fix: don't save incomplete steps state in local storage if last saved configuration has valid steps

* chore: close the 'Add span to funnel' modal on clicking save or discard

* chore: deprecate the run funnel flow for unexecuted funnel

* feat: change the funnel configuration save logic, and deprecate auto save

* refactor: send all steps in the payload of analytics/overview

* refactor: send all steps in the payload of analytics/steps (graph API)

* chore: send all steps in the payload of analytics/steps/overview API

* chore: send funnel steps with slow and error traces + deprecate the refetch on latency type change

* chore: overall improvements

* chore: change the save funnel icon + increase the width of funnel steps

* fix: make the changes w.r.t. the updated funnel steps validation API + bugfixes

* fix: remove funnelId from funnel results APIs

* fix: handle edge case i.e. refetch funnel results on deleting a funnel step

* chore: remove funnel steps configuration cache on removing funnel

* chore: don't refetch the results on changing the latency type

* fix: fix the edge cases of save funnel button being enabled even after saving the funnel steps

* chore: remove the span count column from top traces tables

* fix: fix the failing CI check by removing unnecessary props / fixing the types
2025-06-18 06:08:41 +00:00
Amlan Kumar Nandy
66affb0ece chore: add unit tests for hosts list in infra monitoring (#8230) 2025-06-18 05:53:42 +00:00
Vibhu Pandey
75f62372ae feat(analytics): move frontend event to group_id (#8279)
* chore(api_key): add api key analytics

* feat(analytics): move frontend events

* feat(analytics): add collect config

* feat(analytics): add collect config

* feat(analytics): fix traits

* feat(analytics): fix traits

* feat(analytics): fix traits

* feat(analytics): fix traits

* feat(analytics): fix traits

* feat(analytics): fix factor api key

* fix(analytics): fix org stats

* fix(analytics): fix org stats
2025-06-18 01:54:55 +05:30
Sahil Khan
a3ac307b4e fix: sentry issues SIGNOZ-UI-Q9 SIGNOZ-UI-QA (#8281) 2025-06-17 23:44:21 +05:30
Vikrant Gupta
7672d2f636 chore(user): return user resource on register user request (#8271) 2025-06-17 17:26:06 +05:30
aniketio-ctrl
e3018d9529 fix(8232): added fix for error graph in services tab (#8263) 2025-06-17 08:08:38 +00:00
Nityananda Gohain
385ee268e3 fix: use first org in agent migration (#8269)
* fix: exit gracefully if there is more than one org

* fix: use first org
2025-06-17 06:25:12 +00:00
Piyush Singariya
01036a8a2f fix: top level keys EXIST and NOTEXIST filter simulation (#8255)
* fix: top level keys EXIST and NOTEXIST filter simulation

* test: fix tests

* test: temporarily change collector version

* test: updating go.mod

* fix: tests

* chore: revert changes

* chore: update collector's reference to stable version
2025-06-17 11:28:40 +05:30
Srikanth Chekuri
1542b9d6e9 chore: disallow unknown fields and address gaps (#8237) 2025-06-16 23:11:28 +05:30
Nityananda Gohain
8455349459 fix: support orgId and postgres in agents (#7327)
* fix: initial commit for agents

* fix: remove frontend package manager commit

* fix: use sqlstore

* fix: opamp server changes

* fix: tests

* fix: tests

* fix: minor changes

* fix: migrations

* fix: use uuid7

* fix: use default orgID for single tenant

* fix: pipelines tests fixed

* fix: use correct agentId

* fix: use orgID in coordinator

* fix: fix tests

* fix: remove redundant hash check

* fix: migration

* fix: migration

* fix: address comments

* fix: rename migration file

* fix: remove unwanted orgid code

* fix: use orggetter

* fix: comment

* fix: schema cleanup

* fix: minor changes

* chore: addresses changes

* fix: add back agentID as it used ulid

* fix: keep only 50 agents for an orgId

* chore: explicitly specify text type

* chore: use valuer.uuid for orgid

* fix: linting complain

* chore: final fixes

* chore: minor changes

* fix: add not null

* fix: fe tests

---------

Co-authored-by: Vikrant Gupta <vikrant@signoz.io>
2025-06-16 20:07:16 +05:30
aniketio-ctrl
c488a24d09 fix(prom-aggr): fix prom aggregation queries using utf-8 charset (#8262)
* fix(prom-aggr): added fix for prom aggregation

* fix(prom-aggr): added fix for prom aggregation
2025-06-16 19:42:17 +05:30
Vikrant Gupta
9091cf61fd chore(github): fix codeowners file (#8261) 2025-06-16 11:37:07 +00:00
Ekansh Gupta
eeb2ab3212 feat: added support for trace_operators in query range v5 (#8165)
* feat: added support for trace_operators in query range v5

* feat: added support for trace_operators in query range v5

* feat: added support for trace_operators in query range v5

* feat: added support for trace_operators in query range v5\n

* feat: added support for trace_operators in query range v5\n

* feat: added support for trace_operators in query range v5

* feat: added support for trace_operators in query range v5

* feat: added support for trace_operators in query range v5

* feat: added support for trace_operators in query range v5

---------

Co-authored-by: Srikanth Chekuri <srikanth.chekuri92@gmail.com>
2025-06-16 16:43:51 +05:30
Nageshbansal
3f128f0f1d fix: configs in multi-node docker-swarm cluster (#8239) 2025-06-16 11:42:02 +05:30
Vibhu Pandey
59ff7ed1e1 feat(licensing): add analytics (#8252) 2025-06-16 01:09:41 +05:30
Vibhu Pandey
d236b6ce1e feat(statsreporter): add stats for telemetry.*.last_observed.time (#8251)
## 📄 Summary

- add stats for telemetry.*.last_observed.time
2025-06-16 00:02:17 +05:30
Dimitris Mavrommatis
44b118a212 fix: span links tab to span details drawer (#7888)
* fix: span links tab to span details drawer

* Update LinkedSpans.styles.scss

---------

Co-authored-by: Vishal Sharma <makeavish786@gmail.com>
2025-06-15 16:22:36 +04:30
Dimitris Mavrommatis
3fc6f7ee63 feat(trace): add visuals for events on span waterfall and flamegraph (#7889)
* fix: add visuals on waterfall and flamegraph for span events

* fix: correct offsets for events

* fix: addressed comments

* chore: update the name of the import

* fix: interface change

* chore: formatting

---------

Co-authored-by: Nityananda Gohain <nityanandagohain@gmail.com>
Co-authored-by: Vikrant Gupta <vikrant@signoz.io>
2025-06-15 10:39:39 +00:00
Sahil Khan
f1016baf03 chore: updated http-proxy-middleware to 3.0.5 from 3.0.3 (#8245) 2025-06-13 21:35:48 +05:30
Amlan Kumar Nandy
e5c0d9e44a chore: allow url as label value in alerts (#8244) 2025-06-13 17:47:01 +07:00
Yunus M
e51056c804 fix: use preference.name rather than preference.key (#8234) 2025-06-13 11:44:42 +05:30
Nityananda Gohain
7d8dad4550 Revert "fix: remove whitespace from sso cert (#8141)" (#8233)
This reverts commit 44ea237039.
2025-06-12 22:39:24 +05:30
Yunus M
c477e0ef16 feat: sidebar revamp (#8087)
* feat: sidebar revamp - initial commit

* feat: move billing and other isolated routes to settings

* feat: handle channel related routes

* feat: update account settings page

* feat: show dropdown for secondary items

* feat: handle reordering of pinned nav items

* feat: improve font load performance

* feat: update font reference

* feat: update page content styles

* feat: handle external links in sidebar

* feat: handle secondary nav item clicks

* feat: handle pinned nav items reordering

* feat: handle sidenav pinned state using preference, handle light mode

* feat: show sidenav items conditionally

* feat: show version diff indicator only to self-hosted users

* feat: show billing to admins only and integrations to cloud and enterprise users

* feat: update fallback link

* feat: handle settings menu items

* fix: settings page reload on nav change

* feat: intercom to pylon

* feat: show invite user to admin only

* feat: handle review comments

* chore: remove react query dev tools

* feat: minor ui updates

* feat: update changes based on preference store changes

* feat: handle sidenav shortcut state

* feat: handle scroll for more

* feat: maintain shortcuts order

* feat: manage license ui updates

* feat: manage settings options based on license and roles

* feat: update types

* chore: add logEvents

* feat: update types

* chore: fix type errors

* chore: remove unused variable

* feat: update my settings page test cases

---------

Co-authored-by: makeavish <makeavish786@gmail.com>
2025-06-12 19:55:32 +05:30
Shaheer Kochai
fff7f8fc76 feat: add span scope filter to trace details page (#8005)
* feat: add span scope filter to trace details page

* chore: add tests for the span scope selector flows when onchange and query are provided

* refactor: remove the unnecessary queryName prop and infer it from query

* fix: fix the failing span scope selector tests
2025-06-12 11:03:28 +00:00
Shaheer Kochai
8cfeef4521 fix: fix sentries (#8003)
* fix: handle potential undefined values in groupBy calculation in TracesExplorer

* fix: add optional chaining for aggregateAttribute key check in Query component

* fix: add optional chaining for filters in SpanScopeSelector to handle potential undefined values

* fix: fix the warning in logs chart by adding the missing date-time format option

* fix: improve trace graph allDataPoints null check

* chore: remove the keys.length from null check
2025-06-12 15:28:06 +04:30
Sahil Khan
d85a1a21ac feat: generalised preferences framework (#7903)
* feat: scaffolded the generalised preferences framework

* feat: preferences framework integrated logs & saved logs views

* feat: fixed bugs in saved views for traces

* fix: removed unused file

* fix: wrapped metric explorer inside preferences context as it uses useoptions hook

* feat: added tests for preferences framework alongside some minor bugs improvements

* chore: added tests for traces loader and updater

* chore: fixed failing tests due to new context for preferences

* fix: minor saved views handling bug

* fix: minor pref fix

* fix: breaking tests

* fix: undo removal of pref context from live logs

* feat: split the logic of columns and formatting load in logs

* fix: breaking tests

* fix: pr comments

* fix: minor bug and pr comments regarding better resync

* fix: bugs in internal flows

* fix: url pref sync

* fix: minor bug fix

* fix: fixed failing tests

* fix: fixed failing tests
2025-06-11 10:06:01 +00:00
primus-bot[bot]
17f48d656d chore(release): bump to v0.87.0 (#8222)
Co-authored-by: primus-bot[bot] <171087277+primus-bot[bot]@users.noreply.github.com>
2025-06-11 12:06:17 +05:30
Srikanth Chekuri
2d6774da68 fix: add missing denominator for reset case (#8180) 2025-06-11 11:32:50 +05:30
Vibhu Pandey
62a9d7e602 docs(contributing): add endpoint docs (#8215)
* docs(contributing): add endpoint docs

* docs(contributing): add endpoint docs

* docs(contributing): add endpoint docs

* docs(contributing): add endpoint docs

* docs(contributing): add endpoint docs
2025-06-10 17:25:07 +00:00
Vikrant Gupta
3a2c7a7a68 fix(dashboard): create dashboard panic for id (#8214) 2025-06-10 21:31:56 +05:30
Sahil Khan
33e70d1f37 fix: traces back button issue (#8041)
Co-authored-by: Vishal Sharma <makeavish786@gmail.com>
2025-06-10 13:25:59 +00:00
Srikanth Chekuri
85f04e4bae chore: add querier HTTP API endpoint and bucket cache implementation (#8178)
* chore: update types
1. add partial bool to indicate if the value covers the partial interval
2. add optional unit if present (ex: duration_nano, metrics with units)
3. use pointers wherever necessary
4. add format options for request and remove redundant name in query envelope

* chore: fix some gaps
1. make the range as [start, end)
2. provide the logs statement builder with the body column
3. skip the body filter on resource filter statement builder
4. remove unnecessary agg expr rewriter in metrics
5. add ability to skip full text in where clause visitor

* chore: add API endpoint for new query range

* chore: add bucket cache implementation

* chore: add fingerprinting impl and add bucket cache to querier

* chore: add provider factory
2025-06-10 12:56:28 +00:00
Shaheer Kochai
53f9e7d811 chore: trace funnels bugfixes/improvements (#8114)
* fix: refetch funnel steps overview on clicking refresh

* chore: temporarily hide latency pointer from funnel steps

* chore: remove the existing filters of a step on clicking replace button

* fix(useLocalStorage): stabilize initialValue handling to prevent unnecessary re-renders

* chore: remove p99_latency references from funnel metrics

* fix(useFunnelMetrics): ensure latency type defaults to P99 when undefined
2025-06-10 12:45:25 +00:00
Vibhu Pandey
ad46e22561 docs(contributing): add provider docs (#8193) 2025-06-10 05:39:14 +00:00
Yunus M
e79195ccf1 fix: handle alert list updates (#8109) 2025-06-10 10:56:45 +05:30
Amlan Kumar Nandy
f77bb888a8 chore: add analytics for metrics explorer (#8108) 2025-06-10 04:42:49 +00:00
Amlan Kumar Nandy
baa15baea9 chore: metrics explorer summary view fixes (#8126) 2025-06-10 04:18:12 +00:00
Vibhu Pandey
316e6821f1 feat(statsreporter): report stats on stop (#8187) 2025-06-10 07:55:32 +05:30
Vibhu Pandey
a1fa2769e4 feat(statsreporter): build a statsreporter service (#8177)
- build a new statsreporter service
2025-06-09 16:43:29 +05:30
Vikrant Gupta
decb660992 chore(sqlmigration): drop the rule history and data migrations table (#8181) 2025-06-09 15:46:22 +05:30
Amlan Kumar Nandy
0acbcf8322 chore: remove critters-webpack-plugin (#8172) 2025-06-07 16:21:06 +07:00
dependabot[bot]
11eabdc2ac chore(deps): bump webpack-dev-server from 4.15.2 to 5.2.1 in /frontend (#8160)
* chore(deps): bump webpack-dev-server from 4.15.2 to 5.2.1 in /frontend

Bumps [webpack-dev-server](https://github.com/webpack/webpack-dev-server) from 4.15.2 to 5.2.1.
- [Release notes](https://github.com/webpack/webpack-dev-server/releases)
- [Changelog](https://github.com/webpack/webpack-dev-server/blob/master/CHANGELOG.md)
- [Commits](https://github.com/webpack/webpack-dev-server/compare/v4.15.2...v5.2.1)

---
updated-dependencies:
- dependency-name: webpack-dev-server
  dependency-version: 5.2.1
  dependency-type: direct:production
...

Signed-off-by: dependabot[bot] <support@github.com>

* feat: upgraded webpack-cli for compatibility with the webpack-dev-server upgrade

---------

Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
Co-authored-by: SagarRajput-7 <162284829+SagarRajput-7@users.noreply.github.com>
Co-authored-by: SagarRajput-7 <sagar@signoz.io>
2025-06-07 07:24:18 +00:00
Vibhu Pandey
eb94554f5a feat(preference): add support for objects and arrays (#8142)
* refactor(preference): better readability

* refactor: better readability

* refactor: better readability

* fix: change frontend contract

* refactor: change frontend

* refactor: change frontend

* refactor: change frontend

* refactor: change frontend

* chore: fix tsc

* chore: fix tsc

* chore: fix tsc

* chore: fix tsc
2025-06-06 22:38:28 +05:30
Piyush Singariya
e8280dbea4 feat: Adding ContainerInsights in ECS Integrations (AWS) (#8122)
* fix: adding ECS ContainerInsights

* chore: dashboard added

* chore: format integrations.json

* feat(7294): added _dot metrics for aws ecs

---------

Co-authored-by: aniket <aniket@signoz.io>
2025-06-06 09:27:35 +00:00
Nityananda Gohain
44ea237039 fix: remove whitespace from sso cert (#8141)
* fix: remove whitespace from sso cert

* fix: use trimspace instead

* fix: use replaceall
2025-06-06 09:03:46 +00:00
Srikanth Chekuri
72b0214d1d chore: add range query impl for promql (#8130) 2025-06-05 19:18:44 +00:00
Srikanth Chekuri
386a215324 chore: metric statement builder (#8104) 2025-06-06 00:38:48 +05:30
Vibhu Pandey
ba0ba4bbc9 build(go): upgrade purego to v0.8.4 (#8159) 2025-06-05 12:31:49 +00:00
SagarRajput-7
d60c9ab36b feat: handle unknown metric in panel query (#8083)
* feat: handle unknown metric in panel query

* feat: added handling with type empty and key present or not

* feat: added test cases

* feat: added comment to better explain the logic

* feat: fixed operator list for unknown metric

---------

Co-authored-by: Srikanth Chekuri <srikanth.chekuri92@gmail.com>
2025-06-05 11:23:25 +00:00
Piyush Singariya
90770b90bd feat: Introducing EKS integration (AWS) (#8021)
* feat: introducing EKS integration (AWS)

* fix: update metrics and enable logs collection

* feat: eks Overview dashboard ready

* feat: containerinsights incoming

* chore: dashboard name update

* feat(7294): added _dot metrics for aws ecs

* feat(7274): added dot metrics for overview metrics in eks

* Update pkg/query-service/app/cloudintegrations/services/definitions/aws/eks/assets/dashboards/overview_dot.json

Co-authored-by: ellipsis-dev[bot] <65095814+ellipsis-dev[bot]@users.noreply.github.com>

* Update pkg/query-service/app/cloudintegrations/services/definitions/aws/eks/assets/dashboards/containerinsights_dot.json

Co-authored-by: ellipsis-dev[bot] <65095814+ellipsis-dev[bot]@users.noreply.github.com>

---------

Co-authored-by: aniket <aniket@signoz.io>
Co-authored-by: ellipsis-dev[bot] <65095814+ellipsis-dev[bot]@users.noreply.github.com>
2025-06-05 15:35:39 +05:30
Vikrant Gupta
a19874c1dd fix(dashboard): dashboards/alerts info telemetry fix (#8161) 2025-06-05 13:47:25 +05:30
Piyush Singariya
65ff460d63 fix: Enhance filter support for Pipeline Simulation (#8134)
* feat: enhance filter support for JSON log body

* test: added tests for exists and not exists

* test: remove the value
2025-06-05 05:05:39 +00:00
primus-bot[bot]
b9d542a294 chore(release): bump to v0.86.2 (#8154) 2025-06-04 14:53:32 +00:00
aniketio-ctrl
e75e5bdbdb feat(7294): added flag columns in query (#8153)
Co-authored-by: Srikanth Chekuri <srikanth.chekuri92@gmail.com>
2025-06-04 20:10:52 +05:30
Srikanth Chekuri
0d03203977 chore: add formula evaluator (#8112) 2025-06-04 13:40:42 +00:00
primus-bot[bot]
28f6f42ac4 chore(release): bump to v0.86.1 (#8152)
Co-authored-by: primus-bot[bot] <171087277+primus-bot[bot]@users.noreply.github.com>
2025-06-04 13:16:42 +05:30
Vibhu Pandey
92f8e4d5b9 fix(alertmanager): fix legacy alertmanager injection (#8151) 2025-06-04 07:29:40 +00:00
primus-bot[bot]
037eea5262 chore(release): bump to v0.86.0 (#8150)
Co-authored-by: primus-bot[bot] <171087277+primus-bot[bot]@users.noreply.github.com>
2025-06-04 12:23:01 +05:30
SagarRajput-7
cd4df6280f feat: fixed multiple sentry error around dashboards (#8148) 2025-06-04 13:20:51 +07:00
Vikrant Gupta
ad2d4ed56c chore(dashboard): mismatch in dashboard lock rbac (#8137) 2025-06-03 20:06:38 +05:30
aniketio-ctrl
7955497a8d Feat/7294 (#8139)
* feat(7294): updated dashboard uri for cloud integrations
2025-06-03 19:43:42 +05:30
aniketio-ctrl
6ed30318bd feat(7294): updated dashboard uri for cloud integrations (#8135) 2025-06-03 17:48:31 +05:30
Vikrant Gupta
c32dd9f17e chore(feature): drop the feature status table (#8124)
* chore(feature): drop the feature set table

* chore(feature): cleanup the types and remove unused flags

* chore(feature): some more cleanup

* chore(feature): add codeowners file

* chore(feature): init to basic plan for failed validations

* chore(feature): cleanup

* chore(feature): pkg handler cleanup

* chore(feature): pkg handler cleanup

* chore(feature): address review comments

* chore(feature): address review comments

* chore(feature): address review comments

---------

Co-authored-by: Vibhu Pandey <vibhupandey28@gmail.com>
2025-06-03 17:05:42 +05:30
Shaheer Kochai
c58cf67eb0 refactor: update funnel description endpoint from POST /save to PUT /{funnel_id} (#8080)
* refactor: update funnel description endpoint from POST /save to PUT /{funnel_id}

* feat: add timestamp to funnel description payload and update mutation type

---------

Co-authored-by: Shivanshu Raj Shrivastava <shivanshu1333@gmail.com>
2025-06-03 10:59:54 +00:00
Nageshbansal
440c3d8386 fix: Broken Docker Downloads Badge (#7954)
* Updated the Badge for Docker Downloads to use signoz/signoz repo
2025-06-03 16:01:28 +05:30
Aditya Singh
d683b94344 [Fix #8102] Logs Issues with context view (#8111)
* fix: add active log id to charts query

* refactor: remove comment

* fix: remove active log id on filter change

* test: update test for log explorer

* test: update test for log explorer

---------

Co-authored-by: Aditya Singh <adityasingh@Adityas-MacBook-Pro.local>
2025-06-03 09:57:39 +00:00
Srikanth Chekuri
6a629623bc chore: port functions, reduce to, series limit support (#8105) 2025-06-03 11:37:47 +05:30
Srikanth Chekuri
982688ccc9 chore: add field mapper and condition builder for ts v4 (#8100) 2025-06-02 19:43:48 +00:00
aniketio-ctrl
74bbb26033 fix(metrics): exclude NoRecordedValue data points from aggregation (#7674) 2025-06-03 00:40:05 +05:30
Vikrant Gupta
3bb9e05681 chore(dashboard): make dashboard schema production ready (#8092)
* chore(dashboard): initial commit

* chore(dashboard): bring all the code in module

* chore(dashboard): remove lock unlock from ee codebase

* chore(dashboard): go deps

* chore(dashboard): fix lint

* chore(dashboard): implement the store

* chore(dashboard): add migration

* chore(dashboard): fix lint

* chore(dashboard): api and frontend changes

* chore(dashboard): frontend changes for new dashboards

* chore(dashboard): fix test cases

* chore(dashboard): add lock unlock APIs

* chore(dashboard): add lock unlock APIs

* chore(dashboard): move integrations controller out from module

* chore(dashboard): move integrations controller out from module

* chore(dashboard): move integrations controller out from module

* chore(dashboard): rename migration file

* chore(dashboard): surface errors for lock/unlock dashboard

* chore(dashboard): some testing cleanups

* chore(dashboard): fix postgres migrations

---------

Co-authored-by: Vibhu Pandey <vibhupandey28@gmail.com>
2025-06-02 22:41:38 +05:30
aniketio-ctrl
61b2f8cb31 fix(8082): removed unnecessary log lines (#8123) 2025-06-02 13:09:43 +00:00
Vikrant Gupta
9d397d0867 fix(license): fixes for license service (#8121)
* fix(license): fixes for license service

* fix(license): fixes for license service

* fix(license): add code comments
2025-06-02 17:09:19 +05:30
aniketio-ctrl
5fb4206a99 Feat/7294: Updated Dashboards for integrations (#8113) 2025-06-02 15:17:53 +05:30
Aditya Singh
dd11ba9f48 fix: remove create dashboard call before navigate (#8029)
* fix: remove create dashboard call before navigate

* feat: minor refactor

---------

Co-authored-by: Aditya Singh <adityasingh@Adityas-MacBook-Pro.local>
2025-06-02 08:02:09 +00:00
Shivanshu Raj Shrivastava
f9cb9f10be feat: adds a part of trace funnel feature (APIs, module, handler, store, migrations) implementation (#7763)
* feat: adds server and handler changes

Signed-off-by: Shivanshu Raj Shrivastava <shivanshu1333@gmail.com>

* feat: add tracefunnel module and handler

Signed-off-by: Shivanshu Raj Shrivastava <shivanshu1333@gmail.com>

* feat: add required types for tracefunnels

Signed-off-by: Shivanshu Raj Shrivastava <shivanshu1333@gmail.com>

* feat: db operations, module and handler implementation

Signed-off-by: Shivanshu Raj Shrivastava <shivanshu1333@gmail.com>

* feat: add db migrations

Signed-off-by: Shivanshu Raj Shrivastava <shivanshu1333@gmail.com>

* chore: add utility functions

Signed-off-by: Shivanshu Raj Shrivastava <shivanshu1333@gmail.com>

* test: add utility function tests

Signed-off-by: Shivanshu Raj Shrivastava <shivanshu1333@gmail.com>

* test: add handler tests

Signed-off-by: Shivanshu Raj Shrivastava <shivanshu1333@gmail.com>

* test: add trace funnel module tests

Signed-off-by: Shivanshu Raj Shrivastava <shivanshu1333@gmail.com>

* chore: refactor handler and utils

Signed-off-by: Shivanshu Raj Shrivastava <shivanshu1333@gmail.com>

* chore: add funnel validation while processing funnel steps

Signed-off-by: Shivanshu Raj Shrivastava <shivanshu1333@gmail.com>

* test: add more tests to utils

Signed-off-by: Shivanshu Raj Shrivastava <shivanshu1333@gmail.com>

* chore: fix package naming

Signed-off-by: Shivanshu Raj Shrivastava <shivanshu1333@gmail.com>

* chore: fix naming convention

Signed-off-by: Shivanshu Raj Shrivastava <shivanshu1333@gmail.com>

* chore: update normalize funnel steps

Signed-off-by: Shivanshu Raj Shrivastava <shivanshu1333@gmail.com>

* chore: added some improvements

Signed-off-by: Shivanshu Raj Shrivastava <shivanshu1333@gmail.com>

* fix: optimize funnel creation by combining insert and update operations

Signed-off-by: Shivanshu Raj Shrivastava <shivanshu1333@gmail.com>

* chore: fix error handling

Signed-off-by: Shivanshu Raj Shrivastava <shivanshu1333@gmail.com>

* feat: trace funnel state management

Signed-off-by: Shivanshu Raj Shrivastava <shivanshu1333@gmail.com>

* fix: updated unit tests and mocks

Signed-off-by: Shivanshu Raj Shrivastava <shivanshu1333@gmail.com>

* fix: review comments

Signed-off-by: Shivanshu Raj Shrivastava <shivanshu1333@gmail.com>

* fix: minor fixes

Signed-off-by: Shivanshu Raj Shrivastava <shivanshu1333@gmail.com>

* fix: update funnel migration number

Signed-off-by: Shivanshu Raj Shrivastava <shivanshu1333@gmail.com>

* fix: review comments and some changes

Signed-off-by: Shivanshu Raj Shrivastava <shivanshu1333@gmail.com>

* fix: update modules

Signed-off-by: Shivanshu Raj Shrivastava <shivanshu1333@gmail.com>

---------

Signed-off-by: Shivanshu Raj Shrivastava <shivanshu1333@gmail.com>
2025-06-02 07:00:49 +00:00
Aditya Singh
b6180f6957 fix: fix html escape and json string parsing in qb (#8039)
Co-authored-by: Aditya Singh <adityasingh@Adityas-MacBook-Pro.local>
2025-06-02 06:42:02 +00:00
aniketio-ctrl
51d3ca16f7 fix(metric-explorer): case sensitivity in contains (#8103) 2025-06-02 04:35:32 +00:00
Vibhu Pandey
91cbd17275 feat(sharder): add simple and noop sharder (#8107) 2025-05-31 16:04:13 +05:30
aniketio-ctrl
68effaf232 chore: support for non-normalized metrics behind a feature flag (#7919)
feat(7294-services): added dot metrics boolean for services tab
2025-05-30 10:27:29 +00:00
Aditya Singh
c08d1bccaf FIX: Pipelines edit filter returning empty filter (#8055)
* fix: fix pipeline add and edit form flow

* test: update test cases

---------

Co-authored-by: Aditya Singh <adityasingh@Adityas-MacBook-Pro.local>
Co-authored-by: Vishal Sharma <makeavish786@gmail.com>
2025-05-30 15:47:26 +05:30
Amlan Kumar Nandy
1d77780c70 feat: add views tab to metrics explorer (#8091) 2025-05-30 05:39:24 +00:00
primus-bot[bot]
80ded899c7 chore(release): bump to v0.85.3 (#8099)
Co-authored-by: primus-bot[bot] <171087277+primus-bot[bot]@users.noreply.github.com>
2025-05-29 17:45:38 +05:30
Vikrant Gupta
4733af974e fix(license): return the active license even in case of suspended status (#8097)
* fix(license): return the active license even in case of suspended status

* fix(license): suspended check for side nav

* fix(license): suspended check for side nav

* fix(license): suspended check for side nav

* fix(license): suspended check for side nav
2025-05-29 12:05:27 +00:00
Amlan Kumar Nandy
1ab6c7177f chore: infra monitoring fixes (#8066) 2025-05-29 10:53:47 +07:00
primus-bot[bot]
c3123a4fa4 chore(release): bump to v0.85.2 (#8089)
Co-authored-by: primus-bot[bot] <171087277+primus-bot[bot]@users.noreply.github.com>
2025-05-28 16:42:27 +00:00
SagarRajput-7
5a602bbeb7 fix: added safety checks for query data (#8088) 2025-05-28 21:41:24 +05:30
SagarRajput-7
f487f088bd Revert "feat: improved the alert rules list search functionality" (#8085)
* Revert "feat: improved the alert rules list search functionality (#8075)"

This reverts commit bec52c3d3e.

* feat: added search capability for labels

* feat: added test cases
2025-05-28 21:24:26 +05:30
Vikrant Gupta
1cb01e8dd2 fix(saml): do not fetch the claims and use orgID from domain (#8086)
* fix(saml): do not fetch the claims and use orgID from domain

* fix(saml): do not fetch the claims and use orgID from domain
2025-05-28 18:21:35 +05:30
primus-bot[bot]
595a500be4 chore(release): bump to v0.85.0 (#8078)
Co-authored-by: primus-bot[bot] <171087277+primus-bot[bot]@users.noreply.github.com>
2025-05-28 12:17:24 +05:30
SagarRajput-7
bec52c3d3e feat: improved the alert rules list search functionality (#8075)
* feat: improved the alert rules list search functionality

* feat: improvements and tooltip added for more info

* feat: style improvement

* feat: style improvement

* feat: style improvement
2025-05-28 05:23:42 +00:00
Srikanth Chekuri
0a6a7ba729 chore: show migration info to all cloud regions (#8077) 2025-05-28 04:48:42 +00:00
Vishal Sharma
3d758d4358 feat: init pylon and deprecate intercom (#8059) 2025-05-28 07:11:11 +05:30
Vishal Sharma
9c8435119d feat: add appcues and remove customerio (#8045) 2025-05-27 19:49:55 +00:00
Ekansh Gupta
d732f8ba42 fix: updated the service name in exceptions filter (#8069)
* fix: updated the service name in exceptions filter

* fix: updated the service name in exceptions filter

* fix: updated the service name in exceptions filter
2025-05-27 17:42:08 +00:00
Vibhu Pandey
83b8eaf623 feat(pylon|appcues): add pylon and appcues (#8073) 2025-05-27 17:32:45 +00:00
Vikrant Gupta
ae7364f098 fix(login): fixed the interceptor to handle multiple failures (#8071)
* fix(login): fixed the interceptor to handle multiple failures

* fix(login): fixed the interceptor to handle multiple failures
2025-05-27 15:47:33 +00:00
Srikanth Chekuri
0ec1be1ddf chore: add querier base implementation (#8028) 2025-05-27 20:54:48 +05:30
Yunus M
93de4681a9 feat: oss - sso and api keys (#8068)
* feat: oss - sso and api keys

* feat: show to community and community enterprise

---------

Co-authored-by: Vikrant Gupta <vikrant@signoz.io>
2025-05-27 20:29:09 +05:30
Aditya Singh
69e94cbd38 Custom Quick Filters: Integration across other tabs (#8001)
* chore: added filters init

* chore: handle save and discard

* chore: search and api integrations

* feat: search on filters

* feat: style fix

* feat: style fix

* feat: signal to data source config

* feat: search styles

* feat: update drawer slide style

* chore: quick filters - added filters init (#7867)

Co-authored-by: Aditya Singh <adityasingh@Adityas-MacBook-Pro.local>

* feat: no results state

* fix: minor fix

* feat: qf setting ui

* feat: add skeleton to dynamic qf

* fix: minor fix

* feat: announcement tooltip added

* feat: announcement tooltip added refactor

* feat: announcement tooltip styles

* feat: announcement tooltip integration

* fix: number vals in filter list

* feat: announcement tooltip show logic added

* feat: light mode styles

* feat: remove unwanted styles

* feat: remove filter disable when one filter added

* style: minor style

* fix: minor refactor

* test: added test cases

* feat: integrate custom quick filters in logs

* Custom quick filter: Other Filters | Search integration (#7939)

* chore: added filters init

* chore: handle save and discard

* chore: search and api integrations

* feat: search on filters

* feat: style fix

* feat: style fix

* feat: signal to data source config

* feat: search styles

* feat: update drawer slide style

* feat: no results state

* fix: minor fix

* Custom Quick Filters: UI fixes and Announcement Tooltip (#7950)

* feat: qf setting ui

* feat: add skeleton to dynamic qf

* fix: minor fix

* feat: announcement tooltip added

* feat: announcement tooltip added refactor

* feat: announcement tooltip styles

* feat: announcement tooltip integration

* fix: number vals in filter list

* feat: announcement tooltip show logic added

* feat: light mode styles

* feat: remove unwanted styles

* feat: remove filter disable when one filter added

* style: minor style

---------

Co-authored-by: Aditya Singh <adityasingh@Adityas-MacBook-Pro.local>

---------

Co-authored-by: Aditya Singh <adityasingh@Adityas-MacBook-Pro.local>

* feat: code refactor

* feat: debounce search

* feat: refactor

* feat: exceptions integrate

* feat: api monitoring qf integrate

* feat: handle query name show

* Custom quick filters: Tests and pr review comments (#7967)

* chore: added filters init

* chore: handle save and discard

* chore: search and api integrations

* feat: search on filters

* feat: style fix

* feat: style fix

* feat: signal to data source config

* feat: search styles

* feat: update drawer slide style

* feat: no results state

* fix: minor fix

* feat: qf setting ui

* feat: add skeleton to dynamic qf

* fix: minor fix

* feat: announcement tooltip added

* feat: announcement tooltip added refactor

* feat: announcement tooltip styles

* feat: announcement tooltip integration

* fix: number vals in filter list

* feat: announcement tooltip show logic added

* feat: light mode styles

* feat: remove unwanted styles

* feat: remove filter disable when one filter added

* style: minor style

* fix: minor refactor

* test: added test cases

* feat: integrate custom quick filters in logs

* feat: code refactor

* feat: debounce search

* feat: refactor

---------

Co-authored-by: Aditya Singh <adityasingh@Adityas-MacBook-Pro.local>

* feat: integrate traces data source to settings

* feat: duration nano traces filter in qf

* fix: allow only admins to change qf settings

* feat: has error handling

* feat: fix existing tests

* feat: update test cases

* feat: update test cases

* feat: minor refactor

* feat: minor refactor

* feat: log quick filter settings changes

* feat: log quick filter settings changes

* feat: log quick filter settings changes

---------

Co-authored-by: Aditya Singh <adityasingh@Adityas-MacBook-Pro.local>
2025-05-27 20:04:57 +05:30
Srikanth Chekuri
62810428d8 chore: add logs statement builder base (#8024) 2025-05-27 13:51:38 +00:00
Yunus M
b921a2280b Update pull_request_template.md (#8065) 2025-05-27 16:32:25 +05:30
SagarRajput-7
8990fb7a73 feat: allow custom color palette in panel for legends (#8063) 2025-05-27 16:19:35 +07:00
SagarRajput-7
aaeffae1bd feat: added enhancements to legends in panel (#8035)
* feat: added enhancements to legends in panel

* feat: added option for right side legends

* feat: created the legend marker as checkboxes

* feat: removed histogram and pie from enhanced legends

* feat: row num adjustment

* feat: added graph visibility in panel edit mode also

* feat: alignment and fixes

* feat: added test cases
2025-05-27 13:50:40 +05:30
Vikrant Gupta
d1d7da6c9b chore(preference): add sidenav pinned preference (#8062) 2025-05-27 13:29:31 +05:30
Piyush Singariya
28a01bf042 feat: Introducing DynamoDB integration (#8012)
* feat: introducing DynamoDB integration

* fix: allow non expireable API key

* fix: clean up pat to API key middleware

* fix: address comments

* fix: update response of create api key

* feat: adding dashboard

* fix: adding dynamodb icon

* Update pkg/query-service/app/cloudintegrations/services/definitions/aws/dynamodb/assets/dashboards/overview.json

Co-authored-by: ellipsis-dev[bot] <65095814+ellipsis-dev[bot]@users.noreply.github.com>

---------

Co-authored-by: nityanandagohain <nityanandagohain@gmail.com>
Co-authored-by: ellipsis-dev[bot] <65095814+ellipsis-dev[bot]@users.noreply.github.com>
2025-05-27 07:18:20 +00:00
SagarRajput-7
fb1f320346 feat: added custom stepIntervals to bar chart for better visibility (#8023)
* feat: added custom stepIntervals to bar chart for better visibility

* feat: added test cases

---------

Co-authored-by: Srikanth Chekuri <srikanth.chekuri92@gmail.com>
2025-05-27 11:48:38 +05:30
Shaheer Kochai
4d484b225f feat(error): build generic error component (#8038)
* feat: build generic error component

* chore: test error component in DataSourceInfo component

* feat: get version from API + minor improvements

* feat: enhance error notifications with ErrorV2 support and integrate ErrorModal

* feat: implement ErrorModalContext + directly display error modal in create channel if request fails

* chore: write tests for the generic error modal

* chore: add optional chaining + __blank to _blank

* test: add trigger component tests for ErrorModal component

* test: fix the failing tests by wrapping in ErrorModalProvider

* chore: address review comments

* test: fix the failing tests

---------

Co-authored-by: Vikrant Gupta <vikrant@signoz.io>
2025-05-26 22:20:24 +05:30
Amlan Kumar Nandy
3a396602a8 chore: persist the state selection in the URL for all entities and filters in Infra Monitoring (#7991) 2025-05-26 06:58:55 +00:00
Amlan Kumar Nandy
650cf81329 chore: metrics explorer minor fixes (#8042) 2025-05-26 06:43:06 +00:00
Amlan Kumar Nandy
cdbf23d053 chore: infra monitoring improvements (#8002) 2025-05-26 06:03:01 +00:00
Amlan Kumar Nandy
3ca3db2567 chore: add to alerts/dashboard improvements for one chart per query mode in metrics explorer (#8014) 2025-05-26 05:45:21 +00:00
Srikanth Chekuri
0925ae73a9 chore: add traces statement builder base (#8020) 2025-05-25 22:14:47 +05:30
Vikrant Gupta
cffa511cf3 feat(user): support sso and api key (#8030)
* feat(user): support sso and api key

* feat(user): remove ee references from pkg

* feat(user): remove ee references from pkg

* feat(user): related client changes

* feat(user): remove the sso available check

* feat(user): fix go tests

* feat(user): move the middleware from ee to pkg

* feat(user): some more error code cleanup

* feat(user): some more error code cleanup

* feat(user): skip flaky UI tests

* feat(user): some more error code cleanup
2025-05-25 14:16:42 +05:30
Vibhu Pandey
2ba693f040 chore(linter): add more linters and deprecate zap (#8034)
* chore(linter): add more linters and deprecate zap

* chore(linter): add more linters and deprecate zap

* chore(linter): add more linters and deprecate zap

* chore(linter): add more linters and deprecate zap
2025-05-25 11:40:39 +05:30
Vibhu Pandey
403630ad31 feat(signoz): compile time check for dependency injection (#8033) 2025-05-24 23:53:54 +05:30
Vibhu Pandey
93ca3fee33 fix(quickfilter): fix injection of quickfilter (#8031)
## 📄 Summary

fix injection of quickfilter
2025-05-24 22:00:12 +05:30
Vikrant Gupta
b1c78c2f12 feat(license): build license service (#7969)
* feat(license): base setup for license service

* feat(license): delete old manager and import to new

* feat(license): deal with features

* feat(license): complete the license service in ee

* feat(license): add sqlmigration for licenses

* feat(license): remove feature flags

* feat(license): refactor into provider pattern

* feat(license): remove the ff lookup interface

* feat(license): add logging to the validator functions

* feat(license): implement features for OSS build

* feat(license): fix the OSS build

* feat(license): lets blast frontend

* feat(license): fix the EE OSS build without license

* feat(license): remove the hardcoded testing configs

* feat(license): upgrade migration to 34

* feat(license): better naming and structure

* feat(license): better naming and structure

* feat(license): better naming and structure

* feat(license): better naming and structure

* feat(license): better naming and structure

* feat(license): better naming and structure

* feat(license): better naming and structure

* feat(license): integration tests

* feat(license): integration tests

* feat(license): refactor frontend

* feat(license): make frontend api structure changes

* feat(license): fix integration tests

* feat(license): revert hardcoded configs

* feat(license): fix integration tests

* feat(license): address review comments

* feat(license): address review comments

* feat(license): address review comments

* feat(license): address review comments

* feat(license): update migration

* feat(license): update migration

* feat(license): update migration

* feat(license): fixed logging

* feat(license): use the unmarshaller for postable subscription

* feat(license): correct the error message

* feat(license): fix license test

* feat(license): fix lint issues

* feat(user): do not kill the service if upstream is down
2025-05-24 19:14:29 +05:30
Piyush Singariya
7feb94e5eb feat: Introducing SNS integration (AWS) (#7996)
* feat: introducing SNS integrations

* fix: panel title updated
2025-05-23 11:38:27 +00:00
Vibhu Pandey
47dc2b98f1 chore(go-lint): enable go-lint (#8022) 2025-05-23 15:52:58 +05:30
Srikanth Chekuri
f4dc2a8fb8 chore: remove telemetrytests package and add generic type for aggregation (#8019) 2025-05-23 09:29:15 +00:00
Nityananda Gohain
77d1492aac fix: allow non expireable API key (#8013)
* fix: allow non expireable API key

* fix: clean up pat to API key middleware

* fix: address comments

* fix: update response of create api key

* fix: gettable struct

* fix(api-key): frontend changes for api key refactor

---------

Co-authored-by: vikrantgupta25 <vikrant@signoz.io>
2025-05-23 07:47:20 +00:00
Piyush Singariya
6090a6be6e feat: ElastiCache AWS Integration (#7923)
* feat: adding elastiCache

* chore: removing AWS unnecessary prefix

* chore: update in units in 2 panels
2025-05-23 06:07:29 +00:00
Srikanth Chekuri
eabddf87d2 fix: time shift not working with fill gaps (#7999) 2025-05-23 09:33:47 +05:30
Vibhu Pandey
9e13245d1b feat(emailing): add smtp and emailing (#7993)
* feat(emailing): initial commit for emailing

* feat(emailing): implement emailing

* test(integration): fix tests

* fix(emailing): fix directory path

* fix(emailing): fix email template path

* fix(emailing): copy from go-gomail

* fix(emailing): copy from go-gomail

* fix(emailing): fix smtp bugs

* test(integration): fix tests

* feat(emailing): let missing templates passthrough

* feat(emailing): let missing templates passthrough

* feat(smtp): refactor and beautify

* test(integration): fix tests

* docs(smtp): fix incorrect grammar

* feat(smtp): add to header

* feat(smtp): remove comments

* chore(smtp): address comments

---------

Co-authored-by: Vikrant Gupta <vikrant@signoz.io>
2025-05-22 18:31:52 +00:00
Nityananda Gohain
a1c7a948fa fix: add error message in login (#8010)
* fix: add error message in login

* fix: use newf
2025-05-22 16:23:54 +00:00
Vikrant Gupta
0bfe53a93c chore(cache): use sensible defaults for caching (#8011)
* chore(cache): use sensible defaults for caching

* chore(cache): initialize the cache for http handlers

* chore(cache): revert cache DI
2025-05-22 11:46:09 +00:00
Shaheer Kochai
16140991be refactor: update logs explorer pagination logic (#7010)
* refactor: pagination changes in query range custom hook

* refactor: handle pagination changes in k8s logs and host logs

* refactor: logs panel component pagination changes

* fix: handle resetting offset on changing page size

* fix: optimize context log rendering and prevent duplicate logs

* chore: revert pagination handling changes for logs context view

* chore: remove unused function and prop

* refactor: handle the updated pagination logic in k8s entity events

* refactor: refactor queryPayload generation in logs pagination custom hook

* fix: remove handling for last log timestamp being sent with query_range

* chore: remove the unnecessary last log related changes from LogsExplorerViews

* Revert "fix: optimize context log rendering and prevent duplicate logs"

This reverts commit ad9ef188651e5106cbc84fe40fc36061c2b9fd40.

* fix: prevent recalculating the start/end timestamps while fetching next/prev pages

* chore: logs explorer pagination tests

* fix: rewrite test and mock scroll

* chore: use real timers to detect the start/end timestamps mismatch + enhance tests

* refactor: extract filters and order by into a separate function

* chore: add tests for logs context pagination

* chore: write tests for host logs pagination

* chore: overall improvements to host logs tests

* chore: k8s entity logs pagination tests

* chore: reset capturedQueryRangePayloads in beforeEach

* chore: dashboard logs panel component pagination tests

* fix: fix the breaking logs pagination test by change /v3 to /v4

* chore: remove the unused prop from HostMetricsLogs
2025-05-22 09:42:51 +00:00
Nityananda Gohain
aadf2a3ac7 fix: logs window based pagination to pageSize offset instead of using… (#6830)
* fix: logs window based pagination to pageSize offset instead of using id filter

* fix: add support for asc

* fix: remove unwanted code

---------

Co-authored-by: Srikanth Chekuri <srikanth.chekuri92@gmail.com>
2025-05-22 15:05:41 +05:30
Nityananda Gohain
824302be38 chore: update API key (#7959)
* chore: update API key

* fix: delete api key on user delete

* fix: migration

* fix: api

* fix: address comments

* fix: address comments

* fix: update structs

* fix: minor changes

* fix: error message

* fix: address comments

* fix: integration tests

* fix: minor issues

* fix: integration tests
2025-05-21 17:21:19 +05:30
primus-bot[bot]
8d4c4dc5f2 chore(release): bump to v0.84.1 (#7998)
#### Summary
 - Release SigNoz v0.84.1
2025-05-21 13:49:38 +05:30
Ekansh Gupta
91fae8c0f3 fix: changed the get request access from admin to view (#7997) 2025-05-21 08:01:34 +00:00
primus-bot[bot]
4bbe8c0ee7 chore(release): bump to v0.84.0 (#7995)
#### Summary
 - Release SigNoz v0.84.0
2025-05-21 12:18:14 +05:30
Nityananda Gohain
0f7d226b9b Fix: exists clause in logs QB (#7987)
* fix: exists in logs QB

* fix: exists in logs json qb
2025-05-21 10:22:42 +05:30
Shaheer Kochai
e03342e001 feat: add support for request integrations in aws integrations page (#7968)
* feat: add support for request integrations in aws integrations page

* chore: write test for request aws integrations
2025-05-20 21:39:40 +05:30
Shaheer Kochai
57f96574ff fix: trace funnel bugfixes and improvements (#7922)
* fix: display the inter-step latency type in step metrics table

* chore: send latency type with n+1th step + make latency type optional

* fix: fetch and format get funnel steps overview metrics

* chore: remove dev env check

* fix: overall fixes

* fix: don't cache validate query + trigger validate on changing error and where clause as well

* fix: display the latency type in step overview metrics table + p99_latency to latency

* chore: revert dev env check removal (remove after BE changes are merged)

* fix: adjust create API response

* chore: useLocalStorage custom hook

* feat: improve the run funnel flow

- for the initial fetch of funnel results, require the user to run the funnel
- subsequently change the run funnel button to a refresh button
- display loading state while any of the funnel results APIs are being fetched

* fix: fix the issue of add step details breaking

* fix: refetch funnel details on rename success

* fix: redirect 'learn more' to trace funnels docs

* fix: handle potential undefined step in latency type calculation

* fix: properly handle incomplete steps state

* fix: fix the edge case of stale validation state on transitioning from invalid steps to valid steps

* fix: remove the side effect from render and move to useEffect
2025-05-20 19:44:05 +04:30
Yunus M
354e4b4b8f feat: show pricing update banner in home page (#7990)
* feat: show pricing update banner in  home page
2025-05-20 19:40:01 +05:30
Shaheer Kochai
d7102f69a9 feat: add support for S3 region buckets syncing (#7874)
* feat: add support for S3 region buckets syncing

* fix: hide the dropdown icon and not found dropdown for s3 buckets selector

* fix: display s3 buckets selector only for s3 sync service

* chore: tests for configure service s3 sync

---------

Co-authored-by: Piyush Singariya <piyushsingariya@gmail.com>
2025-05-20 13:32:32 +00:00
Aditya Singh
040c45b144 Custom Quick Filters: Logs (#7986)
* chore: quick filters - added filters init (#7867)

Co-authored-by: Aditya Singh <adityasingh@Adityas-MacBook-Pro.local>

* Custom quick filter: Other Filters | Search integration (#7939)

* chore: added filters init

* chore: handle save and discard

* chore: search and api integrations

* feat: search on filters

* feat: style fix

* feat: style fix

* feat: signal to data source config

* feat: search styles

* feat: update drawer slide style

* feat: no results state

* fix: minor fix

* Custom Quick Filters: UI fixes and Announcement Tooltip (#7950)

* feat: qf setting ui

* feat: add skeleton to dynamic qf

* fix: minor fix

* feat: announcement tooltip added

* feat: announcement tooltip added refactor

* feat: announcement tooltip styles

* feat: announcement tooltip integration

* fix: number vals in filter list

* feat: announcement tooltip show logic added

* feat: light mode styles

* feat: remove unwanted styles

* feat: remove filter disable when one filter added

* style: minor style

---------

Co-authored-by: Aditya Singh <adityasingh@Adityas-MacBook-Pro.local>

---------

Co-authored-by: Aditya Singh <adityasingh@Adityas-MacBook-Pro.local>

* Custom quick filters: Tests and pr review comments (#7967)

* chore: added filters init

* chore: handle save and discard

* chore: search and api integrations

* feat: search on filters

* feat: style fix

* feat: style fix

* feat: signal to data source config

* feat: search styles

* feat: update drawer slide style

* feat: no results state

* fix: minor fix

* feat: qf setting ui

* feat: add skeleton to dynamic qf

* fix: minor fix

* feat: announcement tooltip added

* feat: announcement tooltip added refactor

* feat: announcement tooltip styles

* feat: announcement tooltip integration

* fix: number vals in filter list

* feat: announcement tooltip show logic added

* feat: light mode styles

* feat: remove unwanted styles

* feat: remove filter disable when one filter added

* style: minor style

* fix: minor refactor

* test: added test cases

* feat: integrate custom quick filters in logs

* feat: code refactor

* feat: debounce search

* feat: refactor

---------

Co-authored-by: Aditya Singh <adityasingh@Adityas-MacBook-Pro.local>

---------

Co-authored-by: Aditya Singh <adityasingh@Adityas-MacBook-Pro.local>
2025-05-20 12:39:49 +00:00
Sahil Khan
207d7602ab chore: changed name of api monitoring from third party apis to external apis (#7989) 2025-05-20 17:24:09 +05:30
Srikanth Chekuri
018346ca18 chore: add aggregation expr rewriter and exhaustive tests for logs filter (#7972) 2025-05-20 16:54:34 +05:30
Ekansh Gupta
7290ab3602 feat: added entry point operations api for the service overview page (#7957)
* feat: added entry point operations api for the service overview page

* feat: added entry point operations api for the service overview page

* feat: added entry point operations api for the service overview page

* feat: added entry point operations api for the service overview page

* feat: added entry point operations api for the service overview page

* feat: added entry point operations api for the service overview page

* feat: added entry point operations api for the service overview page

---------

Co-authored-by: Nityananda Gohain <nityanandagohain@gmail.com>
2025-05-20 05:58:40 +00:00
Shaheer Kochai
88239cec4d fix: AWS integration bugfixes (#7886)
* fix(AccountSettingsModal): add region deselect functionality to region selector

* fix(AWS integration): redirect help button to aws integration documentation

* style(Header): update button color on hover for improved visibility
2025-05-20 03:48:11 +00:00
SagarRajput-7
10ba0e6b4f feat: added error preview and warning text with info on cyclic dependency detected (#7893)
* feat: added error preview and warning text with info on cyclic dependency detected

* feat: removed console.log

* feat: restricted save when cycle found

* feat: added test cases for variableitem flow and update test cases

* feat: updated test cases

* feat: corrected the recent resolved title text
2025-05-19 17:28:26 +07:00
Shaheer Kochai
88e1e42bf0 chore: add analytics events for trace funnels (#7638) 2025-05-19 09:22:35 +00:00
Piyush Singariya
a0d896557e feat: introducing ECS + SQS integration (#7840)
* feat: introducing S3 sync as AWS integration

* chore: trying restructuring

* chore: in progress

* chore: restructuring looks ok

* chore: minor fix in tests

* feat: integration with Agent check-in complete

* chore: minor change in validation

* fix: removing validation and altering overview

* fix: aftermath of merge conflicts

* test: updating agent version

* test: updating agent version

* test: updating agent version 3

* test: updating agent version 11

* test: updating agent version 14

* chore: replace with newer error utility

* feat: introducing ECS integration (AWS Integrations)

* chore: adding metrics to ecs integration

* feat: adding base SQS files

* feat: adding metrics for SQS

* feat: adding ECS dashboard

* feat: adding dashboards for SQS

* fix: adding SentMessageSize metrics in SQS

* fix: for calculating log connection status for S3 Sync

* fix: adding check for svc type, fixing cw logs integration.json S3 Sync

* fix: in compiledCollectionStrat for servicetype s3sync

* test: testing agent version

* fix: change in data collected for S3 Sync logs

* test: testing agent 19

* chore: replace fmt.Errorf

* fix: tests and adding validation in S3 buckets

* fix: test TestAvailableServices

* chore: replacing fmt.Errorf

* chore: updating the agent version to latest

* chore: reverting some changes

* fix: remove services from Variables

* chore: change overview.png
2025-05-19 14:17:52 +05:30
Amlan Kumar Nandy
2b28c5f2e2 chore: persist the filters and time selection, modal open state in summary view (#7942) 2025-05-19 11:54:05 +07:00
Vibhu Pandey
6dbcc5fb9d fix(analytics): fix heartbeat event (#7975) 2025-05-19 08:04:33 +05:30
Vikrant Gupta
175e9a4c5e fix(apm): update the apdex to latest response structure (#7966)
* fix(apm): update the apdex to latest response structure

* fix(apm): update the apdex to latest response structure
2025-05-17 16:23:11 +05:30
SagarRajput-7
33506cafce fix: cover the title as reactNode case for useGetResolvedText (#7965)
* fix: cover the title as reactNode case for useResolvedText

* fix: added more test cases
2025-05-16 22:15:19 +00:00
SagarRajput-7
e34e61a20d feat: removed allow clear icon from when ALL option is selected (#7894) 2025-05-17 00:48:41 +05:30
Vibhu Pandey
da084b4686 chore(savedview|apdex|dashboard): create modules and handlers (#7960)
* chore(savedview): refactor into module and handler

* chore(rule): move telemetry inside telemetry

* chore(apdex): refactor apdex and delete dao

* chore(dashboard): create a dashboard module

* chore(ee): get rid of the init nonsense

* chore(dashboard): fix err and apierror confusion

* chore: address comments
2025-05-17 00:15:00 +05:30
Srikanth Chekuri
6821efeb99 chore: move visitor impl out of generated files (#7956) 2025-05-16 14:47:23 +00:00
Srikanth Chekuri
c5d5c84a0e chore: add fieldmapper implementation (#7955) 2025-05-16 20:09:57 +05:30
SagarRajput-7
9c298e83a5 feat: added user role restriction on crud for planned downtime feat (#7896) 2025-05-16 16:59:52 +05:30
SagarRajput-7
9383b6576d feat: added variable description icon and details on tooltip (#7897) 2025-05-16 16:46:19 +05:30
SagarRajput-7
f10f7a806f feat: suggest and allow variables in panel title (#7898)
* feat: suggest and allow variables in panel title

* feat: refined the logic for suggestion and addition with $

* feat: added logic for panel title resolved string and added test cases

* feat: added support to full view
2025-05-16 16:35:11 +05:30
Srikanth Chekuri
03600f4d6f chore: add query builder types (#7940) 2025-05-16 00:00:01 +05:30
Srikanth Chekuri
9fbf111976 chore: less strict context for fetching field values (#7807) 2025-05-15 19:59:40 +05:30
Nityananda Gohain
b8dff86a56 fix: refresh token to access token (#7949)
* fix: fix refresh token to access token

* fix: update the if condition
2025-05-15 12:52:14 +05:30
Vikrant Gupta
f525647b40 chore(auth): refactor the client handlers in preparation for multi tenant login (#7902)
* chore: update auth

* chore: password changes

* chore: make changes in oss code

* chore: login

* chore: get to a running state

* fix: migration initial commit

* fix: signoz cloud integration tests

* fix: minor fixes

* chore: sso code fixed with org domain

* fix: tests

* fix: ee auth api's

* fix: changes in name

* fix: return user in login api

* fix: address comments

* fix: validate password

* fix: handle get domain by email properly

* fix: move authdomain to usermodule

* fix: use displayname instead of hname

* fix: rename back endpoints

* fix: update telemetry

* fix: correct errors

* fix: test and fix the invite endpoints

* fix: delete all things related to user in store

* fix: address issues

* fix: ee delete invite

* fix: rename func

* fix: update user and update role

* fix: update role

* chore(api): update the api folder structure according to rest principles

* fix: login and invite changes

* chore(api): update the api folder structure according to rest principles

* chore(login): update the frontend according to the new APIs

* fix: return org name in users response

* chore(login): update the frontend according to the new APIs

* fix: update user role

* fix: nil check

* chore(login): update the frontend according to the new API

* fix: getinvite and update role

* fix: sso

* fix: getinvite use sso ctx

* fix: use correct sourceurl

* fix: getsourceurl from req payload

* chore(login): update the frontend according to the new API

* fix: update created_at

* fix: fix reset password

* chore(login): fixed reset password and bulk invites

* fix: sso signup and token password change

* fix: don't delete last admin

* fix: reset password and migration

* fix: migration

* chore(login): fix the unwanted throw statement and tsconfig

* fix: reset password for sso users

* fix: clean up invite

* chore(login): delete last admin user and reset password

* fix: migration

* fix: update claims and store code

* fix: use correct error

* fix: proper nil checks

* fix: make migration multitenant

* fix: address comments

* fix: minor fixes

* fix: test

* fix: rename reset password

* fix: set self registration only when sso enabled

* chore(auth): update the invite user API

* fix: integration tests

* fix: integration tests

* fix: integration tests

* fix: integration tests

* fix: integration tests

* fix: integration tests

* fix: integration tests

* chore(auth): update integration test

* fix: telemetry

---------

Co-authored-by: nityanandagohain <nityanandagohain@gmail.com>
2025-05-14 18:23:41 +00:00
Nityananda Gohain
0a2b7ca1d8 chore(auth): refactor the auth modules and handler in preparation for multi tenant login (#7778)
* chore: update auth

* chore: password changes

* chore: make changes in oss code

* chore: login

* chore: get to a running state

* fix: migration initial commit

* fix: signoz cloud integration tests

* fix: minor fixes

* chore: sso code fixed with org domain

* fix: tests

* fix: ee auth api's

* fix: changes in name

* fix: return user in login api

* fix: address comments

* fix: validate password

* fix: handle get domain by email properly

* fix: move authdomain to usermodule

* fix: use displayname instead of hname

* fix: rename back endpoints

* fix: update telemetry

* fix: correct errors

* fix: test and fix the invite endpoints

* fix: delete all things related to user in store

* fix: address issues

* fix: ee delete invite

* fix: rename func

* fix: update user and update role

* fix: update role

* fix: login and invite changes

* fix: return org name in users response

* fix: update user role

* fix: nil check

* fix: getinvite and update role

* fix: sso

* fix: getinvite use sso ctx

* fix: use correct sourceurl

* fix: getsourceurl from req payload

* fix: update created_at

* fix: fix reset password

* fix: sso signup and token password change

* fix: don't delete last admin

* fix: reset password and migration

* fix: migration

* fix: reset password for sso users

* fix: clean up invite

* fix: migration

* fix: update claims and store code

* fix: use correct error

* fix: proper nil checks

* fix: make migration multitenant

* fix: address comments

* fix: minor fixes

* fix: test

* fix: rename reset password

---------

Co-authored-by: Vikrant Gupta <vikrant@signoz.io>
2025-05-14 23:12:55 +05:30
Sahil Khan
16938c6cc0 feat: added 10-minute time range for traces correlation in api monitoring (#7815)
* feat: added 10-minute time range for traces correlation in api monitoring

* feat: url sharing for domain details drawer w/o filters for endpoint stats

* feat: added endpoint details persistence

* feat: external services to api monitoring correlation - 0

* feat: added api correlations to other panels on external services

* fix: cosmetic fix

* feat: added tests for url sharing utils

* feat: minor cosmetic changes

* fix: changed traces correlation window from 10 minutes to 5 minutes

* fix: minor copy changes

* fix: pr comments

* fix: minor bug fix

* fix: pr comments improvements
2025-05-14 11:42:45 +00:00
Piyush Singariya
81b8f93177 chore: remove unnecessary dependency (#7938) 2025-05-14 15:40:25 +05:30
Vibhu Pandey
96cfb607d1 chore(go): add go-deps workflow (#7936)
* feat(go): add go-deps workflow

* chore(go): fix dependencies
2025-05-14 09:00:53 +00:00
primus-bot[bot]
f526e887cc chore(release): bump to v0.83.0 (#7931)
Co-authored-by: primus-bot[bot] <171087277+primus-bot[bot]@users.noreply.github.com>
2025-05-14 12:18:04 +05:30
Piyush Singariya
03ab6e704b feat: S3 Sync (AWS Integrations) (#7718) 2025-05-14 05:12:41 +05:30
Vishal Sharma
9c0134da54 feat: user pilot reload (#7905)
* feat: user pilot reload

* feat: added test cases

---------

Co-authored-by: SagarRajput-7 <sagar@signoz.io>
2025-05-13 23:28:01 +05:30
Ekansh Gupta
175b059268 fix: quick filter dependency injection issue in CE (#7918)
* fix: quick filter dependency injection issue in CE

* fix: quick filter dependency injection issue in CE
2025-05-13 19:45:48 +05:30
Nageshbansal
dfca5b13c0 fix: OSS telemetry for the number of services (#7908)
Fixes the OSS telemetry for sending the `Number of services` events.
2025-05-13 16:18:59 +05:30
Aditya Singh
ad392e81ff Fix: Update query_range api from v3 to v4 (Logs and Traces) (#7906)
* fix: change query range api from v3 to v4

* test: update test cases

---------

Co-authored-by: Aditya Singh <adityasingh@Adityas-MacBook-Pro.local>
2025-05-13 09:54:37 +00:00
Yunus M
92ceefccee Update pull_request_template.md (#7865) 2025-05-13 09:41:16 +00:00
Vikrant Gupta
9cc4e1b56f chore(frontend): update the api folder structure (#7901)
* chore(api): update the api folder structure according to rest principles

* chore(api): update the api folder structure according to rest principles
2025-05-12 18:14:58 +05:30
Ekansh Gupta
3758ee7451 fix: changed the keys in the default quick filters to actual keys in … (#7863)
* fix: changed the keys in the default quick filters to actual keys in the v3.attributekeys

* fix: changed the keys in the default quick filters to actual keys in the v3.attributekeys

* fix: changed the keys in the default quick filters to actual keys in the v3.attributekeys

* fix: changed the keys in the default quick filters to actual keys in the v3.attributekeys

* fix: changed the keys in the default quick filters to actual keys in the v3.attributekeys
2025-05-12 16:20:47 +05:30
Vibhu Pandey
02b605d109 feat(analytics): add analytics package (#7808)
- add analytics package
2025-05-12 14:32:13 +05:30
Amlan Kumar Nandy
eb86aabf3e chore: improvements to the all attributes section in metric details (#7879) 2025-05-12 05:24:02 +00:00
Amlan Kumar Nandy
8810693bda chore: persist one chart per query toggle across refreshes and exports (#7884) 2025-05-12 05:16:13 +00:00
Shaheer Kochai
6334e09a60 feat: Funnel Details Page Base Structure (#7364)
* feat: funnels list page basic UI

* feat: get funnels list data from mock API, and handle data, loading and empty states

* feat: implement funnel rename

* chore: move useFunnels to hooks/TracesFunnels

* feat: create traces funnels details basic page + funnel -> details redirection

* fix: properly display created at in funnels list item + preventDefault

* chore: add tab bar to trace funnel details page

* chore: traces funnel details page overall skeleton

* chore: traces funnel details results skeleton

* fix: hide step count for add button only

* feat: funnel details page steps and configuration (#7424)

* chore: add a new tab for traces funnels

* feat: funnels list page basic UI

* feat: get funnels list data from mock API, and handle data, loading and empty states

* feat: implement funnel rename

* refactor: overall improvements

* feat: implement sorting in traces funnels list page

* feat: add sort column key and order to url params

* chore: move useFunnels to hooks/TracesFunnels

* feat: implement traces funnels search and refactor search and sort by extracting to custom hooks

* chore: overall improvements to rename trace funnel modal

* chore: make the rename input auto-focusable

* feat: handle create funnel modal

* feat: delete funnel modal and functionality

* fix: fix the layout shift in funnel item caused by getContainer={false}

* chore: overall improvements and use live api in traces funnels

* feat: create traces funnels details basic page + funnel -> details redirection

* fix: funnels traces light mode UI

* fix: properly display created at in funnels list item + preventDefault

* refactor: extract FunnelItemPopover into a separate component

* chore: hide funnel tab from traces explorer

* chore: add check to display trace funnels tab only in dev environment

* chore: improve funnels modals light mode

* chore: overall improvements

* fix: properly pass funnel details link

* chore: address PR review changes

* chore: add tab bar to trace funnel details page

* feat: funnel step UI with service, span, and where filters

* feat: build radio button component

* refactor: use the SignozRadioButton in funnel results -> step transitions radio buttons

* feat: inter step config (i.e. latency type) UI

* chore: improve steps header styles by removing divider width

* feat: funnel steps title, description, popover UI + pass data from API

* chore: update FilterSelect component to conditionally add url params and accept on change

* fix: fix funnel step where clause and update the state variables for filters

* chore: add support for isMultiple and fix the type in FilterSelect

* feat: centralize the steps state management in StepsContent

* fix: move steps state up + pass steps count from state

* feat: implement auto save for updating the steps whenever any step changes

* feat: implement auto save for validating steps if service name or span names change

* feat: implement funnel step removal

* feat: implement add details modal for funnel steps

* fix: fix the overflowing time range picker

* feat: funnel details empty state

* feat: add support for saving funnel description

* chore: overall improvements

* fix: fix the light mode styles

* fix: fix the failing build + broken search UI

* refactor: remove the reference of useLocation from traceFunnel item in TraceModulePage constant

* fix: fix the issue of update steps getting triggered on initial render if we have filters

* fix: fix the edge case of stale state causing filters to be re-added after removing

* feat: funnel details page results (#7451)

* feat: funnel metrics table component

* feat: funnel metrics and steps transition metrics components UI

* feat: funnel table component

* feat: slowest traces and traces with error components

* fix: overall light theme fixes

* fix: fix the warning

* chore: add empty and loading states to FunnelMetricsTable

* feat: get overall funnel metrics from the API

* fix: fix the empty state of funnel metrics table

* feat: get data for slowest traces and traces with errors

* fix: link trace id to trace details page

* fix: get data for funnel step transition metrics and refactor the existing data fetching logic

* refactor: add funnel context + overall refactoring and optimizations

* refactor: move steps states to funnel context + handle empty and run funnel disabled states

* feat: handle run funnel

* fix: improve empty state

* chore: rename isValidateStepsMutationLoading -> isValidateStepsLoading

* chore: improve query key

* fix: display loading state if funnel results are fetching

* refactor: move steps validation fetching and states to the context API

* fix: display loading state in funnel results while steps validation is fetching

* fix: call validate steps API only on changing the service name or span name of any step

* refactor: move validateStepsQuery key out of useEffect and update the dependencies

* chore: centralize hasIncompleteSteps and run validate only if steps have service and spans

* fix: handle all empty fields state + overall improvements

* fix: handle long where query tags

* feat: build the funnel result graph component

* feat: build the funnel result graph component

* feat: handle loading, error, empty states in funnel graph

* fix: don't display change percentage if % is 0

* refactor: overall improvements

* feat: get funnel steps graph data from API + move logic to custom hook

* fix: improve empty and error states

* fix: handle funnel graph legends width using css

* fix: redirect to trace funnels list page on clicking delete from funnel details

* fix: update the query cache while updating steps

* fix: implement debounced search for funnel list search

* fix: refetch steps graph data query on clicking run funnel / sync button

* fix: improve the step footer spacing

* chore: add gap between divider to inter-step-config

* fix: handle loading state while fetching

* feat: add span to funnel flow (from trace details page) (#7477)

* chore: display add to funnel icon on hovering any span in trace details page

* chore: add className to funnel item actions popover

* feat: add funnels tab to trace details v2 tab bar

* feat: add span to funnel flow

* chore: hide actions popover button from funnel item in span -> funnel flows

* chore: improve the funnel details UI in add span to funnel modal

* fix: display empty state + don't redirect to funnels list on delete success + overall improvements

* chore: add null check

* fix: display add to funnel button based on feature flag

* fix: display funnels tab in trace details based on feature flag

* fix: remove maxTagCount

* feat: change ms to ns

* chore: address review comments

* chore: remove feature flag and display trace funnels only in dev environment

* fix: handle restoring steps if updating funnel steps fails

* refactor: update the get and delete funnel endpoints to adjust to the BE changes (#7697)

* refactor: address review comments

* fix: handle nested funnel response structure to fix missing funnel_id… (#7740)

* fix: handle nested funnel response structure to fix missing funnel_id in updates

Signed-off-by: Shivanshu Raj Shrivastava <shivanshu1333@gmail.com>

* chore: remove console.log

Signed-off-by: Shivanshu Raj Shrivastava <shivanshu1333@gmail.com>

* chore: revert explicitly passing funnelId to updateFunnelSteps

---------

Signed-off-by: Shivanshu Raj Shrivastava <shivanshu1333@gmail.com>
Co-authored-by: ahmadshaheer <ashaheerki@gmail.com>

* chore: fix api endpoint

Signed-off-by: Shivanshu Raj Shrivastava <shivanshu1333@gmail.com>

* refactor: incorporate the recent funnel details API changes (#7760)

* chore: trace funnels feedback changes (#7772)

* chore: change the copy from x traces to valid traces found / not found

* chore: add open funnel button in add span to funnel modal

* feat: display buttons for adding step details and funnel description + copy to clipboard

* feat: highlight funnel graph column based on selected (total / error span) from the legend items

* chore: trace funnel changes (#7780)

* refactor: handle funnels list search on frontend

* refactor: use funnel steps update API for adding / updating step title and description

* feat: allow selecting user's typed option in trace funnel service and span name dropdowns

* chore: properly render the -> between steps in funnel results

* fix: sync funnel step name with add details modal text fields

---------

Signed-off-by: Shivanshu Raj Shrivastava <shivanshu1333@gmail.com>
Co-authored-by: Yunus M <myounis.ar@live.com>
Co-authored-by: Shivanshu Raj Shrivastava <shivanshu1333@gmail.com>
2025-05-12 10:16:26 +05:30
Vikrant Gupta
1d379931b2 chore(integration-test): remove the outdated-setup (#7887)
* chore(integration-test): remove the outdated-setup

* chore(integration-test): remove the outdated-setup
2025-05-11 17:10:55 +05:30
dependabot[bot]
815a6d13c5 chore(deps): bump @babel/helpers from 7.21.0 to 7.26.10 in /frontend (#7272)
Bumps [@babel/helpers](https://github.com/babel/babel/tree/HEAD/packages/babel-helpers) from 7.21.0 to 7.26.10.
- [Release notes](https://github.com/babel/babel/releases)
- [Changelog](https://github.com/babel/babel/blob/main/CHANGELOG.md)
- [Commits](https://github.com/babel/babel/commits/v7.26.10/packages/babel-helpers)

---
updated-dependencies:
- dependency-name: "@babel/helpers"
  dependency-type: indirect
...

Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2025-05-11 16:38:45 +05:30
Vibhu Pandey
59af9d1c2f chore(go): upgrade to 1.23 (#7885) 2025-05-11 14:09:24 +05:30
dependabot[bot]
19d24da147 chore(deps): bump @babel/runtime from 7.21.0 to 7.26.10 in /frontend (#7289)
Bumps [@babel/runtime](https://github.com/babel/babel/tree/HEAD/packages/babel-runtime) from 7.21.0 to 7.26.10.
- [Release notes](https://github.com/babel/babel/releases)
- [Changelog](https://github.com/babel/babel/blob/main/CHANGELOG.md)
- [Commits](https://github.com/babel/babel/commits/v7.26.10/packages/babel-runtime)

---
updated-dependencies:
- dependency-name: "@babel/runtime"
  dependency-type: indirect
...

Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2025-05-09 23:28:01 +05:30
Vibhu Pandey
cd1c9ddf11 fix(dashboards): fix lock/unlock functionality (#7880)
Co-authored-by: Vikrant Gupta <vikrant@signoz.io>
2025-05-09 16:44:57 +00:00
Sahil Khan
7ff3286c9c fix: added prismjs 1.30.0 in resolutions (#7872) 2025-05-09 21:47:22 +05:30
Amlan Kumar Nandy
27830742f9 fix: resolve typescript issues in metrics explorer (#7878) 2025-05-09 18:55:43 +05:30
Vikrant Gupta
39f07e7477 chore(error): update the channels module to use the new api errors (#7856)
* chore(error): integrate new errors for channels create and test

* chore(error): update all the channel APIs

* chore(error): update the edit org http issue

* chore(error): fix create channel test

* chore(error): fix create channel test

* chore(error): fix create channel test

* chore(error): fix create channel test

* chore(error): remove console logs
2025-05-09 07:56:47 +00:00
Amlan Kumar Nandy
0ab50da7b0 chore: change graph list layout to grid in explorer view (#7852) 2025-05-09 05:38:51 +00:00
Amlan Kumar Nandy
c03541cd6c chore: improve error handling and loading states in summary view of metrics explorer (#7862) 2025-05-09 04:41:41 +00:00
Amlan Kumar Nandy
727a039eb9 fix: fix 'open in explorer' functionality in metrics explorer (#7873) 2025-05-09 11:34:32 +07:00
Yunus M
c7db85f44c Update CODEOWNERS (#7861) 2025-05-08 08:22:41 +00:00
Vikrant Gupta
08d9a74055 fix(api-key): make the expires in human readable in api keys (#7864)
* fix(api-key): human readable expires in

* fix(api-key): human readable expires in
2025-05-08 08:14:02 +00:00
Ekansh Gupta
503e4cdf00 Feat trace ordering on the basis of span_count or Trace_duration (#7842)
* feat: added order by span_count in traces tab

* feat: added order by span_count in traces tab

* feat: added order by span_count in traces tab

* feat: added order by span_count in traces tab

* feat: added order by span_count in traces tab

* feat: added order by span_count in traces tab

---------

Co-authored-by: Nityananda Gohain <nityanandagohain@gmail.com>
2025-05-08 06:38:41 +00:00
Srikanth Chekuri
224f952da7 chore: add notification for upcoming migration for cloud region IN users (#7848) 2025-05-07 13:41:41 +00:00
Vikrant Gupta
0c28067f89 feat(error): base setup for error handling in frontend (#7851)
* feat(login): add error response v2 and error handler v2

* feat(error): added the base error class

* feat(error): added the base error class

* feat(error): remove unnecessary code

* feat(error): fix types

* feat(error): add http status code helper
2025-05-07 16:31:20 +05:30
Vibhu Pandey
8dc749b9dd fix(migration): fix cascading drops in sqlite (#7844)
* fix(foreign-key): fix cascading drops in sqlite

* fix(foreign-key): fix comments

* fix(foreign-key): fix function names

* fix(foreign-key): fix order of migration

---------

Co-authored-by: Vikrant Gupta <vikrant@signoz.io>
2025-05-07 08:18:13 +00:00
Prashant Shahi
82a111e5b1 chore(signoz): remove deprecated signoz arguments (#7849)
### Summary

- remove deprecated signoz arguments

---------

Signed-off-by: Prashant Shahi <prashant@signoz.io>
2025-05-07 07:15:37 +00:00
primus-bot[bot]
e2e6c65b4d chore(release): bump to v0.82.0 (#7847)
#### Summary
 - Release SigNoz v0.82.0
 - Bump SigNoz OTel Collector to v0.111.41

 Created by [Primus-Bot](https://github.com/apps/primus-bot)
2025-05-07 06:46:59 +00:00
Amlan Kumar Nandy
f01d21cbf2 feat: implement inspect feature for metrics explorer (#7549) 2025-05-07 05:18:56 +00:00
aniketio-ctrl
36886135d1 chore: disable writing to v2 tables and add signozclickhousemetrics in signozspanmetrics
Co-authored-by: Srikanth Chekuri <srikanth.chekuri92@gmail.com>
2025-05-07 03:26:01 +00:00
Vikrant Gupta
3648027576 fix(ruler): improve the user experience for rule id migration (#7841)
* fix(ruler): improve the user experience for rule id migration

* fix(ruler): improve the user experience for rule id migration
2025-05-06 22:37:59 +05:30
Shaheer Kochai
b80626f5e2 fix: add dark class to the elements when dark mode is enabled to support components library modes (#7607) 2025-05-06 15:44:26 +00:00
Shaheer Kochai
08579242eb fix: add hideSpanScopeSelector prop to QueryBuilderSearchV2 and hide from non qb consumers (#7716)
* feat: add hideSpanScopeSelector prop to QueryBuilderSearchV2 and hide from non qb consumers

* fix: update the tests to check rendering based on hideSpanScopeSelector

* feat: display span selector in exceptions page
2025-05-06 19:02:57 +04:30
aniketio-ctrl
6e0b50dd60 fix(7832): added filters in inspect metrics api (#7833)
* fix(7842): added filters in inspect metrics api

* fix(metrics-explorer): added check for 40 time series only
2025-05-06 07:06:12 +00:00
Aditya Singh
76ed58c481 Fix/logs issues main (#7758)
* fix: context log data fix in list view

* fix: fix query builder and quick filters in light mode

* chore: add desc

* chore: added test case

* fix: fix redirect url when not in logs view

* chore: minor fix

* chore: minor fix

* chore: minor test fix

---------

Co-authored-by: Aditya Singh <adityasingh@Adityas-MacBook-Pro.local>
2025-05-06 11:02:40 +05:30
Shivanshu Raj Shrivastava
f4d029bd12 fix: correctly populate response_status (#7822)
Signed-off-by: Shivanshu Raj Shrivastava <shivanshu1333@gmail.com>
2025-05-05 13:57:09 +05:30
Amlan Kumar Nandy
b66af786e6 fix: description tooltip coming up twice in metrics list table (#7823) 2025-05-05 05:58:56 +00:00
Vibhu Pandey
5ad68a3310 docs(contributing): add sql docs (#7819)
### Summary

add sql docs
2025-05-04 02:23:44 +05:30
Vikrant Gupta
0f0693f6eb fix(ruler): scan orgIDs in string slice instead of valuer struct (#7818) 2025-05-04 00:04:20 +05:30
Ekansh Gupta
16e3c185e9 feat: quick_filter_fix (#7816)
* feat: quick_filter_fix

* feat: added changes related to custom options for quick filters

* feat: added changes related to custom options for quick filters
2025-05-03 17:09:20 +00:00
Vibhu Pandey
8d6671e362 docs(contributing): add docs/contributing/go/readme (#7814)
* docs(readme): add docs/contributing/go/readme

* docs(readme): add docs/contributing/go/readme

* docs(readme): add errors package

* docs(readme): add errors package

* docs(readme): add errors package

* docs(readme): add errors package

* docs(readme): add errors package

* docs(readme): add errors package

* docs(readme): add errors package

* docs(readme): add errors package

* docs(readme): add errors package

* docs(readme): add errors package

* Update docs/contributing/go/errors.md

Co-authored-by: ellipsis-dev[bot] <65095814+ellipsis-dev[bot]@users.noreply.github.com>

* Update docs/contributing/go/errors.md

Co-authored-by: ellipsis-dev[bot] <65095814+ellipsis-dev[bot]@users.noreply.github.com>

---------

Co-authored-by: ellipsis-dev[bot] <65095814+ellipsis-dev[bot]@users.noreply.github.com>
2025-05-03 13:07:18 +00:00
Vikrant Gupta
5b237ee628 feat(cache): multi-tenant cache (#7805)
* feat(cache): remove the references of old cache

* feat(cache): add orgID in query range modules pt1

* feat(cache): add orgID in query range modules pt2

* feat(cache): add orgID in query range modules pt3

* feat(cache): preload metrics for all orgs

* feat(cache): fix ruler

* feat(cache): fix go build

* feat(cache): add orgID to rule

* feat(cache): fix tests

* feat(cache): address review comments

* feat(cache): use correct errors

* feat(cache): fix tests

* feat(cache): add the cache test package
2025-05-03 18:30:07 +05:30
Nageshbansal
cb08ce5e5d chore: updates os for Docker Engine Installation for redhat (#7809) 2025-05-03 10:06:16 +00:00
Ekansh Gupta
3fbc3dec48 feat: added changes related to custom options for quick filters (#7712)
* feat: added changes related to custom options for quick filters

* feat: added changes related to custom options for quick filters

* feat: added changes related to custom options for quick filters

* feat: added changes related to custom options for quick filters

* feat: added changes related to custom options for quick filters

* feat: added changes related to custom options for quick filters

* feat: added support for custom quick filters

* feat: added changes related to custom options for quick filters

* feat: added changes related to custom options for quick filters

* feat: added changes related to custom options for quick filters

* feat: added changes related to custom options for quick filters

* feat: added changes related to custom options for quick filters

* feat: added changes related to custom options for quick filters

* feat: added changes related to custom options for quick filters

* feat: added changes related to custom options for quick filters
2025-05-02 22:39:26 +05:30
Vikrant Gupta
5b2f897a00 chore(ruler): remove the notification for rule ID migration (#7806) 2025-05-01 19:59:34 +05:30
Vibhu Pandey
73f57d8bee chore(codeowners): add codeowners for sqlmigration (#7779)
### Summary

- add codeowners for sqlmigration
2025-04-30 08:41:31 +00:00
Prashant Shahi
ab17bf3558 ci(build): include USERPILOT_KEY FE envs (#7777)
### Summary

- include USERPILOT_KEY FE envs in the build workflows

Signed-off-by: Prashant Shahi <prashant@signoz.io>
2025-04-30 13:32:19 +05:30
primus-bot[bot]
eb5a1b76b8 chore(release): bump to v0.81.0 (#7776)
Co-authored-by: primus-bot[bot] <171087277+primus-bot[bot]@users.noreply.github.com>
2025-04-30 12:19:26 +05:30
Shivanshu Raj Shrivastava
130ff925bd feat: adds error toggle in top error page (#7773)
Signed-off-by: Shivanshu Raj Shrivastava <shivanshu1333@gmail.com>
2025-04-30 11:05:30 +05:30
Sahil Khan
75d86cea60 fix: api monitoring cosmetic changes (#7771)
fix: minor changes
2025-04-29 21:26:07 +00:00
CheetoDa
cf451d335c feat: added new datasources (#7769) 2025-04-29 22:05:34 +05:30
Yunus M
e47c7cc17b feat: initialize sentry only once (#7768) 2025-04-29 16:01:28 +00:00
Srikanth Chekuri
629c54d3f9 fix: nil pointer error on failed to create rule (#7767) 2025-04-29 15:01:31 +00:00
sawhil
ed3026eeb5 fix: removed unused file 2025-04-29 20:21:12 +05:30
Sahil Khan
ccf26883c4 chore: api monitoring tests (#7750)
* feat: added url sharing for main domain list page api monitoring

* feat: added Shivanshu's suggestions in qb payloads for spanid and kind string client filter

* fix: limited the endpoints table limit to 1000

* feat: date picker in domain details drawer

* feat: added top errors tab in domain details

* fix: removed console logs

* feat: new dep services, top 10 errors, localised date picker, aggregate domain details, etc.

* feat: added domain level and endpoint level stats

* feat: added custom cell rendering in gridcard, added new table view in all endpoints

* feat: added port column in endpoints table

* feat: added custom title handling in gridtablecomponent

* fix: fixed the traces correlation query for status code bar charts

* feat: added zoom functionality on domain details charts

* chore: add constants for standardisation

Signed-off-by: Shivanshu Raj Shrivastava <shivanshu1333@gmail.com>

* chore: add constants for standardisation in the API

Signed-off-by: Shivanshu Raj Shrivastava <shivanshu1333@gmail.com>

* feat: add tooltip to Endpoint Overview

Signed-off-by: Shivanshu Raj Shrivastava <shivanshu1333@gmail.com>

* feat: api monitoring feedback till 28th april

* feat: added top errors to traces correlation

* feat: added new rate col to status code table

* feat: custom color mapping for uplot tooltip implemented

* chore: added ApiMonitoringPage.test

* chore: added uts for all endpoints, top errors and their utils

* fix: minor fix

* chore: moved test files to proper folder

* chore: added endpoint details uts and its imported utils ut

* chore: added endpoint dropdown uts and its imported utils ut

* chore: added endpoint metrics uts and its imported utils ut

* chore: added dependent services uts and its imported utils ut

* chore: added status code bar chart uts and its imported utils ut

* chore: added status code table uts and its imported utils ut
2025-04-29 20:21:12 +05:30
sawhil
958924befe feat: custom color mapping for uplot tooltip implemented 2025-04-29 20:21:12 +05:30
sawhil
b70c570cdc feat: added new rate col to status code table 2025-04-29 20:21:12 +05:30
sawhil
42a026469b feat: added top errors to traces correlation 2025-04-29 20:21:12 +05:30
sawhil
6de0908a62 feat: api monitoring feedback till 28th april 2025-04-29 20:21:12 +05:30
Shivanshu Raj Shrivastava
fd21a4955e feat: add tooltip to Endpoint Overview
Signed-off-by: Shivanshu Raj Shrivastava <shivanshu1333@gmail.com>
2025-04-29 20:21:12 +05:30
Shivanshu Raj Shrivastava
3dce13d29f chore: add constants for standardisation in the API
Signed-off-by: Shivanshu Raj Shrivastava <shivanshu1333@gmail.com>
2025-04-29 20:21:12 +05:30
Shivanshu Raj Shrivastava
2ce4b60c55 chore: add constants for standardisation
Signed-off-by: Shivanshu Raj Shrivastava <shivanshu1333@gmail.com>
2025-04-29 20:21:12 +05:30
sawhil
c9888804cd feat: added zoom functionality on domain details charts 2025-04-29 20:21:12 +05:30
sawhil
413b0d9fae fix: fixed the traces correlation query for status code bar charts 2025-04-29 20:21:12 +05:30
sawhil
b24095236f feat: added custom title handling in gridtablecomponent 2025-04-29 20:21:12 +05:30
sawhil
21d239ce68 feat: added port column in endpoints table 2025-04-29 20:21:12 +05:30
sawhil
d6e4e3c5ed feat: added custom cell rendering in gridcard, added new table view in all endpoints 2025-04-29 20:21:12 +05:30
sawhil
552b103e8b feat: added domain level and endpoint level stats 2025-04-29 20:21:12 +05:30
sawhil
1123a9a93d feat: new dep services, top 10 errors, localised date picker, aggregate domain details, etc. 2025-04-29 20:21:12 +05:30
sawhil
8b30e3cc5c fix: removed console logs 2025-04-29 20:21:12 +05:30
sawhil
b86e65d2ca feat: added top errors tab in domain details 2025-04-29 20:21:12 +05:30
sawhil
d5e2841083 feat: date picker in domain details drawer 2025-04-29 20:21:12 +05:30
sawhil
7dad5dcd17 fix: limited the endpoints table limit to 1000 2025-04-29 20:21:12 +05:30
sawhil
ac0b640146 feat: added Shivanshu's suggestions in qb payloads for spanid and kind string client filter 2025-04-29 20:21:12 +05:30
sawhil
e125d146b5 feat: added url sharing for main domain list page api monitoring 2025-04-29 20:21:12 +05:30
sawhil
a41ffceca4 fix: changed the error percentage calculation 2025-04-29 20:21:12 +05:30
sawhil
7edb047c0c fix: added support for group by sorting in endpoints table 2025-04-29 20:21:12 +05:30
sawhil
6504f2565b fix: fixed last seen sorting in endpoint table 2025-04-29 20:21:12 +05:30
sawhil
6b418a125b fix: changed error rate to error percentage 2025-04-29 20:21:12 +05:30
sawhil
36827a1667 fix: added fallback for undefined data and added support for sorting 2025-04-29 20:21:12 +05:30
sawhil
1118c56356 feat: new dep. services table added 2025-04-29 20:21:12 +05:30
sawhil
bd071e3e60 feat: added new queries to handle error rates of endpoints 2025-04-29 20:21:12 +05:30
sawhil
36f3a2e26d feat: added sorting and error rate to endpoints table 2025-04-29 20:21:12 +05:30
sawhil
fee7e96176 feat: added sorting in domains list page 2025-04-29 20:21:12 +05:30
Gabber235
ef4e3a30fb fix: black gap on cloud in sidebar (#7383) (#7427)
Co-authored-by: Vikrant Gupta <vikrant@signoz.io>
2025-04-28 20:32:32 +00:00
Vikrant Gupta
39532d5da0 fix(zeus): build pipelines LD flags (#7754) 2025-04-28 19:40:22 +00:00
Vishal Sharma
4d216bae4d feat: init userpilot (#7579) 2025-04-28 18:42:14 +00:00
Vikrant Gupta
21563914c7 fix(ruler): telemetry for rules (#7751)
### Summary

- fix the telemetry for rules and notification channels 
- not adding bun as this needs to go away soon
2025-04-28 23:07:28 +05:30
Vibhu Pandey
accb77f227 chore(use-*): remove use-new-traces-schema and use-new-logs-schema flags (#7741)
### Summary

remove use-new-traces-schema and use-new-logs-schema flags
2025-04-28 21:01:35 +05:30
Vibhu Pandey
e73e1bd078 feat(zeus): add zeus package (#7745)
* feat(zeus): add zeus package

* feat(signoz): add DI for zeus

* feat(zeus): integrate with the codebase

* ci(make): change makefile

* ci: change workflows to point to the new zeus url

* Update ee/query-service/usage/manager.go

Co-authored-by: ellipsis-dev[bot] <65095814+ellipsis-dev[bot]@users.noreply.github.com>

* Update ee/query-service/license/manager.go

Co-authored-by: ellipsis-dev[bot] <65095814+ellipsis-dev[bot]@users.noreply.github.com>

* fix: fix nil retriable

* fix: fix zeus DI

* fix: fix path of ldflag

* feat(zeus): added zeus integration tests

* feat(zeus): added zeus integration tests

* feat(zeus): format the pytest

---------

Co-authored-by: ellipsis-dev[bot] <65095814+ellipsis-dev[bot]@users.noreply.github.com>
Co-authored-by: vikrantgupta25 <vikrant.thomso@gmail.com>
Co-authored-by: Vikrant Gupta <vikrant@signoz.io>
2025-04-28 14:20:47 +00:00
Vikrant Gupta
940313d28b fix(organization): return display name instead of name for organization (#7747)
### Summary

- return display name instead of name for organization
2025-04-28 10:54:53 +00:00
Vibhu Pandey
9815ec7d81 chore: remove references to unused flags (#7739)
### Summary

remove references to unused flags
2025-04-28 09:27:26 +00:00
Vibhu Pandey
a7cad0f1a5 chore(conf): add clickhouse settings (#7743) 2025-04-27 14:26:36 +00:00
SagarRajput-7
a624b4758d chore: fix failing typecheck (#7742) 2025-04-27 19:51:05 +05:30
SagarRajput-7
ee5684b130 feat: added permission restriction for viewer for planned Maintenance (#7736)
* feat: added permission restriction for viewer for planned Maintenance

* feat: added test cases
2025-04-27 17:24:56 +05:30
SagarRajput-7
2f8da5957b feat: added custom single and multiselect components (#7497)
* feat: added new Select component for multi and single select

* feat: refactored code and added keyboard navigations in single select

* feat: different state handling in single select

* feat: updated the playground page

* feat: multi-select updates

* feat: fixed multiselect selection issues

* feat: multiselect cleanup

* feat: multiselect key navigation cleanup

* feat: added tokenization in multiselect

* feat: add on enter and handle duplicates

* feat: design update to the components

* feat: design update to the components

* feat: design update to the components

* feat: updated the playground page

* feat: edited playground data

* feat: edited styles

* feat: code cleanup

* feat: added shift + keys navigation and selection

* feat: improved styles and added darkmode styles

* feat: removed scroll bar hover style

* feat: added scroll bar on hover

* feat: added regex wrapper support

* feat: fixed right arrow navigation across chips

* feat: addressed all the single select feedbacks

* feat: addressed all the single select feedbacks

* feat: added only-all-toggle feat with ALL selection tag

* feat: remove clear, update footer info content and style and misc fixes

* feat: misc style fixes

* feat: added quotes exception to the multiselect tagging

* feat: removing demo page, and cleanup PR for reviews

* feat: resolved comments and refactoring

* feat: added test cases
2025-04-27 16:55:53 +05:30
dependabot[bot]
3f6f77d0e2 chore(deps): bump axios from 1.7.7 to 1.8.2 in /frontend (#7249)
Bumps [axios](https://github.com/axios/axios) from 1.7.7 to 1.8.2.
- [Release notes](https://github.com/axios/axios/releases)
- [Changelog](https://github.com/axios/axios/blob/v1.x/CHANGELOG.md)
- [Commits](https://github.com/axios/axios/compare/v1.7.7...v1.8.2)

---
updated-dependencies:
- dependency-name: axios
  dependency-type: direct:production
...

Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2025-04-27 11:16:16 +00:00
Vibhu Pandey
5bceffbeaa fix: fix modules and handler (#7737)
* fix: fix modules and handler

* fix: fix sqlmigration package

* fix: fix other fmt issues

* fix: fix tests

* fix: fix tests
2025-04-27 16:38:34 +05:30
Vibhu Pandey
9e449e2858 feat(auth): drop group table (#7672)
### Summary

drop group table
2025-04-26 15:50:02 +05:30
Shivanshu Raj Shrivastava
b60588a749 chore: use count instead of count distinct (#7711)
Signed-off-by: Shivanshu Raj Shrivastava <shivanshu1333@gmail.com>
2025-04-25 21:42:05 +05:30
Vikrant Gupta
c322657666 feat(organization): schema changes for the organizations entity (#7684)
* feat(organization): add hname and alias for organization

* fix: boolean values are not shown in the list panel's column

* fix: moved logic to component level

* fix: added type

* fix: added test cases

* fix: added test cases

* chore: update copy webpack plugin

* Revert "fix: display same key with multiple data types in filter suggestions by enhancing the deduping logic (#7255)"

This reverts commit 1e85981a17.

* fix: use query search v2 for traces data source to handle multiple data types for the same key

* fix(QueryBuilderSearchV2): add user typed option if it doesn't exist in the payload

* fix(QueryBuilderSearchV2): increase the height of search dropdown for non-logs data sources

* fix: display span scope selector for trace data source

* chore: remove the span scope selector from qb search v1 and move the component to search v2

* fix: write test to ensure that we display span scope selector for traces data source

* fix: limit converting  ->   only to log data source

* fix: don't display empty suggestion if only spaces are typed

* chore: tests for span scope selector

* chore: qb search flow (key, operator, value) test cases

* refactor: fix the Maximum update depth reached issue while running tests

* chore: overall improvements to span scope selector tests

Resource attr filter: style fix and quick filter changes (#7691)

* chore: resource attr filter init

* chore: resource attr filter api integration

* chore: operator config updated

* chore: filter show/hide logic and styles

* chore: add support for custom operator list to qb

* chore: minor refactor

* chore: minor code refactor

* test: quick filters test suite added

* test: quick filters test suite added

* test: all errors test suite added

* chore: style fix

* test: all errors mock fix

* chore: test case fix and mixpanel update

* chore: color update

* chore: minor refactor

* chore: style fix

* chore: set default query in exceptions tab

* chore: style fix

* chore: minor refactor

* chore: minor refactor

* chore: minor refactor

* chore: test update

* chore: fix filter header with no query name

* fix: scroll fix

* chore: add data source traces to quick filters

* chore: replace div with fragment

---------

Co-authored-by: Aditya Singh <adityasingh@Adityas-MacBook-Pro.local>

fix: handle rate operators for table panel (#7695)

* fix: handle rate operators for table panel

chore: fix error rate (#7701)

Signed-off-by: Shivanshu Raj Shrivastava <shivanshu1333@gmail.com>

* feat(organization): minor cleanups

* feat(organization): better naming for api and usecase

* feat(organization): better packaging for modules

* feat(organization): change hname to displayName

* feat(organization): update the migration to use dialect

* feat(organization): update the migration to use dialect

* feat(organization): update the migration to use dialect

* feat(organization): revert back to impl

* feat(organization): remove DI from organization

* feat(organization): address review comments

* feat(organization): address review comments

* feat(organization): address review comments

---------

Signed-off-by: Shivanshu Raj Shrivastava <shivanshu1333@gmail.com>
2025-04-25 19:38:15 +05:30
CheetoDa
a1846c008a chore: added new datasources (#7659)
* chore: added new datasources

* chore: added integrations to json

* chore: added quickstart

* feat: handle internal redirects

* chore: update onboarding-config-with-links.json

* Update onboarding-config-with-links.json

---------

Co-authored-by: Yunus M <myounis.ar@live.com>
2025-04-24 02:08:59 +05:30
Amlan Kumar Nandy
a6824db622 fix: metric details "something went wrong" message (#7686) 2025-04-23 10:42:09 +00:00
Amlan Kumar Nandy
e6f69aa74c fix: query builder in metrics explorer picking up wrong datasource (#7676)
* fix: query builder in metrics explorer picking up wrong datasource

* chore: add UTs

---------

Co-authored-by: Srikanth Chekuri <srikanth.chekuri92@gmail.com>
2025-04-23 10:36:33 +00:00
Srikanth Chekuri
a9c09f33cb chore: always add reserved vars (#7689) 2025-04-23 09:14:10 +00:00
Srikanth Chekuri
9eb2196617 chore: use attributes table for metric keys and values (#7680) 2025-04-23 09:05:56 +00:00
primus-bot[bot]
131759ec96 chore(release): bump to v0.80.0 (#7703)
Co-authored-by: primus-bot[bot] <171087277+primus-bot[bot]@users.noreply.github.com>
2025-04-23 12:20:51 +05:30
Shivanshu Raj Shrivastava
365a3e250f chore: fix error rate (#7701)
Signed-off-by: Shivanshu Raj Shrivastava <shivanshu1333@gmail.com>
2025-04-22 20:26:37 +00:00
Nityananda Gohain
f3a1f3cc20 fix: handle rate operators for table panel (#7695)
* fix: handle rate operators for table panel
2025-04-22 19:09:08 +00:00
Aditya Singh
ae509b4ae9 Resource attr filter: style fix and quick filter changes (#7691)
* chore: resource attr filter init

* chore: resource attr filter api integration

* chore: operator config updated

* chore: filter show/hide logic and styles

* chore: add support for custom operator list to qb

* chore: minor refactor

* chore: minor code refactor

* test: quick filters test suite added

* test: quick filters test suite added

* test: all errors test suite added

* chore: style fix

* test: all errors mock fix

* chore: test case fix and mixpanel update

* chore: color update

* chore: minor refactor

* chore: style fix

* chore: set default query in exceptions tab

* chore: style fix

* chore: minor refactor

* chore: minor refactor

* chore: minor refactor

* chore: test update

* chore: fix filter header with no query name

* fix: scroll fix

* chore: add data source traces to quick filters

* chore: replace div with fragment

---------

Co-authored-by: Aditya Singh <adityasingh@Adityas-MacBook-Pro.local>
2025-04-22 15:45:49 +00:00
Shaheer Kochai
43e2be0333 feat: use search v2 component for traces (#7537)
* Revert "fix: display same key with multiple data types in filter suggestions by enhancing the deduping logic (#7255)"

This reverts commit 1e85981a17.

* fix: use query search v2 for traces data source to handle multiple data types for the same key

* fix(QueryBuilderSearchV2): add user typed option if it doesn't exist in the payload

* fix(QueryBuilderSearchV2): increase the height of search dropdown for non-logs data sources

* fix: display span scope selector for trace data source

* chore: remove the span scope selector from qb search v1 and move the component to search v2

* fix: write test to ensure that we display span scope selector for traces data source

* fix: limit converting  ->   only to log data source

* fix: don't display empty suggestion if only spaces are typed

* chore: tests for span scope selector

* chore: qb search flow (key, operator, value) test cases

* refactor: fix the Maximum update depth reached issue while running tests

* chore: overall improvements to span scope selector tests
2025-04-22 15:24:03 +00:00
Yunus M
20a40b33ce chore: update copy webpack plugin (#7687)
* chore: update copy webpack plugin
2025-04-22 20:36:57 +05:30
sawhil
a9b07c4b47 feat: added test case for checking copying functionality 2025-04-22 15:18:38 +05:30
sawhil
2a5c7cc0ab feat: added test cases for copy span link functionality 2025-04-22 15:18:38 +05:30
sawhil
afb18b8142 fix: pr comments - used useSafeNavigate hook 2025-04-22 15:18:38 +05:30
sawhil
9a580915e6 fix: removed not required prop 2025-04-22 15:18:38 +05:30
sawhil
0944af3d31 feat: added copy span link support alongside span click expand in waterfall graph 2025-04-22 15:18:38 +05:30
SagarRajput-7
9338efcefc fix: boolean values are not shown in the list panel's column (#7668)
* fix: boolean values are not shown in the list panel's column

* fix: moved logic to component level

* fix: added type

* fix: added test cases

* fix: added test cases
2025-04-22 12:03:28 +05:30
sawhil
6b9e0ce799 fix: minor comment update 2025-04-21 12:45:04 +05:30
sawhil
d4c3c24849 feat: added test cases 2025-04-21 12:45:04 +05:30
sawhil
30d935a768 fix: used existing constant 2025-04-21 12:45:04 +05:30
sawhil
073d42c416 fix: removed timestamp and id from being passed to query from log details drawer 2025-04-21 12:45:04 +05:30
Aditya Singh
f11b9644cf Introduce new Resource Attribute FIlter in exceptions tab (#7589)
* chore: resource attr filter init

* chore: resource attr filter api integration

* chore: operator config updated

* chore: filter show/hide logic and styles

* chore: add support for custom operator list to qb

* chore: minor refactor

* chore: minor code refactor

* test: quick filters test suite added

* test: quick filters test suite added

* test: all errors test suite added

* chore: style fix

* test: all errors mock fix

* chore: test case fix and mixpanel update

* chore: color update

* chore: minor refactor

* chore: style fix

* chore: set default query in exceptions tab

* chore: style fix

* chore: minor refactor

* chore: minor refactor

* chore: minor refactor

* chore: test update

* chore: fix filter header with no query name

---------

Co-authored-by: Aditya Singh <adityasingh@Adityas-MacBook-Pro.local>
2025-04-21 05:41:05 +00:00
Yunus M
87922e9577 feat: show notification for alert rule ID logic change (#7673)
* feat: show notification for alert rule ID logic change

* Update frontend/src/container/Home/Home.tsx

Co-authored-by: Srikanth Chekuri <srikanth.chekuri92@gmail.com>

---------

Co-authored-by: Vikrant Gupta <vikrant@signoz.io>
Co-authored-by: Srikanth Chekuri <srikanth.chekuri92@gmail.com>
2025-04-19 14:25:07 +05:30
Prashant Shahi
8412727414 chore: deprecate stagingapp/testingapp (#7649)
### Summary

- deprecate stagingapp/testingapp workflows
- remove `docker-compose.testing.yaml`

Signed-off-by: Prashant Shahi <prashant@signoz.io>
2025-04-18 15:31:52 +00:00
Srikanth Chekuri
f0a95503d9 chore: add API endpoints for fields keys and values (#7560) 2025-04-18 13:33:17 +00:00
Vikrant Gupta
16e0fa2eef feat(ruler): update the ruler and planned maintenance tables (#7535)
* feat(ruler): base setup for rules and planned maintenance tables

* feat(ruler): more changes for making ruler org aware

* feat(ruler): fix lint

* feat(ruler): update the edit planned maintenance function

* feat(ruler): local testing edits for planned maintenance

* feat(ruler): abstract store and types from rules pkg

* feat(ruler): abstract store and types from rules pkg

* feat(ruler): abstract out store and add migration

* feat(ruler): frontend changes and review comments

* feat(ruler): add back compareAndSelectConfig

* feat(ruler): changes for alertmanager matchers

* feat(ruler): addressed review comments

* feat(ruler): remove the cascade operations from rules table

* feat(ruler): update the template for alertmanager

* feat(ruler): implement the rule history changes

* feat(ruler): implement the rule history changes

* feat(ruler): implement the rule history changes

---------

Co-authored-by: Vibhu Pandey <vibhupandey28@gmail.com>
2025-04-18 00:04:25 +05:30
Yunus M
2fa944d254 feat: update callback url for billing (#7667) 2025-04-17 20:21:14 +05:30
Srikanth Chekuri
b0d19035a4 chore: add where clause visitor implementation for query expression (#7564) 2025-04-17 15:54:36 +05:30
Nityananda Gohain
054dea366e fix: proper formatting of floating point (#7653)
* fix: proper formatting of floating point

* fix: old trace

* fix: add tests

* fix: use strconv.FormatFloat instead
2025-04-17 06:02:41 +00:00
primus-bot[bot]
aaf0b597dc chore(release): bump to v0.79.1 (#7655)
#### Summary
 - Release SigNoz v0.79.1

 Created by [Primus-Bot](https://github.com/apps/primus-bot)

Co-authored-by: primus-bot[bot] <171087277+primus-bot[bot]@users.noreply.github.com>
2025-04-17 00:02:05 +05:30
Prashant Shahi
19372c8194 ci(build): use unique cache key for the internal/public builds (#7654)
### Summary

- unique cache keys for the internal/public builds

Signed-off-by: Prashant Shahi <prashant@signoz.io>
2025-04-16 23:37:05 +05:30
Vibhu Pandey
eb74adad44 test(integration): set the base for integration tests (#7606)
* test(integration): set the base for integration tests

* ci: add ci pipeline for integration test

* ci: add ci pipeline for integration test

* ci: add ci pipeline for integration test

* ci: add ci pipeline for integration test

* ci: add ci pipeline for integration test

* ci: add ci pipeline for integration test

* ci: add ci pipeline for integration test

* ci: add ci pipeline for integration test

* ci: add ci pipeline for integration test

* ci: add ci pipeline for integration test

* ci: add ci pipeline for integration test

* ci: add ci pipeline for integration test

* ci: add ci pipeline for integration test

* ci: add ci pipeline for integration test

* ci: add ci pipeline for integration test

* ci: add ci pipeline for integration test
2025-04-16 18:54:05 +05:30
Srikanth Chekuri
d5c04e1342 chore: log original query that failed to transform (#7641) 2025-04-16 14:40:54 +05:30
primus-bot[bot]
2b9632c8fd chore(release): bump to v0.79.0 (#7643)
#### Summary
 - Release SigNoz v0.79.0
 - Bump SigNoz OTel Collector to v0.111.39

 Created by [Primus-Bot](https://github.com/apps/primus-bot)

Co-authored-by: primus-bot[bot] <171087277+primus-bot[bot]@users.noreply.github.com>
2025-04-16 13:36:31 +05:30
Prashant Shahi
24920ae903 chore(prereleaser): update cron schedule - 6:30AM UTC (#7640)
### Summary

- update prereleaser cron schedule to 6:30AM UTC

Signed-off-by: Prashant Shahi <prashant@signoz.io>
2025-04-16 07:20:47 +00:00
Prashant Shahi
6f096632a2 chore(build-staging): only include telemetry tunnel FE envs (#7637)
### Summary

- only include telemetry tunnel FE environment variables for the staging build

---------

Signed-off-by: Prashant Shahi <prashant@signoz.io>
2025-04-16 12:38:30 +05:30
Piyush Singariya
a42eacec4b chore: enhancing JSON Parser handling (#7591)
* feat: enhancing JSON Parser handling

* fix: updating collector version

* chore: updating go.mod reference for Collector

---------

Co-authored-by: Nityananda Gohain <nityanandagohain@gmail.com>
2025-04-16 11:24:59 +05:30
Nityananda Gohain
e723399f7f fix: add check for empty services (#7611) 2025-04-15 16:39:33 +00:00
Nityananda Gohain
48936bed9b chore: multitenancy in integrations (#7507)
* chore: multitenancy in integrations

* chore: multitenancy in cloud integration accounts

* chore: changes to cloudintegrationservice

* chore: rename migration

* chore: update scan function

* chore: update scan function

* chore: fix migration

* chore: fix struct

* chore: remove unwanted code

* chore: update scan function

* chore: migrate user and pat for integrations

* fix: changes to the user for integrations

* fix: address comments

* fix: copy created_at

* fix: update non revoked token

* chore: don't allow deleting pat and user for integrations

* fix: address comments

* chore: address comments

* chore: add checks for fk in dialect

* fix: service migration

* fix: don't update user if user is already migrated

* fix: update correct service config

* fix: remove unwanted code

* fix: remove migration for multiple same services which is not required

* fix: fix migration and disable dashboard if metrics disabled

* fix: don't use ee types

---------

Co-authored-by: Vikrant Gupta <vikrant@signoz.io>
2025-04-15 15:35:36 +00:00
Srikanth Chekuri
ee70474cc7 fix: missing receivers in json payload for legacy postableAlert (#7603) 2025-04-14 13:20:39 +00:00
Srikanth Chekuri
c3fa7144ee chore: add tag type filter support in attribute keys (#7522) 2025-04-14 18:43:15 +05:30
Nityananda Gohain
5dd02a5b8e fix: remove unnecessary code for email domain check error (#7566)
* fix: proper check for emailComponents

* fix: correct error handling
2025-04-14 11:15:21 +05:30
Srikanth Chekuri
c0f01e4cb9 chore: add metadatastore implementation for logs and traces (#7559)
* chore: add metadatastore implementation for logs and traces

* chore: use telemetrystore mock
2025-04-11 19:41:02 +05:30
Srikanth Chekuri
fed84cb50a chore: add condition builder attributes metadata (#7558) 2025-04-11 16:20:27 +05:30
Srikanth Chekuri
80545c4d07 chore: add materialized field extractor from table schema (#7557) 2025-04-11 15:53:55 +05:30
Srikanth Chekuri
0b1faec092 chore: add condition builder for span index v3 (#7556) 2025-04-11 15:13:04 +05:30
Srikanth Chekuri
ba6f31b1c3 chore: add virtual fields table (#7586) 2025-04-11 07:36:31 +05:30
Srikanth Chekuri
eed92978a4 chore: add non-json condition builder for logs v2 (#7555) 2025-04-10 18:23:01 +00:00
Prashant Shahi
41cbd316b5 Feat/staging (#7585)
### Summary

- Non-production build workflow using Primus
- Staging CD: new staging app and dev staging deployments
- cleanup used docker resources in stagingapp/testingapp machines

---------

Signed-off-by: Prashant Shahi <prashant@signoz.io>
2025-04-10 17:46:13 +05:30
Vishal Sharma
8d7d33393d feat: include tenant_url in event attributes for logging (#7582) 2025-04-10 15:17:14 +05:30
sawhil
8d143b44b1 feat: removed ff for tp-api-monitoring from fe - 1 2025-04-09 15:44:42 +05:30
sawhil
423aebd6eb feat: removed ff for tp-api-monitoring from fe 2025-04-09 15:44:42 +05:30
primus-bot[bot]
8d630707af chore(release): bump to v0.78.1 (#7573)
#### Summary
 - Release SigNoz v0.78.1

 Created by [Primus-Bot](https://github.com/apps/primus-bot)

Co-authored-by: primus-bot[bot] <171087277+primus-bot[bot]@users.noreply.github.com>
2025-04-09 15:26:04 +05:30
Prashant Shahi
a5b52431b7 chore(build): include missing LDFLAGS in the community/enterprise builds (#7571)
### Summary

- include missing LDFLAGS in the community/enterprise builds

---------

Signed-off-by: Prashant Shahi <prashant@signoz.io>
2025-04-09 15:10:49 +05:30
primus-bot[bot]
0138d757c8 chore(release): bump to v0.78.0 (#7568)
Co-authored-by: primus-bot[bot] <171087277+primus-bot[bot]@users.noreply.github.com>
2025-04-09 12:28:18 +05:30
SagarRajput-7
844195b84f Revert "fix: reevaluated newdashboard bool to ensure that widget doesn't exis…" (#7567)
This reverts commit e53d3d1269.
2025-04-09 10:38:31 +05:30
Srikanth Chekuri
8ff05b2e8f chore: add field type definitions for qb v5 (#7552) 2025-04-08 22:34:58 +05:30
Srikanth Chekuri
c8c56c544e chore: add generated parser files for go (#7538) 2025-04-08 13:52:40 +00:00
Vikrant Gupta
1c43655336 fix(ff): alert creation disabled for non metric alerts (#7561) 2025-04-08 13:21:59 +00:00
Prashant Shahi
c269c8c6b8 feat: publish signoz to multiple registry using primus (#7504)
### Summary

- publish signoz images to multiple registry using primus
- deprecate old build workflow

---------

Signed-off-by: Prashant Shahi <prashant@signoz.io>
2025-04-08 18:06:22 +05:30
Vibhu Pandey
3142b6cc6d chore(ff): remove some more unused ffs (#7532)
### Summary

remove unused feature flags
- ENTERPRISE_PLAN = 'ENTERPRISE_PLAN',
- BASIC_PLAN = 'BASIC_PLAN',
- ALERT_CHANNEL_SLACK = 'ALERT_CHANNEL_SLACK',
- ALERT_CHANNEL_WEBHOOK = 'ALERT_CHANNEL_WEBHOOK',
- ALERT_CHANNEL_PAGERDUTY = 'ALERT_CHANNEL_PAGERDUTY',
- ALERT_CHANNEL_OPSGENIE = 'ALERT_CHANNEL_OPSGENIE',
- ALERT_CHANNEL_MSTEAMS = 'ALERT_CHANNEL_MSTEAMS',
- CUSTOM_METRICS_FUNCTION = 'CUSTOM_METRICS_FUNCTION',
- QUERY_BUILDER_PANELS = 'QUERY_BUILDER_PANELS',
- QUERY_BUILDER_ALERTS = 'QUERY_BUILDER_ALERTS',
- DISABLE_UPSELL = 'DISABLE_UPSELL',
- OSS = 'OSS',
- QUERY_BUILDER_SEARCH_V2 = 'QUERY_BUILDER_SEARCH_V2',
- AWS_INTEGRATION = 'AWS_INTEGRATION',

remove ProPlan concept
2025-04-07 22:58:46 +05:30
Vibhu Pandey
58e141685a revert(smart): add smart trace detail search back (#7547)
### Summary

- Added the trace search endpoint back
- Enabled the `smart` algorithm for all users
- Added `"algo":"smart"` for telemetry events in the old endpoint
2025-04-07 17:19:39 +00:00
Srikanth Chekuri
e17f63a50c chore: error log on query error (#7553)
* chore: error log on query error

* chore: no error on context cancel
2025-04-07 15:15:49 +00:00
Vibhu Pandey
838ef5dcc5 feat(valuer): add string valuer (#7554) 2025-04-07 20:36:04 +05:30
SagarRajput-7
e53d3d1269 fix: reevaluated newdashboard bool to ensure that widget doesn't exist, in case of a prior PUT call (#7525)
* fix: reevaluated newdashboard bool to ensure that widget doesn't exist, in case of a prior PUT call

* fix: resolved comment
2025-04-07 15:31:08 +05:30
Vibhu Pandey
2330420c0d fix(querier): remove ff (#7531) 2025-04-05 18:08:06 +00:00
Vibhu Pandey
65ac277074 fix(duration|timestamp): remove duration/timestamp sort (#7530) 2025-04-05 23:26:50 +05:30
Vibhu Pandey
b7982ca348 fix(ff): remove feature interface from ruler (#7529)
### Summary

remove feature interface from ruler
2025-04-05 12:52:26 +00:00
dependabot[bot]
2748b49a44 chore(deps): bump github.com/golang-jwt/jwt/v5 from 5.2.1 to 5.2.2 (#7401)
Bumps [github.com/golang-jwt/jwt/v5](https://github.com/golang-jwt/jwt) from 5.2.1 to 5.2.2.
- [Release notes](https://github.com/golang-jwt/jwt/releases)
- [Changelog](https://github.com/golang-jwt/jwt/blob/main/VERSION_HISTORY.md)
- [Commits](https://github.com/golang-jwt/jwt/compare/v5.2.1...v5.2.2)

---
updated-dependencies:
- dependency-name: github.com/golang-jwt/jwt/v5
  dependency-type: direct:production
...

Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
Co-authored-by: Srikanth Chekuri <srikanth.chekuri92@gmail.com>
2025-04-05 01:43:43 +00:00
Vibhu Pandey
7345027762 fix(ff): remove prefer rpm (#7528) 2025-04-04 23:38:16 +05:30
Vibhu Pandey
68f874e433 chore(ff): remove unused SMART_TRACE_DETAIL feature flag (#7527) 2025-04-04 20:28:54 +05:30
Vibhu Pandey
54a82b1664 fix(dashboards): remove ff interface (#7526) 2025-04-04 19:12:31 +05:30
Yunus M
93dc585145 fix: disable sidenav items for cloud users whose license has expired (#7524) 2025-04-04 18:33:55 +05:30
Vikrant Gupta
6a143efd2c feat(sqlmigration): update the user related tables according to new schema (#7518)
* feat(sqlmigration): update the alertmanager tables

* feat(sqlmigration): update the alertmanager tables

* feat(sqlmigration): make the preference package multi tenant

* feat(preference): address nit pick comments

* feat(preference): added the cascade delete for preferences

* feat(sqlmigration): update apdex and TTL status tables  (#7481)

* feat(sqlmigration): update the apdex and ttl tables

* feat(sqlmigration): register the new migration and rename table

* feat(sqlmigration): fix the ttl queries

* feat(sqlmigration): update the TTL and apdex tables

* feat(sqlmigration): update the TTL and apdex tables

* feat(sqlmigration): fix the reset password and pat tables (#7482)

* feat(sqlmigration): fix the reset password and pat tables

* feat(sqlmigration): revert PAT changes

* feat(sqlmigration): register and rename the new migration

* feat(sqlmigration): handle updates for user tables

* feat(sqlmigration): remove unwanted changes
2025-04-04 01:46:28 +05:30
Vikrant Gupta
0116eb20ab feat(sqlmigration): update apdex and TTL status tables (#7517)
* feat(sqlmigration): update the alertmanager tables

* feat(sqlmigration): update the alertmanager tables

* feat(sqlmigration): make the preference package multi tenant

* feat(preference): address nit pick comments

* feat(preference): added the cascade delete for preferences

* feat(sqlmigration): update apdex and TTL status tables  (#7481)

* feat(sqlmigration): update the apdex and ttl tables

* feat(sqlmigration): register the new migration and rename table

* feat(sqlmigration): fix the ttl queries

* feat(sqlmigration): update the TTL and apdex tables

* feat(sqlmigration): update the TTL and apdex tables
2025-04-04 01:36:47 +05:30
Vikrant Gupta
79e9d1b357 feat(preference): multi tenant preference module (#7516)
* feat(sqlmigration): update the alertmanager tables

* feat(sqlmigration): update the alertmanager tables

* feat(sqlmigration): make the preference package multi tenant

* feat(preference): address nit pick comments

* feat(preference): added the cascade delete for preferences
2025-04-04 01:25:24 +05:30
Vikrant Gupta
b89ce82e25 feat(sqlmigration): update the alertmanager tables (#7513)
* feat(sqlmigration): update the alertmanager tables
2025-04-03 17:56:49 +00:00
dependabot[bot]
b43a198fd8 chore(deps): bump github.com/expr-lang/expr from 1.16.9 to 1.17.0 (#7342)
Bumps [github.com/expr-lang/expr](https://github.com/expr-lang/expr) from 1.16.9 to 1.17.0.
- [Release notes](https://github.com/expr-lang/expr/releases)
- [Commits](https://github.com/expr-lang/expr/compare/v1.16.9...v1.17.0)

---
updated-dependencies:
- dependency-name: github.com/expr-lang/expr
  dependency-type: indirect
...

Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
Co-authored-by: Srikanth Chekuri <srikanth.chekuri92@gmail.com>
2025-04-03 08:17:40 +00:00
Srikanth Chekuri
b40ca4baf3 fix: do not crash service on panic during rule eval (#7514) 2025-04-03 13:13:58 +05:30
SagarRajput-7
8df77c9221 fix: fixed trace funnel - header style overriding other pages (#7512)
* fix: fixed trace funnel - header style overriding other pages

* fix: fixed trace funnel - header style overriding other pages

* fix: handled nesting
2025-04-03 07:29:39 +00:00
Srikanth Chekuri
f67555576f chore: add info icon tool tips for webhook/routing/integration key (#7405) 2025-04-03 09:40:41 +05:30
Srikanth Chekuri
f0a4c37073 fix: handle maintenance windows that cross day boundaries (#7494) 2025-04-02 20:48:01 +00:00
Vikrant Gupta
7972261237 Revert "fix: use search v2 component for traces data source & minor improveme…" (#7511)
This reverts commit d7a6607a25.
2025-04-03 01:15:20 +05:30
2195 changed files with 264977 additions and 53781 deletions

View File

@@ -1,5 +1,4 @@
services:
clickhouse:
image: clickhouse/clickhouse-server:24.1.2-alpine
container_name: clickhouse
@@ -24,7 +23,6 @@ services:
retries: 3
depends_on:
- zookeeper
zookeeper:
image: bitnami/zookeeper:3.7.1
container_name: zookeeper
@@ -41,9 +39,8 @@ services:
interval: 30s
timeout: 5s
retries: 3
schema-migrator-sync:
image: signoz/signoz-schema-migrator:0.111.29
image: signoz/signoz-schema-migrator:v0.129.0
container_name: schema-migrator-sync
command:
- sync
@@ -55,9 +52,8 @@ services:
clickhouse:
condition: service_healthy
restart: on-failure
schema-migrator-async:
image: signoz/signoz-schema-migrator:0.111.29
image: signoz/signoz-schema-migrator:v0.129.0
container_name: schema-migrator-async
command:
- async

View File

@@ -0,0 +1,27 @@
services:
postgres:
image: postgres:15
container_name: postgres
environment:
POSTGRES_DB: signoz
POSTGRES_USER: postgres
POSTGRES_PASSWORD: password
healthcheck:
test:
[
"CMD",
"pg_isready",
"-d",
"signoz",
"-U",
"postgres"
]
interval: 30s
timeout: 30s
retries: 3
restart: on-failure
ports:
- "127.0.0.1:5432:5432/tcp"
volumes:
- ${PWD}/fs/tmp/var/lib/postgresql/data/:/var/lib/postgresql/data/

View File

@@ -0,0 +1,29 @@
services:
signoz-otel-collector:
image: signoz/signoz-otel-collector:v0.128.2
container_name: signoz-otel-collector-dev
command:
- --config=/etc/otel-collector-config.yaml
- --feature-gates=-pkg.translator.prometheus.NormalizeName
volumes:
- ./otel-collector-config.yaml:/etc/otel-collector-config.yaml
environment:
- OTEL_RESOURCE_ATTRIBUTES=host.name=signoz-host,os.type=linux
- LOW_CARDINAL_EXCEPTION_GROUPING=false
ports:
- "4317:4317" # OTLP gRPC receiver
- "4318:4318" # OTLP HTTP receiver
- "13133:13133" # health check extension
healthcheck:
test:
- CMD
- wget
- --spider
- -q
- localhost:13133
interval: 30s
timeout: 5s
retries: 3
restart: unless-stopped
extra_hosts:
- "host.docker.internal:host-gateway"

View File

@@ -0,0 +1,96 @@
receivers:
otlp:
protocols:
grpc:
endpoint: 0.0.0.0:4317
http:
endpoint: 0.0.0.0:4318
prometheus:
config:
global:
scrape_interval: 60s
scrape_configs:
- job_name: otel-collector
static_configs:
- targets:
- localhost:8888
labels:
job_name: otel-collector
processors:
batch:
send_batch_size: 10000
send_batch_max_size: 11000
timeout: 10s
resourcedetection:
# Using OTEL_RESOURCE_ATTRIBUTES envvar, env detector adds custom labels.
detectors: [env, system]
timeout: 2s
signozspanmetrics/delta:
metrics_exporter: signozclickhousemetrics
metrics_flush_interval: 60s
latency_histogram_buckets: [100us, 1ms, 2ms, 6ms, 10ms, 50ms, 100ms, 250ms, 500ms, 1000ms, 1400ms, 2000ms, 5s, 10s, 20s, 40s, 60s ]
dimensions_cache_size: 100000
aggregation_temporality: AGGREGATION_TEMPORALITY_DELTA
enable_exp_histogram: true
dimensions:
- name: service.namespace
default: default
- name: deployment.environment
default: default
# This is added to ensure the uniqueness of the timeseries
# Otherwise, identical timeseries produced by multiple replicas of
# collectors result in incorrect APM metrics
- name: signoz.collector.id
- name: service.version
- name: browser.platform
- name: browser.mobile
- name: k8s.cluster.name
- name: k8s.node.name
- name: k8s.namespace.name
- name: host.name
- name: host.type
- name: container.name
extensions:
health_check:
endpoint: 0.0.0.0:13133
pprof:
endpoint: 0.0.0.0:1777
exporters:
clickhousetraces:
datasource: tcp://host.docker.internal:9000/signoz_traces
low_cardinal_exception_grouping: ${env:LOW_CARDINAL_EXCEPTION_GROUPING}
use_new_schema: true
signozclickhousemetrics:
dsn: tcp://host.docker.internal:9000/signoz_metrics
clickhouselogsexporter:
dsn: tcp://host.docker.internal:9000/signoz_logs
timeout: 10s
use_new_schema: true
service:
telemetry:
logs:
encoding: json
extensions:
- health_check
- pprof
pipelines:
traces:
receivers: [otlp]
processors: [signozspanmetrics/delta, batch]
exporters: [clickhousetraces]
metrics:
receivers: [otlp]
processors: [batch]
exporters: [signozclickhousemetrics]
metrics/prometheus:
receivers: [prometheus]
processors: [batch]
exporters: [signozclickhousemetrics]
logs:
receivers: [otlp]
processors: [batch]
exporters: [clickhouselogsexporter]
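
For reference, a minimal sketch (not part of the changeset above) of an application exporting traces to the dev collector configured in this file. It assumes the OpenTelemetry Go SDK packages (`go.opentelemetry.io/otel` and the OTLP gRPC trace exporter) and the collector's OTLP receiver listening on localhost:4317 as configured above; the service name `devenv-smoke-test` is illustrative.

```go
package main

import (
	"context"
	"log"
	"time"

	"go.opentelemetry.io/otel"
	"go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc"
	"go.opentelemetry.io/otel/sdk/resource"
	sdktrace "go.opentelemetry.io/otel/sdk/trace"
	semconv "go.opentelemetry.io/otel/semconv/v1.17.0"
)

func main() {
	ctx := context.Background()

	// Exporter pointed at the dev collector's OTLP gRPC receiver (port 4317 above).
	exp, err := otlptracegrpc.New(ctx,
		otlptracegrpc.WithEndpoint("localhost:4317"),
		otlptracegrpc.WithInsecure(),
	)
	if err != nil {
		log.Fatalf("create exporter: %v", err)
	}

	// Tracer provider with a batch span processor and an illustrative service name.
	tp := sdktrace.NewTracerProvider(
		sdktrace.WithBatcher(exp),
		sdktrace.WithResource(resource.NewWithAttributes(
			semconv.SchemaURL,
			semconv.ServiceNameKey.String("devenv-smoke-test"),
		)),
	)
	defer func() { _ = tp.Shutdown(ctx) }()
	otel.SetTracerProvider(tp)

	// Emit a single span; it should arrive via the traces pipeline defined above.
	_, span := tp.Tracer("devenv-smoke").Start(ctx, "hello-signoz")
	time.Sleep(10 * time.Millisecond)
	span.End()
}
```

Spans emitted this way should travel the `traces` pipeline above (signozspanmetrics/delta, batch) and land in the `clickhousetraces` exporter.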

View File

@@ -1,6 +1,7 @@
.git
.github
.vscode
.devenv
README.md
deploy
sample-apps

.github/CODEOWNERS vendored
View File

@@ -2,12 +2,43 @@
# Owners are automatically requested for review for PRs that changes code
# that they own.
/frontend/ @YounixM
/frontend/ @SigNoz/frontend @YounixM
/frontend/src/container/MetricsApplication @srikanthccv
/frontend/src/container/NewWidget/RightContainer/types.ts @srikanthccv
/deploy/ @SigNoz/devops
.github @SigNoz/devops
# Scaffold Owners
/pkg/config/ @grandwizard28
/pkg/errors/ @grandwizard28
/pkg/factory/ @grandwizard28
/pkg/types/ @grandwizard28
/pkg/valuer/ @grandwizard28
/cmd/ @grandwizard28
.golangci.yml @grandwizard28
# Zeus Owners
/pkg/zeus/ @vikrantgupta25
/ee/zeus/ @vikrantgupta25
/pkg/licensing/ @vikrantgupta25
/ee/licensing/ @vikrantgupta25
# SQL Owners
/pkg/sqlmigration/ @vikrantgupta25
/ee/sqlmigration/ @vikrantgupta25
/pkg/sqlschema/ @vikrantgupta25
/ee/sqlschema/ @vikrantgupta25
# Analytics Owners
/pkg/analytics/ @vikrantgupta25
/pkg/statsreporter/ @vikrantgupta25
# Querier Owners
/pkg/querier/ @srikanthccv
/pkg/variables/ @srikanthccv
/pkg/types/querybuildertypes/ @srikanthccv
/pkg/querybuilder/ @srikanthccv
/pkg/telemetrylogs/ @srikanthccv
/pkg/telemetrymetadata/ @srikanthccv
/pkg/telemetrymetrics/ @srikanthccv
/pkg/telemetrytraces/ @srikanthccv

View File

@@ -1,17 +1,72 @@
### Summary
## 📄 Summary
<!-- ✍️ A clear and concise description...-->
<!-- Describe the purpose of the PR in a few sentences. What does it fix/add/update? -->
#### Related Issues / PR's
---
<!-- ✍️ Add the issues being resolved here and related PR's where applicable -->
## ✅ Changes
#### Screenshots
- [ ] Feature: Brief description
- [ ] Bug fix: Brief description
NA
---
<!-- ✍️ Add screenshots of before and after changes where applicable-->
## 🏷️ Required: Add Relevant Labels
#### Affected Areas and Manually Tested Areas
> ⚠️ **Manually add appropriate labels in the PR sidebar**
Please select one or more labels (as applicable):
<!-- ✍️ Add details of blast radius and dev testing areas where applicable-->
ex:
- `frontend`
- `backend`
- `devops`
- `bug`
- `enhancement`
- `ui`
- `test`
---
## 👥 Reviewers
> Tag the relevant teams for review:
- frontend / backend / devops
---
## 🧪 How to Test
<!-- Describe how reviewers can test this PR -->
1. ...
2. ...
3. ...
---
## 🔍 Related Issues
<!-- Reference any related issues (e.g. Fixes #123, Closes #456) -->
Closes #
---
## 📸 Screenshots / Screen Recording (if applicable / mandatory for UI related changes)
<!-- Add screenshots or GIFs to help visualize changes -->
---
## 📋 Checklist
- [ ] Dev Review
- [ ] Test cases added (Unit/ Integration / E2E)
- [ ] Manually tested the changes
---
## 👀 Notes for Reviewers
<!-- Anything reviewers should keep in mind while reviewing -->

View File

@@ -1,42 +0,0 @@
# Github actions
## Testing the UI manually on each PR
First we need to make sure the UI is ready
* Check the `Start tunnel` step in `e2e-k8s/deploy-on-k3s-cluster` job and make sure you see `your url is: https://pull-<number>-signoz.loca.lt`
* This job will run until the PR is merged or closed to keep the local tunneling alive
- github will cancel this job if the PR wasn't merged after 6h
- if the job was cancelled, go to the action and press `Re-run all jobs`
Now you can open your browser at https://pull-<number>-signoz.loca.lt and check the UI.
## Environment Variables
To run the GitHub workflows, a few environment variables need to be added to GitHub secrets
<table>
<tr>
<th> Variables </th>
<th> Description </th>
<th> Example </th>
</tr>
<tr>
<td> REPONAME </td>
<td> Provide the DockerHub user/organisation name of the image. </td>
<td> signoz</td>
</tr>
<tr>
<td> DOCKERHUB_USERNAME </td>
<td> Docker hub username </td>
<td> signoz</td>
</tr>
<tr>
<td> DOCKERHUB_TOKEN </td>
<td> Docker hub password/token with push permission </td>
<td> **** </td>
</tr>
<tr>
<td> SONAR_TOKEN </td>
<td> <a href="https://sonarcloud.io">SonarCloud</a> token </td>
<td> **** </td>
</tr>

.github/workflows/build-community.yaml vendored Normal file
View File

@@ -0,0 +1,83 @@
name: build-community
on:
push:
tags:
- 'v[0-9]+.[0-9]+.[0-9]+'
- 'v[0-9]+.[0-9]+.[0-9]+-rc.[0-9]+'
defaults:
run:
shell: bash
env:
PRIMUS_HOME: .primus
MAKE: make --no-print-directory --makefile=.primus/src/make/main.mk
jobs:
prepare:
runs-on: ubuntu-latest
outputs:
version: ${{ steps.build-info.outputs.version }}
hash: ${{ steps.build-info.outputs.hash }}
time: ${{ steps.build-info.outputs.time }}
branch: ${{ steps.build-info.outputs.branch }}
steps:
- name: self-checkout
uses: actions/checkout@v4
- id: token
name: github-token-gen
uses: actions/create-github-app-token@v1
with:
app-id: ${{ secrets.PRIMUS_APP_ID }}
private-key: ${{ secrets.PRIMUS_PRIVATE_KEY }}
owner: ${{ github.repository_owner }}
- name: primus-checkout
uses: actions/checkout@v4
with:
repository: signoz/primus
ref: main
path: .primus
token: ${{ steps.token.outputs.token }}
- name: build-info
run: |
echo "version=$($MAKE info-version)" >> $GITHUB_OUTPUT
echo "hash=$($MAKE info-commit-short)" >> $GITHUB_OUTPUT
echo "time=$($MAKE info-timestamp)" >> $GITHUB_OUTPUT
echo "branch=$($MAKE info-branch)" >> $GITHUB_OUTPUT
js-build:
uses: signoz/primus.workflows/.github/workflows/js-build.yaml@main
needs: prepare
secrets: inherit
with:
PRIMUS_REF: main
JS_SRC: frontend
JS_OUTPUT_ARTIFACT_CACHE_KEY: community-jsbuild-${{ github.sha }}
JS_OUTPUT_ARTIFACT_PATH: frontend/build
DOCKER_BUILD: false
DOCKER_MANIFEST: false
go-build:
uses: signoz/primus.workflows/.github/workflows/go-build.yaml@main
needs: [prepare, js-build]
secrets: inherit
with:
PRIMUS_REF: main
GO_VERSION: 1.23
GO_NAME: signoz-community
GO_INPUT_ARTIFACT_CACHE_KEY: community-jsbuild-${{ github.sha }}
GO_INPUT_ARTIFACT_PATH: frontend/build
GO_BUILD_CONTEXT: ./cmd/community
GO_BUILD_FLAGS: >-
-tags timetzdata
-ldflags='-linkmode external -extldflags \"-static\" -s -w
-X github.com/SigNoz/signoz/pkg/version.version=${{ needs.prepare.outputs.version }}
-X github.com/SigNoz/signoz/pkg/version.variant=community
-X github.com/SigNoz/signoz/pkg/version.hash=${{ needs.prepare.outputs.hash }}
-X github.com/SigNoz/signoz/pkg/version.time=${{ needs.prepare.outputs.time }}
-X github.com/SigNoz/signoz/pkg/version.branch=${{ needs.prepare.outputs.branch }}
-X github.com/SigNoz/signoz/pkg/analytics.key=9kRrJ7oPCGPEJLF6QjMPLt5bljFhRQBr'
GO_CGO_ENABLED: 1
DOCKER_BASE_IMAGES: '{"alpine": "alpine:3.20.3"}'
DOCKER_DOCKERFILE_PATH: ./cmd/community/Dockerfile.multi-arch
DOCKER_MANIFEST: true
DOCKER_PROVIDERS: dockerhub

.github/workflows/build-enterprise.yaml vendored Normal file
View File

@@ -0,0 +1,117 @@
name: build-enterprise
on:
push:
tags:
- v*
defaults:
run:
shell: bash
env:
PRIMUS_HOME: .primus
MAKE: make --no-print-directory --makefile=.primus/src/make/main.mk
jobs:
prepare:
runs-on: ubuntu-latest
outputs:
docker_providers: ${{ steps.set-docker-providers.outputs.providers }}
version: ${{ steps.build-info.outputs.version }}
hash: ${{ steps.build-info.outputs.hash }}
time: ${{ steps.build-info.outputs.time }}
branch: ${{ steps.build-info.outputs.branch }}
steps:
- name: self-checkout
uses: actions/checkout@v4
- id: token
name: github-token-gen
uses: actions/create-github-app-token@v1
with:
app-id: ${{ secrets.PRIMUS_APP_ID }}
private-key: ${{ secrets.PRIMUS_PRIVATE_KEY }}
owner: ${{ github.repository_owner }}
- name: primus-checkout
uses: actions/checkout@v4
with:
repository: signoz/primus
ref: main
path: .primus
token: ${{ steps.token.outputs.token }}
- name: build-info
id: build-info
run: |
echo "version=$($MAKE info-version)" >> $GITHUB_OUTPUT
echo "hash=$($MAKE info-commit-short)" >> $GITHUB_OUTPUT
echo "time=$($MAKE info-timestamp)" >> $GITHUB_OUTPUT
echo "branch=$($MAKE info-branch)" >> $GITHUB_OUTPUT
- name: set-docker-providers
id: set-docker-providers
run: |
if [[ ${{ github.event.ref }} =~ ^refs/tags/v[0-9]+\.[0-9]+\.[0-9]+$ || ${{ github.event.ref }} =~ ^refs/tags/v[0-9]+\.[0-9]+\.[0-9]+-rc\.[0-9]+$ ]]; then
echo "providers=dockerhub gcp" >> $GITHUB_OUTPUT
else
echo "providers=gcp" >> $GITHUB_OUTPUT
fi
- name: create-dotenv
run: |
mkdir -p frontend
echo 'CI=1' > frontend/.env
echo 'INTERCOM_APP_ID="${{ secrets.INTERCOM_APP_ID }}"' >> frontend/.env
echo 'SEGMENT_ID="${{ secrets.SEGMENT_ID }}"' >> frontend/.env
echo 'SENTRY_AUTH_TOKEN="${{ secrets.SENTRY_AUTH_TOKEN }}"' >> frontend/.env
echo 'SENTRY_ORG="${{ secrets.SENTRY_ORG }}"' >> frontend/.env
echo 'SENTRY_PROJECT_ID="${{ secrets.SENTRY_PROJECT_ID }}"' >> frontend/.env
echo 'SENTRY_DSN="${{ secrets.SENTRY_DSN }}"' >> frontend/.env
echo 'TUNNEL_URL="${{ secrets.TUNNEL_URL }}"' >> frontend/.env
echo 'TUNNEL_DOMAIN="${{ secrets.TUNNEL_DOMAIN }}"' >> frontend/.env
echo 'POSTHOG_KEY="${{ secrets.POSTHOG_KEY }}"' >> frontend/.env
echo 'PYLON_APP_ID="${{ secrets.PYLON_APP_ID }}"' >> frontend/.env
echo 'APPCUES_APP_ID="${{ secrets.APPCUES_APP_ID }}"' >> frontend/.env
- name: cache-dotenv
uses: actions/cache@v4
with:
path: frontend/.env
key: enterprise-dotenv-${{ github.sha }}
js-build:
uses: signoz/primus.workflows/.github/workflows/js-build.yaml@main
needs: prepare
secrets: inherit
with:
PRIMUS_REF: main
JS_SRC: frontend
JS_INPUT_ARTIFACT_CACHE_KEY: enterprise-dotenv-${{ github.sha }}
JS_INPUT_ARTIFACT_PATH: frontend/.env
JS_OUTPUT_ARTIFACT_CACHE_KEY: enterprise-jsbuild-${{ github.sha }}
JS_OUTPUT_ARTIFACT_PATH: frontend/build
DOCKER_BUILD: false
DOCKER_MANIFEST: false
go-build:
uses: signoz/primus.workflows/.github/workflows/go-build.yaml@main
needs: [prepare, js-build]
secrets: inherit
with:
PRIMUS_REF: main
GO_VERSION: 1.23
GO_INPUT_ARTIFACT_CACHE_KEY: enterprise-jsbuild-${{ github.sha }}
GO_INPUT_ARTIFACT_PATH: frontend/build
GO_BUILD_CONTEXT: ./cmd/enterprise
GO_BUILD_FLAGS: >-
-tags timetzdata
-ldflags='-linkmode external -extldflags \"-static\" -s -w
-X github.com/SigNoz/signoz/pkg/version.version=${{ needs.prepare.outputs.version }}
-X github.com/SigNoz/signoz/pkg/version.variant=enterprise
-X github.com/SigNoz/signoz/pkg/version.hash=${{ needs.prepare.outputs.hash }}
-X github.com/SigNoz/signoz/pkg/version.time=${{ needs.prepare.outputs.time }}
-X github.com/SigNoz/signoz/pkg/version.branch=${{ needs.prepare.outputs.branch }}
-X github.com/SigNoz/signoz/ee/zeus.url=https://api.signoz.cloud
-X github.com/SigNoz/signoz/ee/zeus.deprecatedURL=https://license.signoz.io
-X github.com/SigNoz/signoz/ee/query-service/constants.ZeusURL=https://api.signoz.cloud
-X github.com/SigNoz/signoz/ee/query-service/constants.LicenseSignozIo=https://license.signoz.io/api/v1
-X github.com/SigNoz/signoz/pkg/analytics.key=9kRrJ7oPCGPEJLF6QjMPLt5bljFhRQBr'
GO_CGO_ENABLED: 1
DOCKER_BASE_IMAGES: '{"alpine": "alpine:3.20.3"}'
DOCKER_DOCKERFILE_PATH: ./cmd/enterprise/Dockerfile.multi-arch
DOCKER_MANIFEST: true
DOCKER_PROVIDERS: ${{ needs.prepare.outputs.docker_providers }}

.github/workflows/build-staging.yaml vendored Normal file
View File

@@ -0,0 +1,128 @@
name: build-staging
on:
push:
branches:
- main
pull_request:
types: [labeled]
defaults:
run:
shell: bash
env:
PRIMUS_HOME: .primus
MAKE: make --no-print-directory --makefile=.primus/src/make/main.mk
jobs:
prepare:
runs-on: ubuntu-latest
if: ${{ contains(github.event.label.name, 'staging:') || github.event.ref == 'refs/heads/main' }}
outputs:
version: ${{ steps.build-info.outputs.version }}
hash: ${{ steps.build-info.outputs.hash }}
time: ${{ steps.build-info.outputs.time }}
branch: ${{ steps.build-info.outputs.branch }}
deployment: ${{ steps.build-info.outputs.deployment }}
steps:
- name: self-checkout
uses: actions/checkout@v4
- id: token
name: github-token-gen
uses: actions/create-github-app-token@v1
with:
app-id: ${{ secrets.PRIMUS_APP_ID }}
private-key: ${{ secrets.PRIMUS_PRIVATE_KEY }}
owner: ${{ github.repository_owner }}
- name: primus-checkout
uses: actions/checkout@v4
with:
repository: signoz/primus
ref: main
path: .primus
token: ${{ steps.token.outputs.token }}
- name: build-info
id: build-info
run: |
echo "version=$($MAKE info-version)" >> $GITHUB_OUTPUT
echo "hash=$($MAKE info-commit-short)" >> $GITHUB_OUTPUT
echo "time=$($MAKE info-timestamp)" >> $GITHUB_OUTPUT
echo "branch=$($MAKE info-branch)" >> $GITHUB_OUTPUT
staging_label="${{ github.event.label.name }}"
if [[ "${staging_label}" == "staging:"* ]]; then
deployment=${staging_label#"staging:"}
elif [[ "${{ github.event.ref }}" == "refs/heads/main" ]]; then
deployment="staging"
else
echo "error: not able to determine deployment - please verify the PR label or the branch"
exit 1
fi
echo "deployment=${deployment}" >> $GITHUB_OUTPUT
- name: create-dotenv
run: |
mkdir -p frontend
echo 'CI=1' > frontend/.env
echo 'TUNNEL_URL="${{ secrets.NP_TUNNEL_URL }}"' >> frontend/.env
echo 'TUNNEL_DOMAIN="${{ secrets.NP_TUNNEL_DOMAIN }}"' >> frontend/.env
echo 'PYLON_APP_ID="${{ secrets.NP_PYLON_APP_ID }}"' >> frontend/.env
echo 'APPCUES_APP_ID="${{ secrets.NP_APPCUES_APP_ID }}"' >> frontend/.env
- name: cache-dotenv
uses: actions/cache@v4
with:
path: frontend/.env
key: staging-dotenv-${{ github.sha }}
js-build:
uses: signoz/primus.workflows/.github/workflows/js-build.yaml@main
needs: prepare
secrets: inherit
with:
PRIMUS_REF: main
JS_SRC: frontend
JS_INPUT_ARTIFACT_CACHE_KEY: staging-dotenv-${{ github.sha }}
JS_INPUT_ARTIFACT_PATH: frontend/.env
JS_OUTPUT_ARTIFACT_CACHE_KEY: staging-jsbuild-${{ github.sha }}
JS_OUTPUT_ARTIFACT_PATH: frontend/build
DOCKER_BUILD: false
DOCKER_MANIFEST: false
go-build:
uses: signoz/primus.workflows/.github/workflows/go-build.yaml@main
needs: [prepare, js-build]
secrets: inherit
with:
PRIMUS_REF: main
GO_VERSION: 1.23
GO_INPUT_ARTIFACT_CACHE_KEY: staging-jsbuild-${{ github.sha }}
GO_INPUT_ARTIFACT_PATH: frontend/build
GO_BUILD_CONTEXT: ./cmd/enterprise
GO_BUILD_FLAGS: >-
-tags timetzdata
-ldflags='-linkmode external -extldflags \"-static\" -s -w
-X github.com/SigNoz/signoz/pkg/version.version=${{ needs.prepare.outputs.version }}
-X github.com/SigNoz/signoz/pkg/version.variant=enterprise
-X github.com/SigNoz/signoz/pkg/version.hash=${{ needs.prepare.outputs.hash }}
-X github.com/SigNoz/signoz/pkg/version.time=${{ needs.prepare.outputs.time }}
-X github.com/SigNoz/signoz/pkg/version.branch=${{ needs.prepare.outputs.branch }}
-X github.com/SigNoz/signoz/ee/zeus.url=https://api.staging.signoz.cloud
-X github.com/SigNoz/signoz/ee/zeus.deprecatedURL=https://license.staging.signoz.cloud
-X github.com/SigNoz/signoz/ee/query-service/constants.ZeusURL=https://api.staging.signoz.cloud
-X github.com/SigNoz/signoz/ee/query-service/constants.LicenseSignozIo=https://license.staging.signoz.cloud/api/v1
-X github.com/SigNoz/signoz/pkg/analytics.key=9kRrJ7oPCGPEJLF6QjMPLt5bljFhRQBr'
GO_CGO_ENABLED: 1
DOCKER_BASE_IMAGES: '{"alpine": "alpine:3.20.3"}'
DOCKER_DOCKERFILE_PATH: ./cmd/enterprise/Dockerfile.multi-arch
DOCKER_MANIFEST: true
DOCKER_PROVIDERS: gcp
staging:
if: ${{ contains(github.event.label.name, 'staging:') || github.event.ref == 'refs/heads/main' }}
uses: signoz/primus.workflows/.github/workflows/github-trigger.yaml@main
secrets: inherit
needs: [prepare, go-build]
with:
PRIMUS_REF: main
GITHUB_ENVIRONMENT: staging
GITHUB_SILENT: true
GITHUB_REPOSITORY_NAME: charts-saas-v3-staging
GITHUB_EVENT_NAME: releaser
GITHUB_EVENT_PAYLOAD: "{\"deployment\": \"${{ needs.prepare.outputs.deployment }}\", \"signoz_version\": \"${{ needs.prepare.outputs.version }}\"}"

View File

@@ -1,122 +0,0 @@
name: build
on:
push:
branches:
- main
tags:
- v*
jobs:
enterprise:
runs-on: ubuntu-latest
steps:
- name: checkout
uses: actions/checkout@v4
- name: setup
uses: actions/setup-go@v5
with:
go-version: "1.22"
- name: setup-qemu
uses: docker/setup-qemu-action@v3
- name: setup-buildx
uses: docker/setup-buildx-action@v3
with:
version: latest
- name: docker-login
uses: docker/login-action@v3
with:
username: ${{ secrets.DOCKERHUB_USERNAME }}
password: ${{ secrets.DOCKERHUB_TOKEN }}
- name: create-env-file
run: |
echo 'INTERCOM_APP_ID="${{ secrets.INTERCOM_APP_ID }}"' > frontend/.env
echo 'SEGMENT_ID="${{ secrets.SEGMENT_ID }}"' >> frontend/.env
echo 'SENTRY_AUTH_TOKEN="${{ secrets.SENTRY_AUTH_TOKEN }}"' >> frontend/.env
echo 'SENTRY_ORG="${{ secrets.SENTRY_ORG }}"' >> frontend/.env
echo 'SENTRY_PROJECT_ID="${{ secrets.SENTRY_PROJECT_ID }}"' >> frontend/.env
echo 'SENTRY_DSN="${{ secrets.SENTRY_DSN }}"' >> frontend/.env
echo 'TUNNEL_URL="${{ secrets.TUNNEL_URL }}"' >> frontend/.env
echo 'TUNNEL_DOMAIN="${{ secrets.TUNNEL_DOMAIN }}"' >> frontend/.env
echo 'POSTHOG_KEY="${{ secrets.POSTHOG_KEY }}"' >> frontend/.env
echo 'CUSTOMERIO_ID="${{ secrets.CUSTOMERIO_ID }}"' >> frontend/.env
echo 'CUSTOMERIO_SITE_ID="${{ secrets.CUSTOMERIO_SITE_ID }}"' >> frontend/.env
- name: github-ref-info
shell: bash
run: |
GH_REF=${{ github.ref }}
if [[ "${{ github.ref_type }}" == "tag" ]]; then
PREFIX="refs/tags/"
echo "GH_IS_TAG=true" >> $GITHUB_ENV
echo "GH_TAG=${GH_REF#$PREFIX}" >> $GITHUB_ENV
else
PREFIX="refs/heads/"
echo "GH_IS_TAG=false" >> $GITHUB_ENV
echo "GH_BRANCH_NAME=${GH_REF#$PREFIX}" >> $GITHUB_ENV
fi
- name: set-version
run: |
if [ '${{ env.GH_IS_TAG }}' == 'true' ]; then
echo "VERSION=${{ env.GH_TAG }}" >> $GITHUB_ENV
elif [ '${{ env.GH_BRANCH_NAME }}' == 'main' ]; then
echo "VERSION=latest" >> $GITHUB_ENV
else
echo "VERSION=${{ env.GH_BRANCH_NAME }}" >> $GITHUB_ENV
fi
- name: cross-compilation-tools
run: |
set -ex
sudo apt-get update
sudo apt-get install -y gcc-aarch64-linux-gnu musl-tools
- name: publish
run: make docker-buildx-enterprise
community:
runs-on: ubuntu-latest
steps:
- name: checkout
uses: actions/checkout@v4
- name: setup-go
uses: actions/setup-go@v5
with:
go-version: "1.22"
- name: setup-qemu
uses: docker/setup-qemu-action@v3
- name: setup-buildx
uses: docker/setup-buildx-action@v3
with:
version: latest
- name: docker-login
uses: docker/login-action@v3
with:
username: ${{ secrets.DOCKERHUB_USERNAME }}
password: ${{ secrets.DOCKERHUB_TOKEN }}
- name: github-ref-info
shell: bash
run: |
GH_REF=${{ github.ref }}
if [[ "${{ github.ref_type }}" == "tag" ]]; then
PREFIX="refs/tags/"
echo "GH_IS_TAG=true" >> $GITHUB_ENV
echo "GH_TAG=${GH_REF#$PREFIX}" >> $GITHUB_ENV
else
PREFIX="refs/heads/"
echo "GH_IS_TAG=false" >> $GITHUB_ENV
echo "GH_BRANCH_NAME=${GH_REF#$PREFIX}" >> $GITHUB_ENV
fi
- name: set-version
run: |
if [ '${{ env.GH_IS_TAG }}' == 'true' ]; then
echo "VERSION=${{ env.GH_TAG }}" >> $GITHUB_ENV
elif [ '${{ env.GH_BRANCH_NAME }}' == 'main' ]; then
echo "VERSION=latest" >> $GITHUB_ENV
else
echo "VERSION=${{ env.GH_BRANCH_NAME }}" >> $GITHUB_ENV
fi
- name: cross-compilation-tools
run: |
set -ex
sudo apt-get update
sudo apt-get install -y gcc-aarch64-linux-gnu musl-tools
- name: publish
run: make docker-buildx-community

View File

@@ -18,6 +18,7 @@ jobs:
with:
PRIMUS_REF: main
GO_TEST_CONTEXT: ./...
GO_VERSION: 1.23
fmt:
if: |
(github.event_name == 'pull_request' && ! github.event.pull_request.head.repo.fork && github.event.pull_request.user.login != 'dependabot[bot]' && ! contains(github.event.pull_request.labels.*.name, 'safe-to-test')) ||
@@ -26,6 +27,7 @@ jobs:
secrets: inherit
with:
PRIMUS_REF: main
GO_VERSION: 1.23
lint:
if: |
(github.event_name == 'pull_request' && ! github.event.pull_request.head.repo.fork && github.event.pull_request.user.login != 'dependabot[bot]' && ! contains(github.event.pull_request.labels.*.name, 'safe-to-test')) ||
@@ -34,6 +36,16 @@ jobs:
secrets: inherit
with:
PRIMUS_REF: main
GO_VERSION: 1.23
deps:
if: |
(github.event_name == 'pull_request' && ! github.event.pull_request.head.repo.fork && github.event.pull_request.user.login != 'dependabot[bot]' && ! contains(github.event.pull_request.labels.*.name, 'safe-to-test')) ||
(github.event_name == 'pull_request_target' && contains(github.event.pull_request.labels.*.name, 'safe-to-test'))
uses: signoz/primus.workflows/.github/workflows/go-deps.yaml@main
secrets: inherit
with:
PRIMUS_REF: main
GO_VERSION: 1.23
build:
if: |
(github.event_name == 'pull_request' && ! github.event.pull_request.head.repo.fork && github.event.pull_request.user.login != 'dependabot[bot]' && ! contains(github.event.pull_request.labels.*.name, 'safe-to-test')) ||
@@ -45,7 +57,7 @@ jobs:
- name: go-install
uses: actions/setup-go@v5
with:
go-version: "1.22"
go-version: "1.23"
- name: qemu-install
uses: docker/setup-qemu-action@v3
- name: aarch64-install

View File

@@ -36,7 +36,7 @@ jobs:
- ubuntu-latest
- macos-latest
env:
CONFIG_PATH: pkg/query-service/.goreleaser.yaml
CONFIG_PATH: cmd/community/.goreleaser.yaml
runs-on: ${{ matrix.os }}
steps:
- name: checkout
@@ -58,7 +58,7 @@ jobs:
- name: setup-go
uses: actions/setup-go@v5
with:
go-version: "1.22"
go-version: "1.23"
- name: cross-compilation-tools
if: matrix.os == 'ubuntu-latest'
run: |
@@ -100,7 +100,7 @@ jobs:
needs: build
env:
DOCKER_CLI_EXPERIMENTAL: "enabled"
WORKDIR: pkg/query-service
WORKDIR: cmd/community
steps:
- name: checkout
uses: actions/checkout@v4
@@ -122,7 +122,7 @@ jobs:
- name: setup-go
uses: actions/setup-go@v5
with:
go-version: "1.22"
go-version: "1.23"
# copy the caches from build
- name: get-sha

View File

@@ -33,8 +33,8 @@ jobs:
echo 'TUNNEL_URL="${{ secrets.TUNNEL_URL }}"' >> .env
echo 'TUNNEL_DOMAIN="${{ secrets.TUNNEL_DOMAIN }}"' >> .env
echo 'POSTHOG_KEY="${{ secrets.POSTHOG_KEY }}"' >> .env
echo 'CUSTOMERIO_ID="${{ secrets.CUSTOMERIO_ID }}"' >> .env
echo 'CUSTOMERIO_SITE_ID="${{ secrets.CUSTOMERIO_SITE_ID }}"' >> .env
echo 'PYLON_APP_ID="${{ secrets.PYLON_APP_ID }}"' >> .env
echo 'APPCUES_APP_ID="${{ secrets.APPCUES_APP_ID }}"' >> .env
- name: build-frontend
run: make js-build
- name: upload-frontend-artifact
@@ -50,7 +50,7 @@ jobs:
- ubuntu-latest
- macos-latest
env:
CONFIG_PATH: ee/query-service/.goreleaser.yaml
CONFIG_PATH: cmd/enterprise/.goreleaser.yaml
runs-on: ${{ matrix.os }}
steps:
- name: checkout
@@ -72,7 +72,7 @@ jobs:
- name: setup-go
uses: actions/setup-go@v5
with:
go-version: "1.22"
go-version: "1.23"
- name: cross-compilation-tools
if: matrix.os == 'ubuntu-latest'
run: |
@@ -135,7 +135,7 @@ jobs:
- name: setup-go
uses: actions/setup-go@v5
with:
go-version: "1.22"
go-version: "1.23"
# copy the caches from build
- name: get-sha

.github/workflows/integrationci.yaml vendored Normal file
View File

@@ -0,0 +1,55 @@
name: integrationci
on:
pull_request:
types:
- labeled
pull_request_target:
types:
- labeled
jobs:
test:
strategy:
fail-fast: false
matrix:
src:
- bootstrap
- auth
- querier
sqlstore-provider:
- postgres
- sqlite
clickhouse-version:
- 24.1.2-alpine
- 25.5.6
schema-migrator-version:
- v0.128.1
postgres-version:
- 15
if: |
((github.event_name == 'pull_request' && ! github.event.pull_request.head.repo.fork && github.event.pull_request.user.login != 'dependabot[bot]' && ! contains(github.event.pull_request.labels.*.name, 'safe-to-test')) ||
(github.event_name == 'pull_request_target' && contains(github.event.pull_request.labels.*.name, 'safe-to-test'))) && contains(github.event.pull_request.labels.*.name, 'safe-to-integrate')
runs-on: ubuntu-latest
steps:
- name: checkout
uses: actions/checkout@v4
- name: python
uses: actions/setup-python@v5
with:
python-version: 3.13
- name: poetry
run: |
python -m pip install poetry==2.1.2
python -m poetry config virtualenvs.in-project true
cd tests/integration && poetry install --no-root
- name: run
run: |
cd tests/integration && \
poetry run pytest \
--basetemp=./tmp/ \
src/${{matrix.src}} \
--sqlstore-provider ${{matrix.sqlstore-provider}} \
--postgres-version ${{matrix.postgres-version}} \
--clickhouse-version ${{matrix.clickhouse-version}} \
--schema-migrator-version ${{matrix.schema-migrator-version}}

View File

@@ -1,10 +1,6 @@
name: prereleaser
on:
# schedule every wednesday 9:30 AM UTC (3pm IST)
schedule:
- cron: '30 9 * * 3'
# allow manual triggering of the workflow by a maintainer
workflow_dispatch:
inputs:

View File

@@ -1,16 +0,0 @@
name: remove-label
on:
pull_request_target:
types: [synchronize]
jobs:
remove:
runs-on: ubuntu-latest
steps:
- name: Remove label testing-deploy from PR
uses: buildsville/add-remove-label@v2.0.0
with:
label: testing-deploy
type: remove
token: ${{ secrets.GITHUB_TOKEN }}

.github/workflows/run-e2e.yaml vendored Normal file
View File

@@ -0,0 +1,62 @@
name: e2eci
on:
workflow_dispatch:
inputs:
userRole:
description: "Role of the user (ADMIN, EDITOR, VIEWER)"
required: true
type: choice
options:
- ADMIN
- EDITOR
- VIEWER
jobs:
test:
name: Run Playwright Tests
runs-on: ubuntu-latest
timeout-minutes: 60
steps:
- name: Checkout repository
uses: actions/checkout@v4
- name: Setup Node.js
uses: actions/setup-node@v4
with:
node-version: lts/*
- name: Mask secrets and input
run: |
echo "::add-mask::${{ secrets.BASE_URL }}"
echo "::add-mask::${{ secrets.LOGIN_USERNAME }}"
echo "::add-mask::${{ secrets.LOGIN_PASSWORD }}"
echo "::add-mask::${{ github.event.inputs.userRole }}"
- name: Install dependencies
working-directory: frontend
run: |
npm install -g yarn
yarn
- name: Install Playwright Browsers
working-directory: frontend
run: yarn playwright install --with-deps
- name: Run Playwright Tests
working-directory: frontend
run: |
BASE_URL="${{ secrets.BASE_URL }}" \
LOGIN_USERNAME="${{ secrets.LOGIN_USERNAME }}" \
LOGIN_PASSWORD="${{ secrets.LOGIN_PASSWORD }}" \
USER_ROLE="${{ github.event.inputs.userRole }}" \
yarn playwright test
- name: Upload Playwright Report
uses: actions/upload-artifact@v4
if: always()
with:
name: playwright-report
path: frontend/playwright-report/
retention-days: 30

View File

@@ -1,56 +0,0 @@
name: staging-deployment
# Trigger deployment only on push to main branch
on:
push:
branches:
- main
jobs:
deploy:
name: Deploy latest main branch to staging
runs-on: ubuntu-latest
environment: staging
permissions:
contents: 'read'
id-token: 'write'
steps:
- id: 'auth'
uses: 'google-github-actions/auth@v2'
with:
workload_identity_provider: ${{ secrets.GCP_WORKLOAD_IDENTITY_PROVIDER }}
service_account: ${{ secrets.GCP_SERVICE_ACCOUNT }}
- name: 'sdk'
uses: 'google-github-actions/setup-gcloud@v2'
- name: 'ssh'
shell: bash
env:
GITHUB_BRANCH: ${{ github.head_ref || github.ref_name }}
GITHUB_SHA: ${{ github.sha }}
GCP_PROJECT: ${{ secrets.GCP_PROJECT }}
GCP_ZONE: ${{ secrets.GCP_ZONE }}
GCP_INSTANCE: ${{ secrets.GCP_INSTANCE }}
CLOUDSDK_CORE_DISABLE_PROMPTS: 1
run: |
read -r -d '' COMMAND <<EOF || true
echo "GITHUB_BRANCH: ${GITHUB_BRANCH}"
echo "GITHUB_SHA: ${GITHUB_SHA}"
export VERSION="${GITHUB_SHA:0:7}" # needed for child process to access it
export OTELCOL_TAG="main"
export PATH="/usr/local/go/bin/:$PATH" # needed for Golang to work
export KAFKA_SPAN_EVAL="true"
docker system prune --force
docker pull signoz/signoz-otel-collector:main
docker pull signoz/signoz-schema-migrator:main
cd ~/signoz
git status
git add .
git stash push -m "stashed on $(date --iso-8601=seconds)"
git fetch origin
git checkout ${GITHUB_BRANCH}
git pull
make docker-build-enterprise-amd64
export VERSION="${GITHUB_SHA:0:7}-amd64"
docker-compose -f deploy/docker/docker-compose.testing.yaml up --build -d
EOF
gcloud beta compute ssh ${GCP_INSTANCE} --zone ${GCP_ZONE} --ssh-key-expire-after=15m --tunnel-through-iap --project ${GCP_PROJECT} --command "${COMMAND}"

View File

@@ -1,56 +0,0 @@
name: testing-deployment
# Trigger deployment only on testing-deploy label on pull request
on:
pull_request:
types: [labeled]
jobs:
deploy:
name: Deploy PR branch to testing
runs-on: ubuntu-latest
environment: testing
if: ${{ github.event.label.name == 'testing-deploy' }}
permissions:
contents: 'read'
id-token: 'write'
steps:
- id: 'auth'
uses: 'google-github-actions/auth@v2'
with:
workload_identity_provider: ${{ secrets.GCP_WORKLOAD_IDENTITY_PROVIDER }}
service_account: ${{ secrets.GCP_SERVICE_ACCOUNT }}
- name: 'sdk'
uses: 'google-github-actions/setup-gcloud@v2'
- name: 'ssh'
shell: bash
env:
GITHUB_BRANCH: ${{ github.head_ref || github.ref_name }}
GITHUB_SHA: ${{ github.sha }}
GCP_PROJECT: ${{ secrets.GCP_PROJECT }}
GCP_ZONE: ${{ secrets.GCP_ZONE }}
GCP_INSTANCE: ${{ secrets.GCP_INSTANCE }}
CLOUDSDK_CORE_DISABLE_PROMPTS: 1
run: |
read -r -d '' COMMAND <<EOF || true
echo "GITHUB_BRANCH: ${GITHUB_BRANCH}"
echo "GITHUB_SHA: ${GITHUB_SHA}"
export VERSION="${GITHUB_SHA:0:7}" # needed for child process to access it
export DEV_BUILD="1"
export PATH="/usr/local/go/bin/:$PATH" # needed for Golang to work
docker system prune --force
cd ~/signoz
git status
git add .
git stash push -m "stashed on $(date --iso-8601=seconds)"
git fetch origin
git checkout main
git pull
# This is added to include the scenario when a new commit in the PR is force-pushed
git branch -D ${GITHUB_BRANCH}
git checkout --track origin/${GITHUB_BRANCH}
make docker-build-enterprise-amd64
export VERSION="${GITHUB_SHA:0:7}-amd64"
docker-compose -f deploy/docker/docker-compose.testing.yaml up --build -d
EOF
gcloud beta compute ssh ${GCP_INSTANCE} --zone ${GCP_ZONE} --ssh-key-expire-after=15m --tunnel-through-iap --project ${GCP_PROJECT} --command "${COMMAND}"

.gitignore vendored
View File

@@ -60,14 +60,13 @@ ee/query-service/db
e2e/node_modules/
e2e/test-results/
e2e/playwright-report/
e2e/blob-report/
e2e/playwright/.cache/
e2e/.auth
# go
vendor/
**/main/**
__debug_bin**
# git-town
.git-branches.toml
@@ -80,6 +79,153 @@ deploy/common/clickhouse/user_scripts/
queries.active
# tmp
**/tmp/**
# .devenv tmp files
.devenv/**/tmp/**
.qodo
### Python ###
# Byte-compiled / optimized / DLL files
__pycache__/
*.py[cod]
*$py.class
# C extensions
*.so
# Distribution / packaging
.Python
build/
develop-eggs/
dist/
downloads/
eggs/
.eggs/
lib/
lib64/
parts/
sdist/
var/
wheels/
share/python-wheels/
*.egg-info/
.installed.cfg
*.egg
MANIFEST
# PyInstaller
# Usually these files are written by a python script from a template
# before PyInstaller builds the exe, so as to inject date/other infos into it.
*.manifest
*.spec
# Installer logs
pip-log.txt
pip-delete-this-directory.txt
# Unit test / coverage reports
htmlcov/
.tox/
.nox/
.coverage
.coverage.*
.cache
nosetests.xml
coverage.xml
*.cover
*.py,cover
.hypothesis/
.pytest_cache/
cover/
# Translations
*.mo
*.pot
# Django stuff:
*.log
local_settings.py
db.sqlite3
db.sqlite3-journal
# Flask stuff:
instance/
.webassets-cache
# Scrapy stuff:
.scrapy
# Sphinx documentation
docs/_build/
# PyBuilder
.pybuilder/
target/
# Jupyter Notebook
.ipynb_checkpoints
# IPython
profile_default/
ipython_config.py
# Celery stuff
celerybeat-schedule
celerybeat.pid
# SageMath parsed files
*.sage.py
# Environments
.env
.venv
env/
venv/
ENV/
env.bak/
venv.bak/
# Spyder project settings
.spyderproject
.spyproject
# Rope project settings
.ropeproject
# mkdocs documentation
/site
# mypy
.mypy_cache/
.dmypy.json
dmypy.json
# Pyre type checker
.pyre/
# pytype static type analyzer
.pytype/
# Cython debug symbols
cython_debug/
# PyCharm
# JetBrains specific template is maintained in a separate JetBrains.gitignore that can
# be found at https://github.com/github/gitignore/blob/main/Global/JetBrains.gitignore
# and can be added to the global gitignore or merged into this file. For a more nuclear
# option (not recommended) you can uncomment the following to ignore the entire idea folder.
#.idea/
### Python Patch ###
# Poetry local configuration file - https://python-poetry.org/docs/configuration/#local-configuration
poetry.toml
# ruff
.ruff_cache/
# LSP config files
pyrightconfig.json
# End of https://www.toptal.com/developers/gitignore/api/python

.golangci.yml Normal file
View File

@@ -0,0 +1,34 @@
linters:
default: standard
enable:
- bodyclose
- misspell
- nilnil
- sloglint
- depguard
- iface
- unparam
linters-settings:
sloglint:
no-mixed-args: true
kv-only: true
no-global: all
context: all
static-msg: true
msg-style: lowercased
key-naming-case: snake
depguard:
rules:
nozap:
deny:
- pkg: "go.uber.org/zap"
desc: "Do not use zap logger. Use slog instead."
iface:
enable:
- identical
issues:
exclude-dirs:
- "pkg/query-service"
- "ee/query-service"
- "scripts/"

.versions/alpine Normal file
View File

@@ -0,0 +1,17 @@
#### Auto generated by make docker-version-alpine. DO NOT EDIT! ####
amd64=029a752048e32e843bd6defe3841186fb8d19a28dae8ec287f433bb9d6d1ad85
unknown=5fea95373b9ec85974843f31446fa6a9df4492dddae4e1cb056193c34a20a5be
arm=b4aef1a899e0271f06d948c9a8fa626ecdb2202d3a178bc14775dd559e23df8e
unknown=a4d1e27e63a9d6353046eb25a2f0ec02945012b217f4364cd83a73fe6dfb0b15
arm=4fdafe217d0922f3c3e2b4f64cf043f8403a4636685cd9c51fea2cbd1f419740
unknown=7f21ac2018d95b2c51a5779c1d5ca6c327504adc3b0fdc747a6725d30b3f13c2
arm64=ea3c5a9671f7b3f7eb47eab06f73bc6591df978b0d5955689a9e6f943aa368c0
unknown=a8ba68c1a9e6eea8041b4b8f996c235163440808b9654a865976fdcbede0f433
386=dea9f02e103e837849f984d5679305c758aba7fea1b95b7766218597f61a05ab
unknown=3c6629bec05c8273a927d46b77428bf4a378dad911a0ae284887becdc149b734
ppc64le=0880443bffa028dfbbc4094a32dd6b7ac25684e4c0a3d50da9e0acae355c5eaf
unknown=bb48308f976b266e3ab39bbf9af84521959bd9c295d3c763690cf41f8df2a626
riscv64=d76e6fbe348ff20c2931bb7f101e49379648e026de95dd37f96e00ce1909dcf7
unknown=dd807544365f6dc187cbe6de0806adce2ea9de3e7124717d1d8e8b7a18b77b64
s390x=b815fadf80495594eb6296a6af0bc647ae5f193e0044e07acec7e5b378c9ce2d
unknown=74681be74a280a88abb53ff1e048eb1fb624b30d0066730df6d8afd02ba82e01

ADVOCATE.md Normal file
View File

@@ -0,0 +1,62 @@
# SigNoz Community Advocate Program
Our community is filled with passionate developers who love SigNoz and have been helping spread the word about observability across the world. The SigNoz Community Advocate Program is our way of recognizing these incredible community members and creating deeper collaboration opportunities.
## What is the SigNoz Community Advocate Program?
The SigNoz Community Advocate Program celebrates and supports community members who are already passionate about observability and helping fellow developers. If you're someone who loves discussing SigNoz, helping others with their implementations, or sharing knowledge about observability practices, this program is designed with you in mind.
Our advocates are the heart of the SigNoz community, helping other developers succeed with observability and providing valuable insights that help us build better products.
## What Do Advocates Do?
1. **Community Support**
- Help fellow developers in our Slack community and GitHub Discussions
- Answer questions and share solutions
- Guide newcomers through SigNoz self-host implementations
2. **Knowledge Sharing**
- Spread awareness about observability best practices on developer forums
- Create content like blog posts, social media posts, and videos
- Host local meetups and events in their regions
3. **Product Collaboration**
- Provide insights on features, changes, and improvements the community needs
- Beta test new features and provide early feedback
- Help us understand real-world use cases and pain points
## What's In It For You?
**Recognition & Swag**
- Official recognition as a SigNoz advocate
- Welcome hamper upon joining
- Exclusive swag box within your first 3 months
- Feature on our website (with your permission)
**Early Access**
- First look at new features and updates
- Direct line to the SigNoz team for feedback and suggestions
- Opportunity to influence product roadmap
**Community Impact**
- Help shape the observability landscape
- Build your reputation in the developer community
- Connect with like-minded developers globally
## How Does It Work?
Currently, the SigNoz Community Advocate Program is **invite-only**. We're starting with a small group of passionate community members who have already been making a difference.
We'll be working closely with our first advocates to shape the program details, benefits, and structure based on what works best for everyone involved.
If you're interested in learning more about the program or want to get more involved in the SigNoz community, join our [Slack community](https://signoz-community.slack.com/) and let us know!
---
*The SigNoz Community Advocate Program recognizes and celebrates the amazing community members who are already passionate about helping fellow developers succeed with observability.*

View File

@@ -78,3 +78,4 @@ Need assistance? Join our Slack community:
- Set up your [development environment](docs/contributing/development.md)
- Deploy and observe [SigNoz in action with OpenTelemetry Demo Application](docs/otel-demo-docs.md)
- Explore the [SigNoz Community Advocate Program](ADVOCATE.md), which recognises contributors who support the community, share their expertise, and help shape SigNoz's future.

View File

@@ -2,7 +2,7 @@ Copyright (c) 2020-present SigNoz Inc.
Portions of this software are licensed as follows:
* All content that resides under the "ee/" directory of this repository, if that directory exists, is licensed under the license defined in "ee/LICENSE".
* All content that resides under the "ee/" and the "cmd/enterprise/" directory of this repository, if that directory exists, is licensed under the license defined in "ee/LICENSE".
* All third party components incorporated into the SigNoz Software are licensed under the original license provided by the owner of the applicable component.
* Content outside of the above mentioned directories or restrictions above is available under the "MIT Expat" license as defined below.

View File

@@ -10,27 +10,28 @@ COMMIT_SHORT_SHA ?= $(shell git rev-parse --short HEAD)
BRANCH_NAME ?= $(subst /,-,$(shell git rev-parse --abbrev-ref HEAD))
VERSION ?= $(BRANCH_NAME)-$(COMMIT_SHORT_SHA)
TIMESTAMP ?= $(shell date -u +"%Y-%m-%dT%H:%M:%SZ")
ARCHS = amd64 arm64
ARCHS ?= amd64 arm64
TARGET_DIR ?= $(shell pwd)/target
ZEUS_URL ?= https://api.signoz.cloud
GO_BUILD_LDFLAG_ZEUS_URL = -X github.com/SigNoz/signoz/ee/query-service/constants.ZeusURL=$(ZEUS_URL)
LICENSE_URL ?= https://license.signoz.io/api/v1
GO_BUILD_LDFLAG_LICENSE_SIGNOZ_IO = -X github.com/SigNoz/signoz/ee/query-service/constants.LicenseSignozIo=$(LICENSE_URL)
GO_BUILD_LDFLAG_ZEUS_URL = -X github.com/SigNoz/signoz/ee/zeus.url=$(ZEUS_URL)
LICENSE_URL ?= https://license.signoz.io
GO_BUILD_LDFLAG_LICENSE_SIGNOZ_IO = -X github.com/SigNoz/signoz/ee/zeus.deprecatedURL=$(LICENSE_URL)
GO_BUILD_VERSION_LDFLAGS = -X github.com/SigNoz/signoz/pkg/version.version=$(VERSION) -X github.com/SigNoz/signoz/pkg/version.hash=$(COMMIT_SHORT_SHA) -X github.com/SigNoz/signoz/pkg/version.time=$(TIMESTAMP) -X github.com/SigNoz/signoz/pkg/version.branch=$(BRANCH_NAME)
GO_BUILD_ARCHS_COMMUNITY = $(addprefix go-build-community-,$(ARCHS))
GO_BUILD_CONTEXT_COMMUNITY = $(SRC)/pkg/query-service
GO_BUILD_CONTEXT_COMMUNITY = $(SRC)/cmd/community
GO_BUILD_LDFLAGS_COMMUNITY = $(GO_BUILD_VERSION_LDFLAGS) -X github.com/SigNoz/signoz/pkg/version.variant=community
GO_BUILD_ARCHS_ENTERPRISE = $(addprefix go-build-enterprise-,$(ARCHS))
GO_BUILD_CONTEXT_ENTERPRISE = $(SRC)/ee/query-service
GO_BUILD_ARCHS_ENTERPRISE_RACE = $(addprefix go-build-enterprise-race-,$(ARCHS))
GO_BUILD_CONTEXT_ENTERPRISE = $(SRC)/cmd/enterprise
GO_BUILD_LDFLAGS_ENTERPRISE = $(GO_BUILD_VERSION_LDFLAGS) -X github.com/SigNoz/signoz/pkg/version.variant=enterprise $(GO_BUILD_LDFLAG_ZEUS_URL) $(GO_BUILD_LDFLAG_LICENSE_SIGNOZ_IO)
DOCKER_BUILD_ARCHS_COMMUNITY = $(addprefix docker-build-community-,$(ARCHS))
DOCKERFILE_COMMUNITY = $(SRC)/pkg/query-service/Dockerfile
DOCKERFILE_COMMUNITY = $(SRC)/cmd/community/Dockerfile
DOCKER_REGISTRY_COMMUNITY ?= docker.io/signoz/signoz-community
DOCKER_BUILD_ARCHS_ENTERPRISE = $(addprefix docker-build-enterprise-,$(ARCHS))
DOCKERFILE_ENTERPRISE = $(SRC)/ee/query-service/Dockerfile
DOCKERFILE_ENTERPRISE = $(SRC)/cmd/enterprise/Dockerfile
DOCKER_REGISTRY_ENTERPRISE ?= docker.io/signoz/signoz
JS_BUILD_CONTEXT = $(SRC)/frontend
@@ -55,6 +56,22 @@ devenv-clickhouse: ## Run clickhouse in devenv
@cd .devenv/docker/clickhouse; \
docker compose -f compose.yaml up -d
.PHONY: devenv-postgres
devenv-postgres: ## Run postgres in devenv
@cd .devenv/docker/postgres; \
docker compose -f compose.yaml up -d
.PHONY: devenv-signoz-otel-collector
devenv-signoz-otel-collector: ## Run signoz-otel-collector in devenv (requires clickhouse to be running)
@cd .devenv/docker/signoz-otel-collector; \
docker compose -f compose.yaml up -d
.PHONY: devenv-up
devenv-up: devenv-clickhouse devenv-signoz-otel-collector ## Start both clickhouse and signoz-otel-collector for local development
@echo "Development environment is ready!"
@echo " - ClickHouse: http://localhost:8123"
@echo " - Signoz OTel Collector: grpc://localhost:4317, http://localhost:4318"
##############################################################
# go commands
##############################################################
@@ -68,11 +85,9 @@ go-run-enterprise: ## Runs the enterprise go backend server
SIGNOZ_TELEMETRYSTORE_PROVIDER=clickhouse \
SIGNOZ_TELEMETRYSTORE_CLICKHOUSE_DSN=tcp://127.0.0.1:9000 \
go run -race \
$(GO_BUILD_CONTEXT_ENTERPRISE)/main.go \
$(GO_BUILD_CONTEXT_ENTERPRISE)/*.go \
--config ./conf/prometheus.yml \
--cluster cluster \
--use-logs-new-schema true \
--use-trace-new-schema true
--cluster cluster
.PHONY: go-test
go-test: ## Runs go unit tests
@@ -88,11 +103,9 @@ go-run-community: ## Runs the community go backend server
SIGNOZ_TELEMETRYSTORE_PROVIDER=clickhouse \
SIGNOZ_TELEMETRYSTORE_CLICKHOUSE_DSN=tcp://127.0.0.1:9000 \
go run -race \
$(GO_BUILD_CONTEXT_COMMUNITY)/main.go \
$(GO_BUILD_CONTEXT_COMMUNITY)/*.go server \
--config ./conf/prometheus.yml \
--cluster cluster \
--use-logs-new-schema true \
--use-trace-new-schema true
--cluster cluster
.PHONY: go-build-community $(GO_BUILD_ARCHS_COMMUNITY)
go-build-community: ## Builds the go backend server for community
@@ -119,6 +132,18 @@ $(GO_BUILD_ARCHS_ENTERPRISE): go-build-enterprise-%: $(TARGET_DIR)
CGO_ENABLED=1 GOARCH=$* GOOS=$(OS) go build -C $(GO_BUILD_CONTEXT_ENTERPRISE) -tags timetzdata -o $(TARGET_DIR)/$(OS)-$*/$(NAME) -ldflags "-linkmode external -extldflags '-static' -s -w $(GO_BUILD_LDFLAGS_ENTERPRISE)"; \
fi
.PHONY: go-build-enterprise-race $(GO_BUILD_ARCHS_ENTERPRISE_RACE)
go-build-enterprise-race: ## Builds the go backend server for enterprise with race
go-build-enterprise-race: $(GO_BUILD_ARCHS_ENTERPRISE_RACE)
$(GO_BUILD_ARCHS_ENTERPRISE_RACE): go-build-enterprise-race-%: $(TARGET_DIR)
@mkdir -p $(TARGET_DIR)/$(OS)-$*
@echo ">> building binary $(TARGET_DIR)/$(OS)-$*/$(NAME)"
@if [ $* = "arm64" ]; then \
CC=aarch64-linux-gnu-gcc CGO_ENABLED=1 GOARCH=$* GOOS=$(OS) go build -C $(GO_BUILD_CONTEXT_ENTERPRISE) -race -tags timetzdata -o $(TARGET_DIR)/$(OS)-$*/$(NAME) -ldflags "-linkmode external -extldflags '-static' -s -w $(GO_BUILD_LDFLAGS_ENTERPRISE)"; \
else \
CGO_ENABLED=1 GOARCH=$* GOOS=$(OS) go build -C $(GO_BUILD_CONTEXT_ENTERPRISE) -race -tags timetzdata -o $(TARGET_DIR)/$(OS)-$*/$(NAME) -ldflags "-linkmode external -extldflags '-static' -s -w $(GO_BUILD_LDFLAGS_ENTERPRISE)"; \
fi
##############################################################
# js commands
##############################################################
@@ -167,3 +192,20 @@ docker-buildx-enterprise: go-build-enterprise js-build
--platform linux/arm64,linux/amd64 \
--push \
--tag $(DOCKER_REGISTRY_ENTERPRISE):$(VERSION) $(SRC)
##############################################################
# python commands
##############################################################
.PHONY: py-fmt
py-fmt: ## Run black for integration tests
@cd tests/integration && poetry run black .
.PHONY: py-lint
py-lint: ## Run lint for integration tests
@cd tests/integration && poetry run isort .
@cd tests/integration && poetry run autoflake .
@cd tests/integration && poetry run pylint .
.PHONY: py-test
py-test: ## Runs integration tests
@cd tests/integration && poetry run pytest --basetemp=./tmp/ -vv --capture=no src/

View File

@@ -8,7 +8,6 @@
<p align="center">All your logs, metrics, and traces in one place. Monitor your application, spot issues before they occur and troubleshoot downtime quickly with rich context. SigNoz is a cost-effective open-source alternative to Datadog and New Relic. Visit <a href="https://signoz.io" target="_blank">signoz.io</a> for the full documentation, tutorials, and guide.</p>
<p align="center">
<img alt="Downloads" src="https://img.shields.io/docker/pulls/signoz/query-service?label=Docker Downloads"> </a>
<img alt="GitHub issues" src="https://img.shields.io/github/issues/signoz/signoz"> </a>
<a href="https://twitter.com/intent/tweet?text=Monitor%20your%20applications%20and%20troubleshoot%20problems%20with%20SigNoz,%20an%20open-source%20alternative%20to%20DataDog,%20NewRelic.&url=https://signoz.io/&via=SigNozHQ&hashtags=opensource,signoz,observability">
<img alt="tweet" src="https://img.shields.io/twitter/url/http/shields.io.svg?style=social"> </a>
@@ -231,6 +230,8 @@ Not sure how to get started? Just ping us on `#contributing` in our [slack commu
- [Shaheer Kochai](https://github.com/ahmadshaheer)
- [Amlan Kumar Nandy](https://github.com/amlannandy)
- [Sahil Khan](https://github.com/sawhil)
- [Aditya Singh](https://github.com/aks07)
- [Abhi Kumar](https://github.com/ahrefabhi)
#### DevOps

View File

@@ -11,7 +11,7 @@ before:
builds:
- id: signoz
binary: bin/signoz
main: pkg/query-service/main.go
main: ./cmd/community
env:
- CGO_ENABLED=1
- >-
@@ -35,6 +35,7 @@ builds:
- -X github.com/SigNoz/signoz/pkg/version.hash={{ .ShortCommit }}
- -X github.com/SigNoz/signoz/pkg/version.time={{ .CommitTimestamp }}
- -X github.com/SigNoz/signoz/pkg/version.branch={{ .Branch }}
- -X github.com/SigNoz/signoz/pkg/analytics.key=9kRrJ7oPCGPEJLF6QjMPLt5bljFhRQBr
- >-
{{- if eq .Os "linux" }}-linkmode external -extldflags '-static'{{- end }}
mod_timestamp: "{{ .CommitTimestamp }}"

View File

@@ -11,11 +11,9 @@ RUN apk update && \
COPY ./target/${OS}-${TARGETARCH}/signoz-community /root/signoz
COPY ./conf/prometheus.yml /root/config/prometheus.yml
COPY ./templates/email /root/templates
COPY frontend/build/ /etc/signoz/web/
RUN chmod 755 /root /root/signoz
ENTRYPOINT ["./signoz"]
CMD ["-config", "/root/config/prometheus.yml"]
ENTRYPOINT ["./signoz", "server"]

View File

@@ -0,0 +1,20 @@
ARG ALPINE_SHA="pass-a-valid-docker-sha-otherwise-this-will-fail"
FROM alpine@sha256:${ALPINE_SHA}
LABEL maintainer="signoz"
WORKDIR /root
ARG OS="linux"
ARG ARCH
RUN apk update && \
apk add ca-certificates && \
rm -rf /var/cache/apk/*
COPY ./target/${OS}-${ARCH}/signoz-community /root/signoz-community
COPY ./templates/email /root/templates
COPY frontend/build/ /etc/signoz/web/
RUN chmod 755 /root /root/signoz-community
ENTRYPOINT ["./signoz-community", "server"]

18
cmd/community/main.go Normal file
View File

@@ -0,0 +1,18 @@
package main
import (
"log/slog"
"github.com/SigNoz/signoz/cmd"
"github.com/SigNoz/signoz/pkg/instrumentation"
)
func main() {
// initialize logger for logging in the cmd/ package. This logger is different from the logger used in the application.
logger := instrumentation.NewLogger(instrumentation.Config{Logs: instrumentation.LogsConfig{Level: slog.LevelInfo}})
// register a list of commands to the root command
registerServer(cmd.RootCmd, logger)
cmd.Execute(logger)
}

116
cmd/community/server.go Normal file
View File

@@ -0,0 +1,116 @@
package main
import (
"context"
"log/slog"
"time"
"github.com/SigNoz/signoz/cmd"
"github.com/SigNoz/signoz/ee/sqlstore/postgressqlstore"
"github.com/SigNoz/signoz/pkg/analytics"
"github.com/SigNoz/signoz/pkg/factory"
"github.com/SigNoz/signoz/pkg/licensing"
"github.com/SigNoz/signoz/pkg/licensing/nooplicensing"
"github.com/SigNoz/signoz/pkg/modules/organization"
"github.com/SigNoz/signoz/pkg/query-service/app"
"github.com/SigNoz/signoz/pkg/signoz"
"github.com/SigNoz/signoz/pkg/sqlschema"
"github.com/SigNoz/signoz/pkg/sqlstore"
"github.com/SigNoz/signoz/pkg/sqlstore/sqlstorehook"
"github.com/SigNoz/signoz/pkg/types/authtypes"
"github.com/SigNoz/signoz/pkg/version"
"github.com/SigNoz/signoz/pkg/zeus"
"github.com/SigNoz/signoz/pkg/zeus/noopzeus"
"github.com/spf13/cobra"
)
func registerServer(parentCmd *cobra.Command, logger *slog.Logger) {
var flags signoz.DeprecatedFlags
serverCmd := &cobra.Command{
Use: "server",
Short: "Run the SigNoz server",
FParseErrWhitelist: cobra.FParseErrWhitelist{UnknownFlags: true},
RunE: func(currCmd *cobra.Command, args []string) error {
config, err := cmd.NewSigNozConfig(currCmd.Context(), flags)
if err != nil {
return err
}
return runServer(currCmd.Context(), config, logger)
},
}
flags.RegisterFlags(serverCmd)
parentCmd.AddCommand(serverCmd)
}
func runServer(ctx context.Context, config signoz.Config, logger *slog.Logger) error {
// print the version
version.Info.PrettyPrint(config.Version)
// add enterprise sqlstore factories to the community sqlstore factories
sqlstoreFactories := signoz.NewSQLStoreProviderFactories()
if err := sqlstoreFactories.Add(postgressqlstore.NewFactory(sqlstorehook.NewLoggingFactory())); err != nil {
logger.ErrorContext(ctx, "failed to add postgressqlstore factory", "error", err)
return err
}
jwt := authtypes.NewJWT(cmd.NewJWTSecret(ctx, logger), 30*time.Minute, 30*24*time.Hour)
signoz, err := signoz.New(
ctx,
config,
jwt,
zeus.Config{},
noopzeus.NewProviderFactory(),
licensing.Config{},
func(_ sqlstore.SQLStore, _ zeus.Zeus, _ organization.Getter, _ analytics.Analytics) factory.ProviderFactory[licensing.Licensing, licensing.Config] {
return nooplicensing.NewFactory()
},
signoz.NewEmailingProviderFactories(),
signoz.NewCacheProviderFactories(),
signoz.NewWebProviderFactories(),
func(sqlstore sqlstore.SQLStore) factory.NamedMap[factory.ProviderFactory[sqlschema.SQLSchema, sqlschema.Config]] {
return signoz.NewSQLSchemaProviderFactories(sqlstore)
},
signoz.NewSQLStoreProviderFactories(),
signoz.NewTelemetryStoreProviderFactories(),
)
if err != nil {
logger.ErrorContext(ctx, "failed to create signoz", "error", err)
return err
}
server, err := app.NewServer(config, signoz, jwt)
if err != nil {
logger.ErrorContext(ctx, "failed to create server", "error", err)
return err
}
if err := server.Start(ctx); err != nil {
logger.ErrorContext(ctx, "failed to start server", "error", err)
return err
}
signoz.Start(ctx)
if err := signoz.Wait(ctx); err != nil {
logger.ErrorContext(ctx, "failed to start signoz", "error", err)
return err
}
err = server.Stop(ctx)
if err != nil {
logger.ErrorContext(ctx, "failed to stop server", "error", err)
return err
}
err = signoz.Stop(ctx)
if err != nil {
logger.ErrorContext(ctx, "failed to stop signoz", "error", err)
return err
}
return nil
}

45
cmd/config.go Normal file
View File

@@ -0,0 +1,45 @@
package cmd
import (
"context"
"fmt"
"log/slog"
"os"
"github.com/SigNoz/signoz/pkg/config"
"github.com/SigNoz/signoz/pkg/config/envprovider"
"github.com/SigNoz/signoz/pkg/config/fileprovider"
"github.com/SigNoz/signoz/pkg/signoz"
)
func NewSigNozConfig(ctx context.Context, flags signoz.DeprecatedFlags) (signoz.Config, error) {
config, err := signoz.NewConfig(
ctx,
config.ResolverConfig{
Uris: []string{"env:"},
ProviderFactories: []config.ProviderFactory{
envprovider.NewFactory(),
fileprovider.NewFactory(),
},
},
flags,
)
if err != nil {
return signoz.Config{}, err
}
return config, nil
}
func NewJWTSecret(_ context.Context, _ *slog.Logger) string {
jwtSecret := os.Getenv("SIGNOZ_JWT_SECRET")
if len(jwtSecret) == 0 {
fmt.Println("🚨 CRITICAL SECURITY ISSUE: No JWT secret key specified!")
fmt.Println("SIGNOZ_JWT_SECRET environment variable is not set. This has dire consequences for the security of the application.")
fmt.Println("Without a JWT secret, user sessions are vulnerable to tampering and unauthorized access.")
fmt.Println("Please set the SIGNOZ_JWT_SECRET environment variable immediately.")
fmt.Println("For more information, please refer to https://github.com/SigNoz/signoz/issues/8400.")
}
return jwtSecret
}

View File

@@ -11,7 +11,7 @@ before:
builds:
- id: signoz
binary: bin/signoz
main: ee/query-service/main.go
main: ./cmd/enterprise
env:
- CGO_ENABLED=1
- >-
@@ -35,8 +35,11 @@ builds:
- -X github.com/SigNoz/signoz/pkg/version.hash={{ .ShortCommit }}
- -X github.com/SigNoz/signoz/pkg/version.time={{ .CommitTimestamp }}
- -X github.com/SigNoz/signoz/pkg/version.branch={{ .Branch }}
- -X github.com/SigNoz/signoz/ee/zeus.url=https://api.signoz.cloud
- -X github.com/SigNoz/signoz/ee/zeus.deprecatedURL=https://license.signoz.io
- -X github.com/SigNoz/signoz/ee/query-service/constants.ZeusURL=https://api.signoz.cloud
- -X github.com/SigNoz/signoz/ee/query-service/constants.LicenseSignozIo=https://license.signoz.io/api/v1
- -X github.com/SigNoz/signoz/pkg/analytics.key=9kRrJ7oPCGPEJLF6QjMPLt5bljFhRQBr
- >-
{{- if eq .Os "linux" }}-linkmode external -extldflags '-static'{{- end }}
mod_timestamp: "{{ .CommitTimestamp }}"

View File

@@ -11,11 +11,9 @@ RUN apk update && \
COPY ./target/${OS}-${TARGETARCH}/signoz /root/signoz
COPY ./conf/prometheus.yml /root/config/prometheus.yml
COPY ./templates/email /root/templates
COPY frontend/build/ /etc/signoz/web/
RUN chmod 755 /root /root/signoz
ENTRYPOINT ["./signoz"]
CMD ["-config", "/root/config/prometheus.yml"]
ENTRYPOINT ["./signoz", "server"]

View File

@@ -0,0 +1,46 @@
FROM node:18-bullseye AS build
WORKDIR /opt/
COPY ./frontend/ ./
RUN CI=1 yarn install
RUN CI=1 yarn build
FROM golang:1.23-bullseye
ARG OS="linux"
ARG TARGETARCH
ARG ZEUSURL
# This path is important for stacktraces
WORKDIR $GOPATH/src/github.com/signoz/signoz
WORKDIR /root
RUN set -eux; \
apt-get update; \
apt-get install -y --no-install-recommends \
g++ \
gcc \
libc6-dev \
make \
pkg-config \
; \
rm -rf /var/lib/apt/lists/*
COPY go.mod go.sum ./
RUN go mod download
COPY ./cmd/ ./cmd/
COPY ./ee/ ./ee/
COPY ./pkg/ ./pkg/
COPY ./templates/email /root/templates
COPY Makefile Makefile
RUN TARGET_DIR=/root ARCHS=${TARGETARCH} ZEUS_URL=${ZEUSURL} LICENSE_URL=${ZEUSURL}/api/v1 make go-build-enterprise-race
RUN mv /root/linux-${TARGETARCH}/signoz /root/signoz
COPY --from=build /opt/build ./web/
RUN chmod 755 /root /root/signoz
ENTRYPOINT ["/root/signoz", "server"]

View File

@@ -0,0 +1,20 @@
ARG ALPINE_SHA="pass-a-valid-docker-sha-otherwise-this-will-fail"
FROM alpine@sha256:${ALPINE_SHA}
LABEL maintainer="signoz"
WORKDIR /root
ARG OS="linux"
ARG ARCH
RUN apk update && \
apk add ca-certificates && \
rm -rf /var/cache/apk/*
COPY ./target/${OS}-${ARCH}/signoz /root/signoz
COPY ./templates/email /root/templates
COPY frontend/build/ /etc/signoz/web/
RUN chmod 755 /root /root/signoz
ENTRYPOINT ["./signoz", "server"]

18
cmd/enterprise/main.go Normal file
View File

@@ -0,0 +1,18 @@
package main
import (
"log/slog"
"github.com/SigNoz/signoz/cmd"
"github.com/SigNoz/signoz/pkg/instrumentation"
)
func main() {
// initialize logger for logging in the cmd/ package. This logger is different from the logger used in the application.
logger := instrumentation.NewLogger(instrumentation.Config{Logs: instrumentation.LogsConfig{Level: slog.LevelInfo}})
// register a list of commands to the root command
registerServer(cmd.RootCmd, logger)
cmd.Execute(logger)
}

124
cmd/enterprise/server.go Normal file
View File

@@ -0,0 +1,124 @@
package main
import (
"context"
"log/slog"
"time"
"github.com/SigNoz/signoz/cmd"
enterpriselicensing "github.com/SigNoz/signoz/ee/licensing"
"github.com/SigNoz/signoz/ee/licensing/httplicensing"
enterpriseapp "github.com/SigNoz/signoz/ee/query-service/app"
"github.com/SigNoz/signoz/ee/sqlschema/postgressqlschema"
"github.com/SigNoz/signoz/ee/sqlstore/postgressqlstore"
enterprisezeus "github.com/SigNoz/signoz/ee/zeus"
"github.com/SigNoz/signoz/ee/zeus/httpzeus"
"github.com/SigNoz/signoz/pkg/analytics"
"github.com/SigNoz/signoz/pkg/factory"
"github.com/SigNoz/signoz/pkg/licensing"
"github.com/SigNoz/signoz/pkg/modules/organization"
"github.com/SigNoz/signoz/pkg/signoz"
"github.com/SigNoz/signoz/pkg/sqlschema"
"github.com/SigNoz/signoz/pkg/sqlstore"
"github.com/SigNoz/signoz/pkg/sqlstore/sqlstorehook"
"github.com/SigNoz/signoz/pkg/types/authtypes"
"github.com/SigNoz/signoz/pkg/version"
"github.com/SigNoz/signoz/pkg/zeus"
"github.com/spf13/cobra"
)
func registerServer(parentCmd *cobra.Command, logger *slog.Logger) {
var flags signoz.DeprecatedFlags
serverCmd := &cobra.Command{
Use: "server",
Short: "Run the SigNoz server",
FParseErrWhitelist: cobra.FParseErrWhitelist{UnknownFlags: true},
RunE: func(currCmd *cobra.Command, args []string) error {
config, err := cmd.NewSigNozConfig(currCmd.Context(), flags)
if err != nil {
return err
}
return runServer(currCmd.Context(), config, logger)
},
}
flags.RegisterFlags(serverCmd)
parentCmd.AddCommand(serverCmd)
}
func runServer(ctx context.Context, config signoz.Config, logger *slog.Logger) error {
// print the version
version.Info.PrettyPrint(config.Version)
// add enterprise sqlstore factories to the community sqlstore factories
sqlstoreFactories := signoz.NewSQLStoreProviderFactories()
if err := sqlstoreFactories.Add(postgressqlstore.NewFactory(sqlstorehook.NewLoggingFactory())); err != nil {
logger.ErrorContext(ctx, "failed to add postgressqlstore factory", "error", err)
return err
}
jwt := authtypes.NewJWT(cmd.NewJWTSecret(ctx, logger), 30*time.Minute, 30*24*time.Hour)
signoz, err := signoz.New(
ctx,
config,
jwt,
enterprisezeus.Config(),
httpzeus.NewProviderFactory(),
enterpriselicensing.Config(24*time.Hour, 3),
func(sqlstore sqlstore.SQLStore, zeus zeus.Zeus, orgGetter organization.Getter, analytics analytics.Analytics) factory.ProviderFactory[licensing.Licensing, licensing.Config] {
return httplicensing.NewProviderFactory(sqlstore, zeus, orgGetter, analytics)
},
signoz.NewEmailingProviderFactories(),
signoz.NewCacheProviderFactories(),
signoz.NewWebProviderFactories(),
func(sqlstore sqlstore.SQLStore) factory.NamedMap[factory.ProviderFactory[sqlschema.SQLSchema, sqlschema.Config]] {
existingFactories := signoz.NewSQLSchemaProviderFactories(sqlstore)
if err := existingFactories.Add(postgressqlschema.NewFactory(sqlstore)); err != nil {
panic(err)
}
return existingFactories
},
sqlstoreFactories,
signoz.NewTelemetryStoreProviderFactories(),
)
if err != nil {
logger.ErrorContext(ctx, "failed to create signoz", "error", err)
return err
}
server, err := enterpriseapp.NewServer(config, signoz, jwt)
if err != nil {
logger.ErrorContext(ctx, "failed to create server", "error", err)
return err
}
if err := server.Start(ctx); err != nil {
logger.ErrorContext(ctx, "failed to start server", "error", err)
return err
}
signoz.Start(ctx)
if err := signoz.Wait(ctx); err != nil {
logger.ErrorContext(ctx, "failed to start signoz", "error", err)
return err
}
err = server.Stop(ctx)
if err != nil {
logger.ErrorContext(ctx, "failed to stop server", "error", err)
return err
}
err = signoz.Stop(ctx)
if err != nil {
logger.ErrorContext(ctx, "failed to stop signoz", "error", err)
return err
}
return nil
}

33
cmd/root.go Normal file
View File

@@ -0,0 +1,33 @@
package cmd
import (
"log/slog"
"os"
"github.com/SigNoz/signoz/pkg/version"
"github.com/spf13/cobra"
"go.uber.org/zap" //nolint:depguard
)
var RootCmd = &cobra.Command{
Use: "signoz",
Short: "OpenTelemetry-Native Logs, Metrics and Traces in a single pane",
Version: version.Info.Version(),
SilenceUsage: true,
SilenceErrors: true,
CompletionOptions: cobra.CompletionOptions{DisableDefaultCmd: true},
}
func Execute(logger *slog.Logger) {
zapLogger := newZapLogger()
zap.ReplaceGlobals(zapLogger)
defer func() {
_ = zapLogger.Sync()
}()
err := RootCmd.Execute()
if err != nil {
logger.ErrorContext(RootCmd.Context(), "error running command", "error", err)
os.Exit(1)
}
}

15
cmd/zap.go Normal file
View File

@@ -0,0 +1,15 @@
package cmd
import (
"go.uber.org/zap" //nolint:depguard
"go.uber.org/zap/zapcore" //nolint:depguard
)
// Deprecated: Use `NewLogger` from `pkg/instrumentation` instead.
func newZapLogger() *zap.Logger {
config := zap.NewProductionConfig()
config.EncoderConfig.TimeKey = "timestamp"
config.EncoderConfig.EncodeTime = zapcore.ISO8601TimeEncoder
logger, _ := config.Build()
return logger
}

View File

@@ -50,7 +50,7 @@ cache:
# Time-to-live for cache entries in memory. Specify the duration in ns
ttl: 60000000000
# The interval at which the cache will be cleaned up
cleanupInterval: 1m
cleanup_interval: 1m
# redis: Uses Redis as the caching backend.
redis:
# The hostname or IP address of the Redis server.
@@ -90,6 +90,15 @@ apiserver:
- /api/v1/version
- /
##################### Querier #####################
querier:
# The TTL for cached query results.
cache_ttl: 168h
# The interval for recent data that should not be cached.
flux_interval: 5m
# The maximum number of concurrent queries for missing ranges.
max_concurrent_queries: 4
##################### TelemetryStore #####################
telemetrystore:
# Maximum number of idle connections in the connection pool.
@@ -103,6 +112,17 @@ telemetrystore:
clickhouse:
# The DSN to use for clickhouse.
dsn: tcp://localhost:9000
# The cluster name to use for clickhouse.
cluster: cluster
# The query settings for clickhouse.
settings:
max_execution_time: 0
max_execution_time_leaf: 0
timeout_before_checking_execution_speed: 0
max_bytes_to_read: 0
max_result_rows: 0
ignore_data_skipping_indices: ""
secondary_indices_enable_bulk_filtering: false
##################### Prometheus #####################
prometheus:
@@ -157,3 +177,72 @@ alertmanager:
maintenance_interval: 15m
# Retention of the notification logs.
retention: 120h
##################### Emailing #####################
emailing:
# Whether to enable emailing.
enabled: false
templates:
# The directory containing the email templates. This directory should contain a list of files defined at pkg/types/emailtypes/template.go.
directory: /opt/signoz/conf/templates/email
smtp:
# The SMTP server address.
address: localhost:25
# The email address to use for the SMTP server.
from:
# The hello message to use for the SMTP server.
hello:
# The static headers to send with the email.
headers: {}
auth:
# The username to use for the SMTP server.
username:
# The password to use for the SMTP server.
password:
# The secret to use for the SMTP server.
secret:
# The identity to use for the SMTP server.
identity:
tls:
# Whether to enable TLS. It should be false in most cases since the authentication mechanism should use the STARTTLS extension instead.
enabled: false
# Whether to skip TLS verification.
insecure_skip_verify: false
# The path to the CA file.
ca_file_path:
# The path to the key file.
key_file_path:
# The path to the certificate file.
cert_file_path:
##################### Sharder (experimental) #####################
sharder:
# Specifies the sharder provider to use.
provider: noop
single:
# The org id to which this instance belongs to.
org_id: org_id
##################### Analytics #####################
analytics:
# Whether to enable analytics.
enabled: false
segment:
# The key to use for segment.
key: ""
##################### StatsReporter #####################
statsreporter:
# Whether to enable stats reporter. This is used to provide valuable insights to the SigNoz team. It does not collect any sensitive/PII data.
enabled: true
# The interval at which the stats are collected.
interval: 6h
collect:
# Whether to collect identities and traits (emails).
identities: true
##################### Gateway (License only) #####################
gateway:
# The URL of the gateway's api.
url: http://localhost:8080

View File

@@ -174,11 +174,9 @@ services:
# - ../common/clickhouse/storage.xml:/etc/clickhouse-server/config.d/storage.xml
signoz:
!!merge <<: *db-depend
image: signoz/signoz:v0.77.0
image: signoz/signoz:v0.92.1
command:
- --config=/root/config/prometheus.yml
- --use-logs-new-schema=true
- --use-trace-new-schema=true
ports:
- "8080:8080" # signoz port
# - "6060:6060" # pprof port
@@ -196,6 +194,7 @@ services:
- TELEMETRY_ENABLED=true
- DEPLOYMENT_TYPE=docker-swarm
- SIGNOZ_JWT_SECRET=secret
- DOT_METRICS_ENABLED=true
healthcheck:
test:
- CMD
@@ -208,7 +207,7 @@ services:
retries: 3
otel-collector:
!!merge <<: *db-depend
image: signoz/signoz-otel-collector:v0.111.37
image: signoz/signoz-otel-collector:v0.129.0
command:
- --config=/etc/otel-collector-config.yaml
- --manager-config=/etc/manager-config.yaml
@@ -232,7 +231,7 @@ services:
- signoz
schema-migrator:
!!merge <<: *common
image: signoz/signoz-schema-migrator:v0.111.37
image: signoz/signoz-schema-migrator:v0.129.0
deploy:
restart_policy:
condition: on-failure

View File

@@ -100,28 +100,32 @@ services:
# - "9000:9000"
# - "8123:8123"
# - "9181:9181"
configs:
- source: clickhouse-config
target: /etc/clickhouse-server/config.xml
- source: clickhouse-users
target: /etc/clickhouse-server/users.xml
- source: clickhouse-custom-function
target: /etc/clickhouse-server/custom-function.xml
- source: clickhouse-cluster
target: /etc/clickhouse-server/config.d/cluster.xml
volumes:
- ../common/clickhouse/config.xml:/etc/clickhouse-server/config.xml
- ../common/clickhouse/users.xml:/etc/clickhouse-server/users.xml
- ../common/clickhouse/custom-function.xml:/etc/clickhouse-server/custom-function.xml
- ../common/clickhouse/user_scripts:/var/lib/clickhouse/user_scripts/
- ../common/clickhouse/cluster.xml:/etc/clickhouse-server/config.d/cluster.xml
- clickhouse:/var/lib/clickhouse/
# - ../common/clickhouse/storage.xml:/etc/clickhouse-server/config.d/storage.xml
signoz:
!!merge <<: *db-depend
image: signoz/signoz:v0.77.0
image: signoz/signoz:v0.92.1
command:
- --config=/root/config/prometheus.yml
- --use-logs-new-schema=true
- --use-trace-new-schema=true
ports:
- "8080:8080" # signoz port
# - "6060:6060" # pprof port
volumes:
- ../common/signoz/prometheus.yml:/root/config/prometheus.yml
- ../common/dashboards:/root/config/dashboards
- sqlite:/var/lib/signoz/
configs:
- source: signoz-prometheus-config
target: /root/config/prometheus.yml
environment:
- SIGNOZ_ALERTMANAGER_PROVIDER=signoz
- SIGNOZ_TELEMETRYSTORE_CLICKHOUSE_DSN=tcp://clickhouse:9000
@@ -131,6 +135,7 @@ services:
- GODEBUG=netdns=go
- TELEMETRY_ENABLED=true
- DEPLOYMENT_TYPE=docker-swarm
- DOT_METRICS_ENABLED=true
healthcheck:
test:
- CMD
@@ -143,15 +148,17 @@ services:
retries: 3
otel-collector:
!!merge <<: *db-depend
image: signoz/signoz-otel-collector:v0.111.37
image: signoz/signoz-otel-collector:v0.129.0
command:
- --config=/etc/otel-collector-config.yaml
- --manager-config=/etc/manager-config.yaml
- --copy-path=/var/tmp/collector-config.yaml
- --feature-gates=-pkg.translator.prometheus.NormalizeName
volumes:
- ./otel-collector-config.yaml:/etc/otel-collector-config.yaml
- ../common/signoz/otel-collector-opamp-config.yaml:/etc/manager-config.yaml
configs:
- source: otel-collector-config
target: /etc/otel-collector-config.yaml
- source: otel-manager-config
target: /etc/manager-config.yaml
environment:
- OTEL_RESOURCE_ATTRIBUTES=host.name={{.Node.Hostname}},os.type={{.Node.Platform.OS}}
- LOW_CARDINAL_EXCEPTION_GROUPING=false
@@ -167,7 +174,7 @@ services:
- signoz
schema-migrator:
!!merge <<: *common
image: signoz/signoz-schema-migrator:v0.111.37
image: signoz/signoz-schema-migrator:v0.129.0
deploy:
restart_policy:
condition: on-failure
@@ -188,3 +195,24 @@ volumes:
name: signoz-sqlite
zookeeper-1:
name: signoz-zookeeper-1
configs:
clickhouse-config:
file: ../common/clickhouse/config.xml
clickhouse-users:
file: ../common/clickhouse/users.xml
clickhouse-custom-function:
file: ../common/clickhouse/custom-function.xml
clickhouse-cluster:
file: ../common/clickhouse/cluster.xml
signoz-prometheus-config:
file: ../common/signoz/prometheus.yml
# If you have multiple dashboard files, you can list them individually:
# dashboard-foo:
# file: ../common/dashboards/foo.json
# dashboard-bar:
# file: ../common/dashboards/bar.json
otel-collector-config:
file: ./otel-collector-config.yaml
otel-manager-config:
file: ../common/signoz/otel-collector-opamp-config.yaml

View File

@@ -26,7 +26,7 @@ processors:
detectors: [env, system]
timeout: 2s
signozspanmetrics/delta:
metrics_exporter: clickhousemetricswrite
metrics_exporter: signozclickhousemetrics
metrics_flush_interval: 60s
latency_histogram_buckets: [100us, 1ms, 2ms, 6ms, 10ms, 50ms, 100ms, 250ms, 500ms, 1000ms, 1400ms, 2000ms, 5s, 10s, 20s, 40s, 60s ]
dimensions_cache_size: 100000
@@ -60,25 +60,16 @@ exporters:
datasource: tcp://clickhouse:9000/signoz_traces
low_cardinal_exception_grouping: ${env:LOW_CARDINAL_EXCEPTION_GROUPING}
use_new_schema: true
clickhousemetricswrite:
endpoint: tcp://clickhouse:9000/signoz_metrics
resource_to_telemetry_conversion:
enabled: true
clickhousemetricswrite/prometheus:
endpoint: tcp://clickhouse:9000/signoz_metrics
signozclickhousemetrics:
dsn: tcp://clickhouse:9000/signoz_metrics
clickhouselogsexporter:
dsn: tcp://clickhouse:9000/signoz_logs
timeout: 10s
use_new_schema: true
# debug: {}
service:
telemetry:
logs:
encoding: json
metrics:
address: 0.0.0.0:8888
extensions:
- health_check
- pprof
@@ -90,11 +81,11 @@ service:
metrics:
receivers: [otlp]
processors: [batch]
exporters: [clickhousemetricswrite, signozclickhousemetrics]
exporters: [signozclickhousemetrics]
metrics/prometheus:
receivers: [prometheus]
processors: [batch]
exporters: [clickhousemetricswrite/prometheus, signozclickhousemetrics]
exporters: [signozclickhousemetrics]
logs:
receivers: [otlp]
processors: [batch]

View File

@@ -177,12 +177,10 @@ services:
# - ../common/clickhouse/storage.xml:/etc/clickhouse-server/config.d/storage.xml
signoz:
!!merge <<: *db-depend
image: signoz/signoz:${VERSION:-v0.77.0}
image: signoz/signoz:${VERSION:-v0.92.1}
container_name: signoz
command:
- --config=/root/config/prometheus.yml
- --use-logs-new-schema=true
- --use-trace-new-schema=true
ports:
- "8080:8080" # signoz port
# - "6060:6060" # pprof port
@@ -199,6 +197,7 @@ services:
- GODEBUG=netdns=go
- TELEMETRY_ENABLED=true
- DEPLOYMENT_TYPE=docker-standalone-amd
- DOT_METRICS_ENABLED=true
healthcheck:
test:
- CMD
@@ -212,7 +211,7 @@ services:
# TODO: support otel-collector multiple replicas. Nginx/Traefik for loadbalancing?
otel-collector:
!!merge <<: *db-depend
image: signoz/signoz-otel-collector:${OTELCOL_TAG:-v0.111.37}
image: signoz/signoz-otel-collector:${OTELCOL_TAG:-v0.129.0}
container_name: signoz-otel-collector
command:
- --config=/etc/otel-collector-config.yaml
@@ -238,7 +237,7 @@ services:
condition: service_healthy
schema-migrator-sync:
!!merge <<: *common
image: signoz/signoz-schema-migrator:${OTELCOL_TAG:-v0.111.37}
image: signoz/signoz-schema-migrator:${OTELCOL_TAG:-v0.129.0}
container_name: schema-migrator-sync
command:
- sync
@@ -249,7 +248,7 @@ services:
condition: service_healthy
schema-migrator-async:
!!merge <<: *db-depend
image: signoz/signoz-schema-migrator:${OTELCOL_TAG:-v0.111.37}
image: signoz/signoz-schema-migrator:${OTELCOL_TAG:-v0.129.0}
container_name: schema-migrator-async
command:
- async

View File

@@ -1,199 +0,0 @@
version: "3"
x-common: &common
networks:
- signoz-net
restart: unless-stopped
logging:
options:
max-size: 50m
max-file: "3"
x-clickhouse-defaults: &clickhouse-defaults
!!merge <<: *common
# adding non LTS version due to this fix https://github.com/ClickHouse/ClickHouse/commit/32caf8716352f45c1b617274c7508c86b7d1afab
image: clickhouse/clickhouse-server:24.1.2-alpine
tty: true
labels:
signoz.io/scrape: "true"
signoz.io/port: "9363"
signoz.io/path: "/metrics"
depends_on:
init-clickhouse:
condition: service_completed_successfully
zookeeper-1:
condition: service_healthy
healthcheck:
test:
- CMD
- wget
- --spider
- -q
- 0.0.0.0:8123/ping
interval: 30s
timeout: 5s
retries: 3
ulimits:
nproc: 65535
nofile:
soft: 262144
hard: 262144
x-zookeeper-defaults: &zookeeper-defaults
!!merge <<: *common
image: bitnami/zookeeper:3.7.1
user: root
labels:
signoz.io/scrape: "true"
signoz.io/port: "9141"
signoz.io/path: "/metrics"
healthcheck:
test:
- CMD-SHELL
- curl -s -m 2 http://localhost:8080/commands/ruok | grep error | grep null
interval: 30s
timeout: 5s
retries: 3
x-db-depend: &db-depend
!!merge <<: *common
depends_on:
clickhouse:
condition: service_healthy
schema-migrator-sync:
condition: service_completed_successfully
services:
init-clickhouse:
!!merge <<: *common
image: clickhouse/clickhouse-server:24.1.2-alpine
container_name: signoz-init-clickhouse
command:
- bash
- -c
- |
version="v0.0.1"
node_os=$$(uname -s | tr '[:upper:]' '[:lower:]')
node_arch=$$(uname -m | sed s/aarch64/arm64/ | sed s/x86_64/amd64/)
echo "Fetching histogram-binary for $${node_os}/$${node_arch}"
cd /tmp
wget -O histogram-quantile.tar.gz "https://github.com/SigNoz/signoz/releases/download/histogram-quantile%2F$${version}/histogram-quantile_$${node_os}_$${node_arch}.tar.gz"
tar -xvzf histogram-quantile.tar.gz
mv histogram-quantile /var/lib/clickhouse/user_scripts/histogramQuantile
restart: on-failure
volumes:
- ../common/clickhouse/user_scripts:/var/lib/clickhouse/user_scripts/
zookeeper-1:
!!merge <<: *zookeeper-defaults
container_name: signoz-zookeeper-1
ports:
- "2181:2181"
- "2888:2888"
- "3888:3888"
volumes:
- zookeeper-1:/bitnami/zookeeper
environment:
- ZOO_SERVER_ID=1
- ALLOW_ANONYMOUS_LOGIN=yes
- ZOO_AUTOPURGE_INTERVAL=1
- ZOO_ENABLE_PROMETHEUS_METRICS=yes
- ZOO_PROMETHEUS_METRICS_PORT_NUMBER=9141
clickhouse:
!!merge <<: *clickhouse-defaults
container_name: signoz-clickhouse
ports:
- "9000:9000"
- "8123:8123"
- "9181:9181"
volumes:
- ../common/clickhouse/config.xml:/etc/clickhouse-server/config.xml
- ../common/clickhouse/users.xml:/etc/clickhouse-server/users.xml
- ../common/clickhouse/custom-function.xml:/etc/clickhouse-server/custom-function.xml
- ../common/clickhouse/user_scripts:/var/lib/clickhouse/user_scripts/
- ../common/clickhouse/cluster.xml:/etc/clickhouse-server/config.d/cluster.xml
- clickhouse:/var/lib/clickhouse/
# - ../common/clickhouse/storage.xml:/etc/clickhouse-server/config.d/storage.xml
signoz:
!!merge <<: *db-depend
image: signoz/signoz:${VERSION:-v0.77.0}
container_name: signoz
command:
- --config=/root/config/prometheus.yml
- --gateway-url=https://api.staging.signoz.cloud
- --use-logs-new-schema=true
- --use-trace-new-schema=true
ports:
- "8080:8080" # signoz port
# - "6060:6060" # pprof port
volumes:
- ../common/signoz/prometheus.yml:/root/config/prometheus.yml
- ../common/dashboards:/root/config/dashboards
- sqlite:/var/lib/signoz/
environment:
- SIGNOZ_ALERTMANAGER_PROVIDER=signoz
- SIGNOZ_TELEMETRYSTORE_CLICKHOUSE_DSN=tcp://clickhouse:9000
- SIGNOZ_SQLSTORE_SQLITE_PATH=/var/lib/signoz/signoz.db
- DASHBOARDS_PATH=/root/config/dashboards
- STORAGE=clickhouse
- GODEBUG=netdns=go
- TELEMETRY_ENABLED=true
- DEPLOYMENT_TYPE=docker-standalone-amd
- KAFKA_SPAN_EVAL=${KAFKA_SPAN_EVAL:-false}
healthcheck:
test:
- CMD
- wget
- --spider
- -q
- localhost:8080/api/v1/health
interval: 30s
timeout: 5s
retries: 3
otel-collector:
!!merge <<: *db-depend
image: signoz/signoz-otel-collector:${OTELCOL_TAG:-v0.111.37}
container_name: signoz-otel-collector
command:
- --config=/etc/otel-collector-config.yaml
- --manager-config=/etc/manager-config.yaml
- --copy-path=/var/tmp/collector-config.yaml
- --feature-gates=-pkg.translator.prometheus.NormalizeName
volumes:
- ./otel-collector-config.yaml:/etc/otel-collector-config.yaml
- ../common/signoz/otel-collector-opamp-config.yaml:/etc/manager-config.yaml
environment:
- OTEL_RESOURCE_ATTRIBUTES=host.name=signoz-host,os.type=linux
- LOW_CARDINAL_EXCEPTION_GROUPING=false
ports:
# - "1777:1777" # pprof extension
- "4317:4317" # OTLP gRPC receiver
- "4318:4318" # OTLP HTTP receiver
depends_on:
signoz:
condition: service_healthy
schema-migrator-sync:
!!merge <<: *common
image: signoz/signoz-schema-migrator:${OTELCOL_TAG:-v0.111.37}
container_name: schema-migrator-sync
command:
- sync
- --dsn=tcp://clickhouse:9000
- --up=
depends_on:
clickhouse:
condition: service_healthy
restart: on-failure
schema-migrator-async:
!!merge <<: *db-depend
image: signoz/signoz-schema-migrator:${OTELCOL_TAG:-v0.111.37}
container_name: schema-migrator-async
command:
- async
- --dsn=tcp://clickhouse:9000
- --up=
restart: on-failure
networks:
signoz-net:
name: signoz-net
volumes:
clickhouse:
name: signoz-clickhouse
sqlite:
name: signoz-sqlite
zookeeper-1:
name: signoz-zookeeper-1

View File

@@ -110,12 +110,10 @@ services:
# - ../common/clickhouse/storage.xml:/etc/clickhouse-server/config.d/storage.xml
signoz:
!!merge <<: *db-depend
image: signoz/signoz:${VERSION:-v0.77.0}
image: signoz/signoz:${VERSION:-v0.92.1}
container_name: signoz
command:
- --config=/root/config/prometheus.yml
- --use-logs-new-schema=true
- --use-trace-new-schema=true
ports:
- "8080:8080" # signoz port
# - "6060:6060" # pprof port
@@ -132,6 +130,7 @@ services:
- GODEBUG=netdns=go
- TELEMETRY_ENABLED=true
- DEPLOYMENT_TYPE=docker-standalone-amd
- DOT_METRICS_ENABLED=true
healthcheck:
test:
- CMD
@@ -144,7 +143,7 @@ services:
retries: 3
otel-collector:
!!merge <<: *db-depend
image: signoz/signoz-otel-collector:${OTELCOL_TAG:-v0.111.37}
image: signoz/signoz-otel-collector:${OTELCOL_TAG:-v0.129.0}
container_name: signoz-otel-collector
command:
- --config=/etc/otel-collector-config.yaml
@@ -166,7 +165,7 @@ services:
condition: service_healthy
schema-migrator-sync:
!!merge <<: *common
image: signoz/signoz-schema-migrator:${OTELCOL_TAG:-v0.111.37}
image: signoz/signoz-schema-migrator:${OTELCOL_TAG:-v0.129.0}
container_name: schema-migrator-sync
command:
- sync
@@ -178,7 +177,7 @@ services:
restart: on-failure
schema-migrator-async:
!!merge <<: *db-depend
image: signoz/signoz-schema-migrator:${OTELCOL_TAG:-v0.111.37}
image: signoz/signoz-schema-migrator:${OTELCOL_TAG:-v0.129.0}
container_name: schema-migrator-async
command:
- async

View File

@@ -26,7 +26,7 @@ processors:
detectors: [env, system]
timeout: 2s
signozspanmetrics/delta:
metrics_exporter: clickhousemetricswrite
metrics_exporter: signozclickhousemetrics
metrics_flush_interval: 60s
latency_histogram_buckets: [100us, 1ms, 2ms, 6ms, 10ms, 50ms, 100ms, 250ms, 500ms, 1000ms, 1400ms, 2000ms, 5s, 10s, 20s, 40s, 60s ]
dimensions_cache_size: 100000
@@ -60,25 +60,16 @@ exporters:
datasource: tcp://clickhouse:9000/signoz_traces
low_cardinal_exception_grouping: ${env:LOW_CARDINAL_EXCEPTION_GROUPING}
use_new_schema: true
clickhousemetricswrite:
endpoint: tcp://clickhouse:9000/signoz_metrics
resource_to_telemetry_conversion:
enabled: true
clickhousemetricswrite/prometheus:
endpoint: tcp://clickhouse:9000/signoz_metrics
signozclickhousemetrics:
dsn: tcp://clickhouse:9000/signoz_metrics
clickhouselogsexporter:
dsn: tcp://clickhouse:9000/signoz_logs
timeout: 10s
use_new_schema: true
# debug: {}
service:
telemetry:
logs:
encoding: json
metrics:
address: 0.0.0.0:8888
extensions:
- health_check
- pprof
@@ -90,11 +81,11 @@ service:
metrics:
receivers: [otlp]
processors: [batch]
exporters: [clickhousemetricswrite, signozclickhousemetrics]
exporters: [signozclickhousemetrics]
metrics/prometheus:
receivers: [prometheus]
processors: [batch]
exporters: [clickhousemetricswrite/prometheus, signozclickhousemetrics]
exporters: [signozclickhousemetrics]
logs:
receivers: [otlp]
processors: [batch]

View File

@@ -93,7 +93,7 @@ check_os() {
;;
Red\ Hat*)
desired_os=1
os="red hat"
os="rhel"
package_manager="yum"
;;
CentOS*)

View File

@@ -44,20 +44,35 @@ Before diving in, make sure you have these tools installed:
SigNoz has three main components: Clickhouse, Backend, and Frontend. Let's set them up one by one.
### 1. Setting up Clickhouse
### 1. Setting up ClickHouse
First, we need to get Clickhouse running:
First, we need to get ClickHouse running:
```bash
make devenv-clickhouse
```
This command:
- Starts Clickhouse in a single-shard, single-replica cluster
- Starts ClickHouse in a single-shard, single-replica cluster
- Sets up Zookeeper
- Runs the latest schema migrations
### 2. Starting the Backend
### 2. Setting up SigNoz OpenTelemetry Collector
Next, start the OpenTelemetry Collector to receive telemetry data:
```bash
make devenv-signoz-otel-collector
```
This command:
- Starts the SigNoz OpenTelemetry Collector
- Listens on port 4317 (gRPC) and 4318 (HTTP) for incoming telemetry data
- Forwards data to ClickHouse for storage
> 💡 **Quick Setup**: Use `make devenv-up` to start both ClickHouse and OTel Collector together
### 3. Starting the Backend
1. Run the backend server:
```bash
@@ -73,19 +88,24 @@ This command:
> 💡 **Tip**: The API server runs at `http://localhost:8080/` by default
### 3. Setting up the Frontend
### 4. Setting up the Frontend
1. Install dependencies:
1. Navigate to the frontend directory:
```bash
cd frontend
```
2. Install dependencies:
```bash
yarn install
```
2. Create a `.env` file in the `frontend` directory:
3. Create a `.env` file in this directory:
```env
FRONTEND_API_ENDPOINT=http://localhost:8080
```
3. Start the development server:
4. Start the development server:
```bash
yarn dev
```
@@ -93,3 +113,25 @@ This command:
> 💡 **Tip**: `yarn dev` will automatically rebuild when you make changes to the code
Now you're all set to start developing! Happy coding! 🎉
## Verifying Your Setup
To verify everything is working correctly:
1. **Check ClickHouse**: `curl http://localhost:8123/ping` (should return "Ok.")
2. **Check OTel Collector**: `curl http://localhost:13133` (should return health status)
3. **Check Backend**: `curl http://localhost:8080/api/v1/health` (should return `{"status":"ok"}`)
4. **Check Frontend**: Open `http://localhost:3301` in your browser
## How to send test data?
You can now send telemetry data to your local SigNoz instance:
- **OTLP gRPC**: `localhost:4317`
- **OTLP HTTP**: `localhost:4318`
For example, using `curl` to send a test trace:
```bash
curl -X POST http://localhost:4318/v1/traces \
-H "Content-Type: application/json" \
-d '{"resourceSpans":[{"resource":{"attributes":[{"key":"service.name","value":{"stringValue":"test-service"}}]},"scopeSpans":[{"spans":[{"traceId":"12345678901234567890123456789012","spanId":"1234567890123456","name":"test-span","startTimeUnixNano":"1609459200000000000","endTimeUnixNano":"1609459201000000000"}]}]}]}'
```

View File

@@ -0,0 +1,51 @@
# Endpoint
This guide outlines the recommended approach for designing endpoints, with a focus on entity relationships, RESTful structure, and examples from the codebase.
## How do we design an endpoint?
### Understand the core entities and their relationships
Start with understanding the core entities and their relationships. For example:
- **Organization**: an organization can have multiple users
### Structure Endpoints RESTfully
Endpoints should reflect the resource hierarchy and follow RESTful conventions. Use clear, **pluralized resource names** and versioning. For example:
- `POST /v1/organizations` — Create an organization
- `GET /v1/organizations/:id` — Get an organization by id
- `DELETE /v1/organizations/:id` — Delete an organization by id
- `PUT /v1/organizations/:id` — Update an organization by id
- `GET /v1/organizations/:id/users` — Get all users in an organization
- `GET /v1/organizations/me/users` — Get all users in my organization
Think in terms of resource navigation in a file system. For example, to find your organization, you would navigate from the root of the file system into the `organizations` directory. To find a user in an organization, you would navigate into the `organizations` directory, then into that organization's directory (its `id`), and finally into its `users` directory.
```bash
v1/
├── organizations/
│   └── 123/
│       └── users/
```
`me` endpoints are special. They are used to determine the actual id via some auth/external mechanism. For `me` endpoints, think of the `me` directory being symlinked to your organization directory. For example, if you are a part of the organization `123`, the `me` directory will be symlinked to `/v1/organizations/123`:
```bash
v1/
├── organizations/
│   ├── me/ -> symlink to /v1/organizations/123
│   │   └── users/
│   └── 123/
│       └── users/
```
> 💡 **Note**: There are various ways to structure endpoints. Some prefer to use singular resource names instead of `me`. Others prefer to use singular resource names for all endpoints. We have, however, chosen to standardize our endpoints in the manner described above.
## What should I remember?
- Use clear, **plural resource names**
- Use `me` endpoints for determining the actual id via some auth mechanism
> 💡 **Note**: When in doubt, diagram the relationships and walk through the user flows as if navigating a file system. This will help you design endpoints that are both logical and user-friendly.
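To make this layout concrete, here is a minimal sketch of the routes above using only Go's standard `net/http` mux (Go 1.22+ pattern syntax). It is not SigNoz's actual router; the handler bodies, the `resolveMyOrgID` helper, and the hard-coded organization id `123` are hypothetical placeholders.
```go
package main

import (
	"fmt"
	"net/http"
)

// resolveMyOrgID stands in for the auth/external mechanism that maps "me"
// to a concrete organization id. Hypothetical helper for illustration only.
func resolveMyOrgID(r *http.Request) string {
	return "123"
}

func main() {
	mux := http.NewServeMux()

	// POST /v1/organizations: create an organization
	mux.HandleFunc("POST /v1/organizations", func(w http.ResponseWriter, r *http.Request) {
		fmt.Fprintln(w, "created organization")
	})

	// GET /v1/organizations/{id}: get an organization by id
	mux.HandleFunc("GET /v1/organizations/{id}", func(w http.ResponseWriter, r *http.Request) {
		fmt.Fprintf(w, "organization %s\n", r.PathValue("id"))
	})

	// GET /v1/organizations/{id}/users: get all users in an organization
	mux.HandleFunc("GET /v1/organizations/{id}/users", func(w http.ResponseWriter, r *http.Request) {
		fmt.Fprintf(w, "users of organization %s\n", r.PathValue("id"))
	})

	// GET /v1/organizations/me/users: "me" resolves to the caller's organization,
	// like the symlink in the tree above.
	mux.HandleFunc("GET /v1/organizations/me/users", func(w http.ResponseWriter, r *http.Request) {
		fmt.Fprintf(w, "users of organization %s\n", resolveMyOrgID(r))
	})

	_ = http.ListenAndServe(":8080", mux)
}
```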

View File

@@ -0,0 +1,103 @@
# Errors
SigNoz includes its own structured [errors](/pkg/errors/errors.go) package. It's built on top of Go's `error` interface, extending it to add additional context that helps provide more meaningful error messages throughout the application.
## How to use it?
To use the SigNoz structured errors package, use these functions instead of the standard library alternatives:
```go
// Instead of errors.New()
errors.New(typ, code, message)
// Instead of fmt.Errorf()
errors.Newf(typ, code, message, args...)
```
### Typ
The Typ (read as Type, defined as `typ`) is used to categorize errors across the codebase and is loosely coupled with HTTP/GRPC status codes. All predefined types can be found in [pkg/errors/type.go](/pkg/errors/type.go). For example:
- `TypeInvalidInput` - Indicates invalid input was provided
- `TypeNotFound` - Indicates a resource was not found
By design, `typ` is unexported and cannot be declared outside of [errors](/pkg/errors/errors.go) package. This ensures that it is consistent across the codebase and is used in a way that is meaningful.
### Code
Codes are used to provide more granular categorization within types. For instance, a type of `TypeInvalidInput` might have codes like `CodeInvalidEmail` or `CodeInvalidPassword`.
To create new error codes, use the `errors.MustNewCode` function:
```go
var (
CodeThingAlreadyExists = errors.MustNewCode("thing_already_exists")
CodeThingNotFound = errors.MustNewCode("thing_not_found")
)
```
> 💡 **Note**: Error codes must match the regex `^[a-z_]+$` otherwise the code will panic.
## Show me some examples
### Using the error
A basic example of using the error:
```go
var (
CodeThingAlreadyExists = errors.MustNewCode("thing_already_exists")
)
func CreateThing(id string) error {
t, err := thing.GetFromStore(id)
if err != nil {
if errors.Ast(err, errors.TypeNotFound) {
// thing was not found, create it
return thing.Create(id)
}
// something else went wrong, wrap the error with more context
return errors.Wrapf(err, errors.TypeInternal, errors.CodeUnknown, "failed to get thing from store")
}
return errors.Newf(errors.TypeAlreadyExists, CodeThingAlreadyExists, "thing with id %s already exists", id)
}
```
### Changing the error
Sometimes you may want to change the error while preserving the message:
```go
func GetUserSecurely(id string) (*User, error) {
user, err := repository.GetUser(id)
if err != nil {
if errors.Ast(err, errors.TypeNotFound) {
// Convert NotFound to Forbidden for security reasons
return nil, errors.New(errors.TypeForbidden, errors.CodeAccessDenied, "access denied to requested resource")
}
return nil, err
}
return user, nil
}
```
## Why do we need this?
In a large codebase like SigNoz, error handling is critical for maintaining reliability, debuggability, and a good user experience. We believe that it is the **responsibility of a function** to return **well-defined** errors that **accurately describe what went wrong**. With our structured error system:
- Functions can create precise errors with appropriate additional context
- Callers can make informed decisions based on the additional context
- Error context is preserved and enhanced as it moves up the call stack
The caller (which can be another function or a HTTP/gRPC handler or something else entirely), can then choose to use this error to take appropriate actions such as:
- A function can branch into different paths based on the context
- An HTTP/gRPC handler can derive the correct status code and message from the error and send it to the client (see the sketch below)
- Logging systems can capture structured error information for better diagnostics
Although this might seem too verbose in some cases, it makes the code more maintainable and consistent. A little verbose code is better than clever code that doesn't provide enough context.
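As a hedged illustration of the handler point above, a plain `net/http` handler might translate the error type into a status code roughly like this. The `statusFromError` helper and the handler are hypothetical, not SigNoz's actual HTTP layer; `CreateThing` is the function from the earlier example, and the set of types covered is not exhaustive.
```go
import (
	"net/http"

	"github.com/SigNoz/signoz/pkg/errors"
)

// statusFromError maps the error's typ to an HTTP status code (hypothetical helper).
func statusFromError(err error) int {
	switch {
	case errors.Ast(err, errors.TypeInvalidInput):
		return http.StatusBadRequest
	case errors.Ast(err, errors.TypeNotFound):
		return http.StatusNotFound
	case errors.Ast(err, errors.TypeForbidden):
		return http.StatusForbidden
	case errors.Ast(err, errors.TypeAlreadyExists):
		return http.StatusConflict
	default:
		return http.StatusInternalServerError
	}
}

// handleCreateThing assumes it is registered on a pattern such as "POST /v1/things/{id}".
func handleCreateThing(w http.ResponseWriter, r *http.Request) {
	if err := CreateThing(r.PathValue("id")); err != nil {
		// the structured error carries both the message and the context needed for the status code
		http.Error(w, err.Error(), statusFromError(err))
		return
	}
	w.WriteHeader(http.StatusCreated)
}
```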
## What should I remember?
- Think about error handling as you write your code, not as an afterthought.
- Always use the [errors](/pkg/errors/errors.go) package instead of the standard library's `errors.New()` or `fmt.Errorf()`.
- Always assign appropriate codes to errors when creating them instead of using the "catch all" error codes defined in [pkg/errors/code.go](/pkg/errors/code.go).
- Use `errors.Wrapf()` to add context to errors while preserving the original when appropriate.

View File

@@ -0,0 +1,106 @@
# Provider
SigNoz is built on the provider pattern, a design approach where code is organized into providers that handle specific application responsibilities. Providers act as adapter components that integrate with external services and deliver required functionality to the application.
> 💡 **Note**: Coming from a DDD background? Providers are similar (not exactly the same) to adapter/infrastructure services.
## How to create a new provider?
To create a new provider, create a directory in the `pkg/` directory named after your provider. The provider package consists of four key components:
- **Interface** (`pkg/<name>/<name>.go`): Defines the provider's interface. Other packages should import this interface to use the provider.
- **Config** (`pkg/<name>/config.go`): Contains provider configuration, implementing the `factory.Config` interface from [factory/config.go](/pkg/factory/config.go).
- **Implementation** (`pkg/<name>/<implname><name>/provider.go`): Contains the provider implementation, including a `NewProvider` function that returns a `factory.Provider` interface from [factory/provider.go](/pkg/factory/provider.go).
- **Mock** (`pkg/<name>/<name>test.go`): Provides mocks for the provider, typically used by dependent packages for unit testing.
For example, the [prometheus](/pkg/prometheus) provider delivers a prometheus engine to the application:
- `pkg/prometheus/prometheus.go` - Interface definition
- `pkg/prometheus/config.go` - Configuration
- `pkg/prometheus/clickhouseprometheus/provider.go` - Clickhouse-powered implementation
- `pkg/prometheus/prometheustest/provider.go` - Mock implementation
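To make the layout above concrete, here is a schematic sketch for an imaginary `greeter` provider, collapsed into a single snippet. It deliberately avoids SigNoz's real generics-based `factory` types, so treat it as an illustration of the shape of the four components, not of the actual API; in the real layout each piece lives in the file noted in its comment.
```go
package greeter

import "context"

// In the real layout this interface lives in pkg/greeter/greeter.go and is what
// other packages import.
type Greeter interface {
	Greet(ctx context.Context, name string) (string, error)
}

// In the real layout this config lives in pkg/greeter/config.go and implements
// factory.Config.
type Config struct {
	Prefix string `mapstructure:"prefix"`
}

// In the real layout this implementation lives in pkg/greeter/noopgreeter/provider.go,
// with a NewProvider constructor wrapped by a factory.ProviderFactory.
type noopGreeter struct {
	cfg Config
}

func NewProvider(cfg Config) Greeter {
	return &noopGreeter{cfg: cfg}
}

func (g *noopGreeter) Greet(_ context.Context, name string) (string, error) {
	return g.cfg.Prefix + name, nil
}

// In the real layout a mock for dependent packages' unit tests lives in
// pkg/greeter/greetertest/provider.go (omitted here).
```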
## How to wire it up?
The `pkg/signoz` package contains the inversion of control container responsible for wiring providers. It handles instantiation, configuration, and assembly of providers based on configuration metadata.
> 💡 **Note**: Coming from a Java background? Providers are similar to Spring beans.
Wiring up a provider involves three steps:
1. Wiring up the configuration
Add your config from `pkg/<name>/config.go` to the `Config` struct in `pkg/signoz/config.go` and register its config factory in `NewConfig`:
```go
type Config struct {
...
MyProvider myprovider.Config `mapstructure:"myprovider"`
...
}
func NewConfig(ctx context.Context, resolverConfig config.ResolverConfig, ....) (Config, error) {
...
configFactories := []factory.ConfigFactory{
myprovider.NewConfigFactory(),
}
...
}
```
2. Wiring up the provider
Add available provider implementations in `pkg/signoz/provider.go`:
```go
func NewMyProviderFactories() factory.NamedMap[factory.ProviderFactory[myprovider.MyProvider, myprovider.Config]] {
return factory.MustNewNamedMap(
myproviderone.NewFactory(),
myprovidertwo.NewFactory(),
)
}
```
3. Instantiate the provider by adding it to the `SigNoz` struct in `pkg/signoz/signoz.go`:
```go
type SigNoz struct {
...
MyProvider myprovider.MyProvider
...
}
func New(...) (*SigNoz, error) {
...
myprovider, err := myproviderone.New(ctx, settings, config.MyProvider, "one/two")
if err != nil {
return nil, err
}
...
}
```
## How to use it?
To use a provider, import its interface. For example, to use the prometheus provider, import `pkg/prometheus/prometheus.go`:
```go
import "github.com/SigNoz/signoz/pkg/prometheus/prometheus"
func CreateSomething(ctx context.Context, prometheus prometheus.Prometheus) {
...
prometheus.DoSomething()
...
}
```
## Why do we need this?
Like any dependency injection framework, providers decouple the codebase from implementation details. This is especially valuable in SigNoz's large codebase, where we need to swap implementations without changing dependent code. The provider pattern offers several benefits apart from the obvious one of decoupling:
- Configuration is **defined with each provider and centralized in one place**, making it easier to understand and manage through various methods (environment variables, config files, etc.)
- Provider mocking is **straightforward for unit testing**, with a consistent pattern for locating mocks
- **Multiple implementations** of the same provider are **supported**, as demonstrated by our sqlstore provider
## What should I remember?
- Use the provider pattern wherever applicable.
- Always create a provider **irrespective of the number of implementations**. This makes it easier to add new implementations in the future.

View File

@@ -0,0 +1,11 @@
# Go
This document provides an overview of contributing to the SigNoz backend written in Go. The SigNoz backend is built with Go, focusing on performance, maintainability, and developer experience. We strive for clean, idiomatic code that follows established Go practices while addressing the unique needs of an observability platform.
We adhere to three primary style guides as our foundation:
- [Effective Go](https://go.dev/doc/effective_go) - For writing idiomatic Go code
- [Code Review Comments](https://go.dev/wiki/CodeReviewComments) - For understanding common comments in code reviews
- [Google Style Guide](https://google.github.io/styleguide/go/) - Additional practices from Google
We **recommend** (almost enforce) reviewing these guides before contributing to the codebase. They provide valuable insights into writing idiomatic Go code and will help you understand our approach to backend development. We also have a few additional rules that are stricter than the guides above; these can be found in the area-specific files in this package.

View File

@@ -0,0 +1,94 @@
# SQL
SigNoz utilizes a relational database to store metadata including organization information, user data and other settings.
## How to use it?
The database interface is defined in [SQLStore](/pkg/sqlstore/sqlstore.go). SigNoz leverages the Bun ORM to interact with the underlying database. To access the database instance, use the `BunDBCtx` function. For operations that require transactions across multiple database operations, use the `RunInTxCtx` function. This function embeds a transaction in the context, which propagates through various functions in the callback.
```go
type Thing struct {
bun.BaseModel
ID types.Identifiable `bun:",embed"`
SomeColumn string `bun:"some_column"`
TimeAuditable types.TimeAuditable `bun:",embed"`
OrgID string `bun:"org_id"`
}
func GetThing(ctx context.Context, id string) (*Thing, error) {
thing := new(Thing)
err := sqlstore.
BunDBCtx(ctx).
NewSelect().
Model(thing).
Where("id = ?", id).
Scan(ctx)
return thing, err
}
func CreateThing(ctx context.Context, thing *Thing) error {
return sqlstore.
BunDBCtx(ctx).
NewInsert().
Model(thing).
Exec(ctx)
}
```
> 💡 **Note**: Always use line breaks while working with SQL queries to enhance code readability.
> 💡 **Note**: Always use the `new` function to create new instances of structs.
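The example above only uses `BunDBCtx`. As a hedged sketch of the transactional path described earlier, the shape is roughly the following; the exact `RunInTxCtx` signature, including the `nil` options argument, is an assumption, so check [SQLStore](/pkg/sqlstore/sqlstore.go) for the real one. `Thing` and `GetThing` are the types from the example above.
```go
func RenameThing(ctx context.Context, id string, newName string) error {
	// nil stands in for default transaction options (assumed).
	return sqlstore.RunInTxCtx(ctx, nil, func(ctx context.Context) error {
		// BunDBCtx(ctx) inside GetThing picks up the transaction embedded in ctx.
		thing, err := GetThing(ctx, id)
		if err != nil {
			return err
		}
		thing.SomeColumn = newName
		_, err = sqlstore.
			BunDBCtx(ctx).
			NewUpdate().
			Model(thing).
			Where("id = ?", id).
			Exec(ctx)
		// returning a non-nil error rolls back everything done inside the callback
		return err
	})
}
```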
## What are hooks?
Hooks are user-defined functions that execute before and/or after specific database operations. These hooks are particularly useful for generating telemetry data such as logs, traces, and metrics, providing visibility into database interactions. Hooks are defined in the [SQLStoreHook](/pkg/sqlstore/sqlstore.go) interface.
## How is the schema designed?
SigNoz implements a star schema design with the organizations table as the central entity. All other tables link to the organizations table via foreign key constraints on the `org_id` column. This design ensures that every entity within the system is either directly or indirectly associated with an organization.
```mermaid
erDiagram
ORGANIZATIONS {
string id PK
timestamp created_at
timestamp updated_at
}
ENTITY_A {
string id PK
timestamp created_at
timestamp updated_at
string org_id FK
}
ENTITY_B {
string id PK
timestamp created_at
timestamp updated_at
string org_id FK
}
ORGANIZATIONS ||--o{ ENTITY_A : contains
ORGANIZATIONS ||--o{ ENTITY_B : contains
```
> 💡 **Note**: There are rare exceptions to the above star schema design. Consult with the maintainers before deviating from the above design.
All tables follow a consistent primary key pattern using an `id` column (referenced by the `types.Identifiable` struct) and include `created_at` and `updated_at` columns (referenced by the `types.TimeAuditable` struct) for audit purposes.
## How to write migrations?
For schema migrations, use the [SQLMigration](/pkg/sqlmigration/sqlmigration.go) interface and write the migration in the same package. When creating migrations, adhere to these guidelines:
- Do not implement **`ON CASCADE` foreign key constraints**. Deletion operations should be handled explicitly in application logic rather than delegated to the database.
- Do not **import types from the types package** in the `sqlmigration` package. Instead, define the required types within the migration package itself. This practice ensures migration stability as the core types evolve over time.
- Do not implement **`Down` migrations**. As the codebase matures, we may introduce this capability, but for now, the `Down` function should remain empty.
- Always write **idempotent** migrations: running the same migration multiple times must not cause an error (a sketch follows this list).
- A migration which is **dependent on the underlying dialect** (sqlite, postgres, etc) should be written as part of the [SQLDialect](/pkg/sqlstore/sqlstore.go) interface. The implementation needs to go in the dialect specific package of the respective database.
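Building on these guidelines, here is a hedged sketch of an idempotent migration. The `Up`/`Down` method signatures mirror the common bun migration shape and are an assumption; the authoritative interface is [SQLMigration](/pkg/sqlmigration/sqlmigration.go). The `thing` table itself is made up for illustration.
```go
package sqlmigration

import (
	"context"
	"time"

	"github.com/uptrace/bun"
)

// thing is redefined locally instead of importing from the types package, so the
// migration stays stable even if the core types evolve.
type thing struct {
	bun.BaseModel `bun:"table:thing"`

	ID        string    `bun:"id,pk,type:text"`
	CreatedAt time.Time `bun:"created_at,notnull"`
	UpdatedAt time.Time `bun:"updated_at,notnull"`
	OrgID     string    `bun:"org_id,type:text,notnull"`
}

type addThing struct{}

func (migration *addThing) Up(ctx context.Context, db *bun.DB) error {
	// IfNotExists keeps the migration idempotent: running it twice is a no-op.
	_, err := db.
		NewCreateTable().
		Model(new(thing)).
		IfNotExists().
		ForeignKey(`("org_id") REFERENCES "organizations" ("id")`).
		Exec(ctx)
	return err
}

func (migration *addThing) Down(ctx context.Context, db *bun.DB) error {
	// Down migrations are intentionally left empty for now.
	return nil
}
```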
## What should I remember?
- Use `BunDBCtx` and `RunInTxCtx` to access the database instance and execute transactions respectively.
- While designing new tables, keep the `id`, `created_at`, and `updated_at` columns consistent and include an `org_id` column with a foreign key constraint to the `organizations` table (unless the table is a transitive entity that is only indirectly associated with an organization).
- Implement deletion logic in the application rather than relying on cascading deletes in the database.
- While writing migrations, adhere to the guidelines mentioned above.

View File

@@ -16,7 +16,7 @@ __Table of Contents__
- [Prerequisites](#prerequisites-1)
- [Install Helm Repo and Charts](#install-helm-repo-and-charts)
- [Start the OpenTelemetry Demo App](#start-the-opentelemetry-demo-app-1)
- [Moniitor with SigNoz (Kubernetes)](#monitor-with-signoz-kubernetes)
- [Monitor with SigNoz (Kubernetes)](#monitor-with-signoz-kubernetes)
- [What's next](#whats-next)

34
ee/anomaly/daily.go Normal file
View File

@@ -0,0 +1,34 @@
package anomaly
import (
"context"
"github.com/SigNoz/signoz/pkg/valuer"
)
type DailyProvider struct {
BaseSeasonalProvider
}
var _ BaseProvider = (*DailyProvider)(nil)
func (dp *DailyProvider) GetBaseSeasonalProvider() *BaseSeasonalProvider {
return &dp.BaseSeasonalProvider
}
func NewDailyProvider(opts ...GenericProviderOption[*DailyProvider]) *DailyProvider {
dp := &DailyProvider{
BaseSeasonalProvider: BaseSeasonalProvider{},
}
for _, opt := range opts {
opt(dp)
}
return dp
}
func (p *DailyProvider) GetAnomalies(ctx context.Context, orgID valuer.UUID, req *AnomaliesRequest) (*AnomaliesResponse, error) {
req.Seasonality = SeasonalityDaily
return p.getAnomalies(ctx, orgID, req)
}

35
ee/anomaly/hourly.go Normal file
View File

@@ -0,0 +1,35 @@
package anomaly
import (
"context"
"github.com/SigNoz/signoz/pkg/valuer"
)
type HourlyProvider struct {
BaseSeasonalProvider
}
var _ BaseProvider = (*HourlyProvider)(nil)
func (hp *HourlyProvider) GetBaseSeasonalProvider() *BaseSeasonalProvider {
return &hp.BaseSeasonalProvider
}
// NewHourlyProvider now uses the generic option type
func NewHourlyProvider(opts ...GenericProviderOption[*HourlyProvider]) *HourlyProvider {
hp := &HourlyProvider{
BaseSeasonalProvider: BaseSeasonalProvider{},
}
for _, opt := range opts {
opt(hp)
}
return hp
}
func (p *HourlyProvider) GetAnomalies(ctx context.Context, orgID valuer.UUID, req *AnomaliesRequest) (*AnomaliesResponse, error) {
req.Seasonality = SeasonalityHourly
return p.getAnomalies(ctx, orgID, req)
}

223
ee/anomaly/params.go Normal file
View File

@@ -0,0 +1,223 @@
package anomaly
import (
"time"
qbtypes "github.com/SigNoz/signoz/pkg/types/querybuildertypes/querybuildertypesv5"
"github.com/SigNoz/signoz/pkg/valuer"
)
type Seasonality struct{ valuer.String }
var (
SeasonalityHourly = Seasonality{valuer.NewString("hourly")}
SeasonalityDaily = Seasonality{valuer.NewString("daily")}
SeasonalityWeekly = Seasonality{valuer.NewString("weekly")}
)
var (
oneWeekOffset = uint64(24 * 7 * time.Hour.Milliseconds())
oneDayOffset = uint64(24 * time.Hour.Milliseconds())
oneHourOffset = uint64(time.Hour.Milliseconds())
fiveMinOffset = uint64(5 * time.Minute.Milliseconds())
)
func (s Seasonality) IsValid() bool {
switch s {
case SeasonalityHourly, SeasonalityDaily, SeasonalityWeekly:
return true
default:
return false
}
}
type AnomaliesRequest struct {
Params qbtypes.QueryRangeRequest
Seasonality Seasonality
}
type AnomaliesResponse struct {
Results []*qbtypes.TimeSeriesData
}
// anomalyQueryParams holds the query windows used for anomaly detection.
//
// prediction = avg(past_period_query)                                                                           <- rounded value for the past period
//              + (avg(current_season_query) - mean(past_season_query, past2_season_query, past3_season_query))  <- seasonal growth
//
// score = abs(value - prediction) / stddev(current_season_query)
type anomalyQueryParams struct {
// CurrentPeriodQuery is the query range params for the period the user is looking at (the eval window)
// Example: (now-5m, now), (now-30m, now), (now-1h, now)
// The results obtained from this query are used to compare with predicted values
// and to detect anomalies
CurrentPeriodQuery qbtypes.QueryRangeRequest
// PastPeriodQuery is the query range params for past period of seasonality
// Example: For weekly seasonality, (now-1w-5m, now-1w)
// : For daily seasonality, (now-1d-5m, now-1d)
// : For hourly seasonality, (now-1h-5m, now-1h)
PastPeriodQuery qbtypes.QueryRangeRequest
// CurrentSeasonQuery is the query range params for current period (seasonal)
// Example: For weekly seasonality, this is the query range params for the (now-1w-5m, now)
// : For daily seasonality, this is the query range params for the (now-1d-5m, now)
// : For hourly seasonality, this is the query range params for the (now-1h-5m, now)
CurrentSeasonQuery qbtypes.QueryRangeRequest
// PastSeasonQuery is the query range params for past seasonal period to the current season
// Example: For weekly seasonality, this is the query range params for the (now-2w-5m, now-1w)
// : For daily seasonality, this is the query range params for the (now-2d-5m, now-1d)
// : For hourly seasonality, this is the query range params for the (now-2h-5m, now-1h)
PastSeasonQuery qbtypes.QueryRangeRequest
// Past2SeasonQuery is the query range params for past 2 seasonal period to the current season
// Example: For weekly seasonality, this is the query range params for the (now-3w-5m, now-2w)
// : For daily seasonality, this is the query range params for the (now-3d-5m, now-2d)
// : For hourly seasonality, this is the query range params for the (now-3h-5m, now-2h)
Past2SeasonQuery qbtypes.QueryRangeRequest
// Past3SeasonQuery is the query range params for past 3 seasonal period to the current season
// Example: For weekly seasonality, this is the query range params for the (now-4w-5m, now-3w)
// : For daily seasonality, this is the query range params for the (now-4d-5m, now-3d)
// : For hourly seasonality, this is the query range params for the (now-4h-5m, now-3h)
Past3SeasonQuery qbtypes.QueryRangeRequest
}
func prepareAnomalyQueryParams(req qbtypes.QueryRangeRequest, seasonality Seasonality) *anomalyQueryParams {
start := req.Start
end := req.End
currentPeriodQuery := qbtypes.QueryRangeRequest{
Start: start,
End: end,
RequestType: qbtypes.RequestTypeTimeSeries,
CompositeQuery: req.CompositeQuery,
NoCache: false,
}
var pastPeriodStart, pastPeriodEnd uint64
switch seasonality {
// for one week period, we fetch the data from the past week with 5 min offset
case SeasonalityWeekly:
pastPeriodStart = start - oneWeekOffset - fiveMinOffset
pastPeriodEnd = end - oneWeekOffset
// for one day period, we fetch the data from the past day with 5 min offset
case SeasonalityDaily:
pastPeriodStart = start - oneDayOffset - fiveMinOffset
pastPeriodEnd = end - oneDayOffset
// for one hour period, we fetch the data from the past hour with 5 min offset
case SeasonalityHourly:
pastPeriodStart = start - oneHourOffset - fiveMinOffset
pastPeriodEnd = end - oneHourOffset
}
pastPeriodQuery := qbtypes.QueryRangeRequest{
Start: pastPeriodStart,
End: pastPeriodEnd,
RequestType: qbtypes.RequestTypeTimeSeries,
CompositeQuery: req.CompositeQuery,
NoCache: false,
}
// seasonality growth trend
var currentGrowthPeriodStart, currentGrowthPeriodEnd uint64
switch seasonality {
case SeasonalityWeekly:
currentGrowthPeriodStart = start - oneWeekOffset
currentGrowthPeriodEnd = start
case SeasonalityDaily:
currentGrowthPeriodStart = start - oneDayOffset
currentGrowthPeriodEnd = start
case SeasonalityHourly:
currentGrowthPeriodStart = start - oneHourOffset
currentGrowthPeriodEnd = start
}
currentGrowthQuery := qbtypes.QueryRangeRequest{
Start: currentGrowthPeriodStart,
End: currentGrowthPeriodEnd,
RequestType: qbtypes.RequestTypeTimeSeries,
CompositeQuery: req.CompositeQuery,
NoCache: false,
}
var pastGrowthPeriodStart, pastGrowthPeriodEnd uint64
switch seasonality {
case SeasonalityWeekly:
pastGrowthPeriodStart = start - 2*oneWeekOffset
pastGrowthPeriodEnd = start - 1*oneWeekOffset
case SeasonalityDaily:
pastGrowthPeriodStart = start - 2*oneDayOffset
pastGrowthPeriodEnd = start - 1*oneDayOffset
case SeasonalityHourly:
pastGrowthPeriodStart = start - 2*oneHourOffset
pastGrowthPeriodEnd = start - 1*oneHourOffset
}
pastGrowthQuery := qbtypes.QueryRangeRequest{
Start: pastGrowthPeriodStart,
End: pastGrowthPeriodEnd,
RequestType: qbtypes.RequestTypeTimeSeries,
CompositeQuery: req.CompositeQuery,
NoCache: false,
}
var past2GrowthPeriodStart, past2GrowthPeriodEnd uint64
switch seasonality {
case SeasonalityWeekly:
past2GrowthPeriodStart = start - 3*oneWeekOffset
past2GrowthPeriodEnd = start - 2*oneWeekOffset
case SeasonalityDaily:
past2GrowthPeriodStart = start - 3*oneDayOffset
past2GrowthPeriodEnd = start - 2*oneDayOffset
case SeasonalityHourly:
past2GrowthPeriodStart = start - 3*oneHourOffset
past2GrowthPeriodEnd = start - 2*oneHourOffset
}
past2GrowthQuery := qbtypes.QueryRangeRequest{
Start: past2GrowthPeriodStart,
End: past2GrowthPeriodEnd,
RequestType: qbtypes.RequestTypeTimeSeries,
CompositeQuery: req.CompositeQuery,
NoCache: false,
}
var past3GrowthPeriodStart, past3GrowthPeriodEnd uint64
switch seasonality {
case SeasonalityWeekly:
past3GrowthPeriodStart = start - 4*oneWeekOffset
past3GrowthPeriodEnd = start - 3*oneWeekOffset
case SeasonalityDaily:
past3GrowthPeriodStart = start - 4*oneDayOffset
past3GrowthPeriodEnd = start - 3*oneDayOffset
case SeasonalityHourly:
past3GrowthPeriodStart = start - 4*oneHourOffset
past3GrowthPeriodEnd = start - 3*oneHourOffset
}
past3GrowthQuery := qbtypes.QueryRangeRequest{
Start: past3GrowthPeriodStart,
End: past3GrowthPeriodEnd,
RequestType: qbtypes.RequestTypeTimeSeries,
CompositeQuery: req.CompositeQuery,
NoCache: false,
}
return &anomalyQueryParams{
CurrentPeriodQuery: currentPeriodQuery,
PastPeriodQuery: pastPeriodQuery,
CurrentSeasonQuery: currentGrowthQuery,
PastSeasonQuery: pastGrowthQuery,
Past2SeasonQuery: past2GrowthQuery,
Past3SeasonQuery: past3GrowthQuery,
}
}
type anomalyQueryResults struct {
CurrentPeriodResults []*qbtypes.TimeSeriesData
PastPeriodResults []*qbtypes.TimeSeriesData
CurrentSeasonResults []*qbtypes.TimeSeriesData
PastSeasonResults []*qbtypes.TimeSeriesData
Past2SeasonResults []*qbtypes.TimeSeriesData
Past3SeasonResults []*qbtypes.TimeSeriesData
}
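A small standalone sketch of the window arithmetic in prepareAnomalyQueryParams above, for the weekly-seasonality case. Timestamps are epoch milliseconds as in the real code; the 30-minute evaluation window is made up for illustration:

package main

import (
	"fmt"
	"time"
)

func main() {
	// Hypothetical 30-minute evaluation window, in epoch milliseconds.
	end := uint64(time.Now().UnixMilli())
	start := end - uint64(30*time.Minute.Milliseconds())

	oneWeek := uint64(24 * 7 * time.Hour.Milliseconds())
	fiveMin := uint64(5 * time.Minute.Milliseconds())

	// Past period for weekly seasonality: the same window one week ago,
	// padded by 5 minutes at the start (mirrors the SeasonalityWeekly case).
	pastStart := start - oneWeek - fiveMin
	pastEnd := end - oneWeek

	// Current season: the full week leading up to the window start.
	seasonStart := start - oneWeek
	seasonEnd := start

	fmt.Println("current period:", start, end)
	fmt.Println("past period:   ", pastStart, pastEnd)
	fmt.Println("current season:", seasonStart, seasonEnd)
}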

11
ee/anomaly/provider.go Normal file
View File

@@ -0,0 +1,11 @@
package anomaly
import (
"context"
"github.com/SigNoz/signoz/pkg/valuer"
)
type Provider interface {
GetAnomalies(ctx context.Context, orgID valuer.UUID, req *AnomaliesRequest) (*AnomaliesResponse, error)
}

463
ee/anomaly/seasonal.go Normal file
View File

@@ -0,0 +1,463 @@
package anomaly
import (
"context"
"log/slog"
"math"
"github.com/SigNoz/signoz/pkg/querier"
"github.com/SigNoz/signoz/pkg/valuer"
qbtypes "github.com/SigNoz/signoz/pkg/types/querybuildertypes/querybuildertypesv5"
)
var (
// TODO(srikanthccv): make this configurable?
movingAvgWindowSize = 7
)
// BaseProvider is an interface that includes common methods for all provider types
type BaseProvider interface {
GetBaseSeasonalProvider() *BaseSeasonalProvider
}
// GenericProviderOption is a generic type for provider options
type GenericProviderOption[T BaseProvider] func(T)
func WithQuerier[T BaseProvider](querier querier.Querier) GenericProviderOption[T] {
return func(p T) {
p.GetBaseSeasonalProvider().querier = querier
}
}
func WithLogger[T BaseProvider](logger *slog.Logger) GenericProviderOption[T] {
return func(p T) {
p.GetBaseSeasonalProvider().logger = logger
}
}
type BaseSeasonalProvider struct {
querier querier.Querier
logger *slog.Logger
}
func (p *BaseSeasonalProvider) getQueryParams(req *AnomaliesRequest) *anomalyQueryParams {
if !req.Seasonality.IsValid() {
req.Seasonality = SeasonalityDaily
}
return prepareAnomalyQueryParams(req.Params, req.Seasonality)
}
func (p *BaseSeasonalProvider) toTSResults(ctx context.Context, resp *qbtypes.QueryRangeResponse) []*qbtypes.TimeSeriesData {
tsData := []*qbtypes.TimeSeriesData{}
if resp == nil {
p.logger.InfoContext(ctx, "nil response from query range")
return tsData
}
for _, item := range resp.Data.Results {
if resultData, ok := item.(*qbtypes.TimeSeriesData); ok {
tsData = append(tsData, resultData)
}
}
return tsData
}
func (p *BaseSeasonalProvider) getResults(ctx context.Context, orgID valuer.UUID, params *anomalyQueryParams) (*anomalyQueryResults, error) {
// TODO(srikanthccv): parallelize this?
p.logger.InfoContext(ctx, "fetching results for current period", "anomaly_current_period_query", params.CurrentPeriodQuery)
currentPeriodResults, err := p.querier.QueryRange(ctx, orgID, &params.CurrentPeriodQuery)
if err != nil {
return nil, err
}
p.logger.InfoContext(ctx, "fetching results for past period", "anomaly_past_period_query", params.PastPeriodQuery)
pastPeriodResults, err := p.querier.QueryRange(ctx, orgID, &params.PastPeriodQuery)
if err != nil {
return nil, err
}
p.logger.InfoContext(ctx, "fetching results for current season", "anomaly_current_season_query", params.CurrentSeasonQuery)
currentSeasonResults, err := p.querier.QueryRange(ctx, orgID, &params.CurrentSeasonQuery)
if err != nil {
return nil, err
}
p.logger.InfoContext(ctx, "fetching results for past season", "anomaly_past_season_query", params.PastSeasonQuery)
pastSeasonResults, err := p.querier.QueryRange(ctx, orgID, &params.PastSeasonQuery)
if err != nil {
return nil, err
}
p.logger.InfoContext(ctx, "fetching results for past 2 season", "anomaly_past_2season_query", params.Past2SeasonQuery)
past2SeasonResults, err := p.querier.QueryRange(ctx, orgID, &params.Past2SeasonQuery)
if err != nil {
return nil, err
}
p.logger.InfoContext(ctx, "fetching results for past 3 season", "anomaly_past_3season_query", params.Past3SeasonQuery)
past3SeasonResults, err := p.querier.QueryRange(ctx, orgID, &params.Past3SeasonQuery)
if err != nil {
return nil, err
}
return &anomalyQueryResults{
CurrentPeriodResults: p.toTSResults(ctx, currentPeriodResults),
PastPeriodResults: p.toTSResults(ctx, pastPeriodResults),
CurrentSeasonResults: p.toTSResults(ctx, currentSeasonResults),
PastSeasonResults: p.toTSResults(ctx, pastSeasonResults),
Past2SeasonResults: p.toTSResults(ctx, past2SeasonResults),
Past3SeasonResults: p.toTSResults(ctx, past3SeasonResults),
}, nil
}
// getMatchingSeries gets the matching series from the query result
// for the given series
func (p *BaseSeasonalProvider) getMatchingSeries(_ context.Context, queryResult *qbtypes.TimeSeriesData, series *qbtypes.TimeSeries) *qbtypes.TimeSeries {
if queryResult == nil || len(queryResult.Aggregations) == 0 || len(queryResult.Aggregations[0].Series) == 0 {
return nil
}
for _, curr := range queryResult.Aggregations[0].Series {
currLabelsKey := qbtypes.GetUniqueSeriesKey(curr.Labels)
seriesLabelsKey := qbtypes.GetUniqueSeriesKey(series.Labels)
if currLabelsKey == seriesLabelsKey {
return curr
}
}
return nil
}
func (p *BaseSeasonalProvider) getAvg(series *qbtypes.TimeSeries) float64 {
if series == nil || len(series.Values) == 0 {
return 0
}
var sum float64
for _, smpl := range series.Values {
sum += smpl.Value
}
return sum / float64(len(series.Values))
}
func (p *BaseSeasonalProvider) getStdDev(series *qbtypes.TimeSeries) float64 {
if series == nil || len(series.Values) == 0 {
return 0
}
avg := p.getAvg(series)
var sum float64
for _, smpl := range series.Values {
sum += math.Pow(smpl.Value-avg, 2)
}
return math.Sqrt(sum / float64(len(series.Values)))
}
// getMovingAvg gets the moving average for the given series
// for the given window size and start index
func (p *BaseSeasonalProvider) getMovingAvg(series *qbtypes.TimeSeries, movingAvgWindowSize, startIdx int) float64 {
if series == nil || len(series.Values) == 0 {
return 0
}
if startIdx >= len(series.Values)-movingAvgWindowSize {
startIdx = int(math.Max(0, float64(len(series.Values)-movingAvgWindowSize)))
}
var sum float64
points := series.Values[startIdx:]
windowSize := int(math.Min(float64(movingAvgWindowSize), float64(len(points))))
for i := 0; i < windowSize; i++ {
sum += points[i].Value
}
avg := sum / float64(windowSize)
return avg
}
func (p *BaseSeasonalProvider) getMean(floats ...float64) float64 {
if len(floats) == 0 {
return 0
}
var sum float64
for _, f := range floats {
sum += f
}
return sum / float64(len(floats))
}
func (p *BaseSeasonalProvider) getPredictedSeries(
ctx context.Context,
series, prevSeries, currentSeasonSeries, pastSeasonSeries, past2SeasonSeries, past3SeasonSeries *qbtypes.TimeSeries,
) *qbtypes.TimeSeries {
predictedSeries := &qbtypes.TimeSeries{
Labels: series.Labels,
Values: make([]*qbtypes.TimeSeriesValue, 0),
}
// for each point in the series, get the predicted value
// the predicted value is the moving average (with window size = 7) of the previous period series
// plus the average of the current season series
// minus the mean of the past season series, past2 season series and past3 season series
for idx, curr := range series.Values {
movingAvg := p.getMovingAvg(prevSeries, movingAvgWindowSize, idx)
avg := p.getAvg(currentSeasonSeries)
mean := p.getMean(p.getAvg(pastSeasonSeries), p.getAvg(past2SeasonSeries), p.getAvg(past3SeasonSeries))
predictedValue := movingAvg + avg - mean
if predictedValue < 0 {
// this should not happen (except when the data has extreme outliers)
// we will use the moving avg of the previous period series in this case
p.logger.WarnContext(ctx, "predicted value is less than 0 for series", "anomaly_predicted_value", predictedValue, "anomaly_labels", series.Labels)
predictedValue = p.getMovingAvg(prevSeries, movingAvgWindowSize, idx)
}
p.logger.DebugContext(ctx, "predicted value for series",
"anomaly_moving_avg", movingAvg,
"anomaly_avg", avg,
"anomaly_mean", mean,
"anomaly_labels", series.Labels,
"anomaly_predicted_value", predictedValue,
"anomaly_curr", curr.Value,
)
predictedSeries.Values = append(predictedSeries.Values, &qbtypes.TimeSeriesValue{
Timestamp: curr.Timestamp,
Value: predictedValue,
})
}
return predictedSeries
}
// getBounds gets the upper and lower bounds for the given series
// for the given z score threshold
// moving avg of the previous period series + z score threshold * std dev of the series
// moving avg of the previous period series - z score threshold * std dev of the series
func (p *BaseSeasonalProvider) getBounds(
series, predictedSeries *qbtypes.TimeSeries,
zScoreThreshold float64,
) (*qbtypes.TimeSeries, *qbtypes.TimeSeries) {
upperBoundSeries := &qbtypes.TimeSeries{
Labels: series.Labels,
Values: make([]*qbtypes.TimeSeriesValue, 0),
}
lowerBoundSeries := &qbtypes.TimeSeries{
Labels: series.Labels,
Values: make([]*qbtypes.TimeSeriesValue, 0),
}
for idx, curr := range series.Values {
upperBound := p.getMovingAvg(predictedSeries, movingAvgWindowSize, idx) + zScoreThreshold*p.getStdDev(series)
lowerBound := p.getMovingAvg(predictedSeries, movingAvgWindowSize, idx) - zScoreThreshold*p.getStdDev(series)
upperBoundSeries.Values = append(upperBoundSeries.Values, &qbtypes.TimeSeriesValue{
Timestamp: curr.Timestamp,
Value: upperBound,
})
lowerBoundSeries.Values = append(lowerBoundSeries.Values, &qbtypes.TimeSeriesValue{
Timestamp: curr.Timestamp,
Value: math.Max(lowerBound, 0),
})
}
return upperBoundSeries, lowerBoundSeries
}
// getExpectedValue gets the expected value for the given series
// for the given index
// prevSeriesAvg + currentSeasonSeriesAvg - mean of past season series, past2 season series and past3 season series
func (p *BaseSeasonalProvider) getExpectedValue(
_, prevSeries, currentSeasonSeries, pastSeasonSeries, past2SeasonSeries, past3SeasonSeries *qbtypes.TimeSeries, idx int,
) float64 {
prevSeriesAvg := p.getMovingAvg(prevSeries, movingAvgWindowSize, idx)
currentSeasonSeriesAvg := p.getAvg(currentSeasonSeries)
pastSeasonSeriesAvg := p.getAvg(pastSeasonSeries)
past2SeasonSeriesAvg := p.getAvg(past2SeasonSeries)
past3SeasonSeriesAvg := p.getAvg(past3SeasonSeries)
return prevSeriesAvg + currentSeasonSeriesAvg - p.getMean(pastSeasonSeriesAvg, past2SeasonSeriesAvg, past3SeasonSeriesAvg)
}
// getScore gets the anomaly score for the given series
// for the given index
// (value - expectedValue) / std dev of the series
func (p *BaseSeasonalProvider) getScore(
series, prevSeries, weekSeries, weekPrevSeries, past2SeasonSeries, past3SeasonSeries *qbtypes.TimeSeries, value float64, idx int,
) float64 {
expectedValue := p.getExpectedValue(series, prevSeries, weekSeries, weekPrevSeries, past2SeasonSeries, past3SeasonSeries, idx)
if expectedValue < 0 {
expectedValue = p.getMovingAvg(prevSeries, movingAvgWindowSize, idx)
}
return (value - expectedValue) / p.getStdDev(weekSeries)
}
// getAnomalyScores gets the anomaly scores for the given series
// for the given index
// (value - expectedValue) / std dev of the series
func (p *BaseSeasonalProvider) getAnomalyScores(
series, prevSeries, currentSeasonSeries, pastSeasonSeries, past2SeasonSeries, past3SeasonSeries *qbtypes.TimeSeries,
) *qbtypes.TimeSeries {
anomalyScoreSeries := &qbtypes.TimeSeries{
Labels: series.Labels,
Values: make([]*qbtypes.TimeSeriesValue, 0),
}
for idx, curr := range series.Values {
anomalyScore := p.getScore(series, prevSeries, currentSeasonSeries, pastSeasonSeries, past2SeasonSeries, past3SeasonSeries, curr.Value, idx)
anomalyScoreSeries.Values = append(anomalyScoreSeries.Values, &qbtypes.TimeSeriesValue{
Timestamp: curr.Timestamp,
Value: anomalyScore,
})
}
return anomalyScoreSeries
}
func (p *BaseSeasonalProvider) getAnomalies(ctx context.Context, orgID valuer.UUID, req *AnomaliesRequest) (*AnomaliesResponse, error) {
anomalyParams := p.getQueryParams(req)
anomalyQueryResults, err := p.getResults(ctx, orgID, anomalyParams)
if err != nil {
return nil, err
}
currentPeriodResults := make(map[string]*qbtypes.TimeSeriesData)
for _, result := range anomalyQueryResults.CurrentPeriodResults {
currentPeriodResults[result.QueryName] = result
}
pastPeriodResults := make(map[string]*qbtypes.TimeSeriesData)
for _, result := range anomalyQueryResults.PastPeriodResults {
pastPeriodResults[result.QueryName] = result
}
currentSeasonResults := make(map[string]*qbtypes.TimeSeriesData)
for _, result := range anomalyQueryResults.CurrentSeasonResults {
currentSeasonResults[result.QueryName] = result
}
pastSeasonResults := make(map[string]*qbtypes.TimeSeriesData)
for _, result := range anomalyQueryResults.PastSeasonResults {
pastSeasonResults[result.QueryName] = result
}
past2SeasonResults := make(map[string]*qbtypes.TimeSeriesData)
for _, result := range anomalyQueryResults.Past2SeasonResults {
past2SeasonResults[result.QueryName] = result
}
past3SeasonResults := make(map[string]*qbtypes.TimeSeriesData)
for _, result := range anomalyQueryResults.Past3SeasonResults {
past3SeasonResults[result.QueryName] = result
}
for _, result := range currentPeriodResults {
funcs := req.Params.FuncsForQuery(result.QueryName)
var zScoreThreshold float64
for _, f := range funcs {
if f.Name == qbtypes.FunctionNameAnomaly {
for _, arg := range f.Args {
if arg.Name != "z_score_threshold" {
continue
}
value, ok := arg.Value.(float64)
if ok {
zScoreThreshold = value
} else {
p.logger.InfoContext(ctx, "z_score_threshold not provided, defaulting")
zScoreThreshold = 3
}
break
}
}
}
pastPeriodResult, ok := pastPeriodResults[result.QueryName]
if !ok {
continue
}
currentSeasonResult, ok := currentSeasonResults[result.QueryName]
if !ok {
continue
}
pastSeasonResult, ok := pastSeasonResults[result.QueryName]
if !ok {
continue
}
past2SeasonResult, ok := past2SeasonResults[result.QueryName]
if !ok {
continue
}
past3SeasonResult, ok := past3SeasonResults[result.QueryName]
if !ok {
continue
}
// no data;
if len(result.Aggregations) == 0 {
continue
}
aggOfInterest := result.Aggregations[0]
for _, series := range aggOfInterest.Series {
stdDev := p.getStdDev(series)
p.logger.InfoContext(ctx, "calculated standard deviation for series", "anomaly_std_dev", stdDev, "anomaly_labels", series.Labels)
pastPeriodSeries := p.getMatchingSeries(ctx, pastPeriodResult, series)
currentSeasonSeries := p.getMatchingSeries(ctx, currentSeasonResult, series)
pastSeasonSeries := p.getMatchingSeries(ctx, pastSeasonResult, series)
past2SeasonSeries := p.getMatchingSeries(ctx, past2SeasonResult, series)
past3SeasonSeries := p.getMatchingSeries(ctx, past3SeasonResult, series)
prevSeriesAvg := p.getAvg(pastPeriodSeries)
currentSeasonSeriesAvg := p.getAvg(currentSeasonSeries)
pastSeasonSeriesAvg := p.getAvg(pastSeasonSeries)
past2SeasonSeriesAvg := p.getAvg(past2SeasonSeries)
past3SeasonSeriesAvg := p.getAvg(past3SeasonSeries)
p.logger.InfoContext(ctx, "calculated mean for series",
"anomaly_prev_series_avg", prevSeriesAvg,
"anomaly_current_season_series_avg", currentSeasonSeriesAvg,
"anomaly_past_season_series_avg", pastSeasonSeriesAvg,
"anomaly_past_2season_series_avg", past2SeasonSeriesAvg,
"anomaly_past_3season_series_avg", past3SeasonSeriesAvg,
"anomaly_labels", series.Labels,
)
predictedSeries := p.getPredictedSeries(
ctx,
series,
pastPeriodSeries,
currentSeasonSeries,
pastSeasonSeries,
past2SeasonSeries,
past3SeasonSeries,
)
aggOfInterest.PredictedSeries = append(aggOfInterest.PredictedSeries, predictedSeries)
upperBoundSeries, lowerBoundSeries := p.getBounds(
series,
predictedSeries,
zScoreThreshold,
)
aggOfInterest.UpperBoundSeries = append(aggOfInterest.UpperBoundSeries, upperBoundSeries)
aggOfInterest.LowerBoundSeries = append(aggOfInterest.LowerBoundSeries, lowerBoundSeries)
anomalyScoreSeries := p.getAnomalyScores(
series,
pastPeriodSeries,
currentSeasonSeries,
pastSeasonSeries,
past2SeasonSeries,
past3SeasonSeries,
)
aggOfInterest.AnomalyScores = append(aggOfInterest.AnomalyScores, anomalyScoreSeries)
}
}
results := make([]*qbtypes.TimeSeriesData, 0, len(currentPeriodResults))
for _, result := range currentPeriodResults {
results = append(results, result)
}
return &AnomaliesResponse{
Results: results,
}, nil
}
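To make the arithmetic in getPredictedSeries, getBounds, and getScore concrete, a simplified, self-contained sketch with made-up numbers. The real code takes a moving average over the predicted series for the bounds and uses different series' standard deviations for bounds versus score; this sketch only illustrates the shape of the calculation:

package main

import (
	"fmt"
	"math"
)

func main() {
	// Illustrative aggregates for one series (all values are made up).
	movingAvgPrevPeriod := 100.0 // moving avg of the past-period series
	avgCurrentSeason := 110.0    // avg of the current season series
	meanPastSeasons := 95.0      // mean of avgs of past 1/2/3 season series
	stdDev := 8.0                // assumed std dev (real code uses per-series std devs)
	zScoreThreshold := 3.0
	observed := 160.0

	// prediction = moving avg(past period) + avg(current season) - mean(past seasons)
	predicted := movingAvgPrevPeriod + avgCurrentSeason - meanPastSeasons // 115

	// bounds = prediction +/- z * std dev, lower bound clamped at 0
	upper := predicted + zScoreThreshold*stdDev          // 139
	lower := math.Max(predicted-zScoreThreshold*stdDev, 0) // 91

	// score = (observed - expected) / std dev
	score := (observed - predicted) / stdDev // 5.625 -> well outside the band

	fmt.Println(predicted, upper, lower, score)
}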

34
ee/anomaly/weekly.go Normal file
View File

@@ -0,0 +1,34 @@
package anomaly
import (
"context"
"github.com/SigNoz/signoz/pkg/valuer"
)
type WeeklyProvider struct {
BaseSeasonalProvider
}
var _ BaseProvider = (*WeeklyProvider)(nil)
func (wp *WeeklyProvider) GetBaseSeasonalProvider() *BaseSeasonalProvider {
return &wp.BaseSeasonalProvider
}
func NewWeeklyProvider(opts ...GenericProviderOption[*WeeklyProvider]) *WeeklyProvider {
wp := &WeeklyProvider{
BaseSeasonalProvider: BaseSeasonalProvider{},
}
for _, opt := range opts {
opt(wp)
}
return wp
}
func (p *WeeklyProvider) GetAnomalies(ctx context.Context, orgID valuer.UUID, req *AnomaliesRequest) (*AnomaliesResponse, error) {
req.Seasonality = SeasonalityWeekly
return p.getAnomalies(ctx, orgID, req)
}
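A sketch of how a caller might wire up one of these providers using the generic options defined in seasonal.go. The querier instance is assumed to come from the surrounding application; explicit type arguments are used to keep the generic instantiation unambiguous:

package anomalyexample

import (
	"log/slog"
	"os"

	"github.com/SigNoz/signoz/ee/anomaly"
	"github.com/SigNoz/signoz/pkg/querier"
)

// newWeekly is a hypothetical constructor showing the option pattern;
// q would be the application's querier.Querier instance.
func newWeekly(q querier.Querier) anomaly.Provider {
	logger := slog.New(slog.NewTextHandler(os.Stderr, nil))
	return anomaly.NewWeeklyProvider(
		anomaly.WithQuerier[*anomaly.WeeklyProvider](q),
		anomaly.WithLogger[*anomaly.WeeklyProvider](logger),
	)
}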

View File

@@ -1,85 +0,0 @@
package middleware
import (
"net/http"
"time"
eeTypes "github.com/SigNoz/signoz/ee/types"
"github.com/SigNoz/signoz/pkg/sqlstore"
"github.com/SigNoz/signoz/pkg/types"
"github.com/SigNoz/signoz/pkg/types/authtypes"
"go.uber.org/zap"
)
type Pat struct {
store sqlstore.SQLStore
uuid *authtypes.UUID
headers []string
}
func NewPat(store sqlstore.SQLStore, headers []string) *Pat {
return &Pat{store: store, uuid: authtypes.NewUUID(), headers: headers}
}
func (p *Pat) Wrap(next http.Handler) http.Handler {
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
var values []string
var patToken string
var pat eeTypes.StorablePersonalAccessToken
for _, header := range p.headers {
values = append(values, r.Header.Get(header))
}
ctx, err := p.uuid.ContextFromRequest(r.Context(), values...)
if err != nil {
next.ServeHTTP(w, r)
return
}
patToken, ok := authtypes.UUIDFromContext(ctx)
if !ok {
next.ServeHTTP(w, r)
return
}
err = p.store.BunDB().NewSelect().Model(&pat).Where("token = ?", patToken).Scan(r.Context())
if err != nil {
next.ServeHTTP(w, r)
return
}
if pat.ExpiresAt < time.Now().Unix() && pat.ExpiresAt != 0 {
next.ServeHTTP(w, r)
return
}
// get user from db
user := types.User{}
err = p.store.BunDB().NewSelect().Model(&user).Where("id = ?", pat.UserID).Scan(r.Context())
if err != nil {
next.ServeHTTP(w, r)
return
}
jwt := authtypes.Claims{
UserID: user.ID,
GroupID: user.GroupID,
Email: user.Email,
OrgID: user.OrgID,
}
ctx = authtypes.NewContextWithClaims(ctx, jwt)
r = r.WithContext(ctx)
next.ServeHTTP(w, r)
pat.LastUsed = time.Now().Unix()
_, err = p.store.BunDB().NewUpdate().Model(&pat).Column("last_used").Where("token = ?", patToken).Where("revoked = false").Exec(r.Context())
if err != nil {
zap.L().Error("Failed to update PAT last used in db, err: %v", zap.Error(err))
}
})
}

26
ee/licensing/config.go Normal file
View File

@@ -0,0 +1,26 @@
package licensing
import (
"fmt"
"sync"
"time"
"github.com/SigNoz/signoz/pkg/licensing"
)
var (
config licensing.Config
once sync.Once
)
// initializes the licensing configuration
func Config(pollInterval time.Duration, failureThreshold int) licensing.Config {
once.Do(func() {
config = licensing.Config{PollInterval: pollInterval, FailureThreshold: failureThreshold}
if err := config.Validate(); err != nil {
panic(fmt.Errorf("invalid licensing config: %w", err))
}
})
return config
}
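A brief sketch of how this once-initialized config behaves: the first call fixes the values, and later calls return the same struct regardless of their arguments. The durations below are made up:

package main

import (
	"fmt"
	"time"

	"github.com/SigNoz/signoz/ee/licensing"
)

func main() {
	// First call wins; subsequent calls return the cached config via once.Do.
	cfg := licensing.Config(24*time.Hour, 3)
	same := licensing.Config(time.Minute, 99) // arguments ignored on later calls

	fmt.Println(cfg.PollInterval == same.PollInterval) // true
}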

View File

@@ -0,0 +1,168 @@
package httplicensing
import (
"context"
"encoding/json"
"net/http"
"time"
"github.com/SigNoz/signoz/pkg/errors"
"github.com/SigNoz/signoz/pkg/http/render"
"github.com/SigNoz/signoz/pkg/licensing"
"github.com/SigNoz/signoz/pkg/types/authtypes"
"github.com/SigNoz/signoz/pkg/types/licensetypes"
"github.com/SigNoz/signoz/pkg/valuer"
)
type licensingAPI struct {
licensing licensing.Licensing
}
func NewLicensingAPI(licensing licensing.Licensing) licensing.API {
return &licensingAPI{licensing: licensing}
}
func (api *licensingAPI) Activate(rw http.ResponseWriter, r *http.Request) {
ctx, cancel := context.WithTimeout(r.Context(), 10*time.Second)
defer cancel()
claims, err := authtypes.ClaimsFromContext(ctx)
if err != nil {
render.Error(rw, err)
return
}
orgID, err := valuer.NewUUID(claims.OrgID)
if err != nil {
render.Error(rw, errors.Newf(errors.TypeInvalidInput, errors.CodeInvalidInput, "orgId is invalid"))
return
}
req := new(licensetypes.PostableLicense)
err = json.NewDecoder(r.Body).Decode(&req)
if err != nil {
render.Error(rw, err)
return
}
err = api.licensing.Activate(r.Context(), orgID, req.Key)
if err != nil {
render.Error(rw, err)
return
}
render.Success(rw, http.StatusAccepted, nil)
}
func (api *licensingAPI) GetActive(rw http.ResponseWriter, r *http.Request) {
ctx, cancel := context.WithTimeout(r.Context(), 10*time.Second)
defer cancel()
claims, err := authtypes.ClaimsFromContext(ctx)
if err != nil {
render.Error(rw, err)
return
}
orgID, err := valuer.NewUUID(claims.OrgID)
if err != nil {
render.Error(rw, errors.Newf(errors.TypeInvalidInput, errors.CodeInvalidInput, "orgId is invalid"))
return
}
license, err := api.licensing.GetActive(r.Context(), orgID)
if err != nil {
render.Error(rw, err)
return
}
gettableLicense := licensetypes.NewGettableLicense(license.Data, license.Key)
render.Success(rw, http.StatusOK, gettableLicense)
}
func (api *licensingAPI) Refresh(rw http.ResponseWriter, r *http.Request) {
ctx, cancel := context.WithTimeout(r.Context(), 10*time.Second)
defer cancel()
claims, err := authtypes.ClaimsFromContext(ctx)
if err != nil {
render.Error(rw, err)
return
}
orgID, err := valuer.NewUUID(claims.OrgID)
if err != nil {
render.Error(rw, errors.Newf(errors.TypeInvalidInput, errors.CodeInvalidInput, "orgId is invalid"))
return
}
err = api.licensing.Refresh(r.Context(), orgID)
if err != nil {
render.Error(rw, err)
return
}
render.Success(rw, http.StatusNoContent, nil)
}
func (api *licensingAPI) Checkout(rw http.ResponseWriter, r *http.Request) {
ctx, cancel := context.WithTimeout(r.Context(), 10*time.Second)
defer cancel()
claims, err := authtypes.ClaimsFromContext(ctx)
if err != nil {
render.Error(rw, err)
return
}
orgID, err := valuer.NewUUID(claims.OrgID)
if err != nil {
render.Error(rw, errors.Newf(errors.TypeInvalidInput, errors.CodeInvalidInput, "orgId is invalid"))
return
}
req := new(licensetypes.PostableSubscription)
if err := json.NewDecoder(r.Body).Decode(req); err != nil {
render.Error(rw, err)
return
}
gettableSubscription, err := api.licensing.Checkout(ctx, orgID, req)
if err != nil {
render.Error(rw, err)
return
}
render.Success(rw, http.StatusCreated, gettableSubscription)
}
func (api *licensingAPI) Portal(rw http.ResponseWriter, r *http.Request) {
ctx, cancel := context.WithTimeout(r.Context(), 10*time.Second)
defer cancel()
claims, err := authtypes.ClaimsFromContext(ctx)
if err != nil {
render.Error(rw, err)
return
}
orgID, err := valuer.NewUUID(claims.OrgID)
if err != nil {
render.Error(rw, errors.Newf(errors.TypeInvalidInput, errors.CodeInvalidInput, "orgId is invalid"))
return
}
req := new(licensetypes.PostableSubscription)
if err := json.NewDecoder(r.Body).Decode(req); err != nil {
render.Error(rw, err)
return
}
gettableSubscription, err := api.licensing.Portal(ctx, orgID, req)
if err != nil {
render.Error(rw, err)
return
}
render.Success(rw, http.StatusCreated, gettableSubscription)
}

View File

@@ -0,0 +1,249 @@
package httplicensing
import (
"context"
"encoding/json"
"time"
"github.com/SigNoz/signoz/ee/licensing/licensingstore/sqllicensingstore"
"github.com/SigNoz/signoz/pkg/analytics"
"github.com/SigNoz/signoz/pkg/errors"
"github.com/SigNoz/signoz/pkg/factory"
"github.com/SigNoz/signoz/pkg/licensing"
"github.com/SigNoz/signoz/pkg/modules/organization"
"github.com/SigNoz/signoz/pkg/sqlstore"
"github.com/SigNoz/signoz/pkg/types/analyticstypes"
"github.com/SigNoz/signoz/pkg/types/licensetypes"
"github.com/SigNoz/signoz/pkg/valuer"
"github.com/SigNoz/signoz/pkg/zeus"
"github.com/tidwall/gjson"
)
type provider struct {
store licensetypes.Store
zeus zeus.Zeus
config licensing.Config
settings factory.ScopedProviderSettings
orgGetter organization.Getter
analytics analytics.Analytics
stopChan chan struct{}
}
func NewProviderFactory(store sqlstore.SQLStore, zeus zeus.Zeus, orgGetter organization.Getter, analytics analytics.Analytics) factory.ProviderFactory[licensing.Licensing, licensing.Config] {
return factory.NewProviderFactory(factory.MustNewName("http"), func(ctx context.Context, providerSettings factory.ProviderSettings, config licensing.Config) (licensing.Licensing, error) {
return New(ctx, providerSettings, config, store, zeus, orgGetter, analytics)
})
}
func New(ctx context.Context, ps factory.ProviderSettings, config licensing.Config, sqlstore sqlstore.SQLStore, zeus zeus.Zeus, orgGetter organization.Getter, analytics analytics.Analytics) (licensing.Licensing, error) {
settings := factory.NewScopedProviderSettings(ps, "github.com/SigNoz/signoz/ee/licensing/httplicensing")
licensestore := sqllicensingstore.New(sqlstore)
return &provider{
store: licensestore,
zeus: zeus,
config: config,
settings: settings,
orgGetter: orgGetter,
stopChan: make(chan struct{}),
analytics: analytics,
}, nil
}
func (provider *provider) Start(ctx context.Context) error {
tick := time.NewTicker(provider.config.PollInterval)
defer tick.Stop()
err := provider.Validate(ctx)
if err != nil {
provider.settings.Logger().ErrorContext(ctx, "failed to validate license from upstream server", "error", err)
}
for {
select {
case <-provider.stopChan:
return nil
case <-tick.C:
err := provider.Validate(ctx)
if err != nil {
provider.settings.Logger().ErrorContext(ctx, "failed to validate license from upstream server", "error", err)
}
}
}
}
func (provider *provider) Stop(ctx context.Context) error {
provider.settings.Logger().DebugContext(ctx, "license validation stopped")
close(provider.stopChan)
return nil
}
func (provider *provider) Validate(ctx context.Context) error {
organizations, err := provider.orgGetter.ListByOwnedKeyRange(ctx)
if err != nil {
return err
}
for _, organization := range organizations {
err := provider.Refresh(ctx, organization.ID)
if err != nil {
return err
}
}
return nil
}
func (provider *provider) Activate(ctx context.Context, organizationID valuer.UUID, key string) error {
data, err := provider.zeus.GetLicense(ctx, key)
if err != nil {
return errors.Wrapf(err, errors.TypeInternal, errors.CodeInternal, "unable to fetch license data with upstream server")
}
license, err := licensetypes.NewLicense(data, organizationID)
if err != nil {
return errors.Wrapf(err, errors.TypeInternal, errors.CodeInternal, "failed to create license entity")
}
storableLicense := licensetypes.NewStorableLicenseFromLicense(license)
err = provider.store.Create(ctx, storableLicense)
if err != nil {
return err
}
return nil
}
func (provider *provider) GetActive(ctx context.Context, organizationID valuer.UUID) (*licensetypes.License, error) {
storableLicenses, err := provider.store.GetAll(ctx, organizationID)
if err != nil {
return nil, err
}
activeLicense, err := licensetypes.GetActiveLicenseFromStorableLicenses(storableLicenses, organizationID)
if err != nil {
return nil, err
}
return activeLicense, nil
}
func (provider *provider) Refresh(ctx context.Context, organizationID valuer.UUID) error {
activeLicense, err := provider.GetActive(ctx, organizationID)
if err != nil {
if errors.Ast(err, errors.TypeNotFound) {
return nil
}
provider.settings.Logger().ErrorContext(ctx, "license validation failed", "org_id", organizationID.StringValue())
return err
}
data, err := provider.zeus.GetLicense(ctx, activeLicense.Key)
if err != nil {
if time.Since(activeLicense.LastValidatedAt) > time.Duration(provider.config.FailureThreshold)*provider.config.PollInterval {
activeLicense.UpdateFeatures(licensetypes.BasicPlan)
updatedStorableLicense := licensetypes.NewStorableLicenseFromLicense(activeLicense)
err = provider.store.Update(ctx, organizationID, updatedStorableLicense)
if err != nil {
return err
}
return nil
}
return err
}
err = activeLicense.Update(data)
if err != nil {
return errors.Wrapf(err, errors.TypeInternal, errors.CodeInternal, "failed to create license entity from license data")
}
updatedStorableLicense := licensetypes.NewStorableLicenseFromLicense(activeLicense)
err = provider.store.Update(ctx, organizationID, updatedStorableLicense)
if err != nil {
return err
}
stats := licensetypes.NewStatsFromLicense(activeLicense)
provider.analytics.Send(ctx,
analyticstypes.Track{
UserId: "stats_" + organizationID.String(),
Event: "License Updated",
Properties: analyticstypes.NewPropertiesFromMap(stats),
Context: &analyticstypes.Context{
Extra: map[string]interface{}{
analyticstypes.KeyGroupID: organizationID.String(),
},
},
},
analyticstypes.Group{
UserId: "stats_" + organizationID.String(),
GroupId: organizationID.String(),
Traits: analyticstypes.NewTraitsFromMap(stats),
},
)
return nil
}
func (provider *provider) Checkout(ctx context.Context, organizationID valuer.UUID, postableSubscription *licensetypes.PostableSubscription) (*licensetypes.GettableSubscription, error) {
activeLicense, err := provider.GetActive(ctx, organizationID)
if err != nil {
return nil, err
}
body, err := json.Marshal(postableSubscription)
if err != nil {
return nil, errors.Wrapf(err, errors.TypeInvalidInput, errors.CodeInvalidInput, "failed to marshal checkout payload")
}
response, err := provider.zeus.GetCheckoutURL(ctx, activeLicense.Key, body)
if err != nil {
return nil, errors.Wrapf(err, errors.TypeInternal, errors.CodeInternal, "failed to generate checkout session")
}
return &licensetypes.GettableSubscription{RedirectURL: gjson.GetBytes(response, "url").String()}, nil
}
func (provider *provider) Portal(ctx context.Context, organizationID valuer.UUID, postableSubscription *licensetypes.PostableSubscription) (*licensetypes.GettableSubscription, error) {
activeLicense, err := provider.GetActive(ctx, organizationID)
if err != nil {
return nil, err
}
body, err := json.Marshal(postableSubscription)
if err != nil {
return nil, errors.Wrapf(err, errors.TypeInvalidInput, errors.CodeInvalidInput, "failed to marshal portal payload")
}
response, err := provider.zeus.GetPortalURL(ctx, activeLicense.Key, body)
if err != nil {
return nil, errors.Wrapf(err, errors.TypeInternal, errors.CodeInternal, "failed to generate portal session")
}
return &licensetypes.GettableSubscription{RedirectURL: gjson.GetBytes(response, "url").String()}, nil
}
func (provider *provider) GetFeatureFlags(ctx context.Context, organizationID valuer.UUID) ([]*licensetypes.Feature, error) {
license, err := provider.GetActive(ctx, organizationID)
if err != nil {
if errors.Ast(err, errors.TypeNotFound) {
return licensetypes.BasicPlan, nil
}
return nil, err
}
return license.Features, nil
}
func (provider *provider) Collect(ctx context.Context, orgID valuer.UUID) (map[string]any, error) {
activeLicense, err := provider.GetActive(ctx, orgID)
if err != nil {
if errors.Ast(err, errors.TypeNotFound) {
return map[string]any{}, nil
}
return nil, err
}
return licensetypes.NewStatsFromLicense(activeLicense), nil
}
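One way to read the fallback in Refresh above: when the upstream fetch keeps failing, the license is only downgraded to BasicPlan once it has gone unvalidated for FailureThreshold poll intervals. A tiny sketch with assumed config values:

package main

import (
	"fmt"
	"time"
)

func main() {
	// Assumed config: poll every 6 hours, tolerate 4 missed validations.
	pollInterval := 6 * time.Hour
	failureThreshold := 4

	// e.g. the upstream server has been unreachable for 30 hours.
	lastValidatedAt := time.Now().Add(-30 * time.Hour)

	// Mirrors the check in Refresh: downgrade only after threshold * interval.
	shouldDowngrade := time.Since(lastValidatedAt) > time.Duration(failureThreshold)*pollInterval
	fmt.Println(shouldDowngrade) // true: 30h > 24h
}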

View File

@@ -0,0 +1,81 @@
package sqllicensingstore
import (
"context"
"github.com/SigNoz/signoz/pkg/errors"
"github.com/SigNoz/signoz/pkg/sqlstore"
"github.com/SigNoz/signoz/pkg/types/licensetypes"
"github.com/SigNoz/signoz/pkg/valuer"
)
type store struct {
sqlstore sqlstore.SQLStore
}
func New(sqlstore sqlstore.SQLStore) licensetypes.Store {
return &store{sqlstore}
}
func (store *store) Create(ctx context.Context, storableLicense *licensetypes.StorableLicense) error {
_, err := store.
sqlstore.
BunDB().
NewInsert().
Model(storableLicense).
Exec(ctx)
if err != nil {
return store.sqlstore.WrapAlreadyExistsErrf(err, errors.CodeAlreadyExists, "license with ID: %s already exists", storableLicense.ID)
}
return nil
}
func (store *store) Get(ctx context.Context, organizationID valuer.UUID, licenseID valuer.UUID) (*licensetypes.StorableLicense, error) {
storableLicense := new(licensetypes.StorableLicense)
err := store.
sqlstore.
BunDB().
NewSelect().
Model(storableLicense).
Where("org_id = ?", organizationID).
Where("id = ?", licenseID).
Scan(ctx)
if err != nil {
return nil, store.sqlstore.WrapNotFoundErrf(err, errors.CodeNotFound, "license with ID: %s does not exist", licenseID)
}
return storableLicense, nil
}
func (store *store) GetAll(ctx context.Context, organizationID valuer.UUID) ([]*licensetypes.StorableLicense, error) {
storableLicenses := make([]*licensetypes.StorableLicense, 0)
err := store.
sqlstore.
BunDB().
NewSelect().
Model(&storableLicenses).
Where("org_id = ?", organizationID).
Scan(ctx)
if err != nil {
return nil, store.sqlstore.WrapNotFoundErrf(err, errors.CodeNotFound, "licenses for organizationID: %s does not exists", organizationID)
}
return storableLicenses, nil
}
func (store *store) Update(ctx context.Context, organizationID valuer.UUID, storableLicense *licensetypes.StorableLicense) error {
_, err := store.
sqlstore.
BunDB().
NewUpdate().
Model(storableLicense).
WherePK().
Where("org_id = ?", organizationID).
Exec(ctx)
if err != nil {
return errors.Wrapf(err, errors.TypeInternal, errors.CodeInternal, "unable to update license with ID: %s", storableLicense.ID)
}
return nil
}

View File

@@ -1,4 +0,0 @@
.vscode
README.md
signoz.db
bin

View File

@@ -5,6 +5,7 @@ import (
querierV2 "github.com/SigNoz/signoz/pkg/query-service/app/querier/v2"
"github.com/SigNoz/signoz/pkg/query-service/app/queryBuilder"
"github.com/SigNoz/signoz/pkg/valuer"
)
type DailyProvider struct {
@@ -28,17 +29,16 @@ func NewDailyProvider(opts ...GenericProviderOption[*DailyProvider]) *DailyProvi
}
dp.querierV2 = querierV2.NewQuerier(querierV2.QuerierOptions{
Reader: dp.reader,
Cache: dp.cache,
KeyGenerator: queryBuilder.NewKeyGenerator(),
FluxInterval: dp.fluxInterval,
FeatureLookup: dp.ff,
Reader: dp.reader,
Cache: dp.cache,
KeyGenerator: queryBuilder.NewKeyGenerator(),
FluxInterval: dp.fluxInterval,
})
return dp
}
func (p *DailyProvider) GetAnomalies(ctx context.Context, req *GetAnomaliesRequest) (*GetAnomaliesResponse, error) {
func (p *DailyProvider) GetAnomalies(ctx context.Context, orgID valuer.UUID, req *GetAnomaliesRequest) (*GetAnomaliesResponse, error) {
req.Seasonality = SeasonalityDaily
return p.getAnomalies(ctx, req)
return p.getAnomalies(ctx, orgID, req)
}

View File

@@ -5,6 +5,7 @@ import (
querierV2 "github.com/SigNoz/signoz/pkg/query-service/app/querier/v2"
"github.com/SigNoz/signoz/pkg/query-service/app/queryBuilder"
"github.com/SigNoz/signoz/pkg/valuer"
)
type HourlyProvider struct {
@@ -28,17 +29,16 @@ func NewHourlyProvider(opts ...GenericProviderOption[*HourlyProvider]) *HourlyPr
}
hp.querierV2 = querierV2.NewQuerier(querierV2.QuerierOptions{
Reader: hp.reader,
Cache: hp.cache,
KeyGenerator: queryBuilder.NewKeyGenerator(),
FluxInterval: hp.fluxInterval,
FeatureLookup: hp.ff,
Reader: hp.reader,
Cache: hp.cache,
KeyGenerator: queryBuilder.NewKeyGenerator(),
FluxInterval: hp.fluxInterval,
})
return hp
}
func (p *HourlyProvider) GetAnomalies(ctx context.Context, req *GetAnomaliesRequest) (*GetAnomaliesResponse, error) {
func (p *HourlyProvider) GetAnomalies(ctx context.Context, orgID valuer.UUID, req *GetAnomaliesRequest) (*GetAnomaliesResponse, error) {
req.Seasonality = SeasonalityHourly
return p.getAnomalies(ctx, req)
return p.getAnomalies(ctx, orgID, req)
}

View File

@@ -2,8 +2,10 @@ package anomaly
import (
"context"
"github.com/SigNoz/signoz/pkg/valuer"
)
type Provider interface {
GetAnomalies(ctx context.Context, req *GetAnomaliesRequest) (*GetAnomaliesResponse, error)
GetAnomalies(ctx context.Context, orgID valuer.UUID, req *GetAnomaliesRequest) (*GetAnomaliesResponse, error)
}

View File

@@ -5,11 +5,12 @@ import (
"math"
"time"
"github.com/SigNoz/signoz/pkg/query-service/cache"
"github.com/SigNoz/signoz/pkg/cache"
"github.com/SigNoz/signoz/pkg/query-service/interfaces"
v3 "github.com/SigNoz/signoz/pkg/query-service/model/v3"
"github.com/SigNoz/signoz/pkg/query-service/postprocess"
"github.com/SigNoz/signoz/pkg/query-service/utils/labels"
"github.com/SigNoz/signoz/pkg/valuer"
"go.uber.org/zap"
)
@@ -38,12 +39,6 @@ func WithKeyGenerator[T BaseProvider](keyGenerator cache.KeyGenerator) GenericPr
}
}
func WithFeatureLookup[T BaseProvider](ff interfaces.FeatureLookup) GenericProviderOption[T] {
return func(p T) {
p.GetBaseSeasonalProvider().ff = ff
}
}
func WithReader[T BaseProvider](reader interfaces.Reader) GenericProviderOption[T] {
return func(p T) {
p.GetBaseSeasonalProvider().reader = reader
@@ -56,7 +51,6 @@ type BaseSeasonalProvider struct {
fluxInterval time.Duration
cache cache.Cache
keyGenerator cache.KeyGenerator
ff interfaces.FeatureLookup
}
func (p *BaseSeasonalProvider) getQueryParams(req *GetAnomaliesRequest) *anomalyQueryParams {
@@ -66,9 +60,9 @@ func (p *BaseSeasonalProvider) getQueryParams(req *GetAnomaliesRequest) *anomaly
return prepareAnomalyQueryParams(req.Params, req.Seasonality)
}
func (p *BaseSeasonalProvider) getResults(ctx context.Context, params *anomalyQueryParams) (*anomalyQueryResults, error) {
func (p *BaseSeasonalProvider) getResults(ctx context.Context, orgID valuer.UUID, params *anomalyQueryParams) (*anomalyQueryResults, error) {
zap.L().Info("fetching results for current period", zap.Any("currentPeriodQuery", params.CurrentPeriodQuery))
currentPeriodResults, _, err := p.querierV2.QueryRange(ctx, params.CurrentPeriodQuery)
currentPeriodResults, _, err := p.querierV2.QueryRange(ctx, orgID, params.CurrentPeriodQuery)
if err != nil {
return nil, err
}
@@ -79,7 +73,7 @@ func (p *BaseSeasonalProvider) getResults(ctx context.Context, params *anomalyQu
}
zap.L().Info("fetching results for past period", zap.Any("pastPeriodQuery", params.PastPeriodQuery))
pastPeriodResults, _, err := p.querierV2.QueryRange(ctx, params.PastPeriodQuery)
pastPeriodResults, _, err := p.querierV2.QueryRange(ctx, orgID, params.PastPeriodQuery)
if err != nil {
return nil, err
}
@@ -90,7 +84,7 @@ func (p *BaseSeasonalProvider) getResults(ctx context.Context, params *anomalyQu
}
zap.L().Info("fetching results for current season", zap.Any("currentSeasonQuery", params.CurrentSeasonQuery))
currentSeasonResults, _, err := p.querierV2.QueryRange(ctx, params.CurrentSeasonQuery)
currentSeasonResults, _, err := p.querierV2.QueryRange(ctx, orgID, params.CurrentSeasonQuery)
if err != nil {
return nil, err
}
@@ -101,7 +95,7 @@ func (p *BaseSeasonalProvider) getResults(ctx context.Context, params *anomalyQu
}
zap.L().Info("fetching results for past season", zap.Any("pastSeasonQuery", params.PastSeasonQuery))
pastSeasonResults, _, err := p.querierV2.QueryRange(ctx, params.PastSeasonQuery)
pastSeasonResults, _, err := p.querierV2.QueryRange(ctx, orgID, params.PastSeasonQuery)
if err != nil {
return nil, err
}
@@ -112,7 +106,7 @@ func (p *BaseSeasonalProvider) getResults(ctx context.Context, params *anomalyQu
}
zap.L().Info("fetching results for past 2 season", zap.Any("past2SeasonQuery", params.Past2SeasonQuery))
past2SeasonResults, _, err := p.querierV2.QueryRange(ctx, params.Past2SeasonQuery)
past2SeasonResults, _, err := p.querierV2.QueryRange(ctx, orgID, params.Past2SeasonQuery)
if err != nil {
return nil, err
}
@@ -123,7 +117,7 @@ func (p *BaseSeasonalProvider) getResults(ctx context.Context, params *anomalyQu
}
zap.L().Info("fetching results for past 3 season", zap.Any("past3SeasonQuery", params.Past3SeasonQuery))
past3SeasonResults, _, err := p.querierV2.QueryRange(ctx, params.Past3SeasonQuery)
past3SeasonResults, _, err := p.querierV2.QueryRange(ctx, orgID, params.Past3SeasonQuery)
if err != nil {
return nil, err
}
@@ -342,9 +336,9 @@ func (p *BaseSeasonalProvider) getAnomalyScores(
return anomalyScoreSeries
}
func (p *BaseSeasonalProvider) getAnomalies(ctx context.Context, req *GetAnomaliesRequest) (*GetAnomaliesResponse, error) {
func (p *BaseSeasonalProvider) getAnomalies(ctx context.Context, orgID valuer.UUID, req *GetAnomaliesRequest) (*GetAnomaliesResponse, error) {
anomalyParams := p.getQueryParams(req)
anomalyQueryResults, err := p.getResults(ctx, anomalyParams)
anomalyQueryResults, err := p.getResults(ctx, orgID, anomalyParams)
if err != nil {
return nil, err
}

View File

@@ -5,6 +5,7 @@ import (
querierV2 "github.com/SigNoz/signoz/pkg/query-service/app/querier/v2"
"github.com/SigNoz/signoz/pkg/query-service/app/queryBuilder"
"github.com/SigNoz/signoz/pkg/valuer"
)
type WeeklyProvider struct {
@@ -27,17 +28,16 @@ func NewWeeklyProvider(opts ...GenericProviderOption[*WeeklyProvider]) *WeeklyPr
}
wp.querierV2 = querierV2.NewQuerier(querierV2.QuerierOptions{
Reader: wp.reader,
Cache: wp.cache,
KeyGenerator: queryBuilder.NewKeyGenerator(),
FluxInterval: wp.fluxInterval,
FeatureLookup: wp.ff,
Reader: wp.reader,
Cache: wp.cache,
KeyGenerator: queryBuilder.NewKeyGenerator(),
FluxInterval: wp.fluxInterval,
})
return wp
}
func (p *WeeklyProvider) GetAnomalies(ctx context.Context, req *GetAnomaliesRequest) (*GetAnomaliesResponse, error) {
func (p *WeeklyProvider) GetAnomalies(ctx context.Context, orgID valuer.UUID, req *GetAnomaliesRequest) (*GetAnomaliesResponse, error) {
req.Seasonality = SeasonalityWeekly
return p.getAnomalies(ctx, req)
return p.getAnomalies(ctx, orgID, req)
}

View File

@@ -5,18 +5,18 @@ import (
"net/http/httputil"
"time"
"github.com/SigNoz/signoz/ee/query-service/dao"
"github.com/SigNoz/signoz/ee/licensing/httplicensing"
"github.com/SigNoz/signoz/ee/query-service/integrations/gateway"
"github.com/SigNoz/signoz/ee/query-service/interfaces"
"github.com/SigNoz/signoz/ee/query-service/license"
"github.com/SigNoz/signoz/ee/query-service/usage"
"github.com/SigNoz/signoz/pkg/alertmanager"
"github.com/SigNoz/signoz/pkg/apis/fields"
"github.com/SigNoz/signoz/pkg/http/middleware"
querierAPI "github.com/SigNoz/signoz/pkg/querier"
baseapp "github.com/SigNoz/signoz/pkg/query-service/app"
"github.com/SigNoz/signoz/pkg/query-service/app/cloudintegrations"
"github.com/SigNoz/signoz/pkg/query-service/app/integrations"
"github.com/SigNoz/signoz/pkg/query-service/app/logparsingpipeline"
"github.com/SigNoz/signoz/pkg/query-service/cache"
baseint "github.com/SigNoz/signoz/pkg/query-service/interfaces"
"github.com/SigNoz/signoz/pkg/query-service/interfaces"
basemodel "github.com/SigNoz/signoz/pkg/query-service/model"
rules "github.com/SigNoz/signoz/pkg/query-service/rules"
"github.com/SigNoz/signoz/pkg/signoz"
@@ -26,18 +26,12 @@ import (
)
type APIHandlerOptions struct {
DataConnector interfaces.DataConnector
SkipConfig *basemodel.SkipConfig
PreferSpanMetrics bool
AppDao dao.ModelDao
DataConnector interfaces.Reader
RulesManager *rules.Manager
UsageManager *usage.Manager
FeatureFlags baseint.FeatureLookup
LicenseManager *license.Manager
IntegrationsController *integrations.Controller
CloudIntegrationsController *cloudintegrations.Controller
LogsParsingPipelineController *logparsingpipeline.LogParsingPipelineController
Cache cache.Cache
Gateway *httputil.ReverseProxy
GatewayUrl string
// Querier Influx Interval
@@ -54,23 +48,18 @@ type APIHandler struct {
// NewAPIHandler returns an APIHandler
func NewAPIHandler(opts APIHandlerOptions, signoz *signoz.SigNoz) (*APIHandler, error) {
baseHandler, err := baseapp.NewAPIHandler(baseapp.APIHandlerOpts{
Reader: opts.DataConnector,
SkipConfig: opts.SkipConfig,
PreferSpanMetrics: opts.PreferSpanMetrics,
AppDao: opts.AppDao,
RuleManager: opts.RulesManager,
FeatureFlags: opts.FeatureFlags,
IntegrationsController: opts.IntegrationsController,
CloudIntegrationsController: opts.CloudIntegrationsController,
LogsParsingPipelineController: opts.LogsParsingPipelineController,
Cache: opts.Cache,
FluxInterval: opts.FluxInterval,
UseLogsNewSchema: opts.UseLogsNewSchema,
UseTraceNewSchema: opts.UseTraceNewSchema,
AlertmanagerAPI: alertmanager.NewAPI(signoz.Alertmanager),
LicensingAPI: httplicensing.NewLicensingAPI(signoz.Licensing),
FieldsAPI: fields.NewAPI(signoz.Instrumentation.ToProviderSettings(), signoz.TelemetryStore),
Signoz: signoz,
QuerierAPI: querierAPI.NewAPI(signoz.Instrumentation.ToProviderSettings(), signoz.Querier, signoz.Analytics),
})
if err != nil {
@@ -84,103 +73,48 @@ func NewAPIHandler(opts APIHandlerOptions, signoz *signoz.SigNoz) (*APIHandler,
return ah, nil
}
func (ah *APIHandler) FF() baseint.FeatureLookup {
return ah.opts.FeatureFlags
}
func (ah *APIHandler) RM() *rules.Manager {
return ah.opts.RulesManager
}
func (ah *APIHandler) LM() *license.Manager {
return ah.opts.LicenseManager
}
func (ah *APIHandler) UM() *usage.Manager {
return ah.opts.UsageManager
}
func (ah *APIHandler) AppDao() dao.ModelDao {
return ah.opts.AppDao
}
func (ah *APIHandler) Gateway() *httputil.ReverseProxy {
return ah.opts.Gateway
}
func (ah *APIHandler) CheckFeature(f string) bool {
err := ah.FF().CheckFeature(f)
return err == nil
}
// RegisterRoutes registers routes for this handler on the given router
func (ah *APIHandler) RegisterRoutes(router *mux.Router, am *baseapp.AuthMiddleware) {
func (ah *APIHandler) RegisterRoutes(router *mux.Router, am *middleware.AuthZ) {
// note: add ee override methods first
// routes available only in ee version
router.HandleFunc("/api/v1/featureFlags",
am.OpenAccess(ah.getFeatureFlags)).
Methods(http.MethodGet)
router.HandleFunc("/api/v1/loginPrecheck",
am.OpenAccess(ah.precheckLogin)).
Methods(http.MethodGet)
router.HandleFunc("/api/v1/features", am.ViewAccess(ah.getFeatureFlags)).Methods(http.MethodGet)
// paid plans specific routes
router.HandleFunc("/api/v1/complete/saml",
am.OpenAccess(ah.receiveSAML)).
Methods(http.MethodPost)
router.HandleFunc("/api/v1/complete/google",
am.OpenAccess(ah.receiveGoogleAuth)).
Methods(http.MethodGet)
router.HandleFunc("/api/v1/orgs/{orgId}/domains",
am.AdminAccess(ah.listDomainsByOrg)).
Methods(http.MethodGet)
router.HandleFunc("/api/v1/domains",
am.AdminAccess(ah.postDomain)).
Methods(http.MethodPost)
router.HandleFunc("/api/v1/domains/{id}",
am.AdminAccess(ah.putDomain)).
Methods(http.MethodPut)
router.HandleFunc("/api/v1/domains/{id}",
am.AdminAccess(ah.deleteDomain)).
Methods(http.MethodDelete)
router.HandleFunc("/api/v1/complete/saml", am.OpenAccess(ah.receiveSAML)).Methods(http.MethodPost)
// base overrides
router.HandleFunc("/api/v1/version", am.OpenAccess(ah.getVersion)).Methods(http.MethodGet)
router.HandleFunc("/api/v1/invite/{token}", am.OpenAccess(ah.getInvite)).Methods(http.MethodGet)
router.HandleFunc("/api/v1/register", am.OpenAccess(ah.registerUser)).Methods(http.MethodPost)
router.HandleFunc("/api/v1/login", am.OpenAccess(ah.loginUser)).Methods(http.MethodPost)
router.HandleFunc("/api/v1/traces/{traceId}", am.ViewAccess(ah.searchTraces)).Methods(http.MethodGet)
// PAT APIs
router.HandleFunc("/api/v1/pats", am.AdminAccess(ah.createPAT)).Methods(http.MethodPost)
router.HandleFunc("/api/v1/pats", am.AdminAccess(ah.getPATs)).Methods(http.MethodGet)
router.HandleFunc("/api/v1/pats/{id}", am.AdminAccess(ah.updatePAT)).Methods(http.MethodPut)
router.HandleFunc("/api/v1/pats/{id}", am.AdminAccess(ah.revokePAT)).Methods(http.MethodDelete)
router.HandleFunc("/api/v1/checkout", am.AdminAccess(ah.checkout)).Methods(http.MethodPost)
router.HandleFunc("/api/v1/checkout", am.AdminAccess(ah.LicensingAPI.Checkout)).Methods(http.MethodPost)
router.HandleFunc("/api/v1/billing", am.AdminAccess(ah.getBilling)).Methods(http.MethodGet)
router.HandleFunc("/api/v1/portal", am.AdminAccess(ah.portalSession)).Methods(http.MethodPost)
router.HandleFunc("/api/v1/dashboards/{uuid}/lock", am.EditAccess(ah.lockDashboard)).Methods(http.MethodPut)
router.HandleFunc("/api/v1/dashboards/{uuid}/unlock", am.EditAccess(ah.unlockDashboard)).Methods(http.MethodPut)
router.HandleFunc("/api/v1/portal", am.AdminAccess(ah.LicensingAPI.Portal)).Methods(http.MethodPost)
// v3
router.HandleFunc("/api/v3/licenses", am.ViewAccess(ah.listLicensesV3)).Methods(http.MethodGet)
router.HandleFunc("/api/v3/licenses", am.AdminAccess(ah.applyLicenseV3)).Methods(http.MethodPost)
router.HandleFunc("/api/v3/licenses", am.AdminAccess(ah.refreshLicensesV3)).Methods(http.MethodPut)
router.HandleFunc("/api/v3/licenses/active", am.ViewAccess(ah.getActiveLicenseV3)).Methods(http.MethodGet)
router.HandleFunc("/api/v3/licenses", am.AdminAccess(ah.LicensingAPI.Activate)).Methods(http.MethodPost)
router.HandleFunc("/api/v3/licenses", am.AdminAccess(ah.LicensingAPI.Refresh)).Methods(http.MethodPut)
router.HandleFunc("/api/v3/licenses/active", am.ViewAccess(ah.LicensingAPI.GetActive)).Methods(http.MethodGet)
// v4
router.HandleFunc("/api/v4/query_range", am.ViewAccess(ah.queryRangeV4)).Methods(http.MethodPost)
// v5
router.HandleFunc("/api/v5/query_range", am.ViewAccess(ah.queryRangeV5)).Methods(http.MethodPost)
router.HandleFunc("/api/v5/substitute_vars", am.ViewAccess(ah.QuerierAPI.ReplaceVariables)).Methods(http.MethodPost)
// Gateway
router.PathPrefix(gateway.RoutePrefix).HandlerFunc(am.EditAccess(ah.ServeGatewayHTTP))
@@ -188,7 +122,7 @@ func (ah *APIHandler) RegisterRoutes(router *mux.Router, am *baseapp.AuthMiddlew
}
func (ah *APIHandler) RegisterCloudIntegrationsRoutes(router *mux.Router, am *baseapp.AuthMiddleware) {
func (ah *APIHandler) RegisterCloudIntegrationsRoutes(router *mux.Router, am *middleware.AuthZ) {
ah.APIHandler.RegisterCloudIntegrationsRoutes(router, am)

View File

@@ -3,192 +3,16 @@ package api
import (
"context"
"encoding/base64"
"encoding/json"
"fmt"
"io"
"net/http"
"net/url"
"github.com/gorilla/mux"
"go.uber.org/zap"
"github.com/SigNoz/signoz/ee/query-service/constants"
"github.com/SigNoz/signoz/ee/query-service/model"
baseauth "github.com/SigNoz/signoz/pkg/query-service/auth"
basemodel "github.com/SigNoz/signoz/pkg/query-service/model"
"github.com/SigNoz/signoz/pkg/query-service/constants"
"github.com/SigNoz/signoz/pkg/valuer"
)
func parseRequest(r *http.Request, req interface{}) error {
defer r.Body.Close()
requestBody, err := io.ReadAll(r.Body)
if err != nil {
return err
}
err = json.Unmarshal(requestBody, &req)
return err
}
// loginUser overrides base handler and considers SSO case.
func (ah *APIHandler) loginUser(w http.ResponseWriter, r *http.Request) {
req := basemodel.LoginRequest{}
err := parseRequest(r, &req)
if err != nil {
RespondError(w, model.BadRequest(err), nil)
return
}
ctx := context.Background()
if req.Email != "" && ah.CheckFeature(model.SSO) {
var apierr basemodel.BaseApiError
_, apierr = ah.AppDao().CanUsePassword(ctx, req.Email)
if apierr != nil && !apierr.IsNil() {
RespondError(w, apierr, nil)
}
}
// if all looks good, call auth
resp, err := baseauth.Login(ctx, &req, ah.opts.JWT)
if ah.HandleError(w, err, http.StatusUnauthorized) {
return
}
ah.WriteJSON(w, r, resp)
}
// registerUser registers a user and responds with a precheck
// so the front-end can decide the login method
func (ah *APIHandler) registerUser(w http.ResponseWriter, r *http.Request) {
if !ah.CheckFeature(model.SSO) {
ah.APIHandler.Register(w, r)
return
}
ctx := context.Background()
var req *baseauth.RegisterRequest
defer r.Body.Close()
requestBody, err := io.ReadAll(r.Body)
if err != nil {
zap.L().Error("received no input in api", zap.Error(err))
RespondError(w, model.BadRequest(err), nil)
return
}
err = json.Unmarshal(requestBody, &req)
if err != nil {
zap.L().Error("received invalid user registration request", zap.Error(err))
RespondError(w, model.BadRequest(fmt.Errorf("failed to register user")), nil)
return
}
// get invite object
invite, err := baseauth.ValidateInvite(ctx, req)
if err != nil {
zap.L().Error("failed to validate invite token", zap.Error(err))
RespondError(w, model.BadRequest(err), nil)
return
}
if invite == nil {
zap.L().Error("failed to validate invite token: it is either empty or invalid", zap.Error(err))
RespondError(w, model.BadRequest(basemodel.ErrSignupFailed{}), nil)
return
}
// get auth domain from email domain
domain, apierr := ah.AppDao().GetDomainByEmail(ctx, invite.Email)
if apierr != nil {
zap.L().Error("failed to get domain from email", zap.Error(apierr))
RespondError(w, model.InternalError(basemodel.ErrSignupFailed{}), nil)
return
}
precheckResp := &basemodel.PrecheckResponse{
SSO: false,
IsUser: false,
}
if domain != nil && domain.SsoEnabled {
// sso is enabled, create user and respond precheck data
user, apierr := baseauth.RegisterInvitedUser(ctx, req, true)
if apierr != nil {
RespondError(w, apierr, nil)
return
}
var precheckError basemodel.BaseApiError
precheckResp, precheckError = ah.AppDao().PrecheckLogin(ctx, user.Email, req.SourceUrl)
if precheckError != nil {
RespondError(w, precheckError, precheckResp)
return
}
} else {
// no-sso, validate password
if err := baseauth.ValidatePassword(req.Password); err != nil {
RespondError(w, model.InternalError(fmt.Errorf("password is not in a valid format")), nil)
return
}
_, registerError := baseauth.Register(ctx, req, ah.Signoz.Alertmanager)
if !registerError.IsNil() {
RespondError(w, registerError, nil)
return
}
precheckResp.IsUser = true
}
ah.Respond(w, precheckResp)
}
// getInvite returns the invite object details for the given invite token. We do not need to
// protect this API because invite token itself is meant to be private.
func (ah *APIHandler) getInvite(w http.ResponseWriter, r *http.Request) {
token := mux.Vars(r)["token"]
sourceUrl := r.URL.Query().Get("ref")
ctx := context.Background()
inviteObject, err := baseauth.GetInvite(context.Background(), token)
if err != nil {
RespondError(w, model.BadRequest(err), nil)
return
}
resp := model.GettableInvitation{
InvitationResponseObject: inviteObject,
}
precheck, apierr := ah.AppDao().PrecheckLogin(ctx, inviteObject.Email, sourceUrl)
resp.Precheck = precheck
if apierr != nil {
RespondError(w, apierr, resp)
return
}
ah.WriteJSON(w, r, resp)
}
// precheckLogin enables the browser login page to display the appropriate
// login methods
func (ah *APIHandler) precheckLogin(w http.ResponseWriter, r *http.Request) {
ctx := context.Background()
email := r.URL.Query().Get("email")
sourceUrl := r.URL.Query().Get("ref")
resp, apierr := ah.AppDao().PrecheckLogin(ctx, email, sourceUrl)
if apierr != nil {
RespondError(w, apierr, resp)
return
}
ah.Respond(w, resp)
}
func handleSsoError(w http.ResponseWriter, r *http.Request, redirectURL string) {
ssoError := []byte("Login failed. Please contact your system administrator")
dst := make([]byte, base64.StdEncoding.EncodedLen(len(ssoError)))
@@ -197,84 +21,12 @@ func handleSsoError(w http.ResponseWriter, r *http.Request, redirectURL string)
http.Redirect(w, r, fmt.Sprintf("%s?ssoerror=%s", redirectURL, string(dst)), http.StatusSeeOther)
}
// receiveGoogleAuth completes google OAuth response and forwards a request
// to front-end to sign user in
func (ah *APIHandler) receiveGoogleAuth(w http.ResponseWriter, r *http.Request) {
redirectUri := constants.GetDefaultSiteURL()
ctx := context.Background()
if !ah.CheckFeature(model.SSO) {
zap.L().Error("[receiveGoogleAuth] sso requested but feature unavailable in org domain")
http.Redirect(w, r, fmt.Sprintf("%s?ssoerror=%s", redirectUri, "feature unavailable, please upgrade your billing plan to access this feature"), http.StatusMovedPermanently)
return
}
q := r.URL.Query()
if errType := q.Get("error"); errType != "" {
zap.L().Error("[receiveGoogleAuth] failed to login with google auth", zap.String("error", errType), zap.String("error_description", q.Get("error_description")))
http.Redirect(w, r, fmt.Sprintf("%s?ssoerror=%s", redirectUri, "failed to login through SSO "), http.StatusMovedPermanently)
return
}
relayState := q.Get("state")
zap.L().Debug("[receiveGoogleAuth] relay state received", zap.String("state", relayState))
parsedState, err := url.Parse(relayState)
if err != nil || relayState == "" {
zap.L().Error("[receiveGoogleAuth] failed to process response - invalid response from IDP", zap.Error(err), zap.Any("request", r))
handleSsoError(w, r, redirectUri)
return
}
// upgrade redirect url from the relay state for better accuracy
redirectUri = fmt.Sprintf("%s://%s%s", parsedState.Scheme, parsedState.Host, "/login")
// fetch domain by parsing relay state.
domain, err := ah.AppDao().GetDomainFromSsoResponse(ctx, parsedState)
if err != nil {
handleSsoError(w, r, redirectUri)
return
}
// now that we have domain, use domain to fetch sso settings.
// prepare google callback handler using parsedState -
// which contains redirect URL (front-end endpoint)
callbackHandler, err := domain.PrepareGoogleOAuthProvider(parsedState)
if err != nil {
zap.L().Error("[receiveGoogleAuth] failed to prepare google oauth provider", zap.String("domain", domain.String()), zap.Error(err))
handleSsoError(w, r, redirectUri)
return
}
identity, err := callbackHandler.HandleCallback(r)
if err != nil {
zap.L().Error("[receiveGoogleAuth] failed to process HandleCallback ", zap.String("domain", domain.String()), zap.Error(err))
handleSsoError(w, r, redirectUri)
return
}
nextPage, err := ah.AppDao().PrepareSsoRedirect(ctx, redirectUri, identity.Email, ah.opts.JWT)
if err != nil {
zap.L().Error("[receiveGoogleAuth] failed to generate redirect URI after successful login ", zap.String("domain", domain.String()), zap.Error(err))
handleSsoError(w, r, redirectUri)
return
}
http.Redirect(w, r, nextPage, http.StatusSeeOther)
}
// receiveSAML completes a SAML request and gets user logged in
func (ah *APIHandler) receiveSAML(w http.ResponseWriter, r *http.Request) {
// this is the source url that initiated the login request
redirectUri := constants.GetDefaultSiteURL()
ctx := context.Background()
if !ah.CheckFeature(model.SSO) {
zap.L().Error("[receiveSAML] sso requested but feature unavailable in org domain")
http.Redirect(w, r, fmt.Sprintf("%s?ssoerror=%s", redirectUri, "feature unavailable, please upgrade your billing plan to access this feature"), http.StatusMovedPermanently)
return
}
err := r.ParseForm()
if err != nil {
zap.L().Error("[receiveSAML] failed to process response - invalid response from IDP", zap.Error(err), zap.Any("request", r))
@@ -298,12 +50,25 @@ func (ah *APIHandler) receiveSAML(w http.ResponseWriter, r *http.Request) {
redirectUri = fmt.Sprintf("%s://%s%s", parsedState.Scheme, parsedState.Host, "/login")
// fetch domain by parsing relay state.
domain, err := ah.AppDao().GetDomainFromSsoResponse(ctx, parsedState)
domain, err := ah.Signoz.Modules.User.GetDomainFromSsoResponse(ctx, parsedState)
if err != nil {
handleSsoError(w, r, redirectUri)
return
}
orgID, err := valuer.NewUUID(domain.OrgID)
if err != nil {
handleSsoError(w, r, redirectUri)
return
}
_, err = ah.Signoz.Licensing.GetActive(ctx, orgID)
if err != nil {
zap.L().Error("[receiveSAML] sso requested but feature unavailable in org domain")
http.Redirect(w, r, fmt.Sprintf("%s?ssoerror=%s", redirectUri, "feature unavailable, please upgrade your billing plan to access this feature"), http.StatusMovedPermanently)
return
}
sp, err := domain.PrepareSamlRequest(parsedState)
if err != nil {
zap.L().Error("[receiveSAML] failed to prepare saml request for domain", zap.String("domain", domain.String()), zap.Error(err))
@@ -331,7 +96,7 @@ func (ah *APIHandler) receiveSAML(w http.ResponseWriter, r *http.Request) {
return
}
nextPage, err := ah.AppDao().PrepareSsoRedirect(ctx, redirectUri, email, ah.opts.JWT)
nextPage, err := ah.Signoz.Modules.User.PrepareSsoRedirect(ctx, redirectUri, email)
if err != nil {
zap.L().Error("[receiveSAML] failed to generate redirect URI after successful login ", zap.String("domain", domain.String()), zap.Error(err))
handleSsoError(w, r, redirectUri)
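A side note on the ssoerror redirects used throughout the handlers above: handleSsoError base64-encodes a fixed failure message and appends it to the login page URL as a query parameter. A minimal round-trip sketch using only the standard library; the encode step mirrors the handler, the decode step shows what a client would do with the parameter value.

package main

import (
	"encoding/base64"
	"fmt"
)

func main() {
	// Server side (as in handleSsoError): encode the message before putting it in the URL.
	msg := "Login failed. Please contact your system administrator"
	encoded := base64.StdEncoding.EncodeToString([]byte(msg))
	redirect := fmt.Sprintf("https://signoz.example.com/login?ssoerror=%s", encoded)
	fmt.Println(redirect)

	// Client side: recover the original message from the parameter value.
	decoded, err := base64.StdEncoding.DecodeString(encoded)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(decoded))
}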


@@ -11,12 +11,12 @@ import (
"time"
"github.com/SigNoz/signoz/ee/query-service/constants"
eeTypes "github.com/SigNoz/signoz/ee/types"
"github.com/SigNoz/signoz/pkg/query-service/auth"
baseconstants "github.com/SigNoz/signoz/pkg/query-service/constants"
"github.com/SigNoz/signoz/pkg/query-service/dao"
"github.com/SigNoz/signoz/pkg/errors"
"github.com/SigNoz/signoz/pkg/http/render"
basemodel "github.com/SigNoz/signoz/pkg/query-service/model"
"github.com/SigNoz/signoz/pkg/types"
"github.com/SigNoz/signoz/pkg/types/authtypes"
"github.com/SigNoz/signoz/pkg/valuer"
"github.com/google/uuid"
"github.com/gorilla/mux"
"go.uber.org/zap"
@@ -30,6 +30,18 @@ type CloudIntegrationConnectionParamsResponse struct {
}
func (ah *APIHandler) CloudIntegrationsGenerateConnectionParams(w http.ResponseWriter, r *http.Request) {
claims, err := authtypes.ClaimsFromContext(r.Context())
if err != nil {
render.Error(w, err)
return
}
orgID, err := valuer.NewUUID(claims.OrgID)
if err != nil {
render.Error(w, errors.Newf(errors.TypeInvalidInput, errors.CodeInvalidInput, "orgId is invalid"))
return
}
cloudProvider := mux.Vars(r)["cloudProvider"]
if cloudProvider != "aws" {
RespondError(w, basemodel.BadRequest(fmt.Errorf(
@@ -38,15 +50,7 @@ func (ah *APIHandler) CloudIntegrationsGenerateConnectionParams(w http.ResponseW
return
}
currentUser, err := auth.GetUserFromReqContext(r.Context())
if err != nil {
RespondError(w, basemodel.UnauthorizedError(fmt.Errorf(
"couldn't deduce current user: %w", err,
)), nil)
return
}
apiKey, apiErr := ah.getOrCreateCloudIntegrationPAT(r.Context(), currentUser.OrgID, cloudProvider)
apiKey, apiErr := ah.getOrCreateCloudIntegrationPAT(r.Context(), claims.OrgID, cloudProvider)
if apiErr != nil {
RespondError(w, basemodel.WrapApiError(
apiErr, "couldn't provision PAT for cloud integration:",
@@ -58,11 +62,9 @@ func (ah *APIHandler) CloudIntegrationsGenerateConnectionParams(w http.ResponseW
SigNozAPIKey: apiKey,
}
license, apiErr := ah.LM().GetRepo().GetActiveLicense(r.Context())
if apiErr != nil {
RespondError(w, basemodel.WrapApiError(
apiErr, "couldn't look for active license",
), nil)
license, err := ah.Signoz.Licensing.GetActive(r.Context(), orgID)
if err != nil {
render.Error(w, err)
return
}
@@ -118,7 +120,14 @@ func (ah *APIHandler) getOrCreateCloudIntegrationPAT(ctx context.Context, orgId
return "", apiErr
}
allPats, err := ah.AppDao().ListPATs(ctx, orgId)
orgIdUUID, err := valuer.NewUUID(orgId)
if err != nil {
return "", basemodel.InternalError(fmt.Errorf(
"couldn't parse orgId: %w", err,
))
}
allPats, err := ah.Signoz.Modules.User.ListAPIKeys(ctx, orgIdUUID)
if err != nil {
return "", basemodel.InternalError(fmt.Errorf(
"couldn't list PATs: %w", err,
@@ -135,29 +144,36 @@ func (ah *APIHandler) getOrCreateCloudIntegrationPAT(ctx context.Context, orgId
zap.String("cloudProvider", cloudProvider),
)
newPAT := eeTypes.NewGettablePAT(
newPAT, err := types.NewStorableAPIKey(
integrationPATName,
baseconstants.ViewerGroup,
integrationUser.ID,
types.RoleViewer,
0,
)
integrationPAT, err := ah.AppDao().CreatePAT(ctx, orgId, newPAT)
if err != nil {
return "", basemodel.InternalError(fmt.Errorf(
"couldn't create cloud integration PAT: %w", err,
))
}
return integrationPAT.Token, nil
err = ah.Signoz.Modules.User.CreateAPIKey(ctx, newPAT)
if err != nil {
return "", basemodel.InternalError(fmt.Errorf(
"couldn't create cloud integration PAT: %w", err,
))
}
return newPAT.Token, nil
}
func (ah *APIHandler) getOrCreateCloudIntegrationUser(
ctx context.Context, orgId string, cloudProvider string,
) (*types.User, *basemodel.ApiError) {
cloudIntegrationUserId := fmt.Sprintf("%s-integration", cloudProvider)
cloudIntegrationUser := fmt.Sprintf("%s-integration", cloudProvider)
email := fmt.Sprintf("%s@signoz.io", cloudIntegrationUser)
integrationUserResult, apiErr := ah.AppDao().GetUser(ctx, cloudIntegrationUserId)
if apiErr != nil {
return nil, basemodel.WrapApiError(apiErr, "couldn't look for integration user")
integrationUserResult, err := ah.Signoz.Modules.User.GetUserByEmailInOrg(ctx, orgId, email)
if err != nil && !errors.Ast(err, errors.TypeNotFound) {
return nil, basemodel.NotFoundError(fmt.Errorf("couldn't look for integration user: %w", err))
}
if integrationUserResult != nil {
@@ -169,33 +185,18 @@ func (ah *APIHandler) getOrCreateCloudIntegrationUser(
zap.String("cloudProvider", cloudProvider),
)
newUser := &types.User{
ID: cloudIntegrationUserId,
Name: fmt.Sprintf("%s integration", cloudProvider),
Email: fmt.Sprintf("%s@signoz.io", cloudIntegrationUserId),
TimeAuditable: types.TimeAuditable{
CreatedAt: time.Now(),
},
OrgID: orgId,
}
viewerGroup, apiErr := dao.DB().GetGroupByName(ctx, baseconstants.ViewerGroup)
if apiErr != nil {
return nil, basemodel.WrapApiError(apiErr, "couldn't get viewer group for creating integration user")
}
newUser.GroupID = viewerGroup.ID
passwordHash, err := auth.PasswordHash(uuid.NewString())
newUser, err := types.NewUser(cloudIntegrationUser, email, types.RoleViewer.String(), orgId)
if err != nil {
return nil, basemodel.InternalError(fmt.Errorf(
"couldn't hash random password for cloud integration user: %w", err,
"couldn't create cloud integration user: %w", err,
))
}
newUser.Password = passwordHash
integrationUser, apiErr := ah.AppDao().CreateUser(ctx, newUser, false)
if apiErr != nil {
return nil, basemodel.WrapApiError(apiErr, "couldn't create cloud integration user")
password, err := types.NewFactorPassword(uuid.NewString())
if err != nil {
return nil, basemodel.InternalError(fmt.Errorf("couldn't create password for cloud integration user: %w", err))
}
integrationUser, err := ah.Signoz.Modules.User.CreateUserWithPassword(ctx, newUser, password)
if err != nil {
return nil, basemodel.InternalError(fmt.Errorf("couldn't create cloud integration user: %w", err))
}
return integrationUser, nil
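The two helpers above follow a get-or-create flow: look the resource up, treat a typed not-found error as "does not exist yet", and only then create it (errors.Ast(err, errors.TypeNotFound) in the new code). A generic, self-contained sketch of the same pattern; the store type and ErrNotFound sentinel are invented for illustration and stand in for the SigNoz errors package.

package main

import (
	"errors"
	"fmt"
)

var ErrNotFound = errors.New("not found")

type userStore struct{ users map[string]string }

func (s *userStore) getByEmail(email string) (string, error) {
	if name, ok := s.users[email]; ok {
		return name, nil
	}
	return "", ErrNotFound
}

func (s *userStore) create(email, name string) string {
	s.users[email] = name
	return name
}

// getOrCreate returns the existing user, creates one when it is missing,
// and propagates every other error unchanged.
func getOrCreate(s *userStore, email, name string) (string, error) {
	existing, err := s.getByEmail(email)
	if err == nil {
		return existing, nil
	}
	if !errors.Is(err, ErrNotFound) {
		return "", fmt.Errorf("lookup failed: %w", err)
	}
	return s.create(email, name), nil
}

func main() {
	s := &userStore{users: map[string]string{}}
	name, _ := getOrCreate(s, "aws-integration@signoz.io", "aws integration")
	fmt.Println(name)
}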


@@ -1,63 +0,0 @@
package api
import (
"net/http"
"strings"
"github.com/SigNoz/signoz/pkg/errors"
"github.com/SigNoz/signoz/pkg/http/render"
"github.com/SigNoz/signoz/pkg/query-service/app/dashboards"
"github.com/SigNoz/signoz/pkg/query-service/auth"
"github.com/SigNoz/signoz/pkg/types/authtypes"
"github.com/gorilla/mux"
)
func (ah *APIHandler) lockDashboard(w http.ResponseWriter, r *http.Request) {
ah.lockUnlockDashboard(w, r, true)
}
func (ah *APIHandler) unlockDashboard(w http.ResponseWriter, r *http.Request) {
ah.lockUnlockDashboard(w, r, false)
}
func (ah *APIHandler) lockUnlockDashboard(w http.ResponseWriter, r *http.Request, lock bool) {
// Locking can only be done by the owner of the dashboard
// or an admin
// - Fetch the dashboard
// - Check if the user is the owner or an admin
// - If yes, lock/unlock the dashboard
// - If no, return 403
// Get the dashboard UUID from the request
uuid := mux.Vars(r)["uuid"]
if strings.HasPrefix(uuid, "integration") {
render.Error(w, errors.Newf(errors.TypeForbidden, errors.CodeForbidden, "dashboards created by integrations cannot be modified"))
return
}
claims, ok := authtypes.ClaimsFromContext(r.Context())
if !ok {
render.Error(w, errors.Newf(errors.TypeUnauthenticated, errors.CodeUnauthenticated, "unauthenticated"))
return
}
dashboard, err := dashboards.GetDashboard(r.Context(), claims.OrgID, uuid)
if err != nil {
render.Error(w, errors.Wrapf(err, errors.TypeInternal, errors.CodeInternal, "failed to get dashboard"))
return
}
if !auth.IsAdminV2(claims) && (dashboard.CreatedBy != claims.Email) {
render.Error(w, errors.Newf(errors.TypeForbidden, errors.CodeForbidden, "You are not authorized to lock/unlock this dashboard"))
return
}
// Lock/Unlock the dashboard
err = dashboards.LockUnlockDashboard(r.Context(), claims.OrgID, uuid, lock)
if err != nil {
render.Error(w, errors.Wrapf(err, errors.TypeInternal, errors.CodeInternal, "failed to lock/unlock dashboard"))
return
}
ah.Respond(w, "Dashboard updated successfully")
}


@@ -1,91 +0,0 @@
package api
import (
"context"
"encoding/json"
"fmt"
"net/http"
"github.com/SigNoz/signoz/ee/query-service/model"
"github.com/SigNoz/signoz/ee/types"
"github.com/google/uuid"
"github.com/gorilla/mux"
)
func (ah *APIHandler) listDomainsByOrg(w http.ResponseWriter, r *http.Request) {
orgId := mux.Vars(r)["orgId"]
domains, apierr := ah.AppDao().ListDomains(context.Background(), orgId)
if apierr != nil {
RespondError(w, apierr, domains)
return
}
ah.Respond(w, domains)
}
func (ah *APIHandler) postDomain(w http.ResponseWriter, r *http.Request) {
ctx := context.Background()
req := types.GettableOrgDomain{}
if err := json.NewDecoder(r.Body).Decode(&req); err != nil {
RespondError(w, model.BadRequest(err), nil)
return
}
if err := req.ValidNew(); err != nil {
RespondError(w, model.BadRequest(err), nil)
return
}
if apierr := ah.AppDao().CreateDomain(ctx, &req); apierr != nil {
RespondError(w, apierr, nil)
return
}
ah.Respond(w, &req)
}
func (ah *APIHandler) putDomain(w http.ResponseWriter, r *http.Request) {
ctx := context.Background()
domainIdStr := mux.Vars(r)["id"]
domainId, err := uuid.Parse(domainIdStr)
if err != nil {
RespondError(w, model.BadRequest(err), nil)
return
}
req := types.GettableOrgDomain{StorableOrgDomain: types.StorableOrgDomain{ID: domainId}}
if err := json.NewDecoder(r.Body).Decode(&req); err != nil {
RespondError(w, model.BadRequest(err), nil)
return
}
req.ID = domainId
if err := req.Valid(nil); err != nil {
RespondError(w, model.BadRequest(err), nil)
return
}
if apierr := ah.AppDao().UpdateDomain(ctx, &req); apierr != nil {
RespondError(w, apierr, nil)
return
}
ah.Respond(w, &req)
}
func (ah *APIHandler) deleteDomain(w http.ResponseWriter, r *http.Request) {
domainIdStr := mux.Vars(r)["id"]
domainId, err := uuid.Parse(domainIdStr)
if err != nil {
RespondError(w, model.BadRequest(fmt.Errorf("invalid domain id")), nil)
return
}
apierr := ah.AppDao().DeleteDomain(context.Background(), domainId)
if apierr != nil {
RespondError(w, apierr, nil)
return
}
ah.Respond(w, nil)
}


@@ -9,13 +9,29 @@ import (
"time"
"github.com/SigNoz/signoz/ee/query-service/constants"
basemodel "github.com/SigNoz/signoz/pkg/query-service/model"
pkgError "github.com/SigNoz/signoz/pkg/errors"
"github.com/SigNoz/signoz/pkg/http/render"
"github.com/SigNoz/signoz/pkg/types/authtypes"
"github.com/SigNoz/signoz/pkg/types/licensetypes"
"github.com/SigNoz/signoz/pkg/valuer"
"go.uber.org/zap"
)
func (ah *APIHandler) getFeatureFlags(w http.ResponseWriter, r *http.Request) {
ctx := r.Context()
featureSet, err := ah.FF().GetFeatureFlags()
claims, err := authtypes.ClaimsFromContext(ctx)
if err != nil {
render.Error(w, err)
return
}
orgID, err := valuer.NewUUID(claims.OrgID)
if err != nil {
render.Error(w, pkgError.Newf(pkgError.TypeInvalidInput, pkgError.CodeInvalidInput, "orgId is invalid"))
return
}
featureSet, err := ah.Signoz.Licensing.GetFeatureFlags(r.Context(), orgID)
if err != nil {
ah.HandleError(w, err, http.StatusInternalServerError)
return
@@ -23,7 +39,7 @@ func (ah *APIHandler) getFeatureFlags(w http.ResponseWriter, r *http.Request) {
if constants.FetchFeatures == "true" {
zap.L().Debug("fetching license")
license, err := ah.LM().GetRepo().GetActiveLicense(ctx)
license, err := ah.Signoz.Licensing.GetActive(ctx, orgID)
if err != nil {
zap.L().Error("failed to fetch license", zap.Error(err))
} else if license == nil {
@@ -43,10 +59,17 @@ func (ah *APIHandler) getFeatureFlags(w http.ResponseWriter, r *http.Request) {
}
}
if ah.opts.PreferSpanMetrics {
for idx := range featureSet {
feature := &featureSet[idx]
if feature.Name == basemodel.UseSpanMetrics {
if constants.IsPreferSpanMetrics {
for idx, feature := range featureSet {
if feature.Name == licensetypes.UseSpanMetrics {
featureSet[idx].Active = true
}
}
}
if constants.IsDotMetricsEnabled {
for idx, feature := range featureSet {
if feature.Name == licensetypes.DotMetricsEnabled {
featureSet[idx].Active = true
}
}
@@ -57,7 +80,7 @@ func (ah *APIHandler) getFeatureFlags(w http.ResponseWriter, r *http.Request) {
// fetchZeusFeatures makes an HTTP GET request to the /zeusFeatures endpoint
// and returns the FeatureSet.
func fetchZeusFeatures(url, licenseKey string) (basemodel.FeatureSet, error) {
func fetchZeusFeatures(url, licenseKey string) ([]*licensetypes.Feature, error) {
// Check if the URL is empty
if url == "" {
return nil, fmt.Errorf("url is empty")
@@ -116,28 +139,28 @@ func fetchZeusFeatures(url, licenseKey string) (basemodel.FeatureSet, error) {
}
type ZeusFeaturesResponse struct {
Status string `json:"status"`
Data basemodel.FeatureSet `json:"data"`
Status string `json:"status"`
Data []*licensetypes.Feature `json:"data"`
}
// MergeFeatureSets merges two FeatureSet arrays with precedence to zeusFeatures.
func MergeFeatureSets(zeusFeatures, internalFeatures basemodel.FeatureSet) basemodel.FeatureSet {
func MergeFeatureSets(zeusFeatures, internalFeatures []*licensetypes.Feature) []*licensetypes.Feature {
// Create a map to store the merged features
featureMap := make(map[string]basemodel.Feature)
featureMap := make(map[string]*licensetypes.Feature)
// Add all features from the otherFeatures set to the map
for _, feature := range internalFeatures {
featureMap[feature.Name] = feature
featureMap[feature.Name.StringValue()] = feature
}
// Add all features from the zeusFeatures set to the map
// If a feature already exists (i.e., same name), the zeusFeature will overwrite it
for _, feature := range zeusFeatures {
featureMap[feature.Name] = feature
featureMap[feature.Name.StringValue()] = feature
}
// Convert the map back to a FeatureSet slice
var mergedFeatures basemodel.FeatureSet
var mergedFeatures []*licensetypes.Feature
for _, feature := range featureMap {
mergedFeatures = append(mergedFeatures, feature)
}

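fetchZeusFeatures above issues an authenticated GET and decodes the body into ZeusFeaturesResponse before MergeFeatureSets reconciles the result with the built-in features. A minimal sketch of that fetch-and-decode step; the feature struct is a simplified stand-in for licensetypes.Feature, and the header carrying the license key is a placeholder because the real request details are elided from this diff.

package main

import (
	"encoding/json"
	"fmt"
	"net/http"
)

// feature is a simplified stand-in for licensetypes.Feature.
type feature struct {
	Name   string `json:"name"`
	Active bool   `json:"active"`
}

type zeusFeaturesResponse struct {
	Status string     `json:"status"`
	Data   []*feature `json:"data"`
}

func fetchFeatures(url, licenseKey string) ([]*feature, error) {
	req, err := http.NewRequest(http.MethodGet, url, nil)
	if err != nil {
		return nil, err
	}
	// Placeholder header name; the header actually used by the service is not shown here.
	req.Header.Set("X-License-Key", licenseKey)

	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		return nil, err
	}
	defer resp.Body.Close()

	if resp.StatusCode != http.StatusOK {
		return nil, fmt.Errorf("unexpected status %d from %s", resp.StatusCode, url)
	}

	var out zeusFeaturesResponse
	if err := json.NewDecoder(resp.Body).Decode(&out); err != nil {
		return nil, err
	}
	return out.Data, nil
}

func main() {
	feats, err := fetchFeatures("https://example.invalid/zeusFeatures", "demo-key")
	fmt.Println(feats, err)
}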

@@ -3,78 +3,79 @@ package api
import (
"testing"
basemodel "github.com/SigNoz/signoz/pkg/query-service/model"
"github.com/SigNoz/signoz/pkg/types/licensetypes"
"github.com/SigNoz/signoz/pkg/valuer"
"github.com/stretchr/testify/assert"
)
func TestMergeFeatureSets(t *testing.T) {
tests := []struct {
name string
zeusFeatures basemodel.FeatureSet
internalFeatures basemodel.FeatureSet
expected basemodel.FeatureSet
zeusFeatures []*licensetypes.Feature
internalFeatures []*licensetypes.Feature
expected []*licensetypes.Feature
}{
{
name: "empty zeusFeatures and internalFeatures",
zeusFeatures: basemodel.FeatureSet{},
internalFeatures: basemodel.FeatureSet{},
expected: basemodel.FeatureSet{},
zeusFeatures: []*licensetypes.Feature{},
internalFeatures: []*licensetypes.Feature{},
expected: []*licensetypes.Feature{},
},
{
name: "non-empty zeusFeatures and empty internalFeatures",
zeusFeatures: basemodel.FeatureSet{
{Name: "Feature1", Active: true},
{Name: "Feature2", Active: false},
zeusFeatures: []*licensetypes.Feature{
{Name: valuer.NewString("Feature1"), Active: true},
{Name: valuer.NewString("Feature2"), Active: false},
},
internalFeatures: basemodel.FeatureSet{},
expected: basemodel.FeatureSet{
{Name: "Feature1", Active: true},
{Name: "Feature2", Active: false},
internalFeatures: []*licensetypes.Feature{},
expected: []*licensetypes.Feature{
{Name: valuer.NewString("Feature1"), Active: true},
{Name: valuer.NewString("Feature2"), Active: false},
},
},
{
name: "empty zeusFeatures and non-empty internalFeatures",
zeusFeatures: basemodel.FeatureSet{},
internalFeatures: basemodel.FeatureSet{
{Name: "Feature1", Active: true},
{Name: "Feature2", Active: false},
zeusFeatures: []*licensetypes.Feature{},
internalFeatures: []*licensetypes.Feature{
{Name: valuer.NewString("Feature1"), Active: true},
{Name: valuer.NewString("Feature2"), Active: false},
},
expected: basemodel.FeatureSet{
{Name: "Feature1", Active: true},
{Name: "Feature2", Active: false},
expected: []*licensetypes.Feature{
{Name: valuer.NewString("Feature1"), Active: true},
{Name: valuer.NewString("Feature2"), Active: false},
},
},
{
name: "non-empty zeusFeatures and non-empty internalFeatures with no conflicts",
zeusFeatures: basemodel.FeatureSet{
{Name: "Feature1", Active: true},
{Name: "Feature3", Active: false},
zeusFeatures: []*licensetypes.Feature{
{Name: valuer.NewString("Feature1"), Active: true},
{Name: valuer.NewString("Feature3"), Active: false},
},
internalFeatures: basemodel.FeatureSet{
{Name: "Feature2", Active: true},
{Name: "Feature4", Active: false},
internalFeatures: []*licensetypes.Feature{
{Name: valuer.NewString("Feature2"), Active: true},
{Name: valuer.NewString("Feature4"), Active: false},
},
expected: basemodel.FeatureSet{
{Name: "Feature1", Active: true},
{Name: "Feature2", Active: true},
{Name: "Feature3", Active: false},
{Name: "Feature4", Active: false},
expected: []*licensetypes.Feature{
{Name: valuer.NewString("Feature1"), Active: true},
{Name: valuer.NewString("Feature2"), Active: true},
{Name: valuer.NewString("Feature3"), Active: false},
{Name: valuer.NewString("Feature4"), Active: false},
},
},
{
name: "non-empty zeusFeatures and non-empty internalFeatures with conflicts",
zeusFeatures: basemodel.FeatureSet{
{Name: "Feature1", Active: true},
{Name: "Feature2", Active: false},
zeusFeatures: []*licensetypes.Feature{
{Name: valuer.NewString("Feature1"), Active: true},
{Name: valuer.NewString("Feature2"), Active: false},
},
internalFeatures: basemodel.FeatureSet{
{Name: "Feature1", Active: false},
{Name: "Feature3", Active: true},
internalFeatures: []*licensetypes.Feature{
{Name: valuer.NewString("Feature1"), Active: false},
{Name: valuer.NewString("Feature3"), Active: true},
},
expected: basemodel.FeatureSet{
{Name: "Feature1", Active: true},
{Name: "Feature2", Active: false},
{Name: "Feature3", Active: true},
expected: []*licensetypes.Feature{
{Name: valuer.NewString("Feature1"), Active: true},
{Name: valuer.NewString("Feature2"), Active: false},
{Name: valuer.NewString("Feature3"), Active: true},
},
},
}


@@ -5,10 +5,26 @@ import (
"strings"
"github.com/SigNoz/signoz/ee/query-service/integrations/gateway"
"github.com/SigNoz/signoz/pkg/errors"
"github.com/SigNoz/signoz/pkg/http/render"
"github.com/SigNoz/signoz/pkg/types/authtypes"
"github.com/SigNoz/signoz/pkg/valuer"
)
func (ah *APIHandler) ServeGatewayHTTP(rw http.ResponseWriter, req *http.Request) {
ctx := req.Context()
claims, err := authtypes.ClaimsFromContext(ctx)
if err != nil {
render.Error(rw, err)
return
}
orgID, err := valuer.NewUUID(claims.OrgID)
if err != nil {
render.Error(rw, errors.Newf(errors.TypeInvalidInput, errors.CodeInvalidInput, "orgId is invalid"))
return
}
validPath := false
for _, allowedPrefix := range gateway.AllowedPrefix {
if strings.HasPrefix(req.URL.Path, gateway.RoutePrefix+allowedPrefix) {
@@ -22,9 +38,9 @@ func (ah *APIHandler) ServeGatewayHTTP(rw http.ResponseWriter, req *http.Request
return
}
license, err := ah.LM().GetRepo().GetActiveLicense(ctx)
license, err := ah.Signoz.Licensing.GetActive(ctx, orgID)
if err != nil {
RespondError(rw, err, nil)
render.Error(rw, err)
return
}


@@ -6,9 +6,7 @@ import (
"net/http"
"github.com/SigNoz/signoz/ee/query-service/constants"
"github.com/SigNoz/signoz/ee/query-service/integrations/signozio"
"github.com/SigNoz/signoz/ee/query-service/model"
"github.com/SigNoz/signoz/pkg/http/render"
)
type DayWiseBreakdown struct {
@@ -47,10 +45,6 @@ type details struct {
BillTotal float64 `json:"billTotal"`
}
type Redirect struct {
RedirectURL string `json:"redirectURL"`
}
type billingDetails struct {
Status string `json:"status"`
Data struct {
@@ -62,93 +56,6 @@ type billingDetails struct {
} `json:"data"`
}
type ApplyLicenseRequest struct {
LicenseKey string `json:"key"`
}
func (ah *APIHandler) listLicensesV3(w http.ResponseWriter, r *http.Request) {
ah.listLicensesV2(w, r)
}
func (ah *APIHandler) getActiveLicenseV3(w http.ResponseWriter, r *http.Request) {
activeLicense, err := ah.LM().GetRepo().GetActiveLicenseV3(r.Context())
if err != nil {
RespondError(w, &model.ApiError{Typ: model.ErrorInternal, Err: err}, nil)
return
}
// return 404 not found if there is no active license
if activeLicense == nil {
RespondError(w, &model.ApiError{Typ: model.ErrorNotFound, Err: fmt.Errorf("no active license found")}, nil)
return
}
// TODO deprecate this when we move away from key for stripe
activeLicense.Data["key"] = activeLicense.Key
render.Success(w, http.StatusOK, activeLicense.Data)
}
// this function is called by zeus when inserting licenses in the query-service
func (ah *APIHandler) applyLicenseV3(w http.ResponseWriter, r *http.Request) {
var licenseKey ApplyLicenseRequest
if err := json.NewDecoder(r.Body).Decode(&licenseKey); err != nil {
RespondError(w, model.BadRequest(err), nil)
return
}
if licenseKey.LicenseKey == "" {
RespondError(w, model.BadRequest(fmt.Errorf("license key is required")), nil)
return
}
_, apiError := ah.LM().ActivateV3(r.Context(), licenseKey.LicenseKey)
if apiError != nil {
RespondError(w, apiError, nil)
return
}
render.Success(w, http.StatusAccepted, nil)
}
func (ah *APIHandler) refreshLicensesV3(w http.ResponseWriter, r *http.Request) {
apiError := ah.LM().RefreshLicense(r.Context())
if apiError != nil {
RespondError(w, apiError, nil)
return
}
render.Success(w, http.StatusNoContent, nil)
}
func getCheckoutPortalResponse(redirectURL string) *Redirect {
return &Redirect{RedirectURL: redirectURL}
}
func (ah *APIHandler) checkout(w http.ResponseWriter, r *http.Request) {
checkoutRequest := &model.CheckoutRequest{}
if err := json.NewDecoder(r.Body).Decode(checkoutRequest); err != nil {
RespondError(w, model.BadRequest(err), nil)
return
}
license := ah.LM().GetActiveLicense()
if license == nil {
RespondError(w, model.BadRequestStr("cannot proceed with checkout without license key"), nil)
return
}
redirectUrl, err := signozio.CheckoutSession(r.Context(), checkoutRequest, license.Key)
if err != nil {
RespondError(w, err, nil)
return
}
ah.Respond(w, getCheckoutPortalResponse(redirectUrl))
}
func (ah *APIHandler) getBilling(w http.ResponseWriter, r *http.Request) {
licenseKey := r.URL.Query().Get("licenseKey")
@@ -182,72 +89,3 @@ func (ah *APIHandler) getBilling(w http.ResponseWriter, r *http.Request) {
// TODO(srikanthccv):Fetch the current day usage and add it to the response
ah.Respond(w, billingResponse.Data)
}
func convertLicenseV3ToLicenseV2(licenses []*model.LicenseV3) []model.License {
licensesV2 := []model.License{}
for _, l := range licenses {
planKeyFromPlanName, ok := model.MapOldPlanKeyToNewPlanName[l.PlanName]
if !ok {
planKeyFromPlanName = model.Basic
}
licenseV2 := model.License{
Key: l.Key,
ActivationId: "",
PlanDetails: "",
FeatureSet: l.Features,
ValidationMessage: "",
IsCurrent: l.IsCurrent,
LicensePlan: model.LicensePlan{
PlanKey: planKeyFromPlanName,
ValidFrom: l.ValidFrom,
ValidUntil: l.ValidUntil,
Status: l.Status},
}
licensesV2 = append(licensesV2, licenseV2)
}
return licensesV2
}
func (ah *APIHandler) listLicensesV2(w http.ResponseWriter, r *http.Request) {
licensesV3, apierr := ah.LM().GetLicensesV3(r.Context())
if apierr != nil {
RespondError(w, apierr, nil)
return
}
licenses := convertLicenseV3ToLicenseV2(licensesV3)
resp := model.Licenses{
TrialStart: -1,
TrialEnd: -1,
OnTrial: false,
WorkSpaceBlock: false,
TrialConvertedToSubscription: false,
GracePeriodEnd: -1,
Licenses: licenses,
}
ah.Respond(w, resp)
}
func (ah *APIHandler) portalSession(w http.ResponseWriter, r *http.Request) {
portalRequest := &model.PortalRequest{}
if err := json.NewDecoder(r.Body).Decode(portalRequest); err != nil {
RespondError(w, model.BadRequest(err), nil)
return
}
license := ah.LM().GetActiveLicense()
if license == nil {
RespondError(w, model.BadRequestStr("cannot request the portal session without license key"), nil)
return
}
redirectUrl, err := signozio.PortalSession(r.Context(), portalRequest, license.Key)
if err != nil {
RespondError(w, err, nil)
return
}
ah.Respond(w, getCheckoutPortalResponse(redirectUrl))
}


@@ -1,145 +0,0 @@
package api
import (
"context"
"encoding/json"
"fmt"
"net/http"
"time"
"github.com/SigNoz/signoz/ee/query-service/model"
"github.com/SigNoz/signoz/ee/types"
eeTypes "github.com/SigNoz/signoz/ee/types"
"github.com/SigNoz/signoz/pkg/query-service/auth"
baseconstants "github.com/SigNoz/signoz/pkg/query-service/constants"
basemodel "github.com/SigNoz/signoz/pkg/query-service/model"
"github.com/gorilla/mux"
"go.uber.org/zap"
)
func (ah *APIHandler) createPAT(w http.ResponseWriter, r *http.Request) {
ctx := context.Background()
req := model.CreatePATRequestBody{}
if err := json.NewDecoder(r.Body).Decode(&req); err != nil {
RespondError(w, model.BadRequest(err), nil)
return
}
user, err := auth.GetUserFromReqContext(r.Context())
if err != nil {
RespondError(w, &model.ApiError{
Typ: model.ErrorUnauthorized,
Err: err,
}, nil)
return
}
pat := eeTypes.NewGettablePAT(
req.Name,
req.Role,
user.ID,
req.ExpiresInDays,
)
err = validatePATRequest(pat)
if err != nil {
RespondError(w, model.BadRequest(err), nil)
return
}
zap.L().Info("Got Create PAT request", zap.Any("pat", pat))
var apierr basemodel.BaseApiError
if pat, apierr = ah.AppDao().CreatePAT(ctx, user.OrgID, pat); apierr != nil {
RespondError(w, apierr, nil)
return
}
ah.Respond(w, &pat)
}
func validatePATRequest(req types.GettablePAT) error {
if req.Role == "" || (req.Role != baseconstants.ViewerGroup && req.Role != baseconstants.EditorGroup && req.Role != baseconstants.AdminGroup) {
return fmt.Errorf("valid role is required")
}
if req.ExpiresAt < 0 {
return fmt.Errorf("valid expiresAt is required")
}
if req.Name == "" {
return fmt.Errorf("valid name is required")
}
return nil
}
func (ah *APIHandler) updatePAT(w http.ResponseWriter, r *http.Request) {
ctx := context.Background()
req := types.GettablePAT{}
if err := json.NewDecoder(r.Body).Decode(&req); err != nil {
RespondError(w, model.BadRequest(err), nil)
return
}
user, err := auth.GetUserFromReqContext(r.Context())
if err != nil {
RespondError(w, &model.ApiError{
Typ: model.ErrorUnauthorized,
Err: err,
}, nil)
return
}
err = validatePATRequest(req)
if err != nil {
RespondError(w, model.BadRequest(err), nil)
return
}
req.UpdatedByUserID = user.ID
id := mux.Vars(r)["id"]
req.UpdatedAt = time.Now()
zap.L().Info("Got Update PAT request", zap.Any("pat", req))
var apierr basemodel.BaseApiError
if apierr = ah.AppDao().UpdatePAT(ctx, user.OrgID, req, id); apierr != nil {
RespondError(w, apierr, nil)
return
}
ah.Respond(w, map[string]string{"data": "pat updated successfully"})
}
func (ah *APIHandler) getPATs(w http.ResponseWriter, r *http.Request) {
ctx := context.Background()
user, err := auth.GetUserFromReqContext(r.Context())
if err != nil {
RespondError(w, &model.ApiError{
Typ: model.ErrorUnauthorized,
Err: err,
}, nil)
return
}
zap.L().Info("Get PATs for user", zap.String("user_id", user.ID))
pats, apierr := ah.AppDao().ListPATs(ctx, user.OrgID)
if apierr != nil {
RespondError(w, apierr, nil)
return
}
ah.Respond(w, pats)
}
func (ah *APIHandler) revokePAT(w http.ResponseWriter, r *http.Request) {
ctx := context.Background()
id := mux.Vars(r)["id"]
user, err := auth.GetUserFromReqContext(r.Context())
if err != nil {
RespondError(w, &model.ApiError{
Typ: model.ErrorUnauthorized,
Err: err,
}, nil)
return
}
zap.L().Info("Revoke PAT with id", zap.String("id", id))
if apierr := ah.AppDao().RevokePAT(ctx, user.OrgID, id, user.ID); apierr != nil {
RespondError(w, apierr, nil)
return
}
ah.Respond(w, map[string]string{"data": "pat revoked successfully"})
}


@@ -2,19 +2,39 @@ package api
import (
"bytes"
"context"
"encoding/json"
"fmt"
"io"
"net/http"
"runtime/debug"
anomalyV2 "github.com/SigNoz/signoz/ee/anomaly"
"github.com/SigNoz/signoz/ee/query-service/anomaly"
"github.com/SigNoz/signoz/pkg/errors"
"github.com/SigNoz/signoz/pkg/http/render"
baseapp "github.com/SigNoz/signoz/pkg/query-service/app"
"github.com/SigNoz/signoz/pkg/query-service/app/queryBuilder"
"github.com/SigNoz/signoz/pkg/query-service/model"
v3 "github.com/SigNoz/signoz/pkg/query-service/model/v3"
"github.com/SigNoz/signoz/pkg/types/authtypes"
"github.com/SigNoz/signoz/pkg/valuer"
"go.uber.org/zap"
qbtypes "github.com/SigNoz/signoz/pkg/types/querybuildertypes/querybuildertypesv5"
)
func (aH *APIHandler) queryRangeV4(w http.ResponseWriter, r *http.Request) {
claims, err := authtypes.ClaimsFromContext(r.Context())
if err != nil {
render.Error(w, err)
return
}
orgID, err := valuer.NewUUID(claims.OrgID)
if err != nil {
render.Error(w, err)
return
}
bodyBytes, _ := io.ReadAll(r.Body)
r.Body = io.NopCloser(bytes.NewBuffer(bodyBytes))
@@ -29,7 +49,7 @@ func (aH *APIHandler) queryRangeV4(w http.ResponseWriter, r *http.Request) {
queryRangeParams.Version = "v4"
// add temporality for each metric
temporalityErr := aH.PopulateTemporality(r.Context(), queryRangeParams)
temporalityErr := aH.PopulateTemporality(r.Context(), orgID, queryRangeParams)
if temporalityErr != nil {
zap.L().Error("Error while adding temporality for metrics", zap.Error(temporalityErr))
RespondError(w, &model.ApiError{Typ: model.ErrorInternal, Err: temporalityErr}, nil)
@@ -85,34 +105,30 @@ func (aH *APIHandler) queryRangeV4(w http.ResponseWriter, r *http.Request) {
switch seasonality {
case anomaly.SeasonalityWeekly:
provider = anomaly.NewWeeklyProvider(
anomaly.WithCache[*anomaly.WeeklyProvider](aH.opts.Cache),
anomaly.WithCache[*anomaly.WeeklyProvider](aH.Signoz.Cache),
anomaly.WithKeyGenerator[*anomaly.WeeklyProvider](queryBuilder.NewKeyGenerator()),
anomaly.WithReader[*anomaly.WeeklyProvider](aH.opts.DataConnector),
anomaly.WithFeatureLookup[*anomaly.WeeklyProvider](aH.opts.FeatureFlags),
)
case anomaly.SeasonalityDaily:
provider = anomaly.NewDailyProvider(
anomaly.WithCache[*anomaly.DailyProvider](aH.opts.Cache),
anomaly.WithCache[*anomaly.DailyProvider](aH.Signoz.Cache),
anomaly.WithKeyGenerator[*anomaly.DailyProvider](queryBuilder.NewKeyGenerator()),
anomaly.WithReader[*anomaly.DailyProvider](aH.opts.DataConnector),
anomaly.WithFeatureLookup[*anomaly.DailyProvider](aH.opts.FeatureFlags),
)
case anomaly.SeasonalityHourly:
provider = anomaly.NewHourlyProvider(
anomaly.WithCache[*anomaly.HourlyProvider](aH.opts.Cache),
anomaly.WithCache[*anomaly.HourlyProvider](aH.Signoz.Cache),
anomaly.WithKeyGenerator[*anomaly.HourlyProvider](queryBuilder.NewKeyGenerator()),
anomaly.WithReader[*anomaly.HourlyProvider](aH.opts.DataConnector),
anomaly.WithFeatureLookup[*anomaly.HourlyProvider](aH.opts.FeatureFlags),
)
default:
provider = anomaly.NewDailyProvider(
anomaly.WithCache[*anomaly.DailyProvider](aH.opts.Cache),
anomaly.WithCache[*anomaly.DailyProvider](aH.Signoz.Cache),
anomaly.WithKeyGenerator[*anomaly.DailyProvider](queryBuilder.NewKeyGenerator()),
anomaly.WithReader[*anomaly.DailyProvider](aH.opts.DataConnector),
anomaly.WithFeatureLookup[*anomaly.DailyProvider](aH.opts.FeatureFlags),
)
}
anomalies, err := provider.GetAnomalies(r.Context(), &anomaly.GetAnomaliesRequest{Params: queryRangeParams})
anomalies, err := provider.GetAnomalies(r.Context(), orgID, &anomaly.GetAnomaliesRequest{Params: queryRangeParams})
if err != nil {
RespondError(w, &model.ApiError{Typ: model.ErrorInternal, Err: err}, nil)
return
@@ -127,3 +143,139 @@ func (aH *APIHandler) queryRangeV4(w http.ResponseWriter, r *http.Request) {
aH.QueryRangeV4(w, r)
}
}
func extractSeasonality(anomalyQuery *qbtypes.QueryBuilderQuery[qbtypes.MetricAggregation]) anomalyV2.Seasonality {
for _, fn := range anomalyQuery.Functions {
if fn.Name == qbtypes.FunctionNameAnomaly {
for _, arg := range fn.Args {
if arg.Name == "seasonality" {
if seasonalityStr, ok := arg.Value.(string); ok {
switch seasonalityStr {
case "weekly":
return anomalyV2.SeasonalityWeekly
case "hourly":
return anomalyV2.SeasonalityHourly
}
}
}
}
}
}
return anomalyV2.SeasonalityDaily // default
}
func createAnomalyProvider(aH *APIHandler, seasonality anomalyV2.Seasonality) anomalyV2.Provider {
switch seasonality {
case anomalyV2.SeasonalityWeekly:
return anomalyV2.NewWeeklyProvider(
anomalyV2.WithQuerier[*anomalyV2.WeeklyProvider](aH.Signoz.Querier),
anomalyV2.WithLogger[*anomalyV2.WeeklyProvider](aH.Signoz.Instrumentation.Logger()),
)
case anomalyV2.SeasonalityHourly:
return anomalyV2.NewHourlyProvider(
anomalyV2.WithQuerier[*anomalyV2.HourlyProvider](aH.Signoz.Querier),
anomalyV2.WithLogger[*anomalyV2.HourlyProvider](aH.Signoz.Instrumentation.Logger()),
)
default:
return anomalyV2.NewDailyProvider(
anomalyV2.WithQuerier[*anomalyV2.DailyProvider](aH.Signoz.Querier),
anomalyV2.WithLogger[*anomalyV2.DailyProvider](aH.Signoz.Instrumentation.Logger()),
)
}
}
func (aH *APIHandler) handleAnomalyQuery(ctx context.Context, orgID valuer.UUID, anomalyQuery *qbtypes.QueryBuilderQuery[qbtypes.MetricAggregation], queryRangeRequest qbtypes.QueryRangeRequest) (*anomalyV2.AnomaliesResponse, error) {
seasonality := extractSeasonality(anomalyQuery)
provider := createAnomalyProvider(aH, seasonality)
return provider.GetAnomalies(ctx, orgID, &anomalyV2.AnomaliesRequest{Params: queryRangeRequest})
}
func (aH *APIHandler) queryRangeV5(rw http.ResponseWriter, req *http.Request) {
bodyBytes, err := io.ReadAll(req.Body)
if err != nil {
render.Error(rw, errors.NewInvalidInputf(errors.CodeInvalidInput, "failed to read request body: %v", err))
return
}
req.Body = io.NopCloser(bytes.NewBuffer(bodyBytes))
ctx := req.Context()
claims, err := authtypes.ClaimsFromContext(ctx)
if err != nil {
render.Error(rw, err)
return
}
var queryRangeRequest qbtypes.QueryRangeRequest
if err := json.NewDecoder(req.Body).Decode(&queryRangeRequest); err != nil {
render.Error(rw, errors.NewInvalidInputf(errors.CodeInvalidInput, "failed to decode request body: %v", err))
return
}
defer func() {
if r := recover(); r != nil {
stackTrace := string(debug.Stack())
queryJSON, _ := json.Marshal(queryRangeRequest)
aH.Signoz.Instrumentation.Logger().ErrorContext(ctx, "panic in QueryRange",
"error", r,
"user", claims.UserID,
"payload", string(queryJSON),
"stacktrace", stackTrace,
)
render.Error(rw, errors.NewInternalf(
errors.CodeInternal,
"Something went wrong on our end. It's not you, it's us. Our team is notified about it. Reach out to support if issue persists.",
))
}
}()
if err := queryRangeRequest.Validate(); err != nil {
render.Error(rw, err)
return
}
orgID, err := valuer.NewUUID(claims.OrgID)
if err != nil {
render.Error(rw, err)
return
}
if anomalyQuery, ok := queryRangeRequest.IsAnomalyRequest(); ok {
anomalies, err := aH.handleAnomalyQuery(ctx, orgID, anomalyQuery, queryRangeRequest)
if err != nil {
render.Error(rw, errors.NewInternalf(errors.CodeInternal, "failed to get anomalies: %v", err))
return
}
results := []any{}
for _, item := range anomalies.Results {
results = append(results, item)
}
finalResp := &qbtypes.QueryRangeResponse{
Type: queryRangeRequest.RequestType,
Data: struct {
Results []any `json:"results"`
}{
Results: results,
},
Meta: struct {
RowsScanned uint64 `json:"rowsScanned"`
BytesScanned uint64 `json:"bytesScanned"`
DurationMS uint64 `json:"durationMs"`
}{},
}
render.Success(rw, http.StatusOK, finalResp)
return
} else {
// regular query range request, let the querier handle it
req.Body = io.NopCloser(bytes.NewBuffer(bodyBytes))
aH.QuerierAPI.QueryRange(rw, req)
}
}
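extractSeasonality above walks the query's functions looking for an anomaly function with a seasonality argument and falls back to daily when none is found. The same lookup, written against simplified local types so it is runnable on its own; the real qbtypes definitions are not reproduced here.

package main

import "fmt"

type arg struct {
	Name  string
	Value any
}

type queryFunction struct {
	Name string
	Args []arg
}

// seasonalityOf finds the anomaly function, reads its "seasonality" argument,
// and defaults to "daily" when the argument is absent or unrecognised.
func seasonalityOf(fns []queryFunction) string {
	for _, fn := range fns {
		if fn.Name != "anomaly" {
			continue
		}
		for _, a := range fn.Args {
			if a.Name != "seasonality" {
				continue
			}
			if s, ok := a.Value.(string); ok && (s == "weekly" || s == "hourly") {
				return s
			}
		}
	}
	return "daily"
}

func main() {
	fns := []queryFunction{{Name: "anomaly", Args: []arg{{Name: "seasonality", Value: "weekly"}}}}
	fmt.Println(seasonalityOf(fns)) // weekly
}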


@@ -1,33 +0,0 @@
package api
import (
"net/http"
"github.com/SigNoz/signoz/ee/query-service/app/db"
"github.com/SigNoz/signoz/ee/query-service/model"
baseapp "github.com/SigNoz/signoz/pkg/query-service/app"
basemodel "github.com/SigNoz/signoz/pkg/query-service/model"
"go.uber.org/zap"
)
func (ah *APIHandler) searchTraces(w http.ResponseWriter, r *http.Request) {
if !ah.CheckFeature(basemodel.SmartTraceDetail) {
zap.L().Info("SmartTraceDetail feature is not enabled in this plan")
ah.APIHandler.SearchTraces(w, r)
return
}
searchTracesParams, err := baseapp.ParseSearchTracesParams(r)
if err != nil {
RespondError(w, &model.ApiError{Typ: model.ErrorBadData, Err: err}, "Error reading params")
return
}
result, err := ah.opts.DataConnector.SearchTraces(r.Context(), searchTracesParams, db.SmartTraceAlgorithm)
if ah.HandleError(w, err, http.StatusBadRequest) {
return
}
ah.WriteJSON(w, r, result)
}


@@ -1,40 +0,0 @@
package db
import (
"time"
"github.com/ClickHouse/clickhouse-go/v2"
"github.com/jmoiron/sqlx"
"github.com/SigNoz/signoz/pkg/cache"
"github.com/SigNoz/signoz/pkg/prometheus"
basechr "github.com/SigNoz/signoz/pkg/query-service/app/clickhouseReader"
"github.com/SigNoz/signoz/pkg/query-service/interfaces"
"github.com/SigNoz/signoz/pkg/telemetrystore"
)
type ClickhouseReader struct {
conn clickhouse.Conn
appdb *sqlx.DB
*basechr.ClickHouseReader
}
func NewDataConnector(
localDB *sqlx.DB,
telemetryStore telemetrystore.TelemetryStore,
prometheus prometheus.Prometheus,
lm interfaces.FeatureLookup,
cluster string,
useLogsNewSchema bool,
useTraceNewSchema bool,
fluxIntervalForTraceDetail time.Duration,
cache cache.Cache,
) *ClickhouseReader {
chReader := basechr.NewReader(localDB, telemetryStore, prometheus, lm, cluster, useLogsNewSchema, useTraceNewSchema, fluxIntervalForTraceDetail, cache)
return &ClickhouseReader{
conn: telemetryStore.ClickhouseDB(),
appdb: localDB,
ClickHouseReader: chReader,
}
}
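The deleted ClickhouseReader above relied on struct embedding: it wrapped the shared base reader so every base method stayed available while EE-specific fields could be added alongside it. A small generic sketch of that Go pattern with invented types; the new server code below drops the wrapper and calls clickhouseReader.NewReader directly.

package main

import "fmt"

// baseReader stands in for the shared ClickHouseReader implementation.
type baseReader struct{ cluster string }

func (b *baseReader) Cluster() string { return b.cluster }

// eeReader embeds baseReader, so all of its methods are promoted onto
// eeReader while extra fields can live next to it.
type eeReader struct {
	extraConn string
	*baseReader
}

func main() {
	r := &eeReader{
		extraConn:  "clickhouse:9000",
		baseReader: &baseReader{cluster: "cluster"},
	}
	fmt.Println(r.Cluster(), r.extraConn) // Cluster() comes from the embedded reader.
}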


@@ -2,189 +2,112 @@ package app
import (
"context"
"errors"
"fmt"
"log/slog"
"net"
"net/http"
_ "net/http/pprof" // http profiler
"time"
"github.com/gorilla/handlers"
"github.com/jmoiron/sqlx"
eemiddleware "github.com/SigNoz/signoz/ee/http/middleware"
"github.com/SigNoz/signoz/ee/query-service/app/api"
"github.com/SigNoz/signoz/ee/query-service/app/db"
"github.com/SigNoz/signoz/ee/query-service/constants"
"github.com/SigNoz/signoz/ee/query-service/dao"
"github.com/SigNoz/signoz/ee/query-service/integrations/gateway"
"github.com/SigNoz/signoz/ee/query-service/rules"
"github.com/SigNoz/signoz/ee/query-service/usage"
"github.com/SigNoz/signoz/pkg/alertmanager"
"github.com/SigNoz/signoz/pkg/cache"
"github.com/SigNoz/signoz/pkg/http/middleware"
"github.com/SigNoz/signoz/pkg/modules/organization"
"github.com/SigNoz/signoz/pkg/prometheus"
"github.com/SigNoz/signoz/pkg/query-service/auth"
"github.com/SigNoz/signoz/pkg/querier"
"github.com/SigNoz/signoz/pkg/signoz"
"github.com/SigNoz/signoz/pkg/sqlstore"
"github.com/SigNoz/signoz/pkg/telemetrystore"
"github.com/SigNoz/signoz/pkg/types"
"github.com/SigNoz/signoz/pkg/types/authtypes"
"github.com/SigNoz/signoz/pkg/web"
"github.com/rs/cors"
"github.com/soheilhy/cmux"
licensepkg "github.com/SigNoz/signoz/ee/query-service/license"
"github.com/SigNoz/signoz/ee/query-service/usage"
"github.com/SigNoz/signoz/pkg/query-service/agentConf"
baseapp "github.com/SigNoz/signoz/pkg/query-service/app"
"github.com/SigNoz/signoz/pkg/query-service/app/clickhouseReader"
"github.com/SigNoz/signoz/pkg/query-service/app/cloudintegrations"
"github.com/SigNoz/signoz/pkg/query-service/app/dashboards"
baseexplorer "github.com/SigNoz/signoz/pkg/query-service/app/explorer"
"github.com/SigNoz/signoz/pkg/query-service/app/integrations"
"github.com/SigNoz/signoz/pkg/query-service/app/logparsingpipeline"
"github.com/SigNoz/signoz/pkg/query-service/app/opamp"
opAmpModel "github.com/SigNoz/signoz/pkg/query-service/app/opamp/model"
"github.com/SigNoz/signoz/pkg/query-service/app/preferences"
"github.com/SigNoz/signoz/pkg/query-service/cache"
baseconst "github.com/SigNoz/signoz/pkg/query-service/constants"
"github.com/SigNoz/signoz/pkg/query-service/healthcheck"
baseint "github.com/SigNoz/signoz/pkg/query-service/interfaces"
basemodel "github.com/SigNoz/signoz/pkg/query-service/model"
baserules "github.com/SigNoz/signoz/pkg/query-service/rules"
"github.com/SigNoz/signoz/pkg/query-service/telemetry"
"github.com/SigNoz/signoz/pkg/query-service/utils"
"go.uber.org/zap"
)
const AppDbEngine = "sqlite"
type ServerOptions struct {
Config signoz.Config
SigNoz *signoz.SigNoz
PromConfigPath string
SkipTopLvlOpsPath string
HTTPHostPort string
PrivateHostPort string
// alert specific params
DisableRules bool
RuleRepoURL string
Config signoz.Config
SigNoz *signoz.SigNoz
HTTPHostPort string
PrivateHostPort string
PreferSpanMetrics bool
CacheConfigPath string
FluxInterval string
FluxIntervalForTraceDetail string
Cluster string
GatewayUrl string
UseLogsNewSchema bool
UseTraceNewSchema bool
Jwt *authtypes.JWT
}
// Server runs HTTP api service
// Server runs HTTP, Mux and a grpc server
type Server struct {
serverOptions *ServerOptions
ruleManager *baserules.Manager
config signoz.Config
signoz *signoz.SigNoz
jwt *authtypes.JWT
ruleManager *baserules.Manager
// public http router
httpConn net.Listener
httpServer *http.Server
httpConn net.Listener
httpServer *http.Server
httpHostPort string
// private http
privateConn net.Listener
privateHTTP *http.Server
privateConn net.Listener
privateHTTP *http.Server
privateHostPort string
opampServer *opamp.Server
// Usage manager
usageManager *usage.Manager
opampServer *opamp.Server
unavailableChannel chan healthcheck.Status
}
// HealthCheckStatus returns health check status channel a client can subscribe to
func (s Server) HealthCheckStatus() chan healthcheck.Status {
return s.unavailableChannel
}
// NewServer creates and initializes Server
func NewServer(serverOptions *ServerOptions) (*Server, error) {
modelDao, err := dao.InitDao(serverOptions.SigNoz.SQLStore)
func NewServer(config signoz.Config, signoz *signoz.SigNoz, jwt *authtypes.JWT) (*Server, error) {
gatewayProxy, err := gateway.NewProxy(config.Gateway.URL.String(), gateway.RoutePrefix)
if err != nil {
return nil, err
}
if err := baseexplorer.InitWithDSN(serverOptions.SigNoz.SQLStore); err != nil {
return nil, err
}
if err := preferences.InitDB(serverOptions.SigNoz.SQLStore.SQLxDB()); err != nil {
return nil, err
}
if err := dashboards.InitDB(serverOptions.SigNoz.SQLStore); err != nil {
return nil, err
}
gatewayProxy, err := gateway.NewProxy(serverOptions.GatewayUrl, gateway.RoutePrefix)
if err != nil {
return nil, err
}
// initiate license manager
lm, err := licensepkg.StartManager(serverOptions.SigNoz.SQLStore.SQLxDB(), serverOptions.SigNoz.SQLStore)
if err != nil {
return nil, err
}
// set license manager as feature flag provider in dao
modelDao.SetFlagProvider(lm)
fluxIntervalForTraceDetail, err := time.ParseDuration(serverOptions.FluxIntervalForTraceDetail)
if err != nil {
return nil, err
}
reader := db.NewDataConnector(
serverOptions.SigNoz.SQLStore.SQLxDB(),
serverOptions.SigNoz.TelemetryStore,
serverOptions.SigNoz.Prometheus,
lm,
serverOptions.Cluster,
serverOptions.UseLogsNewSchema,
serverOptions.UseTraceNewSchema,
fluxIntervalForTraceDetail,
serverOptions.SigNoz.Cache,
reader := clickhouseReader.NewReader(
signoz.SQLStore,
signoz.TelemetryStore,
signoz.Prometheus,
signoz.TelemetryStore.Cluster(),
config.Querier.FluxInterval,
signoz.Cache,
)
skipConfig := &basemodel.SkipConfig{}
if serverOptions.SkipTopLvlOpsPath != "" {
// read skip config
skipConfig, err = basemodel.ReadSkipConfig(serverOptions.SkipTopLvlOpsPath)
if err != nil {
return nil, err
}
}
var c cache.Cache
if serverOptions.CacheConfigPath != "" {
cacheOpts, err := cache.LoadFromYAMLCacheConfigFile(serverOptions.CacheConfigPath)
if err != nil {
return nil, err
}
c = cache.NewCache(cacheOpts)
}
rm, err := makeRulesManager(
serverOptions.RuleRepoURL,
serverOptions.SigNoz.SQLStore.SQLxDB(),
reader,
c,
serverOptions.DisableRules,
lm,
serverOptions.UseLogsNewSchema,
serverOptions.UseTraceNewSchema,
serverOptions.SigNoz.Alertmanager,
serverOptions.SigNoz.SQLStore,
serverOptions.SigNoz.TelemetryStore,
serverOptions.SigNoz.Prometheus,
signoz.Cache,
signoz.Alertmanager,
signoz.SQLStore,
signoz.TelemetryStore,
signoz.Prometheus,
signoz.Modules.OrgGetter,
signoz.Querier,
signoz.Instrumentation.Logger(),
)
if err != nil {
@@ -192,19 +115,16 @@ func NewServer(serverOptions *ServerOptions) (*Server, error) {
}
// initiate opamp
_, err = opAmpModel.InitDB(serverOptions.SigNoz.SQLStore.SQLxDB())
if err != nil {
return nil, err
}
opAmpModel.Init(signoz.SQLStore, signoz.Instrumentation.Logger(), signoz.Modules.OrgGetter)
integrationsController, err := integrations.NewController(serverOptions.SigNoz.SQLStore)
integrationsController, err := integrations.NewController(signoz.SQLStore)
if err != nil {
return nil, fmt.Errorf(
"couldn't create integrations controller: %w", err,
)
}
cloudIntegrationsController, err := cloudintegrations.NewController(serverOptions.SigNoz.SQLStore)
cloudIntegrationsController, err := cloudintegrations.NewController(signoz.SQLStore)
if err != nil {
return nil, fmt.Errorf(
"couldn't create cloud provider integrations controller: %w", err,
@@ -213,7 +133,8 @@ func NewServer(serverOptions *ServerOptions) (*Server, error) {
// ingestion pipelines manager
logParsingPipelineController, err := logparsingpipeline.NewLogParsingPipelinesController(
serverOptions.SigNoz.SQLStore, integrationsController.GetPipelinesForInstalledIntegrations,
signoz.SQLStore,
integrationsController.GetPipelinesForInstalledIntegrations,
)
if err != nil {
return nil, err
@@ -221,7 +142,7 @@ func NewServer(serverOptions *ServerOptions) (*Server, error) {
// initiate agent config handler
agentConfMgr, err := agentConf.Initiate(&agentConf.ManagerOptions{
DB: serverOptions.SigNoz.SQLStore.SQLxDB(),
Store: signoz.SQLStore,
AgentFeatures: []agentConf.AgentFeature{logParsingPipelineController},
})
if err != nil {
@@ -229,59 +150,45 @@ func NewServer(serverOptions *ServerOptions) (*Server, error) {
}
// start the usagemanager
usageManager, err := usage.New(modelDao, lm.GetRepo(), serverOptions.SigNoz.TelemetryStore.ClickhouseDB(), serverOptions.Config.TelemetryStore.Clickhouse.DSN)
usageManager, err := usage.New(signoz.Licensing, signoz.TelemetryStore.ClickhouseDB(), signoz.Zeus, signoz.Modules.OrgGetter)
if err != nil {
return nil, err
}
err = usageManager.Start()
if err != nil {
return nil, err
}
telemetry.GetInstance().SetReader(reader)
telemetry.GetInstance().SetSaasOperator(constants.SaasSegmentKey)
fluxInterval, err := time.ParseDuration(serverOptions.FluxInterval)
err = usageManager.Start(context.Background())
if err != nil {
return nil, err
}
apiOpts := api.APIHandlerOptions{
DataConnector: reader,
SkipConfig: skipConfig,
PreferSpanMetrics: serverOptions.PreferSpanMetrics,
AppDao: modelDao,
RulesManager: rm,
UsageManager: usageManager,
FeatureFlags: lm,
LicenseManager: lm,
IntegrationsController: integrationsController,
CloudIntegrationsController: cloudIntegrationsController,
LogsParsingPipelineController: logParsingPipelineController,
Cache: c,
FluxInterval: fluxInterval,
FluxInterval: config.Querier.FluxInterval,
Gateway: gatewayProxy,
GatewayUrl: serverOptions.GatewayUrl,
UseLogsNewSchema: serverOptions.UseLogsNewSchema,
UseTraceNewSchema: serverOptions.UseTraceNewSchema,
JWT: serverOptions.Jwt,
GatewayUrl: config.Gateway.URL.String(),
JWT: jwt,
}
apiHandler, err := api.NewAPIHandler(apiOpts, serverOptions.SigNoz)
apiHandler, err := api.NewAPIHandler(apiOpts, signoz)
if err != nil {
return nil, err
}
s := &Server{
// logger: logger,
// tracer: tracer,
config: config,
signoz: signoz,
jwt: jwt,
ruleManager: rm,
serverOptions: serverOptions,
httpHostPort: baseconst.HTTPHostPort,
privateHostPort: baseconst.PrivateHostPort,
unavailableChannel: make(chan healthcheck.Status),
usageManager: usageManager,
}
httpServer, err := s.createPublicServer(apiHandler, serverOptions.SigNoz.Web)
httpServer, err := s.createPublicServer(apiHandler, signoz.Web)
if err != nil {
return nil, err
@@ -297,30 +204,28 @@ func NewServer(serverOptions *ServerOptions) (*Server, error) {
s.privateHTTP = privateServer
s.opampServer = opamp.InitializeServer(
&opAmpModel.AllAgents, agentConfMgr,
&opAmpModel.AllAgents, agentConfMgr, signoz.Instrumentation,
)
errorList := reader.PreloadMetricsMetadata(context.Background())
for _, er := range errorList {
zap.L().Error("failed to preload metrics metadata", zap.Error(er))
}
return s, nil
}
func (s *Server) createPrivateServer(apiHandler *api.APIHandler) (*http.Server, error) {
// HealthCheckStatus returns health check status channel a client can subscribe to
func (s Server) HealthCheckStatus() chan healthcheck.Status {
return s.unavailableChannel
}
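HealthCheckStatus hands callers a channel of healthcheck.Status values that the server writes to when a dependency becomes unavailable. A minimal sketch of consuming such a channel; the status type is a stand-in, since the healthcheck package itself is not part of this diff.

package main

import "fmt"

// status stands in for healthcheck.Status.
type status string

func main() {
	unavailable := make(chan status, 1)

	// Inside the server, a component reports that it became unhealthy.
	unavailable <- status("clickhouse unreachable")
	close(unavailable)

	// A caller ranges over the channel returned by HealthCheckStatus
	// and reacts to each update, e.g. by flipping a readiness probe.
	for st := range unavailable {
		fmt.Println("health check:", st)
	}
}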
func (s *Server) createPrivateServer(apiHandler *api.APIHandler) (*http.Server, error) {
r := baseapp.NewRouter()
r.Use(middleware.NewAuth(zap.L(), s.serverOptions.Jwt, []string{"Authorization", "Sec-WebSocket-Protocol"}).Wrap)
r.Use(eemiddleware.NewPat(s.serverOptions.SigNoz.SQLStore, []string{"SIGNOZ-API-KEY"}).Wrap)
r.Use(middleware.NewTimeout(zap.L(),
s.serverOptions.Config.APIServer.Timeout.ExcludedRoutes,
s.serverOptions.Config.APIServer.Timeout.Default,
s.serverOptions.Config.APIServer.Timeout.Max,
r.Use(middleware.NewAuth(s.jwt, []string{"Authorization", "Sec-WebSocket-Protocol"}, s.signoz.Sharder, s.signoz.Instrumentation.Logger()).Wrap)
r.Use(middleware.NewAPIKey(s.signoz.SQLStore, []string{"SIGNOZ-API-KEY"}, s.signoz.Instrumentation.Logger(), s.signoz.Sharder).Wrap)
r.Use(middleware.NewTimeout(s.signoz.Instrumentation.Logger(),
s.config.APIServer.Timeout.ExcludedRoutes,
s.config.APIServer.Timeout.Default,
s.config.APIServer.Timeout.Max,
).Wrap)
r.Use(middleware.NewAnalytics(zap.L()).Wrap)
r.Use(middleware.NewLogging(zap.L(), s.serverOptions.Config.APIServer.Logging.ExcludedRoutes).Wrap)
r.Use(middleware.NewLogging(s.signoz.Instrumentation.Logger(), s.config.APIServer.Logging.ExcludedRoutes).Wrap)
apiHandler.RegisterPrivateRoutes(r)
@@ -341,46 +246,32 @@ func (s *Server) createPrivateServer(apiHandler *api.APIHandler) (*http.Server,
}
func (s *Server) createPublicServer(apiHandler *api.APIHandler, web web.Web) (*http.Server, error) {
r := baseapp.NewRouter()
am := middleware.NewAuthZ(s.signoz.Instrumentation.Logger())
// add auth middleware
getUserFromRequest := func(ctx context.Context) (*types.GettableUser, error) {
user, err := auth.GetUserFromReqContext(ctx)
if err != nil {
return nil, err
}
if user.User.OrgID == "" {
return nil, basemodel.UnauthorizedError(errors.New("orgId is missing in the claims"))
}
return user, nil
}
am := baseapp.NewAuthMiddleware(getUserFromRequest)
r.Use(middleware.NewAuth(zap.L(), s.serverOptions.Jwt, []string{"Authorization", "Sec-WebSocket-Protocol"}).Wrap)
r.Use(eemiddleware.NewPat(s.serverOptions.SigNoz.SQLStore, []string{"SIGNOZ-API-KEY"}).Wrap)
r.Use(middleware.NewTimeout(zap.L(),
s.serverOptions.Config.APIServer.Timeout.ExcludedRoutes,
s.serverOptions.Config.APIServer.Timeout.Default,
s.serverOptions.Config.APIServer.Timeout.Max,
r.Use(middleware.NewAuth(s.jwt, []string{"Authorization", "Sec-WebSocket-Protocol"}, s.signoz.Sharder, s.signoz.Instrumentation.Logger()).Wrap)
r.Use(middleware.NewAPIKey(s.signoz.SQLStore, []string{"SIGNOZ-API-KEY"}, s.signoz.Instrumentation.Logger(), s.signoz.Sharder).Wrap)
r.Use(middleware.NewTimeout(s.signoz.Instrumentation.Logger(),
s.config.APIServer.Timeout.ExcludedRoutes,
s.config.APIServer.Timeout.Default,
s.config.APIServer.Timeout.Max,
).Wrap)
r.Use(middleware.NewAnalytics(zap.L()).Wrap)
r.Use(middleware.NewLogging(zap.L(), s.serverOptions.Config.APIServer.Logging.ExcludedRoutes).Wrap)
r.Use(middleware.NewLogging(s.signoz.Instrumentation.Logger(), s.config.APIServer.Logging.ExcludedRoutes).Wrap)
apiHandler.RegisterRoutes(r, am)
apiHandler.RegisterLogsRoutes(r, am)
apiHandler.RegisterIntegrationRoutes(r, am)
apiHandler.RegisterCloudIntegrationsRoutes(r, am)
apiHandler.RegisterFieldsRoutes(r, am)
apiHandler.RegisterQueryRangeV3Routes(r, am)
apiHandler.RegisterInfraMetricsRoutes(r, am)
apiHandler.RegisterQueryRangeV4Routes(r, am)
apiHandler.RegisterQueryRangeV5Routes(r, am)
apiHandler.RegisterWebSocketPaths(r, am)
apiHandler.RegisterMessagingQueuesRoutes(r, am)
apiHandler.RegisterThirdPartyApiRoutes(r, am)
apiHandler.MetricExplorerRoutes(r, am)
apiHandler.RegisterTraceFunnelsRoutes(r, am)
c := cors.New(cors.Options{
AllowedOrigins: []string{"*"},
@@ -406,7 +297,7 @@ func (s *Server) createPublicServer(apiHandler *api.APIHandler, web web.Web) (*h
func (s *Server) initListeners() error {
// listen on public port
var err error
publicHostPort := s.serverOptions.HTTPHostPort
publicHostPort := s.httpHostPort
if publicHostPort == "" {
return fmt.Errorf("baseconst.HTTPHostPort is required")
}
@@ -416,10 +307,10 @@ func (s *Server) initListeners() error {
return err
}
zap.L().Info(fmt.Sprintf("Query server started listening on %s...", s.serverOptions.HTTPHostPort))
zap.L().Info(fmt.Sprintf("Query server started listening on %s...", s.httpHostPort))
// listen on private port to support internal services
privateHostPort := s.serverOptions.PrivateHostPort
privateHostPort := s.privateHostPort
if privateHostPort == "" {
return fmt.Errorf("baseconst.PrivateHostPort is required")
@@ -429,20 +320,14 @@ func (s *Server) initListeners() error {
if err != nil {
return err
}
zap.L().Info(fmt.Sprintf("Query server started listening on private port %s...", s.serverOptions.PrivateHostPort))
zap.L().Info(fmt.Sprintf("Query server started listening on private port %s...", s.privateHostPort))
return nil
}
// Start listening on http and private http port concurrently
func (s *Server) Start() error {
// initiate rule manager first
if !s.serverOptions.DisableRules {
s.ruleManager.Start()
} else {
zap.L().Info("msg: Rules disabled as rules.disable is set to TRUE")
}
func (s *Server) Start(ctx context.Context) error {
s.ruleManager.Start(ctx)
err := s.initListeners()
if err != nil {
@@ -455,7 +340,7 @@ func (s *Server) Start() error {
}
go func() {
zap.L().Info("Starting HTTP server", zap.Int("port", httpPort), zap.String("addr", s.serverOptions.HTTPHostPort))
zap.L().Info("Starting HTTP server", zap.Int("port", httpPort), zap.String("addr", s.httpHostPort))
switch err := s.httpServer.Serve(s.httpConn); err {
case nil, http.ErrServerClosed, cmux.ErrListenerClosed:
@@ -481,7 +366,7 @@ func (s *Server) Start() error {
}
go func() {
zap.L().Info("Starting Private HTTP server", zap.Int("port", privatePort), zap.String("addr", s.serverOptions.PrivateHostPort))
zap.L().Info("Starting Private HTTP server", zap.Int("port", privatePort), zap.String("addr", s.privateHostPort))
switch err := s.privateHTTP.Serve(s.privateConn); err {
case nil, http.ErrServerClosed, cmux.ErrListenerClosed:
@@ -507,15 +392,15 @@ func (s *Server) Start() error {
return nil
}
func (s *Server) Stop() error {
func (s *Server) Stop(ctx context.Context) error {
if s.httpServer != nil {
if err := s.httpServer.Shutdown(context.Background()); err != nil {
if err := s.httpServer.Shutdown(ctx); err != nil {
return err
}
}
if s.privateHTTP != nil {
if err := s.privateHTTP.Shutdown(context.Background()); err != nil {
if err := s.privateHTTP.Shutdown(ctx); err != nil {
return err
}
}
@@ -523,48 +408,42 @@ func (s *Server) Stop() error {
s.opampServer.Stop()
if s.ruleManager != nil {
s.ruleManager.Stop()
s.ruleManager.Stop(ctx)
}
// stop usage manager
s.usageManager.Stop()
s.usageManager.Stop(ctx)
return nil
}
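Stop now takes a context, so a caller can bound how long shutdown of the HTTP servers, rule manager and usage manager may run. A minimal sketch of driving the context-aware Stop follows; the helper name and the 30-second budget are illustrative assumptions, not part of this diff (requires the context and time imports).
// shutdownWithTimeout drives the context-aware Stop; "srv" is assumed to be
// the *Server returned by NewServer. The 30s budget is an illustrative choice.
func shutdownWithTimeout(srv *Server) error {
	ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
	defer cancel()
	return srv.Stop(ctx)
}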
func makeRulesManager(
ruleRepoURL string,
db *sqlx.DB,
ch baseint.Reader,
cache cache.Cache,
disableRules bool,
fm baseint.FeatureLookup,
useLogsNewSchema bool,
useTraceNewSchema bool,
alertmanager alertmanager.Alertmanager,
sqlstore sqlstore.SQLStore,
telemetryStore telemetrystore.TelemetryStore,
prometheus prometheus.Prometheus,
orgGetter organization.Getter,
querier querier.Querier,
logger *slog.Logger,
) (*baserules.Manager, error) {
// create manager opts
managerOpts := &baserules.ManagerOptions{
TelemetryStore: telemetryStore,
Prometheus: prometheus,
RepoURL: ruleRepoURL,
DBConn: db,
Context: context.Background(),
Logger: zap.L(),
DisableRules: disableRules,
FeatureFlags: fm,
Reader: ch,
Querier: querier,
SLogger: logger,
Cache: cache,
EvalDelay: baseconst.GetEvalDelay(),
PrepareTaskFunc: rules.PrepareTaskFunc,
UseLogsNewSchema: useLogsNewSchema,
UseTraceNewSchema: useTraceNewSchema,
PrepareTestRuleFunc: rules.TestNotification,
Alertmanager: alertmanager,
SQLStore: sqlstore,
OrgGetter: orgGetter,
}
// create Manager


@@ -33,3 +33,18 @@ func GetOrDefaultEnv(key string, fallback string) string {
func GetDefaultSiteURL() string {
return GetOrDefaultEnv("SIGNOZ_SITE_URL", DefaultSiteURL)
}
const DotMetricsEnabled = "DOT_METRICS_ENABLED"
var IsDotMetricsEnabled = false
var IsPreferSpanMetrics = false
func init() {
if GetOrDefaultEnv(DotMetricsEnabled, "false") == "true" {
IsDotMetricsEnabled = true
}
if GetOrDefaultEnv("USE_SPAN_METRICS", "false") == "true" {
IsPreferSpanMetrics = true
}
}
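These flags are resolved once at package init from process environment variables, so they must be set before the query service starts. Below is a minimal, self-contained sketch of the same env-driven toggle pattern; the names and the standalone main are illustrative, not taken from the diff.
package main

import (
	"fmt"
	"os"
)

// getOrDefaultEnv mirrors the helper above: return the env value if set,
// otherwise the fallback. Illustrative re-implementation only.
func getOrDefaultEnv(key, fallback string) string {
	if v, ok := os.LookupEnv(key); ok {
		return v
	}
	return fallback
}

// Evaluated once at program start, like the init() block above.
var dotMetricsEnabled = getOrDefaultEnv("DOT_METRICS_ENABLED", "false") == "true"

func main() {
	fmt.Println("dot metrics enabled:", dotMetricsEnabled)
}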


@@ -1,10 +0,0 @@
package dao
import (
"github.com/SigNoz/signoz/ee/query-service/dao/sqlite"
"github.com/SigNoz/signoz/pkg/sqlstore"
)
func InitDao(sqlStore sqlstore.SQLStore) (ModelDao, error) {
return sqlite.InitDB(sqlStore)
}


@@ -1,45 +0,0 @@
package dao
import (
"context"
"net/url"
"github.com/SigNoz/signoz/ee/types"
basedao "github.com/SigNoz/signoz/pkg/query-service/dao"
baseint "github.com/SigNoz/signoz/pkg/query-service/interfaces"
basemodel "github.com/SigNoz/signoz/pkg/query-service/model"
ossTypes "github.com/SigNoz/signoz/pkg/types"
"github.com/SigNoz/signoz/pkg/types/authtypes"
"github.com/google/uuid"
"github.com/uptrace/bun"
)
type ModelDao interface {
basedao.ModelDao
// SetFlagProvider sets the feature lookup provider
SetFlagProvider(flags baseint.FeatureLookup)
DB() *bun.DB
// auth methods
CanUsePassword(ctx context.Context, email string) (bool, basemodel.BaseApiError)
PrepareSsoRedirect(ctx context.Context, redirectUri, email string, jwt *authtypes.JWT) (redirectURL string, apierr basemodel.BaseApiError)
GetDomainFromSsoResponse(ctx context.Context, relayState *url.URL) (*types.GettableOrgDomain, error)
// org domain (auth domains) CRUD ops
ListDomains(ctx context.Context, orgId string) ([]types.GettableOrgDomain, basemodel.BaseApiError)
GetDomain(ctx context.Context, id uuid.UUID) (*types.GettableOrgDomain, basemodel.BaseApiError)
CreateDomain(ctx context.Context, d *types.GettableOrgDomain) basemodel.BaseApiError
UpdateDomain(ctx context.Context, domain *types.GettableOrgDomain) basemodel.BaseApiError
DeleteDomain(ctx context.Context, id uuid.UUID) basemodel.BaseApiError
GetDomainByEmail(ctx context.Context, email string) (*types.GettableOrgDomain, basemodel.BaseApiError)
CreatePAT(ctx context.Context, orgID string, p types.GettablePAT) (types.GettablePAT, basemodel.BaseApiError)
UpdatePAT(ctx context.Context, orgID string, p types.GettablePAT, id string) basemodel.BaseApiError
GetPAT(ctx context.Context, pat string) (*types.GettablePAT, basemodel.BaseApiError)
GetPATByID(ctx context.Context, orgID string, id string) (*types.GettablePAT, basemodel.BaseApiError)
GetUserByPAT(ctx context.Context, orgID string, token string) (*ossTypes.GettableUser, basemodel.BaseApiError)
ListPATs(ctx context.Context, orgID string) ([]types.GettablePAT, basemodel.BaseApiError)
RevokePAT(ctx context.Context, orgID string, id string, userID string) basemodel.BaseApiError
}


@@ -1,204 +0,0 @@
package sqlite
import (
"context"
"fmt"
"net/url"
"strings"
"time"
"github.com/SigNoz/signoz/ee/query-service/constants"
"github.com/SigNoz/signoz/ee/query-service/model"
baseauth "github.com/SigNoz/signoz/pkg/query-service/auth"
baseconst "github.com/SigNoz/signoz/pkg/query-service/constants"
basemodel "github.com/SigNoz/signoz/pkg/query-service/model"
"github.com/SigNoz/signoz/pkg/query-service/utils"
"github.com/SigNoz/signoz/pkg/types"
"github.com/SigNoz/signoz/pkg/types/authtypes"
"github.com/google/uuid"
"go.uber.org/zap"
)
func (m *modelDao) createUserForSAMLRequest(ctx context.Context, email string) (*types.User, basemodel.BaseApiError) {
// get auth domain from email domain
domain, apierr := m.GetDomainByEmail(ctx, email)
if apierr != nil {
zap.L().Error("failed to get domain from email", zap.Error(apierr))
return nil, model.InternalErrorStr("failed to get domain from email")
}
if domain == nil {
zap.L().Error("email domain does not match any authenticated domain", zap.String("email", email))
return nil, model.InternalErrorStr("email domain does not match any authenticated domain")
}
hash, err := baseauth.PasswordHash(utils.GeneratePassowrd())
if err != nil {
zap.L().Error("failed to generate password hash when registering a user via SSO redirect", zap.Error(err))
return nil, model.InternalErrorStr("failed to generate password hash")
}
group, apiErr := m.GetGroupByName(ctx, baseconst.ViewerGroup)
if apiErr != nil {
zap.L().Error("GetGroupByName failed", zap.Error(apiErr))
return nil, apiErr
}
user := &types.User{
ID: uuid.NewString(),
Name: "",
Email: email,
Password: hash,
TimeAuditable: types.TimeAuditable{
CreatedAt: time.Now(),
},
ProfilePictureURL: "", // Currently unused
GroupID: group.ID,
OrgID: domain.OrgID,
}
user, apiErr = m.CreateUser(ctx, user, false)
if apiErr != nil {
zap.L().Error("CreateUser failed", zap.Error(apiErr))
return nil, apiErr
}
return user, nil
}
// PrepareSsoRedirect prepares redirect page link after SSO response
// is successfully parsed (i.e. valid email is available)
func (m *modelDao) PrepareSsoRedirect(ctx context.Context, redirectUri, email string, jwt *authtypes.JWT) (redirectURL string, apierr basemodel.BaseApiError) {
userPayload, apierr := m.GetUserByEmail(ctx, email)
if !apierr.IsNil() {
zap.L().Error("failed to get user with email received from auth provider", zap.String("error", apierr.Error()))
return "", model.BadRequestStr("invalid user email received from the auth provider")
}
user := &types.User{}
if userPayload == nil {
newUser, apiErr := m.createUserForSAMLRequest(ctx, email)
user = newUser
if apiErr != nil {
zap.L().Error("failed to create user with email received from auth provider", zap.Error(apiErr))
return "", apiErr
}
} else {
user = &userPayload.User
}
tokenStore, err := baseauth.GenerateJWTForUser(user, jwt)
if err != nil {
zap.L().Error("failed to generate token for SSO login user", zap.Error(err))
return "", model.InternalErrorStr("failed to generate token for the user")
}
return fmt.Sprintf("%s?jwt=%s&usr=%s&refreshjwt=%s",
redirectUri,
tokenStore.AccessJwt,
user.ID,
tokenStore.RefreshJwt), nil
}
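The redirect contract is a plain query string: the frontend reads jwt, usr and refreshjwt from the URL it is redirected to. A hedged sketch of parsing that URL on the receiving side is shown below; the helper name is hypothetical (uses the net/url import already present in this file).
// parseSsoRedirect is an illustrative counterpart to PrepareSsoRedirect: it
// extracts the jwt, usr and refreshjwt query parameters from the redirect URL.
func parseSsoRedirect(raw string) (accessJwt, userID, refreshJwt string, err error) {
	u, err := url.Parse(raw)
	if err != nil {
		return "", "", "", err
	}
	q := u.Query()
	return q.Get("jwt"), q.Get("usr"), q.Get("refreshjwt"), nil
}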
func (m *modelDao) CanUsePassword(ctx context.Context, email string) (bool, basemodel.BaseApiError) {
domain, apierr := m.GetDomainByEmail(ctx, email)
if apierr != nil {
return false, apierr
}
if domain != nil && domain.SsoEnabled {
// sso is enabled, check if the user has admin role
userPayload, baseapierr := m.GetUserByEmail(ctx, email)
if baseapierr != nil || userPayload == nil {
return false, baseapierr
}
if userPayload.Role != baseconst.AdminGroup {
return false, model.BadRequest(fmt.Errorf("auth method not supported"))
}
}
return true, nil
}
// PrecheckLogin is called when the login or signup page is loaded
// to check sso login is to be prompted
func (m *modelDao) PrecheckLogin(ctx context.Context, email, sourceUrl string) (*basemodel.PrecheckResponse, basemodel.BaseApiError) {
// assume user is valid unless proven otherwise
resp := &basemodel.PrecheckResponse{IsUser: true, CanSelfRegister: false}
// check if email is a valid user
userPayload, baseApiErr := m.GetUserByEmail(ctx, email)
if baseApiErr != nil {
return resp, baseApiErr
}
if userPayload == nil {
resp.IsUser = false
}
ssoAvailable := true
err := m.checkFeature(model.SSO)
if err != nil {
switch err.(type) {
case basemodel.ErrFeatureUnavailable:
// do nothing, just skip sso
ssoAvailable = false
default:
zap.L().Error("feature check failed", zap.String("featureKey", model.SSO), zap.Error(err))
return resp, model.BadRequestStr(err.Error())
}
}
if ssoAvailable {
resp.IsUser = true
// find domain from email
orgDomain, apierr := m.GetDomainByEmail(ctx, email)
if apierr != nil {
var emailDomain string
emailComponents := strings.Split(email, "@")
if len(emailComponents) > 0 {
emailDomain = emailComponents[1]
}
zap.L().Error("failed to get org domain from email", zap.String("emailDomain", emailDomain), zap.Error(apierr.ToError()))
return resp, apierr
}
if orgDomain != nil && orgDomain.SsoEnabled {
// SAML is enabled for this domain; prepare the SSO URL
if sourceUrl == "" {
sourceUrl = constants.GetDefaultSiteURL()
}
// parse source url that generated the login request
var err error
escapedUrl, _ := url.QueryUnescape(sourceUrl)
siteUrl, err := url.Parse(escapedUrl)
if err != nil {
zap.L().Error("failed to parse referer", zap.Error(err))
return resp, model.InternalError(fmt.Errorf("failed to generate login request"))
}
// build the IdP URL that will authenticate the user
// the front-end will redirect user to this url
resp.SsoUrl, err = orgDomain.BuildSsoUrl(siteUrl)
if err != nil {
zap.L().Error("failed to prepare saml request for domain", zap.String("domain", orgDomain.Name), zap.Error(err))
return resp, model.InternalError(err)
}
// set SSO to true, as the url is generated correctly
resp.SSO = true
}
}
return resp, nil
}
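A login handler is expected to branch on this precheck result: when SSO is true it redirects the browser to SsoUrl, otherwise it falls back to password login, and IsUser tells it whether the email is known at all. A hedged sketch of that branching follows; the field names come from basemodel.PrecheckResponse used above, while the helper and its return values are hypothetical.
// decideLoginFlow is an illustrative sketch of how a caller might act on the
// precheck result; it is not part of this diff.
func decideLoginFlow(resp *basemodel.PrecheckResponse) string {
	switch {
	case resp.SSO:
		return "redirect:" + resp.SsoUrl
	case resp.IsUser:
		return "password-login"
	default:
		return "signup-required"
	}
}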


@@ -1,272 +0,0 @@
package sqlite
import (
"context"
"database/sql"
"encoding/json"
"fmt"
"net/url"
"strings"
"time"
"github.com/SigNoz/signoz/ee/query-service/model"
"github.com/SigNoz/signoz/ee/types"
basemodel "github.com/SigNoz/signoz/pkg/query-service/model"
ossTypes "github.com/SigNoz/signoz/pkg/types"
"github.com/google/uuid"
"go.uber.org/zap"
)
// GetDomainFromSsoResponse uses relay state received from IdP to fetch
// user domain. The domain is further used to process validity of the response.
// When sending the login request to the IdP, we send the relay state as a URL (the site URL)
// with domainId or domainName as a query parameter.
func (m *modelDao) GetDomainFromSsoResponse(ctx context.Context, relayState *url.URL) (*types.GettableOrgDomain, error) {
// derive domain id from relay state now
var domainIdStr string
var domainNameStr string
var domain *types.GettableOrgDomain
for k, v := range relayState.Query() {
if k == "domainId" && len(v) > 0 {
domainIdStr = strings.Replace(v[0], ":", "-", -1)
}
if k == "domainName" && len(v) > 0 {
domainNameStr = v[0]
}
}
if domainIdStr != "" {
domainId, err := uuid.Parse(domainIdStr)
if err != nil {
zap.L().Error("failed to parse domainId from relay state", zap.Error(err))
return nil, fmt.Errorf("failed to parse domainId from IdP response")
}
domain, err = m.GetDomain(ctx, domainId)
if (err != nil) || domain == nil {
zap.L().Error("failed to find domain from domainId received in IdP response", zap.Error(err))
return nil, fmt.Errorf("invalid credentials")
}
}
if domainNameStr != "" {
domainFromDB, err := m.GetDomainByName(ctx, domainNameStr)
domain = domainFromDB
if (err != nil) || domain == nil {
zap.L().Error("failed to find domain from domainName received in IdP response", zap.Error(err))
return nil, fmt.Errorf("invalid credentials")
}
}
if domain != nil {
return domain, nil
}
return nil, fmt.Errorf("failed to find domain received in IdP response")
}
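The relay state the IdP echoes back is just the site URL carrying a domainId (a UUID whose dashes are written as ':') or a domainName query parameter; the lookup above reverses that encoding. A hedged sketch of building such a relay state is shown below; the helper is an illustrative inverse of what this function expects, not code from the diff (uses the net/url, strings and uuid imports already present in this file).
// buildRelayState encodes a domainId into the relay-state URL in the form
// GetDomainFromSsoResponse expects (UUID dashes written as ':'). Illustrative only.
func buildRelayState(siteURL *url.URL, domainID uuid.UUID) string {
	q := siteURL.Query()
	q.Set("domainId", strings.Replace(domainID.String(), "-", ":", -1))
	siteURL.RawQuery = q.Encode()
	return siteURL.String()
}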
// GetDomainByName returns org domain for a given domain name
func (m *modelDao) GetDomainByName(ctx context.Context, name string) (*types.GettableOrgDomain, basemodel.BaseApiError) {
stored := types.StorableOrgDomain{}
err := m.DB().NewSelect().
Model(&stored).
Where("name = ?", name).
Limit(1).
Scan(ctx)
if err != nil {
if err == sql.ErrNoRows {
return nil, model.BadRequest(fmt.Errorf("invalid domain name"))
}
return nil, model.InternalError(err)
}
domain := &types.GettableOrgDomain{StorableOrgDomain: stored}
if err := domain.LoadConfig(stored.Data); err != nil {
return nil, model.InternalError(err)
}
return domain, nil
}
// GetDomain returns org domain for a given domain id
func (m *modelDao) GetDomain(ctx context.Context, id uuid.UUID) (*types.GettableOrgDomain, basemodel.BaseApiError) {
stored := types.StorableOrgDomain{}
err := m.DB().NewSelect().
Model(&stored).
Where("id = ?", id).
Limit(1).
Scan(ctx)
if err != nil {
if err == sql.ErrNoRows {
return nil, model.BadRequest(fmt.Errorf("invalid domain id"))
}
return nil, model.InternalError(err)
}
domain := &types.GettableOrgDomain{StorableOrgDomain: stored}
if err := domain.LoadConfig(stored.Data); err != nil {
return nil, model.InternalError(err)
}
return domain, nil
}
// ListDomains gets the list of auth domains by org id
func (m *modelDao) ListDomains(ctx context.Context, orgId string) ([]types.GettableOrgDomain, basemodel.BaseApiError) {
domains := []types.GettableOrgDomain{}
stored := []types.StorableOrgDomain{}
err := m.DB().NewSelect().
Model(&stored).
Where("org_id = ?", orgId).
Scan(ctx)
if err != nil {
if err == sql.ErrNoRows {
return domains, nil
}
return nil, model.InternalError(err)
}
for _, s := range stored {
domain := types.GettableOrgDomain{StorableOrgDomain: s}
if err := domain.LoadConfig(s.Data); err != nil {
zap.L().Error("ListDomains() failed", zap.Error(err))
}
domains = append(domains, domain)
}
return domains, nil
}
// CreateDomain creates a new auth domain
func (m *modelDao) CreateDomain(ctx context.Context, domain *types.GettableOrgDomain) basemodel.BaseApiError {
if domain.ID == uuid.Nil {
domain.ID = uuid.New()
}
if domain.OrgID == "" || domain.Name == "" {
return model.BadRequest(fmt.Errorf("domain creation failed, missing fields: OrgID, Name "))
}
configJson, err := json.Marshal(domain)
if err != nil {
zap.L().Error("failed to unmarshal domain config", zap.Error(err))
return model.InternalError(fmt.Errorf("domain creation failed"))
}
storableDomain := types.StorableOrgDomain{
ID: domain.ID,
Name: domain.Name,
OrgID: domain.OrgID,
Data: string(configJson),
TimeAuditable: ossTypes.TimeAuditable{CreatedAt: time.Now(), UpdatedAt: time.Now()},
}
_, err = m.DB().NewInsert().
Model(&storableDomain).
Exec(ctx)
if err != nil {
zap.L().Error("failed to insert domain in db", zap.Error(err))
return model.InternalError(fmt.Errorf("domain creation failed"))
}
return nil
}
// UpdateDomain updates stored config params for a domain
func (m *modelDao) UpdateDomain(ctx context.Context, domain *types.GettableOrgDomain) basemodel.BaseApiError {
if domain.ID == uuid.Nil {
zap.L().Error("domain update failed", zap.Error(fmt.Errorf("OrgDomain.Id is null")))
return model.InternalError(fmt.Errorf("domain update failed"))
}
configJson, err := json.Marshal(domain)
if err != nil {
zap.L().Error("domain update failed", zap.Error(err))
return model.InternalError(fmt.Errorf("domain update failed"))
}
storableDomain := &types.StorableOrgDomain{
ID: domain.ID,
Name: domain.Name,
OrgID: domain.OrgID,
Data: string(configJson),
TimeAuditable: ossTypes.TimeAuditable{UpdatedAt: time.Now()},
}
_, err = m.DB().NewUpdate().
Model(storableDomain).
Column("data", "updated_at").
WherePK().
Exec(ctx)
if err != nil {
zap.L().Error("domain update failed", zap.Error(err))
return model.InternalError(fmt.Errorf("domain update failed"))
}
return nil
}
// DeleteDomain deletes an org domain
func (m *modelDao) DeleteDomain(ctx context.Context, id uuid.UUID) basemodel.BaseApiError {
if id == uuid.Nil {
zap.L().Error("domain delete failed", zap.Error(fmt.Errorf("OrgDomain.Id is null")))
return model.InternalError(fmt.Errorf("domain delete failed"))
}
storableDomain := &types.StorableOrgDomain{ID: id}
_, err := m.DB().NewDelete().
Model(storableDomain).
WherePK().
Exec(ctx)
if err != nil {
zap.L().Error("domain delete failed", zap.Error(err))
return model.InternalError(fmt.Errorf("domain delete failed"))
}
return nil
}
func (m *modelDao) GetDomainByEmail(ctx context.Context, email string) (*types.GettableOrgDomain, basemodel.BaseApiError) {
if email == "" {
return nil, model.BadRequest(fmt.Errorf("could not find auth domain, missing fields: email "))
}
components := strings.Split(email, "@")
if len(components) < 2 {
return nil, model.BadRequest(fmt.Errorf("invalid email address"))
}
parsedDomain := components[1]
stored := types.StorableOrgDomain{}
err := m.DB().NewSelect().
Model(&stored).
Where("name = ?", parsedDomain).
Limit(1).
Scan(ctx)
if err != nil {
if err == sql.ErrNoRows {
return nil, nil
}
return nil, model.InternalError(err)
}
domain := &types.GettableOrgDomain{StorableOrgDomain: stored}
if err := domain.LoadConfig(stored.Data); err != nil {
return nil, model.InternalError(err)
}
return domain, nil
}


@@ -1,46 +0,0 @@
package sqlite
import (
"fmt"
basedao "github.com/SigNoz/signoz/pkg/query-service/dao"
basedsql "github.com/SigNoz/signoz/pkg/query-service/dao/sqlite"
baseint "github.com/SigNoz/signoz/pkg/query-service/interfaces"
"github.com/SigNoz/signoz/pkg/sqlstore"
"github.com/uptrace/bun"
)
type modelDao struct {
*basedsql.ModelDaoSqlite
flags baseint.FeatureLookup
}
// SetFlagProvider sets the feature lookup provider
func (m *modelDao) SetFlagProvider(flags baseint.FeatureLookup) {
m.flags = flags
}
// CheckFeature confirms if a feature is available
func (m *modelDao) checkFeature(key string) error {
if m.flags == nil {
return fmt.Errorf("flag provider not set")
}
return m.flags.CheckFeature(key)
}
// InitDB creates and extends base model DB repository
func InitDB(sqlStore sqlstore.SQLStore) (*modelDao, error) {
dao, err := basedsql.InitDB(sqlStore)
if err != nil {
return nil, err
}
// set package variable so dependent base methods (e.g. AuthCache) will work
basedao.SetDB(dao)
m := &modelDao{ModelDaoSqlite: dao}
return m, nil
}
func (m *modelDao) DB() *bun.DB {
return m.ModelDaoSqlite.DB()
}


@@ -1,220 +0,0 @@
package sqlite
import (
"context"
"fmt"
"time"
"github.com/SigNoz/signoz/ee/query-service/model"
"github.com/SigNoz/signoz/ee/types"
basemodel "github.com/SigNoz/signoz/pkg/query-service/model"
ossTypes "github.com/SigNoz/signoz/pkg/types"
"go.uber.org/zap"
)
func (m *modelDao) CreatePAT(ctx context.Context, orgID string, p types.GettablePAT) (types.GettablePAT, basemodel.BaseApiError) {
p.StorablePersonalAccessToken.OrgID = orgID
_, err := m.DB().NewInsert().
Model(&p.StorablePersonalAccessToken).
Exec(ctx)
if err != nil {
zap.L().Error("Failed to insert PAT in db, err: %v", zap.Error(err))
return types.GettablePAT{}, model.InternalError(fmt.Errorf("PAT insertion failed"))
}
createdByUser, _ := m.GetUser(ctx, p.UserID)
if createdByUser == nil {
p.CreatedByUser = types.PatUser{
NotFound: true,
}
} else {
p.CreatedByUser = types.PatUser{
User: ossTypes.User{
ID: createdByUser.ID,
Name: createdByUser.Name,
Email: createdByUser.Email,
TimeAuditable: ossTypes.TimeAuditable{
CreatedAt: createdByUser.CreatedAt,
UpdatedAt: createdByUser.UpdatedAt,
},
ProfilePictureURL: createdByUser.ProfilePictureURL,
},
NotFound: false,
}
}
return p, nil
}
func (m *modelDao) UpdatePAT(ctx context.Context, orgID string, p types.GettablePAT, id string) basemodel.BaseApiError {
_, err := m.DB().NewUpdate().
Model(&p.StorablePersonalAccessToken).
Column("role", "name", "updated_at", "updated_by_user_id").
Where("id = ?", id).
Where("org_id = ?", orgID).
Where("revoked = false").
Exec(ctx)
if err != nil {
zap.L().Error("Failed to update PAT in db, err: %v", zap.Error(err))
return model.InternalError(fmt.Errorf("PAT update failed"))
}
return nil
}
func (m *modelDao) ListPATs(ctx context.Context, orgID string) ([]types.GettablePAT, basemodel.BaseApiError) {
pats := []types.StorablePersonalAccessToken{}
if err := m.DB().NewSelect().
Model(&pats).
Where("revoked = false").
Where("org_id = ?", orgID).
Order("updated_at DESC").
Scan(ctx); err != nil {
zap.L().Error("Failed to fetch PATs err: %v", zap.Error(err))
return nil, model.InternalError(fmt.Errorf("failed to fetch PATs"))
}
patsWithUsers := []types.GettablePAT{}
for i := range pats {
patWithUser := types.GettablePAT{
StorablePersonalAccessToken: pats[i],
}
createdByUser, _ := m.GetUser(ctx, pats[i].UserID)
if createdByUser == nil {
patWithUser.CreatedByUser = types.PatUser{
NotFound: true,
}
} else {
patWithUser.CreatedByUser = types.PatUser{
User: ossTypes.User{
ID: createdByUser.ID,
Name: createdByUser.Name,
Email: createdByUser.Email,
TimeAuditable: ossTypes.TimeAuditable{
CreatedAt: createdByUser.CreatedAt,
UpdatedAt: createdByUser.UpdatedAt,
},
ProfilePictureURL: createdByUser.ProfilePictureURL,
},
NotFound: false,
}
}
updatedByUser, _ := m.GetUser(ctx, pats[i].UpdatedByUserID)
if updatedByUser == nil {
patWithUser.UpdatedByUser = types.PatUser{
NotFound: true,
}
} else {
patWithUser.UpdatedByUser = types.PatUser{
User: ossTypes.User{
ID: updatedByUser.ID,
Name: updatedByUser.Name,
Email: updatedByUser.Email,
TimeAuditable: ossTypes.TimeAuditable{
CreatedAt: updatedByUser.CreatedAt,
UpdatedAt: updatedByUser.UpdatedAt,
},
ProfilePictureURL: updatedByUser.ProfilePictureURL,
},
NotFound: false,
}
}
patsWithUsers = append(patsWithUsers, patWithUser)
}
return patsWithUsers, nil
}
func (m *modelDao) RevokePAT(ctx context.Context, orgID string, id string, userID string) basemodel.BaseApiError {
updatedAt := time.Now().Unix()
_, err := m.DB().NewUpdate().
Model(&types.StorablePersonalAccessToken{}).
Set("revoked = ?", true).
Set("updated_by_user_id = ?", userID).
Set("updated_at = ?", updatedAt).
Where("id = ?", id).
Where("org_id = ?", orgID).
Exec(ctx)
if err != nil {
zap.L().Error("Failed to revoke PAT in db, err: %v", zap.Error(err))
return model.InternalError(fmt.Errorf("PAT revoke failed"))
}
return nil
}
func (m *modelDao) GetPAT(ctx context.Context, token string) (*types.GettablePAT, basemodel.BaseApiError) {
pats := []types.StorablePersonalAccessToken{}
if err := m.DB().NewSelect().
Model(&pats).
Where("token = ?", token).
Where("revoked = false").
Scan(ctx); err != nil {
return nil, model.InternalError(fmt.Errorf("failed to fetch PAT"))
}
if len(pats) != 1 {
return nil, &model.ApiError{
Typ: model.ErrorInternal,
Err: fmt.Errorf("found zero or multiple PATs with same token, %s", token),
}
}
patWithUser := types.GettablePAT{
StorablePersonalAccessToken: pats[0],
}
return &patWithUser, nil
}
func (m *modelDao) GetPATByID(ctx context.Context, orgID string, id string) (*types.GettablePAT, basemodel.BaseApiError) {
pats := []types.StorablePersonalAccessToken{}
if err := m.DB().NewSelect().
Model(&pats).
Where("id = ?", id).
Where("org_id = ?", orgID).
Where("revoked = false").
Scan(ctx); err != nil {
return nil, model.InternalError(fmt.Errorf("failed to fetch PAT"))
}
if len(pats) != 1 {
return nil, &model.ApiError{
Typ: model.ErrorInternal,
Err: fmt.Errorf("found zero or multiple PATs with same token"),
}
}
patWithUser := types.GettablePAT{
StorablePersonalAccessToken: pats[0],
}
return &patWithUser, nil
}
// deprecated
func (m *modelDao) GetUserByPAT(ctx context.Context, orgID string, token string) (*ossTypes.GettableUser, basemodel.BaseApiError) {
users := []ossTypes.GettableUser{}
if err := m.DB().NewSelect().
Model(&users).
Column("u.id", "u.name", "u.email", "u.password", "u.created_at", "u.profile_picture_url", "u.org_id", "u.group_id").
Join("JOIN personal_access_tokens p ON u.id = p.user_id").
Where("p.token = ?", token).
Where("p.expires_at >= strftime('%s', 'now')").
Where("p.org_id = ?", orgID).
Scan(ctx); err != nil {
return nil, model.InternalError(fmt.Errorf("failed to fetch user from PAT, err: %v", err))
}
if len(users) != 1 {
return nil, &model.ApiError{
Typ: model.ErrorInternal,
Err: fmt.Errorf("found zero or multiple users with same PAT token"),
}
}
return &users[0], nil
}


@@ -1,16 +0,0 @@
package signozio
type status string
type ValidateLicenseResponse struct {
Status status `json:"status"`
Data map[string]interface{} `json:"data"`
}
type CheckoutSessionRedirect struct {
RedirectURL string `json:"url"`
}
type CheckoutResponse struct {
Status status `json:"status"`
Data CheckoutSessionRedirect `json:"data"`
}
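These structs map directly onto the JSON the billing endpoints return; only status, data and data.url are read. Below is a hedged sketch of decoding such a payload; the helper name is illustrative and it assumes the encoding/json import, with a body shaped like {"status":"...","data":{"url":"..."}}.
// decodeCheckout is an illustrative helper showing how CheckoutResponse maps
// onto the JSON body returned by the checkout/portal endpoints.
func decodeCheckout(body []byte) (string, error) {
	var resp CheckoutResponse
	if err := json.Unmarshal(body, &resp); err != nil {
		return "", err
	}
	return resp.Data.RedirectURL, nil
}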


@@ -1,223 +0,0 @@
package signozio
import (
"bytes"
"context"
"encoding/json"
"fmt"
"io"
"net/http"
"time"
"github.com/pkg/errors"
"github.com/SigNoz/signoz/ee/query-service/constants"
"github.com/SigNoz/signoz/ee/query-service/model"
)
var C *Client
const (
POST = "POST"
APPLICATION_JSON = "application/json"
)
type Client struct {
Prefix string
GatewayUrl string
}
func New() *Client {
return &Client{
Prefix: constants.LicenseSignozIo,
GatewayUrl: constants.ZeusURL,
}
}
func init() {
C = New()
}
func ValidateLicenseV3(licenseKey string) (*model.LicenseV3, *model.ApiError) {
// Creating an HTTP client with a timeout for better control
client := &http.Client{
Timeout: 10 * time.Second,
}
req, err := http.NewRequest("GET", C.GatewayUrl+"/v2/licenses/me", nil)
if err != nil {
return nil, model.BadRequest(errors.Wrap(err, "failed to create request"))
}
// Setting the custom header
req.Header.Set("X-Signoz-Cloud-Api-Key", licenseKey)
response, err := client.Do(req)
if err != nil {
return nil, model.BadRequest(errors.Wrap(err, "failed to make post request"))
}
body, err := io.ReadAll(response.Body)
if err != nil {
return nil, model.BadRequest(errors.Wrap(err, fmt.Sprintf("failed to read validation response from %v", C.GatewayUrl)))
}
defer response.Body.Close()
switch response.StatusCode {
case 200:
a := ValidateLicenseResponse{}
err = json.Unmarshal(body, &a)
if err != nil {
return nil, model.BadRequest(errors.Wrap(err, "failed to unmarshal license validation response"))
}
license, err := model.NewLicenseV3(a.Data)
if err != nil {
return nil, model.BadRequest(errors.Wrap(err, "failed to generate new license v3"))
}
return license, nil
case 400:
return nil, model.BadRequest(errors.Wrap(fmt.Errorf(string(body)),
fmt.Sprintf("bad request error received from %v", C.GatewayUrl)))
case 401:
return nil, model.Unauthorized(errors.Wrap(fmt.Errorf(string(body)),
fmt.Sprintf("unauthorized request error received from %v", C.GatewayUrl)))
default:
return nil, model.InternalError(errors.Wrap(fmt.Errorf(string(body)),
fmt.Sprintf("internal request error received from %v", C.GatewayUrl)))
}
}
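The validation call is a plain GET against <gateway>/v2/licenses/me authenticated with the X-Signoz-Cloud-Api-Key header; the rest of the function is response decoding and error mapping. A stripped-down, hedged sketch of that request is shown below; the helper is hypothetical and omits decoding (uses the net/http, io and time imports already present in this file).
// fetchLicense is an illustrative, standalone version of the request
// ValidateLicenseV3 issues; it returns the raw response body.
func fetchLicense(gatewayURL, licenseKey string) ([]byte, error) {
	client := &http.Client{Timeout: 10 * time.Second}
	req, err := http.NewRequest(http.MethodGet, gatewayURL+"/v2/licenses/me", nil)
	if err != nil {
		return nil, err
	}
	req.Header.Set("X-Signoz-Cloud-Api-Key", licenseKey)
	resp, err := client.Do(req)
	if err != nil {
		return nil, err
	}
	defer resp.Body.Close()
	return io.ReadAll(resp.Body)
}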
func NewPostRequestWithCtx(ctx context.Context, url string, contentType string, body io.Reader) (*http.Request, error) {
req, err := http.NewRequestWithContext(ctx, POST, url, body)
if err != nil {
return nil, err
}
req.Header.Add("Content-Type", contentType)
return req, err
}
// SendUsage reports the usage of signoz to license server
func SendUsage(ctx context.Context, usage model.UsagePayload) *model.ApiError {
reqString, _ := json.Marshal(usage)
req, err := NewPostRequestWithCtx(ctx, C.Prefix+"/usage", APPLICATION_JSON, bytes.NewBuffer(reqString))
if err != nil {
return model.BadRequest(errors.Wrap(err, "unable to create http request"))
}
res, err := http.DefaultClient.Do(req)
if err != nil {
return model.BadRequest(errors.Wrap(err, "unable to connect with license.signoz.io, please check your network connection"))
}
body, err := io.ReadAll(res.Body)
if err != nil {
return model.BadRequest(errors.Wrap(err, "failed to read usage response from license.signoz.io"))
}
defer res.Body.Close()
switch res.StatusCode {
case 200, 201:
return nil
case 400, 401:
return model.BadRequest(errors.Wrap(errors.New(string(body)),
"bad request error received from license.signoz.io"))
default:
return model.InternalError(errors.Wrap(errors.New(string(body)),
"internal error received from license.signoz.io"))
}
}
func CheckoutSession(ctx context.Context, checkoutRequest *model.CheckoutRequest, licenseKey string) (string, *model.ApiError) {
hClient := &http.Client{}
reqString, err := json.Marshal(checkoutRequest)
if err != nil {
return "", model.BadRequest(err)
}
req, err := http.NewRequestWithContext(ctx, "POST", C.GatewayUrl+"/v2/subscriptions/me/sessions/checkout", bytes.NewBuffer(reqString))
if err != nil {
return "", model.BadRequest(err)
}
req.Header.Set("X-Signoz-Cloud-Api-Key", licenseKey)
response, err := hClient.Do(req)
if err != nil {
return "", model.BadRequest(err)
}
body, err := io.ReadAll(response.Body)
if err != nil {
return "", model.BadRequest(errors.Wrap(err, fmt.Sprintf("failed to read checkout response from %v", C.GatewayUrl)))
}
defer response.Body.Close()
switch response.StatusCode {
case 201:
a := CheckoutResponse{}
err = json.Unmarshal(body, &a)
if err != nil {
return "", model.BadRequest(errors.Wrap(err, "failed to unmarshal zeus checkout response"))
}
return a.Data.RedirectURL, nil
case 400:
return "", model.BadRequest(errors.Wrap(errors.New(string(body)),
fmt.Sprintf("bad request error received from %v", C.GatewayUrl)))
case 401:
return "", model.Unauthorized(errors.Wrap(errors.New(string(body)),
fmt.Sprintf("unauthorized request error received from %v", C.GatewayUrl)))
default:
return "", model.InternalError(errors.Wrap(errors.New(string(body)),
fmt.Sprintf("internal request error received from %v", C.GatewayUrl)))
}
}
func PortalSession(ctx context.Context, checkoutRequest *model.PortalRequest, licenseKey string) (string, *model.ApiError) {
hClient := &http.Client{}
reqString, err := json.Marshal(checkoutRequest)
if err != nil {
return "", model.BadRequest(err)
}
req, err := http.NewRequestWithContext(ctx, "POST", C.GatewayUrl+"/v2/subscriptions/me/sessions/portal", bytes.NewBuffer(reqString))
if err != nil {
return "", model.BadRequest(err)
}
req.Header.Set("X-Signoz-Cloud-Api-Key", licenseKey)
response, err := hClient.Do(req)
if err != nil {
return "", model.BadRequest(err)
}
body, err := io.ReadAll(response.Body)
if err != nil {
return "", model.BadRequest(errors.Wrap(err, fmt.Sprintf("failed to read portal response from %v", C.GatewayUrl)))
}
defer response.Body.Close()
switch response.StatusCode {
case 201:
a := CheckoutResponse{}
err = json.Unmarshal(body, &a)
if err != nil {
return "", model.BadRequest(errors.Wrap(err, "failed to unmarshal zeus portal response"))
}
return a.Data.RedirectURL, nil
case 400:
return "", model.BadRequest(errors.Wrap(errors.New(string(body)),
fmt.Sprintf("bad request error received from %v", C.GatewayUrl)))
case 401:
return "", model.Unauthorized(errors.Wrap(errors.New(string(body)),
fmt.Sprintf("unauthorized request error received from %v", C.GatewayUrl)))
default:
return "", model.InternalError(errors.Wrap(errors.New(string(body)),
fmt.Sprintf("internal request error received from %v", C.GatewayUrl)))
}
}


@@ -1,11 +0,0 @@
package interfaces
import (
baseint "github.com/SigNoz/signoz/pkg/query-service/interfaces"
)
// Connector defines methods for interaction
// with o11y data. for example - clickhouse
type DataConnector interface {
baseint.Reader
}

Some files were not shown because too many files have changed in this diff.