Compare commits

...

306 Commits

Author SHA1 Message Date
primus-bot[bot]
9f6419c2f8 chore(release): bump to v0.65.0 (#6725)
#### Summary
 - Release SigNoz v0.65.0

 Created by [Primus-Bot](https://github.com/apps/primus-bot)
2024-12-26 14:28:44 +00:00
Prashant Shahi
421879cf7a ci(releaser): update branch reference to use latest main (#6724)
### Summary

- update branch reference to use the latest main
2024-12-26 14:17:09 +00:00
Shaheer Kochai
00abadd429 fix: update API key expiration display logic in MultiIngestionSettings component (#6717)
- display 'No Expiry' for invalid or zero-value expiration dates.
2024-12-26 19:10:53 +05:30
Prashant Shahi
14096f8d53 ci(releaser): github workflow for signoz releases (#6719)
### Summary

- github workflow for automated SigNoz releases

Signed-off-by: Prashant Shahi <prashant@signoz.io>
2024-12-26 16:46:55 +05:30
Shaheer Kochai
d2aa1cf06e fix: fix the issue of saved view overriding query for logs and traces (#6678)
* fix: fix the issue of saved view overriding query for logs and traces for builder type query

* chore: refactored isDefaultQuery to use a function to extract relevant keys and use lodash's isEqual

* fix: add check for multiple queries in isDefaultQuery logic

* chore: moved extractRelevantKeys outside isDefaultQuery

* fix: fix the failing tests
2024-12-24 15:39:48 +00:00
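
The refactor above describes a recoverable pattern: strip UI-only keys from both the current and the default builder query, then deep-compare what remains. Below is a minimal sketch of that approach; the `BuilderQuery` shape, field names, and helper names are illustrative assumptions, not the actual SigNoz types.

```typescript
import { isEqual, pick } from 'lodash';

// Hypothetical builder-query shape; only the fields that matter for the
// "is this still the default query?" check are compared.
interface BuilderQuery {
	dataSource: string;
	aggregateOperator: string;
	filters: { items: unknown[] };
	legend?: string; // UI-only fields like this are deliberately ignored
}

// Extracted outside isDefaultQuery, per the refactor in the commit above.
const extractRelevantKeys = (query: BuilderQuery): Partial<BuilderQuery> =>
	pick(query, ['dataSource', 'aggregateOperator', 'filters']);

// Multiple queries can never be "the default"; a single query is default
// only when its relevant keys deep-equal the default's (lodash isEqual).
const isDefaultQuery = (
	queries: BuilderQuery[],
	defaultQuery: BuilderQuery,
): boolean =>
	queries.length === 1 &&
	isEqual(extractRelevantKeys(queries[0]), extractRelevantKeys(defaultQuery));
```

With a check like this, a saved view only overwrites the current query when the user is still on the untouched default, which is the override bug the commit fixes.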
Shaheer Kochai
838192cf5c fix(Traces Explorer): prevent duplicate API calls to query_range in traces explorer (#6677)
* fix(Traces Explorer): prevent duplicate API calls to query_range in traces explorer

* fix(QueryBuilder): fix the race condition causing duplicate triggering of initQueryBuilderData

* chore: address review comments

* fix: fix the failing tests

---------

Co-authored-by: Vikrant Gupta <vikrant.thomso@gmail.com>
2024-12-24 15:25:07 +00:00
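
A hedged sketch of the deduplication pattern this commit implies: guard the query-builder initialization on a key derived from the composite query/URL state so two racing effects cannot both fire `query_range`. Hook and parameter names here are assumptions for illustration.

```typescript
import { useEffect, useRef } from 'react';

// Illustrative guard: remember the last state key we initialized for, and
// skip re-initialization (and the duplicate query_range call it triggers)
// when the key has not changed.
function useInitQueryBuilderDataOnce(
	compositeQueryKey: string,
	initQueryBuilderData: () => void,
): void {
	const lastInitializedKey = useRef<string | null>(null);

	useEffect(() => {
		if (lastInitializedKey.current === compositeQueryKey) {
			return; // already initialized for this state; avoid the duplicate call
		}
		lastInitializedKey.current = compositeQueryKey;
		initQueryBuilderData();
	}, [compositeQueryKey, initQueryBuilderData]);
}
```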
Vikrant Gupta
5dfe245f2d chore: fix cross spawn vulnerability (#6709) 2024-12-24 19:07:20 +05:30
Vibhu Pandey
53b86e4b5c feat(ee): serve frontend pages with query-service (#6696)
Serve frontend pages with query-service
2024-12-23 16:44:48 +05:30
Vikrant Gupta
5d9a2571df feat: add k8s pod name as quick filter for logs (#6694) 2024-12-21 13:19:20 +05:30
Vikrant Gupta
bef6cc945a fix: do not try to route to org onboarding when workspace locked or suspended (#6692) 2024-12-20 22:28:53 +05:30
Vikrant Gupta
2c2e248c95 fix: add support route to the base route for private routes (#6688)
* fix: add support route to the base route for private routes

* fix: early return from useEffect if it's an old route

* fix: same old explorer routes in old mapping
2024-12-20 17:37:44 +05:30
Yunus M
2f62a9d36d feat: rename access tokens to api keys (#6687)
* feat: rename access tokens to api keys

* feat: update test cases

* feat: update delete token to delete key
2024-12-20 17:28:43 +05:30
Vikrant Gupta
04778b9641 fix: route permissions for billing and alerts (#6686) 2024-12-20 15:26:47 +05:30
Vikrant Gupta
26fe5e49e7 chore: revamp the frontend architecture (#6598)
* feat: setup the app context to fetch users,licenses and feature flags

* feat: added global event listeners for after_login event

* feat: remove redux from app state and private route

* feat: synchronize the approutes file

* feat: cleanup the private routes

* feat: handle login and logout

* feat: cleanup the app layout file

* feat: cleanup and synchronize side nav item

* fix: minor small re-render issue

* feat: parallel processing for sync calls for faster bootup of application

* feat: some refactoring for private routes

* fix: too much re-rendering across the entire application

* fix: remove redux

* feat: some more corrections

* feat: fix all the files except signup

* feat: add app provider to the test-utils

* feat: should fix a lot of tests

* chore: fix more tests

* chore: fix more tests

* feat: fix some tests and corrected the redux mock

* feat: delete snapshot

* fix: test cases

* fix: pipeline actions test cases

* fix: billing test cases

* feat: update the signup API to accept isAnonymous and hasOptedUpdates

* chore: cleanup the console logs

* fix: indefinite loading on manage licenses screen

* fix: better handling and route to something_went_wrong in case of qs down

* fix: signup for subsequent users

* chore: update test-utils

* fix: jerky behaviour on entering the home page

* feat: handle the retention for login context flow

* fix: do not let users workaround workspace blocked screen
2024-12-20 14:00:02 +05:30
Shaheer Kochai
accafbc3ec fix: fix the mismatch between time range picker placeholder and timerange popover values (#6675)
* fix: fix the mismatch between time range picker placeholder and timerange popover values

* fix: fix the stale value issue in range picker

---------

Co-authored-by: Vikrant Gupta <vikrant.thomso@gmail.com>
2024-12-20 13:35:40 +05:30
Vikrant Gupta
8e7c78e1b1 fix: detach the log indicator from timestamp column (#6681) 2024-12-20 13:25:52 +05:30
Shaheer Kochai
53ebd39f41 chore: add log events for timezone interactions in date/time picker and timezone adaptation (#6676)
* chore: add log events for timezone interactions in date/time picker and timezone adaptation

* refactor: modified the log event messages for timezone picker to follow the conventions

* chore: improve timezone picker event messages
2024-12-20 11:38:36 +04:30
Srikanth Chekuri
b36ef944cc chore: remove data migration (#6683) 2024-12-20 10:52:36 +07:00
Srikanth Chekuri
fa90fad373 chore: add pvcs list (#6654) 2024-12-19 12:01:12 +00:00
Srikanth Chekuri
77420b9d3a chore: address some gaps in k8s monitoring (#6653) 2024-12-19 17:22:39 +05:30
Prashant Shahi
cecc57e72d Merge pull request #6668 from SigNoz/chore/deprecate-develop
chore: develop deprecation and related changes
2024-12-19 13:48:29 +05:30
Prashant Shahi
512adc6471 Merge branch 'main' into chore/deprecate-develop 2024-12-19 13:35:27 +05:30
Prashant Shahi
42fefc65be chore: deprecate develop branch - use main
Signed-off-by: Prashant Shahi <prashant@signoz.io>
2024-12-19 13:33:09 +05:30
Prashant Shahi
dcc659907a chore(signoz): pin versions: SigNoz 0.64.0
Signed-off-by: Prashant Shahi <prashant@signoz.io>
2024-12-19 13:33:09 +05:30
Prashant Shahi
b90ed375c2 chore(signoz): pin versions: SigNoz 0.63.0, SigNoz OtelCollector 0.111.16
Signed-off-by: Prashant Shahi <prashant@signoz.io>
2024-12-19 13:33:09 +05:30
Prashant Shahi
a8a3bd3f7d chore(signoz): pin versions: SigNoz 0.62.0, SigNoz OtelCollector 0.111.15
Signed-off-by: Prashant Shahi <prashant@signoz.io>
2024-12-19 13:33:09 +05:30
SagarRajput-7
7405bfbbee feat: changed start and end time logic for consumer lag details (#6605) 2024-12-19 13:01:13 +05:30
Nityananda Gohain
67e822e23e feat: api for trace materialization (#6646)
* feat: api for trace materialization

* fix: minor changes and cleanup

* fix: minor fixes

* fix: update errors

* fix: address comments

* fix: address comments
2024-12-19 11:52:20 +07:00
Shivanshu Raj Shrivastava
60dc479a19 fix: add bucketing (#6669)
Co-authored-by: Nityananda Gohain <nityanandagohain@gmail.com>
2024-12-18 19:57:33 +05:30
Nityananda Gohain
85cf4f4e2e fix: use placeholder in limit and use proper exists (#6667) 2024-12-18 21:07:31 +07:00
Shivanshu Raj Shrivastava
83aa48c721 update service.instance.id (#6665)
* nit: update resource id and revert the flag
2024-12-18 19:06:22 +05:30
Prashant Shahi
823f84f857 Merge pull request #6664 from SigNoz/release/v0.64.x
Release/v0.64.x
2024-12-18 18:29:05 +05:30
Prashant Shahi
8a4d45084d chore(signoz): pin versions: SigNoz 0.64.0
Signed-off-by: Prashant Shahi <prashant@signoz.io>
2024-12-18 17:57:42 +05:30
Prashant Shahi
5bc6c33899 Merge branch 'main' into release/v0.64.x 2024-12-18 17:55:57 +05:30
Shivanshu Raj Shrivastava
83f6dea2db Add support for trace_v3 schema in messaging queues (#6663)
feat: support trace v3 queries
2024-12-18 17:04:01 +05:30
Nityananda Gohain
7031c866e8 fix: add flags for using trace new schema (#6651) 2024-12-18 17:55:22 +07:00
Prashant Shahi
46bc7c7a21 Merge pull request #6662 from SigNoz/release/v0.63.x
Release/v0.63.x
2024-12-18 15:41:24 +05:30
Prashant Shahi
6d9741c3a4 chore(signoz): pin versions: SigNoz 0.63.0, SigNoz OtelCollector 0.111.16
Signed-off-by: Prashant Shahi <prashant@signoz.io>
2024-12-18 15:25:20 +05:30
Prashant Shahi
610a8ec704 Merge branch 'main' into release/v0.63.x 2024-12-18 15:07:57 +05:30
Raj Kamal Singh
cd9f27ab08 Fix: QS: logs pipelines: better validation of pipelines being saved (#6652)
* chore: add test validating invalid field paths in pipeline operators are rejected

* chore: refactor posted pipelines validation to use a controller method

* fix: run a collector simulation to validate pipeline config being saved

* chore: minor cleanup
2024-12-18 10:42:14 +05:30
Vibhu Pandey
14fbb1fcda fix(flag): add missing flag back (#6647) 2024-12-17 01:12:31 +05:30
Vikrant Gupta
96da21df05 fix: correlation time stamps for APM to traces and logs (#6632) 2024-12-17 00:00:15 +05:30
Vikrant Gupta
8608f02263 fix: timerange selected for traces to logs (#6634) 2024-12-16 22:35:25 +05:30
Vikrant Gupta
2701ae5c34 fix: unable to remove query tags from the beginning when similar tags present (#6645)
* fix: unable to remove query tags from the beginning

* fix: focus only when the filters dropdown is opened

* fix: used auto focus prop
2024-12-16 22:25:37 +05:30
Vibhu Pandey
951593b0a3 feat(licenses): deprecate licenses v2 (#6626)
deprecate licenses v2
2024-12-16 10:23:23 +00:00
Srikanth Chekuri
e6766023dd chore: use tag attributes v2 table (#6616) 2024-12-16 09:59:16 +00:00
Srikanth Chekuri
bef5b96c5c chore: add queries with tag attributes v1 (#6643) 2024-12-16 13:04:55 +05:30
Shaheer Kochai
b29359dee0 fix: improve traces table ux by making the table scrollable (#6423)
* fix: improve traces table ux by making the table scrollable

* chore: remove explicit scroll.x and modify the existing scroll.x from true to max-content

* fix(Traces Explorer): make trace explorer card full-width when collapsed, resizable when expanded
2024-12-16 07:00:40 +00:00
Amlan Kumar Nandy
9a1cd65b73 feat: add functionality to export dashboard as json from listing page 2024-12-16 12:10:21 +05:30
Amlan Kumar Nandy
8ab0c066d6 Merge branch 'develop' into dashboard-list-export-json 2024-12-16 11:39:18 +05:30
Shaheer Kochai
b333aa3775 Feat: Timezone picker feature (#6474)
* feat: time picker hint and timezone picker UI with basic functionality + helper to get timezones

* feat: add support for esc keypress to close the timezone picker

* chore: add the selected timezone as url param and close timezone picker on select

* fix: overall improvement + add searchIndex to timezone

* feat: timezone preferences UI

* chore: improve timezone utils

* chore: change timezone item from div to button

* feat: display timezone in timepicker input

* chore: fix the typo

* fix: don't focus on time picker when timezone is clicked

* fix: fix the issue of timezone breaking for browser and utc timezones

* fix: display the timezone in timepicker hint 'You are at'

* feat: timezone basic functionality (#6492)

* chore: change div to fragment + change type to any as the ESLint complains otherwise

* chore: manage etc timezone filtering with an arg

* chore: update timezone wrapper class name

* fix: add timezone support to downloaded logs

* feat: add current timezone to dashboard list and configure metadata modal

* fix: add pencil icon next to timezone hint + change the copy to Current timezone

* fix: properly handle the escape button behavior for timezone picker

* chore: replace @vvo/tzdb with native Intl API for timezones

* feat: lightmode for timezone picker and timezone adaptation components

* fix: use normalized tz in browser timezone

* fix: timezone picker lightmode fixes

* feat: display selected time range in 12 hour format

* chore: remove unnecessary optional chaining

* fix: fix the typo in css variable

* chore: add em dash and change icon for timezone hint in date/time picker

* chore: move pen line icon to the right of timezone offset

* fix: fix the failing tests

* feat: handle switching off the timezone adaptation
2024-12-16 11:27:20 +05:30
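
One bullet above replaces `@vvo/tzdb` with the native `Intl` API. A minimal sketch of what that swap can look like, with illustrative helper names: enumerate IANA zones via `Intl.supportedValuesOf` and derive a GMT-offset label per zone, with an argument to filter the `Etc/` zones as mentioned in the commit.

```typescript
interface TimezoneEntry {
	name: string; // e.g. "Asia/Kolkata"
	offset: string; // e.g. "GMT+5:30"
}

// Derive the current UTC-offset label for a zone from Intl itself,
// so no bundled timezone database is needed.
function getTimezoneOffsetLabel(timeZone: string): string {
	const parts = new Intl.DateTimeFormat('en-US', {
		timeZone,
		timeZoneName: 'shortOffset',
	}).formatToParts(new Date());
	return parts.find((p) => p.type === 'timeZoneName')?.value ?? 'GMT';
}

// List all IANA timezones known to the runtime (ES2022+), optionally
// keeping the synthetic Etc/* zones.
function listTimezones(includeEtc = false): TimezoneEntry[] {
	return Intl.supportedValuesOf('timeZone')
		.filter((name) => includeEtc || !name.startsWith('Etc/'))
		.map((name) => ({ name, offset: getTimezoneOffsetLabel(name) }));
}

// The browser's own zone, normalized through the same API:
const browserTimezone = Intl.DateTimeFormat().resolvedOptions().timeZone;
```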
amlannandy
8a3319cdf5 chore: address comments 2024-12-16 11:24:57 +05:30
amlannandy
d09c4d947e feat: update unit test 2024-12-16 11:24:57 +05:30
amlannandy
2508e6f9f1 feat: update unit test 2024-12-16 11:24:57 +05:30
amlannandy
1b8213653a feat: update tests 2024-12-16 11:24:57 +05:30
amlannandy
b499b10333 feat: add unit test 2024-12-16 11:24:57 +05:30
amlannandy
b35b975798 chore: address comments 2024-12-16 11:24:57 +05:30
amlannandy
715f8a2363 feat: address comments 2024-12-16 11:24:57 +05:30
amlannandy
8d1c4491b7 feat: add functionality to export dashboard as json from listing page 2024-12-16 11:24:57 +05:30
dependabot[bot]
e3caa6a8f5 chore(deps): bump golang.org/x/crypto from 0.27.0 to 0.31.0 (#6638)
Bumps [golang.org/x/crypto](https://github.com/golang/crypto) from 0.27.0 to 0.31.0.
- [Commits](https://github.com/golang/crypto/compare/v0.27.0...v0.31.0)

---
updated-dependencies:
- dependency-name: golang.org/x/crypto
  dependency-type: direct:production
...

Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2024-12-14 04:29:25 +00:00
Vibhu Pandey
a1059ed949 feat(signoz-ingestion-key): rename signoz-access-token to signoz-ingestion-key (#6633) 2024-12-13 18:18:12 +05:30
SagarRajput-7
8c46de8eac feat: added table column and row logic for the new api response structure for producer overview (#6433)
* feat: added table column and row logic for the new api response structure for producer overview

* feat: fixed typo in function name

* feat: consumed new 2 table merging logic for producer latency overview

* feat: added 3 digit precision to 'ingestion_rate' and 'byte_rate' in consumer overview
2024-12-13 11:25:39 +05:30
Prashant Shahi
2b5a0ec496 Merge pull request #6625 from SigNoz/release/v0.62.x
Release/v0.62.x
2024-12-12 21:02:17 +05:30
Prashant Shahi
a9440c010c chore(signoz): pin versions: SigNoz 0.62.0, SigNoz OtelCollector 0.111.15
Signed-off-by: Prashant Shahi <prashant@signoz.io>
2024-12-12 15:28:09 +05:30
Prashant Shahi
f9e7eff357 Merge branch 'main' into release/v0.62.x 2024-12-12 15:22:47 +05:30
Nityananda Gohain
0fbfb6b22b fix: fix count aggregate attribute column name (#6613)
* fix: fix count aggregate attribute column name

* fix: add test for column as well

* fix: use new schema in threshold rule enrichment
2024-12-11 13:37:25 +05:30
Nityananda Gohain
b25df66381 fix: migration for ingestion dashboard (#6610)
* fix: migration for ingestion dashboard

* fix: remove redundant uuid

* Update pkg/query-service/app/dashboards/model.go

Co-authored-by: Srikanth Chekuri <srikanth.chekuri92@gmail.com>

---------

Co-authored-by: Srikanth Chekuri <srikanth.chekuri92@gmail.com>
2024-12-10 18:49:21 +05:30
Nityananda Gohain
32fa5a403c fix: update default alert ch queries for traces and logs (#6552)
* fix: update default alert ch queries for traces and logs

* fix: use service.name instead of peer.service

* fix: add link to docs

* fix: add alias for service name
2024-12-10 18:36:02 +05:30
Nityananda Gohain
f9d4cf19e9 fix: smartTraceAlgo for new schema (#6602) 2024-12-10 13:43:51 +05:30
Amlan Kumar Nandy
81775c7d55 fix: resolve unnecessary refetching of graphs on service details page 2024-12-10 12:00:55 +05:30
amlannandy
8d2666004b fix: resolve the unneccessary refetching of graphs on service details page 2024-12-10 11:39:26 +05:30
Shivanshu Raj Shrivastava
51baf7f8d3 feat: add byte rate to producer API (#6579)
* fix: fix partition check for topic throughput
2024-12-09 15:18:41 +00:00
Amlan Kumar Nandy
31a2926375 chore: replace the 'Get started' CTA with 'New source' 2024-12-09 14:34:18 +05:30
amlannandy
8c6225185d chore: replace the 'Get started' CTA with 'New source' 2024-12-09 14:00:41 +05:30
deepakdinesh1123
d4458d65ad fix(docker): otel-collector-migrator command fixed in docker-compose-core.yaml (#6564)
Co-authored-by: Srikanth Chekuri <srikanth.chekuri92@gmail.com>
2024-12-07 20:15:07 +05:30
Nityananda Gohain
02d8fdb212 fix: enable smart trace detail (#6596) 2024-12-05 09:30:39 +05:30
Prashant Shahi
47d8c9e3e7 Merge pull request #6593 from SigNoz/release-sync/v0.61.x
Release Sync/v0.61.x
2024-12-04 21:28:47 +05:30
Prashant Shahi
a383c708e3 Merge pull request #6591 from SigNoz/release/v0.61.x
Release/v0.61.x
2024-12-04 20:49:18 +05:30
Prashant Shahi
99367be850 chore(signoz): pin versions: SigNoz 0.61.0, SigNoz OtelCollector 0.111.14
Signed-off-by: Prashant Shahi <prashant@signoz.io>
2024-12-04 16:21:39 +05:30
Prashant Shahi
73bcc2af46 Merge branch 'main' into release/v0.61.x 2024-12-04 16:17:00 +05:30
Vikrant Gupta
43f856c41b fix: make the entire row clickable in logs quick filters (#6574) 2024-12-03 13:06:07 +05:30
Vikrant Gupta
6384b25af3 feat: show failed payment banner and block the workspace integration (#6560)
* feat: added new API endpoint for fetching the active license

* feat: add setup for apis on frontend

* feat: frontend infrastructure changes for app context and workspace suspended

* feat: added workspace suspended component

* feat: send back to application if workspace is not suspended

* feat: added the missing creative

* chore: only move to suspended state when state is payment_failed

* chore: address review comments

* fix: tab naming
2024-12-02 19:58:38 +05:30
Yunus M
507c0600cd chore: remove infra monitoring ff (#6571) 2024-12-02 17:36:37 +05:30
Yunus M
3d092ec2ae chore: update sentry react to v8 (#6569) 2024-12-02 14:21:35 +05:30
SagarRajput-7
2b8a610a07 fix: fixed typo in metric view dashboard title (#6557) 2024-11-29 11:27:54 +05:30
Nityananda Gohain
f7f8bf1867 fix: add dashboard names and update logs count in telemetry (#6553) 2024-11-28 20:46:06 +05:30
Vikrant Gupta
813cd845f4 fix: explicit mouse move event user action (#6549)
* fix: explicit mouse move event user action

* fix: handle next element selection on click

* fix: handle the last 0 case

* fix: handle search reset
2024-11-27 19:51:57 +05:30
Yunus M
6aee991633 feat: handle keyboard navigations for column selection in logs explor… (#6548)
* feat: handle keyboard navigations for column selection in logs explorer options view

* chore: remove console log
2024-11-27 13:04:23 +00:00
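
A small sketch of the keyboard-navigation handler this commit describes, with hypothetical state setters: arrow keys move the highlighted column (wrapping at the ends) and Enter toggles selection.

```typescript
// Illustrative handler for a focusable list of selectable columns.
function handleColumnKeyDown(
	event: KeyboardEvent,
	activeIndex: number,
	itemCount: number,
	setActiveIndex: (index: number) => void,
	toggleSelect: (index: number) => void,
): void {
	switch (event.key) {
		case 'ArrowDown':
			setActiveIndex((activeIndex + 1) % itemCount); // wrap to first item
			break;
		case 'ArrowUp':
			setActiveIndex((activeIndex - 1 + itemCount) % itemCount); // wrap to last
			break;
		case 'Enter':
			toggleSelect(activeIndex);
			break;
		default:
			return; // let other keys behave normally
	}
	event.preventDefault(); // keep arrows from scrolling the page
}
```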
Yunus M
2bfd31841e feat: improve empty hosts, incorrect metrics and no filter views (#6530)
* feat: improve empty hosts, incorrect metrics and no filter views

* feat: add infra monitoring - host lists - usage events (#6536)

* feat: add usage events

* feat: add request early access events

---------

Co-authored-by: Srikanth Chekuri <srikanth.chekuri92@gmail.com>
2024-11-27 16:00:08 +05:30
Prashant Shahi
a320a16556 Merge pull request #6546 from SigNoz/release/v0.60.x
Release/v0.60.x
2024-11-27 15:54:49 +05:30
Prashant Shahi
7cd8442e6e chore(signoz): pin versions: SigNoz 0.60.0, SigNoz OtelCollector 0.111.13
Signed-off-by: Prashant Shahi <prashant@signoz.io>
2024-11-27 15:13:08 +05:30
Raj Kamal Singh
486632b64e Chore/qs use collector simulator from signoz otel collector (#6539)
* chore: qs: logs pipeline preview: use collectorsimulator from signoz-otel-collector

* chore: qs: remove collectorsimulator: located in signoz-otel-collector now
2024-11-27 11:53:39 +05:30
Philipp
328d955a74 Fix for running signoz-schema-migrator under docker-swarm (#6489) 2024-11-27 03:15:32 +00:00
Nityananda Gohain
a3e57a1829 fix: add telemetry for trace migration (#6537) 2024-11-26 20:57:36 +05:30
Vikrant Gupta
24ab18d988 chore: move away from parallel usages of licenses v2 and v3 (#6527)
* chore: move all the usages of license v2 to license v3

* chore: added log lines for debugging
2024-11-25 23:51:45 +05:30
Srikanth Chekuri
2e4956c2f7 chore: add additional info for host metrics onboarding (#6529) 2024-11-25 23:23:42 +05:30
Nityananda Gohain
b85f7921f4 fix: update APIs to respect the new changes in attributes table (#6526) 2024-11-25 17:05:43 +05:30
Nityananda Gohain
0c2a15d86f fix: add settings to allow distributed_product_mode for trace panel (#6517)
* fix: add settings to allow distributed_product_mode for trace panel

* fix: tests fixed

* fix: add 10gb memory limit

---------

Co-authored-by: Srikanth Chekuri <srikanth.chekuri92@gmail.com>
2024-11-22 22:48:35 +05:30
Nityananda Gohain
afbba1ed44 fix: add v2 for getServices and GetTopOperations (#6516)
* fix: add v2 for getServices and GetTopOperations

* fix: add comments

* fix: update logic for filters

---------

Co-authored-by: Srikanth Chekuri <srikanth.chekuri92@gmail.com>
2024-11-22 21:57:25 +05:30
Yunus M
20f748f9c4 chore: update http proxy middleware (#6499) 2024-11-22 14:21:55 +00:00
Vikrant Gupta
96b5e0920f fix: decrease the pad angle and remove the empty legend entries (#6507)
* fix: pie chart should not have padding between adjacent items

* fix: remove the labels which do not have any data
2024-11-22 11:06:47 +00:00
Srikanth Chekuri
7fe4f8cc56 chore: remove the fancy text while fetching data (#6505) 2024-11-22 10:50:25 +00:00
Srikanth Chekuri
ed6abe5a95 chore: enable sorting for hosts list (#6502) 2024-11-22 10:37:42 +00:00
SagarRajput-7
a6968d452c fix: updated folder name and structure (#6488) 2024-11-22 14:05:22 +05:30
SagarRajput-7
0c5db1937e fix: fixed back navigation issue for dashboard details coming from dashboard list table (#6496) 2024-11-22 13:49:02 +05:30
Nityananda Gohain
67058b2a17 feat: trace v4 integration (#6226)
* feat: trace v4 inital commit

* fix: add remaining files

* fix: integrate with querier

* fix: get trace by id api updated

* fix: add servicename resource filter

* fix: tests

* fix: use correct prepQuery

* fix: services page

* fix: minor fixes to use the new table in api's and querier

* fix: add support for window based pagination

* feat: support for faster trace detail

* fix: searchTraces

* fix: attribute enrichment updated and issue in group by

* fix: issues in group by

* fix: enrichment using alias

* fix: test file added

* fix: tests

* fix: group by with filters

* fix: add subquery

* fix: trigger build

* fix: update pagination logic and few ch column names

* fix: update qb

* fix: add tests

* feat: minor fixes

* fix: update pagination logic

* fix: update pagination logic

* fix: remove utils

* fix: remove unwanted APIs

* fix: attribute and attribute values v2

* fix: autocomplete APIs updated

* fix: tests fixed

* feat: minor fixes

* fix: update telemetry functions

* fix: dont use alias, use proper col names

* fix: move models to its own file

* fix: minor fixes

* fix: address comments

* fix: add to serviceoverview function

* fix: add changes to overview function

* fix: address comments

* fix: remove printlines

---------

Co-authored-by: Srikanth Chekuri <srikanth.chekuri92@gmail.com>
2024-11-22 12:00:29 +05:30
Nityananda Gohain
e46d969143 feat: ttl api for new trace tables (#6497)
* feat: ttl api for new trace tables

* fix: remove print and use correct context

* fix: update var name
2024-11-21 22:37:28 +05:30
Yunus M
e4505693b0 feat: added the host list view and filters (#6210) (#6501)
* feat: added the host list view and filters (#6210)

* feat: added the host list view and filters

* feat: removed group by filter and added autocomplete for where clause

* feat: updated the table view and added the pagination

* feat: pass updated filters to api to get filtered data in the list

* feat: added global time range and order by for cpu,memory,iowait,load

* feat: added order by and color codes for cpu and memory usage progress bar

* refactor: removed inline styles

* Host lists improvement (#6366)

* style: added new style changes for date time selection in host lists view

* style: added padding to date time selector

* style: removed unnecessary styles for host tabs

* style: removed unused css

* feat: added the host detail view (#6267)

* Host containers (#6297)

* feat: added the host detail view

* feat: completed containers and processes details view

* Show host metrics panels in metrics tab. (#6306)

* feat: added the host detail view

* feat: completed containers and processes details view

* feat: added host metrics panels in metrics tabs

* refactor: removed inline styles from host containers and processes tabs

* style: added top and bottom margin to containers and processes tab

* Metrics time selection (#6360)

* feat: added the host detail view

* feat: completed containers and processes details view

* feat: added host metrics panels in metrics tabs

* refactor: removed inline styles from host containers and processes tabs

* feat: added logs and traces tab in host metrics detail view

* chore: removed console statements

* feat: added DateTimeSelection component in metrics tab

* style: added top and bottom margin to containers and processes tab

* style: removed inline styles

* feat: added logs and traces tab in host metrics detail view (#6359)

* feat: added the host detail view

* feat: completed containers and processes details view

* feat: added host metrics panels in metrics tabs

* refactor: removed inline styles from host containers and processes tabs

* feat: added logs and traces tab in host metrics detail view

* chore: removed console statements

* feat: added filters and time selection in traces tab

* fix: resolved metrics, logs and traces tab issues

* feat: added navigation for logs and traces to respective explorer pages

* fix: added the code for logs tab and navigation to respective explorer page

* fix: added fixes for date time selection custom issue

* style: added styles for light mode

* refactor: removed unused code and added comments

* refactor: added new code for host metric attribute keys

* feat: reset query data once we are on infra monitoring page

* chore: remove optional parameter from get attributes and groupby interfaces

* feat: update ui as per the designs

* fix: logs list, time select and other ui issues

* feat: update title for infra monitoring page

* feat: update copies

* feat: update styles for light mode

* fix: reset page size on filter, open explorers in new tab, enable horizontal scroll

* feat: traces tab updates

* feat: move infra monitoring behind ff

* fix: remove sorting from host listing page

---------

Co-authored-by: Yunus M <myounis.ar@live.com>

* chore: fix lint errors

---------

Co-authored-by: rahulkeswani101 <rahul@signoz.io>
2024-11-21 21:53:05 +05:30
Prashant Shahi
2dad9a3093 Merge pull request #6498 from SigNoz/sync/post-release-v0.59
Sync/post release v0.59
2024-11-21 19:58:05 +05:30
Prashant Shahi
7b6bd83e9a Merge branch 'main' into sync/post-release-v0.59 2024-11-21 16:10:05 +05:30
Nityananda Gohain
d43adc24ef feat: update clickhouse reader to support new trace schema (#6479)
* feat: update clickhouse reader to support new trace schema

* fix: minor fixes

* fix: address comments

* fix: add changes to overview function

* fix: add changes to overview function

* fix: use hardcoded true

* fix: address comments
2024-11-20 23:35:44 +05:30
Nityananda Gohain
5044861773 fix: remove service overview API (#6495) 2024-11-20 22:51:29 +05:30
Prashant Shahi
71d1e12be7 Merge pull request #6490 from SigNoz/release/v0.59.x
Release/v0.59.x
2024-11-20 19:29:36 +05:30
Prashant Shahi
5a70123b06 chore(signoz): pin versions: SigNoz 0.59.0, SigNoz OtelCollector 0.111.9
Signed-off-by: Prashant Shahi <prashant@signoz.io>
2024-11-20 16:45:18 +05:30
Prashant Shahi
f410df846a Merge branch 'main' into release/v0.59.x 2024-11-20 16:35:05 +05:30
Yunus M
d7bd72e2aa chore: update http proxy middleware (#6486) 2024-11-20 14:34:38 +05:30
Shaheer Kochai
20e64b5102 chore: add log events to alert history page (#6396)
* chore: pass active route with RouteTab's onChangeHandler

* chore: add log events to alert history page

---------

Co-authored-by: Srikanth Chekuri <srikanth.chekuri92@gmail.com>
2024-11-20 04:38:27 +00:00
Vikrant Gupta
0b03ff07f1 chore: exactly match the list licenses v2 structure with FF on (#6481) 2024-11-19 19:10:59 +05:30
Vibhu Pandey
c01060ccf7 feat(key): enable edit access for gateway (#6480) 2024-11-19 12:43:25 +00:00
Nityananda Gohain
57c2326908 Trace static fields and structs for trace v4. (#6469)
* fix: update static fields and add response structs

* fix: update ch names

* fix: move models to its own file
2024-11-19 14:38:12 +05:30
Vikrant Gupta
649560265e chore: set zeusURL via build time variable (#6475) 2024-11-19 13:19:01 +05:30
Prashant Shahi
c8d0f7638e Merge pull request #6471 from SigNoz/release/v0.58.x
Release/v0.58.2
2024-11-19 12:16:18 +05:30
Prashant Shahi
25484caa4c Merge branch 'main' into release/v0.58.x 2024-11-19 11:35:28 +05:30
Prashant Shahi
9ccc686c63 chore(signoz): pin versions: SigNoz 0.58.2
Signed-off-by: Prashant Shahi <prashant@signoz.io>
2024-11-19 11:27:28 +05:30
Prashant Shahi
3ad6ff73df Merge branch 'develop' into release/v0.58.x 2024-11-19 11:27:01 +05:30
Srikanth Chekuri
c93cf1ce95 fix: incorrect formula for apdex (#6460) 2024-11-19 05:42:04 +00:00
Prashant Shahi
a9ced66258 Merge branch 'develop' into release/v0.58.x 2024-11-19 11:06:27 +05:30
Nityananda Gohain
98a350692b fix: update TestListTsRange to return all range (#6470) 2024-11-19 10:51:40 +05:30
Vikrant Gupta
d93f72f18d chore: use the license v2 key to fill licenses v3 on startup (#6468)
* feat: use the license v2 key to fill licenses v3 on startup

* chore: make the init only if the licenses v2 is present

* chore: address review comments
2024-11-18 17:55:00 +05:30
Shaheer Kochai
a59e7b9dfb feat: add 'create channel' option in channels list and refetch alert channels on opening the channels dropdown (#6416)
* feat: add channel creation option and auto-refresh channels list on dropdown open

* chore: move inline styles to style.ts

* fix: show the prompt to ask admin if the user doesn't have permissions

* fix: display create channel option only if the user has permission

* fix: prevent repeated new alert event logs + log new channel option inside dropdown
2024-11-18 06:30:06 +00:00
Nityananda Gohain
91bbeaf175 fix: remove unwanted trace APIs (#6464) 2024-11-18 10:27:08 +05:30
Yunus M
22e61e1605 [Snyk] Security upgrade alpine from 3.18.6 to 3.20.3 (#6463)
The following vulnerabilities are fixed with an upgrade:
- https://snyk.io/vuln/SNYK-ALPINE318-BUSYBOX-6913411
- https://snyk.io/vuln/SNYK-ALPINE318-BUSYBOX-7249236
- https://snyk.io/vuln/SNYK-ALPINE318-BUSYBOX-7249265
- https://snyk.io/vuln/SNYK-ALPINE318-BUSYBOX-7249419

Co-authored-by: snyk-bot <snyk-bot@snyk.io>
2024-11-17 21:56:15 +05:30
Srikanth Chekuri
656d1c2b1c chore: add missing alert telemetry (#6459) 2024-11-16 19:16:05 +00:00
Srikanth Chekuri
493ae4fd07 chore: add user email to log_comment (#6461) 2024-11-17 00:36:10 +05:30
Srikanth Chekuri
cd1ec561b1 fix: compare op outside bounds for anomaly alert (#6458) 2024-11-16 20:17:34 +05:30
Nityananda Gohain
0acf39a532 feat: support for new enrichment logic in traces (#6438)
* feat: support for new enrichment logic in traces

* fix: default test added

* fix: update func name in links

* Update pkg/query-service/utils/logs_test.go

Co-authored-by: ellipsis-dev[bot] <65095814+ellipsis-dev[bot]@users.noreply.github.com>

---------

Co-authored-by: ellipsis-dev[bot] <65095814+ellipsis-dev[bot]@users.noreply.github.com>
2024-11-16 15:19:25 +05:30
Prashant Shahi
d859301d30 Merge branch 'develop' into release/v0.58.x 2024-11-16 04:50:33 +05:30
Nityananda Gohain
35f4eaa23b fix: update logs struct to fix live logs (#6453) 2024-11-15 22:42:16 +05:30
Prashant Shahi
07c24bcdf3 Merge pull request #6452 from SigNoz/release/v0.58.x
Release/v0.58.1
2024-11-15 22:36:10 +05:30
Nityananda Gohain
77c5f17dce feat: support for window based pagination in new trace v4 (#6440)
* feat: support for window based pagination in new trace v4

* fix: update pagination logic

* fix: update comment

* fix: subtract correct length

* fix: revert changes

* fix: add tests for querier

* fix: rename matcher

* fix: handle offset inmemory for list queries in traces

* fix: correct var name

* fix: add max pagination limit for traces
2024-11-15 22:13:28 +05:30
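
The actual implementation lives in the Go query service, but the window-plus-in-memory-offset idea in these bullets can be sketched language-agnostically; here in TypeScript, with the cap value assumed for illustration: fetch one window covering `offset + limit`, then slice the offset off in memory instead of pushing a deep OFFSET down to the database.

```typescript
const MAX_PAGINATION_LIMIT = 10_000; // assumed cap, per "max pagination limit" above

// fetchWindow runs the underlying list query with the given window size.
async function listWithWindowPagination<T>(
	fetchWindow: (windowSize: number) => Promise<T[]>,
	offset: number,
	limit: number,
): Promise<T[]> {
	if (offset + limit > MAX_PAGINATION_LIMIT) {
		throw new Error('offset + limit exceeds the max pagination limit');
	}
	// Single query for the whole window; cheaper than a deep DB-side OFFSET
	// when rows must be merged and ordered across distributed parts.
	const window = await fetchWindow(offset + limit);
	// Handle the offset in memory and return at most `limit` rows
	// (the "subtract correct length" bullet concerns this kind of slice arithmetic).
	return window.slice(offset, offset + limit);
}
```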
Prashant Shahi
a11aadb712 Merge branch 'main' into release/v0.58.x 2024-11-15 22:06:09 +05:30
Prashant Shahi
bc9c7b5f1d chore(signoz): pin versions: SigNoz 0.58.1
Signed-off-by: Prashant Shahi <prashant@signoz.io>
2024-11-15 22:02:47 +05:30
Prashant Shahi
1bba932d08 Merge branch 'develop' into release/v0.58.x 2024-11-15 22:01:55 +05:30
SagarRajput-7
c1478c4e54 feat: removed dashboard uuid in all cases, be it duplicate, empty or some valid value, while importing json (#6448)
* feat: removed dashboard uuid in all cases, be it duplicate, empty or some valid value, while importing json

* feat: added comment to better explain the logic
2024-11-15 19:35:29 +05:30
Yunus M
371224a64a fix: show org onboarding only to cloud customers (#6451) 2024-11-15 19:12:38 +05:30
Yunus M
504bc0d541 feat: ingestion limits - add toggle feature (#6430) 2024-11-15 08:32:31 +00:00
Nityananda Gohain
2faa0c6d4f feat: trace V4 QB (#6407)
* feat: trace V4 QB

* fix: update get column name and remove id

* fix: handle contains and update tests

* fix: remove unwanted step interval calculation

* fix: add test cases

* fix: add tests for static columns in QB

* fix: add more order by tests

* fix: update order by logic
2024-11-13 20:30:01 +05:30
Srikanth Chekuri
969ac5028e chore: add v2 metric writer to pipelines (#6345) 2024-11-13 10:41:28 +00:00
Prashant Shahi
3f7adeb040 Merge branch 'develop' into release/v0.58.x 2024-11-13 12:01:54 +05:30
Srikanth Chekuri
323da3494b chore: add experimental rate/increase calc (#6432) 2024-11-13 11:47:56 +05:30
Vikrant Gupta
01fda51959 chore: return proper http codes on unique constraint error (#6428) 2024-11-13 00:25:00 +05:30
Srikanth Chekuri
85ac21f253 fix: update request payload for span metrics queries (#6323) 2024-11-12 17:22:42 +00:00
Srikanth Chekuri
fd9e9f0fb3 chore: add k8s {deployment, daemonset, statefulset, job} resources (#6401) 2024-11-12 15:23:40 +00:00
Nityananda Gohain
d5523fc092 fix: ignore ts for panel type table (#6419) 2024-11-12 08:04:45 +00:00
Nityananda Gohain
2ec641b99e fix: add severity_text legend (#6415) 2024-11-12 05:54:22 +00:00
Ekansh Gupta
d1503f1418 feat: fixProducerAPI (#6422)
chore: bugfix
2024-11-12 05:30:36 +00:00
Vikrant Gupta
e974e9d47f feat: consume the new licenses v3 structure. (#6341)
* feat: setup for licenses v3 integration

* feat: added some more logic

* feat: validator changes

* chore: added a couple of todos

* feat: added config parameter for licenses v3 and the boot option

* feat: some typo fix

* feat: added refresh licenses handler

* feat: handle the start manager license activation

* chore: text updates

* feat: added list licenses call

* chore: refactor the entire code to cleanup interfaces

* fix: nil pointer error

* chore: some minor edits

* feat: model changes

* feat: model changes

* fix: utilise factory pattern

* feat: added default basic plan

* chore: added test cases for new license function

* feat: added more test cases

* chore: make the licenses id not null

* feat: cosmetic changes

* feat: cosmetic changes

* feat: update zeus URL

* chore: license testing fixes

* feat: added license status and category handling for query-service

* chore: added v3 support in v2 endpoint

* chore: http response codes and some code cleanup

* chore: added detailed test cases

* chore: address review comments

* chore: some misc cleanup
2024-11-12 01:40:10 +05:30
Shaheer Kochai
577a169508 feat: alert rename interaction (#6208)
* feat: alert rename interaction

* feat: add support for enter and escape shortcuts to submit and cancel rename

* chore: add missing alert field

* chore: update the style similar to dashboard rename

* refactor: remove buttonProps

* chore: add missing alert property to fix the build

---------

Co-authored-by: Srikanth Chekuri <srikanth.chekuri92@gmail.com>
2024-11-11 19:13:07 +00:00
Shaheer Kochai
939e2a3570 fix: fix the issue of adding new query in new alert page changing the data source (#6286)
Co-authored-by: Srikanth Chekuri <srikanth.chekuri92@gmail.com>
2024-11-11 07:34:20 +00:00
Yunus M
b64326070c [Snyk] Fix for 2 vulnerabilities (#6215)
* fix: frontend/package.json & frontend/yarn.lock to reduce vulnerabilities

The following vulnerabilities are fixed with an upgrade:
- https://snyk.io/vuln/SNYK-JS-UPLOT-6209224
- https://snyk.io/vuln/SNYK-JS-VUETEMPLATECOMPILER-8219888

* chore: upgrade design tokens to 1.1.3

---------

Co-authored-by: snyk-bot <snyk-bot@snyk.io>
Co-authored-by: ahmadshaheer <ashaheerki@gmail.com>
2024-11-11 06:45:43 +00:00
SagarRajput-7
63872983c6 feat: added metric page in messaging queues (#6399)
* feat: added metric page in messaging queues

* feat: added misc fixes

* feat: removed a class name from mqcards

* feat: added lightMode styles for kafka 2.0 (#6400)

* feat: resolved comments and used strings
2024-11-09 13:04:43 +05:30
Prashant Shahi
eb6670980a Merge pull request #6405 from SigNoz/release/v0.58.x
Release/v0.58.x
2024-11-08 21:49:57 +05:30
Srikanth Chekuri
831540eaf0 fix: test notification missing for anomaly alert (#6391) 2024-11-08 15:40:09 +00:00
Prashant Shahi
48f3b9cacb chore(signoz): pin versions: SigNoz 0.58.0, SigNoz OtelCollector 0.111.8
Signed-off-by: Prashant Shahi <prashant@signoz.io>
2024-11-08 16:02:59 +05:30
Prashant Shahi
eaf8571fe9 Merge branch 'main' into release/v0.58.x 2024-11-08 15:49:41 +05:30
Nityananda Gohain
22c10f9479 Issue 6367 (#6376)
* fix: issue with order by on materialized column

* fix: tests

* fix: order by issue in old explorer as well
2024-11-08 07:05:32 +00:00
Yunus M
e748fb0655 chore: update events for onboarding part 2 (#6397) 2024-11-08 06:52:39 +00:00
SagarRajput-7
fdc54a62a9 fix: kafka - misc fix and features (#6379)
* feat: fixed multiple issues and chores in kafka 2.0

* feat: fixed producer latency - producer-detail call

* feat: fixed mq-detail page layout and pagination

* feat: resolved comments
2024-11-07 23:49:47 +05:30
SagarRajput-7
abe0ab69b0 feat: added kafka - scenario - 4 - drop rate table (#6380)
* feat: added kafka - scenario - 4 - drop rate table

* feat: added api, new table and traceid redirection

* feat: code refactor
2024-11-07 23:37:54 +05:30
Shaheer Kochai
e623c92615 fix: adding the key requires double enter before it gets added as label key after the first label (#6296) 2024-11-07 15:03:59 +00:00
Shaheer Kochai
dc5917db01 chore: setup router compatibility package (#6285) 2024-11-07 19:02:23 +04:30
dependabot[bot]
d6a7f0b6f4 chore(deps): bump express from 4.19.2 to 4.21.1 in /frontend (#6166)
Bumps [express](https://github.com/expressjs/express) from 4.19.2 to 4.21.1.
- [Release notes](https://github.com/expressjs/express/releases)
- [Changelog](https://github.com/expressjs/express/blob/4.21.1/History.md)
- [Commits](https://github.com/expressjs/express/compare/4.19.2...4.21.1)

---
updated-dependencies:
- dependency-name: express
  dependency-type: indirect
...

Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2024-11-07 11:48:05 +05:30
Vikrant Gupta
471803115e feat: added support for instrumentation scope in logs (#6378)
* feat: added support for instrumentation scope in logs

* chore: remove console logs

* fix: the logic for rendering prefix

* feat: address review comments

---------

Co-authored-by: SagarRajput-7 <162284829+SagarRajput-7@users.noreply.github.com>
2024-11-07 11:47:35 +05:30
SagarRajput-7
8403a3362d feat: corrected the handling of relativeTime as null in alertHistory page (#6392) 2024-11-07 02:34:11 +05:30
Nityananda Gohain
64d46bc855 feat: support for scope in logs old and new qb (#6339) 2024-11-06 20:47:04 +05:30
SagarRajput-7
c9fee27604 feat: updated the design for Messaging Queue - summary section (#6319)
* feat: updated the design for Messaging Queue - summary section

* feat: resolved comments

* feat: added better logic for switch options and resolved query
2024-11-06 14:44:39 +05:30
SagarRajput-7
f1b6b2d3d8 feat: added onboarding detail for consumer setup (#6372)
* feat: added onboarding detail for consumer setup

* feat: corrected spelling

* feat: reordered imports correctly
2024-11-06 14:43:54 +05:30
SagarRajput-7
468f056530 feat: added kafka - scenario - 4 (#6370)
* feat: added kafka - scenario - 4

* feat: added error handling and correct api handler for kafka apis
2024-11-06 14:24:38 +05:30
SagarRajput-7
7086470ce2 feat: added healthcheck and attribute checklist component for Kafka (#6371)
* feat: added healthcheck and attribute checklist component for Kafka

* feat: corrected the onboarding api payload

* feat: added missing configuration button at overview and onboarding flow
2024-11-06 14:23:51 +05:30
Yunus M
352296c6cd fix: initialize target to 3 in anomaly detection alert (#6362) 2024-11-05 22:13:12 +05:30
SagarRajput-7
975307a8b8 feat: added onboarding setup for Producer for Messaging queues (#6236)
* fix: added onboarding setup for producer/consumer for Messaging queues

* fix: polled onboarding status api

* feat: added onboarding status api with useQuery functions and updated endpoints

* feat: added onboarding status api util for attribute data

* feat: refactoring and url query changes

* feat: changed start and end time to nanosecond for api payload

* feat: added comment description
2024-11-05 19:40:23 +05:30
SagarRajput-7
12377be809 feat: added generic UI for scenario 1,3,4 (#6287)
* feat: added generic table component for scenario 1,3,4

* feat: added generic logic for mq detail tables and consumed for sc-1,2

* feat: added overview and details table for scenario-3

* feat: added table row clicks func

* feat: resolved comments
2024-11-05 19:26:41 +05:30
Yunus M
9d90b8d19c chore: github wf update pr labels and block pr until related docs are shipped for the feature (#6333) 2024-11-04 23:58:38 +05:30
Yunus M
5005923ef4 fix: re add threshold for promql alerts (#6355) 2024-11-04 15:19:05 +05:30
Srikanth Chekuri
db4338be42 chore: add feature flag, handle out-of-index error, some house keeping work (#6344) 2024-11-02 01:23:43 +05:30
Yunus M
c7d0598ec0 feat: improve async handling for org onboarding cases (#6342) 2024-11-01 23:55:29 +05:30
Yunus M
4978fb9599 fix: add safety check to check if anomaly rule in uplot chart options (#6343) 2024-11-01 22:51:09 +05:30
Shivanshu Raj Shrivastava
7b18c3ba06 enable scenario 4 on staging (#6269)
* fix: enable env at docker compose
2024-11-01 21:19:58 +05:30
Shaheer Kochai
92cdb36879 fix: redirect to docs on clicking alert setup guide in create alert page (#6265) 2024-11-01 17:03:59 +05:30
Nityananda Gohain
580f0b816e fix: issues with resource query builder w.r.t quotes (#6318) 2024-11-01 13:52:13 +05:30
Shivanshu Raj Shrivastava
b770fc2457 fix: typo (#6334) 2024-10-31 20:11:50 +05:30
dependabot[bot]
c177230cce chore(deps): bump webpack from 5.88.2 to 5.94.0 in /frontend (#5813)
Bumps [webpack](https://github.com/webpack/webpack) from 5.88.2 to 5.94.0.
- [Release notes](https://github.com/webpack/webpack/releases)
- [Commits](https://github.com/webpack/webpack/compare/v5.88.2...v5.94.0)

---
updated-dependencies:
- dependency-name: webpack
  dependency-type: direct:production
...

Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2024-10-31 16:44:26 +05:30
Ankit Nayan
2112047a02 [Snyk] Security upgrade alpine from 3.18.5 to 3.20.3 (#6237)
The following vulnerabilities are fixed with an upgrade:
- https://snyk.io/vuln/SNYK-ALPINE318-BUSYBOX-6913411
- https://snyk.io/vuln/SNYK-ALPINE318-BUSYBOX-7249265
- https://snyk.io/vuln/SNYK-ALPINE318-BUSYBOX-7249419
- https://snyk.io/vuln/SNYK-ALPINE318-OPENSSL-6152404

Co-authored-by: snyk-bot <snyk-bot@snyk.io>
2024-10-31 15:54:28 +05:30
Shaheer Kochai
03c193d5a1 chore: upgrade axios from 1.7.4 to 1.7.7 (#6291) 2024-10-31 09:00:34 +00:00
Yunus M
b83b295318 fix: frontend/package.json & frontend/yarn.lock to reduce vulnerabilities (#6266)
The following vulnerabilities are fixed with an upgrade:
- https://snyk.io/vuln/SNYK-JS-HTTPPROXYMIDDLEWARE-8229906
- https://snyk.io/vuln/SNYK-JS-UPLOT-6209224

Co-authored-by: snyk-bot <snyk-bot@snyk.io>
2024-10-31 14:10:49 +05:30
Shivanshu Raj Shrivastava
fbe75cd057 fix: use query builder for metrics onboarding API (#6327) 2024-10-30 23:13:56 +05:30
Yunus M
860145fb1d fix: handle redirect in onboarding (#6324) 2024-10-30 15:00:01 +00:00
dependabot[bot]
2fe75e74cd chore(deps): bump uplot from 1.6.26 to 1.6.31 in /frontend
Bumps [uplot](https://github.com/leeoniya/uPlot) from 1.6.26 to 1.6.31.
- [Release notes](https://github.com/leeoniya/uPlot/releases)
- [Commits](https://github.com/leeoniya/uPlot/compare/1.6.26...1.6.31)

---
updated-dependencies:
- dependency-name: uplot
  dependency-type: direct:production
...

Signed-off-by: dependabot[bot] <support@github.com>
2024-10-30 16:59:58 +05:30
Yunus M
8e19c346a4 feat: handle light mode and remove unnecessary logos 2024-10-30 15:41:51 +05:30
Yunus M
1b33efe4cc chore: remove workspace from fullscreen 2024-10-30 15:41:51 +05:30
Yunus M
2642338672 chore: update org onboarding package 2024-10-30 15:41:51 +05:30
Yunus M
845dc00568 feat: handle onboarding visibility 2024-10-30 15:41:51 +05:30
Yunus M
a1090bfdc5 feat: handle invite user flows 2024-10-30 15:41:51 +05:30
Yunus M
44f41c55f9 feat: handle linear to exponential conversion for logs, services and hosts 2024-10-30 15:41:51 +05:30
Yunus M
42ac9ab6fe feat: update to use v2 instance 2024-10-30 15:41:51 +05:30
Yunus M
5c02250aae feat: feedback updates 2024-10-30 15:41:51 +05:30
Yunus M
c49a9dac1a feat: feedback updates 2024-10-30 15:41:51 +05:30
Yunus M
abc2ec2155 feat: handle redirection after onboarding 2024-10-30 15:41:51 +05:30
Yunus M
4dc5615d2f feat: handle errors for profiles and invite users api 2024-10-30 15:41:51 +05:30
Yunus M
6c350f30aa feat: integrate update profile and invite users api 2024-10-30 15:41:51 +05:30
Yunus M
6664e1bc02 feat: maintain state and add log events 2024-10-30 15:41:51 +05:30
Yunus M
438cbcef87 feat: add questionnaire components (#5998)
* feat: add questionnaire components

* feat: update css

* feat: delete icon svgs and update css

* feat: update component names
2024-10-30 15:41:51 +05:30
Yunus M
829e1f0920 feat: onboarding v2 base setup 2024-10-30 15:41:51 +05:30
Srikanth Chekuri
68d25a8989 fix: add support for {{.Labels.<key>}} with dots in key for template (#6282) 2024-10-30 14:12:45 +05:30
Vikrant Gupta
cc90321ac0 chore: move hostname to resource attributes for logs qf (#6303)
* chore: move hostname to resource attributes for logs qf

* chore: fix the key for hostname
2024-10-29 13:18:34 +05:30
Prashant Shahi
b10c22223b Merge pull request #6300 from SigNoz/release/signoz-0.57.0
Release/signoz 0.57.0
2024-10-29 00:45:14 +05:30
Ekansh Gupta
bdcae62bf9 fix: fixed the step interval which was being percolated to list view (#6260)
* fix: fixed the step interval which was being percolated to list view

* fix: fixed the step interval which was being percolated to list view

* fix: fixed the step interval which was being percolated to list view

* fix: fixed the step interval which was being percolated to list view

* fix: fixed the step interval which was being percolated to list view

* fix: fixed the step interval which was being percolated to list view

* chore: bump signoz-otel-collector version (#6290)

* Chore: bump signoz otel collector dependency to 0.111.5 (#6302)

* chore: bump signoz-otel-collector dependency version to 0.111.5

* chore: logs filter suggestions: update import for ResourceHierarchy from signoz-otel-collector

* fix: fixed the step interval which was being percolated to list view

---------

Co-authored-by: Srikanth Chekuri <srikanth.chekuri92@gmail.com>
Co-authored-by: Raj Kamal Singh <1133322+raj-k-singh@users.noreply.github.com>
2024-10-28 22:32:04 +05:30
Prashant Shahi
cdde369748 Merge branch 'develop' into release/signoz-0.57.0 2024-10-28 21:26:47 +05:30
Raj Kamal Singh
4e26189778 Chore: bump signoz otel collector dependency to 0.111.5 (#6302)
* chore: bump signoz-otel-collector dependency version to 0.111.5

* chore: logs filter suggestions: update import for ResourceHierarchy from signoz-otel-collector
2024-10-28 21:23:47 +05:30
Prashant Shahi
523cbcd6fc chore: go mod tidy
Signed-off-by: Prashant Shahi <prashant@signoz.io>
2024-10-28 19:40:23 +05:30
Prashant Shahi
eeadc021e1 chore(signoz): pin versions: SigNoz 0.57.0
Signed-off-by: Prashant Shahi <prashant@signoz.io>
2024-10-28 19:40:10 +05:30
Srikanth Chekuri
952ab58023 chore: bump signoz-otel-collector version (#6290) 2024-10-28 14:06:43 +05:30
ahmadshaheer
3ca2fff5c5 fix: send float number in args array in case of timeshift formula 2024-10-28 11:52:38 +05:30
ahmadshaheer
ef3a9adb48 Revert "fix: add v4 to the new alert payload (#6090)"
This reverts commit 5651d69485.
2024-10-28 11:52:38 +05:30
ahmadshaheer
975f141604 fix: add v4 to default alert objects to fix the issue of incorrect query response due to v3 endpoint 2024-10-28 11:52:38 +05:30
ahmadshaheer
c206f4fa5c chore: improve comments 2024-10-28 11:51:33 +05:30
ahmadshaheer
e88e24e434 fix: fix the race condition resulting in switching between views not working properly 2024-10-28 11:51:33 +05:30
Raj Kamal Singh
94e0423479 Fix: logs pipelines: ensure special characters in pipeline identifiers don't result in bad collector config names (#6259)
* chore: add test validating pipe char in pipeline alias doesn't break collector config

* fix: pipelines collector conf: ensure bad names are not generated
2024-10-28 11:46:12 +05:30
Raj Kamal Singh
5891fbc229 Chore: upgrade signoz otel collector dependency to v0.111.2 (#6257)
* chore: upgrade signoz-otel-collector dependencies to v0.111.2

* chore: update references to otel-collector types in collector simulator

* chore: escape '$' as '$$$' and not '$$' in generated pipeline collector config

* chore: update go.sum entry for logstransformprocessor

* chore: some more go.sum updates to get build working

---------

Co-authored-by: Srikanth Chekuri <srikanth.chekuri92@gmail.com>
2024-10-28 10:45:20 +05:30
Vikrant Gupta
8137ec54ba fix: arithmetic operators are removed from Dashboard query builder formulas (#6276)
* fix: the redirect from dashboard landing page to edit page removing arithmetic operations

* fix: the url for the dashboard edit widget
2024-10-26 21:46:18 +05:30
Vibhu Pandey
f7b80524a5 feat(integrations): whitelist /deployments/me (#6275) 2024-10-26 16:11:43 +05:30
Sudeep MP
4be0508dd2 feat: add Request Dashboard button and improve dashboard list styles (#6251)
* feat: add Request Dashboard button and improve dashboard list styles

* feat: support for empty state and dashboard list state
2024-10-25 13:04:54 +00:00
Shivanshu Raj Shrivastava
a31c4b8339 Fix api query context (#6268)
* chore: fix naming of api query context
2024-10-25 14:06:54 +05:30
Vishal Sharma
d7846338ce chore: remove facing issues button (#6256) 2024-10-24 16:53:00 +05:30
Vikrant Gupta
5dac1ad20a fix: explicitly return the empty slice for variables query (#6258) 2024-10-24 15:03:35 +05:30
Yunus M
8d704c331c feat: update the response data type 2024-10-24 14:54:34 +05:30
Yunus M
f8e47496fa feat: add get and update apis for org and user preferences 2024-10-24 14:54:34 +05:30
Srikanth Chekuri
6fef9d9676 chore: fix access for downtime schedules (#6255) 2024-10-24 07:37:03 +00:00
Srikanth Chekuri
190767fd0a chore: add k8s nodes, namespaces, and cluster list (#6230) 2024-10-24 12:17:24 +05:30
Srikanth Chekuri
1e78786cae chore: add k8s pods list (#6229) 2024-10-24 11:33:51 +05:30
Srikanth Chekuri
6448fb17e7 chore: update hosts list to use pre-aggregated data table dynamically (#6227) 2024-10-24 11:19:39 +05:30
Srikanth Chekuri
f2e33d7ca9 chore: enable anomaly detection for ee/paid plan (#6243) 2024-10-24 07:38:31 +05:30
Srikanth Chekuri
6c7167a224 chore: add missing shiftby in alert rule (#6239) 2024-10-24 02:20:32 +05:30
dependabot[bot]
00421235b0 chore(deps): bump micromatch from 4.0.5 to 4.0.8 in /frontend (#5817)
Bumps [micromatch](https://github.com/micromatch/micromatch) from 4.0.5 to 4.0.8.
- [Release notes](https://github.com/micromatch/micromatch/releases)
- [Changelog](https://github.com/micromatch/micromatch/blob/master/CHANGELOG.md)
- [Commits](https://github.com/micromatch/micromatch/compare/4.0.5...4.0.8)

---
updated-dependencies:
- dependency-name: micromatch
  dependency-type: indirect
...

Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2024-10-24 01:38:31 +05:30
Vishal Sharma
0e2b67059b Fix/bulk invite api error response (#6247) 2024-10-23 23:43:46 +05:30
Yunus M
910c44cefc feat: add org onboarding preference (#6248) 2024-10-23 23:27:25 +05:30
Vishal Sharma
8bad036423 fix: name is optional in bulk invite API (#6246) 2024-10-23 19:49:45 +05:30
Yunus M
a21830132f feat: remove hardcode is darkmode value from get anomaly data func (#6245) 2024-10-23 19:36:02 +05:30
Yunus M
9419f56e95 feat: tooltip plugin to show series data in tooltip (#6194)
* feat: tooltip plugin to show series data in tooltip

* feat: send anomaly function as last function

* feat: default z_score_threshold to 3

* fix: anomaly function not applied on initial load

* feat: maintain select alert type on reload

* feat: maintain select alert type on reload

* chore: update events to handle anomaly alert interactions
2024-10-23 19:06:38 +05:30
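
For orientation, a minimal uPlot tooltip plugin along the lines of the first bullet: uPlot plugins are objects with `hooks`, and `setCursor` fires as the cursor moves, exposing the hovered index so per-series values can be collected. The `render` callback and row shape are assumptions; DOM work is elided.

```typescript
import uPlot from 'uplot';

function tooltipPlugin(
	render: (rows: { label: string; value: number | null | undefined }[]) => void,
): uPlot.Plugin {
	return {
		hooks: {
			setCursor: (u: uPlot): void => {
				const { idx } = u.cursor;
				if (idx == null) return; // cursor left the plot area
				// Series 0 is the x axis; collect each y series' value at idx.
				const rows = u.series.slice(1).map((series, i) => ({
					label: String(series.label ?? `Series ${i + 1}`),
					value: u.data[i + 1][idx],
				}));
				render(rows);
			},
		},
	};
}

// Usage sketch: pass alongside the other chart options, e.g.
// new uPlot({ ...opts, plugins: [tooltipPlugin(renderTooltipRows)] }, data, el);
```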
Sai Chander
347868c18b feat: enable offline functionality for frontend (#6152)
Co-authored-by: Vikrant Gupta <vikrant.thomso@gmail.com>
2024-10-23 05:53:51 +00:00
Srikanth Chekuri
17e20e7f41 chore: split migrator job to sync and async (#6107) 2024-10-23 00:05:10 +05:30
Nityananda Gohain
2b0da82f94 feat: move resource qb to its own package and use common options (#6238)
* feat: move resource qb to its own package and use common options

* fix: add remaining files

* fix: remove redundant struct
2024-10-22 16:46:58 +05:30
Vikrant Gupta
911362cecf chore: remove the required check from organisation name (#6225) 2024-10-21 18:34:37 +05:30
SagarRajput-7
481f9620d3 chore: added doc link for kafka on getStarted for non-cloud users (#6222)
* chore: added doc link for kafka on getStarted for non-cloud users

* chore: added link with utm parameter
2024-10-21 15:52:35 +05:30
SagarRajput-7
e5be431f18 fix: removed selectedValue & uuid from exported and copied dashboard json (#6145)
* fix: removed selectedValue from exported and copied dashboard json

* fix: added uuid validation to not expect empty uuid

* fix: removed uuid from export and copied dashboard json

* fix: resolved comment and removed uuid when empty

* fix: renamed function
2024-10-21 15:51:43 +05:30
SagarRajput-7
503ed45a99 fix: fixed threshold for columns with units (#6079)
* fix: fixed threshold for columns with units

* fix: added interunit and category conversion for handling threshold across different unit types

* fix: added invalid comparison text and removed unsupported unit categories

* fix: cleanup and multiple threshold and state change handling

* fix: restrict category select to only columnUnit group

* fix: restricted column name from threshold option

* fix: removed console log

* fix: resolved comments and some refactoring

* fix: resolved comments and some refactoring
2024-10-21 15:51:22 +05:30
Srikanth Chekuri
28818fbaac chore: update query builder to use 5min/30min aggregation tables (#5679) 2024-10-21 14:22:32 +05:30
Vikrant Gupta
c0e40614bf fix: issues with the logs where clause filter (#6198)
* fix: flicker in logs explorer page

* fix: added inline code comments

* fix: edit rules css should be scoped inside the container

* fix: add explicit space after adding key and operators to help users type and create the tag

* fix: do not wait for api to complete for the user to enter the query

---------

Co-authored-by: Srikanth Chekuri <srikanth.chekuri92@gmail.com>
2024-10-21 11:42:20 +05:30
Akira Hayashi
2d732ae4a9 chore: run all query-service tests (#6098) 2024-10-20 15:24:12 +00:00
Vikrant Gupta
8466e31e02 chore: change the log background to same as severity text with some opacity (#6217)
* chore: change the log background to same as severity text with some opacity

* chore: make the log background changes for column renderer

* chore: remove the backdrop mask from logs details drawer

* chore: fix tests
2024-10-18 18:19:42 +05:30
Vikrant Gupta
efdaf7ee43 chore: make the clear selection view more highlighted (#6216)
* chore: make the clear selection view more highlighted

* chore: address minor design issue
2024-10-18 18:18:30 +05:30
Vikrant Gupta
0dec94a5c6 chore: remove the trial expiry banner on logout (#6212) 2024-10-18 18:17:17 +05:30
Shivanshu Raj Shrivastava
204728ff60 chore: move constants to fix ci (#6213) 2024-10-18 01:15:54 +05:30
Shivanshu Raj Shrivastava
e51f4d986d feat: kafka Scenario 1, 3, 4 all squashed (#6144)
* feat: kafka partition level observability features
2024-10-17 19:44:42 +05:30
Vishal Sharma
337a941d0d feat: onboarding API via proxy (#6058)
* feat: onboarding API via proxy

* chore: update profiles route

* chore: update profiles url
2024-10-17 19:09:10 +05:30
Vishal Sharma
fc4b55cb34 feat: bulk invite user api (#6057) 2024-10-17 18:41:31 +05:30
Shaheer Kochai
96cb8053df chore: add test id to additional filters button (#6183) 2024-10-17 10:13:35 +04:30
Shaheer Kochai
5651d69485 fix: add v4 to the new alert payload (#6090) 2024-10-17 10:12:26 +04:30
Prashant Shahi
a6e492880d fix(docker): use env prefix for boolean in collector config (#6199)
### Summary

Fixes for `expected type 'bool', got unconvertible type 'string'` errors (illustrated below).

Signed-off-by: Prashant Shahi <prashant@signoz.io>
2024-10-16 15:07:02 +05:30
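
For context on the error above: a bare `${VAR}` substitution always yields a string, while the collector config expects a typed bool; the `${env:VAR}` prefix (visible in the collector-config hunks further down, e.g. `${env:LOW_CARDINAL_EXCEPTION_GROUPING}`) lets the resolver supply a convertible value. A small, self-contained Go illustration of the string-vs-bool distinction — not collector code:

```go
package main

import (
	"fmt"
	"os"
	"strconv"
)

func main() {
	os.Setenv("LOW_CARDINAL_EXCEPTION_GROUPING", "false")

	raw := os.Getenv("LOW_CARDINAL_EXCEPTION_GROUPING") // always a string
	parsed, err := strconv.ParseBool(raw)               // explicit bool conversion
	if err != nil {
		fmt.Println("unconvertible type 'string':", err)
		return
	}
	fmt.Printf("raw=%q parsed=%v\n", raw, parsed)
}
```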
SagarRajput-7
80b3c3e256 fix: fixed all not deselecting and empty array setting to _ALL_ (#6086) 2024-10-16 11:22:30 +05:30
Srikanth Chekuri
0806420dd7 chore: add process list (#6125) 2024-10-15 23:02:52 +05:30
Yunus M
18e240e3d1 feat: search series in anomaly response data (#6185) 2024-10-15 19:16:43 +05:30
Prashant Shahi
d0965a24c5 Merge pull request #6181 from SigNoz/sync/signoz-0.56.0
Sync/post release v0.56
2024-10-15 00:11:37 +05:30
Robi
7ed689693f fix: remove trailing slash from http payload (#6176) 2024-10-14 19:46:45 +05:30
rahulkeswani101
90ae55264a fix: updated row key in triggered alert list table (#6154) 2024-10-14 17:16:44 +05:30
Srikanth Chekuri
bf4c792cdb chore: update default feature flag and error response for formula (#6184) 2024-10-14 14:04:49 +05:30
SagarRajput-7
dd097821d1 fix: fixed incorrect label for orderBy clause when selected (#6177) 2024-10-14 14:04:27 +05:30
Yunus M
701b8803ac feat: move anomaly detection behind ff and show beta (#6180) 2024-10-14 12:18:55 +05:30
Raj Kamal Singh
2728ddd255 Fix: log pipelines generates bad config if first op is disabled (#6174)
* chore: add test reproducing bad config generation when first pipeline op is disabled

* fix: logs pipelines: set router output to first enabled operator (see the sketch below)
2024-10-14 11:24:42 +05:30
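
The fix above boils down to routing the pipeline entry point to the first *enabled* operator instead of unconditionally to operator zero. A minimal sketch with hypothetical types, not the actual pipeline code:

```go
package main

import "fmt"

// pipelineOperator is a hypothetical stand-in for a log pipeline operator.
type pipelineOperator struct {
	ID      string
	Enabled bool
}

// firstEnabledOperatorID returns the operator the router should emit to,
// skipping disabled operators; ok is false if every operator is disabled.
func firstEnabledOperatorID(ops []pipelineOperator) (id string, ok bool) {
	for _, op := range ops {
		if op.Enabled {
			return op.ID, true
		}
	}
	return "", false
}

func main() {
	ops := []pipelineOperator{{ID: "grok", Enabled: false}, {ID: "move", Enabled: true}}
	fmt.Println(firstEnabledOperatorID(ops)) // move true
}
```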
Raj Kamal Singh
5187ed58a0 Revert "Revert "Feat: use new logspipelineprocessor for generating logs pipel…" (#6179)
This reverts commit 1411ae41c3.
2024-10-14 11:11:51 +05:30
Yunus M
2180118094 feat: anomaly detection UI (#5916)
* feat: anomaly detection - initial ui

* feat: anomaly detection - oct 10

* feat: use antd checkbox

* feat: handle multiple series

* feat: handle chart height on switch between threshold / anomaly

* feat: do not update url on detection type change

* chore: pr clean up

* feat: remove chart container background
2024-10-14 10:31:02 +05:30
Prashant Shahi
78d1e19e60 Merge pull request #6156 from SigNoz/release/v0.56.x
Release/v0.56.x
2024-10-11 21:41:34 +05:30
Prashant Shahi
5588c7dd3f revert(signoz): pin versions: Schema Migrator 0.102.10
Signed-off-by: Prashant Shahi <prashant@signoz.io>
2024-10-10 19:41:47 +05:30
Prashant Shahi
b70d50f2b3 chore: pin versions: SigNoz 0.56.0, OtelCollector 0.102.12, Alertmanager 0.23.7
Signed-off-by: Prashant Shahi <prashant@signoz.io>
2024-10-10 15:39:49 +05:30
Prashant Shahi
728f699051 Merge branch 'main' into release/v0.56.x 2024-10-10 15:17:47 +05:30
Prashant Shahi
87499d1ead feat: enable new logs schema by default (#6077)
#### Summary

- update all docker compose YAMLs to use new logs schema
- enable `use_new_schema` flag for clickhouselogsexporter in otel-collector config YAMLs
- remove prefer delta from docker-compose YAMLs

---------

Signed-off-by: Prashant Shahi <prashant@signoz.io>
2024-09-26 00:31:06 +05:30
Prashant Shahi
5fa8686fcf Merge pull request #6075 from SigNoz/release/v0.55.x
Release/v0.55.x
2024-09-25 22:25:37 +05:30
Prashant Shahi
dc2db524c7 chore(signoz): 📌 pin versions: SigNoz 0.55.0, SigNoz OtelCollector 0.102.10
Signed-off-by: Prashant Shahi <prashant@signoz.io>
2024-09-25 21:46:12 +05:30
Prashant Shahi
b3545b767a Merge branch 'main' into release/v0.55.x 2024-09-25 21:43:48 +05:30
Prashant Shahi
08f3b089f4 Merge pull request #5922 from SigNoz/release/v0.54.x
Release/v0.54.x
2024-09-11 15:33:09 +05:30
Prashant Shahi
1d8e5b6c0f chore(signoz): 📌 pin versions: SigNoz 0.54.0, SigNoz OtelCollector 0.102.8
Signed-off-by: Prashant Shahi <prashant@signoz.io>
2024-09-11 13:47:02 +05:30
Prashant Shahi
0dcded59e5 Merge branch 'main' into release/v0.54.x 2024-09-11 13:45:00 +05:30
Prashant Shahi
262beef8f9 Merge pull request #5800 from SigNoz/release/v0.53.x
Release/v0.53.x
2024-08-30 15:20:27 +05:30
Prashant Shahi
43cc6dea92 chore(signoz): 📌 pin versions: SigNoz OtelCollector 0.102.7
Signed-off-by: Prashant Shahi <prashant@signoz.io>
2024-08-30 15:06:52 +05:30
Prashant Shahi
6684640abe Merge branch 'develop' into release/v0.53.x 2024-08-30 12:50:35 +05:30
Prashant Shahi
0a146910d6 chore(signoz): 📌 pin versions: SigNoz 0.53.0, SigNoz OtelCollector 0.102.6
Signed-off-by: Prashant Shahi <prashant@signoz.io>
2024-08-29 19:25:34 +05:30
Prashant Shahi
690ed0f7f1 Merge branch 'main' into release/v0.53.x 2024-08-29 19:23:57 +05:30
Prashant Shahi
5bcf7de440 Merge pull request #5704 from SigNoz/release/v0.52.x
Release/v0.52.x
2024-08-16 21:00:32 +05:30
Prashant Shahi
703983a5f9 chore(signoz): 📌 pin versions: SigNoz 0.52.0, SigNoz OtelCollector 0.102.3
Signed-off-by: Prashant Shahi <prashant@signoz.io>
2024-08-15 23:12:36 +05:30
Prashant Shahi
766a2123c5 Merge branch 'main' into release/v0.52.x 2024-08-15 13:42:02 +05:30
Prashant Shahi
a476c68f7e Merge pull request #5618 from SigNoz/release/v0.51.x
Release/v0.51.x
2024-07-31 22:30:09 +05:30
Prashant Shahi
fc15aa6f1c Merge branch 'develop' into release/v0.51.x 2024-07-31 21:29:07 +05:30
Prashant Shahi
4192fd573d chore(signoz): 📌 pin versions: SigNoz 0.51.0, SigNoz OtelCollector 0.102.3
Signed-off-by: Prashant Shahi <prashant@signoz.io>
2024-07-31 20:29:09 +05:30
Prashant Shahi
ca13d80205 Merge branch 'main' into release/v0.51.x 2024-07-31 20:27:51 +05:30
Prashant Shahi
8d84ce8f06 Merge pull request #5509 from SigNoz/release/v0.50.x
Release/v0.50.x
2024-07-17 20:16:19 +05:30
Prashant Shahi
09ea7b9eb5 chore(signoz): 📌 pin versions: SigNoz 0.50.0
Signed-off-by: Prashant Shahi <prashant@signoz.io>
2024-07-17 19:01:03 +05:30
944 changed files with 35802 additions and 13072 deletions

View File

@@ -3,7 +3,6 @@ name: build-pipeline
on:
pull_request:
branches:
- develop
- main
- release/v*

.github/workflows/docs.yml (new file)
View File

@@ -0,0 +1,83 @@
name: "Update PR labels and Block PR until related docs are shipped for the feature"
on:
pull_request:
branches:
- main
types: [opened, edited, labeled, unlabeled]
permissions:
pull-requests: write
contents: read
jobs:
docs_label_check:
runs-on: ubuntu-latest
steps:
- name: Check PR Title and Manage Labels
uses: actions/github-script@v6
with:
script: |
const prTitle = context.payload.pull_request.title;
const prNumber = context.payload.pull_request.number;
const owner = context.repo.owner;
const repo = context.repo.repo;
// Fetch the current PR details to get labels
const pr = await github.rest.pulls.get({
owner,
repo,
pull_number: prNumber
});
const labels = pr.data.labels.map(label => label.name);
if (prTitle.startsWith('feat:')) {
const hasDocsRequired = labels.includes('docs required');
const hasDocsShipped = labels.includes('docs shipped');
const hasDocsNotRequired = labels.includes('docs not required');
// If "docs not required" is present, skip the checks
if (hasDocsNotRequired && !hasDocsRequired) {
console.log("Skipping checks due to 'docs not required' label.");
return; // Exit the script early
}
// If "docs shipped" is present, remove "docs required" if it exists
if (hasDocsShipped && hasDocsRequired) {
await github.rest.issues.removeLabel({
owner,
repo,
issue_number: prNumber,
name: 'docs required'
});
console.log("Removed 'docs required' label.");
}
// Add "docs required" label if neither "docs shipped" nor "docs required" are present
if (!hasDocsRequired && !hasDocsShipped) {
await github.rest.issues.addLabels({
owner,
repo,
issue_number: prNumber,
labels: ['docs required']
});
console.log("Added 'docs required' label.");
}
}
// Fetch the updated labels after any changes
const updatedPr = await github.rest.pulls.get({
owner,
repo,
pull_number: prNumber
});
const updatedLabels = updatedPr.data.labels.map(label => label.name);
const updatedHasDocsRequired = updatedLabels.includes('docs required');
const updatedHasDocsShipped = updatedLabels.includes('docs shipped');
// Block PR if "docs required" is still present and "docs shipped" is missing
if (updatedHasDocsRequired && !updatedHasDocsShipped) {
core.setFailed("This PR requires documentation. Please remove the 'docs required' label and add the 'docs shipped' label to proceed.");
}

View File

@@ -42,7 +42,7 @@ jobs:
kubectl create ns sample-application
# apply hotrod k8s manifest file
kubectl -n sample-application apply -f https://raw.githubusercontent.com/SigNoz/signoz/develop/sample-apps/hotrod/hotrod.yaml
kubectl -n sample-application apply -f https://raw.githubusercontent.com/SigNoz/signoz/main/sample-apps/hotrod/hotrod.yaml
# wait for all deployments in sample-application namespace to be READY
kubectl -n sample-application get deploy --output name | xargs -r -n1 -t kubectl -n sample-application rollout status --timeout=300s

View File

@@ -2,7 +2,8 @@ name: Jest Coverage - changed files
on:
pull_request:
branches: develop
branches:
- main
jobs:
build:
@@ -11,7 +12,7 @@ jobs:
- name: Checkout
uses: actions/checkout@v4
with:
ref: "refs/heads/develop"
ref: "refs/heads/main"
token: ${{ secrets.GITHUB_TOKEN }} # Provide the GitHub token for authentication
- name: Fetch branch

View File

@@ -4,7 +4,6 @@ on:
push:
branches:
- main
- develop
tags:
- v*
@@ -58,6 +57,17 @@ jobs:
steps:
- name: Checkout code
uses: actions/checkout@v4
- name: Create .env file
run: |
echo 'INTERCOM_APP_ID="${{ secrets.INTERCOM_APP_ID }}"' > frontend/.env
echo 'SEGMENT_ID="${{ secrets.SEGMENT_ID }}"' >> frontend/.env
echo 'SENTRY_AUTH_TOKEN="${{ secrets.SENTRY_AUTH_TOKEN }}"' >> frontend/.env
echo 'SENTRY_ORG="${{ secrets.SENTRY_ORG }}"' >> frontend/.env
echo 'SENTRY_PROJECT_ID="${{ secrets.SENTRY_PROJECT_ID }}"' >> frontend/.env
echo 'SENTRY_DSN="${{ secrets.SENTRY_DSN }}"' >> frontend/.env
echo 'TUNNEL_URL="${{ secrets.TUNNEL_URL }}"' >> frontend/.env
echo 'TUNNEL_DOMAIN="${{ secrets.TUNNEL_DOMAIN }}"' >> frontend/.env
echo 'POSTHOG_KEY="${{ secrets.POSTHOG_KEY }}"' >> frontend/.env
- name: Setup golang
uses: actions/setup-go@v4
with:

.github/workflows/releaser.yaml (new file)
View File

@@ -0,0 +1,16 @@
name: releaser
on:
# schedule every wednesday 9:30 AM UTC (3pm IST)
schedule:
- cron: '30 9 * * 3'
# allow manual triggering of the workflow by a maintainer with no inputs
workflow_dispatch: {}
jobs:
releaser:
uses: signoz/primus.workflows/.github/workflows/releaser-signoz.yaml@main
secrets: inherit
with:
PRIMUS_REF: main

View File

@@ -3,7 +3,6 @@ on:
pull_request:
branches:
- main
- develop
paths:
- 'frontend/**'
defaults:

View File

@@ -1,12 +1,12 @@
name: staging-deployment
# Trigger deployment only on push to develop branch
# Trigger deployment only on push to main branch
on:
push:
branches:
- develop
- main
jobs:
deploy:
name: Deploy latest develop branch to staging
name: Deploy latest main branch to staging
runs-on: ubuntu-latest
environment: staging
permissions:
@@ -38,6 +38,7 @@ jobs:
export DOCKER_TAG="${GITHUB_SHA:0:7}" # needed for child process to access it
export OTELCOL_TAG="main"
export PATH="/usr/local/go/bin/:$PATH" # needed for Golang to work
export KAFKA_SPAN_EVAL="true"
docker system prune --force
docker pull signoz/signoz-otel-collector:main
docker pull signoz/signoz-schema-migrator:main

View File

@@ -44,7 +44,7 @@ jobs:
git add .
git stash push -m "stashed on $(date --iso-8601=seconds)"
git fetch origin
git checkout develop
git checkout main
git pull
# This is added to cover the scenario when a new commit in the PR is force-pushed
git branch -D ${GITHUB_BRANCH}

View File

@@ -339,7 +339,7 @@ to make SigNoz UI available at [localhost:3301](http://localhost:3301)
**5.1.1 To install the HotROD sample app:**
```bash
curl -sL https://github.com/SigNoz/signoz/raw/develop/sample-apps/hotrod/hotrod-install.sh \
curl -sL https://github.com/SigNoz/signoz/raw/main/sample-apps/hotrod/hotrod-install.sh \
| HELM_RELEASE=my-release SIGNOZ_NAMESPACE=platform bash
```
@@ -362,7 +362,7 @@ kubectl -n sample-application run strzal --image=djbingham/curl \
**5.1.4 To delete the HotROD sample app:**
```bash
curl -sL https://github.com/SigNoz/signoz/raw/develop/sample-apps/hotrod/hotrod-delete.sh \
curl -sL https://github.com/SigNoz/signoz/raw/main/sample-apps/hotrod/hotrod-delete.sh \
| HOTROD_NAMESPACE=sample-application bash
```

View File

@@ -8,6 +8,7 @@ BUILD_HASH ?= $(shell git rev-parse --short HEAD)
BUILD_TIME ?= $(shell date -u +"%Y-%m-%dT%H:%M:%SZ")
BUILD_BRANCH ?= $(shell git rev-parse --abbrev-ref HEAD)
DEV_LICENSE_SIGNOZ_IO ?= https://staging-license.signoz.io/api/v1
ZEUS_URL ?= https://api.signoz.cloud
DEV_BUILD ?= "" # set to any non-empty value to enable dev build
# Internal variables or constants.
@@ -33,8 +34,9 @@ buildHash=${PACKAGE}/pkg/query-service/version.buildHash
buildTime=${PACKAGE}/pkg/query-service/version.buildTime
gitBranch=${PACKAGE}/pkg/query-service/version.gitBranch
licenseSignozIo=${PACKAGE}/ee/query-service/constants.LicenseSignozIo
zeusURL=${PACKAGE}/ee/query-service/constants.ZeusURL
LD_FLAGS=-X ${buildHash}=${BUILD_HASH} -X ${buildTime}=${BUILD_TIME} -X ${buildVersion}=${BUILD_VERSION} -X ${gitBranch}=${BUILD_BRANCH}
LD_FLAGS=-X ${buildHash}=${BUILD_HASH} -X ${buildTime}=${BUILD_TIME} -X ${buildVersion}=${BUILD_VERSION} -X ${gitBranch}=${BUILD_BRANCH} -X ${zeusURL}=${ZEUS_URL}
DEV_LD_FLAGS=-X ${licenseSignozIo}=${DEV_LICENSE_SIGNOZ_IO}
all: build-push-frontend build-push-query-service
@@ -79,7 +81,7 @@ build-query-service-static:
@if [ $(DEV_BUILD) != "" ]; then \
cd $(QUERY_SERVICE_DIRECTORY) && \
CGO_ENABLED=1 go build -tags timetzdata -a -o ./bin/query-service-${GOOS}-${GOARCH} \
-ldflags "-linkmode external -extldflags '-static' -s -w ${LD_FLAGS} ${DEV_LD_FLAGS}"; \
-ldflags "-linkmode external -extldflags '-static' -s -w ${LD_FLAGS} ${DEV_LD_FLAGS}"; \
else \
cd $(QUERY_SERVICE_DIRECTORY) && \
CGO_ENABLED=1 go build -tags timetzdata -a -o ./bin/query-service-${GOOS}-${GOARCH} \
@@ -96,12 +98,12 @@ build-query-service-static-arm64:
# Steps to build static binary of query service for all platforms
.PHONY: build-query-service-static-all
build-query-service-static-all: build-query-service-static-amd64 build-query-service-static-arm64
build-query-service-static-all: build-query-service-static-amd64 build-query-service-static-arm64 build-frontend-static
# Steps to build and push docker image of query service
.PHONY: build-query-service-amd64 build-push-query-service
# Step to build docker image of query service in amd64 (used in build pipeline)
build-query-service-amd64: build-query-service-static-amd64
build-query-service-amd64: build-query-service-static-amd64 build-frontend-static
@echo "------------------"
@echo "--> Building query-service docker image for amd64"
@echo "------------------"
@@ -188,13 +190,4 @@ check-no-ee-references:
fi
test:
go test ./pkg/query-service/app/metrics/...
go test ./pkg/query-service/cache/...
go test ./pkg/query-service/app/...
go test ./pkg/query-service/app/querier/...
go test ./pkg/query-service/converter/...
go test ./pkg/query-service/formatter/...
go test ./pkg/query-service/tests/integration/...
go test ./pkg/query-service/rules/...
go test ./pkg/query-service/collectorsimulator/...
go test ./pkg/query-service/postprocess/...
go test ./pkg/query-service/...
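
For readers unfamiliar with the `ZEUS_URL`/`LD_FLAGS` change in the Makefile hunk above: Go's `-ldflags "-X ..."` replaces a package-level string variable at link time. A runnable sketch of the mechanism (the real Makefile targets the fully qualified `ee/query-service/constants.ZeusURL` symbol):

```go
// Build with:
//   go build -ldflags "-X main.zeusURL=https://staging.example.invalid" .
// and the default below is replaced in the final binary.
package main

import "fmt"

// zeusURL mirrors ee/query-service/constants.ZeusURL, which defaults to the
// production endpoint and is overridden via ZEUS_URL in the Makefile.
var zeusURL = "https://api.signoz.cloud"

func main() {
	fmt.Println("zeus url:", zeusURL)
}
```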

conf/defaults.yaml (new file)
View File

@@ -0,0 +1,11 @@
##################### SigNoz Configuration Defaults #####################
#
# Do not modify this file
#
##################### Web #####################
web:
# The prefix to serve web on
prefix: /
# The directory containing the static build files.
directory: /etc/signoz/web
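
These defaults feed the new `web` package that `web.AddToRouter` wires up later in this diff. A minimal sketch, assuming a gorilla/mux router as used elsewhere in the codebase, of serving a static build directory under a configurable prefix:

```go
package main

import (
	"log"
	"net/http"

	"github.com/gorilla/mux"
)

func main() {
	// Values mirror conf/defaults.yaml: web.prefix and web.directory.
	prefix, directory := "/", "/etc/signoz/web"

	r := mux.NewRouter()
	// Strip the prefix and serve the static frontend build from disk.
	r.PathPrefix(prefix).Handler(
		http.StripPrefix(prefix, http.FileServer(http.Dir(directory))),
	)
	log.Fatal(http.ListenAndServe(":3301", r))
}
```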

View File

@@ -58,7 +58,7 @@ from the HotROD application, you should see the data generated from hotrod in Si
```sh
kubectl create ns sample-application
kubectl -n sample-application apply -f https://raw.githubusercontent.com/SigNoz/signoz/develop/sample-apps/hotrod/hotrod.yaml
kubectl -n sample-application apply -f https://raw.githubusercontent.com/SigNoz/signoz/main/sample-apps/hotrod/hotrod.yaml
```
To generate load:

View File

@@ -1,5 +1,4 @@
version: "3.9"
x-clickhouse-defaults: &clickhouse-defaults
image: clickhouse/clickhouse-server:24.1.2-alpine
tty: true
@@ -16,14 +15,7 @@ x-clickhouse-defaults: &clickhouse-defaults
max-file: "3"
healthcheck:
# "clickhouse", "client", "-u ${CLICKHOUSE_USER}", "--password ${CLICKHOUSE_PASSWORD}", "-q 'SELECT 1'"
test:
[
"CMD",
"wget",
"--spider",
"-q",
"0.0.0.0:8123/ping"
]
test: ["CMD", "wget", "--spider", "-q", "0.0.0.0:8123/ping"]
interval: 30s
timeout: 5s
retries: 3
@@ -32,15 +24,12 @@ x-clickhouse-defaults: &clickhouse-defaults
nofile:
soft: 262144
hard: 262144
x-db-depend: &db-depend
depends_on:
- clickhouse
- otel-collector-migrator
# - clickhouse-2
# - clickhouse-3
services:
zookeeper-1:
image: bitnami/zookeeper:3.7.1
@@ -57,7 +46,6 @@ services:
# - ZOO_SERVERS=0.0.0.0:2888:3888,zookeeper-2:2888:3888,zookeeper-3:2888:3888
- ALLOW_ANONYMOUS_LOGIN=yes
- ZOO_AUTOPURGE_INTERVAL=1
# zookeeper-2:
# image: bitnami/zookeeper:3.7.0
# hostname: zookeeper-2
@@ -89,9 +77,8 @@ services:
# - ZOO_SERVERS=zookeeper-1:2888:3888,zookeeper-2:2888:3888,0.0.0.0:2888:3888
# - ALLOW_ANONYMOUS_LOGIN=yes
# - ZOO_AUTOPURGE_INTERVAL=1
clickhouse:
<<: *clickhouse-defaults
!!merge <<: *clickhouse-defaults
hostname: clickhouse
# ports:
# - "9000:9000"
@@ -103,7 +90,6 @@ services:
- ./clickhouse-cluster.xml:/etc/clickhouse-server/config.d/cluster.xml
# - ./clickhouse-storage.xml:/etc/clickhouse-server/config.d/storage.xml
- ./data/clickhouse/:/var/lib/clickhouse/
# clickhouse-2:
# <<: *clickhouse-defaults
# hostname: clickhouse-2
@@ -131,9 +117,8 @@ services:
# - ./clickhouse-cluster.xml:/etc/clickhouse-server/config.d/cluster.xml
# # - ./clickhouse-storage.xml:/etc/clickhouse-server/config.d/storage.xml
# - ./data/clickhouse-3/:/var/lib/clickhouse/
alertmanager:
image: signoz/alertmanager:0.23.5
image: signoz/alertmanager:0.23.7
volumes:
- ./data/alertmanager:/data
command:
@@ -144,14 +129,9 @@ services:
deploy:
restart_policy:
condition: on-failure
query-service:
image: signoz/query-service:0.55.0
command:
[
"-config=/root/config/prometheus.yml",
"--use-logs-new-schema=true"
]
image: signoz/query-service:0.65.0
command: ["-config=/root/config/prometheus.yml", "--use-logs-new-schema=true", "--use-trace-new-schema=true"]
# ports:
# - "6060:6060" # pprof port
# - "8080:8080" # query-service port
@@ -169,24 +149,16 @@ services:
- TELEMETRY_ENABLED=true
- DEPLOYMENT_TYPE=docker-swarm
healthcheck:
test:
[
"CMD",
"wget",
"--spider",
"-q",
"localhost:8080/api/v1/health"
]
test: ["CMD", "wget", "--spider", "-q", "localhost:8080/api/v1/health"]
interval: 30s
timeout: 5s
retries: 3
deploy:
restart_policy:
condition: on-failure
<<: *db-depend
!!merge <<: *db-depend
frontend:
image: signoz/frontend:0.55.0
image: signoz/frontend:0.65.0
deploy:
restart_policy:
condition: on-failure
@@ -197,15 +169,9 @@ services:
- "3301:3301"
volumes:
- ../common/nginx-config.conf:/etc/nginx/conf.d/default.conf
otel-collector:
image: signoz/signoz-otel-collector:0.102.10
command:
[
"--config=/etc/otel-collector-config.yaml",
"--manager-config=/etc/manager-config.yaml",
"--feature-gates=-pkg.translator.prometheus.NormalizeName"
]
image: signoz/signoz-otel-collector:0.111.16
command: ["--config=/etc/otel-collector-config.yaml", "--manager-config=/etc/manager-config.yaml", "--feature-gates=-pkg.translator.prometheus.NormalizeName"]
user: root # required for reading docker container logs
volumes:
- ./otel-collector-config.yaml:/etc/otel-collector-config.yaml
@@ -214,7 +180,6 @@ services:
- /:/hostfs:ro
environment:
- OTEL_RESOURCE_ATTRIBUTES=host.name={{.Node.Hostname}},os.type={{.Node.Platform.OS}},dockerswarm.service.name={{.Service.Name}},dockerswarm.task.name={{.Task.Name}}
- DOCKER_MULTI_NODE_CLUSTER=false
- LOW_CARDINAL_EXCEPTION_GROUPING=false
ports:
# - "1777:1777" # pprof extension
@@ -236,20 +201,20 @@ services:
- clickhouse
- otel-collector-migrator
- query-service
otel-collector-migrator:
image: signoz/signoz-schema-migrator:0.102.10
deploy:
restart_policy:
condition: on-failure
delay: 5s
command:
- "--dsn=tcp://clickhouse:9000"
depends_on:
- clickhouse
# - clickhouse-2
# - clickhouse-3
image: signoz/signoz-schema-migrator:0.111.16
deploy:
restart_policy:
condition: on-failure
delay: 5s
command:
- "sync"
- "--dsn=tcp://clickhouse:9000"
- "--up="
depends_on:
- clickhouse
# - clickhouse-2
# - clickhouse-3
logspout:
image: "gliderlabs/logspout:v3.2.14"
volumes:
@@ -262,17 +227,15 @@ services:
mode: global
restart_policy:
condition: on-failure
hotrod:
image: jaegertracing/example-hotrod:1.30
command: [ "all" ]
command: ["all"]
environment:
- JAEGER_ENDPOINT=http://otel-collector:14268/api/traces
logging:
options:
max-size: 50m
max-file: "3"
load-hotrod:
image: "signoz/locust:1.2.3"
hostname: load-hotrod

View File

@@ -66,28 +66,6 @@ processors:
# Using OTEL_RESOURCE_ATTRIBUTES envvar, env detector adds custom labels.
detectors: [env, system] # include ec2 for AWS, gcp for GCP and azure for Azure.
timeout: 2s
signozspanmetrics/cumulative:
metrics_exporter: clickhousemetricswrite
latency_histogram_buckets: [100us, 1ms, 2ms, 6ms, 10ms, 50ms, 100ms, 250ms, 500ms, 1000ms, 1400ms, 2000ms, 5s, 10s, 20s, 40s, 60s ]
dimensions_cache_size: 100000
dimensions:
- name: service.namespace
default: default
- name: deployment.environment
default: default
# This is added to ensure the uniqueness of the timeseries
# Otherwise, identical timeseries produced by multiple replicas of
# collectors result in incorrect APM metrics
- name: signoz.collector.id
- name: service.version
- name: browser.platform
- name: browser.mobile
- name: k8s.cluster.name
- name: k8s.node.name
- name: k8s.namespace.name
- name: host.name
- name: host.type
- name: container.name
# memory_limiter:
# # 80% of maximum memory up to 2G
# limit_mib: 1500
@@ -131,18 +109,19 @@ processors:
exporters:
clickhousetraces:
datasource: tcp://clickhouse:9000/signoz_traces
docker_multi_node_cluster: ${DOCKER_MULTI_NODE_CLUSTER}
low_cardinal_exception_grouping: ${LOW_CARDINAL_EXCEPTION_GROUPING}
low_cardinal_exception_grouping: ${env:LOW_CARDINAL_EXCEPTION_GROUPING}
use_new_schema: true
clickhousemetricswrite:
endpoint: tcp://clickhouse:9000/signoz_metrics
resource_to_telemetry_conversion:
enabled: true
clickhousemetricswrite/prometheus:
endpoint: tcp://clickhouse:9000/signoz_metrics
clickhousemetricswritev2:
dsn: tcp://clickhouse:9000/signoz_metrics
# logging: {}
clickhouselogsexporter:
dsn: tcp://clickhouse:9000/signoz_logs
docker_multi_node_cluster: ${DOCKER_MULTI_NODE_CLUSTER}
timeout: 10s
use_new_schema: true
extensions:
@@ -163,20 +142,20 @@ service:
pipelines:
traces:
receivers: [jaeger, otlp]
processors: [signozspanmetrics/cumulative, signozspanmetrics/delta, batch]
processors: [signozspanmetrics/delta, batch]
exporters: [clickhousetraces]
metrics:
receivers: [otlp]
processors: [batch]
exporters: [clickhousemetricswrite]
metrics/generic:
exporters: [clickhousemetricswrite, clickhousemetricswritev2]
metrics/hostmetrics:
receivers: [hostmetrics]
processors: [resourcedetection, batch]
exporters: [clickhousemetricswrite]
exporters: [clickhousemetricswrite, clickhousemetricswritev2]
metrics/prometheus:
receivers: [prometheus]
processors: [batch]
exporters: [clickhousemetricswrite/prometheus]
exporters: [clickhousemetricswrite/prometheus, clickhousemetricswritev2]
logs:
receivers: [otlp, tcplog/docker]
processors: [batch]

View File

@@ -57,7 +57,7 @@ services:
alertmanager:
container_name: signoz-alertmanager
image: signoz/alertmanager:0.23.5
image: signoz/alertmanager:0.23.7
volumes:
- ./data/alertmanager:/data
depends_on:
@@ -69,10 +69,12 @@ services:
- --storage.path=/data
otel-collector-migrator:
image: signoz/signoz-schema-migrator:${OTELCOL_TAG:-0.102.10}
image: signoz/signoz-schema-migrator:${OTELCOL_TAG:-0.111.16}
container_name: otel-migrator
command:
- "sync"
- "--dsn=tcp://clickhouse:9000"
- "--up="
depends_on:
clickhouse:
condition: service_healthy
@@ -84,7 +86,7 @@ services:
# Notes for Maintainers/Contributors who will change Line Numbers of Frontend & Query-Section. Please Update Line Numbers in `./scripts/commentLinesForSetup.sh` & `./CONTRIBUTING.md`
otel-collector:
container_name: signoz-otel-collector
image: signoz/signoz-otel-collector:0.102.10
image: signoz/signoz-otel-collector:0.111.16
command:
[
"--config=/etc/otel-collector-config.yaml",

View File

@@ -25,7 +25,8 @@ services:
command:
[
"-config=/root/config/prometheus.yml",
"--use-logs-new-schema=true"
"--use-logs-new-schema=true",
"--use-trace-new-schema=true"
]
ports:
- "6060:6060"

View File

@@ -13,14 +13,7 @@ x-clickhouse-defaults: &clickhouse-defaults
max-file: "3"
healthcheck:
# "clickhouse", "client", "-u ${CLICKHOUSE_USER}", "--password ${CLICKHOUSE_PASSWORD}", "-q 'SELECT 1'"
test:
[
"CMD",
"wget",
"--spider",
"-q",
"0.0.0.0:8123/ping"
]
test: ["CMD", "wget", "--spider", "-q", "0.0.0.0:8123/ping"]
interval: 30s
timeout: 5s
retries: 3
@@ -29,20 +22,17 @@ x-clickhouse-defaults: &clickhouse-defaults
nofile:
soft: 262144
hard: 262144
x-db-depend: &db-depend
depends_on:
clickhouse:
condition: service_healthy
otel-collector-migrator:
otel-collector-migrator-sync:
condition: service_completed_successfully
# clickhouse-2:
# condition: service_healthy
# clickhouse-3:
# condition: service_healthy
# clickhouse-2:
# condition: service_healthy
# clickhouse-3:
# condition: service_healthy
services:
zookeeper-1:
image: bitnami/zookeeper:3.7.1
container_name: signoz-zookeeper-1
@@ -59,7 +49,6 @@ services:
# - ZOO_SERVERS=0.0.0.0:2888:3888,zookeeper-2:2888:3888,zookeeper-3:2888:3888
- ALLOW_ANONYMOUS_LOGIN=yes
- ZOO_AUTOPURGE_INTERVAL=1
# zookeeper-2:
# image: bitnami/zookeeper:3.7.0
# container_name: signoz-zookeeper-2
@@ -93,9 +82,8 @@ services:
# - ZOO_SERVERS=zookeeper-1:2888:3888,zookeeper-2:2888:3888,0.0.0.0:2888:3888
# - ALLOW_ANONYMOUS_LOGIN=yes
# - ZOO_AUTOPURGE_INTERVAL=1
clickhouse:
<<: *clickhouse-defaults
!!merge <<: *clickhouse-defaults
container_name: signoz-clickhouse
hostname: clickhouse
ports:
@@ -110,7 +98,6 @@ services:
# - ./clickhouse-storage.xml:/etc/clickhouse-server/config.d/storage.xml
- ./data/clickhouse/:/var/lib/clickhouse/
- ./user_scripts:/var/lib/clickhouse/user_scripts/
# clickhouse-2:
# <<: *clickhouse-defaults
# container_name: signoz-clickhouse-2
@@ -128,7 +115,6 @@ services:
# - ./data/clickhouse-2/:/var/lib/clickhouse/
# - ./user_scripts:/var/lib/clickhouse/user_scripts/
# clickhouse-3:
# <<: *clickhouse-defaults
# container_name: signoz-clickhouse-3
@@ -145,9 +131,8 @@ services:
# # - ./clickhouse-storage.xml:/etc/clickhouse-server/config.d/storage.xml
# - ./data/clickhouse-3/:/var/lib/clickhouse/
# - ./user_scripts:/var/lib/clickhouse/user_scripts/
alertmanager:
image: signoz/alertmanager:${ALERTMANAGER_TAG:-0.23.5}
image: signoz/alertmanager:${ALERTMANAGER_TAG:-0.23.7}
container_name: signoz-alertmanager
volumes:
- ./data/alertmanager:/data
@@ -158,17 +143,11 @@ services:
command:
- --queryService.url=http://query-service:8085
- --storage.path=/data
# Notes for Maintainers/Contributors who will change Line Numbers of Frontend & Query-Section. Please Update Line Numbers in `./scripts/commentLinesForSetup.sh` & `./CONTRIBUTING.md`
query-service:
image: signoz/query-service:${DOCKER_TAG:-0.55.0}
image: signoz/query-service:${DOCKER_TAG:-0.65.0}
container_name: signoz-query-service
command:
[
"-config=/root/config/prometheus.yml",
"--use-logs-new-schema=true"
]
command: ["-config=/root/config/prometheus.yml", "--use-logs-new-schema=true", "--use-trace-new-schema=true"]
# ports:
# - "6060:6060" # pprof port
# - "8080:8080" # query-service port
@@ -187,21 +166,13 @@ services:
- DEPLOYMENT_TYPE=docker-standalone-amd
restart: on-failure
healthcheck:
test:
[
"CMD",
"wget",
"--spider",
"-q",
"localhost:8080/api/v1/health"
]
test: ["CMD", "wget", "--spider", "-q", "localhost:8080/api/v1/health"]
interval: 30s
timeout: 5s
retries: 3
<<: *db-depend
!!merge <<: *db-depend
frontend:
image: signoz/frontend:${DOCKER_TAG:-0.55.0}
image: signoz/frontend:${DOCKER_TAG:-0.65.0}
container_name: signoz-frontend
restart: on-failure
depends_on:
@@ -211,31 +182,40 @@ services:
- "3301:3301"
volumes:
- ../common/nginx-config.conf:/etc/nginx/conf.d/default.conf
otel-collector-migrator:
image: signoz/signoz-schema-migrator:${OTELCOL_TAG:-0.102.10}
container_name: otel-migrator
otel-collector-migrator-sync:
image: signoz/signoz-schema-migrator:${OTELCOL_TAG:-0.111.16}
container_name: otel-migrator-sync
command:
- "sync"
- "--dsn=tcp://clickhouse:9000"
- "--up="
depends_on:
clickhouse:
condition: service_healthy
# clickhouse-2:
# condition: service_healthy
# clickhouse-3:
# condition: service_healthy
otel-collector:
image: signoz/signoz-otel-collector:${OTELCOL_TAG:-0.102.10}
container_name: signoz-otel-collector
# clickhouse-2:
# condition: service_healthy
# clickhouse-3:
# condition: service_healthy
otel-collector-migrator-async:
image: signoz/signoz-schema-migrator:${OTELCOL_TAG:-0.111.16}
container_name: otel-migrator-async
command:
[
"--config=/etc/otel-collector-config.yaml",
"--manager-config=/etc/manager-config.yaml",
"--copy-path=/var/tmp/collector-config.yaml",
"--feature-gates=-pkg.translator.prometheus.NormalizeName"
]
- "async"
- "--dsn=tcp://clickhouse:9000"
- "--up="
depends_on:
clickhouse:
condition: service_healthy
otel-collector-migrator-sync:
condition: service_completed_successfully
# clickhouse-2:
# condition: service_healthy
# clickhouse-3:
# condition: service_healthy
otel-collector:
image: signoz/signoz-otel-collector:${OTELCOL_TAG:-0.111.16}
container_name: signoz-otel-collector
command: ["--config=/etc/otel-collector-config.yaml", "--manager-config=/etc/manager-config.yaml", "--copy-path=/var/tmp/collector-config.yaml", "--feature-gates=-pkg.translator.prometheus.NormalizeName"]
user: root # required for reading docker container logs
volumes:
- ./otel-collector-config.yaml:/etc/otel-collector-config.yaml
@@ -244,7 +224,6 @@ services:
- /:/hostfs:ro
environment:
- OTEL_RESOURCE_ATTRIBUTES=host.name=signoz-host,os.type=linux
- DOCKER_MULTI_NODE_CLUSTER=false
- LOW_CARDINAL_EXCEPTION_GROUPING=false
ports:
# - "1777:1777" # pprof extension
@@ -262,11 +241,10 @@ services:
depends_on:
clickhouse:
condition: service_healthy
otel-collector-migrator:
otel-collector-migrator-sync:
condition: service_completed_successfully
query-service:
condition: service_healthy
logspout:
image: "gliderlabs/logspout:v3.2.14"
container_name: signoz-logspout

View File

@@ -1,8 +1,6 @@
version: "2.4"
include:
- test-app-docker-compose.yaml
x-clickhouse-defaults: &clickhouse-defaults
restart: on-failure
# adding non-LTS version due to this fix https://github.com/ClickHouse/ClickHouse/commit/32caf8716352f45c1b617274c7508c86b7d1afab
@@ -18,14 +16,7 @@ x-clickhouse-defaults: &clickhouse-defaults
max-file: "3"
healthcheck:
# "clickhouse", "client", "-u ${CLICKHOUSE_USER}", "--password ${CLICKHOUSE_PASSWORD}", "-q 'SELECT 1'"
test:
[
"CMD",
"wget",
"--spider",
"-q",
"0.0.0.0:8123/ping"
]
test: ["CMD", "wget", "--spider", "-q", "0.0.0.0:8123/ping"]
interval: 30s
timeout: 5s
retries: 3
@@ -34,20 +25,17 @@ x-clickhouse-defaults: &clickhouse-defaults
nofile:
soft: 262144
hard: 262144
x-db-depend: &db-depend
depends_on:
clickhouse:
condition: service_healthy
otel-collector-migrator:
condition: service_completed_successfully
# clickhouse-2:
# condition: service_healthy
# clickhouse-3:
# condition: service_healthy
# clickhouse-2:
# condition: service_healthy
# clickhouse-3:
# condition: service_healthy
services:
zookeeper-1:
image: bitnami/zookeeper:3.7.1
container_name: signoz-zookeeper-1
@@ -64,7 +52,6 @@ services:
# - ZOO_SERVERS=0.0.0.0:2888:3888,zookeeper-2:2888:3888,zookeeper-3:2888:3888
- ALLOW_ANONYMOUS_LOGIN=yes
- ZOO_AUTOPURGE_INTERVAL=1
# zookeeper-2:
# image: bitnami/zookeeper:3.7.0
# container_name: signoz-zookeeper-2
@@ -98,9 +85,8 @@ services:
# - ZOO_SERVERS=zookeeper-1:2888:3888,zookeeper-2:2888:3888,0.0.0.0:2888:3888
# - ALLOW_ANONYMOUS_LOGIN=yes
# - ZOO_AUTOPURGE_INTERVAL=1
clickhouse:
<<: *clickhouse-defaults
!!merge <<: *clickhouse-defaults
container_name: signoz-clickhouse
hostname: clickhouse
ports:
@@ -115,7 +101,6 @@ services:
# - ./clickhouse-storage.xml:/etc/clickhouse-server/config.d/storage.xml
- ./data/clickhouse/:/var/lib/clickhouse/
- ./user_scripts:/var/lib/clickhouse/user_scripts/
# clickhouse-2:
# <<: *clickhouse-defaults
# container_name: signoz-clickhouse-2
@@ -133,7 +118,6 @@ services:
# - ./data/clickhouse-2/:/var/lib/clickhouse/
# - ./user_scripts:/var/lib/clickhouse/user_scripts/
# clickhouse-3:
# <<: *clickhouse-defaults
# container_name: signoz-clickhouse-3
@@ -150,9 +134,8 @@ services:
# # - ./clickhouse-storage.xml:/etc/clickhouse-server/config.d/storage.xml
# - ./data/clickhouse-3/:/var/lib/clickhouse/
# - ./user_scripts:/var/lib/clickhouse/user_scripts/
alertmanager:
image: signoz/alertmanager:${ALERTMANAGER_TAG:-0.23.5}
image: signoz/alertmanager:${ALERTMANAGER_TAG:-0.23.7}
container_name: signoz-alertmanager
volumes:
- ./data/alertmanager:/data
@@ -163,18 +146,11 @@ services:
command:
- --queryService.url=http://query-service:8085
- --storage.path=/data
# Notes for Maintainers/Contributors who will change Line Numbers of Frontend & Query-Section. Please Update Line Numbers in `./scripts/commentLinesForSetup.sh` & `./CONTRIBUTING.md`
query-service:
image: signoz/query-service:${DOCKER_TAG:-0.55.0}
image: signoz/query-service:${DOCKER_TAG:-0.65.0}
container_name: signoz-query-service
command:
[
"-config=/root/config/prometheus.yml",
"-gateway-url=https://api.staging.signoz.cloud",
"--use-logs-new-schema=true"
]
command: ["-config=/root/config/prometheus.yml", "-gateway-url=https://api.staging.signoz.cloud", "--use-logs-new-schema=true", "--use-trace-new-schema=true"]
# ports:
# - "6060:6060" # pprof port
# - "8080:8080" # query-service port
@@ -191,23 +167,16 @@ services:
- GODEBUG=netdns=go
- TELEMETRY_ENABLED=true
- DEPLOYMENT_TYPE=docker-standalone-amd
- KAFKA_SPAN_EVAL=${KAFKA_SPAN_EVAL:-false}
restart: on-failure
healthcheck:
test:
[
"CMD",
"wget",
"--spider",
"-q",
"localhost:8080/api/v1/health"
]
test: ["CMD", "wget", "--spider", "-q", "localhost:8080/api/v1/health"]
interval: 30s
timeout: 5s
retries: 3
<<: *db-depend
!!merge <<: *db-depend
frontend:
image: signoz/frontend:${DOCKER_TAG:-0.55.0}
image: signoz/frontend:${DOCKER_TAG:-0.65.0}
container_name: signoz-frontend
restart: on-failure
depends_on:
@@ -217,31 +186,22 @@ services:
- "3301:3301"
volumes:
- ../common/nginx-config.conf:/etc/nginx/conf.d/default.conf
otel-collector-migrator:
image: signoz/signoz-schema-migrator:${OTELCOL_TAG:-0.102.10}
image: signoz/signoz-schema-migrator:${OTELCOL_TAG:-0.111.16}
container_name: otel-migrator
command:
- "--dsn=tcp://clickhouse:9000"
depends_on:
clickhouse:
condition: service_healthy
# clickhouse-2:
# condition: service_healthy
# clickhouse-3:
# condition: service_healthy
# clickhouse-2:
# condition: service_healthy
# clickhouse-3:
# condition: service_healthy
otel-collector:
image: signoz/signoz-otel-collector:${OTELCOL_TAG:-0.102.10}
image: signoz/signoz-otel-collector:${OTELCOL_TAG:-0.111.16}
container_name: signoz-otel-collector
command:
[
"--config=/etc/otel-collector-config.yaml",
"--manager-config=/etc/manager-config.yaml",
"--copy-path=/var/tmp/collector-config.yaml",
"--feature-gates=-pkg.translator.prometheus.NormalizeName"
]
command: ["--config=/etc/otel-collector-config.yaml", "--manager-config=/etc/manager-config.yaml", "--copy-path=/var/tmp/collector-config.yaml", "--feature-gates=-pkg.translator.prometheus.NormalizeName"]
user: root # required for reading docker container logs
volumes:
- ./otel-collector-config.yaml:/etc/otel-collector-config.yaml
@@ -250,7 +210,6 @@ services:
- /:/hostfs:ro
environment:
- OTEL_RESOURCE_ATTRIBUTES=host.name=signoz-host,os.type=linux
- DOCKER_MULTI_NODE_CLUSTER=false
- LOW_CARDINAL_EXCEPTION_GROUPING=false
ports:
# - "1777:1777" # pprof extension
@@ -272,7 +231,6 @@ services:
condition: service_completed_successfully
query-service:
condition: service_healthy
logspout:
image: "gliderlabs/logspout:v3.2.14"
container_name: signoz-logspout

View File

@@ -57,35 +57,11 @@ receivers:
labels:
job_name: otel-collector
processors:
batch:
send_batch_size: 10000
send_batch_max_size: 11000
timeout: 10s
signozspanmetrics/cumulative:
metrics_exporter: clickhousemetricswrite
metrics_flush_interval: 60s
latency_histogram_buckets: [100us, 1ms, 2ms, 6ms, 10ms, 50ms, 100ms, 250ms, 500ms, 1000ms, 1400ms, 2000ms, 5s, 10s, 20s, 40s, 60s ]
dimensions_cache_size: 100000
dimensions:
- name: service.namespace
default: default
- name: deployment.environment
default: default
# This is added to ensure the uniqueness of the timeseries
# Otherwise, identical timeseries produced by multiple replicas of
# collectors result in incorrect APM metrics
- name: signoz.collector.id
- name: service.version
- name: browser.platform
- name: browser.mobile
- name: k8s.cluster.name
- name: k8s.node.name
- name: k8s.namespace.name
- name: host.name
- name: host.type
- name: container.name
# memory_limiter:
# # 80% of maximum memory up to 2G
# limit_mib: 1500
@@ -142,17 +118,18 @@ extensions:
exporters:
clickhousetraces:
datasource: tcp://clickhouse:9000/signoz_traces
docker_multi_node_cluster: ${DOCKER_MULTI_NODE_CLUSTER}
low_cardinal_exception_grouping: ${LOW_CARDINAL_EXCEPTION_GROUPING}
low_cardinal_exception_grouping: ${env:LOW_CARDINAL_EXCEPTION_GROUPING}
use_new_schema: true
clickhousemetricswrite:
endpoint: tcp://clickhouse:9000/signoz_metrics
resource_to_telemetry_conversion:
enabled: true
clickhousemetricswrite/prometheus:
endpoint: tcp://clickhouse:9000/signoz_metrics
clickhousemetricswritev2:
dsn: tcp://clickhouse:9000/signoz_metrics
clickhouselogsexporter:
dsn: tcp://clickhouse:9000/signoz_logs
docker_multi_node_cluster: ${DOCKER_MULTI_NODE_CLUSTER}
timeout: 10s
use_new_schema: true
# logging: {}
@@ -170,20 +147,20 @@ service:
pipelines:
traces:
receivers: [jaeger, otlp]
processors: [signozspanmetrics/cumulative, signozspanmetrics/delta, batch]
processors: [signozspanmetrics/delta, batch]
exporters: [clickhousetraces]
metrics:
receivers: [otlp]
processors: [batch]
exporters: [clickhousemetricswrite]
metrics/generic:
exporters: [clickhousemetricswrite, clickhousemetricswritev2]
metrics/hostmetrics:
receivers: [hostmetrics]
processors: [resourcedetection, batch]
exporters: [clickhousemetricswrite]
exporters: [clickhousemetricswrite, clickhousemetricswritev2]
metrics/prometheus:
receivers: [prometheus]
processors: [batch]
exporters: [clickhousemetricswrite/prometheus]
exporters: [clickhousemetricswrite/prometheus, clickhousemetricswritev2]
logs:
receivers: [otlp, tcplog/docker]
processors: [batch]

View File

@@ -1,5 +1,5 @@
# use a minimal alpine image
FROM alpine:3.18.6
FROM alpine:3.20.3
# Add Maintainer Info
LABEL maintainer="signoz"
@@ -23,6 +23,9 @@ COPY pkg/query-service/templates /root/templates
# Make query-service executable for non-root users
RUN chmod 755 /root /root/query-service
# Copy frontend
COPY frontend/build/ /etc/signoz/web/
# run the binary
ENTRYPOINT ["./query-service"]

View File

@@ -38,8 +38,9 @@ type APIHandlerOptions struct {
Cache cache.Cache
Gateway *httputil.ReverseProxy
// Querier flux interval
FluxInterval time.Duration
UseLogsNewSchema bool
FluxInterval time.Duration
UseLogsNewSchema bool
UseTraceNewSchema bool
}
type APIHandler struct {
@@ -65,6 +66,7 @@ func NewAPIHandler(opts APIHandlerOptions) (*APIHandler, error) {
Cache: opts.Cache,
FluxInterval: opts.FluxInterval,
UseLogsNewSchema: opts.UseLogsNewSchema,
UseTraceNewSchema: opts.UseTraceNewSchema,
})
if err != nil {
@@ -173,14 +175,22 @@ func (ah *APIHandler) RegisterRoutes(router *mux.Router, am *baseapp.AuthMiddlew
router.HandleFunc("/api/v1/dashboards/{uuid}/lock", am.EditAccess(ah.lockDashboard)).Methods(http.MethodPut)
router.HandleFunc("/api/v1/dashboards/{uuid}/unlock", am.EditAccess(ah.unlockDashboard)).Methods(http.MethodPut)
// v2
router.HandleFunc("/api/v2/licenses",
am.ViewAccess(ah.listLicensesV2)).
Methods(http.MethodGet)
// v3
router.HandleFunc("/api/v3/licenses", am.ViewAccess(ah.listLicensesV3)).Methods(http.MethodGet)
router.HandleFunc("/api/v3/licenses", am.AdminAccess(ah.applyLicenseV3)).Methods(http.MethodPost)
router.HandleFunc("/api/v3/licenses", am.AdminAccess(ah.refreshLicensesV3)).Methods(http.MethodPut)
router.HandleFunc("/api/v3/licenses/active", am.ViewAccess(ah.getActiveLicenseV3)).Methods(http.MethodGet)
// v4
router.HandleFunc("/api/v4/query_range", am.ViewAccess(ah.queryRangeV4)).Methods(http.MethodPost)
// Gateway
router.PathPrefix(gateway.RoutePrefix).HandlerFunc(am.AdminAccess(ah.ServeGatewayHTTP))
router.PathPrefix(gateway.RoutePrefix).HandlerFunc(am.EditAccess(ah.ServeGatewayHTTP))
ah.APIHandler.RegisterRoutes(router, am)

View File

@@ -9,7 +9,15 @@ import (
func (ah *APIHandler) ServeGatewayHTTP(rw http.ResponseWriter, req *http.Request) {
ctx := req.Context()
if !strings.HasPrefix(req.URL.Path, gateway.RoutePrefix+gateway.AllowedPrefix) {
validPath := false
for _, allowedPrefix := range gateway.AllowedPrefix {
if strings.HasPrefix(req.URL.Path, gateway.RoutePrefix+allowedPrefix) {
validPath = true
break
}
}
if !validPath {
rw.WriteHeader(http.StatusNotFound)
return
}
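
A self-contained illustration of the new slice-based prefix check, using the `RoutePrefix` and `AllowedPrefix` values from this diff:

```go
package main

import (
	"fmt"
	"strings"
)

var (
	routePrefix   = "/api/gateway"
	allowedPrefix = []string{"/v1/workspaces/me", "/v2/profiles/me", "/v2/deployments/me"}
)

// allowed reports whether the request path targets one of the proxied gateway routes.
func allowed(path string) bool {
	for _, p := range allowedPrefix {
		if strings.HasPrefix(path, routePrefix+p) {
			return true
		}
	}
	return false
}

func main() {
	fmt.Println(allowed("/api/gateway/v2/profiles/me")) // true
	fmt.Println(allowed("/api/gateway/v1/other"))       // false
}
```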

View File

@@ -9,6 +9,7 @@ import (
"go.signoz.io/signoz/ee/query-service/constants"
"go.signoz.io/signoz/ee/query-service/model"
"go.signoz.io/signoz/pkg/http/render"
"go.uber.org/zap"
)
@@ -59,6 +60,21 @@ type billingDetails struct {
} `json:"data"`
}
type ApplyLicenseRequest struct {
LicenseKey string `json:"key"`
}
type ListLicenseResponse map[string]interface{}
func convertLicenseV3ToListLicenseResponse(licensesV3 []*model.LicenseV3) []ListLicenseResponse {
listLicenses := []ListLicenseResponse{}
for _, license := range licensesV3 {
listLicenses = append(listLicenses, license.Data)
}
return listLicenses
}
func (ah *APIHandler) listLicenses(w http.ResponseWriter, r *http.Request) {
licenses, apiError := ah.LM().GetLicenses(context.Background())
if apiError != nil {
@@ -79,7 +95,7 @@ func (ah *APIHandler) applyLicense(w http.ResponseWriter, r *http.Request) {
RespondError(w, model.BadRequest(fmt.Errorf("license key is required")), nil)
return
}
license, apiError := ah.LM().Activate(r.Context(), l.Key)
license, apiError := ah.LM().ActivateV3(r.Context(), l.Key)
if apiError != nil {
RespondError(w, apiError, nil)
return
@@ -88,6 +104,68 @@ func (ah *APIHandler) applyLicense(w http.ResponseWriter, r *http.Request) {
ah.Respond(w, license)
}
func (ah *APIHandler) listLicensesV3(w http.ResponseWriter, r *http.Request) {
licenses, apiError := ah.LM().GetLicensesV3(r.Context())
if apiError != nil {
RespondError(w, apiError, nil)
return
}
ah.Respond(w, convertLicenseV3ToListLicenseResponse(licenses))
}
func (ah *APIHandler) getActiveLicenseV3(w http.ResponseWriter, r *http.Request) {
activeLicense, err := ah.LM().GetRepo().GetActiveLicenseV3(r.Context())
if err != nil {
RespondError(w, &model.ApiError{Typ: model.ErrorInternal, Err: err}, nil)
return
}
// return 404 not found if there is no active license
if activeLicense == nil {
RespondError(w, &model.ApiError{Typ: model.ErrorNotFound, Err: fmt.Errorf("no active license found")}, nil)
return
}
// TODO deprecate this when we move away from key for stripe
activeLicense.Data["key"] = activeLicense.Key
render.Success(w, http.StatusOK, activeLicense.Data)
}
// this function is called by zeus when inserting licenses in the query-service
func (ah *APIHandler) applyLicenseV3(w http.ResponseWriter, r *http.Request) {
var licenseKey ApplyLicenseRequest
if err := json.NewDecoder(r.Body).Decode(&licenseKey); err != nil {
RespondError(w, model.BadRequest(err), nil)
return
}
if licenseKey.LicenseKey == "" {
RespondError(w, model.BadRequest(fmt.Errorf("license key is required")), nil)
return
}
_, apiError := ah.LM().ActivateV3(r.Context(), licenseKey.LicenseKey)
if apiError != nil {
RespondError(w, apiError, nil)
return
}
render.Success(w, http.StatusAccepted, nil)
}
func (ah *APIHandler) refreshLicensesV3(w http.ResponseWriter, r *http.Request) {
apiError := ah.LM().RefreshLicense(r.Context())
if apiError != nil {
RespondError(w, apiError, nil)
return
}
render.Success(w, http.StatusNoContent, nil)
}
func (ah *APIHandler) checkout(w http.ResponseWriter, r *http.Request) {
type checkoutResponse struct {
@@ -154,12 +232,38 @@ func (ah *APIHandler) getBilling(w http.ResponseWriter, r *http.Request) {
ah.Respond(w, billingResponse.Data)
}
func (ah *APIHandler) listLicensesV2(w http.ResponseWriter, r *http.Request) {
licenses, apiError := ah.LM().GetLicenses(context.Background())
if apiError != nil {
RespondError(w, apiError, nil)
func convertLicenseV3ToLicenseV2(licenses []*model.LicenseV3) []model.License {
licensesV2 := []model.License{}
for _, l := range licenses {
planKeyFromPlanName, ok := model.MapOldPlanKeyToNewPlanName[l.PlanName]
if !ok {
planKeyFromPlanName = model.Basic
}
licenseV2 := model.License{
Key: l.Key,
ActivationId: "",
PlanDetails: "",
FeatureSet: l.Features,
ValidationMessage: "",
IsCurrent: l.IsCurrent,
LicensePlan: model.LicensePlan{
PlanKey: planKeyFromPlanName,
ValidFrom: l.ValidFrom,
ValidUntil: l.ValidUntil,
Status: l.Status},
}
licensesV2 = append(licensesV2, licenseV2)
}
return licensesV2
}
func (ah *APIHandler) listLicensesV2(w http.ResponseWriter, r *http.Request) {
licensesV3, apierr := ah.LM().GetLicensesV3(r.Context())
if apierr != nil {
RespondError(w, apierr, nil)
return
}
licenses := convertLicenseV3ToLicenseV2(licensesV3)
resp := model.Licenses{
TrialStart: -1,

View File

@@ -53,7 +53,11 @@ func (aH *APIHandler) queryRangeV4(w http.ResponseWriter, r *http.Request) {
if anomalyQueryExists {
// ensure all queries have metric data source, and there should be only one anomaly query
for _, query := range queryRangeParams.CompositeQuery.BuilderQueries {
if query.DataSource != v3.DataSourceMetrics {
// What is query.QueryName == query.Expression doing here?
// In the current implementation, the way to recognize whether a query is a formula is by
// checking if the expression is the same as the query name: if the expression differs,
// it is a formula; otherwise, it is a simple builder query.
if query.DataSource != v3.DataSourceMetrics && query.QueryName == query.Expression {
RespondError(w, &model.ApiError{Typ: model.ErrorBadData, Err: fmt.Errorf("all queries must have metric data source")}, nil)
return
}
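
A runnable restatement of the convention that comment describes (helper name hypothetical):

```go
package main

import "fmt"

// isFormula mirrors the convention described in the comment above: a plain
// builder query has expression == name ("A"/"A"); a formula has a different
// expression ("F1"/"A/B").
func isFormula(queryName, expression string) bool {
	return queryName != expression
}

func main() {
	fmt.Println(isFormula("A", "A"))    // false: plain builder query
	fmt.Println(isFormula("F1", "A/B")) // true: formula
}
```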
@@ -100,6 +104,13 @@ func (aH *APIHandler) queryRangeV4(w http.ResponseWriter, r *http.Request) {
anomaly.WithReader[*anomaly.HourlyProvider](aH.opts.DataConnector),
anomaly.WithFeatureLookup[*anomaly.HourlyProvider](aH.opts.FeatureFlags),
)
default:
provider = anomaly.NewDailyProvider(
anomaly.WithCache[*anomaly.DailyProvider](aH.opts.Cache),
anomaly.WithKeyGenerator[*anomaly.DailyProvider](queryBuilder.NewKeyGenerator()),
anomaly.WithReader[*anomaly.DailyProvider](aH.opts.DataConnector),
anomaly.WithFeatureLookup[*anomaly.DailyProvider](aH.opts.FeatureFlags),
)
}
anomalies, err := provider.GetAnomalies(r.Context(), &anomaly.GetAnomaliesRequest{Params: queryRangeParams})
if err != nil {

View File

@@ -26,8 +26,9 @@ func NewDataConnector(
dialTimeout time.Duration,
cluster string,
useLogsNewSchema bool,
useTraceNewSchema bool,
) *ClickhouseReader {
ch := basechr.NewReader(localDB, promConfigPath, lm, maxIdleConns, maxOpenConns, dialTimeout, cluster, useLogsNewSchema)
ch := basechr.NewReader(localDB, promConfigPath, lm, maxIdleConns, maxOpenConns, dialTimeout, cluster, useLogsNewSchema, useTraceNewSchema)
return &ClickhouseReader{
conn: ch.GetConn(),
appdb: localDB,

View File

@@ -31,8 +31,8 @@ import (
"go.signoz.io/signoz/ee/query-service/rules"
baseauth "go.signoz.io/signoz/pkg/query-service/auth"
"go.signoz.io/signoz/pkg/query-service/migrate"
"go.signoz.io/signoz/pkg/query-service/model"
v3 "go.signoz.io/signoz/pkg/query-service/model/v3"
"go.signoz.io/signoz/pkg/web"
licensepkg "go.signoz.io/signoz/ee/query-service/license"
"go.signoz.io/signoz/ee/query-service/usage"
@@ -78,6 +78,7 @@ type ServerOptions struct {
Cluster string
GatewayUrl string
UseLogsNewSchema bool
UseTraceNewSchema bool
}
// Server runs HTTP api service
@@ -107,7 +108,7 @@ func (s Server) HealthCheckStatus() chan healthcheck.Status {
}
// NewServer creates and initializes Server
func NewServer(serverOptions *ServerOptions) (*Server, error) {
func NewServer(serverOptions *ServerOptions, web *web.Web) (*Server, error) {
modelDao, err := dao.InitDao("sqlite", baseconst.RELATIONAL_DATASOURCE_PATH)
if err != nil {
@@ -156,6 +157,7 @@ func NewServer(serverOptions *ServerOptions) (*Server, error) {
serverOptions.DialTimeout,
serverOptions.Cluster,
serverOptions.UseLogsNewSchema,
serverOptions.UseTraceNewSchema,
)
go qb.Start(readerReady)
reader = qb
@@ -189,6 +191,7 @@ func NewServer(serverOptions *ServerOptions) (*Server, error) {
serverOptions.DisableRules,
lm,
serverOptions.UseLogsNewSchema,
serverOptions.UseTraceNewSchema,
)
if err != nil {
@@ -270,6 +273,7 @@ func NewServer(serverOptions *ServerOptions) (*Server, error) {
FluxInterval: fluxInterval,
Gateway: gatewayProxy,
UseLogsNewSchema: serverOptions.UseLogsNewSchema,
UseTraceNewSchema: serverOptions.UseTraceNewSchema,
}
apiHandler, err := api.NewAPIHandler(apiOpts)
@@ -286,7 +290,7 @@ func NewServer(serverOptions *ServerOptions) (*Server, error) {
usageManager: usageManager,
}
httpServer, err := s.createPublicServer(apiHandler)
httpServer, err := s.createPublicServer(apiHandler, web)
if err != nil {
return nil, err
@@ -312,10 +316,10 @@ func (s *Server) createPrivateServer(apiHandler *api.APIHandler) (*http.Server,
r := baseapp.NewRouter()
r.Use(baseapp.LogCommentEnricher)
r.Use(setTimeoutMiddleware)
r.Use(s.analyticsMiddleware)
r.Use(loggingMiddlewarePrivate)
r.Use(baseapp.LogCommentEnricher)
apiHandler.RegisterPrivateRoutes(r)
@@ -335,7 +339,7 @@ func (s *Server) createPrivateServer(apiHandler *api.APIHandler) (*http.Server,
}, nil
}
func (s *Server) createPublicServer(apiHandler *api.APIHandler) (*http.Server, error) {
func (s *Server) createPublicServer(apiHandler *api.APIHandler, web *web.Web) (*http.Server, error) {
r := baseapp.NewRouter()
@@ -348,17 +352,17 @@ func (s *Server) createPublicServer(apiHandler *api.APIHandler) (*http.Server, e
}
if user.User.OrgId == "" {
return nil, model.UnauthorizedError(errors.New("orgId is missing in the claims"))
return nil, basemodel.UnauthorizedError(errors.New("orgId is missing in the claims"))
}
return user, nil
}
am := baseapp.NewAuthMiddleware(getUserFromRequest)
r.Use(baseapp.LogCommentEnricher)
r.Use(setTimeoutMiddleware)
r.Use(s.analyticsMiddleware)
r.Use(loggingMiddleware)
r.Use(baseapp.LogCommentEnricher)
apiHandler.RegisterRoutes(r, am)
apiHandler.RegisterLogsRoutes(r, am)
@@ -379,6 +383,11 @@ func (s *Server) createPublicServer(apiHandler *api.APIHandler) (*http.Server, e
handler = handlers.CompressHandler(handler)
err := web.AddToRouter(r)
if err != nil {
return nil, err
}
return &http.Server{
Handler: handler,
}, nil
@@ -736,7 +745,8 @@ func makeRulesManager(
cache cache.Cache,
disableRules bool,
fm baseint.FeatureLookup,
useLogsNewSchema bool) (*baserules.Manager, error) {
useLogsNewSchema bool,
useTraceNewSchema bool) (*baserules.Manager, error) {
// create engine
pqle, err := pqle.FromConfigPath(promConfigPath)
@@ -765,8 +775,10 @@ func makeRulesManager(
Cache: cache,
EvalDelay: baseconst.GetEvalDelay(),
PrepareTaskFunc: rules.PrepareTaskFunc,
UseLogsNewSchema: useLogsNewSchema,
PrepareTaskFunc: rules.PrepareTaskFunc,
UseLogsNewSchema: useLogsNewSchema,
UseTraceNewSchema: useTraceNewSchema,
PrepareTestRuleFunc: rules.TestNotification,
}
// create Manager

View File

@@ -14,6 +14,9 @@ var SaasSegmentKey = GetOrDefaultEnv("SIGNOZ_SAAS_SEGMENT_KEY", "")
var FetchFeatures = GetOrDefaultEnv("FETCH_FEATURES", "false")
var ZeusFeaturesURL = GetOrDefaultEnv("ZEUS_FEATURES_URL", "ZeusFeaturesURL")
// this is set via build time variable
var ZeusURL = "https://api.signoz.cloud"
func GetOrDefaultEnv(key string, fallback string) string {
v := os.Getenv(key)
if len(v) == 0 {

View File

@@ -8,9 +8,9 @@ import (
"strings"
)
const (
RoutePrefix string = "/api/gateway"
AllowedPrefix string = "/v1/workspaces/me"
var (
RoutePrefix string = "/api/gateway"
AllowedPrefix []string = []string{"/v1/workspaces/me", "/v2/profiles/me", "/v2/deployments/me"}
)
type proxy struct {

View File

@@ -2,14 +2,7 @@ package signozio
type status string
type ActivationResult struct {
Status status `json:"status"`
Data *ActivationResponse `json:"data,omitempty"`
ErrorType string `json:"errorType,omitempty"`
Error string `json:"error,omitempty"`
}
type ActivationResponse struct {
ActivationId string `json:"ActivationId"`
PlanDetails string `json:"PlanDetails"`
type ValidateLicenseResponse struct {
Status status `json:"status"`
Data map[string]interface{} `json:"data"`
}

View File

@@ -7,9 +7,9 @@ import (
"fmt"
"io"
"net/http"
"time"
"github.com/pkg/errors"
"go.uber.org/zap"
"go.signoz.io/signoz/ee/query-service/constants"
"go.signoz.io/signoz/ee/query-service/model"
@@ -23,12 +23,14 @@ const (
)
type Client struct {
Prefix string
Prefix string
GatewayUrl string
}
func New() *Client {
return &Client{
Prefix: constants.LicenseSignozIo,
Prefix: constants.LicenseSignozIo,
GatewayUrl: constants.ZeusURL,
}
}
@@ -36,82 +38,56 @@ func init() {
C = New()
}
// ActivateLicense sends key to license.signoz.io and gets activation data
func ActivateLicense(key, siteId string) (*ActivationResponse, *model.ApiError) {
licenseReq := map[string]string{
"key": key,
"siteId": siteId,
func ValidateLicenseV3(licenseKey string) (*model.LicenseV3, *model.ApiError) {
// Creating an HTTP client with a timeout for better control
client := &http.Client{
Timeout: 10 * time.Second,
}
reqString, _ := json.Marshal(licenseReq)
httpResponse, err := http.Post(C.Prefix+"/licenses/activate", APPLICATION_JSON, bytes.NewBuffer(reqString))
req, err := http.NewRequest("GET", C.GatewayUrl+"/v2/licenses/me", nil)
if err != nil {
zap.L().Error("failed to connect to license.signoz.io", zap.Error(err))
return nil, model.BadRequest(fmt.Errorf("unable to connect with license.signoz.io, please check your network connection"))
return nil, model.BadRequest(errors.Wrap(err, fmt.Sprintf("failed to create request: %w", err)))
}
httpBody, err := io.ReadAll(httpResponse.Body)
// Setting the custom header
req.Header.Set("X-Signoz-Cloud-Api-Key", licenseKey)
response, err := client.Do(req)
if err != nil {
zap.L().Error("failed to read activation response from license.signoz.io", zap.Error(err))
return nil, model.BadRequest(fmt.Errorf("failed to read activation response from license.signoz.io"))
}
defer httpResponse.Body.Close()
// read api request result
result := ActivationResult{}
err = json.Unmarshal(httpBody, &result)
if err != nil {
zap.L().Error("failed to marshal activation response from license.signoz.io", zap.Error(err))
return nil, model.InternalError(errors.Wrap(err, "failed to marshal license activation response"))
}
switch httpResponse.StatusCode {
case 200, 201:
return result.Data, nil
case 400, 401:
return nil, model.BadRequest(fmt.Errorf(fmt.Sprintf("failed to activate: %s", result.Error)))
default:
return nil, model.InternalError(fmt.Errorf(fmt.Sprintf("failed to activate: %s", result.Error)))
}
}
// ValidateLicense validates the license key
func ValidateLicense(activationId string) (*ActivationResponse, *model.ApiError) {
validReq := map[string]string{
"activationId": activationId,
}
reqString, _ := json.Marshal(validReq)
response, err := http.Post(C.Prefix+"/licenses/validate", APPLICATION_JSON, bytes.NewBuffer(reqString))
if err != nil {
return nil, model.BadRequest(errors.Wrap(err, "unable to connect with license.signoz.io, please check your network connection"))
return nil, model.BadRequest(errors.Wrap(err, fmt.Sprintf("failed to make post request: %w", err)))
}
body, err := io.ReadAll(response.Body)
if err != nil {
return nil, model.BadRequest(errors.Wrap(err, "failed to read validation response from license.signoz.io"))
return nil, model.BadRequest(errors.Wrap(err, fmt.Sprintf("failed to read validation response from %v", C.GatewayUrl)))
}
defer response.Body.Close()
switch response.StatusCode {
case 200, 201:
a := ActivationResult{}
case 200:
a := ValidateLicenseResponse{}
err = json.Unmarshal(body, &a)
if err != nil {
return nil, model.BadRequest(errors.Wrap(err, "failed to unmarshal license validation response"))
}
return a.Data, nil
case 400, 401:
license, err := model.NewLicenseV3(a.Data)
if err != nil {
return nil, model.BadRequest(errors.Wrap(err, "failed to generate new license v3"))
}
return license, nil
case 400:
return nil, model.BadRequest(errors.Wrap(fmt.Errorf(string(body)),
"bad request error received from license.signoz.io"))
fmt.Sprintf("bad request error received from %v", C.GatewayUrl)))
case 401:
return nil, model.Unauthorized(errors.Wrap(fmt.Errorf(string(body)),
fmt.Sprintf("unauthorized request error received from %v", C.GatewayUrl)))
default:
return nil, model.InternalError(errors.Wrap(fmt.Errorf(string(body)),
"internal error received from license.signoz.io"))
fmt.Sprintf("internal request error received from %v", C.GatewayUrl)))
}
}
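
The new validation path boils down to a GET against the gateway with the license key in a custom header. A self-contained sketch of that request pattern follows; the URL path and header name are taken from the diff above, while the function name and the example gateway host are stand-ins.

package main

import (
	"fmt"
	"io"
	"net/http"
	"time"
)

// fetchLicense mirrors the request pattern in ValidateLicenseV3 above:
// a GET to <gateway>/v2/licenses/me authenticated via the
// X-Signoz-Cloud-Api-Key header, with a 10-second client timeout.
func fetchLicense(gatewayURL, licenseKey string) ([]byte, error) {
	client := &http.Client{Timeout: 10 * time.Second}
	req, err := http.NewRequest(http.MethodGet, gatewayURL+"/v2/licenses/me", nil)
	if err != nil {
		return nil, fmt.Errorf("failed to create request: %w", err)
	}
	req.Header.Set("X-Signoz-Cloud-Api-Key", licenseKey)
	resp, err := client.Do(req)
	if err != nil {
		return nil, err
	}
	defer resp.Body.Close()
	return io.ReadAll(resp.Body)
}

func main() {
	// Illustrative only; this host does not exist, so the call will fail.
	body, err := fetchLicense("https://example-gateway.invalid", "dummy-key")
	fmt.Println(len(body), err)
}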

View File

@@ -3,10 +3,12 @@ package license
import (
"context"
"database/sql"
"encoding/json"
"fmt"
"time"
"github.com/jmoiron/sqlx"
"github.com/mattn/go-sqlite3"
"go.signoz.io/signoz/ee/query-service/license/sqlite"
"go.signoz.io/signoz/ee/query-service/model"
@@ -48,9 +50,35 @@ func (r *Repo) GetLicenses(ctx context.Context) ([]model.License, error) {
return licenses, nil
}
// GetActiveLicense fetches the latest active license from DB.
// If the license is not present, expect a nil license and a nil error in the output.
func (r *Repo) GetActiveLicense(ctx context.Context) (*model.License, *basemodel.ApiError) {
func (r *Repo) GetLicensesV3(ctx context.Context) ([]*model.LicenseV3, error) {
licensesData := []model.LicenseDB{}
licenseV3Data := []*model.LicenseV3{}
query := "SELECT id,key,data FROM licenses_v3"
err := r.db.Select(&licensesData, query)
if err != nil {
return nil, fmt.Errorf("failed to get licenses from db: %v", err)
}
for _, l := range licensesData {
var licenseData map[string]interface{}
err := json.Unmarshal([]byte(l.Data), &licenseData)
if err != nil {
return nil, fmt.Errorf("failed to unmarshal data into licenseData: %v", err)
}
license, err := model.NewLicenseV3WithIDAndKey(l.ID, l.Key, licenseData)
if err != nil {
return nil, fmt.Errorf("failed to get licenses v3 schema: %v", err)
}
licenseV3Data = append(licenseV3Data, license)
}
return licenseV3Data, nil
}
func (r *Repo) GetActiveLicenseV2(ctx context.Context) (*model.License, *basemodel.ApiError) {
var err error
licenses := []model.License{}
@@ -79,6 +107,60 @@ func (r *Repo) GetActiveLicense(ctx context.Context) (*model.License, *basemodel
return active, nil
}
// GetActiveLicense fetches the latest active license from DB.
// If the license is not present, expect a nil license and a nil error in the output.
func (r *Repo) GetActiveLicense(ctx context.Context) (*model.License, *basemodel.ApiError) {
activeLicenseV3, err := r.GetActiveLicenseV3(ctx)
if err != nil {
return nil, basemodel.InternalError(fmt.Errorf("failed to get active licenses from db: %v", err))
}
if activeLicenseV3 == nil {
return nil, nil
}
activeLicenseV2 := model.ConvertLicenseV3ToLicenseV2(activeLicenseV3)
return activeLicenseV2, nil
}
func (r *Repo) GetActiveLicenseV3(ctx context.Context) (*model.LicenseV3, error) {
var err error
licenses := []model.LicenseDB{}
query := "SELECT id,key,data FROM licenses_v3"
err = r.db.Select(&licenses, query)
if err != nil {
return nil, basemodel.InternalError(fmt.Errorf("failed to get active licenses from db: %v", err))
}
var active *model.LicenseV3
for _, l := range licenses {
var licenseData map[string]interface{}
err := json.Unmarshal([]byte(l.Data), &licenseData)
if err != nil {
return nil, fmt.Errorf("failed to unmarshal data into licenseData: %v", err)
}
license, err := model.NewLicenseV3WithIDAndKey(l.ID, l.Key, licenseData)
if err != nil {
return nil, fmt.Errorf("failed to get licenses v3 schema: %v", err)
}
if active == nil &&
(license.ValidFrom != 0) &&
(license.ValidUntil == -1 || license.ValidUntil > time.Now().Unix()) {
active = license
}
if active != nil &&
license.ValidFrom > active.ValidFrom &&
(license.ValidUntil == -1 || license.ValidUntil > time.Now().Unix()) {
active = license
}
}
return active, nil
}
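
The selection loop above reduces to one rule: among licenses with a non-zero ValidFrom that have not yet expired (ValidUntil == -1 marks a subscription with no end date), keep the one with the latest ValidFrom. A hedged restatement of that rule in isolation, with the struct trimmed to the two fields the rule inspects:

// pickActive restates the loop in GetActiveLicenseV3 as a single pass.
type span struct{ ValidFrom, ValidUntil int64 }

func pickActive(licenses []span, now int64) int {
	best := -1
	for i, l := range licenses {
		if l.ValidFrom == 0 {
			continue // ValidFrom unset: never considered
		}
		if l.ValidUntil != -1 && l.ValidUntil <= now {
			continue // already expired
		}
		if best == -1 || l.ValidFrom > licenses[best].ValidFrom {
			best = i
		}
	}
	return best // index of the active license, or -1 if none qualify
}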
// InsertLicense inserts a new license in db
func (r *Repo) InsertLicense(ctx context.Context, l *model.License) error {
@@ -204,3 +286,59 @@ func (r *Repo) InitFeatures(req basemodel.FeatureSet) error {
}
return nil
}
// InsertLicenseV3 inserts a new license v3 in db
func (r *Repo) InsertLicenseV3(ctx context.Context, l *model.LicenseV3) *model.ApiError {
query := `INSERT INTO licenses_v3 (id, key, data) VALUES ($1, $2, $3)`
// license is owned by zeus, so we store the entire payload here without defining a schema
licenseData, err := json.Marshal(l.Data)
if err != nil {
return &model.ApiError{Typ: basemodel.ErrorBadData, Err: err}
}
_, err = r.db.ExecContext(ctx,
query,
l.ID,
l.Key,
string(licenseData),
)
if err != nil {
if sqliteErr, ok := err.(sqlite3.Error); ok {
if sqliteErr.ExtendedCode == sqlite3.ErrConstraintUnique {
zap.L().Error("error in inserting license data: ", zap.Error(sqliteErr))
return &model.ApiError{Typ: model.ErrorConflict, Err: sqliteErr}
}
}
zap.L().Error("error in inserting license data: ", zap.Error(err))
return &model.ApiError{Typ: basemodel.ErrorExec, Err: err}
}
return nil
}
// UpdateLicenseV3 updates an existing license v3 in db
func (r *Repo) UpdateLicenseV3(ctx context.Context, l *model.LicenseV3) error {
// the key and id for the license can't change so only update the data here!
query := `UPDATE licenses_v3 SET data=$1 WHERE id=$2;`
license, err := json.Marshal(l.Data)
if err != nil {
return fmt.Errorf("update license failed: license marshal error")
}
_, err = r.db.ExecContext(ctx,
query,
license,
l.ID,
)
if err != nil {
zap.L().Error("error in updating license data: ", zap.Error(err))
return fmt.Errorf("failed to update license in db: %v", err)
}
return nil
}

View File

@@ -7,6 +7,7 @@ import (
"time"
"github.com/jmoiron/sqlx"
"github.com/pkg/errors"
"sync"
@@ -45,8 +46,9 @@ type Manager struct {
failedAttempts uint64
// keep track of active license and features
activeLicense *model.License
activeFeatures basemodel.FeatureSet
activeLicense *model.License
activeLicenseV3 *model.LicenseV3
activeFeatures basemodel.FeatureSet
}
func StartManager(dbType string, db *sqlx.DB, features ...basemodel.Feature) (*Manager, error) {
@@ -74,9 +76,7 @@ func StartManager(dbType string, db *sqlx.DB, features ...basemodel.Feature) (*M
// start loads active license in memory and initiates validator
func (lm *Manager) start(features ...basemodel.Feature) error {
err := lm.LoadActiveLicense(features...)
return err
return lm.LoadActiveLicenseV3(features...)
}
func (lm *Manager) Stop() {
@@ -84,7 +84,7 @@ func (lm *Manager) Stop() {
<-lm.terminated
}
func (lm *Manager) SetActive(l *model.License, features ...basemodel.Feature) {
func (lm *Manager) SetActiveV3(l *model.LicenseV3, features ...basemodel.Feature) {
lm.mutex.Lock()
defer lm.mutex.Unlock()
@@ -92,8 +92,8 @@ func (lm *Manager) SetActive(l *model.License, features ...basemodel.Feature) {
return
}
lm.activeLicense = l
lm.activeFeatures = append(l.FeatureSet, features...)
lm.activeLicenseV3 = l
lm.activeFeatures = append(l.Features, features...)
// set default features
setDefaultFeatures(lm)
@@ -105,7 +105,7 @@ func (lm *Manager) SetActive(l *model.License, features ...basemodel.Feature) {
// we want to make sure only one validator runs,
// we already have lock() so good to go
lm.validatorRunning = true
go lm.Validator(context.Background())
go lm.ValidatorV3(context.Background())
}
}
@@ -114,14 +114,13 @@ func setDefaultFeatures(lm *Manager) {
lm.activeFeatures = append(lm.activeFeatures, baseconstants.DEFAULT_FEATURE_SET...)
}
// LoadActiveLicense loads the most recent active license
func (lm *Manager) LoadActiveLicense(features ...basemodel.Feature) error {
active, err := lm.repo.GetActiveLicense(context.Background())
func (lm *Manager) LoadActiveLicenseV3(features ...basemodel.Feature) error {
active, err := lm.repo.GetActiveLicenseV3(context.Background())
if err != nil {
return err
}
if active != nil {
lm.SetActive(active, features...)
lm.SetActiveV3(active, features...)
} else {
zap.L().Info("No active license found, defaulting to basic plan")
// if no active license is found, we default to basic(free) plan with all default features
@@ -163,13 +162,36 @@ func (lm *Manager) GetLicenses(ctx context.Context) (response []model.License, a
return
}
func (lm *Manager) GetLicensesV3(ctx context.Context) (response []*model.LicenseV3, apiError *model.ApiError) {
licenses, err := lm.repo.GetLicensesV3(ctx)
if err != nil {
return nil, model.InternalError(err)
}
for _, l := range licenses {
if lm.activeLicenseV3 != nil && l.Key == lm.activeLicenseV3.Key {
l.IsCurrent = true
}
if l.ValidUntil == -1 {
// for subscriptions, there is no end date as such,
// but to show the user some validity we default to a one-year timespan
l.ValidUntil = l.ValidFrom + 31556926 // ~1 year in seconds
}
response = append(response, l)
}
return response, nil
}
// Validator validates license after an epoch of time
func (lm *Manager) Validator(ctx context.Context) {
func (lm *Manager) ValidatorV3(ctx context.Context) {
zap.L().Info("ValidatorV3 started!")
defer close(lm.terminated)
tick := time.NewTicker(validationFrequency)
defer tick.Stop()
lm.Validate(ctx)
lm.ValidateV3(ctx)
for {
select {
@@ -180,17 +202,33 @@ func (lm *Manager) Validator(ctx context.Context) {
case <-lm.done:
return
case <-tick.C:
lm.Validate(ctx)
lm.ValidateV3(ctx)
}
}
}
}
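
The validator cadence above is a standard ticker loop: validate once on startup, then once per tick until the context is cancelled or the manager shuts down. A compact sketch of that shape (assuming imports of context and time; the parameters are stand-ins for the manager's fields):

// runValidator sketches the ValidatorV3 loop: one immediate validation,
// then periodic re-validation until ctx or done fires.
func runValidator(ctx context.Context, done <-chan struct{}, every time.Duration, validate func(context.Context) error) {
	tick := time.NewTicker(every)
	defer tick.Stop()
	_ = validate(ctx) // validate once up front
	for {
		select {
		case <-ctx.Done():
			return
		case <-done:
			return
		case <-tick.C:
			_ = validate(ctx)
		}
	}
}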
// Validate validates the current active license
func (lm *Manager) Validate(ctx context.Context) (reterr error) {
func (lm *Manager) RefreshLicense(ctx context.Context) *model.ApiError {
license, apiError := validate.ValidateLicenseV3(lm.activeLicenseV3.Key)
if apiError != nil {
zap.L().Error("failed to validate license", zap.Error(apiError.Err))
return apiError
}
err := lm.repo.UpdateLicenseV3(ctx, license)
if err != nil {
return model.BadRequest(errors.Wrap(err, "failed to update the new license"))
}
lm.SetActiveV3(license)
return nil
}
func (lm *Manager) ValidateV3(ctx context.Context) (reterr error) {
zap.L().Info("License validation started")
if lm.activeLicense == nil {
if lm.activeLicenseV3 == nil {
return nil
}
@@ -210,52 +248,15 @@ func (lm *Manager) Validate(ctx context.Context) (reterr error) {
lm.mutex.Unlock()
}()
response, apiError := validate.ValidateLicense(lm.activeLicense.ActivationId)
if apiError != nil {
zap.L().Error("failed to validate license", zap.Error(apiError.Err))
return apiError.Err
err := lm.RefreshLicense(ctx)
if err != nil {
return err
}
if response.PlanDetails == lm.activeLicense.PlanDetails {
// license plan hasn't changed, nothing to do
return nil
}
if response.PlanDetails != "" {
// copy and replace the active license record
l := model.License{
Key: lm.activeLicense.Key,
CreatedAt: lm.activeLicense.CreatedAt,
PlanDetails: response.PlanDetails,
ValidationMessage: lm.activeLicense.ValidationMessage,
ActivationId: lm.activeLicense.ActivationId,
}
if err := l.ParsePlan(); err != nil {
zap.L().Error("failed to parse updated license", zap.Error(err))
return err
}
// updated plan is parsable, check if plan has changed
if lm.activeLicense.PlanDetails != response.PlanDetails {
err := lm.repo.UpdatePlanDetails(ctx, lm.activeLicense.Key, response.PlanDetails)
if err != nil {
// unexpected db write issue but we can let the user continue
// and wait for update to work in next cycle.
zap.L().Error("failed to validate license", zap.Error(err))
}
}
// activate the update license plan
lm.SetActive(&l)
}
return nil
}
// Activate activates a license key with signoz server
func (lm *Manager) Activate(ctx context.Context, key string) (licenseResponse *model.License, errResponse *model.ApiError) {
func (lm *Manager) ActivateV3(ctx context.Context, licenseKey string) (licenseResponse *model.LicenseV3, errResponse *model.ApiError) {
defer func() {
if errResponse != nil {
userEmail, err := auth.GetEmailFromJwt(ctx)
@@ -266,36 +267,22 @@ func (lm *Manager) Activate(ctx context.Context, key string) (licenseResponse *m
}
}()
response, apiError := validate.ActivateLicense(key, "")
license, apiError := validate.ValidateLicenseV3(licenseKey)
if apiError != nil {
zap.L().Error("failed to activate license", zap.Error(apiError.Err))
zap.L().Error("failed to get the license", zap.Error(apiError.Err))
return nil, apiError
}
l := &model.License{
Key: key,
ActivationId: response.ActivationId,
PlanDetails: response.PlanDetails,
}
// parse validity and features from the plan details
err := l.ParsePlan()
// insert the new license into the sqlite db
err := lm.repo.InsertLicenseV3(ctx, license)
if err != nil {
zap.L().Error("failed to activate license", zap.Error(err))
return nil, model.InternalError(err)
}
// store the license before activating it
err = lm.repo.InsertLicense(ctx, l)
if err != nil {
zap.L().Error("failed to activate license", zap.Error(err))
return nil, model.InternalError(err)
return nil, err
}
// license is valid, activate it
lm.SetActive(l)
return l, nil
lm.SetActiveV3(license)
return license, nil
}
// CheckFeature will be internally used by backend routines

View File

@@ -48,5 +48,16 @@ func InitDB(db *sqlx.DB) error {
return fmt.Errorf("error in creating feature_status table: %s", err.Error())
}
table_schema = `CREATE TABLE IF NOT EXISTS licenses_v3 (
id TEXT PRIMARY KEY,
key TEXT NOT NULL UNIQUE,
data TEXT
);`
_, err = db.Exec(table_schema)
if err != nil {
return fmt.Errorf("error in creating licenses_v3 table: %s", err.Error())
}
return nil
}

View File

@@ -10,13 +10,17 @@ import (
"syscall"
"time"
"go.opentelemetry.io/collector/confmap"
"go.opentelemetry.io/otel/sdk/resource"
semconv "go.opentelemetry.io/otel/semconv/v1.4.0"
"go.signoz.io/signoz/ee/query-service/app"
signozconfig "go.signoz.io/signoz/pkg/config"
"go.signoz.io/signoz/pkg/confmap/provider/signozenvprovider"
"go.signoz.io/signoz/pkg/query-service/auth"
baseconst "go.signoz.io/signoz/pkg/query-service/constants"
"go.signoz.io/signoz/pkg/query-service/migrate"
"go.signoz.io/signoz/pkg/query-service/version"
signozweb "go.signoz.io/signoz/pkg/web"
"google.golang.org/grpc"
"google.golang.org/grpc/credentials/insecure"
@@ -94,6 +98,7 @@ func main() {
var cluster string
var useLogsNewSchema bool
var useTraceNewSchema bool
var cacheConfigPath, fluxInterval string
var enableQueryServiceLogOTLPExport bool
var preferSpanMetrics bool
@@ -102,8 +107,10 @@ func main() {
var maxOpenConns int
var dialTimeout time.Duration
var gatewayUrl string
var useLicensesV3 bool
flag.BoolVar(&useLogsNewSchema, "use-logs-new-schema", false, "use logs_v2 schema for logs")
flag.BoolVar(&useTraceNewSchema, "use-trace-new-schema", false, "use new schema for traces")
flag.StringVar(&promConfigPath, "config", "./config/prometheus.yml", "(prometheus config to read metrics)")
flag.StringVar(&skipTopLvlOpsPath, "skip-top-level-ops", "", "(config file to skip top level operations)")
flag.BoolVar(&disableRules, "rules.disable", false, "(disable rule evaluation)")
@@ -117,6 +124,7 @@ func main() {
flag.BoolVar(&enableQueryServiceLogOTLPExport, "enable.query.service.log.otlp.export", false, "(enable query service log otlp export)")
flag.StringVar(&cluster, "cluster", "cluster", "(cluster name - defaults to 'cluster')")
flag.StringVar(&gatewayUrl, "gateway-url", "", "(url to the gateway)")
flag.BoolVar(&useLicensesV3, "use-licenses-v3", false, "use licenses_v3 schema for licenses")
flag.Parse()
@@ -127,6 +135,23 @@ func main() {
version.PrintVersion()
config, err := signozconfig.New(context.Background(), signozconfig.ProviderSettings{
ResolverSettings: confmap.ResolverSettings{
URIs: []string{"signozenv:"},
ProviderFactories: []confmap.ProviderFactory{
signozenvprovider.NewFactory(),
},
},
})
if err != nil {
zap.L().Fatal("Failed to create config", zap.Error(err))
}
web, err := signozweb.New(zap.L(), config.Web)
if err != nil {
zap.L().Fatal("Failed to create web", zap.Error(err))
}
serverOptions := &app.ServerOptions{
HTTPHostPort: baseconst.HTTPHostPort,
PromConfigPath: promConfigPath,
@@ -143,6 +168,7 @@ func main() {
Cluster: cluster,
GatewayUrl: gatewayUrl,
UseLogsNewSchema: useLogsNewSchema,
UseTraceNewSchema: useTraceNewSchema,
}
// Read the jwt secret key
@@ -160,7 +186,7 @@ func main() {
zap.L().Info("Migration successful")
}
server, err := app.NewServer(serverOptions)
server, err := app.NewServer(serverOptions, web)
if err != nil {
zap.L().Fatal("Failed to create server", zap.Error(err))
}

View File

@@ -46,6 +46,13 @@ func BadRequest(err error) *ApiError {
}
}
func Unauthorized(err error) *ApiError {
return &ApiError{
Typ: basemodel.ErrorUnauthorized,
Err: err,
}
}
// BadRequestStr returns an ApiError object of bad request for string input
func BadRequestStr(s string) *ApiError {
return &ApiError{

View File

@@ -3,6 +3,8 @@ package model
import (
"encoding/base64"
"encoding/json"
"fmt"
"reflect"
"time"
"github.com/pkg/errors"
@@ -104,3 +106,165 @@ type SubscriptionServerResp struct {
Status string `json:"status"`
Data Licenses `json:"data"`
}
type Plan struct {
Name string `json:"name"`
}
type LicenseDB struct {
ID string `json:"id"`
Key string `json:"key"`
Data string `json:"data"`
}
type LicenseV3 struct {
ID string
Key string
Data map[string]interface{}
PlanName string
Features basemodel.FeatureSet
Status string
IsCurrent bool
ValidFrom int64
ValidUntil int64
}
func extractKeyFromMapStringInterface[T any](data map[string]interface{}, key string) (T, error) {
var zeroValue T
if val, ok := data[key]; ok {
if value, ok := val.(T); ok {
return value, nil
}
return zeroValue, fmt.Errorf("%s key is not a valid %s", key, reflect.TypeOf(zeroValue))
}
return zeroValue, fmt.Errorf("%s key is missing", key)
}
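
A short usage sketch of the generic helper above (the payload is hypothetical). Note that encoding/json decodes JSON numbers into float64, which is why NewLicenseV3 below extracts valid_from as float64 before converting to int64:

func exampleExtract() {
	data := map[string]interface{}{
		"id":         "lic-1",
		"valid_from": float64(1730899309), // what json.Unmarshal produces for numbers
	}

	id, _ := extractKeyFromMapStringInterface[string](data, "id")
	validFrom, _ := extractKeyFromMapStringInterface[float64](data, "valid_from")
	fmt.Println(id, int64(validFrom)) // lic-1 1730899309

	_, err := extractKeyFromMapStringInterface[string](data, "missing")
	fmt.Println(err) // missing key is missing
}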
func NewLicenseV3(data map[string]interface{}) (*LicenseV3, error) {
var features basemodel.FeatureSet
// extract id from data
licenseID, err := extractKeyFromMapStringInterface[string](data, "id")
if err != nil {
return nil, err
}
delete(data, "id")
// extract key from data
licenseKey, err := extractKeyFromMapStringInterface[string](data, "key")
if err != nil {
return nil, err
}
delete(data, "key")
// extract status from data
status, err := extractKeyFromMapStringInterface[string](data, "status")
if err != nil {
return nil, err
}
planMap, err := extractKeyFromMapStringInterface[map[string]any](data, "plan")
if err != nil {
return nil, err
}
planName, err := extractKeyFromMapStringInterface[string](planMap, "name")
if err != nil {
return nil, err
}
// if license status is inactive then default it to basic
if status == LicenseStatusInactive {
planName = PlanNameBasic
}
featuresFromZeus := basemodel.FeatureSet{}
if _features, ok := data["features"]; ok {
featuresData, err := json.Marshal(_features)
if err != nil {
return nil, errors.Wrap(err, "failed to marshal features data")
}
if err := json.Unmarshal(featuresData, &featuresFromZeus); err != nil {
return nil, errors.Wrap(err, "failed to unmarshal features data")
}
}
switch planName {
case PlanNameTeams:
features = append(features, ProPlan...)
case PlanNameEnterprise:
features = append(features, EnterprisePlan...)
case PlanNameBasic:
features = append(features, BasicPlan...)
default:
features = append(features, BasicPlan...)
}
if len(featuresFromZeus) > 0 {
for _, feature := range featuresFromZeus {
exists := false
for i, existingFeature := range features {
if existingFeature.Name == feature.Name {
features[i] = feature // Replace existing feature
exists = true
break
}
}
if !exists {
features = append(features, feature) // Append if it doesn't exist
}
}
}
data["features"] = features
_validFrom, err := extractKeyFromMapStringInterface[float64](data, "valid_from")
if err != nil {
_validFrom = 0
}
validFrom := int64(_validFrom)
_validUntil, err := extractKeyFromMapStringInterface[float64](data, "valid_until")
if err != nil {
_validUntil = 0
}
validUntil := int64(_validUntil)
return &LicenseV3{
ID: licenseID,
Key: licenseKey,
Data: data,
PlanName: planName,
Features: features,
ValidFrom: validFrom,
ValidUntil: validUntil,
Status: status,
}, nil
}
func NewLicenseV3WithIDAndKey(id string, key string, data map[string]interface{}) (*LicenseV3, error) {
licenseDataWithIdAndKey := data
licenseDataWithIdAndKey["id"] = id
licenseDataWithIdAndKey["key"] = key
return NewLicenseV3(licenseDataWithIdAndKey)
}
func ConvertLicenseV3ToLicenseV2(l *LicenseV3) *License {
planKeyFromPlanName, ok := MapOldPlanKeyToNewPlanName[l.PlanName]
if !ok {
planKeyFromPlanName = Basic
}
return &License{
Key: l.Key,
ActivationId: "",
PlanDetails: "",
FeatureSet: l.Features,
ValidationMessage: "",
IsCurrent: l.IsCurrent,
LicensePlan: LicensePlan{
PlanKey: planKeyFromPlanName,
ValidFrom: l.ValidFrom,
ValidUntil: l.ValidUntil,
Status: l.Status},
}
}
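
A hedged example of the V3-to-V2 bridge above, assuming the LicenseV3 type and plan-name map defined in this diff: a TEAMS license converts to the PRO_PLAN key, and unrecognized plan names fall back to Basic. ActivationId and PlanDetails are intentionally left empty, since the V3 flow no longer goes through the activation endpoint:

func exampleConvert() {
	v3 := &LicenseV3{
		Key:        "some-key",
		PlanName:   "TEAMS", // maps to PRO_PLAN via MapOldPlanKeyToNewPlanName
		Status:     "ACTIVE",
		ValidFrom:  1730899309,
		ValidUntil: -1,
	}
	v2 := ConvertLicenseV3ToLicenseV2(v3)
	fmt.Println(v2.LicensePlan.PlanKey) // PRO_PLAN
}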

View File

@@ -0,0 +1,170 @@
package model
import (
"encoding/json"
"testing"
"github.com/pkg/errors"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"go.signoz.io/signoz/pkg/query-service/model"
)
func TestNewLicenseV3(t *testing.T) {
testCases := []struct {
name string
data []byte
pass bool
expected *LicenseV3
error error
}{
{
name: "Error for missing license id",
data: []byte(`{}`),
pass: false,
error: errors.New("id key is missing"),
},
{
name: "Error for license id not being a valid string",
data: []byte(`{"id": 10}`),
pass: false,
error: errors.New("id key is not a valid string"),
},
{
name: "Error for missing license key",
data: []byte(`{"id":"does-not-matter"}`),
pass: false,
error: errors.New("key key is missing"),
},
{
name: "Error for invalid string license key",
data: []byte(`{"id":"does-not-matter","key":10}`),
pass: false,
error: errors.New("key key is not a valid string"),
},
{
name: "Error for missing license status",
data: []byte(`{"id":"does-not-matter", "key": "does-not-matter","category":"FREE"}`),
pass: false,
error: errors.New("status key is missing"),
},
{
name: "Error for invalid string license status",
data: []byte(`{"id":"does-not-matter","key": "does-not-matter", "category":"FREE", "status":10}`),
pass: false,
error: errors.New("status key is not a valid string"),
},
{
name: "Error for missing license plan",
data: []byte(`{"id":"does-not-matter","key":"does-not-matter-key","category":"FREE","status":"ACTIVE"}`),
pass: false,
error: errors.New("plan key is missing"),
},
{
name: "Error for invalid json license plan",
data: []byte(`{"id":"does-not-matter","key":"does-not-matter-key","category":"FREE","status":"ACTIVE","plan":10}`),
pass: false,
error: errors.New("plan key is not a valid map[string]interface {}"),
},
{
name: "Error for invalid license plan",
data: []byte(`{"id":"does-not-matter","key":"does-not-matter-key","category":"FREE","status":"ACTIVE","plan":{}}`),
pass: false,
error: errors.New("name key is missing"),
},
{
name: "Parse the entire license properly",
data: []byte(`{"id":"does-not-matter","key":"does-not-matter-key","category":"FREE","status":"ACTIVE","plan":{"name":"TEAMS"},"valid_from": 1730899309,"valid_until": -1}`),
pass: true,
expected: &LicenseV3{
ID: "does-not-matter",
Key: "does-not-matter-key",
Data: map[string]interface{}{
"plan": map[string]interface{}{
"name": "TEAMS",
},
"category": "FREE",
"status": "ACTIVE",
"valid_from": float64(1730899309),
"valid_until": float64(-1),
},
PlanName: PlanNameTeams,
ValidFrom: 1730899309,
ValidUntil: -1,
Status: "ACTIVE",
IsCurrent: false,
Features: model.FeatureSet{},
},
},
{
name: "Fallback to basic plan if license status is inactive",
data: []byte(`{"id":"does-not-matter","key":"does-not-matter-key","category":"FREE","status":"INACTIVE","plan":{"name":"TEAMS"},"valid_from": 1730899309,"valid_until": -1}`),
pass: true,
expected: &LicenseV3{
ID: "does-not-matter",
Key: "does-not-matter-key",
Data: map[string]interface{}{
"plan": map[string]interface{}{
"name": "TEAMS",
},
"category": "FREE",
"status": "INACTIVE",
"valid_from": float64(1730899309),
"valid_until": float64(-1),
},
PlanName: PlanNameBasic,
ValidFrom: 1730899309,
ValidUntil: -1,
Status: "INACTIVE",
IsCurrent: false,
Features: model.FeatureSet{},
},
},
{
name: "fallback states for validFrom and validUntil",
data: []byte(`{"id":"does-not-matter","key":"does-not-matter-key","category":"FREE","status":"ACTIVE","plan":{"name":"TEAMS"},"valid_from":1234.456,"valid_until":5678.567}`),
pass: true,
expected: &LicenseV3{
ID: "does-not-matter",
Key: "does-not-matter-key",
Data: map[string]interface{}{
"plan": map[string]interface{}{
"name": "TEAMS",
},
"valid_from": 1234.456,
"valid_until": 5678.567,
"category": "FREE",
"status": "ACTIVE",
},
PlanName: PlanNameTeams,
ValidFrom: 1234,
ValidUntil: 5678,
Status: "ACTIVE",
IsCurrent: false,
Features: model.FeatureSet{},
},
},
}
for _, tc := range testCases {
var licensePayload map[string]interface{}
err := json.Unmarshal(tc.data, &licensePayload)
require.NoError(t, err)
license, err := NewLicenseV3(licensePayload)
if license != nil {
license.Features = make(model.FeatureSet, 0)
delete(license.Data, "features")
}
if tc.pass {
require.NoError(t, err)
require.NotNil(t, license)
assert.Equal(t, tc.expected, license)
} else {
require.Error(t, err)
assert.EqualError(t, err, tc.error.Error())
require.Nil(t, license)
}
}
}

View File

@@ -1,6 +1,7 @@
package model
import (
"go.signoz.io/signoz/pkg/query-service/constants"
basemodel "go.signoz.io/signoz/pkg/query-service/model"
)
@@ -8,6 +9,21 @@ const SSO = "SSO"
const Basic = "BASIC_PLAN"
const Pro = "PRO_PLAN"
const Enterprise = "ENTERPRISE_PLAN"
var (
PlanNameEnterprise = "ENTERPRISE"
PlanNameTeams = "TEAMS"
PlanNameBasic = "BASIC"
)
var (
MapOldPlanKeyToNewPlanName map[string]string = map[string]string{PlanNameBasic: Basic, PlanNameTeams: Pro, PlanNameEnterprise: Enterprise}
)
var (
LicenseStatusInactive = "INACTIVE"
)
const DisableUpsell = "DISABLE_UPSELL"
const Onboarding = "ONBOARDING"
const ChatSupport = "CHAT_SUPPORT"
@@ -134,6 +150,13 @@ var BasicPlan = basemodel.FeatureSet{
UsageLimit: -1,
Route: "",
},
basemodel.Feature{
Name: basemodel.HostsInfraMonitoring,
Active: constants.EnableHostsInfraMonitoring(),
Usage: 0,
UsageLimit: -1,
Route: "",
},
}
var ProPlan = basemodel.FeatureSet{
@@ -249,6 +272,13 @@ var ProPlan = basemodel.FeatureSet{
UsageLimit: -1,
Route: "",
},
basemodel.Feature{
Name: basemodel.HostsInfraMonitoring,
Active: constants.EnableHostsInfraMonitoring(),
Usage: 0,
UsageLimit: -1,
Route: "",
},
}
var EnterprisePlan = basemodel.FeatureSet{
@@ -378,4 +408,11 @@ var EnterprisePlan = basemodel.FeatureSet{
UsageLimit: -1,
Route: "",
},
basemodel.Feature{
Name: basemodel.HostsInfraMonitoring,
Active: constants.EnableHostsInfraMonitoring(),
Usage: 0,
UsageLimit: -1,
Route: "",
},
}

View File

@@ -61,6 +61,11 @@ func NewAnomalyRule(
zap.L().Info("creating new AnomalyRule", zap.String("id", id), zap.Any("opts", opts))
if p.RuleCondition.CompareOp == baserules.ValueIsBelow {
target := -1 * *p.RuleCondition.Target
p.RuleCondition.Target = &target
}
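// Worked example (illustrative reading of the negation above): a rule with
// CompareOp == ValueIsBelow and a user-supplied target of 10 is stored with
// target -10, so downstream evaluation only ever handles one orientation of
// the comparison. This interpretation is an assumption based on this hunk.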
baseRule, err := baserules.NewBaseRule(id, p, reader, opts...)
if err != nil {
return nil, err

View File

@@ -1,10 +1,15 @@
package rules
import (
"context"
"fmt"
"time"
"github.com/google/uuid"
basemodel "go.signoz.io/signoz/pkg/query-service/model"
baserules "go.signoz.io/signoz/pkg/query-service/rules"
"go.signoz.io/signoz/pkg/query-service/utils/labels"
"go.uber.org/zap"
)
func PrepareTaskFunc(opts baserules.PrepareTaskOptions) (baserules.Task, error) {
@@ -21,6 +26,7 @@ func PrepareTaskFunc(opts baserules.PrepareTaskOptions) (baserules.Task, error)
opts.FF,
opts.Reader,
opts.UseLogsNewSchema,
opts.UseTraceNewSchema,
baserules.WithEvalDelay(opts.ManagerOpts.EvalDelay),
)
@@ -79,6 +85,107 @@ func PrepareTaskFunc(opts baserules.PrepareTaskOptions) (baserules.Task, error)
return task, nil
}
// TestNotification prepares a dummy rule for given rule parameters and
// sends a test notification. returns alert count and error (if any)
func TestNotification(opts baserules.PrepareTestRuleOptions) (int, *basemodel.ApiError) {
ctx := context.Background()
if opts.Rule == nil {
return 0, basemodel.BadRequest(fmt.Errorf("rule is required"))
}
parsedRule := opts.Rule
var alertname = parsedRule.AlertName
if alertname == "" {
// alertname is not mandatory for testing, so picking
// a random string here
alertname = uuid.New().String()
}
// append name to indicate this is test alert
parsedRule.AlertName = fmt.Sprintf("%s%s", alertname, baserules.TestAlertPostFix)
var rule baserules.Rule
var err error
if parsedRule.RuleType == baserules.RuleTypeThreshold {
// add special labels for test alerts
parsedRule.Annotations[labels.AlertSummaryLabel] = fmt.Sprintf("The rule threshold is set to %.4f, and the observed metric value is {{$value}}.", *parsedRule.RuleCondition.Target)
parsedRule.Labels[labels.RuleSourceLabel] = ""
parsedRule.Labels[labels.AlertRuleIdLabel] = ""
// create a threshold rule
rule, err = baserules.NewThresholdRule(
alertname,
parsedRule,
opts.FF,
opts.Reader,
opts.UseLogsNewSchema,
opts.UseTraceNewSchema,
baserules.WithSendAlways(),
baserules.WithSendUnmatched(),
)
if err != nil {
zap.L().Error("failed to prepare a new threshold rule for test", zap.String("name", rule.Name()), zap.Error(err))
return 0, basemodel.BadRequest(err)
}
} else if parsedRule.RuleType == baserules.RuleTypeProm {
// create promql rule
rule, err = baserules.NewPromRule(
alertname,
parsedRule,
opts.Logger,
opts.Reader,
opts.ManagerOpts.PqlEngine,
baserules.WithSendAlways(),
baserules.WithSendUnmatched(),
)
if err != nil {
zap.L().Error("failed to prepare a new promql rule for test", zap.String("name", rule.Name()), zap.Error(err))
return 0, basemodel.BadRequest(err)
}
} else if parsedRule.RuleType == baserules.RuleTypeAnomaly {
// create anomaly rule
rule, err = NewAnomalyRule(
alertname,
parsedRule,
opts.FF,
opts.Reader,
opts.Cache,
baserules.WithSendAlways(),
baserules.WithSendUnmatched(),
)
if err != nil {
zap.L().Error("failed to prepare a new anomaly rule for test", zap.String("name", rule.Name()), zap.Error(err))
return 0, basemodel.BadRequest(err)
}
} else {
return 0, basemodel.BadRequest(fmt.Errorf("failed to derive ruletype with given information"))
}
// set timestamp to current utc time
ts := time.Now().UTC()
count, err := rule.Eval(ctx, ts)
if err != nil {
zap.L().Error("evaluating rule failed", zap.String("rule", rule.Name()), zap.Error(err))
return 0, basemodel.InternalError(fmt.Errorf("rule evaluation failed"))
}
alertsFound, ok := count.(int)
if !ok {
return 0, basemodel.InternalError(fmt.Errorf("something went wrong"))
}
rule.SendAlerts(ctx, ts, 0, time.Duration(1*time.Minute), opts.NotifyFunc)
return alertsFound, nil
}
// newTask returns an appropriate group for
// rule type
func newTask(taskType baserules.TaskType, name string, frequency time.Duration, rules []baserules.Rule, opts *baserules.ManagerOptions, notify baserules.NotifyFunc, ruleDB baserules.RuleDB) baserules.Task {

View File

@@ -13,8 +13,3 @@ if [ "$branch" = "main" ]; then
echo "${color_red}${bold}You can't commit directly to the main branch${reset}"
exit 1
fi
if [ "$branch" = "develop" ]; then
echo "${color_red}${bold}You can't commit directly to the develop branch${reset}"
exit 1
fi

View File

@@ -34,15 +34,15 @@
"@dnd-kit/core": "6.1.0",
"@dnd-kit/modifiers": "7.0.0",
"@dnd-kit/sortable": "8.0.0",
"@grafana/data": "^9.5.2",
"@grafana/data": "^11.2.3",
"@mdx-js/loader": "2.3.0",
"@mdx-js/react": "2.3.0",
"@monaco-editor/react": "^4.3.1",
"@radix-ui/react-tabs": "1.0.4",
"@radix-ui/react-tooltip": "1.0.7",
"@sentry/react": "7.102.1",
"@sentry/webpack-plugin": "2.16.0",
"@signozhq/design-tokens": "0.0.8",
"@sentry/react": "8.41.0",
"@sentry/webpack-plugin": "2.22.6",
"@signozhq/design-tokens": "1.1.4",
"@uiw/react-md-editor": "3.23.5",
"@visx/group": "3.3.0",
"@visx/shape": "3.5.0",
@@ -51,7 +51,7 @@
"ansi-to-html": "0.7.2",
"antd": "5.11.0",
"antd-table-saveas-excel": "2.2.1",
"axios": "1.7.4",
"axios": "1.7.7",
"babel-eslint": "^10.1.0",
"babel-jest": "^29.6.4",
"babel-loader": "9.1.3",
@@ -76,7 +76,7 @@
"fontfaceobserver": "2.3.0",
"history": "4.10.1",
"html-webpack-plugin": "5.5.0",
"http-proxy-middleware": "2.0.6",
"http-proxy-middleware": "3.0.3",
"i18next": "^21.6.12",
"i18next-browser-languagedetector": "^6.1.3",
"i18next-http-backend": "^1.3.2",
@@ -87,6 +87,8 @@
"lodash-es": "^4.17.21",
"lucide-react": "0.379.0",
"mini-css-extract-plugin": "2.4.5",
"overlayscrollbars": "^2.8.1",
"overlayscrollbars-react": "^0.5.6",
"papaparse": "5.4.1",
"posthog-js": "1.160.3",
"rc-tween-one": "3.0.6",
@@ -107,11 +109,10 @@
"react-query": "3.39.3",
"react-redux": "^7.2.2",
"react-router-dom": "^5.2.0",
"react-router-dom-v5-compat": "6.27.0",
"react-syntax-highlighter": "15.5.0",
"react-use": "^17.3.2",
"react-virtuoso": "4.0.3",
"overlayscrollbars-react": "^0.5.6",
"overlayscrollbars": "^2.8.1",
"redux": "^4.0.5",
"redux-thunk": "^2.3.0",
"rehype-raw": "7.0.0",
@@ -123,11 +124,11 @@
"ts-node": "^10.2.1",
"tsconfig-paths-webpack-plugin": "^3.5.1",
"typescript": "^4.0.5",
"uplot": "1.6.26",
"uplot": "1.6.31",
"uuid": "^8.3.2",
"web-vitals": "^0.2.4",
"webpack": "5.88.2",
"webpack-dev-server": "^4.15.1",
"webpack": "5.94.0",
"webpack-dev-server": "^4.15.2",
"webpack-retry-chunk-load-plugin": "3.1.1",
"xstate": "^4.31.0"
},
@@ -240,6 +241,8 @@
"semver": "7.5.4",
"xml2js": "0.5.0",
"phin": "^3.7.1",
"body-parser": "1.20.3"
"body-parser": "1.20.3",
"http-proxy-middleware": "3.0.3",
"cross-spawn": "7.0.5"
}
}

View File

@@ -0,0 +1 @@
<svg xmlns="http://www.w3.org/2000/svg" width="18" height="18" fill="none"><path fill="#A65F3E" d="M8.04 10.331a.41.41 0 0 1-.414-.414.4.4 0 0 1 .121-.292l8.071-8.071a.414.414 0 1 1 .585.585l-8.07 8.071a.4.4 0 0 1-.293.121"/><path fill="#A65F3E" d="M16.11 1.5c.09 0 .178.034.245.101a.35.35 0 0 1 0 .492l-8.07 8.07a.346.346 0 0 1-.49 0 .35.35 0 0 1 0-.49l8.07-8.072a.35.35 0 0 1 .245-.101m0-.133a.48.48 0 0 0-.338.14L7.7 9.578a.47.47 0 0 0-.14.34.475.475 0 0 0 .478.478c.13 0 .25-.05.34-.14l8.07-8.071a.48.48 0 0 0-.339-.818"/><path fill="#FFE082" d="m1.701 12.438 3.89 3.889c.873-.963 1.62-2.057 2.023-3.313.03-.091.034-.24.128-.359.451-.566 1.865-2.008.706-3.167-1.106-1.106-2.438.227-2.994.686-.17.14-.384.228-.606.276-1.493.326-3.034 1.869-3.147 1.988"/><path fill="#FFE082" d="M8.385 8.577a.62.62 0 0 1 .393-.085c.098.018.237.135.38.28.144.143.28.304.32.408s-.005.242-.005.242c-.116.23-.383.69-.6.624-.24-.074-.482-.305-.66-.479a1.5 1.5 0 0 1-.276-.328c-.096-.177.008-.324.129-.447.086-.082.232-.17.319-.215"/><path fill="#F9C248" d="M8.327 8.975c.116.11.21.243.339.338.252.185.455.097.62-.052.049-.044.122-.1.17-.055a.1.1 0 0 1 .025.051.45.45 0 0 1-.045.273 1.3 1.3 0 0 1-.433.529c-.032.022-.07.044-.11.032a.12.12 0 0 1-.056-.045c-.207-.244-.37-.533-.626-.724-.103-.076-.364-.132-.298-.303.1-.262.317-.137.414-.044"/><path fill="#F9C248" d="M7.614 13.014c.028-.091.033-.24.127-.359.515-.645 1.223-1.38 1.145-2.275-.01-.123-.169-.75-.342-.514-.04.052-.024.315-.03.379-.1 1.172-1.02 1.821-1.19 2.024s-.164.393-.31.695a5 5 0 0 1-.61.947c-.379.47-.825.88-1.286 1.27a.8.8 0 0 0-.203.217c-.131.241.153.406.305.558l.369.368c.873-.961 1.62-2.055 2.025-3.31"/><path fill="#E2A610" d="M5.537 15.809c-.1-.157-.242-.3-.317-.458a.24.24 0 0 1-.03-.123c.01-.08.13-.15.187-.198q.129-.108.254-.22c.162-.149.314-.314.419-.509.017-.031.032-.07.016-.102-.035-.065-.238.152-.275.186-.105.092-.208.187-.318.272-.146.113-.422.304-.618.213-.1-.046-.19-.169-.263-.249-.084-.094-.164-.191-.252-.283a17 17 0 0 0-.592-.582c-.05-.046-.06-.066-.003-.122a10 10 0 0 0 .546-.58c.022-.025.044-.067.017-.09-.018-.015-.048-.004-.07.007-.26.138-.467.354-.692.544-.055.046-.214-.13-.249-.158-.092-.073-.154-.102-.046-.21.484-.49.972-.946 1.554-1.323.107-.07.22-.14.28-.253-.01-.03-.054-.026-.085-.015-.807.29-1.89 1.291-1.983 1.38-.162.158-.454-.206-.885-.481-.147-.094 0-.235.038-.279.26-.307.603-.642.603-.642-.127.013-.956.76-1.054.873-.084.097-.17.184-.175.318a.52.52 0 0 0 .107.325c.77 1.05 2.586 2.794 3.23 3.253.384.274.502.224.659.068a.35.35 0 0 0 .105-.3.65.65 0 0 0-.108-.263"/><path fill="#A65F3E" d="M8.835 10.176s-.438-.825-1.017-1.074c0 0-.02-.054.02-.1.052-.057.12-.07.157-.046.427.265.812.619 1.007 1.102.039.093-.131.23-.167.118"/><path fill="#F44336" d="M7.64 12.88c-.528-.818-1.63-1.937-2.46-2.524-.066-.046.204-.204.272-.156a9.7 9.7 0 0 1 2.31 2.398c.045.067-.091.33-.123.282M8.193 12.078c-.506-.71-1.521-1.738-2.238-2.312-.062-.05.182-.22.232-.181.755.602 1.668 1.499 2.18 2.263.037.057-.138.282-.174.23"/></svg>


View File

@@ -0,0 +1 @@
<svg xmlns="http://www.w3.org/2000/svg" width="14" height="16" fill="none"><path fill="#616161" fill-rule="evenodd" d="M8.096 2.885H4.372V2.51h3.724zM8.096 4.79H4.372v-.375h3.724z" clip-rule="evenodd"/><path fill="#9E9E9E" d="M7.098 15.539H5.662V.936s.134-.311.719-.311.719.311.719.311v14.603z"/><path fill="#757575" d="M6.73.671V12.47H5.662v1.074c.181.001.345.023.493.055.336.074.576.37.576.714v1.227H7.1V.936c-.002 0-.08-.179-.37-.265"/><path fill="#2196F3" d="M10.58.54a3.03 3.03 0 0 0-3.028 3.038 3.02 3.02 0 0 0 3.027 3.028 3.025 3.025 0 0 0 3.028-3.028A3.035 3.035 0 0 0 10.579.54"/><path fill="#fff" d="M11.902 1.671c-.19-.048-.569-.098-1.321-.098-.753 0-1.132.05-1.322.098-.112.029-.488.185-.488.606v2.598c0 .142.115.258.258.258h.095v.288c0 .084.068.151.152.151h.306a.15.15 0 0 0 .151-.151v-.288h1.693v.288c0 .084.067.151.15.151h.307a.15.15 0 0 0 .151-.151v-.288h.098a.26.26 0 0 0 .259-.258V2.277c0-.404-.377-.579-.49-.606m-2.139.206c0-.064.051-.115.115-.115h1.403c.063 0 .115.051.115.115v.204a.115.115 0 0 1-.115.115H9.878a.115.115 0 0 1-.115-.115zm.024 2.736a.08.08 0 0 1-.078.078h-.308a.264.264 0 0 1-.264-.264v-.139c0-.042.035-.077.077-.077h.31c.144 0 .263.117.263.264zm2.235-.186a.264.264 0 0 1-.264.264h-.309a.08.08 0 0 1-.077-.078v-.138c0-.145.117-.264.264-.264h.308c.043 0 .078.035.078.077zm.07-1.129c0 .168-.363.46-1.513.46s-1.512-.27-1.512-.46v-.767c0-.05.05-.175.175-.175h2.695c.125 0 .155.126.155.175z"/><path fill="#F5F5F5" d="M8.61 12.867H4.15a.285.285 0 0 1-.285-.285v-5.15c0-.158.127-.285.285-.285h4.457c.158 0 .285.127.285.285v5.15a.285.285 0 0 1-.284.285"/><path fill="#82AEC0" d="M8.128 12.015H4.632l-.01-4.07H8.12z" opacity=".8"/><path fill="#F5F5F5" fill-rule="evenodd" d="M6.246 12.07V7.945h.25v4.123z" clip-rule="evenodd"/><path fill="#616161" d="M6.246 7.946H4.622v.34h1.624z"/><path fill="#F5F5F5" fill-rule="evenodd" d="M8.142 11.307H4.618v-.125h3.524zM8.142 10.482H4.618v-.125h3.524zM8.12 9.657H4.621v-.125H8.12zM8.12 8.833H4.617v-.125H8.12z" clip-rule="evenodd"/><path fill="#616161" d="M8.118 9.426H6.495v.34h1.623zM6.253 10.25H4.635v.34h1.618z"/><path fill="#9E9E9E" fill-rule="evenodd" d="M4.15 7.334a.097.097 0 0 0-.097.098v5.15c0 .054.044.097.098.097h4.458a.097.097 0 0 0 .097-.097v-5.15a.097.097 0 0 0-.098-.098zm-.472.098c0-.261.212-.473.473-.473h4.457c.261 0 .473.212.473.473v5.15c0 .26-.211.472-.472.472H4.151a.47.47 0 0 1-.473-.472z" clip-rule="evenodd"/><path fill="#757575" d="M4.17 12.682c-.194.017-.194-.11-.194-.145V7.493c0-.141.115-.256.256-.256H8.56c.118 0 .172.071.148.216 0 0-.015-.092-.128-.092H4.233a.133.133 0 0 0-.132.132v5.045c0 .117.069.144.069.144"/><path fill="#FFCA28" d="M4.9 1.775H.642v3.7h4.26z"/><path fill="#9E9E9E" d="M4.663 1.775c.132 0 .238.106.238.237v3.225a.237.237 0 0 1-.238.237H.88a.237.237 0 0 1-.238-.237V2.012c0-.131.107-.237.238-.237zm0-.25H.88a.49.49 0 0 0-.488.487v3.225c0 .269.22.487.488.487h3.783a.49.49 0 0 0 .488-.487V2.012a.487.487 0 0 0-.488-.487"/><path fill="#FFFDE7" fill-rule="evenodd" d="M4.902 3.11H.642v-.25h4.26zM4.902 4.388H.642v-.25h4.26z" clip-rule="evenodd"/><path fill="#757575" d="M1.975 2.186H.904v.282h1.07zM1.711 4.777H.904v.283h.807zM4.552 4.777h-.807v.283h.807zM4.552 2.186h-.807v.282h.807zM3.795 3.482H3.33v.282h.465zM4.552 3.482h-.465v.282h.465zM2.388 3.482H.904v.282h1.484z"/></svg>


File diff suppressed because one or more lines are too long


Binary file not shown.


frontend/public/css/uPlot.min.css vendored Normal file
View File

@@ -0,0 +1 @@
.uplot, .uplot *, .uplot *::before, .uplot *::after {box-sizing: border-box;}.uplot {font-family: system-ui, -apple-system, "Segoe UI", Roboto, "Helvetica Neue", Arial, "Noto Sans", sans-serif, "Apple Color Emoji", "Segoe UI Emoji", "Segoe UI Symbol", "Noto Color Emoji";line-height: 1.5;width: min-content;}.u-title {text-align: center;font-size: 18px;font-weight: bold;}.u-wrap {position: relative;user-select: none;}.u-over, .u-under {position: absolute;}.u-under {overflow: hidden;}.uplot canvas {display: block;position: relative;width: 100%;height: 100%;}.u-axis {position: absolute;}.u-legend {font-size: 14px;margin: auto;text-align: center;}.u-inline {display: block;}.u-inline * {display: inline-block;}.u-inline tr {margin-right: 16px;}.u-legend th {font-weight: 600;}.u-legend th > * {vertical-align: middle;display: inline-block;}.u-legend .u-marker {width: 1em;height: 1em;margin-right: 4px;background-clip: padding-box !important;}.u-inline.u-live th::after {content: ":";vertical-align: middle;}.u-inline:not(.u-live) .u-value {display: none;}.u-series > * {padding: 4px;}.u-series th {cursor: pointer;}.u-legend .u-off > * {opacity: 0.3;}.u-select {background: rgba(0,0,0,0.07);position: absolute;pointer-events: none;}.u-cursor-x, .u-cursor-y {position: absolute;left: 0;top: 0;pointer-events: none;will-change: transform;}.u-hz .u-cursor-x, .u-vt .u-cursor-y {height: 100%;border-right: 1px dashed #607D8B;}.u-hz .u-cursor-y, .u-vt .u-cursor-x {width: 100%;border-bottom: 1px dashed #607D8B;}.u-cursor-pt {position: absolute;top: 0;left: 0;border-radius: 50%;border: 0 solid;pointer-events: none;will-change: transform;/*this has to be !important since we set inline "background" shorthand */background-clip: padding-box !important;}.u-axis.u-off, .u-select.u-off, .u-cursor-x.u-off, .u-cursor-y.u-off, .u-cursor-pt.u-off {display: none;}

Binary file not shown.

Binary file not shown.

Binary file not shown.

Binary file not shown.

View File

@@ -56,6 +56,7 @@
"option_last": "last",
"option_above": "above",
"option_below": "below",
"option_above_below": "above/below",
"option_equal": "is equal to",
"option_notequal": "not equal to",
"button_query": "Query",
@@ -110,6 +111,8 @@
"choose_alert_type": "Choose a type for the alert",
"metric_based_alert": "Metric based Alert",
"metric_based_alert_desc": "Send a notification when a condition occurs in the metric data.",
"anomaly_based_alert": "Anomaly based Alert",
"anomaly_based_alert_desc": "Send a notification when a condition occurs in the metric data.",
"log_based_alert": "Log-based Alert",
"log_based_alert_desc": "Send a notification when a condition occurs in the logs data.",
"traces_based_alert": "Trace-based Alert",

View File

@@ -0,0 +1,12 @@
{
"workspaceSuspended": "Your workspace is locked",
"gotQuestions": "Got Questions?",
"contactUs": "Contact Us",
"actionHeader": "Pay to continue",
"actionDescription": "Pay now to keep enjoying all the great features you've been using.",
"yourDataIsSafe": "Your data is safe with us until",
"actNow": "Act now to avoid any disruptions and continue where you left off.",
"contactAdmin": "Contact your admin to proceed with the upgrade.",
"continueMyJourney": "Settle your bill to continue",
"somethingWentWrong": "Something went wrong"
}

View File

@@ -0,0 +1,8 @@
{
"containers_visualization_message": "The ability to visualise containers is in active development and should be available to you soon.",
"processes_visualization_message": "The ability to visualise processes is in active development and should be available to you soon.",
"working_message": "We're working to extend infrastructure monitoring to take care of a bunch of different cases. Thank you for your patience.",
"waitlist_message": "Join the waitlist for early access.",
"waitlist_success_message": "We have received your request for early access. We will get back to you as soon as we launch the feature.",
"contact_support": "Contact Support"
}

View File

@@ -0,0 +1,24 @@
{
"metricGraphCategory": {
"brokerMetrics": {
"title": "Broker Metrics",
"description": "The Kafka Broker metrics here inform you of data loss/delay through unclean leader elections and network throughputs, as well as request fails through request purgatories and timeouts metrics"
},
"consumerMetrics": {
"title": "Consumer Metrics",
"description": "Kafka Consumer metrics provide insights into lag between message production and consumption, success rates and latency of message delivery, and the volume of data consumed."
},
"producerMetrics": {
"title": "Producer Metrics",
"description": "Kafka Producers send messages to brokers for storage and distribution by topic. These metrics inform you of the volume and rate of data sent, and the success rate of message delivery."
},
"brokerJVMMetrics": {
"title": "Broker JVM Metrics",
"description": "Kafka brokers are Java applications that expose JVM metrics to inform on the broker's system health. Garbage collection metrics like those below provide key insights into free memory, broker performance, and heap size. You need to enable new_gc_metrics for this section to populate."
},
"partitionMetrics": {
"title": "Partition Metrics",
"description": "Kafka partitions are the unit of parallelism in Kafka. These metrics inform you of the number of partitions per topic, the current offset of each partition, the oldest offset, and the number of in-sync replicas."
}
}
}

View File

@@ -1,30 +1,54 @@
{
"breadcrumb": "Messaging Queues",
"header": "Kafka / Overview",
"overview": {
"title": "Start sending data in as little as 20 minutes",
"subtitle": "Connect and Monitor Your Data Streams"
},
"configureConsumer": {
"title": "Configure Consumer",
"description": "Add consumer data sources to gain insights and enhance monitoring.",
"button": "Get Started"
},
"configureProducer": {
"title": "Configure Producer",
"description": "Add producer data sources to gain insights and enhance monitoring.",
"button": "Get Started"
},
"monitorKafka": {
"title": "Monitor kafka",
"description": "Add your Kafka source to gain insights and enhance activity tracking.",
"button": "Get Started"
},
"summarySection": {
"viewDetailsButton": "View Details"
},
"confirmModal": {
"content": "Before navigating to the details page, please make sure you have configured all the required setup to ensure correct data monitoring.",
"okText": "Proceed"
}
}
"breadcrumb": "Messaging Queues",
"header": "Kafka / Overview",
"overview": {
"title": "Start sending data in as little as 20 minutes",
"subtitle": "Connect and Monitor Your Data Streams"
},
"configureConsumer": {
"title": "Configure Consumer",
"description": "Add consumer data sources to gain insights and enhance monitoring.",
"button": "Get Started"
},
"configureProducer": {
"title": "Configure Producer",
"description": "Add producer data sources to gain insights and enhance monitoring.",
"button": "Get Started"
},
"monitorKafka": {
"title": "Monitor kafka",
"description": "Add your Kafka source to gain insights and enhance activity tracking.",
"button": "Get Started"
},
"summarySection": {
"viewDetailsButton": "View Details",
"consumer": {
"title": "Consumer lag view",
"description": "Connect and Monitor Your Data Streams"
},
"producer": {
"title": "Producer latency view",
"description": "Connect and Monitor Your Data Streams"
},
"partition": {
"title": "Partition Latency view",
"description": "Connect and Monitor Your Data Streams"
},
"dropRate": {
"title": "Drop Rate view",
"description": "Connect and Monitor Your Data Streams"
},
"metricPage": {
"title": "Metric View",
"description": "Connect and Monitor Your Data Streams"
}
},
"confirmModal": {
"content": "Before navigating to the details page, please make sure you have configured all the required setup to ensure correct data monitoring.",
"okText": "Proceed"
},
"overviewSummarySection": {
"title": "Monitor Your Data Streams",
"subtitle": "Monitor key Kafka metrics like consumer lag and latency to ensure efficient data flow and troubleshoot in real time."
}
}

View File

@@ -3,7 +3,7 @@
"alert_channels": "Alert Channels",
"organization_settings": "Organization Settings",
"ingestion_settings": "Ingestion Settings",
"api_keys": "Access Tokens",
"api_keys": "API Keys",
"my_settings": "My Settings",
"overview_metrics": "Overview Metrics",
"dbcall_metrics": "Database Calls",

View File

@@ -43,6 +43,7 @@
"option_last": "last",
"option_above": "above",
"option_below": "below",
"option_above_below": "above/below",
"option_equal": "is equal to",
"option_notequal": "not equal to",
"button_query": "Query",

View File

@@ -26,7 +26,7 @@
"MY_SETTINGS": "SigNoz | My Settings",
"ORG_SETTINGS": "SigNoz | Organization Settings",
"INGESTION_SETTINGS": "SigNoz | Ingestion Settings",
"API_KEYS": "SigNoz | Access Tokens",
"API_KEYS": "SigNoz | API Keys",
"SOMETHING_WENT_WRONG": "SigNoz | Something Went Wrong",
"UN_AUTHORIZED": "SigNoz | Unauthorized",
"NOT_FOUND": "SigNoz | Page Not Found",
@@ -37,8 +37,10 @@
"PASSWORD_RESET": "SigNoz | Password Reset",
"LIST_LICENSES": "SigNoz | List of Licenses",
"WORKSPACE_LOCKED": "SigNoz | Workspace Locked",
"WORKSPACE_SUSPENDED": "SigNoz | Workspace Suspended",
"SUPPORT": "SigNoz | Support",
"DEFAULT": "Open source Observability Platform | SigNoz",
"ALERT_HISTORY": "SigNoz | Alert Rule History",
"ALERT_OVERVIEW": "SigNoz | Alert Rule Overview"
"ALERT_OVERVIEW": "SigNoz | Alert Rule Overview",
"INFRASTRUCTURE_MONITORING_HOSTS": "SigNoz | Infra Monitoring"
}

View File

@@ -13,9 +13,12 @@
"button_no": "No",
"remove_label_confirm": "This action will remove all the labels. Do you want to proceed?",
"remove_label_success": "Labels cleared",
"alert_form_step1": "Step 1 - Define the metric",
"alert_form_step2": "Step 2 - Define Alert Conditions",
"alert_form_step3": "Step 3 - Alert Configuration",
"alert_form_step1": "Choose a detection method",
"alert_form_step2": "Define the metric",
"alert_form_step3": "Define Alert Conditions",
"alert_form_step4": "Alert Configuration",
"threshold_alert_desc": "An alert is triggered whenever a metric deviates from an expected threshold.",
"anomaly_detection_alert_desc": "An alert is triggered whenever a metric deviates from an expected pattern.",
"metric_query_max_limit": "Can not create query. You can create maximum of 5 queries",
"confirm_save_title": "Save Changes",
"confirm_save_content_part1": "Your alert built with",
@@ -35,6 +38,7 @@
"button_cancelchanges": "Cancel",
"button_discard": "Discard",
"text_condition1": "Send a notification when",
"text_condition1_anomaly": "Send notification when the observed value for",
"text_condition2": "the threshold",
"text_condition3": "during the last",
"option_1min": "1 min",
@@ -56,6 +60,7 @@
"option_last": "last",
"option_above": "above",
"option_below": "below",
"option_above_below": "above/below",
"option_equal": "is equal to",
"option_notequal": "not equal to",
"button_query": "Query",
@@ -109,7 +114,9 @@
"user_tooltip_more_help": "More details on how to create alerts",
"choose_alert_type": "Choose a type for the alert",
"metric_based_alert": "Metric based Alert",
"anomaly_based_alert": "Anomaly based Alert",
"metric_based_alert_desc": "Send a notification when a condition occurs in the metric data.",
"anomaly_based_alert_desc": "Send a notification when a condition occurs in the metric data.",
"log_based_alert": "Log-based Alert",
"log_based_alert_desc": "Send a notification when a condition occurs in the logs data.",
"traces_based_alert": "Trace-based Alert",

View File

@@ -1,3 +1,3 @@
{
"delete_confirm_message": "Are you sure you want to delete {{keyName}} token? Deleting a token is irreversible and cannot be undone."
"delete_confirm_message": "Are you sure you want to delete {{keyName}} key? Deleting a key is irreversible and cannot be undone."
}

View File

@@ -7,5 +7,5 @@
"save": "Save",
"edit": "Edit",
"logged_in": "Logged In",
"pending_data_placeholder": "Just a bit of patience, just a little bit's enough ⎯ we're getting your {{dataSource}}!"
"pending_data_placeholder": "Retrieving your {{dataSource}}!"
}

View File

@@ -0,0 +1,12 @@
{
"workspaceSuspended": "Your workspace is locked",
"gotQuestions": "Got Questions?",
"contactUs": "Contact Us",
"actionHeader": "Pay to continue",
"actionDescription": "Pay now to keep enjoying all the great features you've been using.",
"yourDataIsSafe": "Your data is safe with us until",
"actNow": "Act now to avoid any disruptions and continue where you left off.",
"contactAdmin": "Contact your admin to proceed with the upgrade.",
"continueMyJourney": "Settle your bill to continue",
"somethingWentWrong": "Something went wrong"
}

View File

@@ -0,0 +1,8 @@
{
"containers_visualization_message": "The ability to visualise containers is in active development and should be available to you soon.",
"processes_visualization_message": "The ability to visualise processes is in active development and should be available to you soon.",
"working_message": "We're working to extend infrastructure monitoring to take care of a bunch of different cases. Thank you for your patience.",
"waitlist_message": "Join the waitlist for early access.",
"waitlist_success_message": "We have received your request for early access. We will get back to you as soon as we launch the feature.",
"contact_support": "Contact Support"
}

View File

@@ -0,0 +1,24 @@
{
"metricGraphCategory": {
"brokerMetrics": {
"title": "Broker Metrics",
"description": "The Kafka Broker metrics here inform you of data loss/delay through unclean leader elections and network throughputs, as well as request fails through request purgatories and timeouts metrics"
},
"consumerMetrics": {
"title": "Consumer Metrics",
"description": "Kafka Consumer metrics provide insights into lag between message production and consumption, success rates and latency of message delivery, and the volume of data consumed."
},
"producerMetrics": {
"title": "Producer Metrics",
"description": "Kafka Producers send messages to brokers for storage and distribution by topic. These metrics inform you of the volume and rate of data sent, and the success rate of message delivery."
},
"brokerJVMMetrics": {
"title": "Broker JVM Metrics",
"description": "Kafka brokers are Java applications that expose JVM metrics to inform on the broker's system health. Garbage collection metrics like those below provide key insights into free memory, broker performance, and heap size. You need to enable new_gc_metrics for this section to populate."
},
"partitionMetrics": {
"title": "Partition Metrics",
"description": "Kafka partitions are the unit of parallelism in Kafka. These metrics inform you of the number of partitions per topic, the current offset of each partition, the oldest offset, and the number of in-sync replicas."
}
}
}

View File

@@ -1,30 +1,54 @@
{
"breadcrumb": "Messaging Queues",
"header": "Kafka / Overview",
"overview": {
"title": "Start sending data in as little as 20 minutes",
"subtitle": "Connect and Monitor Your Data Streams"
},
"configureConsumer": {
"title": "Configure Consumer",
"description": "Add consumer data sources to gain insights and enhance monitoring.",
"button": "Get Started"
},
"configureProducer": {
"title": "Configure Producer",
"description": "Add producer data sources to gain insights and enhance monitoring.",
"button": "Get Started"
},
"monitorKafka": {
"title": "Monitor kafka",
"description": "Add your Kafka source to gain insights and enhance activity tracking.",
"button": "Get Started"
},
"summarySection": {
"viewDetailsButton": "View Details"
},
"confirmModal": {
"content": "Before navigating to the details page, please make sure you have configured all the required setup to ensure correct data monitoring.",
"okText": "Proceed"
}
}
"breadcrumb": "Messaging Queues",
"header": "Kafka / Overview",
"overview": {
"title": "Start sending data in as little as 20 minutes",
"subtitle": "Connect and Monitor Your Data Streams"
},
"configureConsumer": {
"title": "Configure Consumer",
"description": "Add consumer data sources to gain insights and enhance monitoring.",
"button": "Get Started"
},
"configureProducer": {
"title": "Configure Producer",
"description": "Add producer data sources to gain insights and enhance monitoring.",
"button": "Get Started"
},
"monitorKafka": {
"title": "Monitor kafka",
"description": "Add your Kafka source to gain insights and enhance activity tracking.",
"button": "Get Started"
},
"summarySection": {
"viewDetailsButton": "View Details",
"consumer": {
"title": "Consumer lag view",
"description": "Connect and Monitor Your Data Streams"
},
"producer": {
"title": "Producer latency view",
"description": "Connect and Monitor Your Data Streams"
},
"partition": {
"title": "Partition Latency view",
"description": "Connect and Monitor Your Data Streams"
},
"dropRate": {
"title": "Drop Rate view",
"description": "Connect and Monitor Your Data Streams"
},
"metricPage": {
"title": "Metric View",
"description": "Connect and Monitor Your Data Streams"
}
},
"confirmModal": {
"content": "Before navigating to the details page, please make sure you have configured all the required setup to ensure correct data monitoring.",
"okText": "Proceed"
},
"overviewSummarySection": {
"title": "Monitor Your Data Streams",
"subtitle": "Monitor key Kafka metrics like consumer lag and latency to ensure efficient data flow and troubleshoot in real time."
}
}
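These locale strings are consumed through react-i18next, which this diff already imports elsewhere. A minimal sketch of how the new summarySection keys might be read in a component follows; the namespace name 'messagingQueuesKafkaOverview' and the component itself are assumptions for illustration, not code from this changeset.

import { useTranslation } from 'react-i18next';

function ConsumerLagSummaryCard(): JSX.Element {
	// namespace name is assumed; the keys mirror the JSON above
	const { t } = useTranslation('messagingQueuesKafkaOverview');
	return (
		<section>
			<h4>{t('summarySection.consumer.title')}</h4>
			<p>{t('summarySection.consumer.description')}</p>
			<button type="button">{t('summarySection.viewDetailsButton')}</button>
		</section>
	);
}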

View File

@@ -3,7 +3,7 @@
"alert_channels": "Alert Channels",
"organization_settings": "Organization Settings",
"ingestion_settings": "Ingestion Settings",
"api_keys": "Access Tokens",
"api_keys": "API Keys",
"my_settings": "My Settings",
"overview_metrics": "Overview Metrics",
"dbcall_metrics": "Database Calls",

View File

@@ -43,6 +43,7 @@
"option_last": "last",
"option_above": "above",
"option_below": "below",
"option_above_below": "above/below",
"option_equal": "is equal to",
"option_notequal": "not equal to",
"button_query": "Query",

View File

@@ -4,6 +4,7 @@
"SERVICE_METRICS": "SigNoz | Service Metrics",
"SERVICE_MAP": "SigNoz | Service Map",
"GET_STARTED": "SigNoz | Get Started",
"ONBOARDING": "SigNoz | Get Started",
"GET_STARTED_APPLICATION_MONITORING": "SigNoz | Get Started | APM",
"GET_STARTED_LOGS_MANAGEMENT": "SigNoz | Get Started | Logs",
"GET_STARTED_INFRASTRUCTURE_MONITORING": "SigNoz | Get Started | Infrastructure",
@@ -31,7 +32,7 @@
"MY_SETTINGS": "SigNoz | My Settings",
"ORG_SETTINGS": "SigNoz | Organization Settings",
"INGESTION_SETTINGS": "SigNoz | Ingestion Settings",
"API_KEYS": "SigNoz | Access Tokens",
"API_KEYS": "SigNoz | API Keys",
"SOMETHING_WENT_WRONG": "SigNoz | Something Went Wrong",
"UN_AUTHORIZED": "SigNoz | Unauthorized",
"NOT_FOUND": "SigNoz | Page Not Found",
@@ -44,6 +45,7 @@
"PASSWORD_RESET": "SigNoz | Password Reset",
"LIST_LICENSES": "SigNoz | List of Licenses",
"WORKSPACE_LOCKED": "SigNoz | Workspace Locked",
"WORKSPACE_SUSPENDED": "SigNoz | Workspace Suspended",
"SUPPORT": "SigNoz | Support",
"LOGS_SAVE_VIEWS": "SigNoz | Logs Saved Views",
"TRACES_SAVE_VIEWS": "SigNoz | Traces Saved Views",
@@ -52,5 +54,6 @@
"INTEGRATIONS": "SigNoz | Integrations",
"ALERT_HISTORY": "SigNoz | Alert Rule History",
"ALERT_OVERVIEW": "SigNoz | Alert Rule Overview",
"MESSAGING_QUEUES": "SigNoz | Messaging Queues"
"MESSAGING_QUEUES": "SigNoz | Messaging Queues",
"INFRASTRUCTURE_MONITORING_HOSTS": "SigNoz | Infra Monitoring"
}

View File

@@ -1,40 +1,45 @@
/* eslint-disable react-hooks/exhaustive-deps */
import getLocalStorageApi from 'api/browser/localstorage/get';
import loginApi from 'api/user/login';
import { Logout } from 'api/utils';
import Spinner from 'components/Spinner';
import setLocalStorageApi from 'api/browser/localstorage/set';
import getOrgUser from 'api/user/getOrgUser';
import { LOCALSTORAGE } from 'constants/localStorage';
import ROUTES from 'constants/routes';
import useLicense from 'hooks/useLicense';
import { useNotifications } from 'hooks/useNotifications';
import history from 'lib/history';
import { ReactChild, useEffect, useMemo } from 'react';
import { useTranslation } from 'react-i18next';
import { useDispatch, useSelector } from 'react-redux';
import { matchPath, Redirect, useLocation } from 'react-router-dom';
import { Dispatch } from 'redux';
import { AppState } from 'store/reducers';
import { getInitialUserTokenRefreshToken } from 'store/utils';
import AppActions from 'types/actions';
import { UPDATE_USER_IS_FETCH } from 'types/actions/app';
import AppReducer from 'types/reducer/app';
import { isEmpty } from 'lodash-es';
import { useAppContext } from 'providers/App/App';
import { ReactChild, useCallback, useEffect, useMemo, useState } from 'react';
import { useQuery } from 'react-query';
import { matchPath, useLocation } from 'react-router-dom';
import { LicenseState, LicenseStatus } from 'types/api/licensesV3/getActive';
import { Organization } from 'types/api/user/getOrganization';
import { isCloudUser } from 'utils/app';
import { routePermission } from 'utils/permission';
import routes, {
LIST_LICENSES,
oldNewRoutesMapping,
oldRoutes,
ROUTES_NOT_TO_BE_OVERRIDEN,
SUPPORT_ROUTE,
} from './routes';
import afterLogin from './utils';
function PrivateRoute({ children }: PrivateRouteProps): JSX.Element {
const location = useLocation();
const { pathname } = location;
const {
org,
orgPreferences,
user,
isLoggedIn: isLoggedInState,
isFetchingOrgPreferences,
licenses,
isFetchingLicenses,
activeLicenseV3,
isFetchingActiveLicenseV3,
} = useAppContext();
const mapRoutes = useMemo(
() =>
new Map(
[...routes, LIST_LICENSES].map((e) => {
[...routes, LIST_LICENSES, SUPPORT_ROUTE].map((e) => {
const currentPath = matchPath(pathname, {
path: e.path,
});
@@ -43,180 +48,177 @@ function PrivateRoute({ children }: PrivateRouteProps): JSX.Element {
),
[pathname],
);
const {
data: licensesData,
isFetching: isFetchingLicensesData,
} = useLicense();
const {
isUserFetching,
isUserFetchingError,
isLoggedIn: isLoggedInState,
} = useSelector<AppState, AppReducer>((state) => state.app);
const { t } = useTranslation(['common']);
const localStorageUserAuthToken = getInitialUserTokenRefreshToken();
const dispatch = useDispatch<Dispatch<AppActions>>();
const { notifications } = useNotifications();
const currentRoute = mapRoutes.get('current');
const isOldRoute = oldRoutes.indexOf(pathname) > -1;
const currentRoute = mapRoutes.get('current');
const isCloudUserVal = isCloudUser();
const isLocalStorageLoggedIn =
getLocalStorageApi(LOCALSTORAGE.IS_LOGGED_IN) === 'true';
const [orgData, setOrgData] = useState<Organization | undefined>(undefined);
const navigateToLoginIfNotLoggedIn = (isLoggedIn = isLoggedInState): void => {
dispatch({
type: UPDATE_USER_IS_FETCH,
payload: {
isUserFetching: false,
},
});
if (!isLoggedIn) {
history.push(ROUTES.LOGIN, { from: pathname });
}
};
const handleUserLoginIfTokenPresent = async (
key: keyof typeof ROUTES,
): Promise<void> => {
if (localStorageUserAuthToken?.refreshJwt) {
// localstorage token is present
// renew web access token
const response = await loginApi({
refreshToken: localStorageUserAuthToken?.refreshJwt,
});
if (response.statusCode === 200) {
const route = routePermission[key];
// get all resources and put them in redux
const userResponse = await afterLogin(
response.payload.userId,
response.payload.accessJwt,
response.payload.refreshJwt,
);
if (
userResponse &&
route &&
route.find((e) => e === userResponse.payload.role) === undefined
) {
history.push(ROUTES.UN_AUTHORIZED);
}
} else {
Logout();
notifications.error({
message: response.error || t('something_went_wrong'),
const { data: orgUsers, isFetching: isFetchingOrgUsers } = useQuery({
queryFn: () => {
if (orgData && orgData.id !== undefined) {
return getOrgUser({
orgId: orgData.id,
});
}
}
};
return undefined;
},
queryKey: ['getOrgUser'],
enabled: !isEmpty(orgData) && user.role === 'ADMIN',
});
const handlePrivateRoutes = async (
key: keyof typeof ROUTES,
): Promise<void> => {
const checkFirstTimeUser = useCallback((): boolean => {
const users = orgUsers?.payload || [];
const remainingUsers = users.filter(
(user) => user.email !== 'admin@signoz.cloud',
);
return remainingUsers.length === 1;
}, [orgUsers?.payload]);
useEffect(() => {
if (
localStorageUserAuthToken &&
localStorageUserAuthToken.refreshJwt &&
isUserFetching
isCloudUserVal &&
!isFetchingOrgPreferences &&
orgPreferences &&
!isFetchingOrgUsers &&
orgUsers &&
orgUsers.payload
) {
handleUserLoginIfTokenPresent(key);
} else {
// user does not have a localstorage refresh token
const isOnboardingComplete = orgPreferences?.find(
(preference: Record<string, any>) => preference.key === 'ORG_ONBOARDING',
)?.value;
navigateToLoginIfNotLoggedIn(isLocalStorageLoggedIn);
const isFirstUser = checkFirstTimeUser();
if (
isFirstUser &&
!isOnboardingComplete &&
// only redirect if the current route is allowed to be overridden by org onboarding
!ROUTES_NOT_TO_BE_OVERRIDEN.includes(pathname)
) {
history.push(ROUTES.ONBOARDING);
}
}
};
}, [
checkFirstTimeUser,
isCloudUserVal,
isFetchingOrgPreferences,
isFetchingOrgUsers,
orgPreferences,
orgUsers,
pathname,
]);
const navigateToWorkSpaceBlocked = (route: any): void => {
const { path } = route;
if (path && path !== ROUTES.WORKSPACE_LOCKED) {
history.push(ROUTES.WORKSPACE_LOCKED);
dispatch({
type: UPDATE_USER_IS_FETCH,
payload: {
isUserFetching: false,
},
});
}
};
useEffect(() => {
if (!isFetchingLicensesData) {
const shouldBlockWorkspace = licensesData?.payload?.workSpaceBlock;
if (!isFetchingLicenses) {
const currentRoute = mapRoutes.get('current');
const shouldBlockWorkspace = licenses?.workSpaceBlock;
if (shouldBlockWorkspace) {
if (shouldBlockWorkspace && currentRoute) {
navigateToWorkSpaceBlocked(currentRoute);
}
}
}, [isFetchingLicensesData]);
}, [isFetchingLicenses, licenses?.workSpaceBlock, mapRoutes, pathname]);
const navigateToWorkSpaceSuspended = (route: any): void => {
const { path } = route;
if (path && path !== ROUTES.WORKSPACE_SUSPENDED) {
history.push(ROUTES.WORKSPACE_SUSPENDED);
}
};
useEffect(() => {
if (!isFetchingActiveLicenseV3 && activeLicenseV3) {
const currentRoute = mapRoutes.get('current');
const shouldSuspendWorkspace =
activeLicenseV3.status === LicenseStatus.SUSPENDED &&
activeLicenseV3.state === LicenseState.PAYMENT_FAILED;
if (shouldSuspendWorkspace && currentRoute) {
navigateToWorkSpaceSuspended(currentRoute);
}
}
}, [isFetchingActiveLicenseV3, activeLicenseV3, mapRoutes, pathname]);
useEffect(() => {
if (org && org.length > 0 && org[0].id !== undefined) {
setOrgData(org[0]);
}
}, [org]);
// eslint-disable-next-line sonarjs/cognitive-complexity
useEffect(() => {
(async (): Promise<void> => {
try {
if (isOldRoute) {
const redirectUrl = oldNewRoutesMapping[pathname];
// if it is an old route navigate to the new route
if (isOldRoute) {
const redirectUrl = oldNewRoutesMapping[pathname];
const newLocation = {
...location,
pathname: redirectUrl,
};
history.replace(newLocation);
}
if (currentRoute) {
const { isPrivate, key } = currentRoute;
if (isPrivate && key !== String(ROUTES.WORKSPACE_LOCKED)) {
handlePrivateRoutes(key);
} else {
// no need to fetch the user; mark user fetching as false
if (getLocalStorageApi(LOCALSTORAGE.IS_LOGGED_IN) === 'true') {
history.push(ROUTES.APPLICATION);
}
dispatch({
type: UPDATE_USER_IS_FETCH,
payload: {
isUserFetching: false,
},
});
}
} else if (pathname === ROUTES.HOME_PAGE) {
// route to the application page instead of the root page
if (isLoggedInState) {
history.push(ROUTES.APPLICATION);
} else {
navigateToLoginIfNotLoggedIn();
const newLocation = {
...location,
pathname: redirectUrl,
};
history.replace(newLocation);
return;
}
// if the current route matched a known route, enforce its access rules
if (currentRoute) {
const { isPrivate, key } = currentRoute;
if (isPrivate) {
if (isLoggedInState) {
const route = routePermission[key];
if (route && route.find((e) => e === user.role) === undefined) {
history.push(ROUTES.UN_AUTHORIZED);
}
} else {
// not logged in, remember the route and redirect to login
navigateToLoginIfNotLoggedIn(isLocalStorageLoggedIn);
setLocalStorageApi(LOCALSTORAGE.UNAUTHENTICATED_ROUTE_HIT, pathname);
history.push(ROUTES.LOGIN);
}
} catch (error) {
// something went wrong
history.push(ROUTES.SOMETHING_WENT_WRONG);
} else if (isLoggedInState) {
const fromPathname = getLocalStorageApi(
LOCALSTORAGE.UNAUTHENTICATED_ROUTE_HIT,
);
if (fromPathname) {
history.push(fromPathname);
setLocalStorageApi(LOCALSTORAGE.UNAUTHENTICATED_ROUTE_HIT, '');
} else {
history.push(ROUTES.APPLICATION);
}
} else {
// do nothing: the unauthenticated routes are LOGIN and SIGNUP, and the LOGIN container
// takes care of routing to signup if setup is not completed
}
})();
}, [dispatch, isLoggedInState, currentRoute, licensesData]);
if (isUserFetchingError) {
return <Redirect to={ROUTES.SOMETHING_WENT_WRONG} />;
}
if (isUserFetching) {
return <Spinner tip="Loading..." />;
}
} else if (isLoggedInState) {
const fromPathname = getLocalStorageApi(
LOCALSTORAGE.UNAUTHENTICATED_ROUTE_HIT,
);
if (fromPathname) {
history.push(fromPathname);
setLocalStorageApi(LOCALSTORAGE.UNAUTHENTICATED_ROUTE_HIT, '');
} else {
history.push(ROUTES.APPLICATION);
}
} else {
setLocalStorageApi(LOCALSTORAGE.UNAUTHENTICATED_ROUTE_HIT, pathname);
history.push(ROUTES.LOGIN);
}
}, [
licenses,
isLoggedInState,
pathname,
user,
isOldRoute,
currentRoute,
location,
]);
// NOTE: disabling this rule as there is no need for a wrapper div
// eslint-disable-next-line react/jsx-no-useless-fragment

View File

@@ -1,7 +1,6 @@
import { ConfigProvider } from 'antd';
import getLocalStorageApi from 'api/browser/localstorage/get';
import setLocalStorageApi from 'api/browser/localstorage/set';
import logEvent from 'api/common/logEvent';
import NotFound from 'components/NotFound';
import Spinner from 'components/Spinner';
import { FeatureKeys } from 'constants/features';
@@ -10,27 +9,21 @@ import ROUTES from 'constants/routes';
import AppLayout from 'container/AppLayout';
import useAnalytics from 'hooks/analytics/useAnalytics';
import { KeyboardHotkeysProvider } from 'hooks/hotkeys/useKeyboardHotkeys';
import { useIsDarkMode, useThemeConfig } from 'hooks/useDarkMode';
import { THEME_MODE } from 'hooks/useDarkMode/constant';
import useFeatureFlags from 'hooks/useFeatureFlag';
import useGetFeatureFlag from 'hooks/useGetFeatureFlag';
import useLicense, { LICENSE_PLAN_KEY } from 'hooks/useLicense';
import { useThemeConfig } from 'hooks/useDarkMode';
import { LICENSE_PLAN_KEY } from 'hooks/useLicense';
import { NotificationProvider } from 'hooks/useNotifications';
import { ResourceProvider } from 'hooks/useResourceAttribute';
import history from 'lib/history';
import { identity, pick, pickBy } from 'lodash-es';
import { identity, pickBy } from 'lodash-es';
import posthog from 'posthog-js';
import AlertRuleProvider from 'providers/Alert';
import { useAppContext } from 'providers/App/App';
import { IUser } from 'providers/App/types';
import { DashboardProvider } from 'providers/Dashboard/Dashboard';
import { QueryBuilderProvider } from 'providers/QueryBuilder';
import { Suspense, useEffect, useState } from 'react';
import { useDispatch, useSelector } from 'react-redux';
import { Route, Router, Switch } from 'react-router-dom';
import { Dispatch } from 'redux';
import { AppState } from 'store/reducers';
import AppActions from 'types/actions';
import { UPDATE_FEATURE_FLAG_RESPONSE } from 'types/actions/app';
import AppReducer, { User } from 'types/reducer/app';
import { Suspense, useCallback, useEffect, useState } from 'react';
import { Redirect, Route, Router, Switch } from 'react-router-dom';
import { CompatRouter } from 'react-router-dom-v5-compat';
import { extractDomain, isCloudUser, isEECloudUser } from 'utils/app';
import PrivateRoute from './Private';
@@ -42,14 +35,20 @@ import defaultRoutes, {
function App(): JSX.Element {
const themeConfig = useThemeConfig();
const { data: licenseData } = useLicense();
const {
licenses,
user,
isFetchingUser,
isFetchingLicenses,
isFetchingFeatureFlags,
userFetchError,
licensesFetchError,
featureFlagsFetchError,
isLoggedIn: isLoggedInState,
featureFlags,
org,
} = useAppContext();
const [routes, setRoutes] = useState<AppRoutes[]>(defaultRoutes);
const { role, isLoggedIn: isLoggedInState, user, org } = useSelector<
AppState,
AppReducer
>((state) => state.app);
const dispatch = useDispatch<Dispatch<AppActions>>();
const { trackPageView } = useAnalytics();
@@ -57,218 +56,235 @@ function App(): JSX.Element {
const isCloudUserVal = isCloudUser();
const isDarkMode = useIsDarkMode();
const enableAnalytics = useCallback(
(user: IUser): void => {
// wait for the required data to load before initializing anything
if (!isFetchingLicenses && licenses && org) {
const orgName =
org && Array.isArray(org) && org.length > 0 ? org[0].name : '';
const isChatSupportEnabled =
useFeatureFlags(FeatureKeys.CHAT_SUPPORT)?.active || false;
const { name, email, role } = user;
const isPremiumSupportEnabled =
useFeatureFlags(FeatureKeys.PREMIUM_SUPPORT)?.active || false;
const identifyPayload = {
email,
name,
company_name: orgName,
role,
source: 'signoz-ui',
};
const featureResponse = useGetFeatureFlag((allFlags) => {
dispatch({
type: UPDATE_FEATURE_FLAG_RESPONSE,
payload: {
featureFlag: allFlags,
refetch: featureResponse.refetch,
},
});
const sanitizedIdentifyPayload = pickBy(identifyPayload, identity);
const domain = extractDomain(email);
const hostNameParts = hostname.split('.');
const isOnboardingEnabled =
allFlags.find((flag) => flag.name === FeatureKeys.ONBOARDING)?.active ||
false;
const groupTraits = {
name: orgName,
tenant_id: hostNameParts[0],
data_region: hostNameParts[1],
tenant_url: hostname,
company_domain: domain,
source: 'signoz-ui',
};
if (!isOnboardingEnabled || !isCloudUserVal) {
const newRoutes = routes.filter(
(route) => route?.path !== ROUTES.GET_STARTED,
);
window.analytics.identify(email, sanitizedIdentifyPayload);
window.analytics.group(domain, groupTraits);
setRoutes(newRoutes);
}
});
posthog?.identify(email, {
email,
name,
orgName,
tenant_id: hostNameParts[0],
data_region: hostNameParts[1],
tenant_url: hostname,
company_domain: domain,
source: 'signoz-ui',
isPaidUser: !!licenses?.trialConvertedToSubscription,
});
const isOnBasicPlan =
licenseData?.payload?.licenses?.some(
(license) =>
license.isCurrent && license.planKey === LICENSE_PLAN_KEY.BASIC_PLAN,
) || licenseData?.payload?.licenses === null;
const enableAnalytics = (user: User): void => {
const orgName =
org && Array.isArray(org) && org.length > 0 ? org[0].name : '';
const { name, email } = user;
const identifyPayload = {
email,
name,
company_name: orgName,
role,
source: 'signoz-ui',
};
const sanitizedIdentifyPayload = pickBy(identifyPayload, identity);
const domain = extractDomain(email);
const hostNameParts = hostname.split('.');
const groupTraits = {
name: orgName,
tenant_id: hostNameParts[0],
data_region: hostNameParts[1],
tenant_url: hostname,
company_domain: domain,
source: 'signoz-ui',
};
window.analytics.identify(email, sanitizedIdentifyPayload);
window.analytics.group(domain, groupTraits);
posthog?.identify(email, {
email,
name,
orgName,
tenant_id: hostNameParts[0],
data_region: hostNameParts[1],
tenant_url: hostname,
company_domain: domain,
source: 'signoz-ui',
isPaidUser: !!licenseData?.payload?.trialConvertedToSubscription,
});
posthog?.group('company', domain, {
name: orgName,
tenant_id: hostNameParts[0],
data_region: hostNameParts[1],
tenant_url: hostname,
company_domain: domain,
source: 'signoz-ui',
isPaidUser: !!licenseData?.payload?.trialConvertedToSubscription,
});
};
posthog?.group('company', domain, {
name: orgName,
tenant_id: hostNameParts[0],
data_region: hostNameParts[1],
tenant_url: hostname,
company_domain: domain,
source: 'signoz-ui',
isPaidUser: !!licenses?.trialConvertedToSubscription,
});
}
},
[hostname, isFetchingLicenses, licenses, org],
);
// eslint-disable-next-line sonarjs/cognitive-complexity
useEffect(() => {
const isIdentifiedUser = getLocalStorageApi(LOCALSTORAGE.IS_IDENTIFIED_USER);
if (
isLoggedInState &&
!isFetchingLicenses &&
licenses &&
!isFetchingUser &&
user &&
user.userId &&
user.email &&
!isIdentifiedUser
!!user.email
) {
setLocalStorageApi(LOCALSTORAGE.IS_IDENTIFIED_USER, 'true');
}
const isOnBasicPlan =
licenses.licenses?.some(
(license) =>
license.isCurrent && license.planKey === LICENSE_PLAN_KEY.BASIC_PLAN,
) || licenses.licenses === null;
if (
isOnBasicPlan ||
(isLoggedInState && role && role !== 'ADMIN') ||
!(isCloudUserVal || isEECloudUser())
) {
const newRoutes = routes.filter((route) => route?.path !== ROUTES.BILLING);
setRoutes(newRoutes);
}
const isIdentifiedUser = getLocalStorageApi(LOCALSTORAGE.IS_IDENTIFIED_USER);
if (isCloudUserVal || isEECloudUser()) {
const newRoutes = [...routes, SUPPORT_ROUTE];
if (isLoggedInState && user && user.id && user.email && !isIdentifiedUser) {
setLocalStorageApi(LOCALSTORAGE.IS_IDENTIFIED_USER, 'true');
}
setRoutes(newRoutes);
} else {
const newRoutes = [...routes, LIST_LICENSES];
setRoutes(newRoutes);
}
// eslint-disable-next-line react-hooks/exhaustive-deps
}, [isLoggedInState, isOnBasicPlan, user]);
useEffect(() => {
trackPageView(pathname);
// eslint-disable-next-line react-hooks/exhaustive-deps
}, [pathname]);
useEffect(() => {
const showAddCreditCardModal =
!isPremiumSupportEnabled &&
!licenseData?.payload?.trialConvertedToSubscription;
if (isLoggedInState && isChatSupportEnabled && !showAddCreditCardModal) {
window.Intercom('boot', {
app_id: process.env.INTERCOM_APP_ID,
email: user?.email || '',
name: user?.name || '',
});
let updatedRoutes = defaultRoutes;
// if the user is a cloud user
if (isCloudUserVal || isEECloudUser()) {
// if the user is on the basic plan, remove the billing route
if (isOnBasicPlan) {
updatedRoutes = updatedRoutes.filter(
(route) => route?.path !== ROUTES.BILLING,
);
}
// always add support route for cloud users
updatedRoutes = [...updatedRoutes, SUPPORT_ROUTE];
} else {
// if not a cloud user, remove billing and add the list licenses route
updatedRoutes = updatedRoutes.filter(
(route) => route?.path !== ROUTES.BILLING,
);
updatedRoutes = [...updatedRoutes, LIST_LICENSES];
}
setRoutes(updatedRoutes);
}
}, [
isLoggedInState,
isChatSupportEnabled,
user,
licenseData,
isPremiumSupportEnabled,
licenses,
isCloudUserVal,
isFetchingLicenses,
isFetchingUser,
]);
useEffect(() => {
if (user && user?.email && user?.userId && user?.name) {
try {
const isThemeAnalyticsSent = getLocalStorageApi(
LOCALSTORAGE.THEME_ANALYTICS_V1,
);
if (!isThemeAnalyticsSent) {
logEvent('Theme Analytics', {
theme: isDarkMode ? THEME_MODE.DARK : THEME_MODE.LIGHT,
user: pick(user, ['email', 'userId', 'name']),
org,
});
setLocalStorageApi(LOCALSTORAGE.THEME_ANALYTICS_V1, 'true');
}
} catch {
console.error('Failed to parse local storage theme analytics event');
}
if (pathname === ROUTES.ONBOARDING) {
window.Intercom('update', {
hide_default_launcher: true,
});
} else {
window.Intercom('update', {
hide_default_launcher: false,
});
}
if (isCloudUserVal && user && user.email) {
enableAnalytics(user);
}
// eslint-disable-next-line react-hooks/exhaustive-deps
}, [user]);
trackPageView(pathname);
}, [pathname, trackPageView]);
useEffect(() => {
console.info('We are hiring! https://jobs.gem.com/signoz');
}, []);
// feature flags shouldn't be loading, and either featureFlags or featureFlagsFetchError should be set, indicating the request is complete.
// licenses must also be present. there is no loading or error check for licenses because they are mandatory; if they are missing we route
// to something went wrong, which would ideally need a reload.
if (
!isFetchingFeatureFlags &&
(featureFlags || featureFlagsFetchError) &&
licenses
) {
let isChatSupportEnabled = false;
let isPremiumSupportEnabled = false;
if (featureFlags && featureFlags.length > 0) {
isChatSupportEnabled =
featureFlags.find((flag) => flag.name === FeatureKeys.CHAT_SUPPORT)
?.active || false;
isPremiumSupportEnabled =
featureFlags.find((flag) => flag.name === FeatureKeys.PREMIUM_SUPPORT)
?.active || false;
}
const showAddCreditCardModal =
!isPremiumSupportEnabled && !licenses.trialConvertedToSubscription;
if (isLoggedInState && isChatSupportEnabled && !showAddCreditCardModal) {
window.Intercom('boot', {
app_id: process.env.INTERCOM_APP_ID,
email: user?.email || '',
name: user?.name || '',
});
}
}
}, [
isLoggedInState,
user,
pathname,
licenses?.trialConvertedToSubscription,
featureFlags,
isFetchingFeatureFlags,
featureFlagsFetchError,
licenses,
]);
useEffect(() => {
if (!isFetchingUser && isCloudUserVal && user && user.email) {
enableAnalytics(user);
}
}, [user, isFetchingUser, isCloudUserVal, enableAnalytics]);
// if the user is in logged in state
if (isLoggedInState) {
if (pathname === ROUTES.HOME_PAGE) {
history.replace(ROUTES.APPLICATION);
}
// if the setup calls are loading then return a spinner
if (isFetchingLicenses || isFetchingUser || isFetchingFeatureFlags) {
return <Spinner tip="Loading..." />;
}
// if the required calls fail then return a something went wrong error
// this check must come before the missing-data check because on an error the data will never load and we
// would otherwise be stuck in an indefinite loading state
if (userFetchError || licensesFetchError) {
return <Redirect to={ROUTES.SOMETHING_WENT_WRONG} />;
}
// if all of the data is not yet set, return a spinner; this is required because there is a gap between the loading states flipping and the data being set
if (!licenses || !user.email || !featureFlags) {
return <Spinner tip="Loading..." />;
}
}
return (
<ConfigProvider theme={themeConfig}>
<Router history={history}>
<NotificationProvider>
<PrivateRoute>
<ResourceProvider>
<QueryBuilderProvider>
<DashboardProvider>
<KeyboardHotkeysProvider>
<AlertRuleProvider>
<AppLayout>
<Suspense fallback={<Spinner size="large" tip="Loading..." />}>
<Switch>
{routes.map(({ path, component, exact }) => (
<Route
key={`${path}`}
exact={exact}
path={path}
component={component}
/>
))}
<CompatRouter>
<NotificationProvider>
<PrivateRoute>
<ResourceProvider>
<QueryBuilderProvider>
<DashboardProvider>
<KeyboardHotkeysProvider>
<AlertRuleProvider>
<AppLayout>
<Suspense fallback={<Spinner size="large" tip="Loading..." />}>
<Switch>
{routes.map(({ path, component, exact }) => (
<Route
key={`${path}`}
exact={exact}
path={path}
component={component}
/>
))}
<Route path="*" component={NotFound} />
</Switch>
</Suspense>
</AppLayout>
</AlertRuleProvider>
</KeyboardHotkeysProvider>
</DashboardProvider>
</QueryBuilderProvider>
</ResourceProvider>
</PrivateRoute>
</NotificationProvider>
<Route path="*" component={NotFound} />
</Switch>
</Suspense>
</AppLayout>
</AlertRuleProvider>
</KeyboardHotkeysProvider>
</DashboardProvider>
</QueryBuilderProvider>
</ResourceProvider>
</PrivateRoute>
</NotificationProvider>
</CompatRouter>
</Router>
</ConfigProvider>
);

View File

@@ -66,6 +66,10 @@ export const Onboarding = Loadable(
() => import(/* webpackChunkName: "Onboarding" */ 'pages/OnboardingPage'),
);
export const OrgOnboarding = Loadable(
() => import(/* webpackChunkName: "OrgOnboarding" */ 'pages/OrgOnboarding'),
);
export const DashboardPage = Loadable(
() =>
import(/* webpackChunkName: "DashboardPage" */ 'pages/DashboardsListPage'),
@@ -202,6 +206,13 @@ export const WorkspaceBlocked = Loadable(
import(/* webpackChunkName: "WorkspaceLocked" */ 'pages/WorkspaceLocked'),
);
export const WorkspaceSuspended = Loadable(
() =>
import(
/* webpackChunkName: "WorkspaceSuspended" */ 'pages/WorkspaceSuspended/WorkspaceSuspended'
),
);
export const ShortcutsPage = Loadable(
() => import(/* webpackChunkName: "ShortcutsPage" */ 'pages/Shortcuts'),
);
@@ -224,3 +235,10 @@ export const MQDetailPage = Loadable(
/* webpackChunkName: "MQDetailPage" */ 'pages/MessagingQueues/MQDetailPage'
),
);
export const InfrastructureMonitoring = Loadable(
() =>
import(
/* webpackChunkName: "InfrastructureMonitoring" */ 'pages/InfrastructureMonitoring'
),
);

View File

@@ -15,6 +15,7 @@ import {
EditAlertChannelsAlerts,
EditRulesPage,
ErrorDetails,
InfrastructureMonitoring,
IngestionSettings,
InstalledIntegrations,
LicensePage,
@@ -32,6 +33,7 @@ import {
OldLogsExplorer,
Onboarding,
OrganizationSettings,
OrgOnboarding,
PasswordReset,
PipelinePage,
ServiceMapPage,
@@ -51,6 +53,7 @@ import {
UnAuthorized,
UsageExplorerPage,
WorkspaceBlocked,
WorkspaceSuspended,
} from './pageComponents';
const routes: AppRoutes[] = [
@@ -68,6 +71,13 @@ const routes: AppRoutes[] = [
isPrivate: true,
key: 'GET_STARTED',
},
{
path: ROUTES.ONBOARDING,
exact: false,
component: OrgOnboarding,
isPrivate: true,
key: 'ONBOARDING',
},
{
component: LogsIndexToFields,
path: ROUTES.LOGS_INDEX_FIELDS,
@@ -355,6 +365,13 @@ const routes: AppRoutes[] = [
isPrivate: true,
key: 'WORKSPACE_LOCKED',
},
{
path: ROUTES.WORKSPACE_SUSPENDED,
exact: true,
component: WorkspaceSuspended,
isPrivate: true,
key: 'WORKSPACE_SUSPENDED',
},
{
path: ROUTES.SHORTCUTS,
exact: true,
@@ -383,6 +400,13 @@ const routes: AppRoutes[] = [
key: 'MESSAGING_QUEUES_DETAIL',
isPrivate: true,
},
{
path: ROUTES.INFRASTRUCTURE_MONITORING_HOSTS,
exact: true,
component: InfrastructureMonitoring,
key: 'INFRASTRUCTURE_MONITORING_HOSTS',
isPrivate: true,
},
];
export const SUPPORT_ROUTE: AppRoutes = {
@@ -403,24 +427,27 @@ export const LIST_LICENSES: AppRoutes = {
export const oldRoutes = [
'/pipelines',
'/logs/old-logs-explorer',
'/logs-explorer',
'/logs-explorer/live',
'/logs-save-views',
'/traces-save-views',
'/settings/api-keys',
'/settings/access-tokens',
];
export const oldNewRoutesMapping: Record<string, string> = {
'/pipelines': '/logs/pipelines',
'/logs/old-logs-explorer': '/logs/old-logs-explorer',
'/logs-explorer': '/logs/logs-explorer',
'/logs-explorer/live': '/logs/logs-explorer/live',
'/logs-save-views': '/logs/saved-views',
'/traces-save-views': '/traces/saved-views',
'/settings/api-keys': '/settings/access-tokens',
'/settings/access-tokens': '/settings/api-keys',
};
export const ROUTES_NOT_TO_BE_OVERRIDEN: string[] = [
ROUTES.WORKSPACE_LOCKED,
ROUTES.WORKSPACE_SUSPENDED,
];
export interface AppRoutes {
component: RouteProps['component'];
path: RouteProps['path'];

View File

@@ -1,92 +1,28 @@
import getLocalStorageApi from 'api/browser/localstorage/get';
import setLocalStorageApi from 'api/browser/localstorage/set';
import getUserApi from 'api/user/getUser';
import { Logout } from 'api/utils';
import { LOCALSTORAGE } from 'constants/localStorage';
import store from 'store';
import AppActions from 'types/actions';
import {
LOGGED_IN,
UPDATE_USER,
UPDATE_USER_ACCESS_REFRESH_ACCESS_TOKEN,
UPDATE_USER_IS_FETCH,
} from 'types/actions/app';
import { SuccessResponse } from 'types/api';
import { PayloadProps } from 'types/api/user/getUser';
const afterLogin = async (
const afterLogin = (
userId: string,
authToken: string,
refreshToken: string,
): Promise<SuccessResponse<PayloadProps> | undefined> => {
interceptorRejected?: boolean,
): void => {
setLocalStorageApi(LOCALSTORAGE.AUTH_TOKEN, authToken);
setLocalStorageApi(LOCALSTORAGE.REFRESH_AUTH_TOKEN, refreshToken);
setLocalStorageApi(LOCALSTORAGE.USER_ID, userId);
setLocalStorageApi(LOCALSTORAGE.IS_LOGGED_IN, 'true');
store.dispatch<AppActions>({
type: UPDATE_USER_ACCESS_REFRESH_ACCESS_TOKEN,
payload: {
accessJwt: authToken,
refreshJwt: refreshToken,
},
});
const [getUserResponse] = await Promise.all([
getUserApi({
userId,
token: authToken,
}),
]);
if (getUserResponse.statusCode === 200 && getUserResponse.payload) {
store.dispatch<AppActions>({
type: LOGGED_IN,
payload: {
isLoggedIn: true,
},
});
const { payload } = getUserResponse;
store.dispatch<AppActions>({
type: UPDATE_USER,
payload: {
ROLE: payload.role,
email: payload.email,
name: payload.name,
orgName: payload.organization,
profilePictureURL: payload.profilePictureURL,
userId: payload.id,
orgId: payload.orgId,
userFlags: payload.flags,
},
});
const isLoggedInLocalStorage = getLocalStorageApi(LOCALSTORAGE.IS_LOGGED_IN);
if (isLoggedInLocalStorage === null) {
setLocalStorageApi(LOCALSTORAGE.IS_LOGGED_IN, 'true');
}
store.dispatch({
type: UPDATE_USER_IS_FETCH,
payload: {
isUserFetching: false,
},
});
return getUserResponse;
if (!interceptorRejected) {
window.dispatchEvent(
new CustomEvent('AFTER_LOGIN', {
detail: {
accessJWT: authToken,
refreshJWT: refreshToken,
id: userId,
},
}),
);
}
store.dispatch({
type: UPDATE_USER_IS_FETCH,
payload: {
isUserFetching: false,
},
});
Logout();
return undefined;
};
export default afterLogin;
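The rewritten afterLogin no longer hydrates redux; it persists the tokens and broadcasts an AFTER_LOGIN CustomEvent instead. Below is a minimal sketch of a consumer, assuming a window-level listener; the handler is illustrative, and where the real listener is registered is an assumption here.

// Hypothetical consumer of the AFTER_LOGIN event dispatched above
interface AfterLoginDetail {
	accessJWT: string;
	refreshJWT: string;
	id: string;
}

window.addEventListener('AFTER_LOGIN', (event: Event): void => {
	const { accessJWT, refreshJWT, id } = (event as CustomEvent<AfterLoginDetail>)
		.detail;
	// rehydrate in-memory auth state from the tokens afterLogin just persisted
	console.info('AFTER_LOGIN received for user', id, Boolean(accessJWT && refreshJWT));
});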

View File

@@ -4,6 +4,7 @@ export const apiV2 = '/api/v2/';
export const apiV3 = '/api/v3/';
export const apiV4 = '/api/v4/';
export const gatewayApiV1 = '/api/gateway/v1/';
export const gatewayApiV2 = '/api/gateway/v2/';
export const apiAlertManager = '/api/alertmanager/';
export default apiV1;

View File

@@ -7,7 +7,6 @@ import afterLogin from 'AppRoutes/utils';
import axios, { AxiosResponse, InternalAxiosRequestConfig } from 'axios';
import { ENVIRONMENT } from 'constants/env';
import { LOCALSTORAGE } from 'constants/localStorage';
import store from 'store';
import apiV1, {
apiAlertManager,
@@ -15,6 +14,7 @@ import apiV1, {
apiV3,
apiV4,
gatewayApiV1,
gatewayApiV2,
} from './apiV1';
import { Logout } from './utils';
@@ -25,10 +25,7 @@ const interceptorsResponse = (
const interceptorsRequestResponse = (
value: InternalAxiosRequestConfig,
): InternalAxiosRequestConfig => {
const token =
store.getState().app.user?.accessJwt ||
getLocalStorageApi(LOCALSTORAGE.AUTH_TOKEN) ||
'';
const token = getLocalStorageApi(LOCALSTORAGE.AUTH_TOKEN) || '';
if (value && value.headers) {
value.headers.Authorization = token ? `Bearer ${token}` : '';
@@ -46,41 +43,36 @@ const interceptorRejected = async (
// on a 401 (except for /login), attempt a token refresh and retry the request
if (response.status === 401 && response.config.url !== '/login') {
const response = await loginApi({
refreshToken: store.getState().app.user?.refreshJwt,
refreshToken: getLocalStorageApi(LOCALSTORAGE.REFRESH_AUTH_TOKEN) || '',
});
if (response.statusCode === 200) {
const user = await afterLogin(
afterLogin(
response.payload.userId,
response.payload.accessJwt,
response.payload.refreshJwt,
true,
);
if (user) {
const reResponse = await axios(
`${value.config.baseURL}${value.config.url?.substring(1)}`,
{
method: value.config.method,
headers: {
...value.config.headers,
Authorization: `Bearer ${response.payload.accessJwt}`,
},
data: {
...JSON.parse(value.config.data || '{}'),
},
const reResponse = await axios(
`${value.config.baseURL}${value.config.url?.substring(1)}`,
{
method: value.config.method,
headers: {
...value.config.headers,
Authorization: `Bearer ${response.payload.accessJwt}`,
},
);
data: {
...JSON.parse(value.config.data || '{}'),
},
},
);
if (reResponse.status === 200) {
return await Promise.resolve(reResponse);
}
Logout();
return await Promise.reject(reResponse);
if (reResponse.status === 200) {
return await Promise.resolve(reResponse);
}
Logout();
return await Promise.reject(value);
return await Promise.reject(reResponse);
}
Logout();
}
@@ -169,6 +161,19 @@ GatewayApiV1Instance.interceptors.response.use(
GatewayApiV1Instance.interceptors.request.use(interceptorsRequestResponse);
//
// gateway Api V2
export const GatewayApiV2Instance = axios.create({
baseURL: `${ENVIRONMENT.baseURL}${gatewayApiV2}`,
});
GatewayApiV2Instance.interceptors.response.use(
interceptorsResponse,
interceptorRejected,
);
GatewayApiV2Instance.interceptors.request.use(interceptorsRequestResponse);
//
AxiosAlertManagerInstance.interceptors.response.use(
interceptorsResponse,
interceptorRejected,

View File

@@ -0,0 +1,37 @@
import { ApiBaseInstance } from 'api';
import { ErrorResponseHandler } from 'api/ErrorResponseHandler';
import { AxiosError, AxiosResponse } from 'axios';
import { baseAutoCompleteIdKeysOrder } from 'constants/queryBuilder';
import { createIdFromObjectFields } from 'lib/createIdFromObjectFields';
import { ErrorResponse, SuccessResponse } from 'types/api';
import {
BaseAutocompleteData,
IQueryAutocompleteResponse,
} from 'types/api/queryBuilder/queryAutocompleteResponse';
export const getHostAttributeKeys = async (
searchText = '',
): Promise<SuccessResponse<IQueryAutocompleteResponse> | ErrorResponse> => {
try {
const response: AxiosResponse<{
data: IQueryAutocompleteResponse;
}> = await ApiBaseInstance.get(
`/hosts/attribute_keys?dataSource=metrics&searchText=${searchText}`,
);
const payload: BaseAutocompleteData[] =
response.data.data.attributeKeys?.map(({ id: _, ...item }) => ({
...item,
id: createIdFromObjectFields(item, baseAutoCompleteIdKeysOrder),
})) || [];
return {
statusCode: 200,
error: null,
message: response.statusText,
payload: { attributeKeys: payload },
};
} catch (e) {
return ErrorResponseHandler(e as AxiosError);
}
};

View File

@@ -0,0 +1,77 @@
import { ApiBaseInstance } from 'api';
import { ErrorResponseHandler } from 'api/ErrorResponseHandler';
import { AxiosError } from 'axios';
import { ErrorResponse, SuccessResponse } from 'types/api';
import { BaseAutocompleteData } from 'types/api/queryBuilder/queryAutocompleteResponse';
import { TagFilter } from 'types/api/queryBuilder/queryBuilderData';
export interface HostListPayload {
filters: TagFilter;
groupBy: BaseAutocompleteData[];
offset?: number;
limit?: number;
orderBy?: {
columnName: string;
order: 'asc' | 'desc';
};
}
export interface TimeSeriesValue {
timestamp: number;
value: string;
}
export interface TimeSeries {
labels: Record<string, string>;
labelsArray: Array<Record<string, string>>;
values: TimeSeriesValue[];
}
export interface HostData {
hostName: string;
active: boolean;
os: string;
cpu: number;
cpuTimeSeries: TimeSeries;
memory: number;
memoryTimeSeries: TimeSeries;
wait: number;
waitTimeSeries: TimeSeries;
load15: number;
load15TimeSeries: TimeSeries;
}
export interface HostListResponse {
status: string;
data: {
type: string;
records: HostData[];
groups: null;
total: number;
sentAnyHostMetricsData: boolean;
isSendingK8SAgentMetrics: boolean;
};
}
export const getHostLists = async (
props: HostListPayload,
signal?: AbortSignal,
headers?: Record<string, string>,
): Promise<SuccessResponse<HostListResponse> | ErrorResponse> => {
try {
const response = await ApiBaseInstance.post('/hosts/list', props, {
signal,
headers,
});
return {
statusCode: 200,
error: null,
message: 'Success',
payload: response.data,
params: props,
};
} catch (error) {
return ErrorResponseHandler(error as AxiosError);
}
};
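A minimal usage sketch for getHostLists with react-query, which this diff already uses; the component, the import path, and the empty TagFilter shape are assumptions for illustration.

import { useQuery } from 'react-query';
// import path assumed for illustration
import { getHostLists, HostListPayload } from 'api/infraMonitoring/getHostLists';

const payload: HostListPayload = {
	// empty filter; the { items, op } shape is assumed from the TagFilter type
	filters: { items: [], op: 'AND' },
	groupBy: [],
	limit: 10,
	orderBy: { columnName: 'cpu', order: 'desc' },
};

function HostCount(): JSX.Element {
	const { data, isLoading } = useQuery(['hostList', payload], () =>
		getHostLists(payload),
	);
	if (isLoading) return <span>Loading hosts...</span>;
	const total =
		data?.statusCode === 200 && data.payload ? data.payload.data.total : 0;
	return <span>{total} hosts</span>;
}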

View File

@@ -0,0 +1,38 @@
import { ApiBaseInstance } from 'api';
import { ErrorResponseHandler } from 'api/ErrorResponseHandler';
import { AxiosError } from 'axios';
import createQueryParams from 'lib/createQueryParams';
import { ErrorResponse, SuccessResponse } from 'types/api';
import {
IAttributeValuesResponse,
IGetAttributeValuesPayload,
} from 'types/api/queryBuilder/getAttributesValues';
export const getInfraAttributesValues = async ({
dataSource,
attributeKey,
filterAttributeKeyDataType,
tagType,
searchText,
}: IGetAttributeValuesPayload): Promise<
SuccessResponse<IAttributeValuesResponse> | ErrorResponse
> => {
try {
const response = await ApiBaseInstance.get(
`/hosts/attribute_values?${createQueryParams({
dataSource,
attributeKey,
searchText,
})}&filterAttributeKeyDataType=${filterAttributeKeyDataType}&tagType=${tagType}`,
);
return {
statusCode: 200,
error: null,
message: response.data.status,
payload: response.data.data,
};
} catch (error) {
return ErrorResponseHandler(error as AxiosError);
}
};

View File

@@ -1,24 +1,18 @@
import { ApiV2Instance as axios } from 'api';
import { ErrorResponseHandler } from 'api/ErrorResponseHandler';
import { AxiosError } from 'axios';
import { ErrorResponse, SuccessResponse } from 'types/api';
import { PayloadProps } from 'types/api/licenses/getAll';
const getAll = async (): Promise<
SuccessResponse<PayloadProps> | ErrorResponse
> => {
try {
const response = await axios.get('/licenses');
const response = await axios.get('/licenses');
return {
statusCode: 200,
error: null,
message: response.data.status,
payload: response.data.data,
};
} catch (error) {
return ErrorResponseHandler(error as AxiosError);
}
return {
statusCode: 200,
error: null,
message: response.data.status,
payload: response.data.data,
};
};
export default getAll;

View File

@@ -0,0 +1,18 @@
import { ApiV3Instance as axios } from 'api';
import { ErrorResponse, SuccessResponse } from 'types/api';
import { LicenseV3EventQueueResModel } from 'types/api/licensesV3/getActive';
const getActive = async (): Promise<
SuccessResponse<LicenseV3EventQueueResModel> | ErrorResponse
> => {
const response = await axios.get('/licenses/active');
return {
statusCode: 200,
error: null,
message: response.data.status,
payload: response.data.data,
};
};
export default getActive;

View File

@@ -0,0 +1,28 @@
import axios from 'api';
import {
MessagingQueueServicePayload,
MessagingQueuesPayloadProps,
} from 'pages/MessagingQueues/MQDetails/MQTables/getConsumerLagDetails';
import { ErrorResponse, SuccessResponse } from 'types/api';
export const getTopicThroughputOverview = async (
props: Omit<MessagingQueueServicePayload, 'variables'>,
): Promise<
SuccessResponse<MessagingQueuesPayloadProps['payload']> | ErrorResponse
> => {
const { detailType, start, end } = props;
const response = await axios.post(
`messaging-queues/kafka/topic-throughput/${detailType}`,
{
start,
end,
},
);
return {
statusCode: 200,
error: null,
message: response.data.status,
payload: response.data.data,
};
};

View File

@@ -0,0 +1,39 @@
import { ApiBaseInstance } from 'api';
import { ErrorResponseHandler } from 'api/ErrorResponseHandler';
import { AxiosError } from 'axios';
import { SOMETHING_WENT_WRONG } from 'constants/api';
import { ErrorResponse, SuccessResponse } from 'types/api';
export interface OnboardingStatusResponse {
status: string;
data: {
attribute?: string;
error_message?: string;
status?: string;
}[];
}
const getOnboardingStatus = async (props: {
start: number;
end: number;
endpointService?: string;
}): Promise<SuccessResponse<OnboardingStatusResponse> | ErrorResponse> => {
const { endpointService, ...rest } = props;
try {
const response = await ApiBaseInstance.post(
`/messaging-queues/kafka/onboarding/${endpointService || 'consumers'}`,
rest,
);
return {
statusCode: 200,
error: null,
message: response.data.status,
payload: response.data,
};
} catch (error) {
return ErrorResponseHandler((error as AxiosError) || SOMETHING_WENT_WRONG);
}
};
export default getOnboardingStatus;
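A minimal sketch of calling getOnboardingStatus for the Kafka consumers check; the import path and the start/end time unit (assumed here to be nanoseconds) are assumptions for illustration.

// import path assumed for illustration
import getOnboardingStatus from 'api/messagingQueues/getOnboardingStatus';

async function checkConsumerOnboarding(): Promise<void> {
	const nowNs = Date.now() * 1e6; // unit assumed; adjust if the API expects ms
	const res = await getOnboardingStatus({
		start: nowNs - 15 * 60 * 1e9, // 15 minutes back
		end: nowNs,
		endpointService: 'consumers',
	});
	if (res.statusCode === 200 && res.payload) {
		res.payload.data.forEach((item) =>
			console.info(item.attribute, item.status, item.error_message),
		);
	}
}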

View File

@@ -0,0 +1,20 @@
import { GatewayApiV2Instance } from 'api';
import { ErrorResponse, SuccessResponse } from 'types/api';
import { UpdateProfileProps } from 'types/api/onboarding/types';
const updateProfile = async (
props: UpdateProfileProps,
): Promise<SuccessResponse<UpdateProfileProps> | ErrorResponse> => {
const response = await GatewayApiV2Instance.put('/profiles/me', {
...props,
});
return {
statusCode: 200,
error: null,
message: response.data.status,
payload: response.data.data,
};
};
export default updateProfile;

View File

@@ -0,0 +1,18 @@
import axios from 'api';
import { ErrorResponse, SuccessResponse } from 'types/api';
import { GetAllOrgPreferencesResponseProps } from 'types/api/preferences/userOrgPreferences';
const getAllOrgPreferences = async (): Promise<
SuccessResponse<GetAllOrgPreferencesResponseProps> | ErrorResponse
> => {
const response = await axios.get(`/org/preferences`);
return {
statusCode: 200,
error: null,
message: response.data.status,
payload: response.data,
};
};
export default getAllOrgPreferences;

View File

@@ -0,0 +1,18 @@
import axios from 'api';
import { ErrorResponse, SuccessResponse } from 'types/api';
import { GetAllUserPreferencesResponseProps } from 'types/api/preferences/userOrgPreferences';
const getAllUserPreferences = async (): Promise<
SuccessResponse<GetAllUserPreferencesResponseProps> | ErrorResponse
> => {
const response = await axios.get(`/user/preferences`);
return {
statusCode: 200,
error: null,
message: response.data.status,
payload: response.data,
};
};
export default getAllUserPreferences;

View File

@@ -0,0 +1,20 @@
import axios from 'api';
import { ErrorResponse, SuccessResponse } from 'types/api';
import { GetOrgPreferenceResponseProps } from 'types/api/preferences/userOrgPreferences';
const getOrgPreference = async ({
preferenceID,
}: {
preferenceID: string;
}): Promise<SuccessResponse<GetOrgPreferenceResponseProps> | ErrorResponse> => {
const response = await axios.get(`/org/preferences/${preferenceID}`);
return {
statusCode: 200,
error: null,
message: response.data.status,
payload: response.data,
};
};
export default getOrgPreference;

View File

@@ -0,0 +1,22 @@
import axios from 'api';
import { ErrorResponse, SuccessResponse } from 'types/api';
import { GetUserPreferenceResponseProps } from 'types/api/preferences/userOrgPreferences';
const getUserPreference = async ({
preferenceID,
}: {
preferenceID: string;
}): Promise<
SuccessResponse<GetUserPreferenceResponseProps> | ErrorResponse
> => {
const response = await axios.get(`/user/preferences/${preferenceID}`);
return {
statusCode: 200,
error: null,
message: response.data.status,
payload: response.data,
};
};
export default getUserPreference;

View File

@@ -0,0 +1,28 @@
import axios from 'api';
import { ErrorResponse, SuccessResponse } from 'types/api';
import {
UpdateOrgPreferenceProps,
UpdateOrgPreferenceResponseProps,
} from 'types/api/preferences/userOrgPreferences';
const updateOrgPreference = async (
preferencePayload: UpdateOrgPreferenceProps,
): Promise<
SuccessResponse<UpdateOrgPreferenceResponseProps> | ErrorResponse
> => {
const response = await axios.put(
`/org/preferences/${preferencePayload.preferenceID}`,
{
preference_value: preferencePayload.value,
},
);
return {
statusCode: 200,
error: null,
message: response.data.status,
payload: response.data.data,
};
};
export default updateOrgPreference;
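A minimal sketch of flipping the ORG_ONBOARDING preference that the PrivateRoute change above checks; the import path and the boolean value type are assumptions for illustration.

// import path and value type assumed for illustration
import updateOrgPreference from 'api/preferences/updateOrgPreference';

// mark org onboarding as complete so PrivateRoute stops redirecting to ROUTES.ONBOARDING
async function completeOrgOnboarding(): Promise<void> {
	const response = await updateOrgPreference({
		preferenceID: 'ORG_ONBOARDING',
		value: true,
	});
	console.info('onboarding preference updated:', response.statusCode === 200);
}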

View File

@@ -0,0 +1,25 @@
import axios from 'api';
import { ErrorResponse, SuccessResponse } from 'types/api';
import {
UpdateUserPreferenceProps,
UpdateUserPreferenceResponseProps,
} from 'types/api/preferences/userOrgPreferences';
const updateUserPreference = async (
preferencePayload: UpdateUserPreferenceProps,
): Promise<
SuccessResponse<UpdateUserPreferenceResponseProps> | ErrorResponse
> => {
const response = await axios.put(`/user/preferences`, {
preference_value: preferencePayload.value,
});
return {
statusCode: 200,
error: null,
message: response.data.status,
payload: response.data.data,
};
};
export default updateUserPreference;

View File

@@ -5,7 +5,6 @@ import { baseAutoCompleteIdKeysOrder } from 'constants/queryBuilder';
import { createIdFromObjectFields } from 'lib/createIdFromObjectFields';
import createQueryParams from 'lib/createQueryParams';
import { ErrorResponse, SuccessResponse } from 'types/api';
// ** Types
import { IGetAttributeKeysPayload } from 'types/api/queryBuilder/getAttributeKeys';
import {
BaseAutocompleteData,

View File

@@ -1,28 +1,18 @@
import axios from 'api';
import { ErrorResponseHandler } from 'api/ErrorResponseHandler';
import { AxiosError } from 'axios';
import { ErrorResponse, SuccessResponse } from 'types/api';
import { PayloadProps, Props } from 'types/api/user/getUser';
const getUser = async (
props: Props,
): Promise<SuccessResponse<PayloadProps> | ErrorResponse> => {
try {
const response = await axios.get(`/user/${props.userId}`, {
headers: {
Authorization: `bearer ${props.token}`,
},
});
const response = await axios.get(`/user/${props.userId}`);
return {
statusCode: 200,
error: null,
message: 'Success',
payload: response.data,
};
} catch (error) {
return ErrorResponseHandler(error as AxiosError);
}
return {
statusCode: 200,
error: null,
message: 'Success',
payload: response.data,
};
};
export default getUser;

View File

@@ -0,0 +1,18 @@
import axios from 'api';
import { SuccessResponse } from 'types/api';
import { InviteUsersResponse, UsersProps } from 'types/api/user/inviteUsers';
const inviteUsers = async (
users: UsersProps,
): Promise<SuccessResponse<InviteUsersResponse>> => {
const response = await axios.post(`/invite/bulk`, users);
return {
statusCode: 200,
error: null,
message: response.data.status,
payload: response.data,
};
};
export default inviteUsers;

View File

@@ -2,14 +2,6 @@ import deleteLocalStorageKey from 'api/browser/localstorage/remove';
import { LOCALSTORAGE } from 'constants/localStorage';
import ROUTES from 'constants/routes';
import history from 'lib/history';
import store from 'store';
import {
LOGGED_IN,
UPDATE_ORG,
UPDATE_USER,
UPDATE_USER_ACCESS_REFRESH_ACCESS_TOKEN,
UPDATE_USER_ORG_ROLE,
} from 'types/actions/app';
export const Logout = (): void => {
deleteLocalStorageKey(LOCALSTORAGE.AUTH_TOKEN);
@@ -19,50 +11,9 @@ export const Logout = (): void => {
deleteLocalStorageKey(LOCALSTORAGE.LOGGED_IN_USER_EMAIL);
deleteLocalStorageKey(LOCALSTORAGE.LOGGED_IN_USER_NAME);
deleteLocalStorageKey(LOCALSTORAGE.CHAT_SUPPORT);
deleteLocalStorageKey(LOCALSTORAGE.USER_ID);
store.dispatch({
type: LOGGED_IN,
payload: {
isLoggedIn: false,
},
});
store.dispatch({
type: UPDATE_USER_ORG_ROLE,
payload: {
org: null,
role: null,
},
});
store.dispatch({
type: UPDATE_USER,
payload: {
ROLE: 'VIEWER',
email: '',
name: '',
orgId: '',
orgName: '',
profilePictureURL: '',
userId: '',
userFlags: {},
},
});
store.dispatch({
type: UPDATE_USER_ACCESS_REFRESH_ACCESS_TOKEN,
payload: {
accessJwt: '',
refreshJwt: '',
},
});
store.dispatch({
type: UPDATE_ORG,
payload: {
org: [],
},
});
window.dispatchEvent(new CustomEvent('LOGOUT'));
// eslint-disable-next-line @typescript-eslint/ban-ts-comment
// @ts-ignore

View File

@@ -2,9 +2,9 @@ import { Button, Modal, Typography } from 'antd';
import updateCreditCardApi from 'api/billing/checkout';
import logEvent from 'api/common/logEvent';
import { SOMETHING_WENT_WRONG } from 'constants/api';
import useLicense from 'hooks/useLicense';
import { useNotifications } from 'hooks/useNotifications';
import { CreditCard, X } from 'lucide-react';
import { useAppContext } from 'providers/App/App';
import { useEffect, useState } from 'react';
import { useMutation } from 'react-query';
import { useLocation } from 'react-router-dom';
@@ -20,16 +20,16 @@ export default function ChatSupportGateway(): JSX.Element {
false,
);
const { data: licenseData, isFetching } = useLicense();
const { licenses, isFetchingLicenses } = useAppContext();
useEffect(() => {
const activeValidLicense =
licenseData?.payload?.licenses?.find(
(license) => license.isCurrent === true,
) || null;
if (!isFetchingLicenses && licenses) {
const activeValidLicense =
licenses.licenses?.find((license) => license.isCurrent === true) || null;
setActiveLicense(activeValidLicense);
}, [licenseData, isFetching]);
setActiveLicense(activeValidLicense);
}
}, [licenses, isFetchingLicenses]);
const handleBillingOnSuccess = (
data: ErrorResponse | SuccessResponse<CheckoutSuccessPayloadProps, unknown>,

View File

@@ -40,7 +40,7 @@
&.custom-time {
input:not(:focus) {
min-width: 240px;
min-width: 280px;
}
}
@@ -119,3 +119,69 @@
color: var(--bg-slate-400) !important;
}
}
.date-time-popover__footer {
border-top: 1px solid var(--bg-ink-200);
padding: 8px 14px;
.timezone-container {
&,
.timezone {
font-family: Inter;
font-size: 12px;
line-height: 16px;
letter-spacing: -0.06px;
}
display: flex;
align-items: center;
color: var(--bg-vanilla-400);
gap: 6px;
.timezone {
display: flex;
align-items: center;
gap: 4px;
border-radius: 2px;
background: rgba(171, 189, 255, 0.04);
cursor: pointer;
padding: 0px 4px;
color: var(--bg-vanilla-100);
border: none;
}
}
}
.timezone-badge {
display: flex;
align-items: center;
justify-content: center;
padding: 0 4px;
border-radius: 2px;
background: rgba(171, 189, 255, 0.04);
color: var(--bg-vanilla-100);
font-size: 12px;
font-weight: 400;
line-height: 16px;
letter-spacing: -0.06px;
cursor: pointer;
}
.lightMode {
.date-time-popover__footer {
border-color: var(--bg-vanilla-400);
}
.timezone-container {
color: var(--bg-ink-400);
&__clock-icon {
stroke: var(--bg-ink-400);
}
.timezone {
color: var(--bg-ink-100);
background: rgb(179 179 179 / 15%);
&__icon {
stroke: var(--bg-ink-100);
}
}
}
.timezone-badge {
color: var(--bg-ink-100);
background: rgb(179 179 179 / 15%);
}
}

View File

@@ -3,6 +3,7 @@
import './CustomTimePicker.styles.scss';
import { Input, Popover, Tooltip, Typography } from 'antd';
import logEvent from 'api/common/logEvent';
import cx from 'classnames';
import { DateTimeRangeType } from 'container/TopNav/CustomDateTimeModal';
import {
@@ -15,11 +16,14 @@ import { isValidTimeFormat } from 'lib/getMinMax';
import { defaultTo, isFunction, noop } from 'lodash-es';
import debounce from 'lodash-es/debounce';
import { CheckCircle, ChevronDown, Clock } from 'lucide-react';
import { useTimezone } from 'providers/Timezone';
import {
ChangeEvent,
Dispatch,
SetStateAction,
useCallback,
useEffect,
useMemo,
useState,
} from 'react';
import { useLocation } from 'react-router-dom';
@@ -28,6 +32,8 @@ import { popupContainer } from 'utils/selectPopupContainer';
import CustomTimePickerPopoverContent from './CustomTimePickerPopoverContent';
const maxAllowedMinTimeInMonths = 6;
type ViewType = 'datetime' | 'timezone';
const DEFAULT_VIEW: ViewType = 'datetime';
interface CustomTimePickerProps {
onSelect: (value: string) => void;
@@ -81,11 +87,42 @@ function CustomTimePicker({
const location = useLocation();
const [isInputFocused, setIsInputFocused] = useState(false);
const [activeView, setActiveView] = useState<ViewType>(DEFAULT_VIEW);
const { timezone, browserTimezone } = useTimezone();
const activeTimezoneOffset = timezone.offset;
const isTimezoneOverridden = useMemo(
() => timezone.offset !== browserTimezone.offset,
[timezone, browserTimezone],
);
const handleViewChange = useCallback(
(newView: 'timezone' | 'datetime'): void => {
if (activeView !== newView) {
setActiveView(newView);
}
setOpen(true);
},
[activeView, setOpen],
);
const [isOpenedFromFooter, setIsOpenedFromFooter] = useState(false);
const getSelectedTimeRangeLabel = (
selectedTime: string,
selectedTimeValue: string,
): string => {
if (selectedTime === 'custom') {
// Convert the date range string to 12-hour format
const dates = selectedTimeValue.split(' - ');
if (dates.length === 2) {
const startDate = dayjs(dates[0], 'DD/MM/YYYY HH:mm');
const endDate = dayjs(dates[1], 'DD/MM/YYYY HH:mm');
return `${startDate.format('DD/MM/YYYY hh:mm A')} - ${endDate.format(
'DD/MM/YYYY hh:mm A',
)}`;
}
return selectedTimeValue;
}
@@ -120,7 +157,6 @@ function CustomTimePicker({
useEffect(() => {
const value = getSelectedTimeRangeLabel(selectedTime, selectedValue);
setSelectedTimePlaceholderValue(value);
}, [selectedTime, selectedValue]);
@@ -132,6 +168,7 @@ function CustomTimePicker({
setOpen(newOpen);
if (!newOpen) {
setCustomDTPickerVisible?.(false);
setActiveView('datetime');
}
};
@@ -245,6 +282,7 @@ function CustomTimePicker({
const handleFocus = (): void => {
setIsInputFocused(true);
setActiveView('datetime');
};
const handleBlur = (): void => {
@@ -260,6 +298,18 @@ function CustomTimePicker({
// eslint-disable-next-line react-hooks/exhaustive-deps
}, [location.pathname]);
const handleTimezoneHintClick = (e: React.MouseEvent): void => {
e.stopPropagation();
handleViewChange('timezone');
setIsOpenedFromFooter(false);
logEvent(
'DateTimePicker: Timezone picker opened from time range input badge',
{
page: location.pathname,
},
);
};
return (
<div className="custom-time-picker">
<Popover
@@ -281,6 +331,10 @@ function CustomTimePicker({
handleGoLive={defaultTo(handleGoLive, noop)}
options={items}
selectedTime={selectedTime}
activeView={activeView}
setActiveView={setActiveView}
setIsOpenedFromFooter={setIsOpenedFromFooter}
isOpenedFromFooter={isOpenedFromFooter}
/>
) : (
content
@@ -317,12 +371,17 @@ function CustomTimePicker({
)
}
suffix={
<ChevronDown
size={14}
onClick={(): void => {
setOpen(!open);
}}
/>
<>
{!!isTimezoneOverridden && activeTimezoneOffset && (
<div className="timezone-badge" onClick={handleTimezoneHintClick}>
<span>{activeTimezoneOffset}</span>
</div>
)}
<ChevronDown
size={14}
onClick={(): void => handleViewChange('datetime')}
/>
</>
}
/>
</Popover>

View File

@@ -1,6 +1,8 @@
import './CustomTimePicker.styles.scss';
import { Color } from '@signozhq/design-tokens';
import { Button } from 'antd';
import logEvent from 'api/common/logEvent';
import cx from 'classnames';
import ROUTES from 'constants/routes';
import { DateTimeRangeType } from 'container/TopNav/CustomDateTimeModal';
@@ -9,10 +11,13 @@ import {
Option,
RelativeDurationSuggestionOptions,
} from 'container/TopNav/DateTimeSelectionV2/config';
import { Clock, PenLine } from 'lucide-react';
import { useTimezone } from 'providers/Timezone';
import { Dispatch, SetStateAction, useMemo } from 'react';
import { useLocation } from 'react-router-dom';
import RangePickerModal from './RangePickerModal';
import TimezonePicker from './TimezonePicker';
interface CustomTimePickerPopoverContentProps {
options: any[];
@@ -26,8 +31,13 @@ interface CustomTimePickerPopoverContentProps {
onSelectHandler: (label: string, value: string) => void;
handleGoLive: () => void;
selectedTime: string;
activeView: 'datetime' | 'timezone';
setActiveView: Dispatch<SetStateAction<'datetime' | 'timezone'>>;
isOpenedFromFooter: boolean;
setIsOpenedFromFooter: Dispatch<SetStateAction<boolean>>;
}
// eslint-disable-next-line sonarjs/cognitive-complexity
function CustomTimePickerPopoverContent({
options,
setIsOpen,
@@ -37,12 +47,18 @@ function CustomTimePickerPopoverContent({
onSelectHandler,
handleGoLive,
selectedTime,
activeView,
setActiveView,
isOpenedFromFooter,
setIsOpenedFromFooter,
}: CustomTimePickerPopoverContentProps): JSX.Element {
const { pathname } = useLocation();
const isLogsExplorerPage = useMemo(() => pathname === ROUTES.LOGS_EXPLORER, [
pathname,
]);
const { timezone } = useTimezone();
const activeTimezoneOffset = timezone.offset;
function getTimeChips(options: Option[]): JSX.Element {
return (
@@ -63,55 +79,105 @@ function CustomTimePickerPopoverContent({
);
}
+ const handleTimezoneHintClick = (): void => {
+ setActiveView('timezone');
+ setIsOpenedFromFooter(true);
+ logEvent(
+ 'DateTimePicker: Timezone picker opened from time range picker footer',
+ {
+ page: pathname,
+ },
+ );
+ };
+ if (activeView === 'timezone') {
+ return (
+ <div className="date-time-popover">
+ <TimezonePicker
+ setActiveView={setActiveView}
+ setIsOpen={setIsOpen}
+ isOpenedFromFooter={isOpenedFromFooter}
+ />
+ </div>
+ );
+ }
return (
<div className="date-time-popover">
<div className="date-time-options">
{isLogsExplorerPage && (
<Button className="data-time-live" type="text" onClick={handleGoLive}>
Live
</Button>
)}
{options.map((option) => (
<Button
type="text"
key={option.label + option.value}
onClick={(): void => {
onSelectHandler(option.label, option.value);
}}
className={cx(
'date-time-options-btn',
customDateTimeVisible
? option.value === 'custom' && 'active'
: selectedTime === option.value && 'active',
)}
>
{option.label}
</Button>
))}
+ <>
+ <div className="date-time-popover">
+ <div className="date-time-options">
+ {isLogsExplorerPage && (
+ <Button className="data-time-live" type="text" onClick={handleGoLive}>
+ Live
+ </Button>
+ )}
+ {options.map((option) => (
+ <Button
+ type="text"
+ key={option.label + option.value}
+ onClick={(): void => {
+ onSelectHandler(option.label, option.value);
+ }}
+ className={cx(
+ 'date-time-options-btn',
+ customDateTimeVisible
+ ? option.value === 'custom' && 'active'
+ : selectedTime === option.value && 'active',
+ )}
+ >
+ {option.label}
+ </Button>
+ ))}
+ </div>
+ <div
+ className={cx(
+ 'relative-date-time',
+ selectedTime === 'custom' || customDateTimeVisible
+ ? 'date-picker'
+ : 'relative-times',
+ )}
+ >
+ {selectedTime === 'custom' || customDateTimeVisible ? (
+ <RangePickerModal
+ setCustomDTPickerVisible={setCustomDTPickerVisible}
+ setIsOpen={setIsOpen}
+ onCustomDateHandler={onCustomDateHandler}
+ selectedTime={selectedTime}
+ />
+ ) : (
+ <div className="relative-times-container">
+ <div className="time-heading">RELATIVE TIMES</div>
+ <div>{getTimeChips(RelativeDurationSuggestionOptions)}</div>
+ </div>
+ )}
+ </div>
+ </div>
- <div
- className={cx(
- 'relative-date-time',
- selectedTime === 'custom' || customDateTimeVisible
- ? 'date-picker'
- : 'relative-times',
- )}
- >
- {selectedTime === 'custom' || customDateTimeVisible ? (
- <RangePickerModal
- setCustomDTPickerVisible={setCustomDTPickerVisible}
- setIsOpen={setIsOpen}
- onCustomDateHandler={onCustomDateHandler}
- selectedTime={selectedTime}
<div className="date-time-popover__footer">
<div className="timezone-container">
<Clock
color={Color.BG_VANILLA_400}
className="timezone-container__clock-icon"
height={12}
width={12}
/>
- ) : (
- <div className="relative-times-container">
- <div className="time-heading">RELATIVE TIMES</div>
- <div>{getTimeChips(RelativeDurationSuggestionOptions)}</div>
- </div>
- )}
<span className="timezone__icon">Current timezone</span>
<div></div>
<button
type="button"
className="timezone"
onClick={handleTimezoneHintClick}
>
<span>{activeTimezoneOffset}</span>
<PenLine
color={Color.BG_VANILLA_100}
className="timezone__icon"
size={10}
/>
</button>
</div>
</div>
</div>
</>
);
}
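The popover content component now branches on the lifted activeView: a 'timezone' value short-circuits into TimezonePicker, anything else falls through to the option list, range picker, and the new footer that hands off to the timezone view. A stripped-down sketch of that branch (PopoverBody and its children are stand-ins, not the SigNoz components):

```tsx
import React from 'react';

type View = 'datetime' | 'timezone';

interface BodyProps {
	activeView: View;
	setActiveView: (view: View) => void;
}

function PopoverBody({ activeView, setActiveView }: BodyProps): JSX.Element {
	// Early return keeps the timezone view from inheriting the datetime layout.
	if (activeView === 'timezone') {
		return <div className="date-time-popover">{/* timezone picker here */}</div>;
	}
	return (
		<>
			<div className="date-time-popover">{/* options + range picker here */}</div>
			<div className="date-time-popover__footer">
				<button type="button" onClick={(): void => setActiveView('timezone')}>
					Edit timezone
				</button>
			</div>
		</>
	);
}
```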

RangePickerModal.tsx

@@ -4,7 +4,8 @@ import { DatePicker } from 'antd';
import { DateTimeRangeType } from 'container/TopNav/CustomDateTimeModal';
import { LexicalContext } from 'container/TopNav/DateTimeSelectionV2/config';
import dayjs, { Dayjs } from 'dayjs';
- import { Dispatch, SetStateAction } from 'react';
+ import { useTimezone } from 'providers/Timezone';
+ import { Dispatch, SetStateAction, useMemo } from 'react';
import { useSelector } from 'react-redux';
import { AppState } from 'store/reducers';
import { GlobalReducer } from 'types/reducer/globalTime';
@@ -31,7 +32,10 @@ function RangePickerModal(props: RangePickerModalProps): JSX.Element {
(state) => state.globalTime,
);
- const disabledDate = (current: Dayjs): boolean => {
+ // Using any type here because antd's DatePicker expects its own internal Dayjs type
+ // which conflicts with our project's Dayjs type that has additional plugins (tz, utc etc).
+ // eslint-disable-next-line @typescript-eslint/no-explicit-any, @typescript-eslint/explicit-module-boundary-types
+ const disabledDate = (current: any): boolean => {
const currentDay = dayjs(current);
return currentDay.isAfter(dayjs());
};
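The new comment above explains the any escape hatch: antd's DatePicker declares disabledDate with its own bundled Dayjs type, which can disagree with a project build of dayjs that layers on the utc/tz plugins. One hedged alternative that avoids any, assuming dayjs's exported ConfigType covers whatever antd passes in:

```ts
import dayjs, { ConfigType } from 'dayjs';

// Convert antd's value to our own dayjs instance once, then compare.
const disabledDate = (current: ConfigType): boolean =>
	dayjs(current).isAfter(dayjs()); // future dates stay unselectable
```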
@@ -49,16 +53,32 @@ function RangePickerModal(props: RangePickerModalProps): JSX.Element {
}
onCustomDateHandler(date_time, LexicalContext.CUSTOM_DATE_PICKER);
};
+ const { timezone } = useTimezone();
+ const rangeValue: [Dayjs, Dayjs] = useMemo(
+ () => [
+ dayjs(minTime / 1000_000).tz(timezone.value),
+ dayjs(maxTime / 1000_000).tz(timezone.value),
+ ],
+ [maxTime, minTime, timezone.value],
+ );
return (
<div className="custom-date-picker">
<RangePicker
disabledDate={disabledDate}
allowClear
- showTime
+ showTime={{
+ use12Hours: true,
+ format: 'hh:mm A',
+ }}
+ format={(date: Dayjs): string =>
+ date.tz(timezone.value).format('YYYY-MM-DD hh:mm A')
+ }
onOk={onModalOkHandler}
// eslint-disable-next-line react/jsx-props-no-spreading
{...(selectedTime === 'custom' && {
- defaultValue: [dayjs(minTime / 1000000), dayjs(maxTime / 1000000)],
+ value: rangeValue,
})}
/>
</div>
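Taken together, the RangePicker edits make both display and state timezone-aware: showTime switches to a 12-hour clock, format renders in the selected timezone, and the controlled value comes from a memoized pair built from the global minTime/maxTime. The / 1000_000 divisions imply those are nanosecond epochs. A standalone sketch of the conversion (toRangeValue is an illustrative name, and the nanosecond assumption is inferred from the diff, not stated in it):

```ts
import dayjs, { Dayjs } from 'dayjs';
import timezonePlugin from 'dayjs/plugin/timezone';
import utc from 'dayjs/plugin/utc';

dayjs.extend(utc); // .tz() requires the utc plugin
dayjs.extend(timezonePlugin);

// Assumes the store keeps nanosecond epochs; dayjs() expects milliseconds.
function toRangeValue(
	minTimeNs: number,
	maxTimeNs: number,
	tz: string,
): [Dayjs, Dayjs] {
	return [dayjs(minTimeNs / 1_000_000).tz(tz), dayjs(maxTimeNs / 1_000_000).tz(tz)];
}

// e.g. toRangeValue(1.7036e18, 1.70364e18, 'Asia/Kolkata')
// -> two Dayjs values that format in IST regardless of the browser timezone
```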

Some files were not shown because too many files have changed in this diff.