Compare commits

...

74 Commits

Author SHA1 Message Date
Prashant Shahi
abea45f255 Merge branch 'main' into fix/install-no-sudo 2025-01-30 18:47:43 +05:45
Nityananda Gohain
d1e7cc128f fix: telemetry store (#6923)
* fix: initial changes for telemetry store

* fix: add tests and use proper config for conn

* fix: add telemetry store test

* fix: add backward compatibility for old variables and update example conf

* fix: move wrapper to telemetry store

* fix: no need to pass query for settings

* fix: remove redundant config for ch conn

* fix: use clickhouse dsn instead

* fix: update example config

* fix: update backward compatibility code

* fix: use hooks in telemetrystore

* fix: address minor comments

---------

Co-authored-by: Vibhu Pandey <vibhupandey28@gmail.com>
2025-01-30 10:21:55 +00:00
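
The example config this change ships (see conf/example.yaml further down in this diff) drives the store from a single ClickHouse DSN, with backward compatibility for the old variables. Below is a minimal Go sketch of such a config with a fallback to the legacy ClickHouseUrl environment variable seen in the older compose files; the type and method names are illustrative, not SigNoz's actual code.

```go
package telemetrystore

import (
	"os"
	"time"
)

// Config mirrors the telemetrystore section of conf/example.yaml.
type Config struct {
	Provider   string           `mapstructure:"provider"` // e.g. "clickhouse"
	ClickHouse ClickHouseConfig `mapstructure:"clickhouse"`
}

type ClickHouseConfig struct {
	DSN          string        `mapstructure:"dsn"`            // e.g. http://localhost:9000
	MaxIdleConns int           `mapstructure:"max_idle_conns"` // e.g. 50
	MaxOpenConns int           `mapstructure:"max_open_conns"` // e.g. 100
	DialTimeout  time.Duration `mapstructure:"dial_timeout"`   // e.g. 5s
}

// ResolvedDSN prefers the new config value and falls back to the legacy
// ClickHouseUrl environment variable used by older deployments.
func (c ClickHouseConfig) ResolvedDSN() string {
	if c.DSN != "" {
		return c.DSN
	}
	return os.Getenv("ClickHouseUrl")
}
```
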
Amlan Kumar Nandy
ffd72cf406 chore: fix visibility toggle in host filters (#6976) 2025-01-30 00:15:19 +05:30
Vikrant Gupta
6dfea14219 chore(license): default to basic plan if license validation fails (#6972)
* chore(license): default to basic plan if license validation fails 3 times

* chore(license): revert the on-boot validation check

* chore(license): reset the atomic counter

* chore(license): revert the table creation removal

* chore(license): remove verify issue workflow

* chore(license): add proper log level

* chore(license): add proper log level

* chore(license): close the validation goroutine after defaulting to basic plan

* chore(license): set the validator running flag to false as well
2025-01-29 18:17:50 +00:00
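
The commit above counts consecutive validation failures, falls back to the basic plan after three of them, and then stops the validator. A hedged Go sketch of that control flow follows; the types and callbacks are illustrative, not the actual SigNoz implementation.

```go
package licensing

import (
	"context"
	"sync/atomic"
	"time"
)

// maxFailures mirrors the "3 times" in the commit message; every name
// and signature below is an illustrative sketch, not SigNoz's code.
const maxFailures = 3

type Validator struct {
	failures atomic.Int64
	running  atomic.Bool
	validate func(context.Context) error // calls out to the license server
	onBasic  func()                      // switches the deployment to the basic plan
}

func (v *Validator) Run(ctx context.Context) {
	v.running.Store(true)
	defer v.running.Store(false) // "set the validator running flag to false as well"
	ticker := time.NewTicker(time.Hour)
	defer ticker.Stop()
	for {
		select {
		case <-ctx.Done():
			return
		case <-ticker.C:
			if err := v.validate(ctx); err != nil {
				if v.failures.Add(1) >= maxFailures {
					v.onBasic() // default to the basic plan
					return      // close the validation goroutine
				}
				continue
			}
			v.failures.Store(0) // reset the atomic counter on success
		}
	}
}
```
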
Shaheer Kochai
f2be856f63 fix: apply x axis date/time formatting only to panel types that have date/time in x axis (#6956) 2025-01-29 18:09:04 +00:00
Shaheer Kochai
f04589a0b2 fix: properly get the alert severity (#6974) 2025-01-29 17:30:51 +00:00
Amlan Kumar Nandy
1378590429 chore: fix filter editing issue in infra monitoring (#6961) 2025-01-29 17:21:57 +00:00
Srikanth Chekuri
88084af4d4 chore: add cluster, nodes that are sending incorrect host metrics (#6969) 2025-01-29 14:15:48 +00:00
Amlan Kumar Nandy
d0eefa0cf2 feat: add hostname and os type quick filter to hosts list (#6926) 2025-01-29 13:47:23 +00:00
Vikrant Gupta
cc9eb32c50 chore(license): clean up the older endpoints (#6971) 2025-01-29 18:13:50 +05:30
primus-bot[bot]
70169986be chore(release): bump to v0.70.0 (#6970)
#### Summary
 - Release SigNoz v0.70.0
 - Bump SigNoz OTel Collector to v0.111.25

 Created by [Primus-Bot](https://github.com/apps/primus-bot)
2025-01-29 17:49:01 +05:30
SagarRajput-7
fb3b70b729 feat: added worker count and other misc changes (#6951)
* feat: added worker count - via autocomplete API

* feat: added worker count and code fix

* feat: added lightMode styles for all celery feat

* feat: changed routes to have redirects, subroute etc

* feat: feedback changes

* feat: removed console.log
2025-01-29 17:32:01 +05:30
Prashant Shahi
36f91d1993 feat: Docker volume migration - OSS improvements Part 2 (#6962)
This is the second part of the OSS installation and improvement PR.

Users must run the migration script to migrate an existing SigNoz installation from Docker Standalone or Docker Swarm.

---------

Signed-off-by: Prashant Shahi <prashant@signoz.io>
2025-01-29 15:38:31 +05:30
Srikanth Chekuri
333f90d8ac fix: window size for small time ranges (#6964) 2025-01-29 09:17:47 +00:00
Yunus M
ea83a7e62d fix: show upgrade plan option in grace period if not converted to trial (#6909) 2025-01-29 08:16:26 +00:00
Shaheer Kochai
42a5d71d81 feat: add documentation link for logs explorer quick filters (#6854)
* feat: add documentation link for logs explorer quick filters

* refactor: simplify QuickFilters component rendering logic

* fix: properly implement the log fast filter empty state UI

* fix: don't display empty state while loading quick filters

* refactor: extract quick filter empty state into a separate component

* refactor: render quick filters empty state based on source param

* refactor: update QuickFilters source to use QuickFiltersSource.INFRA_MONITORING for consistency

* fix: address review comments

* refactor: fix the failing test by moving QuickFilters types to types.tsx

* refactor: properly import and use QuickFiltersSource

---------

Co-authored-by: Vikrant Gupta <vikrant.thomso@gmail.com>
2025-01-29 10:39:42 +04:30
Vikrant Gupta
08e1fd3ca5 feat(trace-details): frontend changes for trace details (#6905)
* feat(trace-details): frontend changes for trace details

* feat(trace-detail): address review comments from elipsis

* feat(trace-detail): add the new drawer designs

* feat(trace-detail): handle the selected span hover

* feat(trace-detail): address theme colors and span selection

* feat(trace-detail): fix some more css

* feat(trace-detail): fix some more css

* feat(trace-detail): add hovered span and handled no data components for new drawer

* feat(trace-detail): handle light mode designs

* feat(trace-detail): remove the hover functionality in favor of performance

* feat(trace-detail): span lines connectors

* feat(trace-detail): span lines connectors

* feat(trace-detail): handle the line matching for flamegraph and waterfall

* feat(trace-waterfall): change the timeline color to make it less poky

* feat(trace-waterfall): added where clause support in trace details page

* feat(trace-waterfall): added where clause support in trace details page

* feat(trace-detail): handle light mode designs

* feat(trace-detail): handle light mode designs

* feat(trace-detail): fix build issues

* feat(trace-detail): handle loading error state for filters and flamegraph hovered state

* feat(trace-detail): fix the hardcoded traceID

* feat(trace-detail): remove unnecessary use effects

* feat(trace-detail): handled the flamegraph update with ID

* feat(trace-detail): added timestamp bucketing and latency sampling

* feat(trace-detail): extract the buckets and span limit in constants

* feat(trace-detail): minor VQA comments

* feat(trace-detail): remove unnecessary use effects

* feat(trace-detail): add go to related logs

* feat(trace-detail): address review comments

* feat(trace-detail): address review comments

* feat(trace-detail): address review comments

* feat(trace-detail): address review comments
2025-01-28 22:04:24 +05:30
Shaheer Kochai
8cb8beb63f fix: adjust the buffer time for start and end time in trace to logs (#6953) 2025-01-28 12:02:28 +05:30
SagarRajput-7
35c61045c4 feat: added right panel graphs in overview page (#6944)
* feat: added right panel graphs in overview page

* feat: added right panel trace navigation

* feat: implement search column wise and global table wise

* feat: implemented filter on table with api filtering

* feat: added allow clear to table filters

* feat: fixed empty table issue

* feat: fixed celery state - count display
2025-01-28 10:04:06 +05:30
Amlan Kumar Nandy
7c85befc17 feat: implement page size configuration in infra monitoring (#6950) 2025-01-28 09:50:34 +05:30
Amlan Kumar Nandy
813ca8bc23 feat: statefulsets, daemonsets, jobs and volumes implementation in k8s infra monitoring (#6629) 2025-01-27 22:22:48 +05:30
Shaheer Kochai
98cdbcd711 feat: show/hide timestamp and body fields in logs explorer (raw, default, column views) (#6903)
* feat: show/hide timestamp and body fields in logs explorer (raw, default, column views)

* fix: add width to log indicator column to ensure that a single column doesn't take half the space

* fix: handle edge cases and fix issues for show/hide body and timestamp in logs explorer
2025-01-27 21:54:59 +05:30
Prashant Shahi
61a6c21edb feat: docker/swarm deployment restructuring and improvements (#6904)
This PR consists of improvements and fixes related to SigNoz in Docker Standalone and Docker Swarm.

- Docker/Swarm deploy directory restructuring and improvements
- Prometheus metrics scraping using labels in Docker Standalone and Docker Swarm
- Swarm: single schema-migrator container for both sync/async migrations
- Remove Kubernetes HOTRod files (will be migrated to charts)
- Fetch histogram-quantile from release instead of local binary mount

---------

Signed-off-by: Prashant Shahi <prashant@signoz.io>
2025-01-27 12:48:42 +00:00
Shaheer Kochai
8d6731c7ba feat: Centralize and Standardize Date/Time Format Handling (#6925)
* feat: change the format of uplot x axis to 24 hours

* feat: centralize date/time formats

* refactor: use the centralized 24 hour date/time formatting across components

* refactor: centralize uPlot x-axis values formatting

* feat: make the x axis of alert history in 24 hour format
2025-01-27 11:58:54 +00:00
Ekansh Gupta
6dc7a76853 feat: changed the traces tab query to incorporate span scopes (#6922) 2025-01-27 08:37:47 +00:00
Shaheer Kochai
cc3d78cd71 feat: add support for span scope (#6910)
* feat: add support for span scope

* refactor: remove inline style
2025-01-27 08:25:10 +00:00
Prashant Shahi
c0d8f8de3a Merge branch 'main' into fix/install-no-sudo 2025-01-27 11:35:23 +05:45
SagarRajput-7
bd0c4beeee feat: celery overview page (#6918)
* feat: added celery task feature - with task graphs and details

* feat: added celery bar graph toggle states UI

* feat: added histogram charts and right panel

* feat: added task latency graph with different states

* feat: added right panel trace navigation

* feat: added dynamic stepinterval based on timerange

* feat: changed histogram occurrences to bar

* feat: onclick right panels for celery state bar graphs

* feat: pagesetup and tabs with kafka setup

* feat: custom series for bar for color generation

* feat: fixed test cases

* feat: added new celery overview page

* feat: added table feat and column details

* feat: improved table style and column configs

* feat: added service name filter and common filter logic

* feat: code fix

* feat: code fix
2025-01-27 10:56:01 +05:30
SagarRajput-7
e83e691ef5 feat: added celery task feature - with task graphs and details (#6840)
* feat: added celery task feature - with task graphs and details

* feat: added celery bar graph toggle states UI

* feat: added histogram charts and right panel

* feat: added task latency graph with different states

* feat: added right panel trace navigation

* feat: added navigateToTrace logic

* feat: added value graph and global filter logic

* feat: added dynamic stepinterval based on timerange

* feat: changed histogram occurrences to bar

* feat: onclick right panels for celery state bar graphs

* feat: pagesetup and tabs with kafka setup

* feat: custom series for bar for color generation

* feat: fixed test cases

* feat: update styles
2025-01-27 08:53:19 +05:30
Prashant Shahi
860fa4a995 fix(install-script): handle no sudo command scenario
Signed-off-by: Prashant Shahi <prashant@signoz.io>
2025-01-27 01:59:35 +05:30
Prashant Shahi
c30c882aae chore: integrate goreleaser for user_scripts and restructuring (#6911)
* chore: integrate goreleaser for user_scripts and restructuring
* ci(goreleaser): add goreleaser workflow
* chore(goreleaser): update Makefile and config

---------

Signed-off-by: Prashant Shahi <prashant@signoz.io>
2025-01-25 17:10:21 +05:30
Vibhu Pandey
001122db2c feat(instrumentation): adopt slog (#6907)
### Summary

feat(instrumentation): adopt slog
2025-01-24 09:23:02 +00:00
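
log/slog has been in the Go standard library since Go 1.21; adopting it usually boils down to constructing a handler and installing it as the default, as in this small, illustrative example:

```go
package main

import (
	"log/slog"
	"os"
)

func main() {
	// A structured JSON logger on stdout.
	logger := slog.New(slog.NewJSONHandler(os.Stdout, &slog.HandlerOptions{
		Level: slog.LevelInfo,
	}))
	slog.SetDefault(logger)

	slog.Info("server started", "port", 8080)
	slog.Error("query failed", "error", "connection refused")
}
```

Handlers can be swapped (text, JSON, or a custom one) without touching call sites, which is the main draw over a bespoke logger.
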
Nityananda Gohain
b544a54c40 fix: logging middleware add excluded routes (#6917)
* fix: logging middleware add excluded routes

* fix: consistent name and update example

* fix: consistent name
2025-01-24 14:43:28 +05:30
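
The excluded-routes knob shows up in conf/example.yaml below as apiserver.logging.excluded_routes (for example /api/v1/health, which would otherwise flood the logs). A minimal sketch of a logging middleware that honours such a list; this is generic Go, not the actual SigNoz middleware.

```go
package middleware

import (
	"log/slog"
	"net/http"
	"time"
)

// Logging logs each request unless its path is in excludedRoutes.
func Logging(logger *slog.Logger, excludedRoutes []string) func(http.Handler) http.Handler {
	excluded := make(map[string]struct{}, len(excludedRoutes))
	for _, route := range excludedRoutes {
		excluded[route] = struct{}{}
	}
	return func(next http.Handler) http.Handler {
		return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
			if _, skip := excluded[r.URL.Path]; skip {
				next.ServeHTTP(w, r) // e.g. health checks: serve, but stay quiet
				return
			}
			start := time.Now()
			next.ServeHTTP(w, r)
			logger.Info("request",
				"method", r.Method,
				"path", r.URL.Path,
				"duration", time.Since(start))
		})
	}
}
```
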
SagarRajput-7
af6b54aeb4 fix: added safety check on uplot-xaxis logic to solve sentry issue (#6869) 2025-01-24 03:48:47 +05:30
Vikrant Gupta
1e61e6c2f6 feat(trace-details): query service changes for trace details (#6906)
* feat(trace-details): query service changes for trace details

* feat(trace-detail): refactor query service trace apis

* feat(trace-detail): minor bug fixes

* feat(trace-detail): address review comments

* feat(trace-detail): address review comments
2025-01-24 00:16:38 +05:30
Shivanshu Raj Shrivastava
390b04c015 chore: add global to inner join (#6912)
Signed-off-by: Shivanshu Raj Shrivastava <shivanshu1333@gmail.com>
2025-01-23 14:05:22 +00:00
Nityananda Gohain
c56345e1db fix: move analytics middleware (#6901)
* fix: move analytics middleware

* fix: use logger from params

* fix: remove custom response writer

* fix: use correct names
2025-01-23 16:21:08 +05:30
Nityananda Gohain
7f25674640 fix: move log comment middleware (#6899)
* fix: move log comment middleware

* fix: move log comment to logger middleware

* fix: make logcomment a method of logging struct
2025-01-23 13:27:21 +05:30
Nityananda Gohain
df5ab64c83 fix: use common timeout middleware (#6866)
* fix: use common timeout middleware

* fix: use apiserver factory for config

* fix: add backward compatibility for old variables

* fix: remove apiserver provider and use config directly

* fix: remove apiserver interface

* fix: address comments

* fix: address minor comments

* fix: address minor comments
2025-01-22 17:48:47 +00:00
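
The matching configuration appears in conf/example.yaml below: a 60s default, a 600s max, and excluded routes for the log live-tail endpoints, which must never be cut off. A sketch of a timeout middleware built on context.WithTimeout, under those assumptions:

```go
package middleware

import (
	"context"
	"net/http"
	"time"
)

// Timeout wraps handlers with a request deadline. Streaming routes such
// as /api/v1/logs/tail are skipped, matching the excluded_routes list in
// conf/example.yaml. How the 600s max interacts with per-request
// overrides is not shown here; this is only a sketch of the pattern.
func Timeout(def time.Duration, excluded map[string]struct{}) func(http.Handler) http.Handler {
	return func(next http.Handler) http.Handler {
		return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
			if _, skip := excluded[r.URL.Path]; skip {
				next.ServeHTTP(w, r) // live tail must be able to run indefinitely
				return
			}
			ctx, cancel := context.WithTimeout(r.Context(), def)
			defer cancel()
			next.ServeHTTP(w, r.WithContext(ctx))
		})
	}
}
```
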
primus-bot[bot]
726f2b0fa2 chore(release): bump to v0.69.0 (#6898)
#### Summary
 - Release SigNoz v0.69.0
 - Bump SigNoz OTel Collector to v0.111.24

 Created by [Primus-Bot](https://github.com/apps/primus-bot)
2025-01-22 15:12:06 +05:30
Nityananda Gohain
4a0b0aafbd fix: use common logging middleware (#6865)
* feat: use common logging middleware

* fix: update comment

* fix: remove privatePort:true

* fix: remove staticfields from logging

* fix: remove staticfields from logging
2025-01-22 07:42:21 +00:00
Vibhu Pandey
837f434fe9 feat(sqlstore): open a connection to sql once (#6867) 2025-01-22 07:19:38 +00:00
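
*sql.DB is already a connection pool, so a process needs exactly one instance; guarding construction with sync.Once is the idiomatic way to guarantee that. A sketch under that assumption (the sqlite driver choice is illustrative):

```go
package sqlstore

import (
	"database/sql"
	"sync"

	_ "modernc.org/sqlite" // illustrative driver choice
)

var (
	once sync.Once
	db   *sql.DB
	err  error
)

// DB opens the connection on first use and returns the same *sql.DB
// afterwards; *sql.DB is itself a pool, so one instance is enough.
func DB(path string) (*sql.DB, error) {
	once.Do(func() {
		db, err = sql.Open("sqlite", path)
		if err == nil {
			db.SetMaxOpenConns(100) // matches max_open_conns in conf/example.yaml
		}
	})
	return db, err
}
```
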
Srikanth Chekuri
0baf0e9453 chore: address some gaps in infra (#6871) 2025-01-21 23:25:15 +05:30
Amlan Kumar Nandy
403043e076 feat: implement deployments, clusters and namespaces in k8s infra monitoring (#6786) 2025-01-21 21:57:32 +05:30
Shaheer Kochai
7730f76128 Revert "feat: show/hide timestamp and body fields in logs explorer (raw, defa…" (#6868)
This reverts commit b465f74e4a.
2025-01-21 15:03:03 +00:00
Vishal Sharma
6e3ffd555d fix: update azure event hub receiver config (#6864) 2025-01-21 14:35:23 +05:30
Nityananda Gohain
c565c2b865 fix: remove deprecated message from ingestion dashboard (#6862) 2025-01-21 07:17:21 +00:00
Srikanth Chekuri
4ec1e66c7e chore: clickhouse sql support for v3 (#6778) 2025-01-21 04:09:40 +00:00
Srikanth Chekuri
89541862cc chore: correct order within page result (#6847) 2025-01-20 13:38:49 +00:00
Prashant Shahi
610f4d43e7 chore(install-script): install.sh script improvements (#6857)
### Summary

- support for docker compose plugin
- check for occupied port when installing SigNoz for the first time
- remove unused code

Signed-off-by: Prashant Shahi <prashant@signoz.io>
2025-01-20 13:26:40 +00:00
Ekansh Gupta
044a124cc1 feat: add search span scope in the list view tab to add the scope at per Query level (#6810)
* feat: add search span scope in the list view tab to add the scope at per query level
2025-01-20 18:04:19 +05:30
Vibhu Pandey
0cf9003e3a feat(.): initialize all factories (#6844)
### Summary

feat(.): initialize all factories

#### Related Issues / PR's

Removed all redundant commits of https://github.com/SigNoz/signoz/pull/6843
Closes https://github.com/SigNoz/signoz/pull/6782
2025-01-20 17:45:33 +05:30
Nityananda Gohain
644135a933 fix: remove analytics schema migrations (#6856) 2025-01-20 17:09:55 +05:30
Shaheer Kochai
b465f74e4a feat: show/hide timestamp and body fields in logs explorer (raw, default, column views) (#6831)
* feat: show/hide timestamp and body fields in logs explorer (raw, default, column views)

* fix: add width to log indicator column to ensure that a single column doesn't take half the space

---------

Co-authored-by: Nityananda Gohain <nityanandagohain@gmail.com>
2025-01-20 05:43:48 +00:00
Shaheer Kochai
e00e365964 chore: new alert rule documentation redirection tests (#6822)
* chore: new alert rule documentation redirection tests

* fix: fix the failing test

* fix: add test cases for anomaly based alert and fix the failing tests
2025-01-20 05:18:41 +00:00
Amlan Kumar Nandy
5c45e1f7b3 chore: infra monitoring bug fixes (#6795) 2025-01-18 10:55:50 +00:00
Nityananda Gohain
16e61b45ac fix: support in operator for json search (#6689)
* fix: support `in` operator in json search

* fix: remove else condition and update test cases

---------

Co-authored-by: Srikanth Chekuri <srikanth.chekuri92@gmail.com>
2025-01-18 01:44:25 +05:30
Vibhu Pandey
fdcdbf021a refactor(web): move to provider pattern (#6838)
### Summary

move to provider pattern
2025-01-17 20:03:21 +05:30
Vibhu Pandey
c92ef53e9c refactor(cache): move to provider pattern (#6837)
### Summary

Move cache to provider pattern
2025-01-17 18:09:39 +05:30
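
Under the provider pattern, callers depend on a small interface while the concrete provider (memory or redis, selected by cache.provider in conf/example.yaml) is wired up from config. An illustrative Go shape, not the actual SigNoz interface:

```go
package cache

import (
	"context"
	"errors"
	"time"
)

var ErrKeyNotFound = errors.New("cache: key not found")

// Cache is the contract callers program against; memory and redis
// providers implement it behind a config-selected factory.
type Cache interface {
	Set(ctx context.Context, key string, value []byte, ttl time.Duration) error
	Get(ctx context.Context, key string) ([]byte, error)
	Delete(ctx context.Context, key string) error
}
```
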
Vibhu Pandey
268f283785 feat(sqlmigrator): add sqlmigrator package (#6836)
### Summary

- add sqlmigrator package
2025-01-17 16:52:55 +05:30
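
A SQL migrator typically keeps an ordered list of named migrations and applies only the ones not yet recorded. An illustrative Go shape for such a package; the actual sqlmigrator API may differ, and the `?` placeholder style assumes a sqlite-compatible driver.

```go
package sqlmigrator

import (
	"context"
	"database/sql"
)

// Migration is one schema change; Name must be unique and ordered.
type Migration struct {
	Name string
	Up   func(ctx context.Context, db *sql.DB) error
}

// Run applies each migration that is not yet recorded in the
// schema_migrations table, recording it afterwards.
func Run(ctx context.Context, db *sql.DB, migrations []Migration) error {
	if _, err := db.ExecContext(ctx,
		`CREATE TABLE IF NOT EXISTS schema_migrations (name TEXT PRIMARY KEY)`); err != nil {
		return err
	}
	for _, m := range migrations {
		var applied int
		if err := db.QueryRowContext(ctx,
			`SELECT COUNT(*) FROM schema_migrations WHERE name = ?`, m.Name).Scan(&applied); err != nil {
			return err
		}
		if applied > 0 {
			continue // already applied
		}
		if err := m.Up(ctx, db); err != nil {
			return err
		}
		if _, err := db.ExecContext(ctx,
			`INSERT INTO schema_migrations (name) VALUES (?)`, m.Name); err != nil {
			return err
		}
	}
	return nil
}
```
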
Vibhu Pandey
c574adc634 feat(sqlstore): add sqlstore package (#6835)
### Summary

Add `sqlstore` package
2025-01-17 15:54:48 +05:30
Vibhu Pandey
939ab5270e feat(instrumentation): use new config factory (#6834)
### Summary

Use new config factory and remove redundant configuration possibilities from the upstream
2025-01-17 14:54:33 +05:30
Vibhu Pandey
42525b6067 feat(factory): add factory package (#6832)
- Introduces `Config`, `ConfigFactory`, `ProviderFactory`, and `Service` interfaces in `config.go`, `provider.go`, and `service.go`.
- Implements `NamedMap` for managing named factories in `named.go`.
- Adds `ProviderSettings` and `ScopedProviderSettings` for managing provider settings in `setting.go`.
2025-01-17 09:13:11 +00:00
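
A hedged sketch of what the NamedMap described above could look like with Go generics; the exact shape in named.go may differ.

```go
package factory

import "fmt"

// Named is anything that can report a unique name.
type Named interface{ Name() string }

// NamedMap stores factories keyed by their name; the generics-based
// layout here is an illustrative guess, not the actual implementation.
type NamedMap[T Named] struct {
	factories map[string]T
}

func NewNamedMap[T Named](items ...T) (NamedMap[T], error) {
	m := NamedMap[T]{factories: make(map[string]T, len(items))}
	for _, item := range items {
		if _, ok := m.factories[item.Name()]; ok {
			return NamedMap[T]{}, fmt.Errorf("factory %q registered twice", item.Name())
		}
		m.factories[item.Name()] = item
	}
	return m, nil
}

func (m NamedMap[T]) Get(name string) (T, error) {
	f, ok := m.factories[name]
	if !ok {
		var zero T
		return zero, fmt.Errorf("factory %q not found", name)
	}
	return f, nil
}
```
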
Nishanth Arcot
c66cd3ce4e feat: update page titles for dashboards and alerts (#6706) 2025-01-17 09:02:41 +00:00
Vikrant Gupta
e9618d64bc fix(infra-monitoring): use proper axios instance for retrying the 401 req (#6841) 2025-01-17 10:33:09 +05:30
Raj Kamal Singh
8e11a988be chore: cloud integrations: include cloud account id in account status response (#6833) 2025-01-16 22:51:35 +05:30
Srikanth Chekuri
92299e1b08 chore: add count based limits for metrics (#6738) 2025-01-16 18:29:53 +05:30
Raj Kamal Singh
bab8c8274c feat: aws integration UI facing api: services (#6803)
* feat: cloud service integrations: get model and repo interface started

* feat: cloud service integrations: flesh out more of cloud services model

* feat: cloud integrations: reorganize things a little

* feat: cloud integrations: get svc controller started

* feat: cloud integrations: add stubs for EC2 and RDS postgres services

* feat: cloud integrations: add validation for listing and getting available svcs and some cleanup

* feat: cloud integrations: refactor helpers in existing integrations code for reuse

* feat: cloud integrations: parsing of cloud service definitions

* feat: cloud integrations: impl for getCloudProviderService

* feat: cloud integrations: some reorganization

* feat: cloud integrations: some more cleanup

* feat: cloud integrations: add validation for listing available cloud provider services

* feat: cloud integrations: API endpoint for listing available cloud provider services

* feat: cloud integrations: add validation for getting details of a particular service

* feat: cloud integrations: API endpoint for getting details of a service

* feat: cloud integrations: add controller validation for configuring cloud services

* feat: cloud integrations: get serviceConfigRepo started

* feat: cloud integrations: service config in service list summaries when queried for cloud account id

* feat: cloud integrations: only a supported service for a connected cloud account can be configured

* feat: cloud integrations: add validation for configuring services via the API

* feat: cloud integrations: API for configuring services

* feat: cloud integrations: some cleanup

* feat: cloud integrations: fix broken test

---------

Co-authored-by: Srikanth Chekuri <srikanth.chekuri92@gmail.com>
2025-01-16 17:36:09 +05:30
primus-bot[bot]
265c67e5bd chore(release): bump to v0.68.0 (#6824)
Co-authored-by: primus-bot[bot] <171087277+primus-bot[bot]@users.noreply.github.com>
2025-01-15 15:15:09 +05:30
SagarRajput-7
efc8c95d59 fix: fixed tables view not honouring order by in traces explorer (#6769)
* fix: fixed tables view not honouring order by in traces explorer

* fix: fixed order by count not being honoured in table view - trace
2025-01-15 15:00:40 +05:30
aniketio-ctrl
5708079c3c chore: added series aggregation for group by with value type panel (#6744) 2025-01-14 23:13:47 +05:30
dependabot[bot]
dbe78e55a9 chore(deps): bump golang.org/x/net from 0.29.0 to 0.33.0 (#6820)
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2025-01-14 15:06:51 +00:00
Vishal Sharma
a60371fb80 chore: update telemetry events (#6804) 2025-01-14 01:03:12 +05:30
Raj Kamal Singh
d5b847c091 feat: aws integration: UI facing QS api for cloud account management (#6771)
* feat: init app/cloud_integrations

* feat: get API test started for cloudintegrations account lifecycle

* feat: cloudintegrations: get controller started

* feat: cloud integrations: add cloudintegrations.Controller to APIHandler and servers

* feat: cloud integrations: get routes started

* feat: cloud integrations: get accounts table schema started

* feat: cloud integrations: get cloudProviderAccountsSQLRepository started

* feat: cloud integrations: cloudProviderAccountsSQLRepository.listAccounts

* feat: cloud integrations: http handler and controller plumbing for /generate-connection-url

* feat: cloud integrations: cloudProviderAccountsSQLRepository.upsert

* feat: cloud integrations: finish up with /generate-connection-url

* feat: cloud integrations: add cloudProviderAccountsRepository.get

* feat: cloud integrations: add API test expectation for being able to get account status

* feat: cloud integrations: add http handler and controller method for getting account status

* feat: cloud integrations: ensure unconnected accounts aren't included in list of connected accounts

* feat: cloud integrations: add test expectation for agent check in request

* feat: cloud integrations: agent check in API

* feat: cloud integrations: ensure polling for status after agent check in works

* feat: cloud integrations: ensure account included in connected account list after agent check in

* feat: cloud integrations: add API expectation for updating account config

* feat: cloud integrations: API for updating cloud account config

* feat: cloud integrations: expectation for agent receiving latest config after account config update

* feat: cloud integrations: expectation for disconnecting cloud accounts from UI

* feat: cloud integrations: API for disconnecting cloud accounts

* feat: cloud integrations: some cleanup

* feat: cloud integrations: some more cleanup

* feat: cloud integrations: repo: scope rows by cloud provider

* feat: testutils: refactor out helper for creating a test sqlite DB

* feat: cloud integrations: controller: add test validating regeneration of connection url

* feat: cloud integrations: controller: validations for agent check ins

* feat: cloud integrations: connected account response structure

* feat: cloud integrations: API response account structure

* feat: cloud integrations: some more cleanup

* feat: cloud integrations: remove cloudProviderAccountsRepository.GetById

* feat: cloud integrations: shouldn't be able to disconnect non-existent account

* feat: cloud integrations: validate agents can't check in to cloud account with 2 signoz ids

* feat: cloud integrations: ensure agents can't check in to cloud account with 2 signoz ids

* feat: cloud integrations: remove stray import of ee/model in cloudintegrations controller
2025-01-10 18:43:35 +05:30
618 changed files with 46222 additions and 13175 deletions


@@ -3,4 +3,7 @@
.vscode
README.md
deploy
sample-apps
# frontend
node_modules

.github/workflows/goreleaser.yaml

@@ -0,0 +1,35 @@
name: goreleaser
on:
push:
tags:
- v*
- histogram-quantile/v*
permissions:
contents: write
jobs:
goreleaser:
runs-on: ubuntu-latest
strategy:
matrix:
workdirs:
- scripts/clickhouse/histogramquantile
steps:
- name: checkout
uses: actions/checkout@v4
with:
fetch-depth: 0
- name: set-up-go
uses: actions/setup-go@v5
- name: run-goreleaser
uses: goreleaser/goreleaser-action@v6
with:
distribution: goreleaser-pro
version: '~> v2'
args: release --clean
workdir: ${{ matrix.workdirs }}
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
GORELEASER_KEY: ${{ secrets.GORELEASER_KEY }}


@@ -1,19 +0,0 @@
# This workflow will inspect a pull request to ensure there is a linked issue or a
# valid issue is mentioned in the body. If neither is present it fails the check and adds
# a comment alerting users of this missing requirement.
name: VerifyIssue
on:
pull_request:
types: [edited, opened]
check_run:
jobs:
verify_linked_issue:
runs-on: ubuntu-latest
name: Ensure Pull Request has a linked issue.
steps:
- name: Verify Linked Issue
uses: srikanthccv/verify-linked-issue-action@v0.71
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}


@@ -7,6 +7,7 @@ on:
jobs:
detect:
if: ${{ !startsWith(github.event.release.tag_name, 'histogram-quantile/') }}
runs-on: ubuntu-latest
outputs:
release_type: ${{ steps.find.outputs.release_type }}
@@ -22,6 +23,7 @@ jobs:
fi
echo "release_type=${release_type}" >> "$GITHUB_OUTPUT"
charts:
if: ${{ !startsWith(github.event.release.tag_name, 'histogram-quantile/') }}
uses: signoz/primus.workflows/.github/workflows/github-trigger.yaml@main
secrets: inherit
needs: [detect]

.gitignore

@@ -52,7 +52,7 @@ ee/query-service/tests/test-deploy/data/
/deploy/docker/clickhouse-setup/data/
/deploy/docker-swarm/clickhouse-setup/data/
bin/
.local/
*/query-service/queries.active
# e2e
@@ -70,3 +70,9 @@ vendor/
# git-town
.git-branches.toml
# goreleaser
dist/
# ignore user_scripts that is fetched by init-clickhouse
deploy/common/clickhouse/user_scripts/


@@ -3,16 +3,10 @@
tasks:
- name: Run Script to Comment ut required lines
init: |
cd ./.scripts
sh commentLinesForSetup.sh
- name: Run Docker Images
init: |
cd ./deploy
sudo docker-compose -f docker/clickhouse-setup/docker-compose.yaml up -d
# command:
cd ./deploy/docker
sudo docker compose up -d
- name: Run Frontend
init: |


@@ -141,9 +141,9 @@ Depending upon your area of expertise & interest, you can choose one or more to
# 3. Develop Frontend 🌚
**Need to Update: [https://github.com/SigNoz/signoz/tree/develop/frontend](https://github.com/SigNoz/signoz/tree/develop/frontend)**
**Need to Update: [https://github.com/SigNoz/signoz/tree/main/frontend](https://github.com/SigNoz/signoz/tree/main/frontend)**
Also, have a look at [Frontend README.md](https://github.com/SigNoz/signoz/blob/develop/frontend/README.md) sections for more info on how to setup SigNoz frontend locally (with and without Docker).
Also, have a look at [Frontend README.md](https://github.com/SigNoz/signoz/blob/main/frontend/README.md) sections for more info on how to setup SigNoz frontend locally (with and without Docker).
## 3.1 Contribute to Frontend with Docker installation of SigNoz
@@ -151,14 +151,14 @@ Also, have a look at [Frontend README.md](https://github.com/SigNoz/signoz/blob/
```
git clone https://github.com/SigNoz/signoz.git && cd signoz
```
- Comment out `frontend` service section at [`deploy/docker/clickhouse-setup/docker-compose.yaml#L68`](https://github.com/SigNoz/signoz/blob/develop/deploy/docker/clickhouse-setup/docker-compose.yaml#L68)
- Comment out `frontend` service section at [`deploy/docker/docker-compose.yaml#L68`](https://github.com/SigNoz/signoz/blob/main/deploy/docker/docker-compose.yaml#L68)
![develop-frontend](https://user-images.githubusercontent.com/52788043/179009217-6692616b-17dc-4d27-b587-9d007098d739.jpeg)
- run `cd deploy` to move to deploy directory,
- Install signoz locally **without** the frontend,
- Add / Uncomment the below configuration to query-service section at [`deploy/docker/clickhouse-setup/docker-compose.yaml#L47`](https://github.com/SigNoz/signoz/blob/develop/deploy/docker/clickhouse-setup/docker-compose.yaml#L47)
- Add / Uncomment the below configuration to query-service section at [`deploy/docker/docker-compose.yaml#L47`](https://github.com/SigNoz/signoz/blob/main/deploy/docker/docker-compose.yaml#L47)
```
ports:
- "8080:8080"
@@ -167,9 +167,10 @@ Also, have a look at [Frontend README.md](https://github.com/SigNoz/signoz/blob/
- Next run,
```
sudo docker-compose -f docker/clickhouse-setup/docker-compose.yaml up -d
cd deploy/docker
sudo docker compose up -d
```
- `cd ../frontend` and change baseURL in file [`frontend/src/constants/env.ts#L2`](https://github.com/SigNoz/signoz/blob/develop/frontend/src/constants/env.ts#L2) and for that, you need to create a `.env` file in the `frontend` directory with the following environment variable (`FRONTEND_API_ENDPOINT`) matching your configuration.
- `cd ../frontend` and change baseURL in file [`frontend/src/constants/env.ts#L2`](https://github.com/SigNoz/signoz/blob/main/frontend/src/constants/env.ts#L2) and for that, you need to create a `.env` file in the `frontend` directory with the following environment variable (`FRONTEND_API_ENDPOINT`) matching your configuration.
If you have backend api exposed via frontend nginx:
```
@@ -186,11 +187,6 @@ Also, have a look at [Frontend README.md](https://github.com/SigNoz/signoz/blob/
yarn dev
```
### Important Notes:
The Maintainers / Contributors who will change Line Numbers of `Frontend` & `Query-Section`, please update line numbers in [`/.scripts/commentLinesForSetup.sh`](https://github.com/SigNoz/signoz/blob/develop/.scripts/commentLinesForSetup.sh)
**[`^top^`](#contributing-guidelines)**
## 3.2 Contribute to Frontend without installing SigNoz backend
If you don't want to install the SigNoz backend just for doing frontend development, we can provide you with test environments that you can use as the backend.
@@ -216,7 +212,7 @@ Please ping us in the [`#contributing`](https://signoz-community.slack.com/archi
# 4. Contribute to Backend (Query-Service) 🌑
**Need to Update: [https://github.com/SigNoz/signoz/tree/develop/pkg/query-service](https://github.com/SigNoz/signoz/tree/develop/pkg/query-service)**
**Need to Update: [https://github.com/SigNoz/signoz/tree/main/pkg/query-service](https://github.com/SigNoz/signoz/tree/main/pkg/query-service)**
## 4.1 Prerequisites
@@ -242,13 +238,13 @@ Please ping us in the [`#contributing`](https://signoz-community.slack.com/archi
git clone https://github.com/SigNoz/signoz.git && cd signoz
```
- run `sudo make dev-setup` to configure local setup to run query-service,
- Comment out `frontend` service section at [`deploy/docker/clickhouse-setup/docker-compose.yaml#L68`](https://github.com/SigNoz/signoz/blob/develop/deploy/docker/clickhouse-setup/docker-compose.yaml#L68)
- Comment out `frontend` service section at [`deploy/docker/docker-compose.yaml#L68`](https://github.com/SigNoz/signoz/blob/main/deploy/docker/docker-compose.yaml#L68)
<img width="982" alt="develop-frontend" src="https://user-images.githubusercontent.com/52788043/179043977-012be8b0-a2ed-40d1-b2e6-2ab72d7989c0.png">
- Comment out `query-service` section at [`deploy/docker/clickhouse-setup/docker-compose.yaml#L41`,](https://github.com/SigNoz/signoz/blob/develop/deploy/docker/clickhouse-setup/docker-compose.yaml#L41)
- Comment out `query-service` section at [`deploy/docker/docker-compose.yaml#L41`,](https://github.com/SigNoz/signoz/blob/main/deploy/docker/docker-compose.yaml#L41)
<img width="1068" alt="Screenshot 2022-07-14 at 22 48 07" src="https://user-images.githubusercontent.com/52788043/179044151-a65ba571-db0b-4a16-b64b-ca3fadcf3af0.png">
- add below configuration to `clickhouse` section at [`deploy/docker/clickhouse-setup/docker-compose.yaml`,](https://github.com/SigNoz/signoz/blob/develop/deploy/docker/clickhouse-setup/docker-compose.yaml)
- add below configuration to `clickhouse` section at [`deploy/docker/docker-compose.yaml`,](https://github.com/SigNoz/signoz/blob/main/deploy/docker/docker-compose.yaml)
```
ports:
- 9001:9000
@@ -258,9 +254,9 @@ Please ping us in the [`#contributing`](https://signoz-community.slack.com/archi
- run `cd pkg/query-service/` to move to `query-service` directory,
- Then, you need to create a `.env` file with the following environment variable
```
SIGNOZ_LOCAL_DB_PATH="./signoz.db"
SIGNOZ_SQLSTORE_SQLITE_PATH="./signoz.db"
```
to set your local environment with the right `RELATIONAL_DATASOURCE_PATH` as mentioned in [`./constants/constants.go#L38`,](https://github.com/SigNoz/signoz/blob/develop/pkg/query-service/constants/constants.go#L38)
to set your local environment with the right `RELATIONAL_DATASOURCE_PATH` as mentioned in [`./constants/constants.go#L38`,](https://github.com/SigNoz/signoz/blob/main/pkg/query-service/constants/constants.go#L38)
- Now, install SigNoz locally **without** the `frontend` and `query-service`,
- If you are using `x86_64` processors (All Intel/AMD processors) run `sudo make run-x86`
@@ -294,13 +290,10 @@ docker pull signoz/query-service:develop
```
### Important Note:
The Maintainers / Contributors who will change Line Numbers of `Frontend` & `Query-Section`, please update line numbers in [`/.scripts/commentLinesForSetup.sh`](https://github.com/SigNoz/signoz/blob/develop/.scripts/commentLinesForSetup.sh)
**Query Service should now be available at** [`http://localhost:8080`](http://localhost:8080)
If you want to see how the frontend plays with query service, you can run the frontend also in your local env with the baseURL changed to `http://localhost:8080` in file [`frontend/src/constants/env.ts`](https://github.com/SigNoz/signoz/blob/develop/frontend/src/constants/env.ts) as the `query-service` is now running at port `8080`.
If you want to see how the frontend plays with query service, you can run the frontend also in your local env with the baseURL changed to `http://localhost:8080` in file [`frontend/src/constants/env.ts`](https://github.com/SigNoz/signoz/blob/main/frontend/src/constants/env.ts) as the `query-service` is now running at port `8080`.
<!-- Instead of configuring a local setup, you can also use [Gitpod](https://www.gitpod.io/), a VSCode-based Web IDE.


@@ -15,8 +15,9 @@ DEV_BUILD ?= "" # set to any non-empty value to enable dev build
FRONTEND_DIRECTORY ?= frontend
QUERY_SERVICE_DIRECTORY ?= pkg/query-service
EE_QUERY_SERVICE_DIRECTORY ?= ee/query-service
STANDALONE_DIRECTORY ?= deploy/docker/clickhouse-setup
SWARM_DIRECTORY ?= deploy/docker-swarm/clickhouse-setup
STANDALONE_DIRECTORY ?= deploy/docker
SWARM_DIRECTORY ?= deploy/docker-swarm
CH_HISTOGRAM_QUANTILE_DIRECTORY ?= scripts/clickhouse/histogramquantile
GOOS ?= $(shell go env GOOS)
GOARCH ?= $(shell go env GOARCH)
@@ -142,16 +143,6 @@ dev-setup:
@echo "--> Local Setup completed"
@echo "------------------"
run-local:
@docker-compose -f \
$(STANDALONE_DIRECTORY)/docker-compose-core.yaml -f $(STANDALONE_DIRECTORY)/docker-compose-local.yaml \
up --build -d
down-local:
@docker-compose -f \
$(STANDALONE_DIRECTORY)/docker-compose-core.yaml -f $(STANDALONE_DIRECTORY)/docker-compose-local.yaml \
down -v
pull-signoz:
@docker-compose -f $(STANDALONE_DIRECTORY)/docker-compose.yaml pull
@@ -191,3 +182,15 @@ check-no-ee-references:
test:
go test ./pkg/...
goreleaser-snapshot:
@if [[ ${GORELEASER_WORKDIR} ]]; then \
cd ${GORELEASER_WORKDIR} && \
goreleaser release --clean --snapshot; \
cd -; \
else \
goreleaser release --clean --snapshot; \
fi
goreleaser-snapshot-histogram-quantile:
make GORELEASER_WORKDIR=$(CH_HISTOGRAM_QUANTILE_DIRECTORY) goreleaser-snapshot


@@ -13,9 +13,9 @@
<h3 align="center">
<a href="https://signoz.io/docs"><b>Dokumentation</b></a> &bull;
<a href="https://github.com/SigNoz/signoz/blob/develop/README.md"><b>Readme auf Englisch </b></a> &bull;
<a href="https://github.com/SigNoz/signoz/blob/develop/README.zh-cn.md"><b>ReadMe auf Chinesisch</b></a> &bull;
<a href="https://github.com/SigNoz/signoz/blob/develop/README.pt-br.md"><b>ReadMe auf Portugiesisch</b></a> &bull;
<a href="https://github.com/SigNoz/signoz/blob/main/README.md"><b>Readme auf Englisch </b></a> &bull;
<a href="https://github.com/SigNoz/signoz/blob/main/README.zh-cn.md"><b>ReadMe auf Chinesisch</b></a> &bull;
<a href="https://github.com/SigNoz/signoz/blob/main/README.pt-br.md"><b>ReadMe auf Portugiesisch</b></a> &bull;
<a href="https://signoz.io/slack"><b>Slack Community</b></a> &bull;
<a href="https://twitter.com/SigNozHq"><b>Twitter</b></a>
</h3>


@@ -17,9 +17,9 @@
<h3 align="center">
<a href="https://signoz.io/docs"><b>Documentation</b></a> &bull;
<a href="https://github.com/SigNoz/signoz/blob/develop/README.zh-cn.md"><b>ReadMe in Chinese</b></a> &bull;
<a href="https://github.com/SigNoz/signoz/blob/develop/README.de-de.md"><b>ReadMe in German</b></a> &bull;
<a href="https://github.com/SigNoz/signoz/blob/develop/README.pt-br.md"><b>ReadMe in Portuguese</b></a> &bull;
<a href="https://github.com/SigNoz/signoz/blob/main/README.zh-cn.md"><b>ReadMe in Chinese</b></a> &bull;
<a href="https://github.com/SigNoz/signoz/blob/main/README.de-de.md"><b>ReadMe in German</b></a> &bull;
<a href="https://github.com/SigNoz/signoz/blob/main/README.pt-br.md"><b>ReadMe in Portuguese</b></a> &bull;
<a href="https://signoz.io/slack"><b>Slack Community</b></a> &bull;
<a href="https://twitter.com/SigNozHq"><b>Twitter</b></a>
</h3>


@@ -12,9 +12,9 @@
<h3 align="center">
<a href="https://signoz.io/docs"><b>文档</b></a>
<a href="https://github.com/SigNoz/signoz/blob/develop/README.zh-cn.md"><b>中文ReadMe</b></a>
<a href="https://github.com/SigNoz/signoz/blob/develop/README.de-de.md"><b>德文ReadMe</b></a>
<a href="https://github.com/SigNoz/signoz/blob/develop/README.pt-br.md"><b>葡萄牙语ReadMe</b></a>
<a href="https://github.com/SigNoz/signoz/blob/main/README.zh-cn.md"><b>中文ReadMe</b></a>
<a href="https://github.com/SigNoz/signoz/blob/main/README.de-de.md"><b>德文ReadMe</b></a>
<a href="https://github.com/SigNoz/signoz/blob/main/README.pt-br.md"><b>葡萄牙语ReadMe</b></a>
<a href="https://signoz.io/slack"><b>Slack 社区</b></a>
<a href="https://twitter.com/SigNozHq"><b>Twitter</b></a>
</h3>


@@ -1,32 +0,0 @@
##################### SigNoz Configuration Defaults #####################
#
# Do not modify this file
#
##################### Web #####################
web:
# The prefix to serve web on
prefix: /
# The directory containing the static build files.
directory: /etc/signoz/web
##################### Cache #####################
cache:
# specifies the caching provider to use.
provider: memory
# memory: Uses in-memory caching.
memory:
# Time-to-live for cache entries in memory. Specify the duration in ns
ttl: 60000000000
# The interval at which the cache will be cleaned up
cleanupInterval:
# redis: Uses Redis as the caching backend.
redis:
# The hostname or IP address of the Redis server.
host: localhost
# The port on which the Redis server is running. Default is usually 6379.
port: 6379
# The password for authenticating with the Redis server, if required.
password:
# The Redis database number to use
db: 0

conf/example.yaml

@@ -0,0 +1,95 @@
##################### SigNoz Configuration Example #####################
#
# Do not modify this file
#
##################### Instrumentation #####################
instrumentation:
logs:
# The log level to use.
level: info
traces:
# Whether to enable tracing.
enabled: false
processors:
batch:
exporter:
otlp:
endpoint: localhost:4317
metrics:
# Whether to enable metrics.
enabled: true
readers:
pull:
exporter:
prometheus:
host: "0.0.0.0"
port: 9090
##################### Web #####################
web:
# Whether to enable the web frontend
enabled: true
# The prefix to serve web on
prefix: /
# The directory containing the static build files.
directory: /etc/signoz/web
##################### Cache #####################
cache:
# specifies the caching provider to use.
provider: memory
# memory: Uses in-memory caching.
memory:
# Time-to-live for cache entries in memory. Specify the duration in ns
ttl: 60000000000
# The interval at which the cache will be cleaned up
cleanupInterval: 1m
# redis: Uses Redis as the caching backend.
redis:
# The hostname or IP address of the Redis server.
host: localhost
# The port on which the Redis server is running. Default is usually 6379.
port: 6379
# The password for authenticating with the Redis server, if required.
password:
# The Redis database number to use
db: 0
##################### SQLStore #####################
sqlstore:
# specifies the SQLStore provider to use.
provider: sqlite
# The maximum number of open connections to the database.
max_open_conns: 100
sqlite:
# The path to the SQLite database file.
path: /var/lib/signoz/signoz.db
##################### APIServer #####################
apiserver:
timeout:
default: 60s
max: 600s
excluded_routes:
- /api/v1/logs/tail
- /api/v3/logs/livetail
logging:
excluded_routes:
- /api/v1/health
##################### TelemetryStore #####################
telemetrystore:
# specifies the telemetrystore provider to use.
provider: clickhouse
clickhouse:
# The DSN to use for ClickHouse.
dsn: http://localhost:9000
# Maximum number of idle connections in the connection pool.
max_idle_conns: 50
# Maximum number of open connections to the database.
max_open_conns: 100
# Maximum time to wait for a connection to be established.
dial_timeout: 5s


@@ -18,65 +18,64 @@ Now run the following command to install:
### Using Docker Compose
If you don't have docker-compose set up, please follow [this guide](https://docs.docker.com/compose/install/)
If you don't have docker compose set up, please follow [this guide](https://docs.docker.com/compose/install/)
to set up docker compose before proceeding with the next steps.
For x86 chip (amd):
```sh
docker-compose -f docker/clickhouse-setup/docker-compose.yaml up -d
cd deploy/docker
docker compose up -d
```
Open http://localhost:3301 in your favourite browser. In couple of minutes, you should see
the data generated from hotrod in SigNoz UI.
Open http://localhost:3301 in your favourite browser.
## Kubernetes
### Using Helm
#### Bring up SigNoz cluster
To start collecting logs and metrics from your infrastructure, run the following command:
```sh
helm repo add signoz https://charts.signoz.io
kubectl create ns platform
helm -n platform install my-release signoz/signoz
cd generator/infra
docker compose up -d
```
To access the UI, you can `port-forward` the frontend service:
To start generating sample traces, run the following command:
```sh
kubectl -n platform port-forward svc/my-release-frontend 3301:3301
cd generator/hotrod
docker compose up -d
```
Open http://localhost:3301 in your favourite browser. Few minutes after you generate load
from the HotROD application, you should see the data generated from hotrod in SigNoz UI.
In a couple of minutes, you should see the data generated from hotrod in SigNoz UI.
#### Test HotROD application with SigNoz
For more details, please refer to the [SigNoz documentation](https://signoz.io/docs/install/docker/).
## Docker Swarm
To install SigNoz using Docker Swarm, run the following command:
```sh
kubectl create ns sample-application
kubectl -n sample-application apply -f https://raw.githubusercontent.com/SigNoz/signoz/main/sample-apps/hotrod/hotrod.yaml
cd deploy/docker-swarm
docker stack deploy -c docker-compose.yaml signoz
```
To generate load:
Open http://localhost:3301 in your favourite browser.
To start collecting logs and metrics from your infrastructure, run the following command:
```sh
kubectl -n sample-application run strzal --image=djbingham/curl \
--restart='OnFailure' -i --tty --rm --command -- curl -X POST -F \
'user_count=6' -F 'spawn_rate=2' http://locust-master:8089/swarm
cd generator/infra
docker stack deploy -c docker-compose.yaml infra
```
To stop load:
To start generating sample traces, run the following command:
```sh
kubectl -n sample-application run strzal --image=djbingham/curl \
--restart='OnFailure' -i --tty --rm --command -- curl \
http://locust-master:8089/stop
cd generator/hotrod
docker stack deploy -c docker-compose.yaml hotrod
```
In a couple of minutes, you should see the data generated from hotrod in SigNoz UI.
For more details, please refer to the [SigNoz documentation](https://signoz.io/docs/install/docker-swarm/).
## Uninstall/Troubleshoot?
Go to our official documentation site [signoz.io/docs](https://signoz.io/docs) for more.


@@ -10,14 +10,14 @@
<host>zookeeper-1</host>
<port>2181</port>
</node>
<!-- <node index="2">
<node index="2">
<host>zookeeper-2</host>
<port>2181</port>
</node>
<node index="3">
<host>zookeeper-3</host>
<port>2181</port>
</node> -->
</node>
</zookeeper>
<!-- Configuration of clusters that could be used in Distributed tables.
@@ -58,7 +58,7 @@
<!-- <priority>1</priority> -->
</replica>
</shard>
<!-- <shard>
<shard>
<replica>
<host>clickhouse-2</host>
<port>9000</port>
@@ -69,7 +69,7 @@
<host>clickhouse-3</host>
<port>9000</port>
</replica>
</shard> -->
</shard>
</cluster>
</remote_servers>
</clickhouse>
</clickhouse>


@@ -72,4 +72,4 @@
</shard> -->
</cluster>
</remote_servers>
</clickhouse>
</clickhouse>


@@ -370,7 +370,7 @@
<!-- Path to temporary data for processing hard queries. -->
<tmp_path>/var/lib/clickhouse/tmp/</tmp_path>
<!-- Disable AuthType plaintext_password and no_password for ACL. -->
<!-- <allow_plaintext_password>0</allow_plaintext_password> -->
<!-- <allow_no_password>0</allow_no_password> -->
@@ -652,12 +652,12 @@
See https://clickhouse.com/docs/en/engines/table-engines/mergetree-family/replication/#creating-replicated-tables
-->
<macros>
<shard>01</shard>
<replica>example01-01-1</replica>
</macros>
<!-- Reloading interval for embedded dictionaries, in seconds. Default: 3600. -->
@@ -716,7 +716,7 @@
asynchronous_metrics - send data from table system.asynchronous_metrics
status_info - send data from different component from CH, ex: Dictionaries status
-->
<!--
<prometheus>
<endpoint>/metrics</endpoint>
<port>9363</port>
@@ -726,7 +726,6 @@
<asynchronous_metrics>true</asynchronous_metrics>
<status_info>true</status_info>
</prometheus>
-->
<!-- Query log. Used only for queries with setting log_queries = 1. -->
<query_log>


@@ -44,7 +44,7 @@ server {
location /api {
proxy_pass http://query-service:8080/api;
# connection will be closed if no data is read for 600s between successive read operations
proxy_read_timeout 600s;
proxy_read_timeout 600s;
}
location /ws {


@@ -12,10 +12,10 @@ alerting:
- alertmanager:9093
# Load rules once and periodically evaluate them according to the global 'evaluation_interval'.
rule_files:
rule_files: []
# - "first_rules.yml"
# - "second_rules.yml"
- 'alerts.yml'
# - 'alerts.yml'
# A scrape configuration containing exactly one endpoint to scrape:
# Here it's Prometheus itself.


@@ -1,35 +0,0 @@
global:
resolve_timeout: 1m
slack_api_url: 'https://hooks.slack.com/services/xxx'
route:
receiver: 'slack-notifications'
receivers:
- name: 'slack-notifications'
slack_configs:
- channel: '#alerts'
send_resolved: true
icon_url: https://avatars3.githubusercontent.com/u/3380462
title: |-
[{{ .Status | toUpper }}{{ if eq .Status "firing" }}:{{ .Alerts.Firing | len }}{{ end }}] {{ .CommonLabels.alertname }} for {{ .CommonLabels.job }}
{{- if gt (len .CommonLabels) (len .GroupLabels) -}}
{{" "}}(
{{- with .CommonLabels.Remove .GroupLabels.Names }}
{{- range $index, $label := .SortedPairs -}}
{{ if $index }}, {{ end }}
{{- $label.Name }}="{{ $label.Value -}}"
{{- end }}
{{- end -}}
)
{{- end }}
text: >-
{{ range .Alerts -}}
*Alert:* {{ .Annotations.title }}{{ if .Labels.severity }} - `{{ .Labels.severity }}`{{ end }}
*Description:* {{ .Annotations.description }}
*Details:*
{{ range .Labels.SortedPairs }} • *{{ .Name }}:* `{{ .Value }}`
{{ end }}
{{ end }}


@@ -1,11 +0,0 @@
groups:
- name: ExampleCPULoadGroup
rules:
- alert: HighCpuLoad
expr: system_cpu_load_average_1m > 0.1
for: 0m
labels:
severity: warning
annotations:
summary: High CPU load
description: "CPU load is > 0.1\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"

(File diff suppressed because it is too large.)


@@ -1,251 +0,0 @@
version: "3.9"
x-clickhouse-defaults: &clickhouse-defaults
image: clickhouse/clickhouse-server:24.1.2-alpine
tty: true
deploy:
restart_policy:
condition: on-failure
depends_on:
- zookeeper-1
# - zookeeper-2
# - zookeeper-3
logging:
options:
max-size: 50m
max-file: "3"
healthcheck:
# "clickhouse", "client", "-u ${CLICKHOUSE_USER}", "--password ${CLICKHOUSE_PASSWORD}", "-q 'SELECT 1'"
test: ["CMD", "wget", "--spider", "-q", "0.0.0.0:8123/ping"]
interval: 30s
timeout: 5s
retries: 3
ulimits:
nproc: 65535
nofile:
soft: 262144
hard: 262144
x-db-depend: &db-depend
depends_on:
- clickhouse
- otel-collector-migrator
# - clickhouse-2
# - clickhouse-3
services:
zookeeper-1:
image: bitnami/zookeeper:3.7.1
hostname: zookeeper-1
user: root
ports:
- "2181:2181"
- "2888:2888"
- "3888:3888"
volumes:
- ./data/zookeeper-1:/bitnami/zookeeper
environment:
- ZOO_SERVER_ID=1
# - ZOO_SERVERS=0.0.0.0:2888:3888,zookeeper-2:2888:3888,zookeeper-3:2888:3888
- ALLOW_ANONYMOUS_LOGIN=yes
- ZOO_AUTOPURGE_INTERVAL=1
# zookeeper-2:
# image: bitnami/zookeeper:3.7.0
# hostname: zookeeper-2
# user: root
# ports:
# - "2182:2181"
# - "2889:2888"
# - "3889:3888"
# volumes:
# - ./data/zookeeper-2:/bitnami/zookeeper
# environment:
# - ZOO_SERVER_ID=2
# - ZOO_SERVERS=zookeeper-1:2888:3888,0.0.0.0:2888:3888,zookeeper-3:2888:3888
# - ALLOW_ANONYMOUS_LOGIN=yes
# - ZOO_AUTOPURGE_INTERVAL=1
# zookeeper-3:
# image: bitnami/zookeeper:3.7.0
# hostname: zookeeper-3
# user: root
# ports:
# - "2183:2181"
# - "2890:2888"
# - "3890:3888"
# volumes:
# - ./data/zookeeper-3:/bitnami/zookeeper
# environment:
# - ZOO_SERVER_ID=3
# - ZOO_SERVERS=zookeeper-1:2888:3888,zookeeper-2:2888:3888,0.0.0.0:2888:3888
# - ALLOW_ANONYMOUS_LOGIN=yes
# - ZOO_AUTOPURGE_INTERVAL=1
clickhouse:
!!merge <<: *clickhouse-defaults
hostname: clickhouse
# ports:
# - "9000:9000"
# - "8123:8123"
# - "9181:9181"
volumes:
- ./clickhouse-config.xml:/etc/clickhouse-server/config.xml
- ./clickhouse-users.xml:/etc/clickhouse-server/users.xml
- ./clickhouse-cluster.xml:/etc/clickhouse-server/config.d/cluster.xml
# - ./clickhouse-storage.xml:/etc/clickhouse-server/config.d/storage.xml
- ./data/clickhouse/:/var/lib/clickhouse/
# clickhouse-2:
# <<: *clickhouse-defaults
# hostname: clickhouse-2
# ports:
# - "9001:9000"
# - "8124:8123"
# - "9182:9181"
# volumes:
# - ./clickhouse-config.xml:/etc/clickhouse-server/config.xml
# - ./clickhouse-users.xml:/etc/clickhouse-server/users.xml
# - ./clickhouse-cluster.xml:/etc/clickhouse-server/config.d/cluster.xml
# # - ./clickhouse-storage.xml:/etc/clickhouse-server/config.d/storage.xml
# - ./data/clickhouse-2/:/var/lib/clickhouse/
# clickhouse-3:
# <<: *clickhouse-defaults
# hostname: clickhouse-3
# ports:
# - "9002:9000"
# - "8125:8123"
# - "9183:9181"
# volumes:
# - ./clickhouse-config.xml:/etc/clickhouse-server/config.xml
# - ./clickhouse-users.xml:/etc/clickhouse-server/users.xml
# - ./clickhouse-cluster.xml:/etc/clickhouse-server/config.d/cluster.xml
# # - ./clickhouse-storage.xml:/etc/clickhouse-server/config.d/storage.xml
# - ./data/clickhouse-3/:/var/lib/clickhouse/
alertmanager:
image: signoz/alertmanager:0.23.7
volumes:
- ./data/alertmanager:/data
command:
- --queryService.url=http://query-service:8085
- --storage.path=/data
depends_on:
- query-service
deploy:
restart_policy:
condition: on-failure
query-service:
image: signoz/query-service:0.67.1
command: ["-config=/root/config/prometheus.yml", "--use-logs-new-schema=true", "--use-trace-new-schema=true"]
# ports:
# - "6060:6060" # pprof port
# - "8080:8080" # query-service port
volumes:
- ./prometheus.yml:/root/config/prometheus.yml
- ../dashboards:/root/config/dashboards
- ./data/signoz/:/var/lib/signoz/
environment:
- ClickHouseUrl=tcp://clickhouse:9000
- ALERTMANAGER_API_PREFIX=http://alertmanager:9093/api/
- SIGNOZ_LOCAL_DB_PATH=/var/lib/signoz/signoz.db
- DASHBOARDS_PATH=/root/config/dashboards
- STORAGE=clickhouse
- GODEBUG=netdns=go
- TELEMETRY_ENABLED=true
- DEPLOYMENT_TYPE=docker-swarm
healthcheck:
test: ["CMD", "wget", "--spider", "-q", "localhost:8080/api/v1/health"]
interval: 30s
timeout: 5s
retries: 3
deploy:
restart_policy:
condition: on-failure
!!merge <<: *db-depend
frontend:
image: signoz/frontend:0.67.1
deploy:
restart_policy:
condition: on-failure
depends_on:
- alertmanager
- query-service
ports:
- "3301:3301"
volumes:
- ../common/nginx-config.conf:/etc/nginx/conf.d/default.conf
otel-collector:
image: signoz/signoz-otel-collector:0.111.22
command: ["--config=/etc/otel-collector-config.yaml", "--manager-config=/etc/manager-config.yaml", "--feature-gates=-pkg.translator.prometheus.NormalizeName"]
user: root # required for reading docker container logs
volumes:
- ./otel-collector-config.yaml:/etc/otel-collector-config.yaml
- ./otel-collector-opamp-config.yaml:/etc/manager-config.yaml
- /var/lib/docker/containers:/var/lib/docker/containers:ro
- /:/hostfs:ro
environment:
- OTEL_RESOURCE_ATTRIBUTES=host.name={{.Node.Hostname}},os.type={{.Node.Platform.OS}},dockerswarm.service.name={{.Service.Name}},dockerswarm.task.name={{.Task.Name}}
- LOW_CARDINAL_EXCEPTION_GROUPING=false
ports:
# - "1777:1777" # pprof extension
- "4317:4317" # OTLP gRPC receiver
- "4318:4318" # OTLP HTTP receiver
# - "8888:8888" # OtelCollector internal metrics
# - "8889:8889" # signoz spanmetrics exposed by the agent
# - "9411:9411" # Zipkin port
# - "13133:13133" # Health check extension
# - "14250:14250" # Jaeger gRPC
# - "14268:14268" # Jaeger thrift HTTP
# - "55678:55678" # OpenCensus receiver
# - "55679:55679" # zPages extension
deploy:
mode: global
restart_policy:
condition: on-failure
depends_on:
- clickhouse
- otel-collector-migrator
- query-service
otel-collector-migrator:
image: signoz/signoz-schema-migrator:0.111.22
deploy:
restart_policy:
condition: on-failure
delay: 5s
command:
- "sync"
- "--dsn=tcp://clickhouse:9000"
- "--up="
depends_on:
- clickhouse
# - clickhouse-2
# - clickhouse-3
logspout:
image: "gliderlabs/logspout:v3.2.14"
volumes:
- /etc/hostname:/etc/host_hostname:ro
- /var/run/docker.sock:/var/run/docker.sock
command: syslog+tcp://otel-collector:2255
depends_on:
- otel-collector
deploy:
mode: global
restart_policy:
condition: on-failure
hotrod:
image: jaegertracing/example-hotrod:1.30
command: ["all"]
environment:
- JAEGER_ENDPOINT=http://otel-collector:14268/api/traces
logging:
options:
max-size: 50m
max-file: "3"
load-hotrod:
image: "signoz/locust:1.2.3"
hostname: load-hotrod
environment:
ATTACKED_HOST: http://hotrod:8080
LOCUST_MODE: standalone
NO_PROXY: standalone
TASK_DELAY_FROM: 5
TASK_DELAY_TO: 30
QUIET_MODE: "${QUIET_MODE:-false}"
LOCUST_OPTS: "--headless -u 10 -r 1"
volumes:
- ../common/locust-scripts:/locust


@@ -1,31 +0,0 @@
CREATE TABLE IF NOT EXISTS signoz_index (
timestamp DateTime64(9) CODEC(Delta, ZSTD(1)),
traceID String CODEC(ZSTD(1)),
spanID String CODEC(ZSTD(1)),
parentSpanID String CODEC(ZSTD(1)),
serviceName LowCardinality(String) CODEC(ZSTD(1)),
name LowCardinality(String) CODEC(ZSTD(1)),
kind Int32 CODEC(ZSTD(1)),
durationNano UInt64 CODEC(ZSTD(1)),
tags Array(String) CODEC(ZSTD(1)),
tagsKeys Array(String) CODEC(ZSTD(1)),
tagsValues Array(String) CODEC(ZSTD(1)),
statusCode Int64 CODEC(ZSTD(1)),
references String CODEC(ZSTD(1)),
externalHttpMethod Nullable(String) CODEC(ZSTD(1)),
externalHttpUrl Nullable(String) CODEC(ZSTD(1)),
component Nullable(String) CODEC(ZSTD(1)),
dbSystem Nullable(String) CODEC(ZSTD(1)),
dbName Nullable(String) CODEC(ZSTD(1)),
dbOperation Nullable(String) CODEC(ZSTD(1)),
peerService Nullable(String) CODEC(ZSTD(1)),
INDEX idx_traceID traceID TYPE bloom_filter GRANULARITY 4,
INDEX idx_service serviceName TYPE bloom_filter GRANULARITY 4,
INDEX idx_name name TYPE bloom_filter GRANULARITY 4,
INDEX idx_kind kind TYPE minmax GRANULARITY 4,
INDEX idx_tagsKeys tagsKeys TYPE bloom_filter(0.01) GRANULARITY 64,
INDEX idx_tagsValues tagsValues TYPE bloom_filter(0.01) GRANULARITY 64,
INDEX idx_duration durationNano TYPE minmax GRANULARITY 1
) ENGINE MergeTree()
PARTITION BY toDate(timestamp)
ORDER BY (serviceName, -toUnixTimestamp(timestamp))
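For context on what this removed schema served: a sketch of the kind of query the sort key and skip indexes above were designed to accelerate (service name and time window are illustrative):

-- slowest spans for one service over the last hour (illustrative)
SELECT traceID, name, durationNano
FROM signoz_index
WHERE serviceName = 'frontend'
  AND timestamp >= now() - INTERVAL 1 HOUR
ORDER BY durationNano DESC
LIMIT 10;

The ORDER BY (serviceName, -toUnixTimestamp(timestamp)) key keeps a service's newest rows together, and idx_duration lets MergeTree skip granules that cannot contain slow spans.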

View File

@@ -1,25 +0,0 @@
# my global config
global:
  scrape_interval: 5s # Set the scrape interval to every 5 seconds. Default is every 1 minute.
evaluation_interval: 15s # Evaluate rules every 15 seconds. The default is every 1 minute.
# scrape_timeout is set to the global default (10s).
# Alertmanager configuration
alerting:
alertmanagers:
- static_configs:
- targets:
- alertmanager:9093
# Load rules once and periodically evaluate them according to the global 'evaluation_interval'.
rule_files:
# - "first_rules.yml"
# - "second_rules.yml"
- 'alerts.yml'
# A scrape configuration containing exactly one endpoint to scrape:
# Here it's Prometheus itself.
scrape_configs: []
remote_read:
- url: tcp://clickhouse:9000/signoz_metrics
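  # query-service answers PromQL queries by reading samples back from
  # ClickHouse through this remote_read endpoint rather than from a local TSDB.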

View File

@@ -1,51 +0,0 @@
server {
listen 3301;
server_name _;
gzip on;
gzip_static on;
gzip_types text/plain text/css application/json application/x-javascript text/xml application/xml application/xml+rss text/javascript;
gzip_proxied any;
gzip_vary on;
gzip_comp_level 6;
gzip_buffers 16 8k;
gzip_http_version 1.1;
    # to avoid 414 (Request-URI Too Large) errors from nginx
client_max_body_size 24M;
large_client_header_buffers 8 128k;
location / {
if ( $uri = '/index.html' ) {
add_header Cache-Control no-store always;
}
root /usr/share/nginx/html;
index index.html index.htm;
try_files $uri $uri/ /index.html;
}
location ~ ^/api/(v1|v3)/logs/(tail|livetail){
proxy_pass http://query-service:8080;
proxy_http_version 1.1;
# connection will be closed if no data is read for 600s between successive read operations
proxy_read_timeout 600s;
        # don't buffer the data; send it directly to the client.
proxy_buffering off;
proxy_cache off;
}
location /api {
proxy_pass http://query-service:8080/api;
# connection will be closed if no data is read for 600s between successive read operations
proxy_read_timeout 600s;
}
# redirect server error pages to the static page /50x.html
#
error_page 500 502 503 504 /50x.html;
location = /50x.html {
root /usr/share/nginx/html;
}
}
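A quick smoke test for this proxy setup, assuming the frontend is published on localhost:3301 as in the compose files in this diff:

curl -s  http://localhost:3301/api/v1/health   # proxied to query-service:8080/api/v1/health
curl -sI http://localhost:3301/index.html      # static file, served with Cache-Control: no-store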

View File

@@ -0,0 +1,282 @@
version: "3"
x-common: &common
networks:
- signoz-net
deploy:
restart_policy:
condition: on-failure
logging:
options:
max-size: 50m
max-file: "3"
x-clickhouse-defaults: &clickhouse-defaults
!!merge <<: *common
image: clickhouse/clickhouse-server:24.1.2-alpine
tty: true
deploy:
labels:
signoz.io/scrape: "true"
signoz.io/port: "9363"
signoz.io/path: "/metrics"
depends_on:
- zookeeper-1
- zookeeper-2
- zookeeper-3
healthcheck:
test:
- CMD
- wget
- --spider
- -q
- 0.0.0.0:8123/ping
interval: 30s
timeout: 5s
retries: 3
ulimits:
nproc: 65535
nofile:
soft: 262144
hard: 262144
x-zookeeper-defaults: &zookeeper-defaults
!!merge <<: *common
image: bitnami/zookeeper:3.7.1
user: root
deploy:
labels:
signoz.io/scrape: "true"
signoz.io/port: "9141"
signoz.io/path: "/metrics"
healthcheck:
test:
- CMD-SHELL
- curl -s -m 2 http://localhost:8080/commands/ruok | grep error | grep null
interval: 30s
timeout: 5s
retries: 3
x-db-depend: &db-depend
!!merge <<: *common
depends_on:
- clickhouse
- clickhouse-2
- clickhouse-3
- schema-migrator
services:
init-clickhouse:
!!merge <<: *common
image: clickhouse/clickhouse-server:24.1.2-alpine
command:
- bash
- -c
- |
version="v0.0.1"
node_os=$$(uname -s | tr '[:upper:]' '[:lower:]')
node_arch=$$(uname -m | sed s/aarch64/arm64/ | sed s/x86_64/amd64/)
echo "Fetching histogram-binary for $${node_os}/$${node_arch}"
cd /tmp
wget -O histogram-quantile.tar.gz "https://github.com/SigNoz/signoz/releases/download/histogram-quantile%2F$${version}/histogram-quantile_$${node_os}_$${node_arch}.tar.gz"
tar -xvzf histogram-quantile.tar.gz
mv histogram-quantile /var/lib/clickhouse/user_scripts/histogramQuantile
volumes:
- ../common/clickhouse/user_scripts:/var/lib/clickhouse/user_scripts/
zookeeper-1:
!!merge <<: *zookeeper-defaults
# ports:
# - "2181:2181"
# - "2888:2888"
# - "3888:3888"
volumes:
- ./clickhouse-setup/data/zookeeper-1:/bitnami/zookeeper
environment:
- ZOO_SERVER_ID=1
- ZOO_SERVERS=0.0.0.0:2888:3888,zookeeper-2:2888:3888,zookeeper-3:2888:3888
- ALLOW_ANONYMOUS_LOGIN=yes
- ZOO_AUTOPURGE_INTERVAL=1
- ZOO_ENABLE_PROMETHEUS_METRICS=yes
- ZOO_PROMETHEUS_METRICS_PORT_NUMBER=9141
zookeeper-2:
!!merge <<: *zookeeper-defaults
# ports:
# - "2182:2181"
# - "2889:2888"
# - "3889:3888"
volumes:
- ./clickhouse-setup/data/zookeeper-2:/bitnami/zookeeper
environment:
- ZOO_SERVER_ID=2
- ZOO_SERVERS=zookeeper-1:2888:3888,0.0.0.0:2888:3888,zookeeper-3:2888:3888
- ALLOW_ANONYMOUS_LOGIN=yes
- ZOO_AUTOPURGE_INTERVAL=1
- ZOO_ENABLE_PROMETHEUS_METRICS=yes
- ZOO_PROMETHEUS_METRICS_PORT_NUMBER=9141
zookeeper-3:
!!merge <<: *zookeeper-defaults
# ports:
# - "2183:2181"
# - "2890:2888"
# - "3890:3888"
volumes:
- ./clickhouse-setup/data/zookeeper-3:/bitnami/zookeeper
environment:
- ZOO_SERVER_ID=3
- ZOO_SERVERS=zookeeper-1:2888:3888,zookeeper-2:2888:3888,0.0.0.0:2888:3888
- ALLOW_ANONYMOUS_LOGIN=yes
- ZOO_AUTOPURGE_INTERVAL=1
- ZOO_ENABLE_PROMETHEUS_METRICS=yes
- ZOO_PROMETHEUS_METRICS_PORT_NUMBER=9141
clickhouse:
!!merge <<: *clickhouse-defaults
# TODO: needed for schema-migrator to work, remove this redundancy once we have a better solution
hostname: clickhouse
# ports:
# - "9000:9000"
# - "8123:8123"
# - "9181:9181"
volumes:
- ../common/clickhouse/config.xml:/etc/clickhouse-server/config.xml
- ../common/clickhouse/users.xml:/etc/clickhouse-server/users.xml
- ../common/clickhouse/custom-function.xml:/etc/clickhouse-server/custom-function.xml
- ../common/clickhouse/user_scripts:/var/lib/clickhouse/user_scripts/
- ../common/clickhouse/cluster.ha.xml:/etc/clickhouse-server/config.d/cluster.xml
- ./clickhouse-setup/data/clickhouse/:/var/lib/clickhouse/
# - ../common/clickhouse/storage.xml:/etc/clickhouse-server/config.d/storage.xml
clickhouse-2:
!!merge <<: *clickhouse-defaults
hostname: clickhouse-2
# ports:
# - "9001:9000"
# - "8124:8123"
# - "9182:9181"
volumes:
- ../common/clickhouse/config.xml:/etc/clickhouse-server/config.xml
- ../common/clickhouse/users.xml:/etc/clickhouse-server/users.xml
- ../common/clickhouse/custom-function.xml:/etc/clickhouse-server/custom-function.xml
- ../common/clickhouse/user_scripts:/var/lib/clickhouse/user_scripts/
- ../common/clickhouse/cluster.ha.xml:/etc/clickhouse-server/config.d/cluster.xml
- ./clickhouse-setup/data/clickhouse-2/:/var/lib/clickhouse/
# - ../common/clickhouse/storage.xml:/etc/clickhouse-server/config.d/storage.xml
clickhouse-3:
!!merge <<: *clickhouse-defaults
hostname: clickhouse-3
# ports:
# - "9002:9000"
# - "8125:8123"
# - "9183:9181"
volumes:
- ../common/clickhouse/config.xml:/etc/clickhouse-server/config.xml
- ../common/clickhouse/users.xml:/etc/clickhouse-server/users.xml
- ../common/clickhouse/custom-function.xml:/etc/clickhouse-server/custom-function.xml
- ../common/clickhouse/user_scripts:/var/lib/clickhouse/user_scripts/
- ../common/clickhouse/cluster.ha.xml:/etc/clickhouse-server/config.d/cluster.xml
- ./clickhouse-setup/data/clickhouse-3/:/var/lib/clickhouse/
# - ../common/clickhouse/storage.xml:/etc/clickhouse-server/config.d/storage.xml
alertmanager:
!!merge <<: *common
image: signoz/alertmanager:0.23.7
command:
- --queryService.url=http://query-service:8085
- --storage.path=/data
volumes:
- ./clickhouse-setup/data/alertmanager:/data
depends_on:
- query-service
query-service:
!!merge <<: *db-depend
image: signoz/query-service:0.70.0
command:
- --config=/root/config/prometheus.yml
- --use-logs-new-schema=true
- --use-trace-new-schema=true
# ports:
# - "8080:8080" # signoz port
# - "6060:6060" # pprof port
volumes:
- ../common/signoz/prometheus.yml:/root/config/prometheus.yml
- ../common/dashboards:/root/config/dashboards
- ./clickhouse-setup/data/signoz/:/var/lib/signoz/
environment:
- ClickHouseUrl=tcp://clickhouse:9000
- ALERTMANAGER_API_PREFIX=http://alertmanager:9093/api/
- SIGNOZ_SQLSTORE_SQLITE_PATH=/var/lib/signoz/signoz.db
- DASHBOARDS_PATH=/root/config/dashboards
- STORAGE=clickhouse
- GODEBUG=netdns=go
- TELEMETRY_ENABLED=true
- DEPLOYMENT_TYPE=docker-swarm
healthcheck:
test:
- CMD
- wget
- --spider
- -q
- localhost:8080/api/v1/health
interval: 30s
timeout: 5s
retries: 3
frontend:
!!merge <<: *common
image: signoz/frontend:0.70.0
depends_on:
- alertmanager
- query-service
ports:
- "3301:3301"
volumes:
- ../common/signoz/nginx-config.conf:/etc/nginx/conf.d/default.conf
otel-collector:
!!merge <<: *db-depend
image: signoz/signoz-otel-collector:0.111.24
command:
- --config=/etc/otel-collector-config.yaml
- --manager-config=/etc/manager-config.yaml
- --copy-path=/var/tmp/collector-config.yaml
- --feature-gates=-pkg.translator.prometheus.NormalizeName
volumes:
- ./otel-collector-config.yaml:/etc/otel-collector-config.yaml
- ../common/signoz/otel-collector-opamp-config.yaml:/etc/manager-config.yaml
environment:
- OTEL_RESOURCE_ATTRIBUTES=host.name={{.Node.Hostname}},os.type={{.Node.Platform.OS}}
- LOW_CARDINAL_EXCEPTION_GROUPING=false
ports:
# - "1777:1777" # pprof extension
- "4317:4317" # OTLP gRPC receiver
- "4318:4318" # OTLP HTTP receiver
deploy:
replicas: 3
depends_on:
- clickhouse
- schema-migrator
- query-service
schema-migrator:
!!merge <<: *common
image: signoz/signoz-schema-migrator:0.111.24
deploy:
restart_policy:
condition: on-failure
delay: 5s
entrypoint: sh
command:
- -c
- "/signoz-schema-migrator sync --dsn=tcp://clickhouse:9000 --up= && /signoz-schema-migrator async --dsn=tcp://clickhouse:9000 --up="
depends_on:
- clickhouse
networks:
signoz-net:
name: signoz-net
volumes:
alertmanager:
name: signoz-alertmanager
clickhouse:
name: signoz-clickhouse
clickhouse-2:
name: signoz-clickhouse-2
clickhouse-3:
name: signoz-clickhouse-3
sqlite:
name: signoz-sqlite
zookeeper-1:
name: signoz-zookeeper-1
zookeeper-2:
name: signoz-zookeeper-2
zookeeper-3:
name: signoz-zookeeper-3

View File

@@ -0,0 +1,210 @@
version: "3"
x-common: &common
networks:
- signoz-net
deploy:
restart_policy:
condition: on-failure
logging:
options:
max-size: 50m
max-file: "3"
x-clickhouse-defaults: &clickhouse-defaults
!!merge <<: *common
image: clickhouse/clickhouse-server:24.1.2-alpine
tty: true
deploy:
labels:
signoz.io/scrape: "true"
signoz.io/port: "9363"
signoz.io/path: "/metrics"
depends_on:
- init-clickhouse
- zookeeper-1
healthcheck:
test:
- CMD
- wget
- --spider
- -q
- 0.0.0.0:8123/ping
interval: 30s
timeout: 5s
retries: 3
ulimits:
nproc: 65535
nofile:
soft: 262144
hard: 262144
x-zookeeper-defaults: &zookeeper-defaults
!!merge <<: *common
image: bitnami/zookeeper:3.7.1
user: root
deploy:
labels:
signoz.io/scrape: "true"
signoz.io/port: "9141"
signoz.io/path: "/metrics"
healthcheck:
test:
- CMD-SHELL
- curl -s -m 2 http://localhost:8080/commands/ruok | grep error | grep null
interval: 30s
timeout: 5s
retries: 3
x-db-depend: &db-depend
!!merge <<: *common
depends_on:
- clickhouse
- schema-migrator
services:
init-clickhouse:
!!merge <<: *common
image: clickhouse/clickhouse-server:24.1.2-alpine
command:
- bash
- -c
- |
version="v0.0.1"
node_os=$$(uname -s | tr '[:upper:]' '[:lower:]')
node_arch=$$(uname -m | sed s/aarch64/arm64/ | sed s/x86_64/amd64/)
echo "Fetching histogram-binary for $${node_os}/$${node_arch}"
cd /tmp
wget -O histogram-quantile.tar.gz "https://github.com/SigNoz/signoz/releases/download/histogram-quantile%2F$${version}/histogram-quantile_$${node_os}_$${node_arch}.tar.gz"
tar -xvzf histogram-quantile.tar.gz
mv histogram-quantile /var/lib/clickhouse/user_scripts/histogramQuantile
volumes:
- ../common/clickhouse/user_scripts:/var/lib/clickhouse/user_scripts/
zookeeper-1:
!!merge <<: *zookeeper-defaults
# ports:
# - "2181:2181"
# - "2888:2888"
# - "3888:3888"
volumes:
- zookeeper-1:/bitnami/zookeeper
environment:
- ZOO_SERVER_ID=1
- ALLOW_ANONYMOUS_LOGIN=yes
- ZOO_AUTOPURGE_INTERVAL=1
- ZOO_ENABLE_PROMETHEUS_METRICS=yes
- ZOO_PROMETHEUS_METRICS_PORT_NUMBER=9141
clickhouse:
!!merge <<: *clickhouse-defaults
    # TODO: needed for clickhouse TCP connection
hostname: clickhouse
# ports:
# - "9000:9000"
# - "8123:8123"
# - "9181:9181"
volumes:
- ../common/clickhouse/config.xml:/etc/clickhouse-server/config.xml
- ../common/clickhouse/users.xml:/etc/clickhouse-server/users.xml
- ../common/clickhouse/custom-function.xml:/etc/clickhouse-server/custom-function.xml
- ../common/clickhouse/user_scripts:/var/lib/clickhouse/user_scripts/
- ../common/clickhouse/cluster.xml:/etc/clickhouse-server/config.d/cluster.xml
- clickhouse:/var/lib/clickhouse/
# - ../common/clickhouse/storage.xml:/etc/clickhouse-server/config.d/storage.xml
alertmanager:
!!merge <<: *common
image: signoz/alertmanager:0.23.7
command:
- --queryService.url=http://query-service:8085
- --storage.path=/data
volumes:
- alertmanager:/data
depends_on:
- query-service
query-service:
!!merge <<: *db-depend
image: signoz/query-service:0.70.0
command:
- --config=/root/config/prometheus.yml
- --use-logs-new-schema=true
- --use-trace-new-schema=true
# ports:
# - "8080:8080" # signoz port
# - "6060:6060" # pprof port
volumes:
- ../common/signoz/prometheus.yml:/root/config/prometheus.yml
- ../common/dashboards:/root/config/dashboards
- sqlite:/var/lib/signoz/
environment:
- ClickHouseUrl=tcp://clickhouse:9000
- ALERTMANAGER_API_PREFIX=http://alertmanager:9093/api/
- SIGNOZ_SQLSTORE_SQLITE_PATH=/var/lib/signoz/signoz.db
- DASHBOARDS_PATH=/root/config/dashboards
- STORAGE=clickhouse
- GODEBUG=netdns=go
- TELEMETRY_ENABLED=true
- DEPLOYMENT_TYPE=docker-swarm
healthcheck:
test:
- CMD
- wget
- --spider
- -q
- localhost:8080/api/v1/health
interval: 30s
timeout: 5s
retries: 3
frontend:
!!merge <<: *common
image: signoz/frontend:0.70.0
depends_on:
- alertmanager
- query-service
ports:
- "3301:3301"
volumes:
- ../common/signoz/nginx-config.conf:/etc/nginx/conf.d/default.conf
otel-collector:
!!merge <<: *db-depend
image: signoz/signoz-otel-collector:0.111.24
command:
- --config=/etc/otel-collector-config.yaml
- --manager-config=/etc/manager-config.yaml
- --copy-path=/var/tmp/collector-config.yaml
- --feature-gates=-pkg.translator.prometheus.NormalizeName
volumes:
- ./otel-collector-config.yaml:/etc/otel-collector-config.yaml
- ../common/signoz/otel-collector-opamp-config.yaml:/etc/manager-config.yaml
environment:
- OTEL_RESOURCE_ATTRIBUTES=host.name={{.Node.Hostname}},os.type={{.Node.Platform.OS}}
- LOW_CARDINAL_EXCEPTION_GROUPING=false
ports:
# - "1777:1777" # pprof extension
- "4317:4317" # OTLP gRPC receiver
- "4318:4318" # OTLP HTTP receiver
deploy:
replicas: 3
depends_on:
- clickhouse
- schema-migrator
- query-service
schema-migrator:
!!merge <<: *common
image: signoz/signoz-schema-migrator:0.111.24
deploy:
restart_policy:
condition: on-failure
delay: 5s
entrypoint: sh
command:
- -c
- "/signoz-schema-migrator sync --dsn=tcp://clickhouse:9000 --up= && /signoz-schema-migrator async --dsn=tcp://clickhouse:9000 --up="
depends_on:
- clickhouse
networks:
signoz-net:
name: signoz-net
volumes:
alertmanager:
name: signoz-alertmanager
clickhouse:
name: signoz-clickhouse
sqlite:
name: signoz-sqlite
zookeeper-1:
name: signoz-zookeeper-1
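A deployment sketch for this Swarm file. The stack name matters: deploying as `signoz` yields `signoz_<service>` task names, which the log agents' container-name filter later in this diff expects; the compose file name here is an assumption.

docker swarm init                                   # once, on the manager node
docker stack deploy -c docker-compose.yaml signoz   # file name assumed
docker stack services signoz                        # watch replicas converge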

View File

@@ -0,0 +1,38 @@
version: "3"
x-common: &common
networks:
- signoz-net
extra_hosts:
- host.docker.internal:host-gateway
logging:
options:
max-size: 50m
max-file: "3"
deploy:
restart_policy:
condition: on-failure
services:
hotrod:
<<: *common
image: jaegertracing/example-hotrod:1.61.0
command: [ "all" ]
environment:
      - OTEL_EXPORTER_OTLP_ENDPOINT=http://host.docker.internal:4318
load-hotrod:
<<: *common
image: "signoz/locust:1.2.3"
environment:
ATTACKED_HOST: http://hotrod:8080
LOCUST_MODE: standalone
NO_PROXY: standalone
TASK_DELAY_FROM: 5
TASK_DELAY_TO: 30
QUIET_MODE: "${QUIET_MODE:-false}"
LOCUST_OPTS: "--headless -u 10 -r 1"
volumes:
- ../../../common/locust-scripts:/locust
networks:
signoz-net:
name: signoz-net
external: true

View File

@@ -0,0 +1,69 @@
version: "3"
x-common: &common
networks:
- signoz-net
extra_hosts:
- host.docker.internal:host-gateway
logging:
options:
max-size: 50m
max-file: "3"
deploy:
mode: global
restart_policy:
condition: on-failure
services:
otel-agent:
<<: *common
image: otel/opentelemetry-collector-contrib:0.111.0
command:
- --config=/etc/otel-collector-config.yaml
volumes:
- ./otel-agent-config.yaml:/etc/otel-collector-config.yaml
- /:/hostfs:ro
environment:
- SIGNOZ_COLLECTOR_ENDPOINT=http://host.docker.internal:4317 # In case of external SigNoz or cloud, update the endpoint and access token
- OTEL_RESOURCE_ATTRIBUTES=host.name={{.Node.Hostname}},os.type={{.Node.Platform.OS}}
# - SIGNOZ_ACCESS_TOKEN="<your-access-token>"
# Before exposing the ports, make sure the ports are not used by other services
# ports:
# - "4317:4317"
# - "4318:4318"
otel-metrics:
<<: *common
image: otel/opentelemetry-collector-contrib:0.111.0
    user: 0:0 # if you have security concerns, replace this with a `UID:GID` that has the necessary permissions on docker.sock
command:
- --config=/etc/otel-collector-config.yaml
volumes:
- ./otel-metrics-config.yaml:/etc/otel-collector-config.yaml
- /var/run/docker.sock:/var/run/docker.sock
environment:
- SIGNOZ_COLLECTOR_ENDPOINT=http://host.docker.internal:4317 # In case of external SigNoz or cloud, update the endpoint and access token
- OTEL_RESOURCE_ATTRIBUTES=host.name={{.Node.Hostname}},os.type={{.Node.Platform.OS}}
# - SIGNOZ_ACCESS_TOKEN="<your-access-token>"
# Before exposing the ports, make sure the ports are not used by other services
# ports:
# - "4317:4317"
# - "4318:4318"
deploy:
mode: replicated
replicas: 1
placement:
constraints:
- node.role == manager
logspout:
<<: *common
image: "gliderlabs/logspout:v3.2.14"
command: syslog+tcp://otel-agent:2255
user: root
volumes:
- /etc/hostname:/etc/host_hostname:ro
- /var/run/docker.sock:/var/run/docker.sock
depends_on:
- otel-agent
networks:
signoz-net:
name: signoz-net
external: true
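Because `signoz-net` is declared external here, the main SigNoz stack must be deployed first so the network already exists. These agents can then be brought up as a stack named `infra`, matching the `infra_…` prefix in the log filter of the agent config below (file name assumed):

docker stack deploy -c docker-compose.yaml infra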

View File

@@ -0,0 +1,102 @@
receivers:
hostmetrics:
collection_interval: 30s
root_path: /hostfs
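    # /hostfs is the host's root filesystem, mounted read-only via the
    # "/:/hostfs:ro" volume in the infra compose file, so the scrapers
    # below report host metrics rather than container metrics.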
scrapers:
cpu: {}
load: {}
memory: {}
disk: {}
filesystem: {}
network: {}
otlp:
protocols:
grpc:
endpoint: 0.0.0.0:4317
http:
endpoint: 0.0.0.0:4318
prometheus:
config:
global:
scrape_interval: 60s
scrape_configs:
- job_name: otel-agent
static_configs:
- targets:
- localhost:8888
labels:
job_name: otel-agent
tcplog/docker:
listen_address: "0.0.0.0:2255"
operators:
- type: regex_parser
regex: '^<([0-9]+)>[0-9]+ (?P<timestamp>[0-9]{4}-[0-9]{2}-[0-9]{2}T[0-9]{2}:[0-9]{2}:[0-9]{2}(\.[0-9]+)?([zZ]|([\+-])([01]\d|2[0-3]):?([0-5]\d)?)?) (?P<container_id>\S+) (?P<container_name>\S+) [0-9]+ - -( (?P<body>.*))?'
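        # Illustrative input line (hypothetical values) matched by the regex above:
        #   <14>1 2025-01-29T10:15:30.123Z 0f3a9c1b2d signoz_otel-collector.1.abcdef 1234 - - collector started
        # yielding attributes.timestamp, container_id, container_name, and body.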
timestamp:
parse_from: attributes.timestamp
layout: '%Y-%m-%dT%H:%M:%S.%LZ'
- type: move
from: attributes["body"]
to: body
- type: remove
field: attributes.timestamp
      # remove container names from the filter expression below if you want to collect their logs
- type: filter
id: signoz_logs_filter
expr: 'attributes.container_name matches "^(signoz_(logspout|alertmanager|query-service|otel-collector|clickhouse|zookeeper))|(infra_(logspout|otel-agent|otel-metrics)).*"'
processors:
batch:
send_batch_size: 10000
send_batch_max_size: 11000
timeout: 10s
resourcedetection:
    # the env detector adds custom resource attributes from the OTEL_RESOURCE_ATTRIBUTES environment variable
detectors:
# - ec2
# - gcp
# - azure
- env
- system
timeout: 2s
extensions:
health_check:
endpoint: 0.0.0.0:13133
pprof:
endpoint: 0.0.0.0:1777
exporters:
otlp:
endpoint: ${env:SIGNOZ_COLLECTOR_ENDPOINT}
tls:
insecure: true
headers:
signoz-access-token: ${env:SIGNOZ_ACCESS_TOKEN}
# debug: {}
service:
telemetry:
logs:
encoding: json
metrics:
address: 0.0.0.0:8888
extensions:
- health_check
- pprof
pipelines:
traces:
receivers: [otlp]
processors: [resourcedetection, batch]
exporters: [otlp]
metrics:
receivers: [otlp]
processors: [resourcedetection, batch]
exporters: [otlp]
metrics/hostmetrics:
receivers: [hostmetrics]
processors: [resourcedetection, batch]
exporters: [otlp]
metrics/prometheus:
receivers: [prometheus]
processors: [resourcedetection, batch]
exporters: [otlp]
logs:
receivers: [otlp, tcplog/docker]
processors: [resourcedetection, batch]
exporters: [otlp]

View File

@@ -0,0 +1,103 @@
receivers:
prometheus:
config:
global:
scrape_interval: 60s
scrape_configs:
- job_name: otel-metrics
static_configs:
- targets:
- localhost:8888
labels:
job_name: otel-metrics
# For Docker daemon metrics to be scraped, it must be configured to expose
# Prometheus metrics, as documented here: https://docs.docker.com/config/daemon/prometheus/
# - job_name: docker-daemon
# dockerswarm_sd_configs:
# - host: unix:///var/run/docker.sock
# role: nodes
# relabel_configs:
# - source_labels: [__meta_dockerswarm_node_address]
# target_label: __address__
# replacement: $1:9323
- job_name: "dockerswarm"
dockerswarm_sd_configs:
- host: unix:///var/run/docker.sock
role: tasks
relabel_configs:
- action: keep
regex: running
source_labels:
- __meta_dockerswarm_task_desired_state
- action: keep
regex: true
source_labels:
- __meta_dockerswarm_service_label_signoz_io_scrape
- regex: ([^:]+)(?::\d+)?
replacement: $1
source_labels:
- __address__
target_label: swarm_container_ip
- separator: .
source_labels:
- __meta_dockerswarm_service_name
- __meta_dockerswarm_task_slot
- __meta_dockerswarm_task_id
target_label: swarm_container_name
- target_label: __address__
source_labels:
- swarm_container_ip
- __meta_dockerswarm_service_label_signoz_io_port
separator: ":"
- source_labels:
- __meta_dockerswarm_service_label_signoz_io_path
target_label: __metrics_path__
- source_labels:
- __meta_dockerswarm_service_label_com_docker_stack_namespace
target_label: namespace
- source_labels:
- __meta_dockerswarm_service_name
target_label: service_name
- source_labels:
- __meta_dockerswarm_task_id
target_label: service_instance_id
- source_labels:
- __meta_dockerswarm_node_hostname
target_label: host_name
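      # Services opt in to this scrape through deploy labels, as on the
      # clickhouse and zookeeper services in the Swarm compose files:
      #   deploy:
      #     labels:
      #       signoz.io/scrape: "true"
      #       signoz.io/port: "9363"
      #       signoz.io/path: "/metrics"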
processors:
batch:
send_batch_size: 10000
send_batch_max_size: 11000
timeout: 10s
resourcedetection:
detectors:
- env
- system
timeout: 2s
extensions:
health_check:
endpoint: 0.0.0.0:13133
pprof:
endpoint: 0.0.0.0:1777
exporters:
otlp:
endpoint: ${env:SIGNOZ_COLLECTOR_ENDPOINT}
tls:
insecure: true
headers:
signoz-access-token: ${env:SIGNOZ_ACCESS_TOKEN}
# debug: {}
service:
telemetry:
logs:
encoding: json
metrics:
address: 0.0.0.0:8888
extensions:
- health_check
- pprof
pipelines:
metrics:
receivers: [prometheus]
processors: [resourcedetection, batch]
exporters: [otlp]

View File

@@ -1,85 +1,29 @@
receivers:
tcplog/docker:
listen_address: "0.0.0.0:2255"
operators:
- type: regex_parser
regex: '^<([0-9]+)>[0-9]+ (?P<timestamp>[0-9]{4}-[0-9]{2}-[0-9]{2}T[0-9]{2}:[0-9]{2}:[0-9]{2}(\.[0-9]+)?([zZ]|([\+-])([01]\d|2[0-3]):?([0-5]\d)?)?) (?P<container_id>\S+) (?P<container_name>\S+) [0-9]+ - -( (?P<body>.*))?'
timestamp:
parse_from: attributes.timestamp
layout: '%Y-%m-%dT%H:%M:%S.%LZ'
- type: move
from: attributes["body"]
to: body
- type: remove
field: attributes.timestamp
      # remove container names from the filter expression below if you want to collect their logs
- type: filter
id: signoz_logs_filter
expr: 'attributes.container_name matches "^signoz-(logspout|frontend|alertmanager|query-service|otel-collector|clickhouse|zookeeper)"'
opencensus:
endpoint: 0.0.0.0:55678
otlp:
protocols:
grpc:
endpoint: 0.0.0.0:4317
http:
endpoint: 0.0.0.0:4318
jaeger:
protocols:
grpc:
endpoint: 0.0.0.0:14250
thrift_http:
endpoint: 0.0.0.0:14268
# thrift_compact:
# endpoint: 0.0.0.0:6831
# thrift_binary:
# endpoint: 0.0.0.0:6832
hostmetrics:
collection_interval: 30s
root_path: /hostfs
scrapers:
cpu: {}
load: {}
memory: {}
disk: {}
filesystem: {}
network: {}
prometheus:
config:
global:
scrape_interval: 60s
scrape_configs:
# otel-collector internal metrics
- job_name: otel-collector
static_configs:
- targets:
- localhost:8888
labels:
job_name: otel-collector
processors:
batch:
send_batch_size: 10000
send_batch_max_size: 11000
timeout: 10s
# memory_limiter:
# # 80% of maximum memory up to 2G
# limit_mib: 1500
# # 25% of limit up to 2G
# spike_limit_mib: 512
# check_interval: 5s
#
# # 50% of the maximum memory
# limit_percentage: 50
# # 20% of max memory usage spike expected
# spike_limit_percentage: 20
# queued_retry:
# num_workers: 4
# queue_size: 100
# retry_on_failure: true
resourcedetection:
    # the env detector adds custom resource attributes from the OTEL_RESOURCE_ATTRIBUTES environment variable
detectors: [env, system] # include ec2 for AWS, gcp for GCP and azure for Azure.
detectors: [env, system]
timeout: 2s
signozspanmetrics/delta:
metrics_exporter: clickhousemetricswrite
@@ -106,15 +50,11 @@ processors:
- name: host.name
- name: host.type
- name: container.name
extensions:
health_check:
endpoint: 0.0.0.0:13133
zpages:
endpoint: 0.0.0.0:55679
pprof:
endpoint: 0.0.0.0:1777
exporters:
clickhousetraces:
datasource: tcp://clickhouse:9000/signoz_traces
@@ -132,8 +72,7 @@ exporters:
dsn: tcp://clickhouse:9000/signoz_logs
timeout: 10s
use_new_schema: true
# logging: {}
# debug: {}
service:
telemetry:
logs:
@@ -142,26 +81,21 @@ service:
address: 0.0.0.0:8888
extensions:
- health_check
- zpages
- pprof
pipelines:
traces:
receivers: [jaeger, otlp]
receivers: [otlp]
processors: [signozspanmetrics/delta, batch]
exporters: [clickhousetraces]
metrics:
receivers: [otlp]
processors: [batch]
exporters: [clickhousemetricswrite, clickhousemetricswritev2]
metrics/hostmetrics:
receivers: [hostmetrics]
processors: [resourcedetection, batch]
exporters: [clickhousemetricswrite, clickhousemetricswritev2]
metrics/prometheus:
receivers: [prometheus]
processors: [batch]
exporters: [clickhousemetricswrite/prometheus, clickhousemetricswritev2]
logs:
receivers: [otlp, tcplog/docker]
receivers: [otlp]
processors: [batch]
exporters: [clickhouselogsexporter]
exporters: [clickhouselogsexporter]

deploy/docker/.env Normal file
View File

@@ -0,0 +1 @@
COMPOSE_PROJECT_NAME=signoz
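Pinning the Compose project name keeps resource names stable regardless of the directory the stack is started from; without it, Compose defaults to the directory name (`docker` here). For example:

cd deploy/docker
docker compose up -d         # containers come up as signoz-<service>-1
docker compose -p signoz ps  # the explicit project flag is now redundant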

View File

@@ -0,0 +1,3 @@
This data directory is deprecated and will be removed in the future.
Please use the migration script under `scripts/volume-migration` to migrate data from bind mounts to Docker volumes.
The script also renames the project to `signoz` and the network to `signoz-net` (if not already in place).
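The gist of the migration, as a rough sketch only (the script under `scripts/volume-migration` is the supported path; the bind-mount path and volume name below are illustrative, taken from the compose files in this diff):

# copy one bind-mounted data directory into the named volume the new compose expects
docker volume create signoz-clickhouse
docker run --rm \
  -v "$PWD/clickhouse-setup/data/clickhouse:/from:ro" \
  -v signoz-clickhouse:/to \
  alpine sh -c 'cd /from && cp -a . /to'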

View File

View File

@@ -1,35 +0,0 @@
global:
resolve_timeout: 1m
slack_api_url: 'https://hooks.slack.com/services/xxx'
route:
receiver: 'slack-notifications'
receivers:
- name: 'slack-notifications'
slack_configs:
- channel: '#alerts'
send_resolved: true
icon_url: https://avatars3.githubusercontent.com/u/3380462
title: |-
[{{ .Status | toUpper }}{{ if eq .Status "firing" }}:{{ .Alerts.Firing | len }}{{ end }}] {{ .CommonLabels.alertname }} for {{ .CommonLabels.job }}
{{- if gt (len .CommonLabels) (len .GroupLabels) -}}
{{" "}}(
{{- with .CommonLabels.Remove .GroupLabels.Names }}
{{- range $index, $label := .SortedPairs -}}
{{ if $index }}, {{ end }}
{{- $label.Name }}="{{ $label.Value -}}"
{{- end }}
{{- end -}}
)
{{- end }}
text: >-
{{ range .Alerts -}}
*Alert:* {{ .Annotations.title }}{{ if .Labels.severity }} - `{{ .Labels.severity }}`{{ end }}
*Description:* {{ .Annotations.description }}
*Details:*
{{ range .Labels.SortedPairs }} • *{{ .Name }}:* `{{ .Value }}`
{{ end }}
{{ end }}

View File

@@ -1,11 +0,0 @@
groups:
- name: ExampleCPULoadGroup
rules:
- alert: HighCpuLoad
expr: system_cpu_load_average_1m > 0.1
for: 0m
labels:
severity: warning
annotations:
summary: High CPU load
description: "CPU load is > 0.1\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"

View File

@@ -1,41 +0,0 @@
<?xml version="1.0"?>
<clickhouse>
<storage_configuration>
<disks>
<default>
<keep_free_space_bytes>10485760</keep_free_space_bytes>
</default>
<s3>
<type>s3</type>
<!-- For S3 cold storage,
if region is us-east-1, endpoint can be https://<bucket-name>.s3.amazonaws.com
if region is not us-east-1, endpoint should be https://<bucket-name>.s3-<region>.amazonaws.com
For GCS cold storage,
endpoint should be https://storage.googleapis.com/<bucket-name>/data/
-->
<endpoint>https://BUCKET-NAME.s3-REGION-NAME.amazonaws.com/data/</endpoint>
<access_key_id>ACCESS-KEY-ID</access_key_id>
<secret_access_key>SECRET-ACCESS-KEY</secret_access_key>
<!-- In case of S3, uncomment the below configuration in case you want to read
AWS credentials from the Environment variables if they exist. -->
<!-- <use_environment_credentials>true</use_environment_credentials> -->
<!-- In case of GCS, uncomment the below configuration, since GCS does
not support batch deletion and result in error messages in logs. -->
<!-- <support_batch_delete>false</support_batch_delete> -->
</s3>
</disks>
<policies>
<tiered>
<volumes>
<default>
<disk>default</disk>
</default>
<s3>
<disk>s3</disk>
<perform_ttl_move_on_insert>0</perform_ttl_move_on_insert>
</s3>
</volumes>
</tiered>
</policies>
</storage_configuration>
</clickhouse>
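Filled-in versions of the endpoint placeholder above (bucket and region are hypothetical):

<!-- us-east-1:     <endpoint>https://my-bucket.s3.amazonaws.com/data/</endpoint> -->
<!-- other regions: <endpoint>https://my-bucket.s3-us-west-2.amazonaws.com/data/</endpoint> -->
<!-- GCS:           <endpoint>https://storage.googleapis.com/my-bucket/data/</endpoint> -->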

View File

@@ -1,123 +0,0 @@
<?xml version="1.0"?>
<clickhouse>
<!-- See also the files in users.d directory where the settings can be overridden. -->
<!-- Profiles of settings. -->
<profiles>
<!-- Default settings. -->
<default>
<!-- Maximum memory usage for processing single query, in bytes. -->
<max_memory_usage>10000000000</max_memory_usage>
<!-- How to choose between replicas during distributed query processing.
random - choose random replica from set of replicas with minimum number of errors
nearest_hostname - from set of replicas with minimum number of errors, choose replica
with minimum number of different symbols between replica's hostname and local hostname
(Hamming distance).
in_order - first live replica is chosen in specified order.
first_or_random - if the first replica has a higher number of errors, pick a random one from the replicas with the minimum number of errors.
-->
<load_balancing>random</load_balancing>
</default>
<!-- Profile that allows only read queries. -->
<readonly>
<readonly>1</readonly>
</readonly>
</profiles>
<!-- Users and ACL. -->
<users>
<!-- If user name was not specified, 'default' user is used. -->
<default>
<!-- See also the files in users.d directory where the password can be overridden.
Password could be specified in plaintext or in SHA256 (in hex format).
If you want to specify password in plaintext (not recommended), place it in 'password' element.
Example: <password>qwerty</password>.
Password could be empty.
If you want to specify SHA256, place it in 'password_sha256_hex' element.
Example: <password_sha256_hex>65e84be33532fb784c48129675f9eff3a682b27168c0ea744b2cf58ee02337c5</password_sha256_hex>
Restrictions of SHA256: impossibility to connect to ClickHouse using MySQL JS client (as of July 2019).
If you want to specify double SHA1, place it in 'password_double_sha1_hex' element.
Example: <password_double_sha1_hex>e395796d6546b1b65db9d665cd43f0e858dd4303</password_double_sha1_hex>
If you want to specify a previously defined LDAP server (see 'ldap_servers' in the main config) for authentication,
place its name in 'server' element inside 'ldap' element.
Example: <ldap><server>my_ldap_server</server></ldap>
If you want to authenticate the user via Kerberos (assuming Kerberos is enabled, see 'kerberos' in the main config),
place 'kerberos' element instead of 'password' (and similar) elements.
The name part of the canonical principal name of the initiator must match the user name for authentication to succeed.
You can also place 'realm' element inside 'kerberos' element to further restrict authentication to only those requests
whose initiator's realm matches it.
Example: <kerberos />
Example: <kerberos><realm>EXAMPLE.COM</realm></kerberos>
How to generate decent password:
Execute: PASSWORD=$(base64 < /dev/urandom | head -c8); echo "$PASSWORD"; echo -n "$PASSWORD" | sha256sum | tr -d '-'
In first line will be password and in second - corresponding SHA256.
How to generate double SHA1:
Execute: PASSWORD=$(base64 < /dev/urandom | head -c8); echo "$PASSWORD"; echo -n "$PASSWORD" | sha1sum | tr -d '-' | xxd -r -p | sha1sum | tr -d '-'
In first line will be password and in second - corresponding double SHA1.
-->
<password></password>
<!-- List of networks with open access.
To open access from everywhere, specify:
<ip>::/0</ip>
To open access only from localhost, specify:
<ip>::1</ip>
<ip>127.0.0.1</ip>
Each element of list has one of the following forms:
<ip> IP-address or network mask. Examples: 213.180.204.3 or 10.0.0.1/8 or 10.0.0.1/255.255.255.0
2a02:6b8::3 or 2a02:6b8::3/64 or 2a02:6b8::3/ffff:ffff:ffff:ffff::.
<host> Hostname. Example: server01.clickhouse.com.
To check access, DNS query is performed, and all received addresses compared to peer address.
<host_regexp> Regular expression for host names. Example, ^server\d\d-\d\d-\d\.clickhouse\.com$
To check access, DNS PTR query is performed for peer address and then regexp is applied.
Then, for result of PTR query, another DNS query is performed and all received addresses compared to peer address.
It is strongly recommended that the regexp ends with $.
All results of DNS requests are cached till server restart.
-->
<networks>
<ip>::/0</ip>
</networks>
<!-- Settings profile for user. -->
<profile>default</profile>
<!-- Quota for user. -->
<quota>default</quota>
<!-- User can create other users and grant rights to them. -->
<!-- <access_management>1</access_management> -->
</default>
</users>
<!-- Quotas. -->
<quotas>
<!-- Name of quota. -->
<default>
<!-- Limits for time interval. You could specify many intervals with different limits. -->
<interval>
<!-- Length of interval. -->
<duration>3600</duration>
<!-- No limits. Just calculate resource usage for time interval. -->
<queries>0</queries>
<errors>0</errors>
<result_rows>0</result_rows>
<read_rows>0</read_rows>
<execution_time>0</execution_time>
</interval>
</default>
</quotas>
</clickhouse>

View File

@@ -1,115 +0,0 @@
version: "2.4"
include:
- test-app-docker-compose.yaml
services:
zookeeper-1:
image: bitnami/zookeeper:3.7.1
container_name: signoz-zookeeper-1
hostname: zookeeper-1
user: root
ports:
- "2181:2181"
- "2888:2888"
- "3888:3888"
volumes:
- ./data/zookeeper-1:/bitnami/zookeeper
environment:
- ZOO_SERVER_ID=1
# - ZOO_SERVERS=0.0.0.0:2888:3888,zookeeper-2:2888:3888,zookeeper-3:2888:3888
- ALLOW_ANONYMOUS_LOGIN=yes
- ZOO_AUTOPURGE_INTERVAL=1
clickhouse:
image: clickhouse/clickhouse-server:24.1.2-alpine
container_name: signoz-clickhouse
# ports:
# - "9000:9000"
# - "8123:8123"
tty: true
volumes:
- ./clickhouse-config.xml:/etc/clickhouse-server/config.xml
- ./clickhouse-users.xml:/etc/clickhouse-server/users.xml
- ./custom-function.xml:/etc/clickhouse-server/custom-function.xml
- ./clickhouse-cluster.xml:/etc/clickhouse-server/config.d/cluster.xml
# - ./clickhouse-storage.xml:/etc/clickhouse-server/config.d/storage.xml
- ./data/clickhouse/:/var/lib/clickhouse/
- ./user_scripts:/var/lib/clickhouse/user_scripts/
restart: on-failure
logging:
options:
max-size: 50m
max-file: "3"
healthcheck:
# "clickhouse", "client", "-u ${CLICKHOUSE_USER}", "--password ${CLICKHOUSE_PASSWORD}", "-q 'SELECT 1'"
test: ["CMD", "wget", "--spider", "-q", "0.0.0.0:8123/ping"]
interval: 30s
timeout: 5s
retries: 3
alertmanager:
container_name: signoz-alertmanager
image: signoz/alertmanager:0.23.7
volumes:
- ./data/alertmanager:/data
depends_on:
query-service:
condition: service_healthy
restart: on-failure
command:
- --queryService.url=http://query-service:8085
- --storage.path=/data
otel-collector-migrator:
image: signoz/signoz-schema-migrator:${OTELCOL_TAG:-0.111.22}
container_name: otel-migrator
command:
- "sync"
- "--dsn=tcp://clickhouse:9000"
- "--up="
depends_on:
clickhouse:
condition: service_healthy
# clickhouse-2:
# condition: service_healthy
# clickhouse-3:
# condition: service_healthy
  # Note for maintainers/contributors: if you change the line numbers of the frontend & query-service sections, update them in `./scripts/commentLinesForSetup.sh` & `./CONTRIBUTING.md`
otel-collector:
container_name: signoz-otel-collector
image: signoz/signoz-otel-collector:${OTELCOL_TAG:-0.111.22}
command: ["--config=/etc/otel-collector-config.yaml", "--manager-config=/etc/manager-config.yaml", "--copy-path=/var/tmp/collector-config.yaml", "--feature-gates=-pkg.translator.prometheus.NormalizeName"]
# user: root # required for reading docker container logs
volumes:
- ./otel-collector-config.yaml:/etc/otel-collector-config.yaml
- ./otel-collector-opamp-config.yaml:/etc/manager-config.yaml
- /var/lib/docker/containers:/var/lib/docker/containers:ro
- /:/hostfs:ro
environment:
- OTEL_RESOURCE_ATTRIBUTES=host.name=signoz-host,os.type=linux
ports:
# - "1777:1777" # pprof extension
- "4317:4317" # OTLP gRPC receiver
- "4318:4318" # OTLP HTTP receiver
# - "8888:8888" # OtelCollector internal metrics
# - "8889:8889" # signoz spanmetrics exposed by the agent
# - "9411:9411" # Zipkin port
# - "13133:13133" # health check extension
# - "14250:14250" # Jaeger gRPC
# - "14268:14268" # Jaeger thrift HTTP
# - "55678:55678" # OpenCensus receiver
# - "55679:55679" # zPages extension
restart: on-failure
depends_on:
clickhouse:
condition: service_healthy
otel-collector-migrator:
condition: service_completed_successfully
query-service:
condition: service_healthy
logspout:
image: "gliderlabs/logspout:v3.2.14"
container_name: signoz-logspout
volumes:
- /etc/hostname:/etc/host_hostname:ro
- /var/run/docker.sock:/var/run/docker.sock
command: syslog+tcp://otel-collector:2255
depends_on:
- otel-collector
restart: on-failure

View File

@@ -1,68 +0,0 @@
version: "2.4"
services:
query-service:
hostname: query-service
build:
context: "../../../"
dockerfile: "./pkg/query-service/Dockerfile"
args:
LDFLAGS: ""
TARGETPLATFORM: "${GOOS}/${GOARCH}"
container_name: signoz-query-service
environment:
- ClickHouseUrl=tcp://clickhouse:9000
- ALERTMANAGER_API_PREFIX=http://alertmanager:9093/api/
- SIGNOZ_LOCAL_DB_PATH=/var/lib/signoz/signoz.db
- DASHBOARDS_PATH=/root/config/dashboards
- STORAGE=clickhouse
- GODEBUG=netdns=go
- TELEMETRY_ENABLED=true
volumes:
- ./prometheus.yml:/root/config/prometheus.yml
- ../dashboards:/root/config/dashboards
- ./data/signoz/:/var/lib/signoz/
command:
[
"-config=/root/config/prometheus.yml",
"--use-logs-new-schema=true",
"--use-trace-new-schema=true"
]
ports:
- "6060:6060"
- "8080:8080"
restart: on-failure
healthcheck:
test:
[
"CMD",
"wget",
"--spider",
"-q",
"localhost:8080/api/v1/health"
]
interval: 30s
timeout: 5s
retries: 3
depends_on:
clickhouse:
condition: service_healthy
frontend:
build:
context: "../../../frontend"
dockerfile: "./Dockerfile"
args:
TARGETOS: "${GOOS}"
TARGETPLATFORM: "${GOARCH}"
container_name: signoz-frontend
environment:
- FRONTEND_API_ENDPOINT=http://query-service:8080
restart: on-failure
depends_on:
- alertmanager
- query-service
ports:
- "3301:3301"
volumes:
- ../common/nginx-config.conf:/etc/nginx/conf.d/default.conf

View File

@@ -1,257 +0,0 @@
x-clickhouse-defaults: &clickhouse-defaults
restart: on-failure
  # adding a non-LTS version due to this fix https://github.com/ClickHouse/ClickHouse/commit/32caf8716352f45c1b617274c7508c86b7d1afab
image: clickhouse/clickhouse-server:24.1.2-alpine
tty: true
depends_on:
- zookeeper-1
# - zookeeper-2
# - zookeeper-3
logging:
options:
max-size: 50m
max-file: "3"
healthcheck:
# "clickhouse", "client", "-u ${CLICKHOUSE_USER}", "--password ${CLICKHOUSE_PASSWORD}", "-q 'SELECT 1'"
test: ["CMD", "wget", "--spider", "-q", "0.0.0.0:8123/ping"]
interval: 30s
timeout: 5s
retries: 3
ulimits:
nproc: 65535
nofile:
soft: 262144
hard: 262144
x-db-depend: &db-depend
depends_on:
clickhouse:
condition: service_healthy
otel-collector-migrator-sync:
condition: service_completed_successfully
# clickhouse-2:
# condition: service_healthy
# clickhouse-3:
# condition: service_healthy
services:
zookeeper-1:
image: bitnami/zookeeper:3.7.1
container_name: signoz-zookeeper-1
hostname: zookeeper-1
user: root
ports:
- "2181:2181"
- "2888:2888"
- "3888:3888"
volumes:
- ./data/zookeeper-1:/bitnami/zookeeper
environment:
- ZOO_SERVER_ID=1
# - ZOO_SERVERS=0.0.0.0:2888:3888,zookeeper-2:2888:3888,zookeeper-3:2888:3888
- ALLOW_ANONYMOUS_LOGIN=yes
- ZOO_AUTOPURGE_INTERVAL=1
# zookeeper-2:
# image: bitnami/zookeeper:3.7.0
# container_name: signoz-zookeeper-2
# hostname: zookeeper-2
# user: root
# ports:
# - "2182:2181"
# - "2889:2888"
# - "3889:3888"
# volumes:
# - ./data/zookeeper-2:/bitnami/zookeeper
# environment:
# - ZOO_SERVER_ID=2
# - ZOO_SERVERS=zookeeper-1:2888:3888,0.0.0.0:2888:3888,zookeeper-3:2888:3888
# - ALLOW_ANONYMOUS_LOGIN=yes
# - ZOO_AUTOPURGE_INTERVAL=1
# zookeeper-3:
# image: bitnami/zookeeper:3.7.0
# container_name: signoz-zookeeper-3
# hostname: zookeeper-3
# user: root
# ports:
# - "2183:2181"
# - "2890:2888"
# - "3890:3888"
# volumes:
# - ./data/zookeeper-3:/bitnami/zookeeper
# environment:
# - ZOO_SERVER_ID=3
# - ZOO_SERVERS=zookeeper-1:2888:3888,zookeeper-2:2888:3888,0.0.0.0:2888:3888
# - ALLOW_ANONYMOUS_LOGIN=yes
# - ZOO_AUTOPURGE_INTERVAL=1
clickhouse:
!!merge <<: *clickhouse-defaults
container_name: signoz-clickhouse
hostname: clickhouse
ports:
- "9000:9000"
- "8123:8123"
- "9181:9181"
volumes:
- ./clickhouse-config.xml:/etc/clickhouse-server/config.xml
- ./clickhouse-users.xml:/etc/clickhouse-server/users.xml
- ./custom-function.xml:/etc/clickhouse-server/custom-function.xml
- ./clickhouse-cluster.xml:/etc/clickhouse-server/config.d/cluster.xml
# - ./clickhouse-storage.xml:/etc/clickhouse-server/config.d/storage.xml
- ./data/clickhouse/:/var/lib/clickhouse/
- ./user_scripts:/var/lib/clickhouse/user_scripts/
# clickhouse-2:
# <<: *clickhouse-defaults
# container_name: signoz-clickhouse-2
# hostname: clickhouse-2
# ports:
# - "9001:9000"
# - "8124:8123"
# - "9182:9181"
# volumes:
# - ./clickhouse-config.xml:/etc/clickhouse-server/config.xml
# - ./clickhouse-users.xml:/etc/clickhouse-server/users.xml
# - ./custom-function.xml:/etc/clickhouse-server/custom-function.xml
# - ./clickhouse-cluster.xml:/etc/clickhouse-server/config.d/cluster.xml
# # - ./clickhouse-storage.xml:/etc/clickhouse-server/config.d/storage.xml
# - ./data/clickhouse-2/:/var/lib/clickhouse/
# - ./user_scripts:/var/lib/clickhouse/user_scripts/
# clickhouse-3:
# <<: *clickhouse-defaults
# container_name: signoz-clickhouse-3
# hostname: clickhouse-3
# ports:
# - "9002:9000"
# - "8125:8123"
# - "9183:9181"
# volumes:
# - ./clickhouse-config.xml:/etc/clickhouse-server/config.xml
# - ./clickhouse-users.xml:/etc/clickhouse-server/users.xml
# - ./custom-function.xml:/etc/clickhouse-server/custom-function.xml
# - ./clickhouse-cluster.xml:/etc/clickhouse-server/config.d/cluster.xml
# # - ./clickhouse-storage.xml:/etc/clickhouse-server/config.d/storage.xml
# - ./data/clickhouse-3/:/var/lib/clickhouse/
# - ./user_scripts:/var/lib/clickhouse/user_scripts/
alertmanager:
image: signoz/alertmanager:${ALERTMANAGER_TAG:-0.23.7}
container_name: signoz-alertmanager
volumes:
- ./data/alertmanager:/data
depends_on:
query-service:
condition: service_healthy
restart: on-failure
command:
- --queryService.url=http://query-service:8085
- --storage.path=/data
  # Note for maintainers/contributors: if you change the line numbers of the frontend & query-service sections, update them in `./scripts/commentLinesForSetup.sh` & `./CONTRIBUTING.md`
query-service:
image: signoz/query-service:${DOCKER_TAG:-0.67.1}
container_name: signoz-query-service
command: ["-config=/root/config/prometheus.yml", "--use-logs-new-schema=true", "--use-trace-new-schema=true"]
# ports:
# - "6060:6060" # pprof port
# - "8080:8080" # query-service port
volumes:
- ./prometheus.yml:/root/config/prometheus.yml
- ../dashboards:/root/config/dashboards
- ./data/signoz/:/var/lib/signoz/
environment:
- ClickHouseUrl=tcp://clickhouse:9000
- ALERTMANAGER_API_PREFIX=http://alertmanager:9093/api/
- SIGNOZ_LOCAL_DB_PATH=/var/lib/signoz/signoz.db
- DASHBOARDS_PATH=/root/config/dashboards
- STORAGE=clickhouse
- GODEBUG=netdns=go
- TELEMETRY_ENABLED=true
- DEPLOYMENT_TYPE=docker-standalone-amd
restart: on-failure
healthcheck:
test: ["CMD", "wget", "--spider", "-q", "localhost:8080/api/v1/health"]
interval: 30s
timeout: 5s
retries: 3
!!merge <<: *db-depend
frontend:
image: signoz/frontend:${DOCKER_TAG:-0.67.1}
container_name: signoz-frontend
restart: on-failure
depends_on:
- alertmanager
- query-service
ports:
- "3301:3301"
volumes:
- ../common/nginx-config.conf:/etc/nginx/conf.d/default.conf
otel-collector-migrator-sync:
image: signoz/signoz-schema-migrator:${OTELCOL_TAG:-0.111.22}
container_name: otel-migrator-sync
command:
- "sync"
- "--dsn=tcp://clickhouse:9000"
- "--up="
depends_on:
clickhouse:
condition: service_healthy
# clickhouse-2:
# condition: service_healthy
# clickhouse-3:
# condition: service_healthy
otel-collector-migrator-async:
image: signoz/signoz-schema-migrator:${OTELCOL_TAG:-0.111.22}
container_name: otel-migrator-async
command:
- "async"
- "--dsn=tcp://clickhouse:9000"
- "--up="
depends_on:
clickhouse:
condition: service_healthy
otel-collector-migrator-sync:
condition: service_completed_successfully
# clickhouse-2:
# condition: service_healthy
# clickhouse-3:
# condition: service_healthy
otel-collector:
image: signoz/signoz-otel-collector:${OTELCOL_TAG:-0.111.22}
container_name: signoz-otel-collector
command: ["--config=/etc/otel-collector-config.yaml", "--manager-config=/etc/manager-config.yaml", "--copy-path=/var/tmp/collector-config.yaml", "--feature-gates=-pkg.translator.prometheus.NormalizeName"]
user: root # required for reading docker container logs
volumes:
- ./otel-collector-config.yaml:/etc/otel-collector-config.yaml
- ./otel-collector-opamp-config.yaml:/etc/manager-config.yaml
- /var/lib/docker/containers:/var/lib/docker/containers:ro
- /:/hostfs:ro
environment:
- OTEL_RESOURCE_ATTRIBUTES=host.name=signoz-host,os.type=linux
- LOW_CARDINAL_EXCEPTION_GROUPING=false
ports:
# - "1777:1777" # pprof extension
- "4317:4317" # OTLP gRPC receiver
- "4318:4318" # OTLP HTTP receiver
# - "8888:8888" # OtelCollector internal metrics
# - "8889:8889" # signoz spanmetrics exposed by the agent
# - "9411:9411" # Zipkin port
# - "13133:13133" # health check extension
# - "14250:14250" # Jaeger gRPC
# - "14268:14268" # Jaeger thrift HTTP
# - "55678:55678" # OpenCensus receiver
# - "55679:55679" # zPages extension
restart: on-failure
depends_on:
clickhouse:
condition: service_healthy
otel-collector-migrator-sync:
condition: service_completed_successfully
query-service:
condition: service_healthy
logspout:
image: "gliderlabs/logspout:v3.2.14"
container_name: signoz-logspout
volumes:
- /etc/hostname:/etc/host_hostname:ro
- /var/run/docker.sock:/var/run/docker.sock
command: syslog+tcp://otel-collector:2255
depends_on:
- otel-collector
restart: on-failure

View File

@@ -1,243 +0,0 @@
version: "2.4"
include:
- test-app-docker-compose.yaml
x-clickhouse-defaults: &clickhouse-defaults
restart: on-failure
  # adding a non-LTS version due to this fix https://github.com/ClickHouse/ClickHouse/commit/32caf8716352f45c1b617274c7508c86b7d1afab
image: clickhouse/clickhouse-server:24.1.2-alpine
tty: true
depends_on:
- zookeeper-1
# - zookeeper-2
# - zookeeper-3
logging:
options:
max-size: 50m
max-file: "3"
healthcheck:
# "clickhouse", "client", "-u ${CLICKHOUSE_USER}", "--password ${CLICKHOUSE_PASSWORD}", "-q 'SELECT 1'"
test: ["CMD", "wget", "--spider", "-q", "0.0.0.0:8123/ping"]
interval: 30s
timeout: 5s
retries: 3
ulimits:
nproc: 65535
nofile:
soft: 262144
hard: 262144
x-db-depend: &db-depend
depends_on:
clickhouse:
condition: service_healthy
otel-collector-migrator:
condition: service_completed_successfully
# clickhouse-2:
# condition: service_healthy
# clickhouse-3:
# condition: service_healthy
services:
zookeeper-1:
image: bitnami/zookeeper:3.7.1
container_name: signoz-zookeeper-1
hostname: zookeeper-1
user: root
ports:
- "2181:2181"
- "2888:2888"
- "3888:3888"
volumes:
- ./data/zookeeper-1:/bitnami/zookeeper
environment:
- ZOO_SERVER_ID=1
# - ZOO_SERVERS=0.0.0.0:2888:3888,zookeeper-2:2888:3888,zookeeper-3:2888:3888
- ALLOW_ANONYMOUS_LOGIN=yes
- ZOO_AUTOPURGE_INTERVAL=1
# zookeeper-2:
# image: bitnami/zookeeper:3.7.0
# container_name: signoz-zookeeper-2
# hostname: zookeeper-2
# user: root
# ports:
# - "2182:2181"
# - "2889:2888"
# - "3889:3888"
# volumes:
# - ./data/zookeeper-2:/bitnami/zookeeper
# environment:
# - ZOO_SERVER_ID=2
# - ZOO_SERVERS=zookeeper-1:2888:3888,0.0.0.0:2888:3888,zookeeper-3:2888:3888
# - ALLOW_ANONYMOUS_LOGIN=yes
# - ZOO_AUTOPURGE_INTERVAL=1
# zookeeper-3:
# image: bitnami/zookeeper:3.7.0
# container_name: signoz-zookeeper-3
# hostname: zookeeper-3
# user: root
# ports:
# - "2183:2181"
# - "2890:2888"
# - "3890:3888"
# volumes:
# - ./data/zookeeper-3:/bitnami/zookeeper
# environment:
# - ZOO_SERVER_ID=3
# - ZOO_SERVERS=zookeeper-1:2888:3888,zookeeper-2:2888:3888,0.0.0.0:2888:3888
# - ALLOW_ANONYMOUS_LOGIN=yes
# - ZOO_AUTOPURGE_INTERVAL=1
clickhouse:
!!merge <<: *clickhouse-defaults
container_name: signoz-clickhouse
hostname: clickhouse
ports:
- "9000:9000"
- "8123:8123"
- "9181:9181"
volumes:
- ./clickhouse-config.xml:/etc/clickhouse-server/config.xml
- ./clickhouse-users.xml:/etc/clickhouse-server/users.xml
- ./custom-function.xml:/etc/clickhouse-server/custom-function.xml
- ./clickhouse-cluster.xml:/etc/clickhouse-server/config.d/cluster.xml
# - ./clickhouse-storage.xml:/etc/clickhouse-server/config.d/storage.xml
- ./data/clickhouse/:/var/lib/clickhouse/
- ./user_scripts:/var/lib/clickhouse/user_scripts/
# clickhouse-2:
# <<: *clickhouse-defaults
# container_name: signoz-clickhouse-2
# hostname: clickhouse-2
# ports:
# - "9001:9000"
# - "8124:8123"
# - "9182:9181"
# volumes:
# - ./clickhouse-config.xml:/etc/clickhouse-server/config.xml
# - ./clickhouse-users.xml:/etc/clickhouse-server/users.xml
# - ./custom-function.xml:/etc/clickhouse-server/custom-function.xml
# - ./clickhouse-cluster.xml:/etc/clickhouse-server/config.d/cluster.xml
# # - ./clickhouse-storage.xml:/etc/clickhouse-server/config.d/storage.xml
# - ./data/clickhouse-2/:/var/lib/clickhouse/
# - ./user_scripts:/var/lib/clickhouse/user_scripts/
# clickhouse-3:
# <<: *clickhouse-defaults
# container_name: signoz-clickhouse-3
# hostname: clickhouse-3
# ports:
# - "9002:9000"
# - "8125:8123"
# - "9183:9181"
# volumes:
# - ./clickhouse-config.xml:/etc/clickhouse-server/config.xml
# - ./clickhouse-users.xml:/etc/clickhouse-server/users.xml
# - ./custom-function.xml:/etc/clickhouse-server/custom-function.xml
# - ./clickhouse-cluster.xml:/etc/clickhouse-server/config.d/cluster.xml
# # - ./clickhouse-storage.xml:/etc/clickhouse-server/config.d/storage.xml
# - ./data/clickhouse-3/:/var/lib/clickhouse/
# - ./user_scripts:/var/lib/clickhouse/user_scripts/
alertmanager:
image: signoz/alertmanager:${ALERTMANAGER_TAG:-0.23.7}
container_name: signoz-alertmanager
volumes:
- ./data/alertmanager:/data
depends_on:
query-service:
condition: service_healthy
restart: on-failure
command:
- --queryService.url=http://query-service:8085
- --storage.path=/data
  # Note for maintainers/contributors: if you change the line numbers of the frontend & query-service sections, update them in `./scripts/commentLinesForSetup.sh` & `./CONTRIBUTING.md`
query-service:
image: signoz/query-service:${DOCKER_TAG:-0.67.1}
container_name: signoz-query-service
command: ["-config=/root/config/prometheus.yml", "-gateway-url=https://api.staging.signoz.cloud", "--use-logs-new-schema=true", "--use-trace-new-schema=true"]
# ports:
# - "6060:6060" # pprof port
# - "8080:8080" # query-service port
volumes:
- ./prometheus.yml:/root/config/prometheus.yml
- ../dashboards:/root/config/dashboards
- ./data/signoz/:/var/lib/signoz/
environment:
- ClickHouseUrl=tcp://clickhouse:9000
- ALERTMANAGER_API_PREFIX=http://alertmanager:9093/api/
- SIGNOZ_LOCAL_DB_PATH=/var/lib/signoz/signoz.db
- DASHBOARDS_PATH=/root/config/dashboards
- STORAGE=clickhouse
- GODEBUG=netdns=go
- TELEMETRY_ENABLED=true
- DEPLOYMENT_TYPE=docker-standalone-amd
- KAFKA_SPAN_EVAL=${KAFKA_SPAN_EVAL:-false}
restart: on-failure
healthcheck:
test: ["CMD", "wget", "--spider", "-q", "localhost:8080/api/v1/health"]
interval: 30s
timeout: 5s
retries: 3
!!merge <<: *db-depend
frontend:
image: signoz/frontend:${DOCKER_TAG:-0.67.1}
container_name: signoz-frontend
restart: on-failure
depends_on:
- alertmanager
- query-service
ports:
- "3301:3301"
volumes:
- ../common/nginx-config.conf:/etc/nginx/conf.d/default.conf
otel-collector-migrator:
image: signoz/signoz-schema-migrator:${OTELCOL_TAG:-0.111.22}
container_name: otel-migrator
command:
- "--dsn=tcp://clickhouse:9000"
depends_on:
clickhouse:
condition: service_healthy
# clickhouse-2:
# condition: service_healthy
# clickhouse-3:
# condition: service_healthy
otel-collector:
image: signoz/signoz-otel-collector:${OTELCOL_TAG:-0.111.22}
container_name: signoz-otel-collector
command: ["--config=/etc/otel-collector-config.yaml", "--manager-config=/etc/manager-config.yaml", "--copy-path=/var/tmp/collector-config.yaml", "--feature-gates=-pkg.translator.prometheus.NormalizeName"]
user: root # required for reading docker container logs
volumes:
- ./otel-collector-config.yaml:/etc/otel-collector-config.yaml
- ./otel-collector-opamp-config.yaml:/etc/manager-config.yaml
- /var/lib/docker/containers:/var/lib/docker/containers:ro
- /:/hostfs:ro
environment:
- OTEL_RESOURCE_ATTRIBUTES=host.name=signoz-host,os.type=linux
- LOW_CARDINAL_EXCEPTION_GROUPING=false
ports:
# - "1777:1777" # pprof extension
- "4317:4317" # OTLP gRPC receiver
- "4318:4318" # OTLP HTTP receiver
# - "8888:8888" # OtelCollector internal metrics
# - "8889:8889" # signoz spanmetrics exposed by the agent
# - "9411:9411" # Zipkin port
# - "13133:13133" # health check extension
# - "14250:14250" # Jaeger gRPC
# - "14268:14268" # Jaeger thrift HTTP
# - "55678:55678" # OpenCensus receiver
# - "55679:55679" # zPages extension
restart: on-failure
depends_on:
clickhouse:
condition: service_healthy
otel-collector-migrator:
condition: service_completed_successfully
query-service:
condition: service_healthy
logspout:
image: "gliderlabs/logspout:v3.2.14"
container_name: signoz-logspout
volumes:
- /etc/hostname:/etc/host_hostname:ro
- /var/run/docker.sock:/var/run/docker.sock
command: syslog+tcp://otel-collector:2255
depends_on:
- otel-collector
restart: on-failure

View File

@@ -1,3 +0,0 @@
include:
- test-app-docker-compose.yaml
- docker-compose-minimal.yaml

View File

@@ -1,64 +0,0 @@
<clickhouse>
<logger>
<!-- Possible levels [1]:
- none (turns off logging)
- fatal
- critical
- error
- warning
- notice
- information
- debug
- trace
[1]: https://github.com/pocoproject/poco/blob/poco-1.9.4-release/Foundation/include/Poco/Logger.h#L105-L114
-->
<level>information</level>
<log>/var/log/clickhouse-keeper/clickhouse-keeper.log</log>
<errorlog>/var/log/clickhouse-keeper/clickhouse-keeper.err.log</errorlog>
<!-- Rotation policy
See https://github.com/pocoproject/poco/blob/poco-1.9.4-release/Foundation/include/Poco/FileChannel.h#L54-L85
-->
<size>1000M</size>
<count>10</count>
<!-- <console>1</console> --> <!-- Default behavior is autodetection (log to console if not daemon mode and is tty) -->
</logger>
<listen_host>0.0.0.0</listen_host>
<max_connections>4096</max_connections>
<keeper_server>
<tcp_port>9181</tcp_port>
        <!-- Must be unique among all keeper servers -->
<server_id>1</server_id>
<log_storage_path>/var/lib/clickhouse/coordination/logs</log_storage_path>
<snapshot_storage_path>/var/lib/clickhouse/coordination/snapshots</snapshot_storage_path>
<coordination_settings>
<operation_timeout_ms>10000</operation_timeout_ms>
<min_session_timeout_ms>10000</min_session_timeout_ms>
<session_timeout_ms>100000</session_timeout_ms>
<raft_logs_level>information</raft_logs_level>
<compress_logs>false</compress_logs>
<!-- All settings listed in https://github.com/ClickHouse/ClickHouse/blob/master/src/Coordination/CoordinationSettings.h -->
</coordination_settings>
<!-- enable sanity hostname checks for cluster configuration (e.g. if localhost is used with remote endpoints) -->
<hostname_checks_enabled>true</hostname_checks_enabled>
<raft_configuration>
<server>
<id>1</id>
<!-- Internal port and hostname -->
<hostname>clickhouses-keeper-1</hostname>
<port>9234</port>
</server>
<!-- Add more servers here -->
</raft_configuration>
</keeper_server>
</clickhouse>
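Extending the raft_configuration above to a three-node ensemble would look roughly like this (hostnames follow the file's naming and are hypothetical; each keeper's own config must also carry a matching unique server_id):

<!--
<server>
    <id>2</id>
    <hostname>clickhouses-keeper-2</hostname>
    <port>9234</port>
</server>
<server>
    <id>3</id>
    <hostname>clickhouses-keeper-3</hostname>
    <port>9234</port>
</server>
-->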

View File

@@ -1 +0,0 @@
server_endpoint: ws://query-service:4320/v1/opamp

View File

@@ -1,26 +0,0 @@
services:
hotrod:
image: jaegertracing/example-hotrod:1.30
container_name: hotrod
logging:
options:
max-size: 50m
max-file: "3"
command: [ "all" ]
environment:
- JAEGER_ENDPOINT=http://otel-collector:14268/api/traces
load-hotrod:
image: "signoz/locust:1.2.3"
container_name: load-hotrod
hostname: load-hotrod
environment:
ATTACKED_HOST: http://hotrod:8080
LOCUST_MODE: standalone
NO_PROXY: standalone
TASK_DELAY_FROM: 5
TASK_DELAY_TO: 30
QUIET_MODE: "${QUIET_MODE:-false}"
LOCUST_OPTS: "--headless -u 10 -r 1"
volumes:
- ../common/locust-scripts:/locust

View File

@@ -0,0 +1,2 @@
This directory is deprecated and will be removed in the future.
Please use the new ClickHouse setup scripts directory instead: `scripts/clickhouse`.

View File

@@ -1,16 +0,0 @@
from locust import HttpUser, task, between
class UserTasks(HttpUser):
wait_time = between(5, 15)
@task
def rachel(self):
self.client.get("/dispatch?customer=123&nonse=0.6308392664170006")
@task
def trom(self):
self.client.get("/dispatch?customer=392&nonse=0.015296363321630757")
@task
def japanese(self):
self.client.get("/dispatch?customer=731&nonse=0.8022286220408668")
@task
def coffee(self):
self.client.get("/dispatch?customer=567&nonse=0.0022220379420636593")

View File

@@ -0,0 +1,300 @@
version: "3"
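# x-common is a YAML anchor; each service below merges it via `!!merge <<: *common`,
# so the shared network, restart policy, and logging options are defined once.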
x-common: &common
networks:
- signoz-net
restart: on-failure
logging:
options:
max-size: 50m
max-file: "3"
x-clickhouse-defaults: &clickhouse-defaults
!!merge <<: *common
# adding a non-LTS version due to this fix: https://github.com/ClickHouse/ClickHouse/commit/32caf8716352f45c1b617274c7508c86b7d1afab
image: clickhouse/clickhouse-server:24.1.2-alpine
tty: true
labels:
signoz.io/scrape: "true"
signoz.io/port: "9363"
signoz.io/path: "/metrics"
depends_on:
init-clickhouse:
condition: service_completed_successfully
zookeeper-1:
condition: service_healthy
zookeeper-2:
condition: service_healthy
zookeeper-3:
condition: service_healthy
healthcheck:
test:
- CMD
- wget
- --spider
- -q
- 0.0.0.0:8123/ping
interval: 30s
timeout: 5s
retries: 3
ulimits:
nproc: 65535
nofile:
soft: 262144
hard: 262144
x-zookeeper-defaults: &zookeeper-defaults
!!merge <<: *common
image: bitnami/zookeeper:3.7.1
user: root
labels:
signoz.io/scrape: "true"
signoz.io/port: "9141"
signoz.io/path: "/metrics"
healthcheck:
test:
- CMD-SHELL
- curl -s -m 2 http://localhost:8080/commands/ruok | grep error | grep null
interval: 30s
timeout: 5s
retries: 3
x-db-depend: &db-depend
!!merge <<: *common
depends_on:
clickhouse:
condition: service_healthy
schema-migrator-sync:
condition: service_completed_successfully
services:
init-clickhouse:
!!merge <<: *common
image: clickhouse/clickhouse-server:24.1.2-alpine
container_name: signoz-init-clickhouse
command:
- bash
- -c
- |
version="v0.0.1"
node_os=$$(uname -s | tr '[:upper:]' '[:lower:]')
node_arch=$$(uname -m | sed s/aarch64/arm64/ | sed s/x86_64/amd64/)
echo "Fetching histogram-binary for $${node_os}/$${node_arch}"
cd /tmp
wget -O histogram-quantile.tar.gz "https://github.com/SigNoz/signoz/releases/download/histogram-quantile%2F$${version}/histogram-quantile_$${node_os}_$${node_arch}.tar.gz"
tar -xvzf histogram-quantile.tar.gz
mv histogram-quantile /var/lib/clickhouse/user_scripts/histogramQuantile
volumes:
- ../common/clickhouse/user_scripts:/var/lib/clickhouse/user_scripts/
zookeeper-1:
!!merge <<: *zookeeper-defaults
container_name: signoz-zookeeper-1
# ports:
# - "2181:2181"
# - "2888:2888"
# - "3888:3888"
volumes:
- zookeeper-1:/bitnami/zookeeper
environment:
- ZOO_SERVER_ID=1
- ZOO_SERVERS=0.0.0.0:2888:3888,zookeeper-2:2888:3888,zookeeper-3:2888:3888
- ALLOW_ANONYMOUS_LOGIN=yes
- ZOO_AUTOPURGE_INTERVAL=1
- ZOO_ENABLE_PROMETHEUS_METRICS=yes
- ZOO_PROMETHEUS_METRICS_PORT_NUMBER=9141
zookeeper-2:
!!merge <<: *zookeeper-defaults
container_name: signoz-zookeeper-2
# ports:
# - "2182:2181"
# - "2889:2888"
# - "3889:3888"
volumes:
- zookeeper-2:/bitnami/zookeeper
environment:
- ZOO_SERVER_ID=2
- ZOO_SERVERS=zookeeper-1:2888:3888,0.0.0.0:2888:3888,zookeeper-3:2888:3888
- ALLOW_ANONYMOUS_LOGIN=yes
- ZOO_AUTOPURGE_INTERVAL=1
- ZOO_ENABLE_PROMETHEUS_METRICS=yes
- ZOO_PROMETHEUS_METRICS_PORT_NUMBER=9141
zookeeper-3:
!!merge <<: *zookeeper-defaults
container_name: signoz-zookeeper-3
# ports:
# - "2183:2181"
# - "2890:2888"
# - "3890:3888"
volumes:
- zookeeper-3:/bitnami/zookeeper
environment:
- ZOO_SERVER_ID=3
- ZOO_SERVERS=zookeeper-1:2888:3888,zookeeper-2:2888:3888,0.0.0.0:2888:3888
- ALLOW_ANONYMOUS_LOGIN=yes
- ZOO_AUTOPURGE_INTERVAL=1
- ZOO_ENABLE_PROMETHEUS_METRICS=yes
- ZOO_PROMETHEUS_METRICS_PORT_NUMBER=9141
clickhouse:
!!merge <<: *clickhouse-defaults
container_name: signoz-clickhouse
# ports:
# - "9000:9000"
# - "8123:8123"
# - "9181:9181"
volumes:
- ../common/clickhouse/config.xml:/etc/clickhouse-server/config.xml
- ../common/clickhouse/users.xml:/etc/clickhouse-server/users.xml
- ../common/clickhouse/custom-function.xml:/etc/clickhouse-server/custom-function.xml
- ../common/clickhouse/user_scripts:/var/lib/clickhouse/user_scripts/
- ../common/clickhouse/cluster.ha.xml:/etc/clickhouse-server/config.d/cluster.xml
- clickhouse:/var/lib/clickhouse/
# - ../common/clickhouse/storage.xml:/etc/clickhouse-server/config.d/storage.xml
clickhouse-2:
!!merge <<: *clickhouse-defaults
container_name: signoz-clickhouse-2
# ports:
# - "9001:9000"
# - "8124:8123"
# - "9182:9181"
volumes:
- ../common/clickhouse/config.xml:/etc/clickhouse-server/config.xml
- ../common/clickhouse/users.xml:/etc/clickhouse-server/users.xml
- ../common/clickhouse/custom-function.xml:/etc/clickhouse-server/custom-function.xml
- ../common/clickhouse/user_scripts:/var/lib/clickhouse/user_scripts/
- ../common/clickhouse/cluster.ha.xml:/etc/clickhouse-server/config.d/cluster.xml
- clickhouse-2:/var/lib/clickhouse/
# - ../common/clickhouse/storage.xml:/etc/clickhouse-server/config.d/storage.xml
clickhouse-3:
!!merge <<: *clickhouse-defaults
container_name: signoz-clickhouse-3
# ports:
# - "9002:9000"
# - "8125:8123"
# - "9183:9181"
volumes:
- ../common/clickhouse/config.xml:/etc/clickhouse-server/config.xml
- ../common/clickhouse/users.xml:/etc/clickhouse-server/users.xml
- ../common/clickhouse/custom-function.xml:/etc/clickhouse-server/custom-function.xml
- ../common/clickhouse/user_scripts:/var/lib/clickhouse/user_scripts/
- ../common/clickhouse/cluster.ha.xml:/etc/clickhouse-server/config.d/cluster.xml
- clickhouse-3:/var/lib/clickhouse/
# - ../common/clickhouse/storage.xml:/etc/clickhouse-server/config.d/storage.xml
alertmanager:
!!merge <<: *common
image: signoz/alertmanager:${ALERTMANAGER_TAG:-0.23.7}
container_name: signoz-alertmanager
command:
- --queryService.url=http://query-service:8085
- --storage.path=/data
volumes:
- alertmanager:/data
depends_on:
query-service:
condition: service_healthy
query-service:
!!merge <<: *db-depend
image: signoz/query-service:${DOCKER_TAG:-0.70.0}
container_name: signoz-query-service
command:
- --config=/root/config/prometheus.yml
- --use-logs-new-schema=true
- --use-trace-new-schema=true
# ports:
# - "3301:8080" # signoz port
# - "6060:6060" # pprof port
volumes:
- ../common/signoz/prometheus.yml:/root/config/prometheus.yml
- ../common/dashboards:/root/config/dashboards
- sqlite:/var/lib/signoz/
environment:
- ClickHouseUrl=tcp://clickhouse:9000
- ALERTMANAGER_API_PREFIX=http://alertmanager:9093/api/
- SIGNOZ_SQLSTORE_SQLITE_PATH=/var/lib/signoz/signoz.db
- DASHBOARDS_PATH=/root/config/dashboards
- STORAGE=clickhouse
- GODEBUG=netdns=go
- TELEMETRY_ENABLED=true
- DEPLOYMENT_TYPE=docker-standalone-amd
healthcheck:
test:
- CMD
- wget
- --spider
- -q
- localhost:8080/api/v1/health
interval: 30s
timeout: 5s
retries: 3
frontend:
!!merge <<: *common
image: signoz/frontend:${DOCKER_TAG:-0.70.0}
container_name: signoz-frontend
depends_on:
- alertmanager
- query-service
ports:
- "3301:3301"
volumes:
- ../common/signoz/nginx-config.conf:/etc/nginx/conf.d/default.conf
# TODO: support otel-collector multiple replicas. Nginx/Traefik for loadbalancing?
otel-collector:
!!merge <<: *db-depend
image: signoz/signoz-otel-collector:${OTELCOL_TAG:-0.111.24}
container_name: signoz-otel-collector
command:
- --config=/etc/otel-collector-config.yaml
- --manager-config=/etc/manager-config.yaml
- --copy-path=/var/tmp/collector-config.yaml
- --feature-gates=-pkg.translator.prometheus.NormalizeName
volumes:
- ./otel-collector-config.yaml:/etc/otel-collector-config.yaml
- ../common/signoz/otel-collector-opamp-config.yaml:/etc/manager-config.yaml
environment:
- OTEL_RESOURCE_ATTRIBUTES=host.name=signoz-host,os.type=linux
- LOW_CARDINAL_EXCEPTION_GROUPING=false
ports:
# - "1777:1777" # pprof extension
- "4317:4317" # OTLP gRPC receiver
- "4318:4318" # OTLP HTTP receiver
depends_on:
clickhouse:
condition: service_healthy
schema-migrator-sync:
condition: service_completed_successfully
query-service:
condition: service_healthy
schema-migrator-sync:
!!merge <<: *common
image: signoz/signoz-schema-migrator:${OTELCOL_TAG:-0.111.24}
container_name: schema-migrator-sync
command:
- sync
- --dsn=tcp://clickhouse:9000
- --up=
depends_on:
clickhouse:
condition: service_healthy
schema-migrator-async:
!!merge <<: *db-depend
image: signoz/signoz-schema-migrator:${OTELCOL_TAG:-0.111.24}
container_name: schema-migrator-async
command:
- async
- --dsn=tcp://clickhouse:9000
- --up=
networks:
signoz-net:
name: signoz-net
volumes:
alertmanager:
name: signoz-alertmanager
clickhouse:
name: signoz-clickhouse
clickhouse-2:
name: signoz-clickhouse-2
clickhouse-3:
name: signoz-clickhouse-3
sqlite:
name: signoz-sqlite
zookeeper-1:
name: signoz-zookeeper-1
zookeeper-2:
name: signoz-zookeeper-2
zookeeper-3:
name: signoz-zookeeper-3

View File

@@ -0,0 +1,222 @@
version: "3"
x-common: &common
networks:
- signoz-net
restart: on-failure
logging:
options:
max-size: 50m
max-file: "3"
x-clickhouse-defaults: &clickhouse-defaults
!!merge <<: *common
# adding a non-LTS version due to this fix: https://github.com/ClickHouse/ClickHouse/commit/32caf8716352f45c1b617274c7508c86b7d1afab
image: clickhouse/clickhouse-server:24.1.2-alpine
tty: true
labels:
signoz.io/scrape: "true"
signoz.io/port: "9363"
signoz.io/path: "/metrics"
depends_on:
init-clickhouse:
condition: service_completed_successfully
zookeeper-1:
condition: service_healthy
healthcheck:
test:
- CMD
- wget
- --spider
- -q
- 0.0.0.0:8123/ping
interval: 30s
timeout: 5s
retries: 3
ulimits:
nproc: 65535
nofile:
soft: 262144
hard: 262144
x-zookeeper-defaults: &zookeeper-defaults
!!merge <<: *common
image: bitnami/zookeeper:3.7.1
user: root
labels:
signoz.io/scrape: "true"
signoz.io/port: "9141"
signoz.io/path: "/metrics"
healthcheck:
test:
- CMD-SHELL
- curl -s -m 2 http://localhost:8080/commands/ruok | grep error | grep null
interval: 30s
timeout: 5s
retries: 3
x-db-depend: &db-depend
!!merge <<: *common
depends_on:
clickhouse:
condition: service_healthy
schema-migrator-sync:
condition: service_completed_successfully
services:
init-clickhouse:
!!merge <<: *common
image: clickhouse/clickhouse-server:24.1.2-alpine
container_name: signoz-init-clickhouse
command:
- bash
- -c
- |
version="v0.0.1"
node_os=$$(uname -s | tr '[:upper:]' '[:lower:]')
node_arch=$$(uname -m | sed s/aarch64/arm64/ | sed s/x86_64/amd64/)
echo "Fetching histogram-binary for $${node_os}/$${node_arch}"
cd /tmp
wget -O histogram-quantile.tar.gz "https://github.com/SigNoz/signoz/releases/download/histogram-quantile%2F$${version}/histogram-quantile_$${node_os}_$${node_arch}.tar.gz"
tar -xvzf histogram-quantile.tar.gz
mv histogram-quantile /var/lib/clickhouse/user_scripts/histogramQuantile
volumes:
- ../common/clickhouse/user_scripts:/var/lib/clickhouse/user_scripts/
zookeeper-1:
!!merge <<: *zookeeper-defaults
container_name: signoz-zookeeper-1
ports:
- "2181:2181"
- "2888:2888"
- "3888:3888"
volumes:
- zookeeper-1:/bitnami/zookeeper
environment:
- ZOO_SERVER_ID=1
- ALLOW_ANONYMOUS_LOGIN=yes
- ZOO_AUTOPURGE_INTERVAL=1
- ZOO_ENABLE_PROMETHEUS_METRICS=yes
- ZOO_PROMETHEUS_METRICS_PORT_NUMBER=9141
clickhouse:
!!merge <<: *clickhouse-defaults
container_name: signoz-clickhouse
ports:
- "9000:9000"
- "8123:8123"
- "9181:9181"
volumes:
- ../common/clickhouse/config.xml:/etc/clickhouse-server/config.xml
- ../common/clickhouse/users.xml:/etc/clickhouse-server/users.xml
- ../common/clickhouse/custom-function.xml:/etc/clickhouse-server/custom-function.xml
- ../common/clickhouse/user_scripts:/var/lib/clickhouse/user_scripts/
- ../common/clickhouse/cluster.xml:/etc/clickhouse-server/config.d/cluster.xml
- clickhouse:/var/lib/clickhouse/
# - ../common/clickhouse/storage.xml:/etc/clickhouse-server/config.d/storage.xml
alertmanager:
!!merge <<: *common
image: signoz/alertmanager:${ALERTMANAGER_TAG:-0.23.7}
container_name: signoz-alertmanager
command:
- --queryService.url=http://query-service:8085
- --storage.path=/data
volumes:
- alertmanager:/data
depends_on:
query-service:
condition: service_healthy
query-service:
!!merge <<: *db-depend
image: signoz/query-service:${DOCKER_TAG:-0.70.0}
container_name: signoz-query-service
command:
- --config=/root/config/prometheus.yml
- --gateway-url=https://api.staging.signoz.cloud
- --use-logs-new-schema=true
- --use-trace-new-schema=true
# ports:
# - "8080:8080" # signoz port
# - "6060:6060" # pprof port
volumes:
- ../common/signoz/prometheus.yml:/root/config/prometheus.yml
- ../common/dashboards:/root/config/dashboards
- sqlite:/var/lib/signoz/
environment:
- ClickHouseUrl=tcp://clickhouse:9000
- ALERTMANAGER_API_PREFIX=http://alertmanager:9093/api/
- SIGNOZ_SQLSTORE_SQLITE_PATH=/var/lib/signoz/signoz.db
- DASHBOARDS_PATH=/root/config/dashboards
- STORAGE=clickhouse
- GODEBUG=netdns=go
- TELEMETRY_ENABLED=true
- DEPLOYMENT_TYPE=docker-standalone-amd
- KAFKA_SPAN_EVAL=${KAFKA_SPAN_EVAL:-false}
healthcheck:
test:
- CMD
- wget
- --spider
- -q
- localhost:8080/api/v1/health
interval: 30s
timeout: 5s
retries: 3
frontend:
!!merge <<: *common
image: signoz/frontend:${DOCKER_TAG:-0.70.0}
container_name: signoz-frontend
depends_on:
- alertmanager
- query-service
ports:
- "3301:3301"
volumes:
- ../common/signoz/nginx-config.conf:/etc/nginx/conf.d/default.conf
otel-collector:
!!merge <<: *db-depend
image: signoz/signoz-otel-collector:${OTELCOL_TAG:-0.111.24}
container_name: signoz-otel-collector
command:
- --config=/etc/otel-collector-config.yaml
- --manager-config=/etc/manager-config.yaml
- --copy-path=/var/tmp/collector-config.yaml
- --feature-gates=-pkg.translator.prometheus.NormalizeName
volumes:
- ./otel-collector-config.yaml:/etc/otel-collector-config.yaml
- ../common/signoz/otel-collector-opamp-config.yaml:/etc/manager-config.yaml
environment:
- OTEL_RESOURCE_ATTRIBUTES=host.name=signoz-host,os.type=linux
- LOW_CARDINAL_EXCEPTION_GROUPING=false
ports:
# - "1777:1777" # pprof extension
- "4317:4317" # OTLP gRPC receiver
- "4318:4318" # OTLP HTTP receiver
depends_on:
query-service:
condition: service_healthy
schema-migrator-sync:
!!merge <<: *common
image: signoz/signoz-schema-migrator:${OTELCOL_TAG:-0.111.24}
container_name: schema-migrator-sync
command:
- sync
- --dsn=tcp://clickhouse:9000
- --up=
depends_on:
clickhouse:
condition: service_healthy
schema-migrator-async:
!!merge <<: *db-depend
image: signoz/signoz-schema-migrator:${OTELCOL_TAG:-0.111.24}
container_name: schema-migrator-async
command:
- async
- --dsn=tcp://clickhouse:9000
- --up=
networks:
signoz-net:
name: signoz-net
volumes:
alertmanager:
name: signoz-alertmanager
clickhouse:
name: signoz-clickhouse
sqlite:
name: signoz-sqlite
zookeeper-1:
name: signoz-zookeeper-1

View File

@@ -0,0 +1,220 @@
version: "3"
x-common: &common
networks:
- signoz-net
restart: on-failure
logging:
options:
max-size: 50m
max-file: "3"
x-clickhouse-defaults: &clickhouse-defaults
!!merge <<: *common
# adding a non-LTS version due to this fix: https://github.com/ClickHouse/ClickHouse/commit/32caf8716352f45c1b617274c7508c86b7d1afab
image: clickhouse/clickhouse-server:24.1.2-alpine
tty: true
labels:
signoz.io/scrape: "true"
signoz.io/port: "9363"
signoz.io/path: "/metrics"
depends_on:
init-clickhouse:
condition: service_completed_successfully
zookeeper-1:
condition: service_healthy
healthcheck:
test:
- CMD
- wget
- --spider
- -q
- 0.0.0.0:8123/ping
interval: 30s
timeout: 5s
retries: 3
ulimits:
nproc: 65535
nofile:
soft: 262144
hard: 262144
x-zookeeper-defaults: &zookeeper-defaults
!!merge <<: *common
image: bitnami/zookeeper:3.7.1
user: root
labels:
signoz.io/scrape: "true"
signoz.io/port: "9141"
signoz.io/path: "/metrics"
healthcheck:
test:
- CMD-SHELL
- curl -s -m 2 http://localhost:8080/commands/ruok | grep error | grep null
interval: 30s
timeout: 5s
retries: 3
x-db-depend: &db-depend
!!merge <<: *common
depends_on:
clickhouse:
condition: service_healthy
schema-migrator-sync:
condition: service_completed_successfully
services:
init-clickhouse:
!!merge <<: *common
image: clickhouse/clickhouse-server:24.1.2-alpine
container_name: signoz-init-clickhouse
command:
- bash
- -c
- |
version="v0.0.1"
node_os=$$(uname -s | tr '[:upper:]' '[:lower:]')
node_arch=$$(uname -m | sed s/aarch64/arm64/ | sed s/x86_64/amd64/)
echo "Fetching histogram-binary for $${node_os}/$${node_arch}"
cd /tmp
wget -O histogram-quantile.tar.gz "https://github.com/SigNoz/signoz/releases/download/histogram-quantile%2F$${version}/histogram-quantile_$${node_os}_$${node_arch}.tar.gz"
tar -xvzf histogram-quantile.tar.gz
mv histogram-quantile /var/lib/clickhouse/user_scripts/histogramQuantile
volumes:
- ../common/clickhouse/user_scripts:/var/lib/clickhouse/user_scripts/
zookeeper-1:
!!merge <<: *zookeeper-defaults
container_name: signoz-zookeeper-1
# ports:
# - "2181:2181"
# - "2888:2888"
# - "3888:3888"
volumes:
- zookeeper-1:/bitnami/zookeeper
environment:
- ZOO_SERVER_ID=1
- ALLOW_ANONYMOUS_LOGIN=yes
- ZOO_AUTOPURGE_INTERVAL=1
- ZOO_ENABLE_PROMETHEUS_METRICS=yes
- ZOO_PROMETHEUS_METRICS_PORT_NUMBER=9141
clickhouse:
!!merge <<: *clickhouse-defaults
container_name: signoz-clickhouse
# ports:
# - "9000:9000"
# - "8123:8123"
# - "9181:9181"
volumes:
- ../common/clickhouse/config.xml:/etc/clickhouse-server/config.xml
- ../common/clickhouse/users.xml:/etc/clickhouse-server/users.xml
- ../common/clickhouse/custom-function.xml:/etc/clickhouse-server/custom-function.xml
- ../common/clickhouse/user_scripts:/var/lib/clickhouse/user_scripts/
- ../common/clickhouse/cluster.xml:/etc/clickhouse-server/config.d/cluster.xml
- clickhouse:/var/lib/clickhouse/
# - ../common/clickhouse/storage.xml:/etc/clickhouse-server/config.d/storage.xml
alertmanager:
!!merge <<: *common
image: signoz/alertmanager:${ALERTMANAGER_TAG:-0.23.7}
container_name: signoz-alertmanager
command:
- --queryService.url=http://query-service:8085
- --storage.path=/data
volumes:
- alertmanager:/data
depends_on:
query-service:
condition: service_healthy
query-service:
!!merge <<: *db-depend
image: signoz/query-service:${DOCKER_TAG:-0.70.0}
container_name: signoz-query-service
command:
- --config=/root/config/prometheus.yml
- --use-logs-new-schema=true
- --use-trace-new-schema=true
# ports:
# - "3301:8080" # signoz port
# - "6060:6060" # pprof port
volumes:
- ../common/signoz/prometheus.yml:/root/config/prometheus.yml
- ../common/dashboards:/root/config/dashboards
- sqlite:/var/lib/signoz/
environment:
- ClickHouseUrl=tcp://clickhouse:9000
- ALERTMANAGER_API_PREFIX=http://alertmanager:9093/api/
- SIGNOZ_SQLSTORE_SQLITE_PATH=/var/lib/signoz/signoz.db
- DASHBOARDS_PATH=/root/config/dashboards
- STORAGE=clickhouse
- GODEBUG=netdns=go
- TELEMETRY_ENABLED=true
- DEPLOYMENT_TYPE=docker-standalone-amd
healthcheck:
test:
- CMD
- wget
- --spider
- -q
- localhost:8080/api/v1/health
interval: 30s
timeout: 5s
retries: 3
frontend:
!!merge <<: *common
image: signoz/frontend:${DOCKER_TAG:-0.70.0}
container_name: signoz-frontend
depends_on:
- alertmanager
- query-service
ports:
- "3301:3301"
volumes:
- ../common/signoz/nginx-config.conf:/etc/nginx/conf.d/default.conf
otel-collector:
!!merge <<: *db-depend
image: signoz/signoz-otel-collector:${OTELCOL_TAG:-0.111.24}
container_name: signoz-otel-collector
command:
- --config=/etc/otel-collector-config.yaml
- --manager-config=/etc/manager-config.yaml
- --copy-path=/var/tmp/collector-config.yaml
- --feature-gates=-pkg.translator.prometheus.NormalizeName
volumes:
- ./otel-collector-config.yaml:/etc/otel-collector-config.yaml
- ../common/signoz/otel-collector-opamp-config.yaml:/etc/manager-config.yaml
environment:
- OTEL_RESOURCE_ATTRIBUTES=host.name=signoz-host,os.type=linux
- LOW_CARDINAL_EXCEPTION_GROUPING=false
ports:
# - "1777:1777" # pprof extension
- "4317:4317" # OTLP gRPC receiver
- "4318:4318" # OTLP HTTP receiver
depends_on:
query-service:
condition: service_healthy
schema-migrator-sync:
!!merge <<: *common
image: signoz/signoz-schema-migrator:${OTELCOL_TAG:-0.111.24}
container_name: schema-migrator-sync
command:
- sync
- --dsn=tcp://clickhouse:9000
- --up=
depends_on:
clickhouse:
condition: service_healthy
schema-migrator-async:
!!merge <<: *db-depend
image: signoz/signoz-schema-migrator:${OTELCOL_TAG:-0.111.24}
container_name: schema-migrator-async
command:
- async
- --dsn=tcp://clickhouse:9000
- --up=
networks:
signoz-net:
name: signoz-net
volumes:
alertmanager:
name: signoz-alertmanager
clickhouse:
name: signoz-clickhouse
sqlite:
name: signoz-sqlite
zookeeper-1:
name: signoz-zookeeper-1

View File

@@ -0,0 +1,39 @@
version: "3"
x-common: &common
networks:
- signoz-net
extra_hosts:
- host.docker.internal:host-gateway
logging:
options:
max-size: 50m
max-file: "3"
restart: on-failure
services:
hotrod:
<<: *common
image: jaegertracing/example-hotrod:1.61.0
container_name: hotrod
command: [ "all" ]
environment:
- OTEL_EXPORTER_OTLP_ENDPOINT=http://host.docker.internal:4318 # For an external or SigNoz Cloud instance, update the endpoint and access token
# - OTEL_OTLP_HEADERS=signoz-access-token=<your-access-token>
load-hotrod:
<<: *common
image: "signoz/locust:1.2.3"
container_name: load-hotrod
environment:
ATTACKED_HOST: http://hotrod:8080
LOCUST_MODE: standalone
NO_PROXY: standalone
TASK_DELAY_FROM: 5
TASK_DELAY_TO: 30
QUIET_MODE: "${QUIET_MODE:-false}"
LOCUST_OPTS: "--headless -u 10 -r 1"
volumes:
- ../../../common/locust-scripts:/locust
networks:
signoz-net:
name: signoz-net
external: true

View File

@@ -0,0 +1,43 @@
version: "3"
x-common: &common
networks:
- signoz-net
extra_hosts:
- host.docker.internal:host-gateway
logging:
options:
max-size: 50m
max-file: "3"
restart: on-failure
services:
otel-agent:
<<: *common
image: otel/opentelemetry-collector-contrib:0.111.0
command:
- --config=/etc/otel-collector-config.yaml
volumes:
- ./otel-collector-config.yaml:/etc/otel-collector-config.yaml
- /:/hostfs:ro
- /var/run/docker.sock:/var/run/docker.sock
environment:
- SIGNOZ_COLLECTOR_ENDPOINT=http://host.docker.internal:4317 # For an external or SigNoz Cloud instance, update the endpoint and access token
- OTEL_RESOURCE_ATTRIBUTES=host.name=signoz-host,os.type=linux # Replace signoz-host with the actual hostname
# - SIGNOZ_ACCESS_TOKEN="<your-access-token>"
# Before exposing these ports, make sure they are not already in use by other services
# ports:
# - "4317:4317"
# - "4318:4318"
logspout:
<<: *common
image: "gliderlabs/logspout:v3.2.14"
volumes:
- /etc/hostname:/etc/host_hostname:ro
- /var/run/docker.sock:/var/run/docker.sock
command: syslog+tcp://otel-agent:2255
depends_on:
- otel-agent
networks:
signoz-net:
name: signoz-net
external: true

View File

@@ -0,0 +1,139 @@
receivers:
hostmetrics:
collection_interval: 30s
root_path: /hostfs
scrapers:
cpu: {}
load: {}
memory: {}
disk: {}
filesystem: {}
network: {}
otlp:
protocols:
grpc:
endpoint: 0.0.0.0:4317
http:
endpoint: 0.0.0.0:4318
prometheus:
config:
global:
scrape_interval: 60s
scrape_configs:
- job_name: otel-collector
static_configs:
- targets:
- localhost:8888
labels:
job_name: otel-collector
# For Docker daemon metrics to be scraped, it must be configured to expose
# Prometheus metrics, as documented here: https://docs.docker.com/config/daemon/prometheus/
# - job_name: docker-daemon
# static_configs:
# - targets:
# - host.docker.internal:9323
# labels:
# job_name: docker-daemon
- job_name: docker-container
docker_sd_configs:
- host: unix:///var/run/docker.sock
relabel_configs:
- action: keep
regex: true
source_labels:
- __meta_docker_container_label_signoz_io_scrape
- regex: true
source_labels:
- __meta_docker_container_label_signoz_io_path
target_label: __metrics_path__
- regex: (.+)
source_labels:
- __meta_docker_container_label_signoz_io_path
target_label: __metrics_path__
- separator: ":"
source_labels:
- __meta_docker_network_ip
- __meta_docker_container_label_signoz_io_port
target_label: __address__
- regex: '/(.*)'
replacement: '$1'
source_labels:
- __meta_docker_container_name
target_label: container_name
- regex: __meta_docker_container_label_signoz_io_(.+)
action: labelmap
replacement: $1
tcplog/docker:
listen_address: "0.0.0.0:2255"
operators:
- type: regex_parser
regex: '^<([0-9]+)>[0-9]+ (?P<timestamp>[0-9]{4}-[0-9]{2}-[0-9]{2}T[0-9]{2}:[0-9]{2}:[0-9]{2}(\.[0-9]+)?([zZ]|([\+-])([01]\d|2[0-3]):?([0-5]\d)?)?) (?P<container_id>\S+) (?P<container_name>\S+) [0-9]+ - -( (?P<body>.*))?'
timestamp:
parse_from: attributes.timestamp
layout: '%Y-%m-%dT%H:%M:%S.%LZ'
- type: move
from: attributes["body"]
to: body
- type: remove
field: attributes.timestamp
# remove container names from the filter below if you want to collect their logs
- type: filter
id: signoz_logs_filter
expr: 'attributes.container_name matches "^(signoz-(|alertmanager|query-service|otel-collector|clickhouse|zookeeper))|(infra-(logspout|otel-agent)-.*)"'
processors:
batch:
send_batch_size: 10000
send_batch_max_size: 11000
timeout: 10s
resourcedetection:
# Using OTEL_RESOURCE_ATTRIBUTES envvar, env detector adds custom labels.
detectors:
# - ec2
# - gcp
# - azure
- env
- system
timeout: 2s
extensions:
health_check:
endpoint: 0.0.0.0:13133
pprof:
endpoint: 0.0.0.0:1777
exporters:
otlp:
endpoint: ${env:SIGNOZ_COLLECTOR_ENDPOINT}
tls:
insecure: true
headers:
signoz-access-token: ${env:SIGNOZ_ACCESS_TOKEN}
# debug: {}
service:
telemetry:
logs:
encoding: json
metrics:
address: 0.0.0.0:8888
extensions:
- health_check
- pprof
pipelines:
traces:
receivers: [otlp]
processors: [resourcedetection, batch]
exporters: [otlp]
metrics:
receivers: [otlp]
processors: [resourcedetection, batch]
exporters: [otlp]
metrics/hostmetrics:
receivers: [hostmetrics]
processors: [resourcedetection, batch]
exporters: [otlp]
metrics/prometheus:
receivers: [prometheus]
processors: [resourcedetection, batch]
exporters: [otlp]
logs:
receivers: [otlp, tcplog/docker]
processors: [resourcedetection, batch]
exporters: [otlp]
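As a sanity check for the tcplog/docker regex_parser above, the same pattern can be exercised in a short standalone Go sketch; the sample line below is hypothetical, shaped like logspout's RFC 5424 syslog output, not captured from a real system.

package main

import (
	"fmt"
	"regexp"
)

func main() {
	// Same pattern as the tcplog/docker receiver's regex_parser operator.
	re := regexp.MustCompile(`^<([0-9]+)>[0-9]+ (?P<timestamp>[0-9]{4}-[0-9]{2}-[0-9]{2}T[0-9]{2}:[0-9]{2}:[0-9]{2}(\.[0-9]+)?([zZ]|([\+-])([01]\d|2[0-3]):?([0-5]\d)?)?) (?P<container_id>\S+) (?P<container_name>\S+) [0-9]+ - -( (?P<body>.*))?`)
	// Hypothetical logspout-style syslog line.
	line := `<14>1 2025-01-29T10:00:00.000Z abc123 signoz-clickhouse 42 - - Ok.`
	m := re.FindStringSubmatch(line)
	if m == nil {
		fmt.Println("no match")
		return
	}
	for i, name := range re.SubexpNames() {
		if name != "" {
			fmt.Printf("%s=%q\n", name, m[i])
		}
	}
}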

View File

@@ -1,62 +1,21 @@
receivers:
tcplog/docker:
listen_address: "0.0.0.0:2255"
operators:
- type: regex_parser
regex: '^<([0-9]+)>[0-9]+ (?P<timestamp>[0-9]{4}-[0-9]{2}-[0-9]{2}T[0-9]{2}:[0-9]{2}:[0-9]{2}(\.[0-9]+)?([zZ]|([\+-])([01]\d|2[0-3]):?([0-5]\d)?)?) (?P<container_id>\S+) (?P<container_name>\S+) [0-9]+ - -( (?P<body>.*))?'
timestamp:
parse_from: attributes.timestamp
layout: '%Y-%m-%dT%H:%M:%S.%LZ'
- type: move
from: attributes["body"]
to: body
- type: remove
field: attributes.timestamp
# remove container names from the filter below if you want to collect their logs
- type: filter
id: signoz_logs_filter
expr: 'attributes.container_name matches "^signoz_(logspout|frontend|alertmanager|query-service|otel-collector|clickhouse|zookeeper)"'
opencensus:
endpoint: 0.0.0.0:55678
otlp:
protocols:
grpc:
endpoint: 0.0.0.0:4317
http:
endpoint: 0.0.0.0:4318
jaeger:
protocols:
grpc:
endpoint: 0.0.0.0:14250
thrift_http:
endpoint: 0.0.0.0:14268
# thrift_compact:
# endpoint: 0.0.0.0:6831
# thrift_binary:
# endpoint: 0.0.0.0:6832
hostmetrics:
collection_interval: 30s
root_path: /hostfs
scrapers:
cpu: {}
load: {}
memory: {}
disk: {}
filesystem: {}
network: {}
prometheus:
config:
global:
scrape_interval: 60s
scrape_configs:
# otel-collector internal metrics
- job_name: otel-collector
static_configs:
- targets:
- localhost:8888
labels:
job_name: otel-collector
processors:
batch:
send_batch_size: 10000
@@ -64,25 +23,11 @@ processors:
timeout: 10s
resourcedetection:
# Using OTEL_RESOURCE_ATTRIBUTES envvar, env detector adds custom labels.
detectors: [env, system] # include ec2 for AWS, gcp for GCP and azure for Azure.
detectors: [env, system]
timeout: 2s
# memory_limiter:
# # 80% of maximum memory up to 2G
# limit_mib: 1500
# # 25% of limit up to 2G
# spike_limit_mib: 512
# check_interval: 5s
#
# # 50% of the maximum memory
# limit_percentage: 50
# # 20% of max memory usage spike expected
# spike_limit_percentage: 20
# queued_retry:
# num_workers: 4
# queue_size: 100
# retry_on_failure: true
signozspanmetrics/delta:
metrics_exporter: clickhousemetricswrite
metrics_flush_interval: 60s
latency_histogram_buckets: [100us, 1ms, 2ms, 6ms, 10ms, 50ms, 100ms, 250ms, 500ms, 1000ms, 1400ms, 2000ms, 5s, 10s, 20s, 40s, 60s ]
dimensions_cache_size: 100000
aggregation_temporality: AGGREGATION_TEMPORALITY_DELTA
@@ -105,7 +50,11 @@ processors:
- name: host.name
- name: host.type
- name: container.name
extensions:
health_check:
endpoint: 0.0.0.0:13133
pprof:
endpoint: 0.0.0.0:1777
exporters:
clickhousetraces:
datasource: tcp://clickhouse:9000/signoz_traces
@@ -119,44 +68,34 @@ exporters:
endpoint: tcp://clickhouse:9000/signoz_metrics
clickhousemetricswritev2:
dsn: tcp://clickhouse:9000/signoz_metrics
# logging: {}
clickhouselogsexporter:
dsn: tcp://clickhouse:9000/signoz_logs
timeout: 10s
use_new_schema: true
extensions:
health_check:
endpoint: 0.0.0.0:13133
zpages:
endpoint: 0.0.0.0:55679
pprof:
endpoint: 0.0.0.0:1777
# debug: {}
service:
telemetry:
logs:
encoding: json
metrics:
address: 0.0.0.0:8888
extensions: [health_check, zpages, pprof]
extensions:
- health_check
- pprof
pipelines:
traces:
receivers: [jaeger, otlp]
receivers: [otlp]
processors: [signozspanmetrics/delta, batch]
exporters: [clickhousetraces]
metrics:
receivers: [otlp]
processors: [batch]
exporters: [clickhousemetricswrite, clickhousemetricswritev2]
metrics/hostmetrics:
receivers: [hostmetrics]
processors: [resourcedetection, batch]
exporters: [clickhousemetricswrite, clickhousemetricswritev2]
metrics/prometheus:
receivers: [prometheus]
processors: [batch]
exporters: [clickhousemetricswrite/prometheus, clickhousemetricswritev2]
logs:
receivers: [otlp, tcplog/docker]
receivers: [otlp]
processors: [batch]
exporters: [clickhouselogsexporter]

View File

@@ -2,6 +2,11 @@
set -o errexit
# Variables
BASE_DIR="$(dirname "$(readlink -f "$0")")"
DOCKER_STANDALONE_DIR="docker"
DOCKER_SWARM_DIR="docker-swarm" # TODO: Add docker swarm support
# Regular Colors
Black='\033[0;30m' # Black
Red='\033[0;31m' # Red
@@ -32,6 +37,11 @@ has_cmd() {
command -v "$1" > /dev/null 2>&1
}
# Check if docker compose plugin is present
has_docker_compose_plugin() {
docker compose version > /dev/null 2>&1
}
is_mac() {
[[ $OSTYPE == darwin* ]]
}
@@ -183,9 +193,7 @@ install_docker() {
$sudo_cmd yum-config-manager --add-repo https://download.docker.com/linux/$os/docker-ce.repo
echo "Installing docker"
$yum_cmd install docker-ce docker-ce-cli containerd.io
fi
}
compose_version () {
@@ -222,17 +230,11 @@ start_docker() {
echo -e "🐳 Starting Docker ...\n"
if [[ $os == "Mac" ]]; then
open --background -a Docker && while ! docker system info > /dev/null 2>&1; do sleep 1; done
else
else
if ! $sudo_cmd systemctl is-active docker.service > /dev/null; then
echo "Starting docker service"
$sudo_cmd systemctl start docker.service
fi
# if [[ -z $sudo_cmd ]]; then
# docker ps > /dev/null && true
# if [[ $? -ne 0 ]]; then
# request_sudo
# fi
# fi
if [[ -z $sudo_cmd ]]; then
if ! docker ps > /dev/null && true; then
request_sudo
@@ -260,12 +262,15 @@ wait_for_containers_start() {
}
bye() { # Prints a friendly goodbye message and exits the script.
# Switch back to the original directory
popd > /dev/null 2>&1
if [[ "$?" -ne 0 ]]; then
set +o errexit
echo "🔴 The containers didn't seem to start correctly. Please run the following command to check containers that may have errored out:"
echo ""
echo -e "$sudo_cmd docker-compose -f ./docker/clickhouse-setup/docker-compose.yaml ps -a"
echo -e "cd ${DOCKER_STANDALONE_DIR}"
echo -e "$sudo_cmd $docker_compose_cmd ps -a"
echo "Please read our troubleshooting guide https://signoz.io/docs/install/troubleshooting/"
echo "or reach us for support in #help channel in our Slack Community https://signoz.io/slack"
@@ -296,11 +301,6 @@ request_sudo() {
if (( $EUID != 0 )); then
sudo_cmd="sudo"
echo -e "Please enter your sudo password, if prompted."
# $sudo_cmd -l | grep -e "NOPASSWD: ALL" > /dev/null
# if [[ $? -ne 0 ]] && ! $sudo_cmd -v; then
# echo "Need sudo privileges to proceed with the installation."
# exit 1;
# fi
if ! $sudo_cmd -l | grep -e "NOPASSWD: ALL" > /dev/null && ! $sudo_cmd -v; then
echo "Need sudo privileges to proceed with the installation."
exit 1;
@@ -309,7 +309,7 @@ request_sudo() {
echo -e "Got it! Thanks!! 🙏\n"
echo -e "Okay! We will bring up the SigNoz cluster from here 🚀\n"
fi
fi
fi
}
echo ""
@@ -317,6 +317,7 @@ echo -e "👋 Thank you for trying out SigNoz! "
echo ""
sudo_cmd=""
docker_compose_cmd="docker compose"
# Check sudo permissions
if (( $EUID != 0 )); then
@@ -324,7 +325,13 @@ if (( $EUID != 0 )); then
echo " In case of any failure or prompt, please consider running the script with sudo privileges."
echo ""
else
sudo_cmd="sudo"
if hash sudo 2>/dev/null; then
sudo_cmd="sudo"
else
echo "🟡 Running installer with root permissions but sudo command not found, running without sudo"
echo " In case of any failure or prompt, please consider installing sudo and running the script again."
echo ""
fi
fi
# Checking OS and assigning package manager
@@ -362,28 +369,8 @@ else
SIGNOZ_INSTALLATION_ID=$(echo "$sysinfo" | $digest_cmd | grep -E -o '[a-zA-Z0-9]{64}')
fi
# echo ""
# echo -e "👉 ${RED}Two ways to go forward\n"
# echo -e "${RED}1) ClickHouse as database (default)\n"
# read -p "⚙️ Enter your preference (1/2):" choice_setup
# while [[ $choice_setup != "1" && $choice_setup != "2" && $choice_setup != "" ]]
# do
# # echo $choice_setup
# echo -e "\n❌ ${CYAN}Please enter either 1 or 2"
# read -p "⚙️ Enter your preference (1/2): " choice_setup
# # echo $choice_setup
# done
# if [[ $choice_setup == "1" || $choice_setup == "" ]];then
# setup_type='clickhouse'
# fi
setup_type='clickhouse'
# echo -e "\n✅ ${CYAN}You have chosen: ${setup_type} setup\n"
# Run bye if failure happens
trap bye EXIT
@@ -455,8 +442,6 @@ if [[ $desired_os -eq 0 ]]; then
send_event "os_not_supported"
fi
# check_ports_occupied
# Check if the Docker daemon is installed and available. If not, install and start Docker on Linux machines. We cannot automatically install Docker Desktop on macOS
if ! is_command_present docker; then
@@ -486,27 +471,41 @@ if ! is_command_present docker; then
fi
fi
# Install docker-compose
if ! is_command_present docker-compose; then
request_sudo
install_docker_compose
if has_docker_compose_plugin; then
echo "docker compose plugin found, using it"
else
docker_compose_cmd="docker-compose"
echo "docker compose plugin not found, using docker-compose instead"
if ! is_command_present docker-compose; then
request_sudo
install_docker_compose
fi
fi
start_docker
# $sudo_cmd docker-compose -f ./docker/clickhouse-setup/docker-compose.yaml up -d --remove-orphans || true
# Switch to the Docker Standalone directory
pushd "${BASE_DIR}/${DOCKER_STANDALONE_DIR}" > /dev/null 2>&1
# check for occupied ports if SigNoz is not already installed
if is_command_present docker-compose; then
if $sudo_cmd $docker_compose_cmd ps | grep "signoz-query-service" | grep -q "healthy" > /dev/null 2>&1; then
echo "SigNoz already installed, skipping the occupied ports check"
else
check_ports_occupied
fi
fi
echo ""
echo -e "\n🟡 Pulling the latest container images for SigNoz.\n"
$sudo_cmd docker-compose -f ./docker/clickhouse-setup/docker-compose.yaml pull
$sudo_cmd $docker_compose_cmd pull
echo ""
echo "🟡 Starting the SigNoz containers. It may take a few minutes ..."
echo
# The docker-compose command does some nasty stuff for the `--detach` functionality. So we add a `|| true` so that the
# The $docker_compose_cmd command does some nasty stuff for the `--detach` functionality. So we add a `|| true` so that the
# script doesn't exit because this command looks like it failed to do its thing.
$sudo_cmd docker-compose -f ./docker/clickhouse-setup/docker-compose.yaml up --detach --remove-orphans || true
$sudo_cmd $docker_compose_cmd up --detach --remove-orphans || true
wait_for_containers_start 60
echo ""
@@ -516,7 +515,14 @@ if [[ $status_code -ne 200 ]]; then
echo "🔴 The containers didn't seem to start correctly. Please run the following command to check containers that may have errored out:"
echo ""
echo -e "$sudo_cmd docker-compose -f ./docker/clickhouse-setup/docker-compose.yaml ps -a"
echo "cd ${DOCKER_STANDALONE_DIR}"
echo "$sudo_cmd $docker_compose_cmd ps -a"
echo ""
echo "Try bringing down the containers and retrying the installation"
echo "cd ${DOCKER_STANDALONE_DIR}"
echo "$sudo_cmd $docker_compose_cmd down -v"
echo ""
echo "Please read our troubleshooting guide https://signoz.io/docs/install/troubleshooting/"
echo "or reach us on SigNoz for support https://signoz.io/slack"
@@ -534,10 +540,13 @@ else
echo ""
echo -e "🟢 Your frontend is running on http://localhost:3301"
echo ""
echo " By default, retention period is set to 15 days for logs and traces, and 30 days for metrics."
echo " By default, retention period is set to 15 days for logs and traces, and 30 days for metrics."
echo -e "To change this, navigate to the General tab on the Settings page of SigNoz UI. For more details, refer to https://signoz.io/docs/userguide/retention-period \n"
echo " To bring down SigNoz and clean volumes : $sudo_cmd docker-compose -f ./docker/clickhouse-setup/docker-compose.yaml down -v"
echo " To bring down SigNoz and clean volumes:"
echo ""
echo "cd ${DOCKER_STANDALONE_DIR}"
echo "$sudo_cmd $docker_compose_cmd down -v"
echo ""
echo "+++++++++++++++++++++++++++++++++++++++++++++++++"
@@ -552,7 +561,7 @@ else
do
read -rp 'Email: ' email
done
send_event "identify_successful_installation"
fi

View File

@@ -59,7 +59,7 @@ type anomalyQueryParams struct {
// The results obtained from this query are used to compare with predicted values
// and to detect anomalies
CurrentPeriodQuery *v3.QueryRangeParamsV3
// PastPeriodQuery is the query range params for past seasonal period
// PastPeriodQuery is the query range params for past period of seasonality
// Example: For weekly seasonality, (now-1w-5m, now-1w)
// : For daily seasonality, (now-1d-5m, now-1d)
// : For hourly seasonality, (now-1h-5m, now-1h)
@@ -74,7 +74,6 @@ type anomalyQueryParams struct {
// : For daily seasonality, this is the query range params for the (now-2d-5m, now-1d)
// : For hourly seasonality, this is the query range params for the (now-2h-5m, now-1h)
PastSeasonQuery *v3.QueryRangeParamsV3
// Past2SeasonQuery is the query range params for past 2 seasonal period to the current season
// Example: For weekly seasonality, this is the query range params for the (now-3w-5m, now-2w)
// : For daily seasonality, this is the query range params for the (now-3d-5m, now-2d)
@@ -144,13 +143,13 @@ func prepareAnomalyQueryParams(req *v3.QueryRangeParamsV3, seasonality Seasonali
switch seasonality {
case SeasonalityWeekly:
currentGrowthPeriodStart = start - oneWeekOffset
currentGrowthPeriodEnd = end
currentGrowthPeriodEnd = start
case SeasonalityDaily:
currentGrowthPeriodStart = start - oneDayOffset
currentGrowthPeriodEnd = end
currentGrowthPeriodEnd = start
case SeasonalityHourly:
currentGrowthPeriodStart = start - oneHourOffset
currentGrowthPeriodEnd = end
currentGrowthPeriodEnd = start
}
currentGrowthQuery := &v3.QueryRangeParamsV3{
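The fix above changes each currentGrowthPeriodEnd from end to start, so the growth window covers exactly one season immediately before the queried range instead of overlapping it. A minimal sketch of the corrected boundary math, simplified to time.Time (the real code works with millisecond offsets):

package main

import (
	"fmt"
	"time"
)

// growthPeriod returns the window [start-season, start): one full season
// ending where the queried range begins (the fix), rather than ending at
// `end` and overlapping the current window (the old behavior).
func growthPeriod(start time.Time, season time.Duration) (time.Time, time.Time) {
	return start.Add(-season), start
}

func main() {
	start := time.Date(2025, 1, 29, 12, 0, 0, 0, time.UTC)
	from, to := growthPeriod(start, 7*24*time.Hour) // weekly seasonality
	fmt.Println(from, to)
}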

View File

@@ -194,10 +194,11 @@ func (p *BaseSeasonalProvider) getMovingAvg(series *v3.Series, movingAvgWindowSi
}
var sum float64
points := series.Points[startIdx:]
for i := 0; i < movingAvgWindowSize && i < len(points); i++ {
windowSize := int(math.Min(float64(movingAvgWindowSize), float64(len(points))))
for i := 0; i < windowSize; i++ {
sum += points[i].Value
}
avg := sum / float64(movingAvgWindowSize)
avg := sum / float64(windowSize)
return avg
}
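The corrected getMovingAvg divides by the number of points actually summed instead of the configured window size, so a short tail of the series is no longer diluted by a too-large denominator. A self-contained sketch of the fixed behavior, using a plain float64 slice in place of the real Series type:

package main

import "fmt"

// movingAvg averages up to windowSize values starting at startIdx,
// dividing by the number of points actually summed (the fix), not by
// the configured window size (the old bug).
func movingAvg(values []float64, windowSize, startIdx int) float64 {
	if startIdx >= len(values) {
		return 0
	}
	points := values[startIdx:]
	n := windowSize
	if len(points) < n {
		n = len(points)
	}
	var sum float64
	for i := 0; i < n; i++ {
		sum += points[i]
	}
	return sum / float64(n)
}

func main() {
	vals := []float64{10, 10, 10}
	// The old code would return 30/5 = 6 here; the fix returns 30/3 = 10.
	fmt.Println(movingAvg(vals, 5, 0))
}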
@@ -226,21 +227,25 @@ func (p *BaseSeasonalProvider) getPredictedSeries(
// plus the average of the current season series
// minus the mean of the past season series, past2 season series and past3 season series
for idx, curr := range series.Points {
predictedValue :=
p.getMovingAvg(prevSeries, movingAvgWindowSize, idx) +
p.getAvg(currentSeasonSeries) -
p.getMean(p.getAvg(pastSeasonSeries), p.getAvg(past2SeasonSeries), p.getAvg(past3SeasonSeries))
movingAvg := p.getMovingAvg(prevSeries, movingAvgWindowSize, idx)
avg := p.getAvg(currentSeasonSeries)
mean := p.getMean(p.getAvg(pastSeasonSeries), p.getAvg(past2SeasonSeries), p.getAvg(past3SeasonSeries))
predictedValue := movingAvg + avg - mean
if predictedValue < 0 {
// this should not happen (except when the data has extreme outliers)
// we will use the moving avg of the previous period series in this case
zap.L().Warn("predictedValue is less than 0", zap.Float64("predictedValue", predictedValue), zap.Any("labels", series.Labels))
predictedValue = p.getMovingAvg(prevSeries, movingAvgWindowSize, idx)
}
zap.L().Info("predictedSeries",
zap.Float64("movingAvg", p.getMovingAvg(prevSeries, movingAvgWindowSize, idx)),
zap.Float64("avg", p.getAvg(currentSeasonSeries)),
zap.Float64("mean", p.getMean(p.getAvg(pastSeasonSeries), p.getAvg(past2SeasonSeries), p.getAvg(past3SeasonSeries))),
zap.L().Debug("predictedSeries",
zap.Float64("movingAvg", movingAvg),
zap.Float64("avg", avg),
zap.Float64("mean", mean),
zap.Any("labels", series.Labels),
zap.Float64("predictedValue", predictedValue),
zap.Float64("curr", curr.Value),
)
predictedSeries.Points = append(predictedSeries.Points, v3.Point{
Timestamp: curr.Timestamp,

View File

@@ -12,6 +12,7 @@ import (
"go.signoz.io/signoz/ee/query-service/license"
"go.signoz.io/signoz/ee/query-service/usage"
baseapp "go.signoz.io/signoz/pkg/query-service/app"
"go.signoz.io/signoz/pkg/query-service/app/cloudintegrations"
"go.signoz.io/signoz/pkg/query-service/app/integrations"
"go.signoz.io/signoz/pkg/query-service/app/logparsingpipeline"
"go.signoz.io/signoz/pkg/query-service/cache"
@@ -25,15 +26,13 @@ type APIHandlerOptions struct {
DataConnector interfaces.DataConnector
SkipConfig *basemodel.SkipConfig
PreferSpanMetrics bool
MaxIdleConns int
MaxOpenConns int
DialTimeout time.Duration
AppDao dao.ModelDao
RulesManager *rules.Manager
UsageManager *usage.Manager
FeatureFlags baseint.FeatureLookup
LicenseManager *license.Manager
IntegrationsController *integrations.Controller
CloudIntegrationsController *cloudintegrations.Controller
LogsParsingPipelineController *logparsingpipeline.LogParsingPipelineController
Cache cache.Cache
Gateway *httputil.ReverseProxy
@@ -55,13 +54,11 @@ func NewAPIHandler(opts APIHandlerOptions) (*APIHandler, error) {
Reader: opts.DataConnector,
SkipConfig: opts.SkipConfig,
PreferSpanMetrics: opts.PreferSpanMetrics,
MaxIdleConns: opts.MaxIdleConns,
MaxOpenConns: opts.MaxOpenConns,
DialTimeout: opts.DialTimeout,
AppDao: opts.AppDao,
RuleManager: opts.RulesManager,
FeatureFlags: opts.FeatureFlags,
IntegrationsController: opts.IntegrationsController,
CloudIntegrationsController: opts.CloudIntegrationsController,
LogsParsingPipelineController: opts.LogsParsingPipelineController,
Cache: opts.Cache,
FluxInterval: opts.FluxInterval,
@@ -114,13 +111,6 @@ func (ah *APIHandler) RegisterRoutes(router *mux.Router, am *baseapp.AuthMiddlew
// note: add ee override methods first
// routes available only in ee version
router.HandleFunc("/api/v1/licenses",
am.AdminAccess(ah.listLicenses)).
Methods(http.MethodGet)
router.HandleFunc("/api/v1/licenses",
am.AdminAccess(ah.applyLicense)).
Methods(http.MethodPost)
router.HandleFunc("/api/v1/featureFlags",
am.OpenAccess(ah.getFeatureFlags)).
@@ -175,11 +165,6 @@ func (ah *APIHandler) RegisterRoutes(router *mux.Router, am *baseapp.AuthMiddlew
router.HandleFunc("/api/v1/dashboards/{uuid}/lock", am.EditAccess(ah.lockDashboard)).Methods(http.MethodPut)
router.HandleFunc("/api/v1/dashboards/{uuid}/unlock", am.EditAccess(ah.unlockDashboard)).Methods(http.MethodPut)
// v2
router.HandleFunc("/api/v2/licenses",
am.ViewAccess(ah.listLicensesV2)).
Methods(http.MethodGet)
// v3
router.HandleFunc("/api/v3/licenses", am.ViewAccess(ah.listLicensesV3)).Methods(http.MethodGet)
router.HandleFunc("/api/v3/licenses", am.AdminAccess(ah.applyLicenseV3)).Methods(http.MethodPost)

View File

@@ -1,7 +1,6 @@
package api
import (
"context"
"encoding/json"
"fmt"
"io"
@@ -64,55 +63,8 @@ type ApplyLicenseRequest struct {
LicenseKey string `json:"key"`
}
type ListLicenseResponse map[string]interface{}
func convertLicenseV3ToListLicenseResponse(licensesV3 []*model.LicenseV3) []ListLicenseResponse {
listLicenses := []ListLicenseResponse{}
for _, license := range licensesV3 {
listLicenses = append(listLicenses, license.Data)
}
return listLicenses
}
func (ah *APIHandler) listLicenses(w http.ResponseWriter, r *http.Request) {
licenses, apiError := ah.LM().GetLicenses(context.Background())
if apiError != nil {
RespondError(w, apiError, nil)
}
ah.Respond(w, licenses)
}
func (ah *APIHandler) applyLicense(w http.ResponseWriter, r *http.Request) {
var l model.License
if err := json.NewDecoder(r.Body).Decode(&l); err != nil {
RespondError(w, model.BadRequest(err), nil)
return
}
if l.Key == "" {
RespondError(w, model.BadRequest(fmt.Errorf("license key is required")), nil)
return
}
license, apiError := ah.LM().ActivateV3(r.Context(), l.Key)
if apiError != nil {
RespondError(w, apiError, nil)
return
}
ah.Respond(w, license)
}
func (ah *APIHandler) listLicensesV3(w http.ResponseWriter, r *http.Request) {
licenses, apiError := ah.LM().GetLicensesV3(r.Context())
if apiError != nil {
RespondError(w, apiError, nil)
return
}
ah.Respond(w, convertLicenseV3ToListLicenseResponse(licenses))
ah.listLicensesV2(w, r)
}
func (ah *APIHandler) getActiveLicenseV3(w http.ResponseWriter, r *http.Request) {
@@ -121,6 +73,7 @@ func (ah *APIHandler) getActiveLicenseV3(w http.ResponseWriter, r *http.Request)
RespondError(w, &model.ApiError{Typ: model.ErrorInternal, Err: err}, nil)
return
}
// return 404 not found if there is no active license
if activeLicense == nil {
RespondError(w, &model.ApiError{Typ: model.ErrorNotFound, Err: fmt.Errorf("no active license found")}, nil)

View File

@@ -7,6 +7,7 @@ import (
"github.com/jmoiron/sqlx"
"go.signoz.io/signoz/pkg/cache"
basechr "go.signoz.io/signoz/pkg/query-service/app/clickhouseReader"
"go.signoz.io/signoz/pkg/query-service/interfaces"
)
@@ -19,20 +20,20 @@ type ClickhouseReader struct {
func NewDataConnector(
localDB *sqlx.DB,
ch clickhouse.Conn,
promConfigPath string,
lm interfaces.FeatureLookup,
maxIdleConns int,
maxOpenConns int,
dialTimeout time.Duration,
cluster string,
useLogsNewSchema bool,
useTraceNewSchema bool,
fluxIntervalForTraceDetail time.Duration,
cache cache.Cache,
) *ClickhouseReader {
ch := basechr.NewReader(localDB, promConfigPath, lm, maxIdleConns, maxOpenConns, dialTimeout, cluster, useLogsNewSchema, useTraceNewSchema)
chReader := basechr.NewReader(localDB, ch, promConfigPath, lm, cluster, useLogsNewSchema, useTraceNewSchema, fluxIntervalForTraceDetail, cache)
return &ClickhouseReader{
conn: ch.GetConn(),
conn: ch,
appdb: localDB,
ClickHouseReader: ch,
ClickHouseReader: chReader,
}
}

View File

@@ -29,8 +29,8 @@ import (
"go.signoz.io/signoz/ee/query-service/integrations/gateway"
"go.signoz.io/signoz/ee/query-service/interfaces"
"go.signoz.io/signoz/ee/query-service/rules"
"go.signoz.io/signoz/pkg/http/middleware"
baseauth "go.signoz.io/signoz/pkg/query-service/auth"
"go.signoz.io/signoz/pkg/query-service/migrate"
v3 "go.signoz.io/signoz/pkg/query-service/model/v3"
"go.signoz.io/signoz/pkg/signoz"
"go.signoz.io/signoz/pkg/web"
@@ -40,6 +40,7 @@ import (
"go.signoz.io/signoz/pkg/query-service/agentConf"
baseapp "go.signoz.io/signoz/pkg/query-service/app"
"go.signoz.io/signoz/pkg/query-service/app/cloudintegrations"
"go.signoz.io/signoz/pkg/query-service/app/dashboards"
baseexplorer "go.signoz.io/signoz/pkg/query-service/app/explorer"
"go.signoz.io/signoz/pkg/query-service/app/integrations"
@@ -63,25 +64,23 @@ import (
const AppDbEngine = "sqlite"
type ServerOptions struct {
Config signoz.Config
SigNoz *signoz.SigNoz
PromConfigPath string
SkipTopLvlOpsPath string
HTTPHostPort string
PrivateHostPort string
// alert specific params
DisableRules bool
RuleRepoURL string
PreferSpanMetrics bool
MaxIdleConns int
MaxOpenConns int
DialTimeout time.Duration
CacheConfigPath string
FluxInterval string
Cluster string
GatewayUrl string
UseLogsNewSchema bool
UseTraceNewSchema bool
SkipWebFrontend bool
DisableRules bool
RuleRepoURL string
PreferSpanMetrics bool
CacheConfigPath string
FluxInterval string
FluxIntervalForTraceDetail string
Cluster string
GatewayUrl string
UseLogsNewSchema bool
UseTraceNewSchema bool
}
// Server runs HTTP api service
@@ -112,25 +111,22 @@ func (s Server) HealthCheckStatus() chan healthcheck.Status {
// NewServer creates and initializes Server
func NewServer(serverOptions *ServerOptions) (*Server, error) {
modelDao, err := dao.InitDao("sqlite", baseconst.RELATIONAL_DATASOURCE_PATH)
modelDao, err := dao.InitDao(serverOptions.SigNoz.SQLStore.SQLxDB())
if err != nil {
return nil, err
}
baseexplorer.InitWithDSN(baseconst.RELATIONAL_DATASOURCE_PATH)
if err := preferences.InitDB(baseconst.RELATIONAL_DATASOURCE_PATH); err != nil {
if err := baseexplorer.InitWithDSN(serverOptions.SigNoz.SQLStore.SQLxDB()); err != nil {
return nil, err
}
localDB, err := dashboards.InitDB(baseconst.RELATIONAL_DATASOURCE_PATH)
if err != nil {
if err := preferences.InitDB(serverOptions.SigNoz.SQLStore.SQLxDB()); err != nil {
return nil, err
}
localDB.SetMaxOpenConns(10)
if err := dashboards.InitDB(serverOptions.SigNoz.SQLStore.SQLxDB()); err != nil {
return nil, err
}
gatewayProxy, err := gateway.NewProxy(serverOptions.GatewayUrl, gateway.RoutePrefix)
if err != nil {
@@ -138,7 +134,7 @@ func NewServer(serverOptions *ServerOptions) (*Server, error) {
}
// initiate license manager
lm, err := licensepkg.StartManager("sqlite", localDB)
lm, err := licensepkg.StartManager(serverOptions.SigNoz.SQLStore.SQLxDB())
if err != nil {
return nil, err
}
@@ -147,20 +143,25 @@ func NewServer(serverOptions *ServerOptions) (*Server, error) {
modelDao.SetFlagProvider(lm)
readerReady := make(chan bool)
fluxIntervalForTraceDetail, err := time.ParseDuration(serverOptions.FluxIntervalForTraceDetail)
if err != nil {
return nil, err
}
var reader interfaces.DataConnector
storage := os.Getenv("STORAGE")
if storage == "clickhouse" {
zap.L().Info("Using ClickHouse as datastore ...")
qb := db.NewDataConnector(
localDB,
serverOptions.SigNoz.SQLStore.SQLxDB(),
serverOptions.SigNoz.TelemetryStore.ClickHouseDB(),
serverOptions.PromConfigPath,
lm,
serverOptions.MaxIdleConns,
serverOptions.MaxOpenConns,
serverOptions.DialTimeout,
serverOptions.Cluster,
serverOptions.UseLogsNewSchema,
serverOptions.UseTraceNewSchema,
fluxIntervalForTraceDetail,
serverOptions.SigNoz.Cache,
)
go qb.Start(readerReady)
reader = qb
@@ -188,7 +189,7 @@ func NewServer(serverOptions *ServerOptions) (*Server, error) {
rm, err := makeRulesManager(serverOptions.PromConfigPath,
baseconst.GetAlertManagerApiPrefix(),
serverOptions.RuleRepoURL,
localDB,
serverOptions.SigNoz.SQLStore.SQLxDB(),
reader,
c,
serverOptions.DisableRules,
@@ -201,29 +202,29 @@ func NewServer(serverOptions *ServerOptions) (*Server, error) {
return nil, err
}
go func() {
err = migrate.ClickHouseMigrate(reader.GetConn(), serverOptions.Cluster)
if err != nil {
zap.L().Error("error while running clickhouse migrations", zap.Error(err))
}
}()
// initiate opamp
_, err = opAmpModel.InitDB(localDB)
_, err = opAmpModel.InitDB(serverOptions.SigNoz.SQLStore.SQLxDB())
if err != nil {
return nil, err
}
integrationsController, err := integrations.NewController(localDB)
integrationsController, err := integrations.NewController(serverOptions.SigNoz.SQLStore.SQLxDB())
if err != nil {
return nil, fmt.Errorf(
"couldn't create integrations controller: %w", err,
)
}
cloudIntegrationsController, err := cloudintegrations.NewController(serverOptions.SigNoz.SQLStore.SQLxDB())
if err != nil {
return nil, fmt.Errorf(
"couldn't create cloud provider integrations controller: %w", err,
)
}
// ingestion pipelines manager
logParsingPipelineController, err := logparsingpipeline.NewLogParsingPipelinesController(
localDB, "sqlite", integrationsController.GetPipelinesForInstalledIntegrations,
serverOptions.SigNoz.SQLStore.SQLxDB(), integrationsController.GetPipelinesForInstalledIntegrations,
)
if err != nil {
return nil, err
@@ -231,8 +232,7 @@ func NewServer(serverOptions *ServerOptions) (*Server, error) {
// initiate agent config handler
agentConfMgr, err := agentConf.Initiate(&agentConf.ManagerOptions{
DB: localDB,
DBEngine: AppDbEngine,
DB: serverOptions.SigNoz.SQLStore.SQLxDB(),
AgentFeatures: []agentConf.AgentFeature{logParsingPipelineController},
})
if err != nil {
@@ -240,7 +240,7 @@ func NewServer(serverOptions *ServerOptions) (*Server, error) {
}
// start the usagemanager
usageManager, err := usage.New("sqlite", modelDao, lm.GetRepo(), reader.GetConn())
usageManager, err := usage.New(modelDao, lm.GetRepo(), serverOptions.SigNoz.TelemetryStore.ClickHouseDB(), serverOptions.Config.TelemetryStore.ClickHouse.DSN)
if err != nil {
return nil, err
}
@@ -253,7 +253,6 @@ func NewServer(serverOptions *ServerOptions) (*Server, error) {
telemetry.GetInstance().SetSaasOperator(constants.SaasSegmentKey)
fluxInterval, err := time.ParseDuration(serverOptions.FluxInterval)
if err != nil {
return nil, err
}
@@ -262,15 +261,13 @@ func NewServer(serverOptions *ServerOptions) (*Server, error) {
DataConnector: reader,
SkipConfig: skipConfig,
PreferSpanMetrics: serverOptions.PreferSpanMetrics,
MaxIdleConns: serverOptions.MaxIdleConns,
MaxOpenConns: serverOptions.MaxOpenConns,
DialTimeout: serverOptions.DialTimeout,
AppDao: modelDao,
RulesManager: rm,
UsageManager: usageManager,
FeatureFlags: lm,
LicenseManager: lm,
IntegrationsController: integrationsController,
CloudIntegrationsController: cloudIntegrationsController,
LogsParsingPipelineController: logParsingPipelineController,
Cache: c,
FluxInterval: fluxInterval,
@@ -319,10 +316,13 @@ func (s *Server) createPrivateServer(apiHandler *api.APIHandler) (*http.Server,
r := baseapp.NewRouter()
r.Use(setTimeoutMiddleware)
r.Use(s.analyticsMiddleware)
r.Use(loggingMiddlewarePrivate)
r.Use(baseapp.LogCommentEnricher)
r.Use(middleware.NewTimeout(zap.L(),
s.serverOptions.Config.APIServer.Timeout.ExcludedRoutes,
s.serverOptions.Config.APIServer.Timeout.Default,
s.serverOptions.Config.APIServer.Timeout.Max,
).Wrap)
r.Use(middleware.NewAnalytics(zap.L()).Wrap)
r.Use(middleware.NewLogging(zap.L(), s.serverOptions.Config.APIServer.Logging.ExcludedRoutes).Wrap)
apiHandler.RegisterPrivateRoutes(r)
@@ -342,7 +342,7 @@ func (s *Server) createPrivateServer(apiHandler *api.APIHandler) (*http.Server,
}, nil
}
func (s *Server) createPublicServer(apiHandler *api.APIHandler, web *web.Web) (*http.Server, error) {
func (s *Server) createPublicServer(apiHandler *api.APIHandler, web web.Web) (*http.Server, error) {
r := baseapp.NewRouter()
@@ -362,14 +362,18 @@ func (s *Server) createPublicServer(apiHandler *api.APIHandler, web *web.Web) (*
}
am := baseapp.NewAuthMiddleware(getUserFromRequest)
r.Use(setTimeoutMiddleware)
r.Use(s.analyticsMiddleware)
r.Use(loggingMiddleware)
r.Use(baseapp.LogCommentEnricher)
r.Use(middleware.NewTimeout(zap.L(),
s.serverOptions.Config.APIServer.Timeout.ExcludedRoutes,
s.serverOptions.Config.APIServer.Timeout.Default,
s.serverOptions.Config.APIServer.Timeout.Max,
).Wrap)
r.Use(middleware.NewAnalytics(zap.L()).Wrap)
r.Use(middleware.NewLogging(zap.L(), s.serverOptions.Config.APIServer.Logging.ExcludedRoutes).Wrap)
apiHandler.RegisterRoutes(r, am)
apiHandler.RegisterLogsRoutes(r, am)
apiHandler.RegisterIntegrationRoutes(r, am)
apiHandler.RegisterCloudIntegrationsRoutes(r, am)
apiHandler.RegisterQueryRangeV3Routes(r, am)
apiHandler.RegisterInfraMetricsRoutes(r, am)
apiHandler.RegisterQueryRangeV4Routes(r, am)
@@ -386,11 +390,9 @@ func (s *Server) createPublicServer(apiHandler *api.APIHandler, web *web.Web) (*
handler = handlers.CompressHandler(handler)
if !s.serverOptions.SkipWebFrontend {
err := web.AddToRouter(r)
if err != nil {
return nil, err
}
err := web.AddToRouter(r)
if err != nil {
return nil, err
}
return &http.Server{
@@ -398,31 +400,6 @@ func (s *Server) createPublicServer(apiHandler *api.APIHandler, web *web.Web) (*
}, nil
}
// TODO(remove): Implemented at pkg/http/middleware/logging.go
// loggingMiddleware is used for logging public api calls
func loggingMiddleware(next http.Handler) http.Handler {
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
route := mux.CurrentRoute(r)
path, _ := route.GetPathTemplate()
startTime := time.Now()
next.ServeHTTP(w, r)
zap.L().Info(path, zap.Duration("timeTaken", time.Since(startTime)), zap.String("path", path))
})
}
// TODO(remove): Implemented at pkg/http/middleware/logging.go
// loggingMiddlewarePrivate is used for logging private api calls
// from internal services like alert manager
func loggingMiddlewarePrivate(next http.Handler) http.Handler {
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
route := mux.CurrentRoute(r)
path, _ := route.GetPathTemplate()
startTime := time.Now()
next.ServeHTTP(w, r)
zap.L().Info(path, zap.Duration("timeTaken", time.Since(startTime)), zap.String("path", path), zap.Bool("tprivatePort", true))
})
}
// TODO(remove): Implemented at pkg/http/middleware/logging.go
type loggingResponseWriter struct {
http.ResponseWriter
@@ -502,32 +479,29 @@ func extractQueryRangeData(path string, r *http.Request) (map[string]interface{}
zap.L().Error("error while matching the trace explorer: ", zap.Error(err))
}
signozMetricsUsed := false
signozLogsUsed := false
signozTracesUsed := false
if postData != nil {
queryInfoResult := telemetry.GetInstance().CheckQueryInfo(postData)
if postData.CompositeQuery != nil {
data["queryType"] = postData.CompositeQuery.QueryType
data["panelType"] = postData.CompositeQuery.PanelType
signozLogsUsed, signozMetricsUsed, signozTracesUsed = telemetry.GetInstance().CheckSigNozSignals(postData)
}
}
if signozMetricsUsed || signozLogsUsed || signozTracesUsed {
if signozMetricsUsed {
if (queryInfoResult.MetricsUsed || queryInfoResult.LogsUsed || queryInfoResult.TracesUsed) && (queryInfoResult.FilterApplied) {
if queryInfoResult.MetricsUsed {
telemetry.GetInstance().AddActiveMetricsUser()
}
if signozLogsUsed {
if queryInfoResult.LogsUsed {
telemetry.GetInstance().AddActiveLogsUser()
}
if signozTracesUsed {
if queryInfoResult.TracesUsed {
telemetry.GetInstance().AddActiveTracesUser()
}
data["metricsUsed"] = signozMetricsUsed
data["logsUsed"] = signozLogsUsed
data["tracesUsed"] = signozTracesUsed
data["metricsUsed"] = queryInfoResult.MetricsUsed
data["logsUsed"] = queryInfoResult.LogsUsed
data["tracesUsed"] = queryInfoResult.TracesUsed
data["filterApplied"] = queryInfoResult.FilterApplied
data["groupByApplied"] = queryInfoResult.GroupByApplied
data["aggregateOperator"] = queryInfoResult.AggregateOperator
data["aggregateAttributeKey"] = queryInfoResult.AggregateAttributeKey
data["numberOfQueries"] = queryInfoResult.NumberOfQueries
data["queryType"] = queryInfoResult.QueryType
data["panelType"] = queryInfoResult.PanelType
userEmail, err := baseauth.GetEmailFromJwt(r.Context())
if err == nil {
// switch case to set data["screen"] based on the referrer
@@ -594,23 +568,6 @@ func (s *Server) analyticsMiddleware(next http.Handler) http.Handler {
})
}
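The hunk above replaces the three `CheckSigNozSignals` booleans with a single `CheckQueryInfo` result and forwards its fields into the telemetry payload. A minimal sketch of the result's assumed shape, inferred from the fields the middleware reads above; the actual struct lives in the telemetry package and may differ:

```go
// Assumed shape of the CheckQueryInfo result, inferred from the fields read
// in extractQueryRangeData above; not the actual definition.
type QueryInfoResult struct {
	LogsUsed              bool
	MetricsUsed           bool
	TracesUsed            bool
	FilterApplied         bool
	GroupByApplied        bool
	AggregateOperator     string
	AggregateAttributeKey string
	NumberOfQueries       int
	QueryType             string
	PanelType             string
}
```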
// TODO(remove): Implemented at pkg/http/middleware/timeout.go
func setTimeoutMiddleware(next http.Handler) http.Handler {
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
ctx := r.Context()
var cancel context.CancelFunc
// check if route is not excluded
url := r.URL.Path
if _, ok := baseconst.TimeoutExcludedRoutes[url]; !ok {
ctx, cancel = context.WithTimeout(r.Context(), baseconst.ContextTimeout)
defer cancel()
}
r = r.WithContext(ctx)
next.ServeHTTP(w, r)
})
}
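The deleted logging and timeout middlewares above now live in pkg/http/middleware and are wired in via `middleware.NewTimeout(...).Wrap` and `middleware.NewLogging(...).Wrap`. A minimal sketch of what the timeout replacement could look like, assuming the constructor signature visible at the call sites; the field names and the role of the max value are guesses, not the actual implementation:

```go
// Sketch of the replacement wrapper in pkg/http/middleware, inferred from the
// NewTimeout(logger, excludedRoutes, default, max) call sites above.
package middleware

import (
	"context"
	"net/http"
	"time"

	"go.uber.org/zap"
)

type Timeout struct {
	logger   *zap.Logger
	excluded map[string]struct{}
	def      time.Duration
	max      time.Duration
}

func NewTimeout(logger *zap.Logger, excludedRoutes []string, def, max time.Duration) *Timeout {
	excluded := make(map[string]struct{}, len(excludedRoutes))
	for _, route := range excludedRoutes {
		excluded[route] = struct{}{}
	}
	return &Timeout{logger: logger, excluded: excluded, def: def, max: max}
}

// Wrap applies a per-request deadline: the default for normal routes and, as
// one plausible reading of "max", a longer cap for excluded routes.
func (t *Timeout) Wrap(next http.Handler) http.Handler {
	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		timeout := t.def
		if _, ok := t.excluded[r.URL.Path]; ok {
			timeout = t.max
		}
		ctx, cancel := context.WithTimeout(r.Context(), timeout)
		defer cancel()
		next.ServeHTTP(w, r.WithContext(ctx))
	})
}
```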
// initListeners initialises listeners of the server
func (s *Server) initListeners() error {
// listen on public port

@@ -1,18 +1,10 @@
package dao
import (
"fmt"
"github.com/jmoiron/sqlx"
"go.signoz.io/signoz/ee/query-service/dao/sqlite"
)
func InitDao(engine, path string) (ModelDao, error) {
switch engine {
case "sqlite":
return sqlite.InitDB(path)
default:
return nil, fmt.Errorf("qsdb type: %s is not supported in query service", engine)
}
func InitDao(inputDB *sqlx.DB) (ModelDao, error) {
return sqlite.InitDB(inputDB)
}
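`InitDao` no longer dispatches on an engine name; the caller now owns database construction and hands the handle in. A hypothetical call site, where the driver name and path are illustrative:

```go
package main

import (
	"log"

	"github.com/jmoiron/sqlx"
	_ "github.com/mattn/go-sqlite3" // sqlite driver, as used elsewhere in the repo
	"go.signoz.io/signoz/ee/query-service/dao"
)

func main() {
	// The caller constructs the *sqlx.DB instead of InitDao opening
	// SQLite from a path itself.
	db, err := sqlx.Open("sqlite3", "/var/lib/signoz/signoz.db") // illustrative path
	if err != nil {
		log.Fatal(err)
	}
	modelDao, err := dao.InitDao(db)
	if err != nil {
		log.Fatal(err)
	}
	_ = modelDao
}
```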

@@ -65,8 +65,8 @@ func columnExists(db *sqlx.DB, tableName, columnName string) bool {
}
// InitDB creates and extends base model DB repository
func InitDB(dataSourceName string) (*modelDao, error) {
dao, err := basedsql.InitDB(dataSourceName)
func InitDB(inputDB *sqlx.DB) (*modelDao, error) {
dao, err := basedsql.InitDB(inputDB)
if err != nil {
return nil, err
}

@@ -28,26 +28,8 @@ func NewLicenseRepo(db *sqlx.DB) Repo {
}
}
func (r *Repo) InitDB(engine string) error {
switch engine {
case "sqlite3", "sqlite":
return sqlite.InitDB(r.db)
default:
return fmt.Errorf("unsupported db")
}
}
func (r *Repo) GetLicenses(ctx context.Context) ([]model.License, error) {
licenses := []model.License{}
query := "SELECT key, activationId, planDetails, validationMessage FROM licenses"
err := r.db.Select(&licenses, query)
if err != nil {
return nil, fmt.Errorf("failed to get licenses from db: %v", err)
}
return licenses, nil
func (r *Repo) InitDB(inputDB *sqlx.DB) error {
return sqlite.InitDB(inputDB)
}
func (r *Repo) GetLicensesV3(ctx context.Context) ([]*model.LicenseV3, error) {
@@ -78,35 +60,6 @@ func (r *Repo) GetLicensesV3(ctx context.Context) ([]*model.LicenseV3, error) {
return licenseV3Data, nil
}
func (r *Repo) GetActiveLicenseV2(ctx context.Context) (*model.License, *basemodel.ApiError) {
var err error
licenses := []model.License{}
query := "SELECT key, activationId, planDetails, validationMessage FROM licenses"
err = r.db.Select(&licenses, query)
if err != nil {
return nil, basemodel.InternalError(fmt.Errorf("failed to get active licenses from db: %v", err))
}
var active *model.License
for _, l := range licenses {
l.ParsePlan()
if active == nil &&
(l.ValidFrom != 0) &&
(l.ValidUntil == -1 || l.ValidUntil > time.Now().Unix()) {
active = &l
}
if active != nil &&
l.ValidFrom > active.ValidFrom &&
(l.ValidUntil == -1 || l.ValidUntil > time.Now().Unix()) {
active = &l
}
}
return active, nil
}
// GetActiveLicense fetches the latest active license from DB.
// If the license is not present, expect a nil license and a nil error in the output.
func (r *Repo) GetActiveLicense(ctx context.Context) (*model.License, *basemodel.ApiError) {
@@ -161,50 +114,56 @@ func (r *Repo) GetActiveLicenseV3(ctx context.Context) (*model.LicenseV3, error)
return active, nil
}
// InsertLicense inserts a new license in db
func (r *Repo) InsertLicense(ctx context.Context, l *model.License) error {
// InsertLicenseV3 inserts a new license v3 in db
func (r *Repo) InsertLicenseV3(ctx context.Context, l *model.LicenseV3) *model.ApiError {
if l.Key == "" {
return fmt.Errorf("insert license failed: license key is required")
query := `INSERT INTO licenses_v3 (id, key, data) VALUES ($1, $2, $3)`
// license is the entity of zeus, so we store the entire license here without defining a schema
licenseData, err := json.Marshal(l.Data)
if err != nil {
return &model.ApiError{Typ: basemodel.ErrorBadData, Err: err}
}
query := `INSERT INTO licenses
(key, planDetails, activationId, validationmessage)
VALUES ($1, $2, $3, $4)`
_, err := r.db.ExecContext(ctx,
_, err = r.db.ExecContext(ctx,
query,
l.ID,
l.Key,
l.PlanDetails,
l.ActivationId,
l.ValidationMessage)
string(licenseData),
)
if err != nil {
if sqliteErr, ok := err.(sqlite3.Error); ok {
if sqliteErr.ExtendedCode == sqlite3.ErrConstraintUnique {
zap.L().Error("error in inserting license data: ", zap.Error(sqliteErr))
return &model.ApiError{Typ: model.ErrorConflict, Err: sqliteErr}
}
}
zap.L().Error("error in inserting license data: ", zap.Error(err))
return fmt.Errorf("failed to insert license in db: %v", err)
return &model.ApiError{Typ: basemodel.ErrorExec, Err: err}
}
return nil
}
// UpdatePlanDetails writes new plan details to the db
func (r *Repo) UpdatePlanDetails(ctx context.Context,
key,
planDetails string) error {
// UpdateLicenseV3 updates a new license v3 in db
func (r *Repo) UpdateLicenseV3(ctx context.Context, l *model.LicenseV3) error {
if key == "" {
return fmt.Errorf("update plan details failed: license key is required")
// the key and id for the license can't change so only update the data here!
query := `UPDATE licenses_v3 SET data=$1 WHERE id=$2;`
license, err := json.Marshal(l.Data)
if err != nil {
return fmt.Errorf("insert license failed: license marshal error")
}
query := `UPDATE licenses
SET planDetails = $1,
updatedAt = $2
WHERE key = $3`
_, err := r.db.ExecContext(ctx, query, planDetails, time.Now(), key)
_, err = r.db.ExecContext(ctx,
query,
license,
l.ID,
)
if err != nil {
zap.L().Error("error in updating license: ", zap.Error(err))
zap.L().Error("error in updating license data: ", zap.Error(err))
return fmt.Errorf("failed to update license in db: %v", err)
}
@@ -286,59 +245,3 @@ func (r *Repo) InitFeatures(req basemodel.FeatureSet) error {
}
return nil
}
// InsertLicenseV3 inserts a new license v3 in db
func (r *Repo) InsertLicenseV3(ctx context.Context, l *model.LicenseV3) *model.ApiError {
query := `INSERT INTO licenses_v3 (id, key, data) VALUES ($1, $2, $3)`
// license is the entity of zeus, so we store the entire license here without defining a schema
licenseData, err := json.Marshal(l.Data)
if err != nil {
return &model.ApiError{Typ: basemodel.ErrorBadData, Err: err}
}
_, err = r.db.ExecContext(ctx,
query,
l.ID,
l.Key,
string(licenseData),
)
if err != nil {
if sqliteErr, ok := err.(sqlite3.Error); ok {
if sqliteErr.ExtendedCode == sqlite3.ErrConstraintUnique {
zap.L().Error("error in inserting license data: ", zap.Error(sqliteErr))
return &model.ApiError{Typ: model.ErrorConflict, Err: sqliteErr}
}
}
zap.L().Error("error in inserting license data: ", zap.Error(err))
return &model.ApiError{Typ: basemodel.ErrorExec, Err: err}
}
return nil
}
// UpdateLicenseV3 updates a new license v3 in db
func (r *Repo) UpdateLicenseV3(ctx context.Context, l *model.LicenseV3) error {
// the key and id for the license can't change so only update the data here!
query := `UPDATE licenses_v3 SET data=$1 WHERE id=$2;`
license, err := json.Marshal(l.Data)
if err != nil {
return fmt.Errorf("insert license failed: license marshal error")
}
_, err = r.db.ExecContext(ctx,
query,
license,
l.ID,
)
if err != nil {
zap.L().Error("error in updating license data: ", zap.Error(err))
return fmt.Errorf("failed to update license in db: %v", err)
}
return nil
}
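Both V3 statements touch only three columns. A hypothetical shape for the licenses_v3 table, inferred from the INSERT and UPDATE above; the actual migration may define more:

```go
// Hypothetical licenses_v3 schema, inferred from the statements above;
// the real migration in the repo may differ.
const createLicensesV3Table = `
CREATE TABLE IF NOT EXISTS licenses_v3 (
	id   TEXT PRIMARY KEY,     -- immutable license id
	key  TEXT NOT NULL UNIQUE, -- violating this yields ErrConstraintUnique above
	data TEXT                  -- entire license entity marshalled as JSON
);`
```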

@@ -27,38 +27,30 @@ var LM *Manager
var validationFrequency = 24 * 60 * time.Minute
type Manager struct {
repo *Repo
mutex sync.Mutex
repo *Repo
mutex sync.Mutex
validatorRunning bool
// end the license validation; this is important for gracefully
// stopping validation and protecting against inconsistent updates
done chan struct{}
// terminated waits for the validate go routine to end
terminated chan struct{}
// last time the license was validated
lastValidated int64
// keep track of validation failure attempts
failedAttempts uint64
// keep track of active license and features
activeLicense *model.License
activeLicenseV3 *model.LicenseV3
activeFeatures basemodel.FeatureSet
}
func StartManager(dbType string, db *sqlx.DB, features ...basemodel.Feature) (*Manager, error) {
func StartManager(db *sqlx.DB, features ...basemodel.Feature) (*Manager, error) {
if LM != nil {
return LM, nil
}
repo := NewLicenseRepo(db)
err := repo.InitDB(dbType)
err := repo.InitDB(db)
if err != nil {
return nil, fmt.Errorf("failed to initiate license repo: %v", err)
}
@@ -66,10 +58,10 @@ func StartManager(dbType string, db *sqlx.DB, features ...basemodel.Feature) (*M
m := &Manager{
repo: &repo,
}
if err := m.start(features...); err != nil {
return m, err
}
LM = m
return m, nil
}
@@ -119,6 +111,7 @@ func (lm *Manager) LoadActiveLicenseV3(features ...basemodel.Feature) error {
if err != nil {
return err
}
if active != nil {
lm.SetActiveV3(active, features...)
} else {
@@ -136,32 +129,6 @@ func (lm *Manager) LoadActiveLicenseV3(features ...basemodel.Feature) error {
return nil
}
func (lm *Manager) GetLicenses(ctx context.Context) (response []model.License, apiError *model.ApiError) {
licenses, err := lm.repo.GetLicenses(ctx)
if err != nil {
return nil, model.InternalError(err)
}
for _, l := range licenses {
l.ParsePlan()
if lm.activeLicense != nil && l.Key == lm.activeLicense.Key {
l.IsCurrent = true
}
if l.ValidUntil == -1 {
// for subscriptions, there is no end-date as such
// but for showing user some validity we default one year timespan
l.ValidUntil = l.ValidFrom + 31556926
}
response = append(response, l)
}
return
}
func (lm *Manager) GetLicensesV3(ctx context.Context) (response []*model.LicenseV3, apiError *model.ApiError) {
licenses, err := lm.repo.GetLicensesV3(ctx)
@@ -188,11 +155,11 @@ func (lm *Manager) GetLicensesV3(ctx context.Context) (response []*model.License
func (lm *Manager) ValidatorV3(ctx context.Context) {
zap.L().Info("ValidatorV3 started!")
defer close(lm.terminated)
tick := time.NewTicker(validationFrequency)
defer tick.Stop()
lm.ValidateV3(ctx)
for {
select {
case <-lm.done:
@@ -238,10 +205,27 @@ func (lm *Manager) ValidateV3(ctx context.Context) (reterr error) {
lm.lastValidated = time.Now().Unix()
if reterr != nil {
zap.L().Error("License validation completed with error", zap.Error(reterr))
atomic.AddUint64(&lm.failedAttempts, 1)
// default to basic plan if validation fails for three consecutive times
if atomic.LoadUint64(&lm.failedAttempts) > 3 {
zap.L().Error("License validation completed with error for three consecutive times, defaulting to basic plan", zap.String("license_id", lm.activeLicenseV3.ID), zap.Bool("license_validation", false))
lm.activeLicenseV3 = nil
lm.activeFeatures = model.BasicPlan
setDefaultFeatures(lm)
err := lm.InitFeatures(lm.activeFeatures)
if err != nil {
zap.L().Error("Couldn't initialize features", zap.Error(err))
}
lm.done <- struct{}{}
lm.validatorRunning = false
}
telemetry.GetInstance().SendEvent(telemetry.TELEMETRY_LICENSE_CHECK_FAILED,
map[string]interface{}{"err": reterr.Error()}, "", true, false)
} else {
// reset the failed attempts counter
atomic.StoreUint64(&lm.failedAttempts, 0)
zap.L().Info("License validation completed with no errors")
}
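For reference, the failure-counter pattern introduced above in isolation: increment atomically on each failed validation, reset on success, and fall back once the count crosses the threshold. A self-contained sketch:

```go
package main

import (
	"fmt"
	"sync/atomic"
)

func main() {
	var failedAttempts uint64

	validate := func(ok bool) {
		if !ok {
			// Increment, and act once more than three consecutive
			// failures have accumulated (mirroring the > 3 check above).
			if atomic.AddUint64(&failedAttempts, 1) > 3 {
				fmt.Println("defaulting to basic plan")
			}
			return
		}
		atomic.StoreUint64(&failedAttempts, 0) // success resets the counter
	}

	for _, ok := range []bool{false, false, false, false} {
		validate(ok)
	}
}
```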

@@ -10,12 +10,12 @@ import (
"syscall"
"time"
"go.opentelemetry.io/collector/confmap"
"go.opentelemetry.io/otel/sdk/resource"
semconv "go.opentelemetry.io/otel/semconv/v1.4.0"
"go.signoz.io/signoz/ee/query-service/app"
signozconfig "go.signoz.io/signoz/pkg/config"
"go.signoz.io/signoz/pkg/confmap/provider/signozenvprovider"
"go.signoz.io/signoz/pkg/config"
"go.signoz.io/signoz/pkg/config/envprovider"
"go.signoz.io/signoz/pkg/config/fileprovider"
"go.signoz.io/signoz/pkg/query-service/auth"
baseconst "go.signoz.io/signoz/pkg/query-service/constants"
"go.signoz.io/signoz/pkg/query-service/migrate"
@@ -99,7 +99,7 @@ func main() {
var useLogsNewSchema bool
var useTraceNewSchema bool
var cacheConfigPath, fluxInterval string
var cacheConfigPath, fluxInterval, fluxIntervalForTraceDetail string
var enableQueryServiceLogOTLPExport bool
var preferSpanMetrics bool
@@ -108,7 +108,6 @@ func main() {
var dialTimeout time.Duration
var gatewayUrl string
var useLicensesV3 bool
var skipWebFrontend bool
flag.BoolVar(&useLogsNewSchema, "use-logs-new-schema", false, "use logs_v2 schema for logs")
flag.BoolVar(&useTraceNewSchema, "use-trace-new-schema", false, "use new schema for traces")
@@ -122,11 +121,11 @@ func main() {
flag.StringVar(&ruleRepoURL, "rules.repo-url", baseconst.AlertHelpPage, "(host address used to build rule link in alert messages)")
flag.StringVar(&cacheConfigPath, "experimental.cache-config", "", "(cache config to use)")
flag.StringVar(&fluxInterval, "flux-interval", "5m", "(the interval to exclude data from being cached to avoid incorrect cache for data in motion)")
flag.StringVar(&fluxIntervalForTraceDetail, "flux-interval-trace-detail", "2m", "(the interval to exclude data from being cached to avoid incorrect cache for trace data in motion)")
flag.BoolVar(&enableQueryServiceLogOTLPExport, "enable.query.service.log.otlp.export", false, "(enable query service log otlp export)")
flag.StringVar(&cluster, "cluster", "cluster", "(cluster name - defaults to 'cluster')")
flag.StringVar(&gatewayUrl, "gateway-url", "", "(url to the gateway)")
flag.BoolVar(&useLicensesV3, "use-licenses-v3", false, "use licenses_v3 schema for licenses")
flag.BoolVar(&skipWebFrontend, "skip-web-frontend", false, "skip web frontend")
flag.Parse()
loggerMgr := initZapLog(enableQueryServiceLogOTLPExport)
@@ -136,42 +135,43 @@ func main() {
version.PrintVersion()
config, err := signozconfig.New(context.Background(), signozconfig.ProviderSettings{
ResolverSettings: confmap.ResolverSettings{
URIs: []string{"signozenv:"},
ProviderFactories: []confmap.ProviderFactory{
signozenvprovider.NewFactory(),
},
config, err := signoz.NewConfig(context.Background(), config.ResolverConfig{
Uris: []string{"env:"},
ProviderFactories: []config.ProviderFactory{
envprovider.NewFactory(),
fileprovider.NewFactory(),
},
}, signoz.DeprecatedFlags{
MaxIdleConns: maxIdleConns,
MaxOpenConns: maxOpenConns,
DialTimeout: dialTimeout,
})
if err != nil {
zap.L().Fatal("Failed to create config", zap.Error(err))
}
signoz, err := signoz.New(config, skipWebFrontend)
signoz, err := signoz.New(context.Background(), config, signoz.NewProviderConfig())
if err != nil {
zap.L().Fatal("Failed to create signoz struct", zap.Error(err))
}
serverOptions := &app.ServerOptions{
SigNoz: signoz,
HTTPHostPort: baseconst.HTTPHostPort,
PromConfigPath: promConfigPath,
SkipTopLvlOpsPath: skipTopLvlOpsPath,
PreferSpanMetrics: preferSpanMetrics,
PrivateHostPort: baseconst.PrivateHostPort,
DisableRules: disableRules,
RuleRepoURL: ruleRepoURL,
MaxIdleConns: maxIdleConns,
MaxOpenConns: maxOpenConns,
DialTimeout: dialTimeout,
CacheConfigPath: cacheConfigPath,
FluxInterval: fluxInterval,
Cluster: cluster,
GatewayUrl: gatewayUrl,
UseLogsNewSchema: useLogsNewSchema,
UseTraceNewSchema: useTraceNewSchema,
SkipWebFrontend: skipWebFrontend,
Config: config,
SigNoz: signoz,
HTTPHostPort: baseconst.HTTPHostPort,
PromConfigPath: promConfigPath,
SkipTopLvlOpsPath: skipTopLvlOpsPath,
PreferSpanMetrics: preferSpanMetrics,
PrivateHostPort: baseconst.PrivateHostPort,
DisableRules: disableRules,
RuleRepoURL: ruleRepoURL,
CacheConfigPath: cacheConfigPath,
FluxInterval: fluxInterval,
FluxIntervalForTraceDetail: fluxIntervalForTraceDetail,
Cluster: cluster,
GatewayUrl: gatewayUrl,
UseLogsNewSchema: useLogsNewSchema,
UseTraceNewSchema: useTraceNewSchema,
}
// Read the jwt secret key
@@ -183,7 +183,7 @@ func main() {
zap.L().Info("JWT secret key set successfully.")
}
if err := migrate.Migrate(baseconst.RELATIONAL_DATASOURCE_PATH); err != nil {
if err := migrate.Migrate(signoz.SQLStore.SQLxDB()); err != nil {
zap.L().Error("Failed to migrate", zap.Error(err))
} else {
zap.L().Info("Migration successful")

@@ -1,7 +1,6 @@
package model
import (
"encoding/base64"
"encoding/json"
"fmt"
"reflect"
@@ -61,37 +60,6 @@ type LicensePlan struct {
Status string `json:"status"`
}
func (l *License) ParsePlan() error {
l.LicensePlan = LicensePlan{}
planData, err := base64.StdEncoding.DecodeString(l.PlanDetails)
if err != nil {
return err
}
plan := LicensePlan{}
err = json.Unmarshal([]byte(planData), &plan)
if err != nil {
l.ValidationMessage = "failed to parse plan from license"
return errors.Wrap(err, "failed to parse plan from license")
}
l.LicensePlan = plan
l.ParseFeatures()
return nil
}
func (l *License) ParseFeatures() {
switch l.PlanKey {
case Pro:
l.FeatureSet = ProPlan
case Enterprise:
l.FeatureSet = EnterprisePlan
default:
l.FeatureSet = BasicPlan
}
}
type Licenses struct {
TrialStart int64 `json:"trialStart"`
TrialEnd int64 `json:"trialEnd"`

@@ -4,7 +4,6 @@ import (
"context"
"encoding/json"
"fmt"
"os"
"regexp"
"strings"
"sync/atomic"
@@ -46,9 +45,9 @@ type Manager struct {
tenantID string
}
func New(dbType string, modelDao dao.ModelDao, licenseRepo *license.Repo, clickhouseConn clickhouse.Conn) (*Manager, error) {
func New(modelDao dao.ModelDao, licenseRepo *license.Repo, clickhouseConn clickhouse.Conn, chUrl string) (*Manager, error) {
hostNameRegex := regexp.MustCompile(`tcp://(?P<hostname>.*):`)
hostNameRegexMatches := hostNameRegex.FindStringSubmatch(os.Getenv("ClickHouseUrl"))
hostNameRegexMatches := hostNameRegex.FindStringSubmatch(chUrl)
tenantID := ""
if len(hostNameRegexMatches) == 2 {
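The tenant ID is now derived from the ClickHouse DSN passed in as `chUrl` rather than the `ClickHouseUrl` environment variable. A standalone illustration of the extraction, where the DSN value is illustrative:

```go
package main

import (
	"fmt"
	"regexp"
)

func main() {
	hostNameRegex := regexp.MustCompile(`tcp://(?P<hostname>.*):`)
	chUrl := "tcp://clickhouse.example.internal:9000" // illustrative DSN
	if m := hostNameRegex.FindStringSubmatch(chUrl); len(m) == 2 {
		fmt.Println("tenant hostname:", m[1]) // clickhouse.example.internal
	}
}
```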

@@ -6,7 +6,7 @@
**Building image**
`docker-compose up`
`docker compose up`
This will also run
or
@@ -19,7 +19,7 @@ docker tag signoz/frontend:latest 7296823551/signoz:latest
```
docker-compose up
docker compose up
```
## Without Docker

@@ -0,0 +1,7 @@
version: "3.9"
services:
web:
build: .
image: signoz/frontend:latest
ports:
- "3301:3301"

@@ -1,7 +0,0 @@
version: "3.9"
services:
web:
build: .
image: signoz/frontend:latest
ports:
- "3301:3301"

@@ -44,6 +44,7 @@
"@sentry/webpack-plugin": "2.22.6",
"@signozhq/design-tokens": "1.1.4",
"@tanstack/react-table": "8.20.6",
"@tanstack/react-virtual": "3.11.2",
"@uiw/react-md-editor": "3.23.5",
"@visx/group": "3.3.0",
"@visx/shape": "3.5.0",

@@ -0,0 +1,22 @@
<svg width="16" height="16" viewBox="0 0 16 16" fill="none" xmlns="http://www.w3.org/2000/svg">
<path d="M4.02102 14.418C4.02102 14.418 3.82659 14.6658 3.32106 14.6658C2.81553 14.6658 2.62109 14.418 2.62109 14.418V5.74512H4.02102V14.418Z" fill="#9E9E9E"/>
<path d="M4.02102 14.4179C4.02102 14.4179 3.82659 14.6657 3.32106 14.6657C2.81553 14.6657 2.62109 14.4179 2.62109 14.4179V11.077C2.62109 11.077 2.76331 10.8059 3.28328 10.8059C3.80325 10.8059 4.02102 11.077 4.02102 11.077V14.4179Z" fill="#BDBDBD"/>
<path d="M13.3765 14.418C13.3765 14.418 13.1821 14.6658 12.6765 14.6658C12.171 14.6658 11.9766 14.418 11.9766 14.418V5.74512H13.3765V14.418Z" fill="#9E9E9E"/>
<path d="M13.3765 14.4179C13.3765 14.4179 13.1821 14.6657 12.6765 14.6657C12.171 14.6657 11.9766 14.4179 11.9766 14.4179V11.077C11.9766 11.077 12.1188 10.8059 12.6387 10.8059C13.1587 10.8059 13.3765 11.077 13.3765 11.077V14.4179Z" fill="#BDBDBD"/>
<path d="M14.3623 9.98379H1.63633C1.46856 9.98379 1.33301 9.84825 1.33301 9.68048V5.79624C1.33301 5.62847 1.46856 5.49292 1.63633 5.49292H14.3623C14.5301 5.49292 14.6656 5.62847 14.6656 5.79624V9.68048C14.6656 9.84825 14.5301 9.98379 14.3623 9.98379Z" fill="#9E9E9E"/>
<path d="M14.3623 5.71509H1.63633C1.46856 5.71509 1.33301 5.85064 1.33301 6.01841V9.90264C1.33301 10.0704 1.46856 10.206 1.63633 10.206H14.3623C14.5301 10.206 14.6656 10.0704 14.6656 9.90264V6.01841C14.6656 5.85064 14.5301 5.71509 14.3623 5.71509Z" fill="#FFD600"/>
<path d="M2.82515 5.71509L1.33301 7.20723V9.40712L5.02504 5.71509H2.82515Z" fill="#212121"/>
<path d="M7.01027 5.71509L2.52051 10.206H4.72039L9.21016 5.71509H7.01027Z" fill="#212121"/>
<path d="M11.1969 5.71509L6.70605 10.206H8.90594L13.3957 5.71509H11.1969Z" fill="#212121"/>
<path d="M14.6658 6.43188L10.8916 10.2061H13.0915L14.6658 8.63177V6.43188Z" fill="#212121"/>
<path d="M13.5322 5.9095H11.8223C11.6623 5.9095 11.5445 5.80506 11.5823 5.6984L11.7012 4.95288H13.6511L13.77 5.6984C13.81 5.80617 13.6922 5.9095 13.5322 5.9095Z" fill="#E2A610"/>
<path d="M12.6768 4.93514C13.4567 4.93514 14.0889 4.3029 14.0889 3.52299C14.0889 2.74308 13.4567 2.11084 12.6768 2.11084C11.8969 2.11084 11.2646 2.74308 11.2646 3.52299C11.2646 4.3029 11.8969 4.93514 12.6768 4.93514Z" fill="#FFCA28"/>
<path d="M12.6761 4.56629C13.2523 4.56629 13.7194 4.0992 13.7194 3.52301C13.7194 2.94683 13.2523 2.47974 12.6761 2.47974C12.0999 2.47974 11.6328 2.94683 11.6328 3.52301C11.6328 4.0992 12.0999 4.56629 12.6761 4.56629Z" fill="#FF5722"/>
<path d="M13.652 4.96417H11.7021C11.7021 4.96417 11.7088 4.65308 12.2666 4.65308C12.8243 4.65308 13.0932 4.65308 13.0932 4.65308C13.6832 4.65308 13.652 4.96417 13.652 4.96417Z" fill="#FFCA28"/>
<path d="M12.7998 3.02315L13.0665 2.67872C13.0787 2.66317 13.1031 2.67539 13.0987 2.69428L12.9954 3.11759C12.9709 3.21647 13.0465 3.31202 13.1487 3.3098L13.5842 3.30313C13.6042 3.30313 13.6098 3.3298 13.592 3.33869L13.1965 3.52201C13.1042 3.56534 13.0776 3.68311 13.142 3.762L13.4187 4.09865C13.4309 4.1142 13.4142 4.13531 13.3965 4.12642L13.0065 3.93199C12.9154 3.88644 12.8065 3.93866 12.7843 4.03865L12.6932 4.46529C12.6887 4.48529 12.6609 4.48529 12.6576 4.46529L12.5665 4.03865C12.5454 3.93866 12.4354 3.88644 12.3443 3.93199L11.9543 4.12642C11.9365 4.13531 11.9188 4.11309 11.9321 4.09865L12.2087 3.762C12.2732 3.68311 12.2465 3.56534 12.1543 3.52201L11.7588 3.33869C11.741 3.3298 11.7465 3.30313 11.7665 3.30313L12.2021 3.3098C12.3043 3.31091 12.3798 3.21647 12.3554 3.11759L12.2521 2.69428C12.2476 2.67539 12.2721 2.66317 12.2843 2.67872L12.5509 3.02315C12.6165 3.10314 12.7376 3.10314 12.7998 3.02315Z" fill="#FFD5CA"/>
<path d="M4.17575 5.9095H2.46584C2.30584 5.9095 2.18807 5.80506 2.22585 5.6984L2.34473 4.95288H4.29463L4.41351 5.6984C4.45351 5.80617 4.33574 5.9095 4.17575 5.9095Z" fill="#E2A610"/>
<path d="M3.3223 4.93514C4.10221 4.93514 4.73445 4.3029 4.73445 3.52299C4.73445 2.74308 4.10221 2.11084 3.3223 2.11084C2.5424 2.11084 1.91016 2.74308 1.91016 3.52299C1.91016 4.3029 2.5424 4.93514 3.3223 4.93514Z" fill="#FFCA28"/>
<path d="M3.3216 4.56629C3.89779 4.56629 4.36488 4.0992 4.36488 3.52301C4.36488 2.94683 3.89779 2.47974 3.3216 2.47974C2.74541 2.47974 2.27832 2.94683 2.27832 3.52301C2.27832 4.0992 2.74541 4.56629 3.3216 4.56629Z" fill="#FF5722"/>
<path d="M4.29658 4.96417H2.34668C2.34668 4.96417 2.35335 4.65308 2.91109 4.65308C3.46884 4.65308 3.73772 4.65308 3.73772 4.65308C4.32769 4.65308 4.29658 4.96417 4.29658 4.96417Z" fill="#FFCA28"/>
<path d="M3.44337 3.02315L3.71002 2.67872C3.72224 2.66317 3.74669 2.67539 3.74224 2.69428L3.63892 3.11759C3.61447 3.21647 3.69002 3.31202 3.79224 3.3098L4.22777 3.30313C4.24777 3.30313 4.25333 3.3298 4.23555 3.33869L3.84002 3.52201C3.7478 3.56534 3.72113 3.68311 3.78557 3.762L4.06223 4.09865C4.07445 4.1142 4.05778 4.13531 4.04001 4.12642L3.65003 3.93199C3.55892 3.88644 3.45004 3.93866 3.42782 4.03865L3.33671 4.46529C3.33227 4.48529 3.30449 4.48529 3.30116 4.46529L3.21005 4.03865C3.18894 3.93866 3.07894 3.88644 2.98784 3.93199L2.59786 4.12642C2.58008 4.13531 2.56231 4.11309 2.57564 4.09865L2.85229 3.762C2.91673 3.68311 2.89007 3.56534 2.79785 3.52201L2.40231 3.33869C2.38454 3.3298 2.39009 3.30313 2.41009 3.30313L2.84562 3.3098C2.94784 3.31091 3.02339 3.21647 2.99895 3.11759L2.89562 2.69428C2.89118 2.67539 2.91562 2.66317 2.92784 2.67872L3.19449 3.02315C3.26005 3.10314 3.38115 3.10314 3.44337 3.02315Z" fill="#FFD5CA"/>
</svg>

@@ -0,0 +1 @@
<svg width="32" height="32" fill="none" xmlns="http://www.w3.org/2000/svg"><path fill-rule="evenodd" clip-rule="evenodd" d="M17.947 6.906h-6.62V6.24h6.62v.666zM17.947 10.293h-6.62v-.667h6.62v.667z" fill="#616161"/><path d="M16.174 29.403h-2.553V3.442s.238-.553 1.278-.553 1.278.553 1.278.553v25.96h-.003z" fill="#9E9E9E"/><path d="M15.519 2.971v20.974H13.62v1.91c.322.001.613.04.876.097a1.3 1.3 0 011.024 1.269v2.182h.655V3.442c-.002 0-.14-.318-.657-.471z" fill="#757575"/><path d="M22.362 2.738a5.382 5.382 0 00-5.381 5.401 5.365 5.365 0 005.381 5.382 5.378 5.378 0 005.382-5.382c0-2.975-2.406-5.401-5.382-5.401z" fill="#2196F3"/><path d="M24.713 4.749c-.338-.085-1.011-.174-2.349-.174-1.338 0-2.01.087-2.349.174-.2.05-.869.328-.869 1.077v4.618c0 .253.205.46.46.46h.17v.51c0 .15.12.27.268.27h.545c.149 0 .269-.12.269-.27v-.51h3.008v.51c0 .15.12.27.27.27h.544c.148 0 .268-.12.268-.27v-.51h.174a.46.46 0 00.46-.46V5.826c0-.717-.67-1.029-.87-1.077zm-3.802.366c0-.113.09-.204.204-.204h2.494c.113 0 .204.09.204.204v.362a.204.204 0 01-.204.205h-2.494a.204.204 0 01-.204-.205v-.362zm.042 4.864a.139.139 0 01-.138.138h-.549a.469.469 0 01-.468-.469v-.246c0-.076.062-.138.137-.138h.55c.257 0 .468.209.468.469v.246zm3.973-.33a.469.469 0 01-.469.468h-.549a.138.138 0 01-.137-.138v-.246c0-.258.209-.47.468-.47h.55c.075 0 .137.063.137.139v.246zm.125-2.007c0 .297-.645.817-2.69.817-2.046 0-2.688-.482-2.688-.817V6.277c0-.089.089-.31.311-.31h4.791c.222 0 .276.224.276.31v1.365z" fill="#fff"/><path d="M18.86 24.652h-7.926a.506.506 0 01-.506-.507V14.99c0-.28.226-.506.506-.506h7.924c.28 0 .507.226.507.506v9.155c0 .28-.227.507-.504.507z" fill="#F5F5F5"/><path opacity=".8" d="M18.005 23.139h-6.216l-.018-7.235h6.218l.015 7.235z" fill="#82AEC0"/><path fill-rule="evenodd" clip-rule="evenodd" d="M14.66 23.234v-7.33h.444v7.33h-.445z" fill="#F5F5F5"/><path d="M14.658 15.904h-2.886v.604h2.886v-.604z" fill="#616161"/><path fill-rule="evenodd" clip-rule="evenodd" d="M18.03 21.879h-6.263v-.223h6.264v.223zM18.03 20.412h-6.263v-.222h6.264v.222zM17.989 18.946H11.77v-.223h6.218v.223zM17.99 17.482h-6.223v-.223h6.224v.223z" fill="#F5F5F5"/><path d="M17.988 18.534h-2.886v.605h2.886v-.605zM14.672 19.999h-2.878v.604h2.878V20z" fill="#616161"/><path fill-rule="evenodd" clip-rule="evenodd" d="M10.935 14.817a.173.173 0 00-.174.173v9.155c0 .096.078.174.174.174h7.926a.173.173 0 00.171-.174V14.99a.173.173 0 00-.173-.173h-7.924zm-.84.173a.84.84 0 01.84-.84h7.924a.84.84 0 01.84.84v9.155a.84.84 0 01-.838.84h-7.926a.84.84 0 01-.84-.84V14.99z" fill="#9E9E9E"/><path d="M10.968 24.323c-.344.031-.344-.195-.344-.258V15.1c0-.25.204-.455.456-.455h7.693c.208 0 .304.127.262.384 0 0-.027-.164-.227-.164h-7.726a.237.237 0 00-.236.235v8.969c0 .209.122.255.122.255z" fill="#757575"/><path d="M12.268 4.933H4.695v6.577h7.573V4.933z" fill="#FFCA28"/><path d="M11.845 4.933c.233 0 .422.189.422.422v5.733a.422.422 0 01-.422.422H5.119a.422.422 0 01-.423-.422V5.355c0-.233.19-.422.423-.422h6.726zm0-.444H5.119a.868.868 0 00-.867.866v5.733c0 .478.389.867.867.867h6.726a.868.868 0 00.866-.867V5.355a.866.866 0 00-.866-.866z" fill="#9E9E9E"/><path fill-rule="evenodd" clip-rule="evenodd" d="M12.27 7.308H4.696v-.444h7.575v.444zM12.27 9.58H4.696v-.445h7.575v.444z" fill="#FFFDE7"/><path d="M7.066 5.664H5.162v.502h1.904v-.502zM6.598 10.27H5.162v.503h1.436v-.502zM11.648 10.27h-1.435v.503h1.435v-.502zM11.648 5.664h-1.435v.502h1.435v-.502zM10.301 7.968h-.826v.502h.826v-.502zM11.647 7.968h-.827v.502h.827v-.502zM7.802 7.968h-2.64v.502h2.64v-.502z" fill="#757575"/></svg>

@@ -43,7 +43,10 @@ export const TraceFilter = Loadable(
);
export const TraceDetail = Loadable(
() => import(/* webpackChunkName: "TraceDetail Page" */ 'pages/TraceDetail'),
() =>
import(
/* webpackChunkName: "TraceDetail Page" */ 'pages/TraceDetailV2/index'
),
);
export const UsageExplorerPage = Loadable(
@@ -229,7 +232,7 @@ export const InstalledIntegrations = Loadable(
),
);
export const MessagingQueues = Loadable(
export const MessagingQueuesMainPage = Loadable(
() =>
import(/* webpackChunkName: "MessagingQueues" */ 'pages/MessagingQueues'),
);
@@ -247,3 +250,17 @@ export const InfrastructureMonitoring = Loadable(
/* webpackChunkName: "InfrastructureMonitoring" */ 'pages/InfrastructureMonitoring'
),
);
export const CeleryTask = Loadable(
() =>
import(
/* webpackChunkName: "CeleryTask" */ 'pages/Celery/CeleryTask/CeleryTask'
),
);
export const CeleryOverview = Loadable(
() =>
import(
/* webpackChunkName: "CeleryOverview" */ 'pages/Celery/CeleryOverview/CeleryOverview'
),
);

@@ -1,4 +1,5 @@
import ROUTES from 'constants/routes';
import MessagingQueues from 'pages/MessagingQueues';
import { RouteProps } from 'react-router-dom';
import {
@@ -27,8 +28,6 @@ import {
LogsExplorer,
LogsIndexToFields,
LogsSaveViews,
MessagingQueues,
MQDetailPage,
MySettings,
NewDashboardPage,
OldLogsExplorer,
@@ -395,17 +394,31 @@ const routes: AppRoutes[] = [
key: 'INTEGRATIONS',
},
{
path: ROUTES.MESSAGING_QUEUES,
path: ROUTES.MESSAGING_QUEUES_KAFKA,
exact: true,
component: MessagingQueues,
key: 'MESSAGING_QUEUES',
key: 'MESSAGING_QUEUES_KAFKA',
isPrivate: true,
},
{
path: ROUTES.MESSAGING_QUEUES_DETAIL,
path: ROUTES.MESSAGING_QUEUES_CELERY_TASK,
exact: true,
component: MQDetailPage,
key: 'MESSAGING_QUEUES_DETAIL',
component: MessagingQueues,
key: 'MESSAGING_QUEUES_CELERY_TASK',
isPrivate: true,
},
{
path: ROUTES.MESSAGING_QUEUES_OVERVIEW,
exact: true,
component: MessagingQueues,
key: 'MESSAGING_QUEUES_OVERVIEW',
isPrivate: true,
},
{
path: ROUTES.MESSAGING_QUEUES_KAFKA_DETAIL,
exact: true,
component: MessagingQueues,
key: 'MESSAGING_QUEUES_KAFKA_DETAIL',
isPrivate: true,
},
{
@@ -447,6 +460,7 @@ export const oldRoutes = [
'/logs-save-views',
'/traces-save-views',
'/settings/access-tokens',
'/messaging-queues',
];
export const oldNewRoutesMapping: Record<string, string> = {
@@ -456,6 +470,7 @@ export const oldNewRoutesMapping: Record<string, string> = {
'/logs-save-views': '/logs/saved-views',
'/traces-save-views': '/traces/saved-views',
'/settings/access-tokens': '/settings/api-keys',
'/messaging-queues': '/messaging-queues/overview',
};
export const ROUTES_NOT_TO_BE_OVERRIDEN: string[] = [

@@ -1,4 +1,4 @@
import { ApiBaseInstance } from 'api';
import axios from 'api';
import { ErrorResponseHandler } from 'api/ErrorResponseHandler';
import { AxiosError } from 'axios';
import { ErrorResponse, SuccessResponse } from 'types/api';
@@ -59,7 +59,7 @@ export const getHostLists = async (
headers?: Record<string, string>,
): Promise<SuccessResponse<HostListResponse> | ErrorResponse> => {
try {
const response = await ApiBaseInstance.post('/hosts/list', props, {
const response = await axios.post('/hosts/list', props, {
signal,
headers,
});

@@ -1,4 +1,4 @@
import { ApiBaseInstance } from 'api';
import axios from 'api';
import { ErrorResponseHandler } from 'api/ErrorResponseHandler';
import { AxiosError } from 'axios';
import createQueryParams from 'lib/createQueryParams';
@@ -19,7 +19,7 @@ export const getInfraAttributesValues = async ({
SuccessResponse<IAttributeValuesResponse> | ErrorResponse
> => {
try {
const response = await ApiBaseInstance.get(
const response = await axios.get(
`/hosts/attribute_values?${createQueryParams({
dataSource,
attributeKey,

@@ -0,0 +1,64 @@
import axios from 'api';
import { ErrorResponseHandler } from 'api/ErrorResponseHandler';
import { AxiosError } from 'axios';
import { ErrorResponse, SuccessResponse } from 'types/api';
import { BaseAutocompleteData } from 'types/api/queryBuilder/queryAutocompleteResponse';
import { TagFilter } from 'types/api/queryBuilder/queryBuilderData';
export interface K8sClustersListPayload {
filters: TagFilter;
groupBy?: BaseAutocompleteData[];
offset?: number;
limit?: number;
orderBy?: {
columnName: string;
order: 'asc' | 'desc';
};
}
export interface K8sClustersData {
clusterUID: string;
cpuUsage: number;
cpuAllocatable: number;
memoryUsage: number;
memoryAllocatable: number;
meta: {
k8s_cluster_name: string;
k8s_cluster_uid: string;
};
}
export interface K8sClustersListResponse {
status: string;
data: {
type: string;
records: K8sClustersData[];
groups: null;
total: number;
sentAnyHostMetricsData: boolean;
isSendingK8SAgentMetrics: boolean;
};
}
export const getK8sClustersList = async (
props: K8sClustersListPayload,
signal?: AbortSignal,
headers?: Record<string, string>,
): Promise<SuccessResponse<K8sClustersListResponse> | ErrorResponse> => {
try {
const response = await axios.post('/clusters/list', props, {
signal,
headers,
});
return {
statusCode: 200,
error: null,
message: 'Success',
payload: response.data,
params: props,
};
} catch (error) {
return ErrorResponseHandler(error as AxiosError);
}
};

@@ -0,0 +1,70 @@
import axios from 'api';
import { ErrorResponseHandler } from 'api/ErrorResponseHandler';
import { AxiosError } from 'axios';
import { ErrorResponse, SuccessResponse } from 'types/api';
import { BaseAutocompleteData } from 'types/api/queryBuilder/queryAutocompleteResponse';
import { TagFilter } from 'types/api/queryBuilder/queryBuilderData';
export interface K8sDaemonSetsListPayload {
filters: TagFilter;
groupBy?: BaseAutocompleteData[];
offset?: number;
limit?: number;
orderBy?: {
columnName: string;
order: 'asc' | 'desc';
};
}
export interface K8sDaemonSetsData {
daemonSetName: string;
cpuUsage: number;
memoryUsage: number;
cpuRequest: number;
memoryRequest: number;
cpuLimit: number;
memoryLimit: number;
restarts: number;
desiredNodes: number;
availableNodes: number;
meta: {
k8s_cluster_name: string;
k8s_daemonset_name: string;
k8s_namespace_name: string;
};
}
export interface K8sDaemonSetsListResponse {
status: string;
data: {
type: string;
records: K8sDaemonSetsData[];
groups: null;
total: number;
sentAnyHostMetricsData: boolean;
isSendingK8SAgentMetrics: boolean;
};
}
export const getK8sDaemonSetsList = async (
props: K8sDaemonSetsListPayload,
signal?: AbortSignal,
headers?: Record<string, string>,
): Promise<SuccessResponse<K8sDaemonSetsListResponse> | ErrorResponse> => {
try {
const response = await axios.post('/daemonsets/list', props, {
signal,
headers,
});
return {
statusCode: 200,
error: null,
message: 'Success',
payload: response.data,
params: props,
};
} catch (error) {
return ErrorResponseHandler(error as AxiosError);
}
};

@@ -0,0 +1,70 @@
import axios from 'api';
import { ErrorResponseHandler } from 'api/ErrorResponseHandler';
import { AxiosError } from 'axios';
import { ErrorResponse, SuccessResponse } from 'types/api';
import { BaseAutocompleteData } from 'types/api/queryBuilder/queryAutocompleteResponse';
import { TagFilter } from 'types/api/queryBuilder/queryBuilderData';
export interface K8sDeploymentsListPayload {
filters: TagFilter;
groupBy?: BaseAutocompleteData[];
offset?: number;
limit?: number;
orderBy?: {
columnName: string;
order: 'asc' | 'desc';
};
}
export interface K8sDeploymentsData {
deploymentName: string;
cpuUsage: number;
memoryUsage: number;
desiredPods: number;
availablePods: number;
cpuRequest: number;
memoryRequest: number;
cpuLimit: number;
memoryLimit: number;
restarts: number;
meta: {
k8s_cluster_name: string;
k8s_deployment_name: string;
k8s_namespace_name: string;
};
}
export interface K8sDeploymentsListResponse {
status: string;
data: {
type: string;
records: K8sDeploymentsData[];
groups: null;
total: number;
sentAnyHostMetricsData: boolean;
isSendingK8SAgentMetrics: boolean;
};
}
export const getK8sDeploymentsList = async (
props: K8sDeploymentsListPayload,
signal?: AbortSignal,
headers?: Record<string, string>,
): Promise<SuccessResponse<K8sDeploymentsListResponse> | ErrorResponse> => {
try {
const response = await axios.post('/deployments/list', props, {
signal,
headers,
});
return {
statusCode: 200,
error: null,
message: 'Success',
payload: response.data,
params: props,
};
} catch (error) {
return ErrorResponseHandler(error as AxiosError);
}
};

@@ -0,0 +1,72 @@
import axios from 'api';
import { ErrorResponseHandler } from 'api/ErrorResponseHandler';
import { AxiosError } from 'axios';
import { ErrorResponse, SuccessResponse } from 'types/api';
import { BaseAutocompleteData } from 'types/api/queryBuilder/queryAutocompleteResponse';
import { TagFilter } from 'types/api/queryBuilder/queryBuilderData';
export interface K8sJobsListPayload {
filters: TagFilter;
groupBy?: BaseAutocompleteData[];
offset?: number;
limit?: number;
orderBy?: {
columnName: string;
order: 'asc' | 'desc';
};
}
export interface K8sJobsData {
jobName: string;
cpuUsage: number;
memoryUsage: number;
cpuRequest: number;
memoryRequest: number;
cpuLimit: number;
memoryLimit: number;
restarts: number;
desiredSuccessfulPods: number;
activePods: number;
failedPods: number;
successfulPods: number;
meta: {
k8s_cluster_name: string;
k8s_job_name: string;
k8s_namespace_name: string;
};
}
export interface K8sJobsListResponse {
status: string;
data: {
type: string;
records: K8sJobsData[];
groups: null;
total: number;
sentAnyHostMetricsData: boolean;
isSendingK8SAgentMetrics: boolean;
};
}
export const getK8sJobsList = async (
props: K8sJobsListPayload,
signal?: AbortSignal,
headers?: Record<string, string>,
): Promise<SuccessResponse<K8sJobsListResponse> | ErrorResponse> => {
try {
const response = await axios.post('/jobs/list', props, {
signal,
headers,
});
return {
statusCode: 200,
error: null,
message: 'Success',
payload: response.data,
params: props,
};
} catch (error) {
return ErrorResponseHandler(error as AxiosError);
}
};

@@ -0,0 +1,62 @@
import axios from 'api';
import { ErrorResponseHandler } from 'api/ErrorResponseHandler';
import { AxiosError } from 'axios';
import { ErrorResponse, SuccessResponse } from 'types/api';
import { BaseAutocompleteData } from 'types/api/queryBuilder/queryAutocompleteResponse';
import { TagFilter } from 'types/api/queryBuilder/queryBuilderData';
export interface K8sNamespacesListPayload {
filters: TagFilter;
groupBy?: BaseAutocompleteData[];
offset?: number;
limit?: number;
orderBy?: {
columnName: string;
order: 'asc' | 'desc';
};
}
export interface K8sNamespacesData {
namespaceName: string;
cpuUsage: number;
memoryUsage: number;
meta: {
k8s_cluster_name: string;
k8s_namespace_name: string;
};
}
export interface K8sNamespacesListResponse {
status: string;
data: {
type: string;
records: K8sNamespacesData[];
groups: null;
total: number;
sentAnyHostMetricsData: boolean;
isSendingK8SAgentMetrics: boolean;
};
}
export const getK8sNamespacesList = async (
props: K8sNamespacesListPayload,
signal?: AbortSignal,
headers?: Record<string, string>,
): Promise<SuccessResponse<K8sNamespacesListResponse> | ErrorResponse> => {
try {
const response = await axios.post('/namespaces/list', props, {
signal,
headers,
});
return {
statusCode: 200,
error: null,
message: 'Success',
payload: response.data,
params: props,
};
} catch (error) {
return ErrorResponseHandler(error as AxiosError);
}
};

@@ -1,4 +1,4 @@
import { ApiBaseInstance } from 'api';
import axios from 'api';
import { ErrorResponseHandler } from 'api/ErrorResponseHandler';
import { AxiosError } from 'axios';
import { ErrorResponse, SuccessResponse } from 'types/api';
@@ -47,7 +47,7 @@ export const getK8sNodesList = async (
headers?: Record<string, string>,
): Promise<SuccessResponse<K8sNodesListResponse> | ErrorResponse> => {
try {
const response = await ApiBaseInstance.post('/nodes/list', props, {
const response = await axios.post('/nodes/list', props, {
signal,
headers,
});

@@ -1,4 +1,4 @@
import { ApiBaseInstance } from 'api';
import axios from 'api';
import { ErrorResponseHandler } from 'api/ErrorResponseHandler';
import { AxiosError } from 'axios';
import { ErrorResponse, SuccessResponse } from 'types/api';
@@ -75,7 +75,7 @@ export const getK8sPodsList = async (
headers?: Record<string, string>,
): Promise<SuccessResponse<K8sPodsListResponse> | ErrorResponse> => {
try {
const response = await ApiBaseInstance.post('/pods/list', props, {
const response = await axios.post('/pods/list', props, {
signal,
headers,
});

@@ -0,0 +1,71 @@
import axios from 'api';
import { ErrorResponseHandler } from 'api/ErrorResponseHandler';
import { AxiosError } from 'axios';
import { ErrorResponse, SuccessResponse } from 'types/api';
import { BaseAutocompleteData } from 'types/api/queryBuilder/queryAutocompleteResponse';
import { TagFilter } from 'types/api/queryBuilder/queryBuilderData';
export interface K8sVolumesListPayload {
filters: TagFilter;
groupBy?: BaseAutocompleteData[];
offset?: number;
limit?: number;
orderBy?: {
columnName: string;
order: 'asc' | 'desc';
};
}
export interface K8sVolumesData {
persistentVolumeClaimName: string;
volumeAvailable: number;
volumeCapacity: number;
volumeInodes: number;
volumeInodesFree: number;
volumeInodesUsed: number;
volumeUsage: number;
meta: {
k8s_cluster_name: string;
k8s_namespace_name: string;
k8s_node_name: string;
k8s_persistentvolumeclaim_name: string;
k8s_pod_name: string;
k8s_pod_uid: string;
k8s_statefulset_name: string;
};
}
export interface K8sVolumesListResponse {
status: string;
data: {
type: string;
records: K8sVolumesData[];
groups: null;
total: number;
sentAnyHostMetricsData: boolean;
isSendingK8SAgentMetrics: boolean;
};
}
export const getK8sVolumesList = async (
props: K8sVolumesListPayload,
signal?: AbortSignal,
headers?: Record<string, string>,
): Promise<SuccessResponse<K8sVolumesListResponse> | ErrorResponse> => {
try {
const response = await axios.post('/pvcs/list', props, {
signal,
headers,
});
return {
statusCode: 200,
error: null,
message: 'Success',
payload: response.data,
params: props,
};
} catch (error) {
return ErrorResponseHandler(error as AxiosError);
}
};

@@ -0,0 +1,69 @@
import axios from 'api';
import { ErrorResponseHandler } from 'api/ErrorResponseHandler';
import { AxiosError } from 'axios';
import { ErrorResponse, SuccessResponse } from 'types/api';
import { BaseAutocompleteData } from 'types/api/queryBuilder/queryAutocompleteResponse';
import { TagFilter } from 'types/api/queryBuilder/queryBuilderData';
export interface K8sStatefulSetsListPayload {
filters: TagFilter;
groupBy?: BaseAutocompleteData[];
offset?: number;
limit?: number;
orderBy?: {
columnName: string;
order: 'asc' | 'desc';
};
}
export interface K8sStatefulSetsData {
statefulSetName: string;
cpuUsage: number;
memoryUsage: number;
desiredPods: number;
availablePods: number;
cpuRequest: number;
memoryRequest: number;
cpuLimit: number;
memoryLimit: number;
restarts: number;
meta: {
k8s_statefulset_name: string;
k8s_namespace_name: string;
};
}
export interface K8sStatefulSetsListResponse {
status: string;
data: {
type: string;
records: K8sStatefulSetsData[];
groups: null;
total: number;
sentAnyHostMetricsData: boolean;
isSendingK8SAgentMetrics: boolean;
};
}
export const getK8sStatefulSetsList = async (
props: K8sStatefulSetsListPayload,
signal?: AbortSignal,
headers?: Record<string, string>,
): Promise<SuccessResponse<K8sStatefulSetsListResponse> | ErrorResponse> => {
try {
const response = await axios.post('/statefulsets/list', props, {
signal,
headers,
});
return {
statusCode: 200,
error: null,
message: 'Success',
payload: response.data,
params: props,
};
} catch (error) {
return ErrorResponseHandler(error as AxiosError);
}
};

@@ -1,4 +1,4 @@
import axios from 'api';
import { ApiV3Instance as axios } from 'api';
import { ErrorResponseHandler } from 'api/ErrorResponseHandler';
import { AxiosError } from 'axios';
import { ErrorResponse, SuccessResponse } from 'types/api';

@@ -1,4 +1,4 @@
import { ApiV2Instance as axios } from 'api';
import { ApiV3Instance as axios } from 'api';
import { ErrorResponse, SuccessResponse } from 'types/api';
import { PayloadProps } from 'types/api/licenses/getAll';

Some files were not shown because too many files have changed in this diff.