Compare commits

...

12 Commits

Author SHA1 Message Date
primus-bot[bot]
de53119257 chore(release): bump to v0.76.0 (#7311)
#### Summary
 - Release SigNoz v0.76.0

 Created by [Primus-Bot](https://github.com/apps/primus-bot)
2025-03-14 17:41:24 +05:30
Prashant Shahi
eee3f7d549 feat: signoz package and deprecation of frontend, alertmanager (#7301)
### Summary

- signoz package with goreleaser
- frontend deprecation
- alertmanager deprecation

---------

Signed-off-by: Prashant Shahi <prashant@signoz.io>
2025-03-14 17:23:10 +05:30
Shaheer Kochai
32d144845a fix: update the auto-complete API reading logic (#7254) 2025-03-14 02:50:04 +00:00
Nityananda Gohain
e614d6b0e9 fix: handle 0 step interval (#7307) 2025-03-13 22:20:54 +05:30
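The zero-step fix above is easiest to picture as a guard on the query parameters. Here is a minimal Go sketch of the idea; the `sanitizeStep` helper and its default value are illustrative assumptions, not taken from the PR:

```go
package main

import "fmt"

// sanitizeStep is a hypothetical guard illustrating the kind of fix a
// "handle 0 step interval" change implies: fall back to a sane default
// so a range query never runs with a zero (or negative) step.
func sanitizeStep(stepSeconds int64) int64 {
	const defaultStepSeconds = 60 // assumed default, not taken from the PR
	if stepSeconds <= 0 {
		return defaultStepSeconds
	}
	return stepSeconds
}

func main() {
	fmt.Println(sanitizeStep(0))  // 60
	fmt.Println(sanitizeStep(30)) // 30
}
```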
Vibhu Pandey
7f71c0ed2d docs(contributing): revamp contributing docs (#7290) 2025-03-13 19:36:55 +05:30
Dāvis
50c1af2da8 fix: include frontend with oss query-service
Co-authored-by: Vibhu Pandey <vibhupandey28@gmail.com>
2025-03-13 18:34:20 +05:30
Nityananda Gohain
b46f0c9a7b fix: use correct created_at and updated_at (#7305) 2025-03-13 09:44:06 +00:00
aniketio-ctrl
9df23bc1ed feat: Add inspect metrics API (#7197)
* feat(inspect): added inspect metric api | 7076

* feat(inspect): added inspect metric api | 7076

* feat(inspect): added inspect metric api | 7076

* feat(inspect): removed underscore label keys

* feat(explorer): updated metadata metrics api| 7076

* feat(explorer): added inspect metrics with resource attribute| 7076

* fix(summary): fixed dashboard name in metric metadata api

* fix(summary): removed offset from second query

* fix(summary): removed offset from second query

* feat(inspect): normalized resource attributes

---------

Co-authored-by: Srikanth Chekuri <srikanth.chekuri92@gmail.com>
2025-03-13 14:57:27 +05:30
Srikanth Chekuri
0035ae0072 chore: update infra monitoring events (#7268) 2025-03-13 06:04:44 +00:00
Amlan Kumar Nandy
86a888a6a2 chore: metrics explorer improvements (#7285) 2025-03-13 05:13:02 +00:00
Nityananda Gohain
9a3c49bce4 Store complete intervals in cache and update logic for response (#7212)
* fix: new implementation for finding missing timerange

* fix: remove unwanted code

* fix: update if condition

* fix: update logic and the test cases

* fix: correct name

* fix: filter points which are not a complete agg interval

* fix: fix the logic to use the points correctly

* fix: fix overlapping test case

* fix: add comments

* Update pkg/query-service/querycache/query_range_cache.go

Co-authored-by: ellipsis-dev[bot] <65095814+ellipsis-dev[bot]@users.noreply.github.com>

* fix: use step ms

* fix: use step ms

* fix: tests

* fix: update logic to handle actual empty series

* fix: name updated

* Update pkg/query-service/app/querier/v2/helper.go

Co-authored-by: ellipsis-dev[bot] <65095814+ellipsis-dev[bot]@users.noreply.github.com>

* fix: address comments

* fix: address comments

* fix: address comments

* Update pkg/query-service/common/query_range.go

Co-authored-by: ellipsis-dev[bot] <65095814+ellipsis-dev[bot]@users.noreply.github.com>

* fix: add error log

* fix: handle case where end is equal to a complete window end

* fix: added comments

* fix: address comments

* fix: move function to common query range

---------

Co-authored-by: ellipsis-dev[bot] <65095814+ellipsis-dev[bot]@users.noreply.github.com>
2025-03-13 04:34:06 +00:00
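The commit message above outlines the caching strategy: keep only complete aggregation intervals in the cache and fetch whatever part of the requested range is missing. A rough Go sketch of that idea follows, assuming cached intervals are sorted and merged; the names and types are illustrative, not SigNoz's actual implementation:

```go
package main

import "fmt"

// Interval is a cached, fully aggregated time window [Start, End) in ms.
type Interval struct{ Start, End int64 }

// missingRanges returns the sub-ranges of [start, end) not covered by the
// cached intervals; only these need to be queried from the database.
func missingRanges(start, end int64, cached []Interval) []Interval {
	var missing []Interval
	cursor := start
	for _, iv := range cached {
		if iv.End <= cursor || iv.Start >= end {
			continue // no overlap with the requested range
		}
		if iv.Start > cursor {
			missing = append(missing, Interval{cursor, iv.Start})
		}
		if iv.End > cursor {
			cursor = iv.End
		}
	}
	if cursor < end {
		missing = append(missing, Interval{cursor, end})
	}
	return missing
}

// completeWindowEnd floors end down to the last complete aggregation window,
// mirroring "filter points which are not a complete agg interval".
func completeWindowEnd(end, stepMs int64) int64 {
	return end - end%stepMs
}

func main() {
	// Cache covers 10-20 and 30-40; a request for 0-50 only fetches
	// 0-10, 20-30 and 40-50.
	fmt.Println(missingRanges(0, 50, []Interval{{10, 20}, {30, 40}}))
	fmt.Println(completeWindowEnd(95, 30)) // 90
}
```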
Vibhu Pandey
946a249c85 refactor(e2e): remove e2e package (#7265)
### Summary

remove e2e package

#### Related Issues / PR's

Unused
2025-03-13 01:41:34 +05:30
118 changed files with 3253 additions and 1803 deletions


@@ -0,0 +1,73 @@
services:
  clickhouse:
    image: clickhouse/clickhouse-server:24.1.2-alpine
    container_name: clickhouse
    volumes:
      - ${PWD}/fs/etc/clickhouse-server/config.d/config.xml:/etc/clickhouse-server/config.d/config.xml
      - ${PWD}/fs/etc/clickhouse-server/users.d/users.xml:/etc/clickhouse-server/users.d/users.xml
      - ${PWD}/fs/tmp/var/lib/clickhouse/:/var/lib/clickhouse/
      - ${PWD}/fs/tmp/var/lib/clickhouse/user_scripts/:/var/lib/clickhouse/user_scripts/
    ports:
      - '127.0.0.1:8123:8123'
      - '127.0.0.1:9000:9000'
    tty: true
    healthcheck:
      test:
        - CMD
        - wget
        - --spider
        - -q
        - 0.0.0.0:8123/ping
      interval: 30s
      timeout: 5s
      retries: 3
    depends_on:
      - zookeeper
  zookeeper:
    image: bitnami/zookeeper:3.7.1
    container_name: zookeeper
    volumes:
      - ${PWD}/fs/tmp/zookeeper:/bitnami/zookeeper
    ports:
      - '127.0.0.1:2181:2181'
    environment:
      - ALLOW_ANONYMOUS_LOGIN=yes
    healthcheck:
      test:
        - CMD-SHELL
        - curl -s -m 2 http://localhost:8080/commands/ruok | grep error | grep null
      interval: 30s
      timeout: 5s
      retries: 3
  schema-migrator-sync:
    image: signoz/signoz-schema-migrator:0.111.29
    container_name: schema-migrator-sync
    command:
      - sync
      - --cluster-name=cluster
      - --dsn=tcp://clickhouse:9000
      - --replication=true
      - --up=
    depends_on:
      clickhouse:
        condition: service_healthy
    restart: on-failure
  schema-migrator-async:
    image: signoz/signoz-schema-migrator:0.111.29
    container_name: schema-migrator-async
    command:
      - async
      - --cluster-name=cluster
      - --dsn=tcp://clickhouse:9000
      - --replication=true
      - --up=
    depends_on:
      clickhouse:
        condition: service_healthy
      schema-migrator-sync:
        condition: service_completed_successfully
    restart: on-failure
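This compose file is the new ClickHouse devenv. The Makefile change further down in this diff adds a `devenv-clickhouse` target that brings it up with `docker compose -f compose.yaml up -d` from `.devenv/docker/clickhouse`, so `make devenv-clickhouse` is enough to get a local ClickHouse plus ZooKeeper with schema migrations applied.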


@@ -0,0 +1,47 @@
<clickhouse replace="true">
    <logger>
        <level>information</level>
        <formatting>
            <type>json</type>
        </formatting>
        <log>/var/log/clickhouse-server/clickhouse-server.log</log>
        <errorlog>/var/log/clickhouse-server/clickhouse-server.err.log</errorlog>
        <size>1000M</size>
        <count>3</count>
    </logger>
    <display_name>cluster</display_name>
    <listen_host>0.0.0.0</listen_host>
    <http_port>8123</http_port>
    <tcp_port>9000</tcp_port>
    <user_directories>
        <users_xml>
            <path>users.xml</path>
        </users_xml>
        <local_directory>
            <path>/var/lib/clickhouse/access/</path>
        </local_directory>
    </user_directories>
    <distributed_ddl>
        <path>/clickhouse/task_queue/ddl</path>
    </distributed_ddl>
    <remote_servers>
        <cluster>
            <shard>
                <replica>
                    <host>clickhouse</host>
                    <port>9000</port>
                </replica>
            </shard>
        </cluster>
    </remote_servers>
    <zookeeper>
        <node>
            <host>zookeeper</host>
            <port>2181</port>
        </node>
    </zookeeper>
    <macros>
        <shard>01</shard>
        <replica>01</replica>
    </macros>
</clickhouse>


@@ -0,0 +1,36 @@
<?xml version="1.0"?>
<clickhouse replace="true">
    <profiles>
        <default>
            <max_memory_usage>10000000000</max_memory_usage>
            <use_uncompressed_cache>0</use_uncompressed_cache>
            <load_balancing>in_order</load_balancing>
            <log_queries>1</log_queries>
        </default>
    </profiles>
    <users>
        <default>
            <profile>default</profile>
            <networks>
                <ip>::/0</ip>
            </networks>
            <quota>default</quota>
            <access_management>1</access_management>
            <named_collection_control>1</named_collection_control>
            <show_named_collections>1</show_named_collections>
            <show_named_collections_secrets>1</show_named_collections_secrets>
        </default>
    </users>
    <quotas>
        <default>
            <interval>
                <duration>3600</duration>
                <queries>0</queries>
                <errors>0</errors>
                <result_rows>0</result_rows>
                <read_rows>0</read_rows>
                <execution_time>0</execution_time>
            </interval>
        </default>
    </quotas>
</clickhouse>


@@ -14,12 +14,12 @@ jobs:
uses: actions/checkout@v4
- name: Install dependencies
run: cd frontend && yarn install
- name: Build frontend docker image
- name: Build frontend static files
shell: bash
run: |
make build-frontend-amd64
make build-frontend-static
build-query-service:
build-signoz:
runs-on: ubuntu-latest
steps:
- name: Checkout code
@@ -27,13 +27,13 @@ jobs:
- name: Setup golang
uses: actions/setup-go@v4
with:
go-version: "1.21"
- name: Build query-service image
go-version: "1.22"
- name: Build signoz image
shell: bash
run: |
make build-query-service-amd64
make build-signoz-amd64
build-ee-query-service:
build-signoz-community:
runs-on: ubuntu-latest
steps:
- name: Checkout code
@@ -41,8 +41,8 @@ jobs:
- name: Setup golang
uses: actions/setup-go@v4
with:
go-version: "1.21"
- name: Build EE query-service image
go-version: "1.22"
- name: Build signoz community image
shell: bash
run: |
make build-ee-query-service-amd64
make build-signoz-community-amd64


@@ -1,9 +1,8 @@
name: goreleaser
name: gor-histogramquantile
on:
push:
tags:
- v*
- histogram-quantile/v*
permissions:


@@ -0,0 +1,155 @@
name: gor-signoz-community
on:
  push:
    tags:
      - 'v[0-9]+.[0-9]+.[0-9]+'
      - 'v[0-9]+.[0-9]+.[0-9]+-rc.[0-9]+'
permissions:
  contents: write
jobs:
  prepare:
    runs-on: ubuntu-latest
    steps:
      - name: checkout
        uses: actions/checkout@v4
        with:
          fetch-depth: 0
      - name: get-sha
        shell: bash
        run: |
          echo "sha_short=$(git rev-parse --short HEAD)" >> $GITHUB_ENV
      - name: build-frontend
        run: make build-frontend-static
      - name: upload-frontend-artifact
        uses: actions/upload-artifact@v4
        with:
          name: community-frontend-build-${{ env.sha_short }}
          path: frontend/build
  build:
    needs: prepare
    strategy:
      matrix:
        os:
          - ubuntu-latest
          - macos-latest
    env:
      CONFIG_PATH: pkg/query-service/.goreleaser.yaml
    runs-on: ${{ matrix.os }}
    steps:
      - name: checkout
        uses: actions/checkout@v4
        with:
          fetch-depth: 0
      - name: setup-qemu
        uses: docker/setup-qemu-action@v3
        if: matrix.os == 'ubuntu-latest'
      - name: setup-buildx
        uses: docker/setup-buildx-action@v3
        if: matrix.os == 'ubuntu-latest'
      - name: ghcr-login
        uses: docker/login-action@v3
        if: matrix.os != 'macos-latest'
        with:
          username: ${{ secrets.DOCKERHUB_USERNAME }}
          password: ${{ secrets.DOCKERHUB_TOKEN }}
      - name: setup-go
        uses: actions/setup-go@v5
        with:
          go-version: "1.22"
      - name: cross-compilation-tools
        if: matrix.os == 'ubuntu-latest'
        run: |
          sudo apt-get update
          sudo apt-get install -y gcc-aarch64-linux-gnu musl-tools
      - name: get-sha
        shell: bash
        run: |
          echo "sha_short=$(git rev-parse --short HEAD)" >> $GITHUB_ENV
      - name: download-frontend-artifact
        uses: actions/download-artifact@v4
        with:
          name: community-frontend-build-${{ env.sha_short }}
          path: frontend/build
      - name: cache-linux
        uses: actions/cache@v4
        if: matrix.os == 'ubuntu-latest'
        with:
          path: dist/linux
          key: signoz-community-linux-${{ env.sha_short }}
      - name: cache-darwin
        uses: actions/cache@v4
        if: matrix.os == 'macos-latest'
        with:
          path: dist/darwin
          key: signoz-community-darwin-${{ env.sha_short }}
      - name: release
        uses: goreleaser/goreleaser-action@v6
        with:
          distribution: goreleaser-pro
          version: '~> v2'
          args: release --config ${{ env.CONFIG_PATH }} --clean --split
          workdir: .
        env:
          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
          GORELEASER_KEY: ${{ secrets.GORELEASER_KEY }}
  release:
    runs-on: ubuntu-latest
    needs: build
    env:
      DOCKER_CLI_EXPERIMENTAL: "enabled"
      WORKDIR: pkg/query-service
    steps:
      - name: checkout
        uses: actions/checkout@v4
        with:
          fetch-depth: 0
      - name: setup-qemu
        uses: docker/setup-qemu-action@v3
      - name: setup-buildx
        uses: docker/setup-buildx-action@v3
      - name: cosign-installer
        uses: sigstore/cosign-installer@v3.8.1
      - name: download-syft
        uses: anchore/sbom-action/download-syft@v0.18.0
      - name: ghcr-login
        uses: docker/login-action@v3
        with:
          username: ${{ secrets.DOCKERHUB_USERNAME }}
          password: ${{ secrets.DOCKERHUB_TOKEN }}
      - name: setup-go
        uses: actions/setup-go@v5
        with:
          go-version: "1.22"
      # copy the caches from build
      - name: get-sha
        shell: bash
        run: |
          echo "sha_short=$(git rev-parse --short HEAD)" >> $GITHUB_ENV
      - name: cache-linux
        id: cache-linux
        uses: actions/cache@v4
        with:
          path: dist/linux
          key: signoz-community-linux-${{ env.sha_short }}
      - name: cache-darwin
        id: cache-darwin
        uses: actions/cache@v4
        with:
          path: dist/darwin
          key: signoz-community-darwin-${{ env.sha_short }}
      # release
      - uses: goreleaser/goreleaser-action@v6
        if: steps.cache-linux.outputs.cache-hit == 'true' && steps.cache-darwin.outputs.cache-hit == 'true' # only run if caches hit
        with:
          distribution: goreleaser-pro
          version: '~> v2'
          args: continue --merge
          workdir: .
        env:
          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
          GORELEASER_KEY: ${{ secrets.GORELEASER_KEY }}
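The workflow uses GoReleaser Pro's split/merge flow: each OS in the build matrix runs `goreleaser release --split` to produce a partial `dist/<os>` tree, the partials are handed between jobs via `actions/cache`, and the final release job runs `goreleaser continue --merge` to stitch them into a single release. The new `gor-split` and `gor-merge` Makefile targets further down expose the same pair of commands for local use.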

.github/workflows/gor-signoz.yaml

@@ -0,0 +1,168 @@
name: gor-signoz
on:
  push:
    tags:
      - 'v[0-9]+.[0-9]+.[0-9]+'
      - 'v[0-9]+.[0-9]+.[0-9]+-rc.[0-9]+'
permissions:
  contents: write
jobs:
  prepare:
    runs-on: ubuntu-latest
    steps:
      - name: checkout
        uses: actions/checkout@v4
        with:
          fetch-depth: 0
      - name: get-sha
        shell: bash
        run: |
          echo "sha_short=$(git rev-parse --short HEAD)" >> $GITHUB_ENV
      - name: dotenv-frontend
        working-directory: frontend
        run: |
          echo 'INTERCOM_APP_ID="${{ secrets.INTERCOM_APP_ID }}"' > .env
          echo 'SEGMENT_ID="${{ secrets.SEGMENT_ID }}"' >> .env
          echo 'SENTRY_AUTH_TOKEN="${{ secrets.SENTRY_AUTH_TOKEN }}"' >> .env
          echo 'SENTRY_ORG="${{ secrets.SENTRY_ORG }}"' >> .env
          echo 'SENTRY_PROJECT_ID="${{ secrets.SENTRY_PROJECT_ID }}"' >> .env
          echo 'SENTRY_DSN="${{ secrets.SENTRY_DSN }}"' >> .env
          echo 'TUNNEL_URL="${{ secrets.TUNNEL_URL }}"' >> .env
          echo 'TUNNEL_DOMAIN="${{ secrets.TUNNEL_DOMAIN }}"' >> .env
          echo 'POSTHOG_KEY="${{ secrets.POSTHOG_KEY }}"' >> .env
          echo 'CUSTOMERIO_ID="${{ secrets.CUSTOMERIO_ID }}"' >> .env
          echo 'CUSTOMERIO_SITE_ID="${{ secrets.CUSTOMERIO_SITE_ID }}"' >> .env
      - name: build-frontend
        run: make build-frontend-static
      - name: upload-frontend-artifact
        uses: actions/upload-artifact@v4
        with:
          name: frontend-build-${{ env.sha_short }}
          path: frontend/build
  build:
    needs: prepare
    strategy:
      matrix:
        os:
          - ubuntu-latest
          - macos-latest
    env:
      CONFIG_PATH: ee/query-service/.goreleaser.yaml
    runs-on: ${{ matrix.os }}
    steps:
      - name: checkout
        uses: actions/checkout@v4
        with:
          fetch-depth: 0
      - name: setup-qemu
        uses: docker/setup-qemu-action@v3
        if: matrix.os == 'ubuntu-latest'
      - name: setup-buildx
        uses: docker/setup-buildx-action@v3
        if: matrix.os == 'ubuntu-latest'
      - name: ghcr-login
        uses: docker/login-action@v3
        if: matrix.os != 'macos-latest'
        with:
          username: ${{ secrets.DOCKERHUB_USERNAME }}
          password: ${{ secrets.DOCKERHUB_TOKEN }}
      - name: setup-go
        uses: actions/setup-go@v5
        with:
          go-version: "1.22"
      - name: cross-compilation-tools
        if: matrix.os == 'ubuntu-latest'
        run: |
          sudo apt-get update
          sudo apt-get install -y gcc-aarch64-linux-gnu musl-tools
      - name: get-sha
        shell: bash
        run: |
          echo "sha_short=$(git rev-parse --short HEAD)" >> $GITHUB_ENV
      - name: download-frontend-artifact
        uses: actions/download-artifact@v4
        with:
          name: frontend-build-${{ env.sha_short }}
          path: frontend/build
      - name: cache-linux
        uses: actions/cache@v4
        if: matrix.os == 'ubuntu-latest'
        with:
          path: dist/linux
          key: signoz-linux-${{ env.sha_short }}
      - name: cache-darwin
        uses: actions/cache@v4
        if: matrix.os == 'macos-latest'
        with:
          path: dist/darwin
          key: signoz-darwin-${{ env.sha_short }}
      - name: release
        uses: goreleaser/goreleaser-action@v6
        with:
          distribution: goreleaser-pro
          version: '~> v2'
          args: release --config ${{ env.CONFIG_PATH }} --clean --split
          workdir: .
        env:
          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
          GORELEASER_KEY: ${{ secrets.GORELEASER_KEY }}
  release:
    runs-on: ubuntu-latest
    needs: build
    env:
      DOCKER_CLI_EXPERIMENTAL: "enabled"
    steps:
      - name: checkout
        uses: actions/checkout@v4
        with:
          fetch-depth: 0
      - name: setup-qemu
        uses: docker/setup-qemu-action@v3
      - name: setup-buildx
        uses: docker/setup-buildx-action@v3
      - name: cosign-installer
        uses: sigstore/cosign-installer@v3.8.1
      - name: download-syft
        uses: anchore/sbom-action/download-syft@v0.18.0
      - name: ghcr-login
        uses: docker/login-action@v3
        with:
          username: ${{ secrets.DOCKERHUB_USERNAME }}
          password: ${{ secrets.DOCKERHUB_TOKEN }}
      - name: setup-go
        uses: actions/setup-go@v5
        with:
          go-version: "1.22"
      # copy the caches from build
      - name: get-sha
        shell: bash
        run: |
          echo "sha_short=$(git rev-parse --short HEAD)" >> $GITHUB_ENV
      - name: cache-linux
        id: cache-linux
        uses: actions/cache@v4
        with:
          path: dist/linux
          key: signoz-linux-${{ env.sha_short }}
      - name: cache-darwin
        id: cache-darwin
        uses: actions/cache@v4
        with:
          path: dist/darwin
          key: signoz-darwin-${{ env.sha_short }}
      # release
      - uses: goreleaser/goreleaser-action@v6
        if: steps.cache-linux.outputs.cache-hit == 'true' && steps.cache-darwin.outputs.cache-hit == 'true' # only run if caches hit
        with:
          distribution: goreleaser-pro
          version: '~> v2'
          args: continue --merge
          workdir: .
        env:
          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
          GORELEASER_KEY: ${{ secrets.GORELEASER_KEY }}


@@ -8,56 +8,27 @@ on:
- v*
jobs:
image-build-and-push-query-service:
image-build-and-push-signoz:
runs-on: ubuntu-latest
steps:
- name: Checkout code
- name: checkout
uses: actions/checkout@v4
- name: Setup golang
- name: setup
uses: actions/setup-go@v4
with:
go-version: "1.21"
- name: Set up QEMU
go-version: "1.22"
- name: setup-qemu
uses: docker/setup-qemu-action@v3
- name: Set up Docker Buildx
- name: setup-buildx
uses: docker/setup-buildx-action@v3
with:
version: latest
- name: Login to DockerHub
- name: docker-login
uses: docker/login-action@v3
with:
username: ${{ secrets.DOCKERHUB_USERNAME }}
password: ${{ secrets.DOCKERHUB_TOKEN }}
- uses: benjlevesque/short-sha@v2.2
id: short-sha
- name: Get branch name
id: branch-name
uses: tj-actions/branch-names@v7.0.7
- name: Set docker tag environment
run: |
if [ '${{ steps.branch-name.outputs.is_tag }}' == 'true' ]; then
tag="${{ steps.branch-name.outputs.tag }}"
tag="${tag:1}"
echo "DOCKER_TAG=${tag}-oss" >> $GITHUB_ENV
elif [ '${{ steps.branch-name.outputs.current_branch }}' == 'main' ]; then
echo "DOCKER_TAG=latest-oss" >> $GITHUB_ENV
else
echo "DOCKER_TAG=${{ steps.branch-name.outputs.current_branch }}-oss" >> $GITHUB_ENV
fi
- name: Install cross-compilation tools
run: |
set -ex
sudo apt-get update
sudo apt-get install -y gcc-aarch64-linux-gnu musl-tools
- name: Build and push docker image
run: make build-push-query-service
image-build-and-push-ee-query-service:
runs-on: ubuntu-latest
steps:
- name: Checkout code
uses: actions/checkout@v4
- name: Create .env file
- name: create-env-file
run: |
echo 'INTERCOM_APP_ID="${{ secrets.INTERCOM_APP_ID }}"' > frontend/.env
echo 'SEGMENT_ID="${{ secrets.SEGMENT_ID }}"' >> frontend/.env
@@ -70,140 +41,94 @@ jobs:
echo 'POSTHOG_KEY="${{ secrets.POSTHOG_KEY }}"' >> frontend/.env
echo 'CUSTOMERIO_ID="${{ secrets.CUSTOMERIO_ID }}"' >> frontend/.env
echo 'CUSTOMERIO_SITE_ID="${{ secrets.CUSTOMERIO_SITE_ID }}"' >> frontend/.env
- name: Setup golang
uses: actions/setup-go@v4
with:
go-version: "1.21"
- name: Set up QEMU
uses: docker/setup-qemu-action@v3
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v3
with:
version: latest
- name: Login to DockerHub
uses: docker/login-action@v3
with:
username: ${{ secrets.DOCKERHUB_USERNAME }}
password: ${{ secrets.DOCKERHUB_TOKEN }}
- uses: benjlevesque/short-sha@v2.2
id: short-sha
- name: Get branch name
- name: branch-name
id: branch-name
uses: tj-actions/branch-names@v7.0.7
- name: Set docker tag environment
- name: docker-tag
run: |
if [ '${{ steps.branch-name.outputs.is_tag }}' == 'true' ]; then
tag="${{ steps.branch-name.outputs.tag }}"
tag="${tag:1}"
echo "DOCKER_TAG=$tag" >> $GITHUB_ENV
echo "DOCKER_TAG=${{ steps.branch-name.outputs.tag }}" >> $GITHUB_ENV
elif [ '${{ steps.branch-name.outputs.current_branch }}' == 'main' ]; then
echo "DOCKER_TAG=latest" >> $GITHUB_ENV
else
echo "DOCKER_TAG=${{ steps.branch-name.outputs.current_branch }}" >> $GITHUB_ENV
fi
- name: Install cross-compilation tools
- name: cross-compilation-tools
run: |
set -ex
sudo apt-get update
sudo apt-get install -y gcc-aarch64-linux-gnu musl-tools
- name: Build and push docker image
run: make build-push-ee-query-service
image-build-and-push-frontend:
runs-on: ubuntu-latest
steps:
- name: Checkout code
uses: actions/checkout@v4
- name: Install dependencies
working-directory: frontend
run: yarn install
- name: Run Prettier
working-directory: frontend
run: npm run prettify
continue-on-error: true
- name: Run ESLint
working-directory: frontend
run: npm run lint
continue-on-error: true
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v3
with:
version: latest
- name: Login to DockerHub
uses: docker/login-action@v3
with:
username: ${{ secrets.DOCKERHUB_USERNAME }}
password: ${{ secrets.DOCKERHUB_TOKEN }}
- uses: benjlevesque/short-sha@v2.2
id: short-sha
- name: Get branch name
id: branch-name
uses: tj-actions/branch-names@v7.0.7
- name: Set docker tag environment
- name: publish-signoz
run: make build-push-signoz
- name: qs-docker-tag
run: |
if [ '${{ steps.branch-name.outputs.is_tag }}' == 'true' ]; then
tag="${{ steps.branch-name.outputs.tag }}"
tag="${tag:1}"
echo "DOCKER_TAG=$tag" >> $GITHUB_ENV
echo "DOCKER_TAG=${tag}" >> $GITHUB_ENV
elif [ '${{ steps.branch-name.outputs.current_branch }}' == 'main' ]; then
echo "DOCKER_TAG=latest" >> $GITHUB_ENV
else
echo "DOCKER_TAG=${{ steps.branch-name.outputs.current_branch }}" >> $GITHUB_ENV
fi
- name: Build and push docker image
run: make build-push-frontend
- name: publish-query-service
run: |
SIGNOZ_DOCKER_IMAGE=query-service make build-push-signoz
image-build-and-push-frontend-ee:
image-build-and-push-signoz-community:
runs-on: ubuntu-latest
steps:
- name: Checkout code
- name: checkout
uses: actions/checkout@v4
- name: Create .env file
run: |
echo 'INTERCOM_APP_ID="${{ secrets.INTERCOM_APP_ID }}"' > frontend/.env
echo 'SEGMENT_ID="${{ secrets.SEGMENT_ID }}"' >> frontend/.env
echo 'SENTRY_AUTH_TOKEN="${{ secrets.SENTRY_AUTH_TOKEN }}"' >> frontend/.env
echo 'SENTRY_ORG="${{ secrets.SENTRY_ORG }}"' >> frontend/.env
echo 'SENTRY_PROJECT_ID="${{ secrets.SENTRY_PROJECT_ID }}"' >> frontend/.env
echo 'SENTRY_DSN="${{ secrets.SENTRY_DSN }}"' >> frontend/.env
echo 'TUNNEL_URL="${{ secrets.TUNNEL_URL }}"' >> frontend/.env
echo 'TUNNEL_DOMAIN="${{ secrets.TUNNEL_DOMAIN }}"' >> frontend/.env
echo 'POSTHOG_KEY="${{ secrets.POSTHOG_KEY }}"' >> frontend/.env
- name: Install dependencies
working-directory: frontend
run: yarn install
- name: Run Prettier
working-directory: frontend
run: npm run prettify
continue-on-error: true
- name: Run ESLint
working-directory: frontend
run: npm run lint
continue-on-error: true
- name: Set up Docker Buildx
- name: setup-go
uses: actions/setup-go@v4
with:
go-version: "1.22"
- name: setup-qemu
uses: docker/setup-qemu-action@v3
- name: setup-buildx
uses: docker/setup-buildx-action@v3
with:
version: latest
- name: Login to DockerHub
- name: docker-login
uses: docker/login-action@v3
with:
username: ${{ secrets.DOCKERHUB_USERNAME }}
password: ${{ secrets.DOCKERHUB_TOKEN }}
- uses: benjlevesque/short-sha@v2.2
id: short-sha
- name: Get branch name
- name: branch-name
id: branch-name
uses: tj-actions/branch-names@v7.0.7
- name: Set docker tag environment
- name: docker-tag
run: |
if [ '${{ steps.branch-name.outputs.is_tag }}' == 'true' ]; then
echo "DOCKER_TAG=${{ steps.branch-name.outputs.tag }}" >> $GITHUB_ENV
elif [ '${{ steps.branch-name.outputs.current_branch }}' == 'main' ]; then
echo "DOCKER_TAG=latest" >> $GITHUB_ENV
else
echo "DOCKER_TAG=${{ steps.branch-name.outputs.current_branch }}" >> $GITHUB_ENV
fi
- name: cross-compilation-tools
run: |
set -ex
sudo apt-get update
sudo apt-get install -y gcc-aarch64-linux-gnu musl-tools
- name: publish-signoz-community
run: make build-push-signoz-community
- name: qs-docker-tag
run: |
if [ '${{ steps.branch-name.outputs.is_tag }}' == 'true' ]; then
tag="${{ steps.branch-name.outputs.tag }}"
tag="${tag:1}"
echo "DOCKER_TAG=${tag}-ee" >> $GITHUB_ENV
echo "DOCKER_TAG=${tag}-oss" >> $GITHUB_ENV
elif [ '${{ steps.branch-name.outputs.current_branch }}' == 'main' ]; then
echo "DOCKER_TAG=latest-ee" >> $GITHUB_ENV
echo "DOCKER_TAG=latest-oss" >> $GITHUB_ENV
else
echo "DOCKER_TAG=${{ steps.branch-name.outputs.current_branch }}-ee" >> $GITHUB_ENV
echo "DOCKER_TAG=${{ steps.branch-name.outputs.current_branch }}-oss" >> $GITHUB_ENV
fi
- name: Build and push docker image
run: make build-push-frontend
- name: publish-query-service-oss
run: |
SIGNOZ_COMMUNITY_DOCKER_IMAGE=query-service make build-push-signoz-community


@@ -49,8 +49,7 @@ jobs:
git fetch origin
git checkout ${GITHUB_BRANCH}
git pull
make build-ee-query-service-amd64
make build-frontend-amd64
make build-signoz-amd64
make run-testing
EOF
gcloud beta compute ssh ${GCP_INSTANCE} --zone ${GCP_ZONE} --ssh-key-expire-after=15m --tunnel-through-iap --project ${GCP_PROJECT} --command "${COMMAND}"


@@ -49,8 +49,7 @@ jobs:
# This is added to include the scenario when a new commit in the PR is force-pushed
git branch -D ${GITHUB_BRANCH}
git checkout --track origin/${GITHUB_BRANCH}
make build-ee-query-service-amd64
make build-frontend-amd64
make build-signoz-amd64
make run-testing
EOF
gcloud beta compute ssh ${GCP_INSTANCE} --zone ${GCP_ZONE} --ssh-key-expire-after=15m --tunnel-through-iap --project ${GCP_PROJECT} --command "${COMMAND}"

.gitignore

@@ -77,4 +77,8 @@ dist/
# ignore user_scripts that is fetched by init-clickhouse
deploy/common/clickhouse/user_scripts/
queries.active
# .devenv tmp files
.devenv/**/tmp/**


@@ -16,10 +16,8 @@ tasks:
yarn dev
ports:
- port: 3301
onOpen: open-browser
- port: 8080
onOpen: ignore
onOpen: open-browser
- port: 9000
onOpen: ignore
- port: 8123


@@ -1,389 +1,79 @@
# Contributing Guidelines
## Welcome to SigNoz Contributing section 🎉
Thank you for your interest in contributing to our project! We greatly value feedback and contributions from our community. This document will guide you through the contribution process.
Hi there! We're thrilled that you'd like to contribute to this project, thank you for your interest. Whether it's a bug report, new feature, correction, or additional documentation, we greatly value feedback and contributions from our community.
## How can I contribute?
Please read through this document before submitting any issues or pull requests to ensure we have all the necessary information to effectively respond to your bug report or contribution.
### Finding Issues to Work On
- Check our [existing open issues](https://github.com/SigNoz/signoz/issues?q=is%3Aopen+is%3Aissue)
- Look for [good first issues](https://github.com/SigNoz/signoz/issues?q=is%3Aissue+is%3Aopen+label%3A%22good+first+issue%22) to start with
- Review [recently closed issues](https://github.com/SigNoz/signoz/issues?q=is%3Aissue+is%3Aclosed) to avoid duplicates
- We accept contributions made to the SigNoz `develop` branch
- Find all SigNoz Docker Hub images here:
- [signoz/frontend](https://hub.docker.com/r/signoz/frontend)
- [signoz/query-service](https://hub.docker.com/r/signoz/query-service)
- [signoz/otelcontribcol](https://hub.docker.com/r/signoz/otelcontribcol)
### Types of Contributions
## Finding contributions to work on 💬
1. **Report Bugs**: Use our [Bug Report template](https://github.com/SigNoz/signoz/issues/new?assignees=&labels=&template=bug_report.md&title=)
2. **Request Features**: Submit using [Feature Request template](https://github.com/SigNoz/signoz/issues/new?assignees=&labels=&template=feature_request.md&title=)
3. **Improve Documentation**: Create an issue with the `documentation` label
4. **Report Performance Issues**: Use our [Performance Issue template](https://github.com/SigNoz/signoz/issues/new?assignees=&labels=&template=performance-issue-report.md&title=)
5. **Request Dashboards**: Submit using [Dashboard Request template](https://github.com/SigNoz/signoz/issues/new?assignees=&labels=dashboard-template&projects=&template=request_dashboard.md&title=%5BDashboard+Request%5D+)
6. **Report Security Issues**: Follow our [Security Policy](https://github.com/SigNoz/signoz/security/policy)
7. **Join Discussions**: Participate in [project discussions](https://github.com/SigNoz/signoz/discussions)
Looking at the existing issues is a great way to find something to contribute on.
Also, have a look at the [good first issue](https://github.com/SigNoz/signoz/issues?q=is%3Aissue+is%3Aopen+label%3A%22good+first+issue%22) label to start with.
### Creating Helpful Issues
When creating issues, include:
## Sections:
- [General Instructions](#1-general-instructions-)
- [For Creating Issue(s)](#11-for-creating-issues)
- [For Pull Requests(s)](#12-for-pull-requests)
- [How to Contribute](#2-how-to-contribute-%EF%B8%8F)
- [Develop Frontend](#3-develop-frontend-)
- [Contribute to Frontend with Docker installation of SigNoz](#31-contribute-to-frontend-with-docker-installation-of-signoz)
- [Contribute to Frontend without installing SigNoz backend](#32-contribute-to-frontend-without-installing-signoz-backend)
- [Contribute to Backend (Query-Service)](#4-contribute-to-backend-query-service-)
- [To run ClickHouse setup](#41-to-run-clickhouse-setup-recommended-for-local-development)
- [Contribute to SigNoz Helm Chart](#5-contribute-to-signoz-helm-chart-)
- [To run helm chart for local development](#51-to-run-helm-chart-for-local-development)
- [Contribute to Dashboards](#6-contribute-to-dashboards-)
- [Other Ways to Contribute](#other-ways-to-contribute)
- **For Feature Requests**:
- Clear use case and requirements
- Proposed solution or improvement
- Any open questions or considerations
# 1. General Instructions 📝
- **For Bug Reports**:
- Step-by-step reproduction steps
- Version information
- Relevant environment details
- Any modifications you've made
- Expected vs actual behavior
## 1.1 For Creating Issue(s)
Before making any significant changes and before filing a new issue, please check [existing open](https://github.com/SigNoz/signoz/issues?q=is%3Aopen+is%3Aissue), or [recently closed](https://github.com/SigNoz/signoz/issues?q=is%3Aissue+is%3Aclosed) issues to make sure somebody else hasn't already reported the issue. Please try to include as much information as you can.
### Submitting Pull Requests
**Issue Types** - [Bug Report](https://github.com/SigNoz/signoz/issues/new?assignees=&labels=&template=bug_report.md&title=) | [Feature Request](https://github.com/SigNoz/signoz/issues/new?assignees=&labels=&template=feature_request.md&title=) | [Performance Issue Report](https://github.com/SigNoz/signoz/issues/new?assignees=&labels=&template=performance-issue-report.md&title=) | [Request Dashboard](https://github.com/SigNoz/signoz/issues/new?assignees=&labels=dashboard-template&projects=&template=request_dashboard.md&title=%5BDashboard+Request%5D+) | [Report a Security Vulnerability](https://github.com/SigNoz/signoz/security/policy)
1. **Development**:
- Setup your [development environment](docs/contributing/development.md)
- Work against the latest `main` branch
- Focus on specific changes
- Ensure all tests pass locally
- Follow our [commit convention](#commit-convention)
#### Details like these are incredibly useful:
2. **Submit PR**:
- Ensure your branch can be auto-merged
- Address any CI failures
- Respond to review comments promptly
- **Requirement** - what kind of use case are you trying to solve?
- **Proposal** - what do you suggest to solve the problem or improve the existing
situation?
- Any open questions to address❓
For substantial changes, please split your contribution into multiple PRs:
#### If you are reporting a bug, details like these are incredibly useful:
1. First PR: Overall structure (README, configurations, interfaces)
2. Second PR: Core implementation (split further if needed)
3. Final PR: Documentation updates and end-to-end tests
- A reproducible test case or series of steps.
- The version of our code being used.
- Any modifications you've made relevant to the bug🐞.
- Anything unusual about your environment or deployment.
### Commit Convention
Discussing your proposed changes ahead of time will make the contribution
process smooth for everyone 🙌.
We follow [Conventional Commits](https://www.conventionalcommits.org/en/v1.0.0/). All commits and PRs should include type specifiers (e.g., `feat:`, `fix:`, `docs:`, etc.).
**[`^top^`](#contributing-guidelines)**
<hr>
## How can I contribute to other repositories?
## 1.2 For Pull Request(s)
You can find other repositories in the [SigNoz](https://github.com/SigNoz) organization to contribute to. Here is a list of **highlighted** repositories:
Contributions via pull requests are much appreciated. Once the approach is agreed upon ✅, make your changes and open a Pull Request(s).
Before sending us a pull request, please ensure that,
- [charts](https://github.com/SigNoz/charts)
- [dashboards](https://github.com/SigNoz/dashboards)
- Fork the SigNoz repo on GitHub, clone it on your machine.
- Create a branch with your changes.
- You are working against the latest source on the `develop` branch.
- Modify the source; please focus only on the specific change you are contributing.
- Ensure local tests pass.
- Commit to your fork using clear commit messages.
- Send us a pull request, answering any default questions in the pull request interface.
- Pay attention to any automated CI failures reported in the pull request, and stay involved in the conversation.
- Once you've pushed your commits to GitHub, make sure that your branch can be auto-merged (there are no merge conflicts). If not, on your computer, merge main into your branch, resolve any merge conflicts, make sure everything still runs correctly and passes all the tests, and then push up those changes.
- Once the change has been approved and merged, we will inform you in a comment.
Each repository has its own contributing guidelines. Please refer to the guidelines of the repository you want to contribute to.
## How can I get help?
GitHub provides additional document on [forking a repository](https://help.github.com/articles/fork-a-repo/) and
[creating a pull request](https://help.github.com/articles/creating-a-pull-request/).
Need assistance? Join our Slack community:
- [`#contributing`](https://signoz-community.slack.com/archives/C01LWQ8KS7M)
- [`#contributing-frontend`](https://signoz-community.slack.com/archives/C027134DM8B)
**Note:** Unless your change is small, **please** consider submitting different Pull Request(s):
## Where do I go from here?
* 1⃣ First PR should include the overall structure of the new component:
* Readme, configuration, interfaces or base classes, etc...
* This PR is usually trivial to review, so the size limit does not apply to
it.
* 2⃣ Second PR should include the concrete implementation of the component. If the
size of this PR is larger than the recommended size, consider **splitting** ⚔️ it into
multiple PRs.
* If there are multiple sub-component then ideally each one should be implemented as
a **separate** pull request.
* Last PR should include changes to **any user-facing documentation.** And should include
end-to-end tests if applicable. The component must be enabled
only after sufficient testing, and there is enough confidence in the
stability and quality of the component.
You can always reach out to `ankit@signoz.io` to understand more about the repo and product. We are very responsive over email and [slack community](https://signoz.io/slack).
### Pointers:
- If you find any **bugs** → please create an [**issue.**](https://github.com/SigNoz/signoz/issues/new?assignees=&labels=&template=bug_report.md&title=)
- If you find anything **missing** in documentation → you can create an issue with the label **`documentation`**.
- If you want to build any **new feature** → please create an [issue with the label **`enhancement`**.](https://github.com/SigNoz/signoz/issues/new?assignees=&labels=&template=feature_request.md&title=)
- If you want to **discuss** something about the product, start a new [**discussion**.](https://github.com/SigNoz/signoz/discussions)
- If you want to request a new **dashboard template** → please create an issue [here](https://github.com/SigNoz/signoz/issues/new?assignees=&labels=dashboard-template&projects=&template=request_dashboard.md&title=%5BDashboard+Request%5D+).
<hr>
### Conventions to follow when submitting Commits and Pull Request(s).
We try to follow [Conventional Commits](https://www.conventionalcommits.org/en/v1.0.0/), more specifically the commits and PRs **should have type specifiers** prefixed in the name. [This](https://www.conventionalcommits.org/en/v1.0.0/#specification) should give you a better idea.
e.g. If you are submitting a fix for an issue in frontend, the PR name should be prefixed with **`fix(FE):`**
- Follow [GitHub Flow](https://guides.github.com/introduction/flow/) guidelines for your contribution flows.
- Feel free to ping us on [`#contributing`](https://signoz-community.slack.com/archives/C01LWQ8KS7M) or [`#contributing-frontend`](https://signoz-community.slack.com/archives/C027134DM8B) on our slack community if you need any help on this :)
**[`^top^`](#contributing-guidelines)**
<hr>
# 2. How to Contribute 🙋🏻‍♂️
#### There are primarily 2 areas in which you can contribute to SigNoz
- [**Frontend**](#3-develop-frontend-) (Written in Typescript, React)
- [**Backend**](#4-contribute-to-backend-query-service-) (Query Service, written in Go)
- [**Dashboard Templates**](#6-contribute-to-dashboards-) (JSON dashboard templates built with SigNoz)
Depending upon your area of expertise & interest, you can choose one or more to contribute. Below are detailed instructions to contribute in each area.
**Please note:** If you want to work on an issue, please add a brief description of your solution on the issue before starting work on it.
**[`^top^`](#contributing-guidelines)**
<hr>
# 3. Develop Frontend 🌚
**Need to Update: [https://github.com/SigNoz/signoz/tree/main/frontend](https://github.com/SigNoz/signoz/tree/main/frontend)**
Also, have a look at [Frontend README.md](https://github.com/SigNoz/signoz/blob/main/frontend/README.md) sections for more info on how to setup SigNoz frontend locally (with and without Docker).
## 3.1 Contribute to Frontend with Docker installation of SigNoz
- Clone the SigNoz repository and cd into signoz directory,
```
git clone https://github.com/SigNoz/signoz.git && cd signoz
```
- Comment out `frontend` service section at [`deploy/docker/docker-compose.yaml#L68`](https://github.com/SigNoz/signoz/blob/main/deploy/docker/docker-compose.yaml#L68)
![develop-frontend](https://user-images.githubusercontent.com/52788043/179009217-6692616b-17dc-4d27-b587-9d007098d739.jpeg)
- run `cd deploy` to move to deploy directory,
- Install signoz locally **without** the frontend,
- Add / Uncomment the below configuration to query-service section at [`deploy/docker/docker-compose.yaml#L47`](https://github.com/SigNoz/signoz/blob/main/deploy/docker/docker-compose.yaml#L47)
```
ports:
- "8080:8080"
```
<img width="869" alt="query service" src="https://user-images.githubusercontent.com/52788043/179010251-8489be31-04ca-42f8-b30d-ef0bb6accb6b.png">
- Next run,
```
cd deploy/docker
sudo docker compose up -d
```
- `cd ../frontend` and change baseURL in file [`frontend/src/constants/env.ts#L2`](https://github.com/SigNoz/signoz/blob/main/frontend/src/constants/env.ts#L2) and for that, you need to create a `.env` file in the `frontend` directory with the following environment variable (`FRONTEND_API_ENDPOINT`) matching your configuration.
If you have backend api exposed via frontend nginx:
```
FRONTEND_API_ENDPOINT=http://localhost:3301
```
If not:
```
FRONTEND_API_ENDPOINT=http://localhost:8080
```
- Next,
```
yarn install
yarn dev
```
## 3.2 Contribute to Frontend without installing SigNoz backend
If you don't want to install the SigNoz backend just for doing frontend development, we can provide you with test environments that you can use as the backend.
- Clone the SigNoz repository and cd into signoz/frontend directory,
```
git clone https://github.com/SigNoz/signoz.git && cd signoz/frontend
```
- Create a file `.env` in the `frontend` directory with `FRONTEND_API_ENDPOINT=<test environment URL>`
- Next,
```
yarn install
yarn dev
```
Please ping us in the [`#contributing`](https://signoz-community.slack.com/archives/C01LWQ8KS7M) channel or ask `@Prashant Shahi` in our [Slack Community](https://signoz.io/slack) and we will DM you with `<test environment URL>`.
**Frontend should now be accessible at** [`http://localhost:3301/services`](http://localhost:3301/services)
**[`^top^`](#contributing-guidelines)**
<hr>
# 4. Contribute to Backend (Query-Service) 🌑
**Need to Update: [https://github.com/SigNoz/signoz/tree/main/pkg/query-service](https://github.com/SigNoz/signoz/tree/main/pkg/query-service)**
## 4.1 Prerequisites
### 4.1.1 Install SQLite3
- Run `sqlite3` command to check if you already have SQLite3 installed on your machine.
- If not installed already, Install using below command
- on Linux
- on Debian / Ubuntu
```
sudo apt install sqlite3
```
- on CentOS / Fedora / RedHat
```
sudo yum install sqlite3
```
## 4.2 To run ClickHouse setup (recommended for local development)
- Clone the SigNoz repository and cd into signoz directory,
```
git clone https://github.com/SigNoz/signoz.git && cd signoz
```
- run `sudo make dev-setup` to configure local setup to run query-service,
- Comment out `frontend` service section at [`deploy/docker/docker-compose.yaml#L68`](https://github.com/SigNoz/signoz/blob/main/deploy/docker/docker-compose.yaml#L68)
<img width="982" alt="develop-frontend" src="https://user-images.githubusercontent.com/52788043/179043977-012be8b0-a2ed-40d1-b2e6-2ab72d7989c0.png">
- Comment out `query-service` section at [`deploy/docker/docker-compose.yaml#L41`,](https://github.com/SigNoz/signoz/blob/main/deploy/docker/docker-compose.yaml#L41)
<img width="1068" alt="Screenshot 2022-07-14 at 22 48 07" src="https://user-images.githubusercontent.com/52788043/179044151-a65ba571-db0b-4a16-b64b-ca3fadcf3af0.png">
- add below configuration to `clickhouse` section at [`deploy/docker/docker-compose.yaml`,](https://github.com/SigNoz/signoz/blob/main/deploy/docker/docker-compose.yaml)
```
ports:
- 9001:9000
```
<img width="1013" alt="Screenshot 2022-07-14 at 22 50 37" src="https://user-images.githubusercontent.com/52788043/179044544-a293d3bc-4c4f-49ea-a276-505a381de67d.png">
- run `cd pkg/query-service/` to move to `query-service` directory,
- Then, you need to create a `.env` file with the following environment variable
```
SIGNOZ_SQLSTORE_SQLITE_PATH="./signoz.db"
```
to set your local environment with the right `RELATIONAL_DATASOURCE_PATH` as mentioned in [`./constants/constants.go#L38`,](https://github.com/SigNoz/signoz/blob/main/pkg/query-service/constants/constants.go#L38)
- Now, install SigNoz locally **without** the `frontend` and `query-service`,
- If you are using `x86_64` processors (All Intel/AMD processors) run `sudo make run-x86`
- If you are on `arm64` processors (Apple M1 Macs) run `sudo make run-arm`
#### Run locally,
```
ClickHouseUrl=tcp://localhost:9001 STORAGE=clickhouse go run main.go
```
#### Build and Run locally
```
cd pkg/query-service
go build -o build/query-service main.go
ClickHouseUrl=tcp://localhost:9001 STORAGE=clickhouse build/query-service
```
#### Docker Images
The docker images of query-service is available at https://hub.docker.com/r/signoz/query-service
```
docker pull signoz/query-service
```
```
docker pull signoz/query-service:latest
```
```
docker pull signoz/query-service:develop
```
### Important Note:
**Query Service should now be available at** [`http://localhost:8080`](http://localhost:8080)
If you want to see how the frontend plays with query service, you can run the frontend also in your local env with the baseURL changed to `http://localhost:8080` in file [`frontend/src/constants/env.ts`](https://github.com/SigNoz/signoz/blob/main/frontend/src/constants/env.ts) as the `query-service` is now running at port `8080`.
<!-- Instead of configuring a local setup, you can also use [Gitpod](https://www.gitpod.io/), a VSCode-based Web IDE.
Click the button below. A workspace with all required environments will be created.
[![Open in Gitpod](https://gitpod.io/button/open-in-gitpod.svg)](https://gitpod.io/#https://github.com/SigNoz/signoz)
> To use it on your forked repo, edit the 'Open in Gitpod' button URL to `https://gitpod.io/#https://github.com/<your-github-username>/signoz` -->
**[`^top^`](#contributing-guidelines)**
<hr>
# 5. Contribute to SigNoz Helm Chart 📊
**Need to Update: [https://github.com/SigNoz/charts](https://github.com/SigNoz/charts).**
## 5.1 To run helm chart for local development
- Clone the SigNoz repository and cd into charts directory,
```
git clone https://github.com/SigNoz/charts.git && cd charts
```
- It is recommended to use lightweight kubernetes (k8s) cluster for local development:
- [kind](https://kind.sigs.k8s.io/docs/user/quick-start/#installation)
- [k3d](https://k3d.io/#installation)
- [minikube](https://minikube.sigs.k8s.io/docs/start/)
- create a k8s cluster and make sure `kubectl` points to the locally created k8s cluster,
- run `make dev-install` to install SigNoz chart with `my-release` release name in `platform` namespace,
- next run,
```
kubectl -n platform port-forward svc/my-release-signoz-frontend 3301:3301
```
to make SigNoz UI available at [localhost:3301](http://localhost:3301)
**5.1.1 To install the HotROD sample app:**
```bash
curl -sL https://github.com/SigNoz/signoz/raw/main/sample-apps/hotrod/hotrod-install.sh \
| HELM_RELEASE=my-release SIGNOZ_NAMESPACE=platform bash
```
**5.1.2 To load data with the HotROD sample app:**
```bash
kubectl -n sample-application run strzal --image=djbingham/curl \
--restart='OnFailure' -i --tty --rm --command -- curl -X POST -F \
'user_count=6' -F 'spawn_rate=2' http://locust-master:8089/swarm
```
**5.1.3 To stop the load generation:**
```bash
kubectl -n sample-application run strzal --image=djbingham/curl \
--restart='OnFailure' -i --tty --rm --command -- curl \
http://locust-master:8089/stop
```
**5.1.4 To delete the HotROD sample app:**
```bash
curl -sL https://github.com/SigNoz/signoz/raw/main/sample-apps/hotrod/hotrod-delete.sh \
| HOTROD_NAMESPACE=sample-application bash
```
**[`^top^`](#contributing-guidelines)**
---
# 6. Contribute to Dashboards 📈
**Need to Update: [https://github.com/SigNoz/dashboards](https://github.com/SigNoz/dashboards)**
To contribute a new dashboard template for any service, follow the contribution guidelines in the [Dashboard Contributing Guide](https://github.com/SigNoz/dashboards/blob/main/CONTRIBUTING.md). In brief:
1. Create a dashboard JSON file.
2. Add a README file explaining the dashboard, the metrics ingested, and the configurations needed.
3. Include screenshots of the dashboard in the `assets/` directory.
4. Submit a pull request for review.
## Other Ways to Contribute
There are many other ways to get involved with the community and to participate in this project:
- Use the product, submitting GitHub issues when a problem is found.
- Help code review pull requests and participate in issue threads.
- Submit a new feature request as an issue.
- Help answer questions on forums such as Stack Overflow and [SigNoz Community Slack Channel](https://signoz.io/slack).
- Tell others about the project on Twitter, your blog, etc.
Again, Feel free to ping us on [`#contributing`](https://signoz-community.slack.com/archives/C01LWQ8KS7M) or [`#contributing-frontend`](https://signoz-community.slack.com/archives/C027134DM8B) on our slack community if you need any help on this :)
Thank You!
- Set up your [development environment](docs/contributing/development.md)

Makefile

@@ -7,8 +7,10 @@ BUILD_VERSION ?= $(shell git describe --always --tags)
BUILD_HASH ?= $(shell git rev-parse --short HEAD)
BUILD_TIME ?= $(shell date -u +"%Y-%m-%dT%H:%M:%SZ")
BUILD_BRANCH ?= $(shell git rev-parse --abbrev-ref HEAD)
LICENSE_SIGNOZ_IO ?= https://license.signoz.io/api/v1
DEV_LICENSE_SIGNOZ_IO ?= https://staging-license.signoz.io/api/v1
ZEUS_URL ?= https://api.signoz.cloud
DEV_ZEUS_URL ?= https://api.staging.signoz.cloud
DEV_BUILD ?= "" # set to any non-empty value to enable dev build
# Internal variables or constants.
@@ -18,15 +20,16 @@ EE_QUERY_SERVICE_DIRECTORY ?= ee/query-service
STANDALONE_DIRECTORY ?= deploy/docker
SWARM_DIRECTORY ?= deploy/docker-swarm
CH_HISTOGRAM_QUANTILE_DIRECTORY ?= scripts/clickhouse/histogramquantile
GORELEASER_BIN ?= goreleaser
GOOS ?= $(shell go env GOOS)
GOARCH ?= $(shell go env GOARCH)
GOPATH ?= $(shell go env GOPATH)
REPONAME ?= signoz
DOCKER_TAG ?= $(subst v,,$(BUILD_VERSION))
FRONTEND_DOCKER_IMAGE ?= frontend
QUERY_SERVICE_DOCKER_IMAGE ?= query-service
DOCKER_TAG ?= $(BUILD_VERSION)
SIGNOZ_DOCKER_IMAGE ?= signoz
SIGNOZ_COMMUNITY_DOCKER_IMAGE ?= signoz-community
# Build-time Go variables
PACKAGE?=go.signoz.io/signoz
@@ -37,10 +40,46 @@ gitBranch=${PACKAGE}/pkg/query-service/version.gitBranch
licenseSignozIo=${PACKAGE}/ee/query-service/constants.LicenseSignozIo
zeusURL=${PACKAGE}/ee/query-service/constants.ZeusURL
LD_FLAGS=-X ${buildHash}=${BUILD_HASH} -X ${buildTime}=${BUILD_TIME} -X ${buildVersion}=${BUILD_VERSION} -X ${gitBranch}=${BUILD_BRANCH} -X ${zeusURL}=${ZEUS_URL}
DEV_LD_FLAGS=-X ${licenseSignozIo}=${DEV_LICENSE_SIGNOZ_IO}
LD_FLAGS=-X ${buildHash}=${BUILD_HASH} -X ${buildTime}=${BUILD_TIME} -X ${buildVersion}=${BUILD_VERSION} -X ${gitBranch}=${BUILD_BRANCH}
PROD_LD_FLAGS=-X ${zeusURL}=${ZEUS_URL} -X ${licenseSignozIo}=${LICENSE_SIGNOZ_IO}
DEV_LD_FLAGS=-X ${zeusURL}=${DEV_ZEUS_URL} -X ${licenseSignozIo}=${DEV_LICENSE_SIGNOZ_IO}
all: build-push-frontend build-push-query-service
##############################################################
# common commands
##############################################################
.PHONY: help
help: ## Displays help.
@awk 'BEGIN {FS = ":.*##"; printf "\nUsage:\n make \033[36m<target>\033[0m\n\nTargets:\n"} /^[a-z0-9A-Z_-]+:.*?##/ { printf " \033[36m%-40s\033[0m %s\n", $$1, $$2 }' $(MAKEFILE_LIST)
##############################################################
# devenv commands
##############################################################
.PHONY: devenv-clickhouse
devenv-clickhouse: ## Run clickhouse in devenv
@cd .devenv/docker/clickhouse; \
docker compose -f compose.yaml up -d
##############################################################
# run commands
##############################################################
.PHONY: run-go
run-go: ## Runs the go backend server
@SIGNOZ_INSTRUMENTATION_LOGS_LEVEL=debug \
SIGNOZ_SQLSTORE_SQLITE_PATH=signoz.db \
SIGNOZ_WEB_ENABLED=false \
SIGNOZ_JWT_SECRET=secret \
SIGNOZ_ALERTMANAGER_PROVIDER=signoz \
SIGNOZ_TELEMETRYSTORE_PROVIDER=clickhouse \
SIGNOZ_TELEMETRYSTORE_CLICKHOUSE_DSN=tcp://127.0.0.1:9000 \
go run -race \
./ee/query-service/main.go \
--config ./pkg/query-service/config/prometheus.yml \
--cluster cluster \
--use-logs-new-schema true \
--use-trace-new-schema true
all: build-push-frontend build-push-signoz
# Steps to build static files of frontend
build-frontend-static:
@@ -53,95 +92,67 @@ build-frontend-static:
yarn build && \
ls -l build
# Steps to build and push docker image of frontend
.PHONY: build-frontend-amd64 build-push-frontend
# Step to build docker image of frontend in amd64 (used in build pipeline)
build-frontend-amd64: build-frontend-static
# Steps to build static binary of signoz
.PHONY: build-signoz-static
build-signoz-static:
@echo "------------------"
@echo "--> Building frontend docker image for amd64"
@echo "------------------"
@cd $(FRONTEND_DIRECTORY) && \
docker build --file Dockerfile -t $(REPONAME)/$(FRONTEND_DOCKER_IMAGE):$(DOCKER_TAG) \
--build-arg TARGETPLATFORM="linux/amd64" .
# Step to build and push docker image of frontend(used in push pipeline)
build-push-frontend: build-frontend-static
@echo "------------------"
@echo "--> Building and pushing frontend docker image"
@echo "------------------"
@cd $(FRONTEND_DIRECTORY) && \
docker buildx build --file Dockerfile --progress plain --push --platform linux/arm64,linux/amd64 \
--tag $(REPONAME)/$(FRONTEND_DOCKER_IMAGE):$(DOCKER_TAG) .
# Steps to build static binary of query service
.PHONY: build-query-service-static
build-query-service-static:
@echo "------------------"
@echo "--> Building query-service static binary"
@echo "--> Building signoz static binary"
@echo "------------------"
@if [ $(DEV_BUILD) != "" ]; then \
cd $(QUERY_SERVICE_DIRECTORY) && \
CGO_ENABLED=1 go build -tags timetzdata -a -o ./bin/query-service-${GOOS}-${GOARCH} \
cd $(EE_QUERY_SERVICE_DIRECTORY) && \
CGO_ENABLED=1 go build -tags timetzdata -a -o ./bin/signoz-${GOOS}-${GOARCH} \
-ldflags "-linkmode external -extldflags '-static' -s -w ${LD_FLAGS} ${DEV_LD_FLAGS}"; \
else \
cd $(QUERY_SERVICE_DIRECTORY) && \
CGO_ENABLED=1 go build -tags timetzdata -a -o ./bin/query-service-${GOOS}-${GOARCH} \
-ldflags "-linkmode external -extldflags '-static' -s -w ${LD_FLAGS}"; \
cd $(EE_QUERY_SERVICE_DIRECTORY) && \
CGO_ENABLED=1 go build -tags timetzdata -a -o ./bin/signoz-${GOOS}-${GOARCH} \
-ldflags "-linkmode external -extldflags '-static' -s -w ${LD_FLAGS} ${PROD_LD_FLAGS}"; \
fi
.PHONY: build-query-service-static-amd64
build-query-service-static-amd64:
make GOARCH=amd64 build-query-service-static
.PHONY: build-signoz-static-amd64
build-signoz-static-amd64:
make GOARCH=amd64 build-signoz-static
.PHONY: build-query-service-static-arm64
build-query-service-static-arm64:
make CC=aarch64-linux-gnu-gcc GOARCH=arm64 build-query-service-static
.PHONY: build-signoz-static-arm64
build-signoz-static-arm64:
make CC=aarch64-linux-gnu-gcc GOARCH=arm64 build-signoz-static
# Steps to build static binary of query service for all platforms
.PHONY: build-query-service-static-all
build-query-service-static-all: build-query-service-static-amd64 build-query-service-static-arm64 build-frontend-static
# Steps to build static binary of signoz for all platforms
.PHONY: build-signoz-static-all
build-signoz-static-all: build-signoz-static-amd64 build-signoz-static-arm64 build-frontend-static
# Steps to build and push docker image of query service
.PHONY: build-query-service-amd64 build-push-query-service
# Step to build docker image of query service in amd64 (used in build pipeline)
build-query-service-amd64: build-query-service-static-amd64 build-frontend-static
# Steps to build and push docker image of signoz
.PHONY: build-signoz-amd64 build-push-signoz
# Step to build docker image of signoz in amd64 (used in build pipeline)
build-signoz-amd64: build-signoz-static-amd64 build-frontend-static
@echo "------------------"
@echo "--> Building query-service docker image for amd64"
@echo "--> Building signoz docker image for amd64"
@echo "------------------"
@docker build --file $(QUERY_SERVICE_DIRECTORY)/Dockerfile \
--tag $(REPONAME)/$(QUERY_SERVICE_DOCKER_IMAGE):$(DOCKER_TAG) \
@docker build --file $(EE_QUERY_SERVICE_DIRECTORY)/Dockerfile \
--tag $(REPONAME)/$(SIGNOZ_DOCKER_IMAGE):$(DOCKER_TAG) \
--build-arg TARGETPLATFORM="linux/amd64" .
# Step to build and push docker image of query in amd64 and arm64 (used in push pipeline)
build-push-query-service: build-query-service-static-all
build-push-signoz: build-signoz-static-all
@echo "------------------"
@echo "--> Building and pushing query-service docker image"
@echo "--> Building and pushing signoz docker image"
@echo "------------------"
@docker buildx build --file $(QUERY_SERVICE_DIRECTORY)/Dockerfile --progress plain \
@docker buildx build --file $(EE_QUERY_SERVICE_DIRECTORY)/Dockerfile --progress plain \
--push --platform linux/arm64,linux/amd64 \
--tag $(REPONAME)/$(QUERY_SERVICE_DOCKER_IMAGE):$(DOCKER_TAG) .
--tag $(REPONAME)/$(SIGNOZ_DOCKER_IMAGE):$(DOCKER_TAG) .
# Step to build EE docker image of query service in amd64 (used in build pipeline)
build-ee-query-service-amd64:
# Step to build docker image of signoz community in amd64 (used in build pipeline)
build-signoz-community-amd64:
@echo "------------------"
@echo "--> Building query-service docker image for amd64"
@echo "--> Building signoz docker image for amd64"
@echo "------------------"
make QUERY_SERVICE_DIRECTORY=${EE_QUERY_SERVICE_DIRECTORY} build-query-service-amd64
make EE_QUERY_SERVICE_DIRECTORY=${QUERY_SERVICE_DIRECTORY} SIGNOZ_DOCKER_IMAGE=${SIGNOZ_COMMUNITY_DOCKER_IMAGE} build-signoz-amd64
# Step to build and push EE docker image of query in amd64 and arm64 (used in push pipeline)
build-push-ee-query-service:
# Step to build and push docker image of signoz community in amd64 and arm64 (used in push pipeline)
build-push-signoz-community:
@echo "------------------"
@echo "--> Building and pushing query-service docker image"
@echo "------------------"
make QUERY_SERVICE_DIRECTORY=${EE_QUERY_SERVICE_DIRECTORY} build-push-query-service
dev-setup:
mkdir -p /var/lib/signoz
sqlite3 /var/lib/signoz/signoz.db "VACUUM";
mkdir -p pkg/query-service/config/dashboards
@echo "------------------"
@echo "--> Local Setup completed"
@echo "--> Building and pushing signoz community docker image"
@echo "------------------"
make EE_QUERY_SERVICE_DIRECTORY=${QUERY_SERVICE_DIRECTORY} SIGNOZ_DOCKER_IMAGE=${SIGNOZ_COMMUNITY_DOCKER_IMAGE} build-push-signoz
pull-signoz:
@docker-compose -f $(STANDALONE_DIRECTORY)/docker-compose.yaml pull
@@ -155,22 +166,6 @@ run-testing:
down-signoz:
@docker-compose -f $(STANDALONE_DIRECTORY)/docker-compose.yaml down -v
clear-standalone-data:
@docker run --rm -v "$(PWD)/$(STANDALONE_DIRECTORY)/data:/pwd" busybox \
sh -c "cd /pwd && rm -rf alertmanager/* clickhouse*/* signoz/* zookeeper-*/*"
clear-swarm-data:
@docker run --rm -v "$(PWD)/$(SWARM_DIRECTORY)/data:/pwd" busybox \
sh -c "cd /pwd && rm -rf alertmanager/* clickhouse*/* signoz/* zookeeper-*/*"
clear-standalone-ch:
@docker run --rm -v "$(PWD)/$(STANDALONE_DIRECTORY)/data:/pwd" busybox \
sh -c "cd /pwd && rm -rf clickhouse*/* zookeeper-*/*"
clear-swarm-ch:
@docker run --rm -v "$(PWD)/$(SWARM_DIRECTORY)/data:/pwd" busybox \
sh -c "cd /pwd && rm -rf clickhouse*/* zookeeper-*/*"
check-no-ee-references:
@echo "Checking for 'ee' package references in 'pkg' directory..."
@if grep -R --include="*.go" '.*/ee/.*' pkg/; then \
@@ -183,14 +178,42 @@ check-no-ee-references:
test:
go test ./pkg/...
########################################################
# Goreleaser
########################################################
.PHONY: gor-snapshot gor-snapshot-histogram-quantile gor-snapshot-signoz gor-snapshot-signoz-community gor-split gor-split-histogram-quantile gor-split-signoz gor-split-signoz-community gor-merge
goreleaser-snapshot:
gor-snapshot:
@if [[ ${GORELEASER_WORKDIR} ]]; then \
cd ${GORELEASER_WORKDIR} && \
goreleaser release --clean --snapshot; \
cd -; \
${GORELEASER_BIN} release --config ${GORELEASER_WORKDIR}/.goreleaser.yaml --clean --snapshot; \
else \
goreleaser release --clean --snapshot; \
${GORELEASER_BIN} release --clean --snapshot; \
fi
goreleaser-snapshot-histogram-quantile:
gor-snapshot-histogram-quantile:
make GORELEASER_WORKDIR=$(CH_HISTOGRAM_QUANTILE_DIRECTORY) gor-snapshot
gor-snapshot-signoz: build-frontend-static
make GORELEASER_WORKDIR=$(EE_QUERY_SERVICE_DIRECTORY) gor-snapshot
gor-snapshot-signoz-community: build-frontend-static
make GORELEASER_WORKDIR=$(QUERY_SERVICE_DIRECTORY) gor-snapshot
gor-split:
@if [[ ${GORELEASER_WORKDIR} ]]; then \
${GORELEASER_BIN} release --config ${GORELEASER_WORKDIR}/.goreleaser.yaml --clean --split; \
else \
${GORELEASER_BIN} release --clean --split; \
fi
gor-split-histogram-quantile:
make GORELEASER_WORKDIR=$(CH_HISTOGRAM_QUANTILE_DIRECTORY) gor-split
gor-split-signoz: build-frontend-static
make GORELEASER_WORKDIR=$(EE_QUERY_SERVICE_DIRECTORY) gor-split
gor-split-signoz-community: build-frontend-static
make GORELEASER_WORKDIR=$(QUERY_SERVICE_DIRECTORY) gor-split
gor-merge:
${GORELEASER_BIN} continue --merge

conf/cache-config.yml Normal file

@@ -0,0 +1,4 @@
provider: "inmemory"
inmemory:
ttl: 60m
cleanupInterval: 10m

conf/prometheus.yml Normal file

@@ -0,0 +1,25 @@
# my global config
global:
scrape_interval: 5s # Set the scrape interval to every 5 seconds. Default is every 1 minute.
evaluation_interval: 15s # Evaluate rules every 15 seconds. The default is every 1 minute.
# scrape_timeout is set to the global default (10s).
# Alertmanager configuration
alerting:
alertmanagers:
- static_configs:
- targets:
- 127.0.0.1:9093
# Load rules once and periodically evaluate them according to the global 'evaluation_interval'.
rule_files:
# - "first_rules.yml"
# - "second_rules.yml"
- 'alerts.yml'
# A scrape configuration containing exactly one endpoint to scrape:
# Here it's Prometheus itself.
scrape_configs: []
remote_read:
- url: tcp://localhost:9000/signoz_metrics


@@ -26,7 +26,7 @@ cd deploy/docker
docker compose up -d
```
Open http://localhost:3301 in your favourite browser.
Open http://localhost:8080 in your favourite browser.
To start collecting logs and metrics from your infrastructure, run the following command:
@@ -55,7 +55,7 @@ cd deploy/docker-swarm
docker stack deploy -c docker-compose.yaml signoz
```
Open http://localhost:3301 in your favourite browser.
Open http://localhost:8080 in your favourite browser.
To start collecting logs and metrics from your infrastructure, run the following command:


@@ -1,64 +0,0 @@
map $http_upgrade $connection_upgrade {
default upgrade;
'' close;
}
server {
listen 3301;
server_name _;
gzip on;
gzip_static on;
gzip_types text/plain text/css application/json application/x-javascript text/xml application/xml application/xml+rss text/javascript;
gzip_proxied any;
gzip_vary on;
gzip_comp_level 6;
gzip_buffers 16 8k;
gzip_http_version 1.1;
# to handle uri issue 414 from nginx
client_max_body_size 24M;
large_client_header_buffers 8 128k;
location / {
if ( $uri = '/index.html' ) {
add_header Cache-Control no-store always;
}
root /usr/share/nginx/html;
index index.html index.htm;
try_files $uri $uri/ /index.html;
}
location ~ ^/api/(v1|v3)/logs/(tail|livetail){
proxy_pass http://query-service:8080;
proxy_http_version 1.1;
# connection will be closed if no data is read for 600s between successive read operations
proxy_read_timeout 600s;
# dont buffer the data send it directly to client.
proxy_buffering off;
proxy_cache off;
}
location /api {
proxy_pass http://query-service:8080/api;
# connection will be closed if no data is read for 600s between successive read operations
proxy_read_timeout 600s;
}
location /ws {
proxy_pass http://query-service:8080/ws;
proxy_http_version 1.1;
proxy_set_header Upgrade "websocket";
proxy_set_header Connection "upgrade";
proxy_read_timeout 86400;
}
# redirect server error pages to the static page /50x.html
#
error_page 500 502 503 504 /50x.html;
location = /50x.html {
root /usr/share/nginx/html;
}
}


@@ -1 +1 @@
server_endpoint: ws://query-service:4320/v1/opamp
server_endpoint: ws://signoz:4320/v1/opamp


@@ -172,19 +172,9 @@ services:
- ../common/clickhouse/cluster.ha.xml:/etc/clickhouse-server/config.d/cluster.xml
- ./clickhouse-setup/data/clickhouse-3/:/var/lib/clickhouse/
# - ../common/clickhouse/storage.xml:/etc/clickhouse-server/config.d/storage.xml
alertmanager:
!!merge <<: *common
image: signoz/alertmanager:0.23.7
command:
- --queryService.url=http://query-service:8085
- --storage.path=/data
volumes:
- ./clickhouse-setup/data/alertmanager:/data
depends_on:
- query-service
query-service:
signoz:
!!merge <<: *db-depend
image: signoz/query-service:0.75.0
image: signoz/signoz:v0.76.0
command:
- --config=/root/config/prometheus.yml
- --use-logs-new-schema=true
@@ -197,14 +187,15 @@ services:
- ../common/dashboards:/root/config/dashboards
- ./clickhouse-setup/data/signoz/:/var/lib/signoz/
environment:
- ClickHouseUrl=tcp://clickhouse:9000
- ALERTMANAGER_API_PREFIX=http://alertmanager:9093/api/
- SIGNOZ_ALERTMANAGER_PROVIDER=signoz
- SIGNOZ_TELEMETRYSTORE_CLICKHOUSE_DSN=tcp://clickhouse:9000
- SIGNOZ_SQLSTORE_SQLITE_PATH=/var/lib/signoz/signoz.db
- DASHBOARDS_PATH=/root/config/dashboards
- STORAGE=clickhouse
- GODEBUG=netdns=go
- TELEMETRY_ENABLED=true
- DEPLOYMENT_TYPE=docker-swarm
- SIGNOZ_JWT_SECRET=secret
healthcheck:
test:
- CMD
@@ -215,19 +206,9 @@ services:
interval: 30s
timeout: 5s
retries: 3
frontend:
!!merge <<: *common
image: signoz/frontend:0.75.0
depends_on:
- alertmanager
- query-service
ports:
- "3301:3301"
volumes:
- ../common/signoz/nginx-config.conf:/etc/nginx/conf.d/default.conf
otel-collector:
!!merge <<: *db-depend
image: signoz/signoz-otel-collector:0.111.29
image: signoz/signoz-otel-collector:0.111.30
command:
- --config=/etc/otel-collector-config.yaml
- --manager-config=/etc/manager-config.yaml
@@ -248,10 +229,10 @@ services:
depends_on:
- clickhouse
- schema-migrator
- query-service
- signoz
schema-migrator:
!!merge <<: *common
image: signoz/signoz-schema-migrator:0.111.29
image: signoz/signoz-schema-migrator:0.111.30
deploy:
restart_policy:
condition: on-failure
@@ -266,8 +247,6 @@ networks:
signoz-net:
name: signoz-net
volumes:
alertmanager:
name: signoz-alertmanager
clickhouse:
name: signoz-clickhouse
clickhouse-2:


@@ -108,19 +108,9 @@ services:
- ../common/clickhouse/cluster.xml:/etc/clickhouse-server/config.d/cluster.xml
- clickhouse:/var/lib/clickhouse/
# - ../common/clickhouse/storage.xml:/etc/clickhouse-server/config.d/storage.xml
alertmanager:
!!merge <<: *common
image: signoz/alertmanager:0.23.7
command:
- --queryService.url=http://query-service:8085
- --storage.path=/data
volumes:
- alertmanager:/data
depends_on:
- query-service
query-service:
signoz:
!!merge <<: *db-depend
image: signoz/query-service:0.75.0
image: signoz/signoz:v0.76.0
command:
- --config=/root/config/prometheus.yml
- --use-logs-new-schema=true
@@ -133,8 +123,8 @@ services:
- ../common/dashboards:/root/config/dashboards
- sqlite:/var/lib/signoz/
environment:
- ClickHouseUrl=tcp://clickhouse:9000
- ALERTMANAGER_API_PREFIX=http://alertmanager:9093/api/
- SIGNOZ_ALERTMANAGER_PROVIDER=signoz
- SIGNOZ_TELEMETRYSTORE_CLICKHOUSE_DSN=tcp://clickhouse:9000
- SIGNOZ_SQLSTORE_SQLITE_PATH=/var/lib/signoz/signoz.db
- DASHBOARDS_PATH=/root/config/dashboards
- STORAGE=clickhouse
@@ -151,19 +141,9 @@ services:
interval: 30s
timeout: 5s
retries: 3
frontend:
!!merge <<: *common
image: signoz/frontend:0.75.0
depends_on:
- alertmanager
- query-service
ports:
- "3301:3301"
volumes:
- ../common/signoz/nginx-config.conf:/etc/nginx/conf.d/default.conf
otel-collector:
!!merge <<: *db-depend
image: signoz/signoz-otel-collector:0.111.29
image: signoz/signoz-otel-collector:0.111.30
command:
- --config=/etc/otel-collector-config.yaml
- --manager-config=/etc/manager-config.yaml
@@ -184,10 +164,10 @@ services:
depends_on:
- clickhouse
- schema-migrator
- query-service
- signoz
schema-migrator:
!!merge <<: *common
image: signoz/signoz-schema-migrator:0.111.29
image: signoz/signoz-schema-migrator:0.111.30
deploy:
restart_policy:
condition: on-failure
@@ -202,8 +182,6 @@ networks:
signoz-net:
name: signoz-net
volumes:
alertmanager:
name: signoz-alertmanager
clickhouse:
name: signoz-clickhouse
sqlite:


@@ -42,7 +42,7 @@ receivers:
# please remove names from below if you want to collect logs from them
- type: filter
id: signoz_logs_filter
expr: 'attributes.container_name matches "^(signoz_(logspout|alertmanager|query-service|otel-collector|clickhouse|zookeeper))|(infra_(logspout|otel-agent|otel-metrics)).*"'
expr: 'attributes.container_name matches "^(signoz_(logspout|signoz|otel-collector|clickhouse|zookeeper))|(infra_(logspout|otel-agent|otel-metrics)).*"'
processors:
batch:
send_batch_size: 10000


@@ -175,36 +175,24 @@ services:
- ../common/clickhouse/cluster.ha.xml:/etc/clickhouse-server/config.d/cluster.xml
- clickhouse-3:/var/lib/clickhouse/
# - ../common/clickhouse/storage.xml:/etc/clickhouse-server/config.d/storage.xml
alertmanager:
!!merge <<: *common
image: signoz/alertmanager:${ALERTMANAGER_TAG:-0.23.7}
container_name: signoz-alertmanager
command:
- --queryService.url=http://query-service:8085
- --storage.path=/data
volumes:
- alertmanager:/data
depends_on:
query-service:
condition: service_healthy
query-service:
signoz:
!!merge <<: *db-depend
image: signoz/query-service:${DOCKER_TAG:-0.75.0}
container_name: signoz-query-service
image: signoz/signoz:${DOCKER_TAG:-v0.76.0}
container_name: signoz
command:
- --config=/root/config/prometheus.yml
- --use-logs-new-schema=true
- --use-trace-new-schema=true
# ports:
# - "3301:8080" # signoz port
ports:
- "8080:8080" # signoz port
# - "6060:6060" # pprof port
volumes:
- ../common/signoz/prometheus.yml:/root/config/prometheus.yml
- ../common/dashboards:/root/config/dashboards
- sqlite:/var/lib/signoz/
environment:
- ClickHouseUrl=tcp://clickhouse:9000
- ALERTMANAGER_API_PREFIX=http://alertmanager:9093/api/
- SIGNOZ_ALERTMANAGER_PROVIDER=signoz
- SIGNOZ_TELEMETRYSTORE_CLICKHOUSE_DSN=tcp://clickhouse:9000
- SIGNOZ_SQLSTORE_SQLITE_PATH=/var/lib/signoz/signoz.db
- DASHBOARDS_PATH=/root/config/dashboards
- STORAGE=clickhouse
@@ -221,21 +209,10 @@ services:
interval: 30s
timeout: 5s
retries: 3
frontend:
!!merge <<: *common
image: signoz/frontend:${DOCKER_TAG:-0.75.0}
container_name: signoz-frontend
depends_on:
- alertmanager
- query-service
ports:
- "3301:3301"
volumes:
- ../common/signoz/nginx-config.conf:/etc/nginx/conf.d/default.conf
# TODO: support otel-collector multiple replicas. Nginx/Traefik for loadbalancing?
otel-collector:
!!merge <<: *db-depend
image: signoz/signoz-otel-collector:${OTELCOL_TAG:-0.111.29}
image: signoz/signoz-otel-collector:${OTELCOL_TAG:-0.111.30}
container_name: signoz-otel-collector
command:
- --config=/etc/otel-collector-config.yaml
@@ -257,11 +234,11 @@ services:
condition: service_healthy
schema-migrator-sync:
condition: service_completed_successfully
query-service:
signoz:
condition: service_healthy
schema-migrator-sync:
!!merge <<: *common
image: signoz/signoz-schema-migrator:${OTELCOL_TAG:-0.111.29}
image: signoz/signoz-schema-migrator:${OTELCOL_TAG:-0.111.30}
container_name: schema-migrator-sync
command:
- sync
@@ -272,7 +249,7 @@ services:
condition: service_healthy
schema-migrator-async:
!!merge <<: *db-depend
image: signoz/signoz-schema-migrator:${OTELCOL_TAG:-0.111.29}
image: signoz/signoz-schema-migrator:${OTELCOL_TAG:-0.111.30}
container_name: schema-migrator-async
command:
- async
@@ -283,8 +260,6 @@ networks:
signoz-net:
name: signoz-net
volumes:
alertmanager:
name: signoz-alertmanager
clickhouse:
name: signoz-clickhouse
clickhouse-2:


@@ -108,22 +108,10 @@ services:
- ../common/clickhouse/cluster.xml:/etc/clickhouse-server/config.d/cluster.xml
- clickhouse:/var/lib/clickhouse/
# - ../common/clickhouse/storage.xml:/etc/clickhouse-server/config.d/storage.xml
alertmanager:
!!merge <<: *common
image: signoz/alertmanager:${ALERTMANAGER_TAG:-0.23.7}
container_name: signoz-alertmanager
command:
- --queryService.url=http://query-service:8085
- --storage.path=/data
volumes:
- alertmanager:/data
depends_on:
query-service:
condition: service_healthy
query-service:
signoz:
!!merge <<: *db-depend
image: signoz/query-service:${DOCKER_TAG:-0.75.0}
container_name: signoz-query-service
image: signoz/signoz:${DOCKER_TAG:-v0.76.0}
container_name: signoz
command:
- --config=/root/config/prometheus.yml
- --gateway-url=https://api.staging.signoz.cloud
@@ -137,8 +125,8 @@ services:
- ../common/dashboards:/root/config/dashboards
- sqlite:/var/lib/signoz/
environment:
- ClickHouseUrl=tcp://clickhouse:9000
- ALERTMANAGER_API_PREFIX=http://alertmanager:9093/api/
- SIGNOZ_ALERTMANAGER_PROVIDER=signoz
- SIGNOZ_TELEMETRYSTORE_CLICKHOUSE_DSN=tcp://clickhouse:9000
- SIGNOZ_SQLSTORE_SQLITE_PATH=/var/lib/signoz/signoz.db
- DASHBOARDS_PATH=/root/config/dashboards
- STORAGE=clickhouse
@@ -156,20 +144,9 @@ services:
interval: 30s
timeout: 5s
retries: 3
frontend:
!!merge <<: *common
image: signoz/frontend:${DOCKER_TAG:-0.75.0}
container_name: signoz-frontend
depends_on:
- alertmanager
- query-service
ports:
- "3301:3301"
volumes:
- ../common/signoz/nginx-config.conf:/etc/nginx/conf.d/default.conf
otel-collector:
!!merge <<: *db-depend
image: signoz/signoz-otel-collector:${OTELCOL_TAG:-0.111.29}
image: signoz/signoz-otel-collector:${OTELCOL_TAG:-0.111.30}
container_name: signoz-otel-collector
command:
- --config=/etc/otel-collector-config.yaml
@@ -187,11 +164,11 @@ services:
- "4317:4317" # OTLP gRPC receiver
- "4318:4318" # OTLP HTTP receiver
depends_on:
query-service:
signoz:
condition: service_healthy
schema-migrator-sync:
!!merge <<: *common
image: signoz/signoz-schema-migrator:${OTELCOL_TAG:-0.111.29}
image: signoz/signoz-schema-migrator:${OTELCOL_TAG:-0.111.30}
container_name: schema-migrator-sync
command:
- sync
@@ -203,7 +180,7 @@ services:
restart: on-failure
schema-migrator-async:
!!merge <<: *db-depend
image: signoz/signoz-schema-migrator:${OTELCOL_TAG:-0.111.29}
image: signoz/signoz-schema-migrator:${OTELCOL_TAG:-0.111.30}
container_name: schema-migrator-async
command:
- async
@@ -214,8 +191,6 @@ networks:
signoz-net:
name: signoz-net
volumes:
alertmanager:
name: signoz-alertmanager
clickhouse:
name: signoz-clickhouse
sqlite:


@@ -108,36 +108,24 @@ services:
- ../common/clickhouse/cluster.xml:/etc/clickhouse-server/config.d/cluster.xml
- clickhouse:/var/lib/clickhouse/
# - ../common/clickhouse/storage.xml:/etc/clickhouse-server/config.d/storage.xml
alertmanager:
!!merge <<: *common
image: signoz/alertmanager:${ALERTMANAGER_TAG:-0.23.7}
container_name: signoz-alertmanager
command:
- --queryService.url=http://query-service:8085
- --storage.path=/data
volumes:
- alertmanager:/data
depends_on:
query-service:
condition: service_healthy
query-service:
signoz:
!!merge <<: *db-depend
image: signoz/query-service:${DOCKER_TAG:-0.75.0}
container_name: signoz-query-service
image: signoz/signoz:${DOCKER_TAG:-v0.76.0}
container_name: signoz
command:
- --config=/root/config/prometheus.yml
- --use-logs-new-schema=true
- --use-trace-new-schema=true
# ports:
# - "3301:8080" # signoz port
ports:
- "8080:8080" # signoz port
# - "6060:6060" # pprof port
volumes:
- ../common/signoz/prometheus.yml:/root/config/prometheus.yml
- ../common/dashboards:/root/config/dashboards
- sqlite:/var/lib/signoz/
environment:
- ClickHouseUrl=tcp://clickhouse:9000
- ALERTMANAGER_API_PREFIX=http://alertmanager:9093/api/
- SIGNOZ_ALERTMANAGER_PROVIDER=signoz
- SIGNOZ_TELEMETRYSTORE_CLICKHOUSE_DSN=tcp://clickhouse:9000
- SIGNOZ_SQLSTORE_SQLITE_PATH=/var/lib/signoz/signoz.db
- DASHBOARDS_PATH=/root/config/dashboards
- STORAGE=clickhouse
@@ -154,20 +142,9 @@ services:
interval: 30s
timeout: 5s
retries: 3
frontend:
!!merge <<: *common
image: signoz/frontend:${DOCKER_TAG:-0.75.0}
container_name: signoz-frontend
depends_on:
- alertmanager
- query-service
ports:
- "3301:3301"
volumes:
- ../common/signoz/nginx-config.conf:/etc/nginx/conf.d/default.conf
otel-collector:
!!merge <<: *db-depend
image: signoz/signoz-otel-collector:${OTELCOL_TAG:-0.111.29}
image: signoz/signoz-otel-collector:${OTELCOL_TAG:-0.111.30}
container_name: signoz-otel-collector
command:
- --config=/etc/otel-collector-config.yaml
@@ -185,11 +162,11 @@ services:
- "4317:4317" # OTLP gRPC receiver
- "4318:4318" # OTLP HTTP receiver
depends_on:
query-service:
signoz:
condition: service_healthy
schema-migrator-sync:
!!merge <<: *common
image: signoz/signoz-schema-migrator:${OTELCOL_TAG:-0.111.29}
image: signoz/signoz-schema-migrator:${OTELCOL_TAG:-0.111.30}
container_name: schema-migrator-sync
command:
- sync
@@ -201,7 +178,7 @@ services:
restart: on-failure
schema-migrator-async:
!!merge <<: *db-depend
image: signoz/signoz-schema-migrator:${OTELCOL_TAG:-0.111.29}
image: signoz/signoz-schema-migrator:${OTELCOL_TAG:-0.111.30}
container_name: schema-migrator-async
command:
- async
@@ -212,8 +189,6 @@ networks:
signoz-net:
name: signoz-net
volumes:
alertmanager:
name: signoz-alertmanager
clickhouse:
name: signoz-clickhouse
sqlite:


@@ -79,7 +79,7 @@ receivers:
# please remove names from below if you want to collect logs from them
- type: filter
id: signoz_logs_filter
expr: 'attributes.container_name matches "^(signoz-(|alertmanager|query-service|otel-collector|clickhouse|zookeeper))|(infra-(logspout|otel-agent)-.*)"'
expr: 'attributes.container_name matches "^signoz|(signoz-(|otel-collector|clickhouse|zookeeper))|(infra-(logspout|otel-agent)-.*)"'
processors:
batch:
send_batch_size: 10000


@@ -127,7 +127,7 @@ check_os() {
# The script should error out in case they aren't available
check_ports_occupied() {
local port_check_output
local ports_pattern="3301|4317"
local ports_pattern="8080|4317"
if is_mac; then
port_check_output="$(netstat -anp tcp | awk '$6 == "LISTEN" && $4 ~ /^.*\.('"$ports_pattern"')$/')"
@@ -144,7 +144,7 @@ check_ports_occupied() {
send_event "port_not_available"
echo "+++++++++++ ERROR ++++++++++++++++++++++"
echo "SigNoz requires ports 3301 & 4317 to be open. Please shut down any other service(s) that may be running on these ports."
echo "SigNoz requires ports 8080 & 4317 to be open. Please shut down any other service(s) that may be running on these ports."
echo "You can run SigNoz on another port following this guide https://signoz.io/docs/install/troubleshooting/"
echo "++++++++++++++++++++++++++++++++++++++++"
echo ""
@@ -248,7 +248,7 @@ wait_for_containers_start() {
# The while loop is important because for-loops don't work for dynamic values
while [[ $timeout -gt 0 ]]; do
status_code="$(curl -s -o /dev/null -w "%{http_code}" "http://localhost:3301/api/v1/health?live=1" || true)"
status_code="$(curl -s -o /dev/null -w "%{http_code}" "http://localhost:8080/api/v1/health?live=1" || true)"
if [[ status_code -eq 200 ]]; then
break
else
@@ -484,7 +484,7 @@ pushd "${BASE_DIR}/${DOCKER_STANDALONE_DIR}" > /dev/null 2>&1
# check for open ports, if signoz is not installed
if is_command_present docker-compose; then
if $sudo_cmd $docker_compose_cmd ps | grep "signoz-query-service" | grep -q "healthy" > /dev/null 2>&1; then
if $sudo_cmd $docker_compose_cmd ps | grep "signoz" | grep -q "healthy" > /dev/null 2>&1; then
echo "SigNoz already installed, skipping the occupied ports check"
else
check_ports_occupied
@@ -533,7 +533,7 @@ else
echo ""
echo "🟢 Your installation is complete!"
echo ""
echo -e "🟢 Your frontend is running on http://localhost:3301"
echo -e "🟢 SigNoz is running on http://localhost:8080"
echo ""
echo " By default, retention period is set to 15 days for logs and traces, and 30 days for metrics."
echo -e "To change this, navigate to the General tab on the Settings page of SigNoz UI. For more details, refer to https://signoz.io/docs/userguide/retention-period \n"


@@ -0,0 +1,95 @@
# Development Guide
Welcome! This guide will help you set up your local development environment for SigNoz. Let's get you started! 🚀
## What do I need?
Before diving in, make sure you have these tools installed:
- **Git** - Our version control system
- Download from [git-scm.com](https://git-scm.com/)
- **Go** - Powers our backend
- Download from [go.dev/dl](https://go.dev/dl/)
- Check [go.mod](../../go.mod#L3) for the minimum version
- **GCC** - Required for CGO dependencies
- Download from [gcc.gnu.org](https://gcc.gnu.org/)
- **Node** - Powers our frontend
- Download from [nodejs.org](https://nodejs.org)
- Check [.nvmrc](../../frontend/.nvmrc) for the version
- **Yarn** - Our frontend package manager
- Follow the [installation guide](https://yarnpkg.com/getting-started/install)
- **Docker** - For running Clickhouse and Postgres locally
- Get it from [docs.docker.com/get-docker](https://docs.docker.com/get-docker/)
> 💡 **Tip**: Run `make help` to see all available commands with descriptions
## How do I get the code?
1. Open your terminal
2. Clone the repository:
```bash
git clone https://github.com/SigNoz/signoz.git
```
3. Navigate to the project:
```bash
cd signoz
```
## How do I run it locally?
SigNoz has three main components: Clickhouse, Backend, and Frontend. Let's set them up one by one.
### 1. Setting up Clickhouse
First, we need to get Clickhouse running:
```bash
make devenv-clickhouse
```
This command:
- Starts Clickhouse in a single-shard, single-replica cluster
- Sets up Zookeeper
- Runs the latest schema migrations
### 2. Starting the Backend
1. Run the backend server:
```bash
make run-go
```
2. Verify it's working:
```bash
curl http://localhost:8080/api/v1/health
```
You should see: `{"status":"ok"}`
> 💡 **Tip**: The API server runs at `http://localhost:8080/` by default
### 3. Setting up the Frontend
1. Install dependencies:
```bash
yarn install
```
2. Create a `.env` file in the `frontend` directory:
```env
FRONTEND_API_ENDPOINT=http://localhost:8080
```
3. Start the development server:
```bash
yarn dev
```
> 💡 **Tip**: `yarn dev` will automatically rebuild when you make changes to the code
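If you prefer scripting the backend check from the frontend toolchain instead of `curl`, a tiny TypeScript probe works too. This is a convenience sketch, not part of the repo; it assumes Node 18+ for the global `fetch` and can be run with `ts-node` or `tsx`:

```typescript
// health-probe.ts — quick check that the local backend is up.
const BASE = process.env.FRONTEND_API_ENDPOINT ?? 'http://localhost:8080';

async function main(): Promise<void> {
	const res = await fetch(`${BASE}/api/v1/health`);
	if (!res.ok) {
		throw new Error(`backend unhealthy: HTTP ${res.status}`);
	}
	console.log(await res.text()); // expect {"status":"ok"}
}

main().catch((err) => {
	console.error(err);
	process.exit(1);
});
```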
Now you're all set to start developing! Happy coding! 🎉


@@ -1,14 +0,0 @@
{
"name": "e2e",
"version": "1.0.0",
"main": "index.js",
"license": "MIT",
"devDependencies": {
"@playwright/test": "^1.22.0",
"@types/node": "^20.9.2"
},
"scripts": {},
"dependencies": {
"dotenv": "8.2.0"
}
}


@@ -1,46 +0,0 @@
import { defineConfig, devices } from "@playwright/test";
import dotenv from "dotenv";
dotenv.config();
export default defineConfig({
testDir: "./tests",
fullyParallel: true,
forbidOnly: !!process.env.CI,
name: "Signoz E2E",
retries: process.env.CI ? 2 : 0,
reporter: process.env.CI ? "github" : "list",
preserveOutput: "always",
updateSnapshots: "all",
quiet: false,
testMatch: ["**/*.spec.ts"],
use: {
trace: "on-first-retry",
baseURL:
process.env.PLAYWRIGHT_TEST_BASE_URL || "https://stagingapp.signoz.io/",
},
projects: [
{ name: "setup", testMatch: /.*\.setup\.ts/ },
{
name: "chromium",
use: {
...devices["Desktop Chrome"],
// Use prepared auth state.
storageState: ".auth/user.json",
},
dependencies: ["setup"],
},
],
});


@@ -1,37 +0,0 @@
import { test, expect } from "@playwright/test";
import ROUTES from "../../frontend/src/constants/routes";
import dotenv from "dotenv";
dotenv.config();
const authFile = ".auth/user.json";
test("E2E Login Test", async ({ page }) => {
await Promise.all([page.goto("/"), page.waitForRequest("**/version")]);
const signup = "Monitor your applications. Find what is causing issues.";
const el = await page.locator(`text=${signup}`);
expect(el).toBeVisible();
await page
.locator("id=loginEmail")
.type(
process.env.PLAYWRIGHT_USERNAME ? process.env.PLAYWRIGHT_USERNAME : ""
);
await page.getByText("Next").click();
await page
.locator('input[id="currentPassword"]')
.fill(
process.env.PLAYWRIGHT_PASSWORD ? process.env.PLAYWRIGHT_PASSWORD : ""
);
await page.locator('button[data-attr="signup"]').click();
await expect(page).toHaveURL(ROUTES.APPLICATION);
await page.context().storageState({ path: authFile });
});


@@ -1,10 +0,0 @@
export const SERVICE_TABLE_HEADERS = {
APPLICATION: "Applicaton",
P99LATENCY: "P99 latency (in ms)",
ERROR_RATE: "Error Rate (% of total)",
OPS_PER_SECOND: "Operations Per Second",
};
export const DATA_TEST_IDS = {
NEW_DASHBOARD_BTN: "create-new-dashboard",
};


@@ -1,40 +0,0 @@
import { test, expect } from "@playwright/test";
import ROUTES from "../../frontend/src/constants/routes";
import { DATA_TEST_IDS, SERVICE_TABLE_HEADERS } from "./contants";
test("Basic Navigation Check across different resources", async ({ page }) => {
// route to services page and check if the page renders fine with BE contract
await Promise.all([
page.goto(ROUTES.APPLICATION),
page.waitForRequest("**/v1/services"),
]);
const p99Latency = page.locator(
`th:has-text("${SERVICE_TABLE_HEADERS.P99LATENCY}")`
);
await expect(p99Latency).toBeVisible();
// route to the new trace explorer page and check if the page renders fine
await page.goto(ROUTES.TRACES_EXPLORER);
await page.waitForLoadState("networkidle");
const listViewTable = await page
.locator('div[role="presentation"]')
.isVisible();
expect(listViewTable).toBeTruthy();
// route to the dashboards page and check if the page renders fine
await Promise.all([
page.goto(ROUTES.ALL_DASHBOARD),
page.waitForRequest("**/v1/dashboards"),
]);
const newDashboardBtn = await page
.locator(`data-testid=${DATA_TEST_IDS.NEW_DASHBOARD_BTN}`)
.isVisible();
expect(newDashboardBtn).toBeTruthy();
});


@@ -1,46 +0,0 @@
# THIS IS AN AUTOGENERATED FILE. DO NOT EDIT THIS FILE DIRECTLY.
# yarn lockfile v1
"@playwright/test@^1.22.0":
version "1.40.0"
resolved "https://registry.yarnpkg.com/@playwright/test/-/test-1.40.0.tgz#d06c506977dd7863aa16e07f2136351ecc1be6ed"
integrity sha512-PdW+kn4eV99iP5gxWNSDQCbhMaDVej+RXL5xr6t04nbKLCBwYtA046t7ofoczHOm8u6c+45hpDKQVZqtqwkeQg==
dependencies:
playwright "1.40.0"
"@types/node@^20.9.2":
version "20.9.2"
resolved "https://registry.yarnpkg.com/@types/node/-/node-20.9.2.tgz#002815c8e87fe0c9369121c78b52e800fadc0ac6"
integrity sha512-WHZXKFCEyIUJzAwh3NyyTHYSR35SevJ6mZ1nWwJafKtiQbqRTIKSRcw3Ma3acqgsent3RRDqeVwpHntMk+9irg==
dependencies:
undici-types "~5.26.4"
dotenv@8.2.0:
version "8.2.0"
resolved "https://registry.yarnpkg.com/dotenv/-/dotenv-8.2.0.tgz#97e619259ada750eea3e4ea3e26bceea5424b16a"
integrity sha512-8sJ78ElpbDJBHNeBzUbUVLsqKdccaa/BXF1uPTw3GrvQTBgrQrtObr2mUrE38vzYd8cEv+m/JBfDLioYcfXoaw==
fsevents@2.3.2:
version "2.3.2"
resolved "https://registry.yarnpkg.com/fsevents/-/fsevents-2.3.2.tgz#8a526f78b8fdf4623b709e0b975c52c24c02fd1a"
integrity sha512-xiqMQR4xAeHTuB9uWm+fFRcIOgKBMiOBP+eXiyT7jsgVCq1bkVygt00oASowB7EdtpOHaaPgKt812P9ab+DDKA==
playwright-core@1.40.0:
version "1.40.0"
resolved "https://registry.yarnpkg.com/playwright-core/-/playwright-core-1.40.0.tgz#82f61e5504cb3097803b6f8bbd98190dd34bdf14"
integrity sha512-fvKewVJpGeca8t0ipM56jkVSU6Eo0RmFvQ/MaCQNDYm+sdvKkMBBWTE1FdeMqIdumRaXXjZChWHvIzCGM/tA/Q==
playwright@1.40.0:
version "1.40.0"
resolved "https://registry.yarnpkg.com/playwright/-/playwright-1.40.0.tgz#2a1824b9fe5c4fe52ed53db9ea68003543a99df0"
integrity sha512-gyHAgQjiDf1m34Xpwzaqb76KgfzYrhK7iih+2IzcOCoZWr/8ZqmdBw+t0RU85ZmfJMgtgAiNtBQ/KS2325INXw==
dependencies:
playwright-core "1.40.0"
optionalDependencies:
fsevents "2.3.2"
undici-types@~5.26.4:
version "5.26.5"
resolved "https://registry.yarnpkg.com/undici-types/-/undici-types-5.26.5.tgz#bcd539893d00b56e964fd2657a4866b221a65617"
integrity sha512-JlCMO+ehdEIKqlFxk6IfVoAUVmgz7cU7zD/h9XZ0qzeosSHmUJVOzSQvvYSYWXkFXC+IfLKSIffhv0sVZup6pA==


@@ -0,0 +1,70 @@
# yaml-language-server: $schema=https://goreleaser.com/static/schema-pro.json
# vim: set ts=2 sw=2 tw=0 fo=cnqoj
version: 2
project_name: signoz
before:
hooks:
- go mod tidy
builds:
- id: signoz
binary: bin/signoz
main: ee/query-service/main.go
env:
- CGO_ENABLED=1
- >-
{{- if eq .Os "linux" }}
{{- if eq .Arch "arm64" }}CC=aarch64-linux-gnu-gcc{{- end }}
{{- end }}
goos:
- linux
- darwin
goarch:
- amd64
- arm64
goamd64:
- v1
goarm64:
- v8.0
ldflags:
- -s -w
- -X github.com/SigNoz/signoz/pkg/query-service/version.version={{ .Version }}
- -X main.commit={{ .Commit }} -X main.date={{ .CommitDate }}
- -X main.builtBy=goreleaser
- -X go.signoz.io/signoz/pkg/query-service/version.buildVersion={{ .Version }}
- -X go.signoz.io/signoz/pkg/query-service/version.buildHash={{ .ShortCommit }}
- -X go.signoz.io/signoz/pkg/query-service/version.buildTime={{ .Date }}
- -X go.signoz.io/signoz/pkg/query-service/version.gitBranch={{ .Branch }}
- -X go.signoz.io/signoz/ee/query-service/constants.ZeusURL=https://api.signoz.cloud
- -X go.signoz.io/signoz/ee/query-service/constants.LicenseSignozIo=https://license.signoz.io/api/v1
- >-
{{- if eq .Os "linux" }}-linkmode external -extldflags '-static'{{- end }}
mod_timestamp: "{{ .CommitTimestamp }}"
tags:
- timetzdata
archives:
- formats:
- tar.gz
name_template: >-
{{ .ProjectName }}_{{- .Os }}_{{- .Arch }}
wrap_in_directory: true
strip_binary_directory: false
files:
- src: README.md
dst: README.md
- src: LICENSE
dst: LICENSE
- src: frontend/build
dst: web
- src: conf
dst: conf
- src: templates
dst: templates
release:
name_template: "v{{ .Version }}"
draft: false
prerelease: auto


@@ -13,21 +13,21 @@ RUN apk update && apk add ca-certificates && rm -rf /var/cache/apk/*
# set working directory
WORKDIR /root
# copy the query-service binary
COPY ee/query-service/bin/query-service-${TARGETOS}-${TARGETARCH} /root/query-service
# copy the signoz binary
COPY ee/query-service/bin/signoz-${TARGETOS}-${TARGETARCH} /root/signoz
# copy prometheus YAML config
COPY pkg/query-service/config/prometheus.yml /root/config/prometheus.yml
COPY pkg/query-service/templates /root/templates
# Make query-service executable for non-root users
RUN chmod 755 /root /root/query-service
# Make signoz executable for non-root users
RUN chmod 755 /root /root/signoz
# Copy frontend
COPY frontend/build/ /etc/signoz/web/
# run the binary
ENTRYPOINT ["./query-service"]
ENTRYPOINT ["./signoz"]
CMD ["-config", "/root/config/prometheus.yml"]


@@ -5,7 +5,7 @@ import (
)
const (
DefaultSiteURL = "https://localhost:3301"
DefaultSiteURL = "https://localhost:8080"
)
var LicenseSignozIo = "https://license.signoz.io/api/v1"


@@ -1,3 +0,0 @@
node_modules
.vscode
.git


@@ -1,18 +0,0 @@
FROM nginx:1.26-alpine
# Add Maintainer Info
LABEL maintainer="signoz"
# Set working directory
WORKDIR /frontend
# Remove default nginx index page
RUN rm -rf /usr/share/nginx/html/*
# Copy custom nginx config and static files
COPY conf/default.conf /etc/nginx/conf.d/default.conf
COPY build /usr/share/nginx/html
EXPOSE 3301
ENTRYPOINT ["nginx", "-g", "daemon off;"]


@@ -1,33 +0,0 @@
server {
listen 3301;
server_name _;
gzip on;
gzip_static on;
gzip_types text/plain text/css application/json application/x-javascript text/xml application/xml application/xml+rss text/javascript;
gzip_proxied any;
gzip_vary on;
gzip_comp_level 6;
gzip_buffers 16 8k;
gzip_http_version 1.1;
# to handle uri issue 414 from nginx
client_max_body_size 24M;
large_client_header_buffers 8 128k;
location / {
root /usr/share/nginx/html;
index index.html index.htm;
try_files $uri $uri/ /index.html;
}
location = /api {
proxy_pass http://signoz-query-service:8080/api;
}
# redirect server error pages to the static page /50x.html
#
error_page 500 502 503 504 /50x.html;
location = /50x.html {
root /usr/share/nginx/html;
}
}


@@ -1,7 +0,0 @@
version: "3.9"
services:
web:
build: .
image: signoz/frontend:latest
ports:
- "3301:3301"


@@ -1,7 +1,7 @@
NODE_ENV="development"
BUNDLE_ANALYSER="true"
FRONTEND_API_ENDPOINT="http://localhost:3301/"
FRONTEND_API_ENDPOINT="http://localhost:8080/"
INTERCOM_APP_ID="intercom-app-id"
PLAYWRIGHT_TEST_BASE_URL="http://localhost:3301"
PLAYWRIGHT_TEST_BASE_URL="http://localhost:8080"
CI="1"


@@ -16,15 +16,21 @@ export interface MetricDetails {
timeSeriesActive: number;
lastReceived: string;
attributes: MetricDetailsAttribute[];
metadata: {
metadata?: {
metric_type: MetricType;
description: string;
unit: string;
temporality: Temporality;
};
alerts: MetricDetailsAlert[] | null;
dashboards: MetricDetailsDashboard[] | null;
}
export enum Temporality {
CUMULATIVE = 'cumulative',
DELTA = 'delta',
}
export interface MetricDetailsAttribute {
key: string;
value: string[];
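The key change in this hunk is `metadata` becoming optional on `MetricDetails`, so downstream code has to guard its reads. A minimal sketch of the adjustment, written as a hypothetical consumer module next to `getMetricDetails` (the function and strings are illustrative, not from this PR):

```typescript
import { MetricDetails, Temporality } from './getMetricDetails';

// Illustrative consumer: `metadata` can now be undefined, so guard before use.
export function describeMetric(details: MetricDetails): string {
	if (!details.metadata) {
		return `metadata unavailable (${details.timeSeriesActive} active series)`;
	}
	const { metric_type, unit, temporality } = details.metadata;
	const mode = temporality === Temporality.DELTA ? 'delta' : 'cumulative';
	return `${metric_type} in ${unit}, ${mode}`;
}
```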


@@ -1,12 +1,15 @@
import axios from 'api';
import { ErrorResponse, SuccessResponse } from 'types/api';
import { Temporality } from './getMetricDetails';
import { MetricType } from './getMetricsList';
export interface UpdateMetricMetadataProps {
description: string;
unit: string;
type: MetricType;
metricType: MetricType;
temporality: Temporality;
isMonotonic?: boolean;
}
export interface UpdateMetricMetadataResponse {
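For callers of the update API, the rename from `type` to `metricType` plus the new `temporality` field change the request payload. A hedged sketch of constructing it (the file path `./updateMetricMetadata` and the `MetricType.SUM` member are assumptions; the literal values are invented for illustration):

```typescript
import { Temporality } from './getMetricDetails';
import { MetricType } from './getMetricsList';
import { UpdateMetricMetadataProps } from './updateMetricMetadata';

// Illustrative payload; `MetricType.SUM` is assumed to exist on the enum.
const payload: UpdateMetricMetadataProps = {
	description: 'Total HTTP requests served',
	unit: 'requests',
	metricType: MetricType.SUM, // renamed from `type`
	temporality: Temporality.CUMULATIVE,
	isMonotonic: true, // optional; meaningful for monotonic sums
};
```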


@@ -1,6 +1,7 @@
/* eslint-disable react-hooks/exhaustive-deps */
import { DefaultOptionType } from 'antd/es/select';
import { getAttributesValues } from 'api/queryBuilder/getAttributesValues';
import { DATA_TYPE_VS_ATTRIBUTE_VALUES_KEY } from 'constants/queryBuilder';
import { REACT_QUERY_KEY } from 'constants/reactQueryKeys';
import { useQuery } from 'react-query';
import { useSelector } from 'react-redux';
@@ -67,9 +68,13 @@ export function useGetAllFilters(props: Filters): GetAllFiltersResponse {
const uniqueValues = [
...new Set(
responses.flatMap(
({ payload }) => Object.values(payload || {}).find((el) => !!el) || [],
),
responses.flatMap(({ payload }) => {
if (!payload) return [];
const dataType = filterAttributeKeyDataType || DataTypes.String;
const key = DATA_TYPE_VS_ATTRIBUTE_VALUES_KEY[dataType];
return key ? payload[key] || [] : [];
}),
),
];


@@ -12,6 +12,7 @@ import {
} from 'antd';
import { RadioChangeEvent } from 'antd/lib';
import logEvent from 'api/common/logEvent';
import { InfraMonitoringEvents } from 'constants/events';
import { QueryParams } from 'constants/query';
import {
initialQueryBuilderFormValuesMap,
@@ -56,7 +57,6 @@ import HostMetricLogsDetailedView from './HostMetricsLogs/HostMetricLogsDetailed
import HostMetricTraces from './HostMetricTraces/HostMetricTraces';
import Metrics from './Metrics/Metrics';
import Processes from './Processes/Processes';
// eslint-disable-next-line sonarjs/cognitive-complexity
function HostMetricsDetails({
host,
@@ -120,11 +120,14 @@ function HostMetricsDetails({
);
useEffect(() => {
logEvent('Infra Monitoring: Hosts list details page visited', {
host: host?.hostName,
});
if (host) {
logEvent(InfraMonitoringEvents.PageVisited, {
entity: InfraMonitoringEvents.HostEntity,
page: InfraMonitoringEvents.DetailedPage,
});
}
// eslint-disable-next-line react-hooks/exhaustive-deps
}, []);
}, [host]);
useEffect(() => {
setLogFilters(initialFilters);
@@ -166,9 +169,10 @@ function HostMetricsDetails({
});
}
logEvent('Infra Monitoring: Hosts list details time updated', {
host: host?.hostName,
logEvent(InfraMonitoringEvents.TimeUpdated, {
entity: InfraMonitoringEvents.HostEntity,
interval,
page: InfraMonitoringEvents.DetailedPage,
});
},
// eslint-disable-next-line react-hooks/exhaustive-deps
@@ -186,9 +190,13 @@ function HostMetricsDetails({
(item) => item.key?.key !== 'id' && item.key?.key !== 'host.name',
);
logEvent('Infra Monitoring: Hosts list details logs filters applied', {
host: host?.hostName,
});
if (newFilters.length > 0) {
logEvent(InfraMonitoringEvents.FilterApplied, {
entity: InfraMonitoringEvents.HostEntity,
view: InfraMonitoringEvents.LogsView,
page: InfraMonitoringEvents.DetailedPage,
});
}
return {
op: 'AND',
@@ -211,9 +219,13 @@ function HostMetricsDetails({
(item) => item.key?.key === 'host.name',
);
logEvent('Infra Monitoring: Hosts list details traces filters applied', {
host: host?.hostName,
});
if (value.items.length > 0) {
logEvent(InfraMonitoringEvents.FilterApplied, {
entity: InfraMonitoringEvents.HostEntity,
view: InfraMonitoringEvents.TracesView,
page: InfraMonitoringEvents.DetailedPage,
});
}
return {
op: 'AND',
@@ -237,9 +249,10 @@ function HostMetricsDetails({
urlQuery.set(QueryParams.endTime, modalTimeRange.endTime.toString());
}
logEvent('Infra Monitoring: Hosts list details explore clicked', {
host: host?.hostName,
logEvent(InfraMonitoringEvents.ExploreClicked, {
view: selectedView,
entity: InfraMonitoringEvents.HostEntity,
page: InfraMonitoringEvents.DetailedPage,
});
if (selectedView === VIEW_TYPES.LOGS) {


@@ -10,7 +10,10 @@ import {
IQuickFiltersConfig,
QuickFiltersSource,
} from 'components/QuickFilters/types';
import { OPERATORS } from 'constants/queryBuilder';
import {
DATA_TYPE_VS_ATTRIBUTE_VALUES_KEY,
OPERATORS,
} from 'constants/queryBuilder';
import { DEBOUNCE_DELAY } from 'constants/queryBuilderFilterConfig';
import { getOperatorValue } from 'container/QueryBuilder/filters/QueryBuilderSearch/utils';
import { useGetAggregateValues } from 'hooks/queryBuilder/useGetAggregateValues';
@@ -76,12 +79,12 @@ export default function CheckboxFilter(props: ICheckboxProps): JSX.Element {
},
);
const attributeValues: string[] = useMemo(
() =>
((Object.values(data?.payload || {}).find((el) => !!el) ||
[]) as string[]).filter((val) => !isEmpty(val)),
[data?.payload],
);
const attributeValues: string[] = useMemo(() => {
const dataType = filter.attributeKey.dataType || DataTypes.String;
const key = DATA_TYPE_VS_ATTRIBUTE_VALUES_KEY[dataType];
return (data?.payload?.[key] || []).filter((val) => !isEmpty(val));
}, [data?.payload, filter.attributeKey.dataType]);
const currentAttributeKeys = attributeValues.slice(0, visibleItemsCount);
const setSearchTextDebounced = useDebouncedFn((...args) => {


@@ -4,3 +4,34 @@ export enum Events {
TABLE_COLUMNS_DATA = 'TABLE_COLUMNS_DATA',
SLOW_API_WARNING = 'SLOW_API_WARNING',
}
export enum InfraMonitoringEvents {
PageVisited = 'Infra Monitoring: page visited',
PageNumberChanged = 'Infra Monitoring: page number changed',
GroupByChanged = 'Infra Monitoring: group by changed',
FilterApplied = 'Infra Monitoring: filter applied',
ItemClicked = 'Infra Monitoring: item clicked',
TabChanged = 'Infra Monitoring: tab changed',
TimeUpdated = 'Infra Monitoring: time updated',
ExploreClicked = 'Infra Monitoring: explore clicked',
HostEntity = 'host',
K8sEntity = 'k8s',
ListPage = 'list',
DetailedPage = 'detailed',
LogsView = 'logs',
TracesView = 'traces',
EventsView = 'events',
QuickFiltersView = 'quick filters',
MetricsView = 'metrics',
Total = 'total',
Cluster = 'cluster',
DaemonSet = 'daemonSet',
Deployment = 'deployment',
Job = 'job',
Namespace = 'namespace',
Node = 'node',
Volume = 'volume',
Pod = 'pod',
StatefulSet = 'statefulSet',
Volumes = 'volumes',
}
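Since every call site in this changeset now passes the same `{ entity, page, category?, view? }` shape, a small typed wrapper could make that contract explicit. This is a sketch only, assuming the `logEvent(eventName, attributes)` usage shown in the surrounding diffs; `logInfraEvent` itself is not part of this changeset:

```typescript
import logEvent from 'api/common/logEvent';
import { InfraMonitoringEvents } from 'constants/events';

// Payload shape used across the hosts and K8s screens in this changeset.
interface InfraEventPayload {
	entity: InfraMonitoringEvents;
	page: InfraMonitoringEvents;
	category?: InfraMonitoringEvents;
	view?: InfraMonitoringEvents | string;
	interval?: string;
	total?: number;
}

function logInfraEvent(
	event: InfraMonitoringEvents,
	payload: InfraEventPayload,
): void {
	logEvent(event, payload);
}

// Mirrors the pattern applied in K8sClustersList and the other list views.
logInfraEvent(InfraMonitoringEvents.PageVisited, {
	entity: InfraMonitoringEvents.K8sEntity,
	page: InfraMonitoringEvents.ListPage,
	category: InfraMonitoringEvents.Cluster,
});
```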


@@ -26,4 +26,5 @@ export enum LOCALSTORAGE {
UNAUTHENTICATED_ROUTE_HIT = 'UNAUTHENTICATED_ROUTE_HIT',
CELERY_OVERVIEW_COLUMNS = 'CELERY_OVERVIEW_COLUMNS',
DONT_SHOW_SLOW_API_WARNING = 'DONT_SHOW_SLOW_API_WARNING',
METRICS_LIST_OPTIONS = 'METRICS_LIST_OPTIONS',
}


@@ -1,6 +1,7 @@
// ** Helpers
import { createIdFromObjectFields } from 'lib/createIdFromObjectFields';
import { createNewBuilderItemName } from 'lib/newQueryBuilder/createNewBuilderItemName';
import { IAttributeValuesResponse } from 'types/api/queryBuilder/getAttributesValues';
import {
AutocompleteType,
BaseAutocompleteData,
@@ -417,3 +418,18 @@ export enum PanelDisplay {
PIE = 'Pie',
HISTOGRAM = 'Histogram',
}
export const DATA_TYPE_VS_ATTRIBUTE_VALUES_KEY: Record<
DataTypes,
keyof IAttributeValuesResponse
> = {
[DataTypes.String]: 'stringAttributeValues',
[DataTypes.Float64]: 'numberAttributeValues',
[DataTypes.Int64]: 'numberAttributeValues',
[DataTypes.bool]: 'boolAttributeValues',
[DataTypes.ArrayFloat64]: 'numberAttributeValues',
[DataTypes.ArrayInt64]: 'numberAttributeValues',
[DataTypes.ArrayString]: 'stringAttributeValues',
[DataTypes.ArrayBool]: 'boolAttributeValues',
[DataTypes.EMPTY]: 'stringAttributeValues',
};
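To show how this map is meant to be consumed (both `useGetAllFilters` and `CheckboxFilter` above follow the same shape), here is a small lookup helper; a sketch only, with the `DataTypes` import path assumed:

```typescript
import { DATA_TYPE_VS_ATTRIBUTE_VALUES_KEY } from 'constants/queryBuilder';
import { IAttributeValuesResponse } from 'types/api/queryBuilder/getAttributesValues';
import { DataTypes } from 'types/api/queryBuilder/queryAutocompleteResponse';

// Pick the payload bucket matching the attribute's data type; unknown types
// fall back to string values, matching the call sites in this changeset.
function attributeValuesFor(
	payload: IAttributeValuesResponse | undefined,
	dataType?: DataTypes,
): Array<string | number | boolean> {
	if (!payload) {
		return [];
	}
	const key = DATA_TYPE_VS_ATTRIBUTE_VALUES_KEY[dataType ?? DataTypes.String];
	return payload[key] ?? [];
}
```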


@@ -22,7 +22,7 @@ function TopContributorsCard({
const viewAllTopContributorsParam = searchParams.get('viewAllTopContributors');
const [isViewAllVisible, setIsViewAllVisible] = useState(
!!viewAllTopContributorsParam ?? false,
!!viewAllTopContributorsParam,
);
const isDarkMode = useIsDarkMode();
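The dropped `?? false` was dead code: `!!` always produces a boolean, which is never null or undefined, so the nullish fallback could not fire. A quick self-contained demonstration (the variable value is invented):

```typescript
// `!!x` always yields a boolean, so `?? false` on the old line was unreachable.
const viewAllTopContributorsParam: string | null = null;

const before = !!viewAllTopContributorsParam ?? false; // old expression
const after = !!viewAllTopContributorsParam; // new expression

console.log(before === after); // true for every possible param value
```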


@@ -28,7 +28,7 @@ export default function DashboardEmptyState(): JSX.Element {
}
const userRole: ROLES | null =
selectedDashboard?.created_by === user?.email
selectedDashboard?.createdBy === user?.email
? (USER_ROLES.AUTHOR as ROLES)
: user.role;


@@ -113,7 +113,7 @@ function GraphLayout(props: GraphLayoutProps): JSX.Element {
}
const userRole: ROLES | null =
selectedDashboard?.created_by === user?.email
selectedDashboard?.createdBy === user?.email
? (USER_ROLES.AUTHOR as ROLES)
: user.role;


@@ -44,7 +44,7 @@ export function WidgetRowHeader(props: WidgetRowHeaderProps): JSX.Element {
const { user } = useAppContext();
const userRole: ROLES | null =
selectedDashboard?.created_by === user?.email
selectedDashboard?.createdBy === user?.email
? (USER_ROLES.AUTHOR as ROLES)
: user.role;
const [addPanelPermission] = useComponentPermission(permissions, userRole);


@@ -7,6 +7,7 @@ import { HostListPayload } from 'api/infraMonitoring/getHostLists';
import HostMetricDetail from 'components/HostMetricsDetail';
import QuickFilters from 'components/QuickFilters/QuickFilters';
import { QuickFiltersSource } from 'components/QuickFilters/types';
import { InfraMonitoringEvents } from 'constants/events';
import { usePageSize } from 'container/InfraMonitoringK8s/utils';
import { useGetHostList } from 'hooks/infraMonitoring/useGetHostList';
import { useQueryBuilder } from 'hooks/queryBuilder/useQueryBuilder';
@@ -21,7 +22,6 @@ import { GlobalReducer } from 'types/reducer/globalTime';
import HostsListControls from './HostsListControls';
import HostsListTable from './HostsListTable';
import { getHostListsQuery, HostsQuickFiltersConfig } from './utils';
// eslint-disable-next-line sonarjs/cognitive-complexity
function HostsList(): JSX.Element {
const { maxTime, minTime } = useSelector<AppState, GlobalReducer>(
@@ -85,9 +85,12 @@ function HostsList(): JSX.Element {
if (isNewFilterAdded) {
setCurrentPage(1);
logEvent('Infra Monitoring: Hosts list filters applied', {
filters: value,
});
if (value.items.length > 0) {
logEvent(InfraMonitoringEvents.FilterApplied, {
entity: InfraMonitoringEvents.HostEntity,
page: InfraMonitoringEvents.ListPage,
});
}
}
},
// eslint-disable-next-line react-hooks/exhaustive-deps
@@ -95,8 +98,12 @@ function HostsList(): JSX.Element {
);
useEffect(() => {
logEvent('Infra Monitoring: Hosts list page visited', {});
}, []);
logEvent(InfraMonitoringEvents.PageVisited, {
total: data?.payload?.data?.total,
entity: InfraMonitoringEvents.HostEntity,
page: InfraMonitoringEvents.ListPage,
});
}, [data?.payload?.data?.total]);
const selectedHostData = useMemo(() => {
if (!selectedHostName) return null;


@@ -9,6 +9,7 @@ import {
} from 'antd';
import { SorterResult } from 'antd/es/table/interface';
import logEvent from 'api/common/logEvent';
import { InfraMonitoringEvents } from 'constants/events';
import { useCallback, useMemo } from 'react';
import HostsEmptyOrIncorrectMetrics from './HostsEmptyOrIncorrectMetrics';
@@ -77,8 +78,9 @@ export default function HostsListTable({
const handleRowClick = (record: HostRowData): void => {
setSelectedHostName(record.hostName);
logEvent('Infra Monitoring: Hosts list item clicked', {
host: record.hostName,
logEvent(InfraMonitoringEvents.ItemClicked, {
entity: InfraMonitoringEvents.HostEntity,
page: InfraMonitoringEvents.ListPage,
});
};


@@ -7,6 +7,7 @@ import { RadioChangeEvent } from 'antd/lib';
import logEvent from 'api/common/logEvent';
import { K8sClustersData } from 'api/infraMonitoring/getK8sClustersList';
import { VIEW_TYPES, VIEWS } from 'components/HostMetricsDetail/constants';
import { InfraMonitoringEvents } from 'constants/events';
import { QueryParams } from 'constants/query';
import {
initialQueryBuilderFormValuesMap,
@@ -150,11 +151,15 @@ function ClusterDetails({
);
useEffect(() => {
logEvent('Infra Monitoring: Clusters list details page visited', {
cluster: cluster?.clusterUID,
});
if (cluster) {
logEvent(InfraMonitoringEvents.PageVisited, {
entity: InfraMonitoringEvents.K8sEntity,
page: InfraMonitoringEvents.DetailedPage,
category: InfraMonitoringEvents.Cluster,
});
}
// eslint-disable-next-line react-hooks/exhaustive-deps
}, []);
}, [cluster]);
useEffect(() => {
setLogsAndTracesFilters(initialFilters);
@@ -176,8 +181,10 @@ function ClusterDetails({
const handleTabChange = (e: RadioChangeEvent): void => {
setSelectedView(e.target.value);
logEvent('Infra Monitoring: Clusters list details tab changed', {
cluster: cluster?.clusterUID,
logEvent(InfraMonitoringEvents.TabChanged, {
entity: InfraMonitoringEvents.K8sEntity,
page: InfraMonitoringEvents.DetailedPage,
category: InfraMonitoringEvents.Cluster,
view: e.target.value,
});
};
@@ -200,8 +207,10 @@ function ClusterDetails({
});
}
logEvent('Infra Monitoring: Clusters list details time updated', {
cluster: cluster?.clusterUID,
logEvent(InfraMonitoringEvents.TimeUpdated, {
entity: InfraMonitoringEvents.K8sEntity,
page: InfraMonitoringEvents.DetailedPage,
category: InfraMonitoringEvents.Cluster,
interval,
view: selectedView,
});
@@ -222,9 +231,14 @@ function ClusterDetails({
item.key?.key !== 'id' && item.key?.key !== QUERY_KEYS.K8S_CLUSTER_NAME,
);
logEvent('Infra Monitoring: Clusters list details logs filters applied', {
cluster: cluster?.clusterUID,
});
if (newFilters.length > 0) {
logEvent(InfraMonitoringEvents.FilterApplied, {
entity: InfraMonitoringEvents.K8sEntity,
page: InfraMonitoringEvents.DetailedPage,
view: InfraMonitoringEvents.LogsView,
category: InfraMonitoringEvents.Cluster,
});
}
return {
op: 'AND',
@@ -249,9 +263,14 @@ function ClusterDetails({
[QUERY_KEYS.K8S_CLUSTER_NAME].includes(item.key?.key ?? ''),
);
logEvent('Infra Monitoring: Clusters list details traces filters applied', {
cluster: cluster?.clusterUID,
});
if (value.items.length > 0) {
logEvent(InfraMonitoringEvents.FilterApplied, {
entity: InfraMonitoringEvents.K8sEntity,
page: InfraMonitoringEvents.DetailedPage,
view: InfraMonitoringEvents.TracesView,
category: InfraMonitoringEvents.Cluster,
});
}
return {
op: 'AND',
@@ -280,9 +299,14 @@ function ClusterDetails({
(item) => item.key?.key === QUERY_KEYS.K8S_OBJECT_NAME,
);
logEvent('Infra Monitoring: Clusters list details events filters applied', {
cluster: cluster?.clusterUID,
});
if (value.items.length > 0) {
logEvent(InfraMonitoringEvents.FilterApplied, {
entity: InfraMonitoringEvents.K8sEntity,
page: InfraMonitoringEvents.DetailedPage,
view: InfraMonitoringEvents.EventsView,
category: InfraMonitoringEvents.Cluster,
});
}
return {
op: 'AND',
@@ -313,8 +337,10 @@ function ClusterDetails({
urlQuery.set(QueryParams.endTime, modalTimeRange.endTime.toString());
}
logEvent('Infra Monitoring: Clusters list details explore clicked', {
cluster: cluster?.clusterUID,
logEvent(InfraMonitoringEvents.ExploreClicked, {
entity: InfraMonitoringEvents.K8sEntity,
page: InfraMonitoringEvents.DetailedPage,
category: InfraMonitoringEvents.Cluster,
view: selectedView,
});


@@ -15,6 +15,7 @@ import {
import { ColumnType, SorterResult } from 'antd/es/table/interface';
import logEvent from 'api/common/logEvent';
import { K8sClustersListPayload } from 'api/infraMonitoring/getK8sClustersList';
import { InfraMonitoringEvents } from 'constants/events';
import { useGetK8sClustersList } from 'hooks/infraMonitoring/useGetK8sClustersList';
import { useGetAggregateKeys } from 'hooks/queryBuilder/useGetAggregateKeys';
import { useQueryBuilder } from 'hooks/queryBuilder/useQueryBuilder';
@@ -41,7 +42,6 @@ import {
getK8sClustersListQuery,
K8sClustersRowData,
} from './utils';
// eslint-disable-next-line sonarjs/cognitive-complexity
function K8sClustersList({
isFiltersVisible,
@@ -240,11 +240,6 @@ function K8sClustersList({
}
}, [selectedRowData, fetchGroupedByRowData]);
const numberOfPages = useMemo(() => Math.ceil(totalCount / pageSize), [
totalCount,
pageSize,
]);
const handleTableChange: TableProps<K8sClustersRowData>['onChange'] = useCallback(
(
pagination: TablePaginationConfig,
@@ -255,10 +250,10 @@ function K8sClustersList({
): void => {
if (pagination.current) {
setCurrentPage(pagination.current);
logEvent('Infra Monitoring: K8s clusters list page number changed', {
page: pagination.current,
pageSize,
numberOfPages,
logEvent(InfraMonitoringEvents.PageNumberChanged, {
entity: InfraMonitoringEvents.K8sEntity,
page: InfraMonitoringEvents.ListPage,
category: InfraMonitoringEvents.Cluster,
});
}
@@ -271,7 +266,7 @@ function K8sClustersList({
setOrderBy(null);
}
},
[numberOfPages, pageSize],
[],
);
const { handleChangeQueryData } = useQueryOperations({
@@ -285,14 +280,25 @@ function K8sClustersList({
handleChangeQueryData('filters', value);
setCurrentPage(1);
logEvent('Infra Monitoring: K8s clusters list filters applied', {});
if (value.items.length > 0) {
logEvent(InfraMonitoringEvents.FilterApplied, {
entity: InfraMonitoringEvents.K8sEntity,
category: InfraMonitoringEvents.Cluster,
page: InfraMonitoringEvents.ListPage,
});
}
},
[handleChangeQueryData],
);
useEffect(() => {
logEvent('Infra Monitoring: K8s clusters list page visited', {});
}, []);
logEvent(InfraMonitoringEvents.PageVisited, {
entity: InfraMonitoringEvents.K8sEntity,
category: InfraMonitoringEvents.Cluster,
page: InfraMonitoringEvents.ListPage,
total: data?.payload?.data?.total,
});
}, [data?.payload?.data?.total]);
const selectedClusterData = useMemo(() => {
if (!selectedClusterName) return null;
@@ -320,8 +326,10 @@ function K8sClustersList({
handleGroupByRowClick(record);
}
logEvent('Infra Monitoring: K8s cluster list item clicked', {
clusterName: record.clusterName,
logEvent(InfraMonitoringEvents.ItemClicked, {
entity: InfraMonitoringEvents.K8sEntity,
page: InfraMonitoringEvents.ListPage,
category: InfraMonitoringEvents.Cluster,
});
};
@@ -450,7 +458,11 @@ function K8sClustersList({
setCurrentPage(1);
setGroupBy(groupBy);
setExpandedRowKeys([]);
logEvent('Infra Monitoring: K8s clusters list group by changed', {});
logEvent(InfraMonitoringEvents.GroupByChanged, {
entity: InfraMonitoringEvents.K8sEntity,
page: InfraMonitoringEvents.ListPage,
category: InfraMonitoringEvents.Cluster,
});
},
[groupByFiltersData],
);
@@ -469,10 +481,10 @@ function K8sClustersList({
const onPaginationChange = (page: number, pageSize: number): void => {
setCurrentPage(page);
setPageSize(pageSize);
logEvent('Infra Monitoring: K8s clusters list page number changed', {
page,
pageSize,
numberOfPages,
logEvent(InfraMonitoringEvents.PageNumberChanged, {
entity: InfraMonitoringEvents.K8sEntity,
page: InfraMonitoringEvents.ListPage,
category: InfraMonitoringEvents.Cluster,
});
};


@@ -6,6 +6,7 @@ import { Button, Divider, Drawer, Radio, Tooltip, Typography } from 'antd';
import { RadioChangeEvent } from 'antd/lib';
import logEvent from 'api/common/logEvent';
import { VIEW_TYPES, VIEWS } from 'components/HostMetricsDetail/constants';
import { InfraMonitoringEvents } from 'constants/events';
import { QueryParams } from 'constants/query';
import {
initialQueryBuilderFormValuesMap,
@@ -164,11 +165,15 @@ function DaemonSetDetails({
);
useEffect(() => {
logEvent('Infra Monitoring: DaemonSets list details page visited', {
daemonSet: daemonSet?.daemonSetName,
});
if (daemonSet) {
logEvent(InfraMonitoringEvents.PageVisited, {
entity: InfraMonitoringEvents.K8sEntity,
page: InfraMonitoringEvents.DetailedPage,
category: InfraMonitoringEvents.DaemonSet,
});
}
// eslint-disable-next-line react-hooks/exhaustive-deps
}, []);
}, [daemonSet]);
useEffect(() => {
setLogAndTracesFilters(initialFilters);
@@ -190,8 +195,10 @@ function DaemonSetDetails({
const handleTabChange = (e: RadioChangeEvent): void => {
setSelectedView(e.target.value);
logEvent('Infra Monitoring: DaemonSets list details tab changed', {
daemonSet: daemonSet?.daemonSetName,
logEvent(InfraMonitoringEvents.TabChanged, {
entity: InfraMonitoringEvents.K8sEntity,
page: InfraMonitoringEvents.DetailedPage,
category: InfraMonitoringEvents.DaemonSet,
view: e.target.value,
});
};
@@ -214,8 +221,10 @@ function DaemonSetDetails({
});
}
logEvent('Infra Monitoring: DaemonSets list details time updated', {
daemonSet: daemonSet?.daemonSetName,
logEvent(InfraMonitoringEvents.TimeUpdated, {
entity: InfraMonitoringEvents.K8sEntity,
page: InfraMonitoringEvents.DetailedPage,
category: InfraMonitoringEvents.DaemonSet,
interval,
view: selectedView,
});
@@ -239,9 +248,14 @@ function DaemonSetDetails({
item.key?.key !== QUERY_KEYS.K8S_DAEMON_SET_NAME,
);
logEvent('Infra Monitoring: DaemonSets list details logs filters applied', {
daemonSet: daemonSet?.daemonSetName,
});
if (newFilters.length > 0) {
logEvent(InfraMonitoringEvents.FilterApplied, {
entity: InfraMonitoringEvents.K8sEntity,
page: InfraMonitoringEvents.DetailedPage,
category: InfraMonitoringEvents.DaemonSet,
view: InfraMonitoringEvents.LogsView,
});
}
return {
op: 'AND',
@@ -266,12 +280,14 @@ function DaemonSetDetails({
),
);
logEvent(
'Infra Monitoring: DaemonSets list details traces filters applied',
{
daemonSet: daemonSet?.daemonSetName,
},
);
if (value.items.length > 0) {
logEvent(InfraMonitoringEvents.FilterApplied, {
entity: InfraMonitoringEvents.K8sEntity,
page: InfraMonitoringEvents.DetailedPage,
category: InfraMonitoringEvents.DaemonSet,
view: InfraMonitoringEvents.TracesView,
});
}
return {
op: 'AND',
@@ -298,12 +314,14 @@ function DaemonSetDetails({
(item) => item.key?.key === QUERY_KEYS.K8S_OBJECT_NAME,
);
logEvent(
'Infra Monitoring: DaemonSets list details events filters applied',
{
daemonSet: daemonSet?.daemonSetName,
},
);
if (value.items.length > 0) {
logEvent(InfraMonitoringEvents.FilterApplied, {
entity: InfraMonitoringEvents.K8sEntity,
page: InfraMonitoringEvents.DetailedPage,
category: InfraMonitoringEvents.DaemonSet,
view: InfraMonitoringEvents.EventsView,
});
}
return {
op: 'AND',
@@ -332,8 +350,10 @@ function DaemonSetDetails({
urlQuery.set(QueryParams.endTime, modalTimeRange.endTime.toString());
}
logEvent('Infra Monitoring: DaemonSets list details explore clicked', {
daemonSet: daemonSet?.daemonSetName,
logEvent(InfraMonitoringEvents.ExploreClicked, {
entity: InfraMonitoringEvents.K8sEntity,
page: InfraMonitoringEvents.DetailedPage,
category: InfraMonitoringEvents.DaemonSet,
view: selectedView,
});


@@ -16,6 +16,7 @@ import { ColumnType, SorterResult } from 'antd/es/table/interface';
import logEvent from 'api/common/logEvent';
import { K8sDaemonSetsListPayload } from 'api/infraMonitoring/getK8sDaemonSetsList';
import classNames from 'classnames';
import { InfraMonitoringEvents } from 'constants/events';
import { useGetK8sDaemonSetsList } from 'hooks/infraMonitoring/useGetK8sDaemonSetsList';
import { useGetAggregateKeys } from 'hooks/queryBuilder/useGetAggregateKeys';
import { useQueryBuilder } from 'hooks/queryBuilder/useQueryBuilder';
@@ -243,11 +244,6 @@ function K8sDaemonSetsList({
}
}, [selectedRowData, fetchGroupedByRowData]);
const numberOfPages = useMemo(() => Math.ceil(totalCount / pageSize), [
totalCount,
pageSize,
]);
const handleTableChange: TableProps<K8sDaemonSetsRowData>['onChange'] = useCallback(
(
pagination: TablePaginationConfig,
@@ -258,10 +254,10 @@ function K8sDaemonSetsList({
): void => {
if (pagination.current) {
setCurrentPage(pagination.current);
logEvent('Infra Monitoring: K8s daemonSets list page number changed', {
page: pagination.current,
pageSize,
numberOfPages,
logEvent(InfraMonitoringEvents.PageNumberChanged, {
entity: InfraMonitoringEvents.K8sEntity,
page: InfraMonitoringEvents.ListPage,
category: InfraMonitoringEvents.DaemonSet,
});
}
@@ -274,7 +270,7 @@ function K8sDaemonSetsList({
setOrderBy(null);
}
},
[numberOfPages, pageSize],
[],
);
const { handleChangeQueryData } = useQueryOperations({
@@ -288,14 +284,25 @@ function K8sDaemonSetsList({
handleChangeQueryData('filters', value);
setCurrentPage(1);
logEvent('Infra Monitoring: K8s daemonSets list filters applied', {});
if (value.items.length > 0) {
logEvent(InfraMonitoringEvents.FilterApplied, {
entity: InfraMonitoringEvents.K8sEntity,
page: InfraMonitoringEvents.ListPage,
category: InfraMonitoringEvents.DaemonSet,
});
}
},
[handleChangeQueryData],
);
useEffect(() => {
logEvent('Infra Monitoring: K8s daemonSets list page visited', {});
}, []);
logEvent(InfraMonitoringEvents.PageVisited, {
entity: InfraMonitoringEvents.K8sEntity,
page: InfraMonitoringEvents.ListPage,
category: InfraMonitoringEvents.DaemonSet,
total: data?.payload?.data?.total,
});
}, [data?.payload?.data?.total]);
const selectedDaemonSetData = useMemo(() => {
if (groupBy.length > 0) {
@@ -327,8 +334,10 @@ function K8sDaemonSetsList({
handleGroupByRowClick(record);
}
logEvent('Infra Monitoring: K8s daemonSet list item clicked', {
daemonSetName: record.daemonsetName,
logEvent(InfraMonitoringEvents.ItemClicked, {
entity: InfraMonitoringEvents.K8sEntity,
page: InfraMonitoringEvents.ListPage,
category: InfraMonitoringEvents.DaemonSet,
});
};
@@ -457,7 +466,11 @@ function K8sDaemonSetsList({
setGroupBy(groupBy);
setExpandedRowKeys([]);
logEvent('Infra Monitoring: K8s daemonSets list group by changed', {});
logEvent(InfraMonitoringEvents.GroupByChanged, {
entity: InfraMonitoringEvents.K8sEntity,
page: InfraMonitoringEvents.ListPage,
category: InfraMonitoringEvents.DaemonSet,
});
},
[groupByFiltersData],
);
@@ -476,10 +489,10 @@ function K8sDaemonSetsList({
const onPaginationChange = (page: number, pageSize: number): void => {
setCurrentPage(page);
setPageSize(pageSize);
logEvent('Infra Monitoring: K8s daemonSets list page number changed', {
page,
pageSize,
numberOfPages,
logEvent(InfraMonitoringEvents.PageNumberChanged, {
entity: InfraMonitoringEvents.K8sEntity,
page: InfraMonitoringEvents.ListPage,
category: InfraMonitoringEvents.DaemonSet,
});
};
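The same list-page changes recur in every K8s list component below (Deployments, Jobs, Namespaces, Nodes, Pods, StatefulSets, Volumes): the derived `numberOfPages` memo is deleted, page-change events drop the `page`/`pageSize`/`numberOfPages` payload (which is what lets the `useCallback` dependency arrays go empty), filter events fire only when at least one filter item is set, and the page-visited effect is keyed on `data?.payload?.data?.total` so the event carries the row count once the query resolves. A condensed sketch of that shared pattern — the hook itself is hypothetical, one possible way to collapse the eight near-identical copies:

```typescript
import logEvent from 'api/common/logEvent';
import { InfraMonitoringEvents } from 'constants/events';
import { useCallback, useEffect } from 'react';

// Hypothetical hook (not in the diff) collapsing the telemetry block that
// each K8s list component now repeats.
function useK8sListTelemetry(
  category: InfraMonitoringEvents,
  total?: number,
): { onFiltersApplied: (value: { items: unknown[] }) => void } {
  useEffect(() => {
    // Mirrors the diff: runs on mount (total still undefined) and again when
    // the query resolves, so a visit can be logged twice with this keying.
    logEvent(InfraMonitoringEvents.PageVisited, {
      entity: InfraMonitoringEvents.K8sEntity,
      page: InfraMonitoringEvents.ListPage,
      category,
      total,
    });
  }, [category, total]);

  const onFiltersApplied = useCallback(
    (value: { items: unknown[] }): void => {
      // Guard from the diff: clearing filters no longer counts as "applied".
      if (value.items.length > 0) {
        logEvent(InfraMonitoringEvents.FilterApplied, {
          entity: InfraMonitoringEvents.K8sEntity,
          page: InfraMonitoringEvents.ListPage,
          category,
        });
      }
    },
    [category],
  );

  return { onFiltersApplied };
}
```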

View File

@@ -7,6 +7,7 @@ import { RadioChangeEvent } from 'antd/lib';
import logEvent from 'api/common/logEvent';
import { K8sDeploymentsData } from 'api/infraMonitoring/getK8sDeploymentsList';
import { VIEW_TYPES, VIEWS } from 'components/HostMetricsDetail/constants';
import { InfraMonitoringEvents } from 'constants/events';
import { QueryParams } from 'constants/query';
import {
initialQueryBuilderFormValuesMap,
@@ -166,11 +167,15 @@ function DeploymentDetails({
);
useEffect(() => {
logEvent('Infra Monitoring: Deployments list details page visited', {
deployment: deployment?.deploymentName,
});
if (deployment) {
logEvent(InfraMonitoringEvents.PageVisited, {
entity: InfraMonitoringEvents.K8sEntity,
page: InfraMonitoringEvents.DetailedPage,
category: InfraMonitoringEvents.Deployment,
});
}
// eslint-disable-next-line react-hooks/exhaustive-deps
}, []);
}, [deployment]);
useEffect(() => {
setLogAndTracesFilters(initialFilters);
@@ -192,8 +197,10 @@ function DeploymentDetails({
const handleTabChange = (e: RadioChangeEvent): void => {
setSelectedView(e.target.value);
logEvent('Infra Monitoring: Deployments list details tab changed', {
deployment: deployment?.deploymentName,
logEvent(InfraMonitoringEvents.TabChanged, {
entity: InfraMonitoringEvents.K8sEntity,
page: InfraMonitoringEvents.DetailedPage,
category: InfraMonitoringEvents.Deployment,
view: e.target.value,
});
};
@@ -216,8 +223,10 @@ function DeploymentDetails({
});
}
logEvent('Infra Monitoring: Deployments list details time updated', {
deployment: deployment?.deploymentName,
logEvent(InfraMonitoringEvents.TimeUpdated, {
entity: InfraMonitoringEvents.K8sEntity,
page: InfraMonitoringEvents.DetailedPage,
category: InfraMonitoringEvents.Deployment,
interval,
view: selectedView,
});
@@ -241,12 +250,14 @@ function DeploymentDetails({
item.key?.key !== QUERY_KEYS.K8S_DEPLOYMENT_NAME,
);
logEvent(
'Infra Monitoring: Deployments list details logs filters applied',
{
deployment: deployment?.deploymentName,
},
);
if (value.items.length > 0) {
logEvent(InfraMonitoringEvents.FilterApplied, {
entity: InfraMonitoringEvents.K8sEntity,
page: InfraMonitoringEvents.DetailedPage,
category: InfraMonitoringEvents.Deployment,
view: InfraMonitoringEvents.LogsView,
});
}
return {
op: 'AND',
@@ -273,12 +284,14 @@ function DeploymentDetails({
),
);
logEvent(
'Infra Monitoring: Deployments list details traces filters applied',
{
deployment: deployment?.deploymentName,
},
);
if (value.items.length > 0) {
logEvent(InfraMonitoringEvents.FilterApplied, {
entity: InfraMonitoringEvents.K8sEntity,
page: InfraMonitoringEvents.DetailedPage,
category: InfraMonitoringEvents.Deployment,
view: InfraMonitoringEvents.TracesView,
});
}
return {
op: 'AND',
@@ -307,12 +320,14 @@ function DeploymentDetails({
(item) => item.key?.key === QUERY_KEYS.K8S_OBJECT_NAME,
);
logEvent(
'Infra Monitoring: Deployments list details events filters applied',
{
deployment: deployment?.deploymentName,
},
);
if (value.items.length > 0) {
logEvent(InfraMonitoringEvents.FilterApplied, {
entity: InfraMonitoringEvents.K8sEntity,
page: InfraMonitoringEvents.DetailedPage,
category: InfraMonitoringEvents.Deployment,
view: InfraMonitoringEvents.EventsView,
});
}
return {
op: 'AND',
@@ -343,8 +358,10 @@ function DeploymentDetails({
urlQuery.set(QueryParams.endTime, modalTimeRange.endTime.toString());
}
logEvent('Infra Monitoring: Deployments list details explore clicked', {
deployment: deployment?.deploymentName,
logEvent(InfraMonitoringEvents.ExploreClicked, {
entity: InfraMonitoringEvents.K8sEntity,
page: InfraMonitoringEvents.DetailedPage,
category: InfraMonitoringEvents.Deployment,
view: selectedView,
});
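The details drawers get the parallel treatment, with one behavioral shift: the visit effect used to fire unconditionally on mount and now fires only when the entity object is present, with the object itself in the dependency array. Because the dependency is a reference, a re-fetch that returns a fresh object re-logs the visit. If one event per opened drawer is the intent, a ref guard would de-duplicate it — a hypothetical variant, not what the diff ships:

```typescript
import { K8sDeploymentsData } from 'api/infraMonitoring/getK8sDeploymentsList';
import logEvent from 'api/common/logEvent';
import { InfraMonitoringEvents } from 'constants/events';
import { useEffect, useRef } from 'react';

// Hypothetical: log a detail-drawer visit at most once per mount, even when
// the entity object's identity changes across re-fetches.
function useLogVisitOnce(deployment: K8sDeploymentsData | null): void {
  const hasLogged = useRef(false);

  useEffect(() => {
    if (deployment && !hasLogged.current) {
      hasLogged.current = true;
      logEvent(InfraMonitoringEvents.PageVisited, {
        entity: InfraMonitoringEvents.K8sEntity,
        page: InfraMonitoringEvents.DetailedPage,
        category: InfraMonitoringEvents.Deployment,
      });
    }
  }, [deployment]);
}
```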

View File

@@ -16,6 +16,7 @@ import { ColumnType, SorterResult } from 'antd/es/table/interface';
import logEvent from 'api/common/logEvent';
import { K8sDeploymentsListPayload } from 'api/infraMonitoring/getK8sDeploymentsList';
import classNames from 'classnames';
import { InfraMonitoringEvents } from 'constants/events';
import { useGetK8sDeploymentsList } from 'hooks/infraMonitoring/useGetK8sDeploymentsList';
import { useGetAggregateKeys } from 'hooks/queryBuilder/useGetAggregateKeys';
import { useQueryBuilder } from 'hooks/queryBuilder/useQueryBuilder';
@@ -245,11 +246,6 @@ function K8sDeploymentsList({
}
}, [selectedRowData, fetchGroupedByRowData]);
const numberOfPages = useMemo(() => Math.ceil(totalCount / pageSize), [
totalCount,
pageSize,
]);
const handleTableChange: TableProps<K8sDeploymentsRowData>['onChange'] = useCallback(
(
pagination: TablePaginationConfig,
@@ -260,10 +256,10 @@ function K8sDeploymentsList({
): void => {
if (pagination.current) {
setCurrentPage(pagination.current);
logEvent('Infra Monitoring: K8s deployments list page number changed', {
page: pagination.current,
pageSize,
numberOfPages,
logEvent(InfraMonitoringEvents.PageNumberChanged, {
entity: InfraMonitoringEvents.K8sEntity,
page: InfraMonitoringEvents.ListPage,
category: InfraMonitoringEvents.Deployment,
});
}
@@ -276,7 +272,7 @@ function K8sDeploymentsList({
setOrderBy(null);
}
},
[numberOfPages, pageSize],
[],
);
const { handleChangeQueryData } = useQueryOperations({
@@ -290,14 +286,25 @@ function K8sDeploymentsList({
handleChangeQueryData('filters', value);
setCurrentPage(1);
logEvent('Infra Monitoring: K8s deployments list filters applied', {});
if (value.items.length > 0) {
logEvent(InfraMonitoringEvents.FilterApplied, {
entity: InfraMonitoringEvents.K8sEntity,
page: InfraMonitoringEvents.ListPage,
category: InfraMonitoringEvents.Deployment,
});
}
},
[handleChangeQueryData],
);
useEffect(() => {
logEvent('Infra Monitoring: K8s deployments list page visited', {});
}, []);
logEvent(InfraMonitoringEvents.PageVisited, {
entity: InfraMonitoringEvents.K8sEntity,
page: InfraMonitoringEvents.ListPage,
category: InfraMonitoringEvents.Deployment,
total: data?.payload?.data?.total,
});
}, [data?.payload?.data?.total]);
const selectedDeploymentData = useMemo(() => {
if (!selectedDeploymentUID) return null;
@@ -330,8 +337,10 @@ function K8sDeploymentsList({
handleGroupByRowClick(record);
}
logEvent('Infra Monitoring: K8s deployment list item clicked', {
deploymentUID: record.deploymentName,
logEvent(InfraMonitoringEvents.ItemClicked, {
entity: InfraMonitoringEvents.K8sEntity,
page: InfraMonitoringEvents.ListPage,
category: InfraMonitoringEvents.Deployment,
});
};
@@ -461,7 +470,11 @@ function K8sDeploymentsList({
setGroupBy(groupBy);
setExpandedRowKeys([]);
logEvent('Infra Monitoring: K8s deployments list group by changed', {});
logEvent(InfraMonitoringEvents.GroupByChanged, {
entity: InfraMonitoringEvents.K8sEntity,
page: InfraMonitoringEvents.ListPage,
category: InfraMonitoringEvents.Deployment,
});
},
[groupByFiltersData],
);
@@ -480,10 +493,10 @@ function K8sDeploymentsList({
const onPaginationChange = (page: number, pageSize: number): void => {
setCurrentPage(page);
setPageSize(pageSize);
logEvent('Infra Monitoring: K8s deployments list page number changed', {
page,
pageSize,
numberOfPages,
logEvent(InfraMonitoringEvents.PageNumberChanged, {
entity: InfraMonitoringEvents.K8sEntity,
page: InfraMonitoringEvents.ListPage,
category: InfraMonitoringEvents.Deployment,
});
};

View File

@@ -7,6 +7,7 @@ import { Collapse, Tooltip, Typography } from 'antd';
import logEvent from 'api/common/logEvent';
import QuickFilters from 'components/QuickFilters/QuickFilters';
import { QuickFiltersSource } from 'components/QuickFilters/types';
import { InfraMonitoringEvents } from 'constants/events';
import { useQueryBuilder } from 'hooks/queryBuilder/useQueryBuilder';
import { useQueryOperations } from 'hooks/queryBuilder/useQueryBuilderOperations';
import {
@@ -70,10 +71,12 @@ export default function InfraMonitoringK8s(): JSX.Element {
handleChangeQueryData('filters', query.builder.queryData[0].filters);
setQuickFiltersLastUpdated(Date.now());
logEvent(
`Infra Monitoring: K8s ${selectedCategory} list quick filters applied`,
{},
);
logEvent(InfraMonitoringEvents.FilterApplied, {
entity: InfraMonitoringEvents.K8sEntity,
page: InfraMonitoringEvents.ListPage,
category: selectedCategory,
view: InfraMonitoringEvents.QuickFiltersView,
});
};
const items: CollapseProps['items'] = [

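For the quick-filters panel, the change also removes a template-literal event name — previously each category produced a distinct event string, which analytics tooling cannot aggregate without pattern matching. The same call site before and after, as it appears in the hunk above (assuming `logEvent` forwards the attribute map unchanged to the analytics backend):

```typescript
// Before: one event name per category, e.g. "…K8s pods list quick filters applied".
logEvent(`Infra Monitoring: K8s ${selectedCategory} list quick filters applied`, {});

// After: a single FilterApplied event; the variation moves into attributes.
logEvent(InfraMonitoringEvents.FilterApplied, {
  entity: InfraMonitoringEvents.K8sEntity,
  page: InfraMonitoringEvents.ListPage,
  category: selectedCategory,
  view: InfraMonitoringEvents.QuickFiltersView,
});
```

Note that `selectedCategory` comes from the component's own state rather than being an `InfraMonitoringEvents` member, so the `category` attribute may mix two vocabularies unless the string values happen to line up.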
View File

@@ -6,6 +6,7 @@ import { Button, Divider, Drawer, Radio, Tooltip, Typography } from 'antd';
import { RadioChangeEvent } from 'antd/lib';
import logEvent from 'api/common/logEvent';
import { VIEW_TYPES, VIEWS } from 'components/HostMetricsDetail/constants';
import { InfraMonitoringEvents } from 'constants/events';
import { QueryParams } from 'constants/query';
import {
initialQueryBuilderFormValuesMap,
@@ -161,11 +162,15 @@ function JobDetails({
);
useEffect(() => {
logEvent('Infra Monitoring: Jobs list details page visited', {
job: job?.jobName,
});
if (job) {
logEvent(InfraMonitoringEvents.PageVisited, {
entity: InfraMonitoringEvents.K8sEntity,
page: InfraMonitoringEvents.DetailedPage,
category: InfraMonitoringEvents.Job,
});
}
// eslint-disable-next-line react-hooks/exhaustive-deps
}, []);
}, [job]);
useEffect(() => {
setLogAndTracesFilters(initialFilters);
@@ -187,8 +192,10 @@ function JobDetails({
const handleTabChange = (e: RadioChangeEvent): void => {
setSelectedView(e.target.value);
logEvent('Infra Monitoring: Jobs list details tab changed', {
job: job?.jobName,
logEvent(InfraMonitoringEvents.TabChanged, {
entity: InfraMonitoringEvents.K8sEntity,
page: InfraMonitoringEvents.DetailedPage,
category: InfraMonitoringEvents.Job,
view: e.target.value,
});
};
@@ -211,8 +218,10 @@ function JobDetails({
});
}
logEvent('Infra Monitoring: Jobs list details time updated', {
job: job?.jobName,
logEvent(InfraMonitoringEvents.TimeUpdated, {
entity: InfraMonitoringEvents.K8sEntity,
page: InfraMonitoringEvents.DetailedPage,
category: InfraMonitoringEvents.Job,
interval,
view: selectedView,
});
@@ -235,9 +244,14 @@ function JobDetails({
item.key?.key !== 'id' && item.key?.key !== QUERY_KEYS.K8S_JOB_NAME,
);
logEvent('Infra Monitoring: Jobs list details logs filters applied', {
job: job?.jobName,
});
if (newFilters.length > 0) {
logEvent(InfraMonitoringEvents.FilterApplied, {
entity: InfraMonitoringEvents.K8sEntity,
page: InfraMonitoringEvents.DetailedPage,
category: InfraMonitoringEvents.Job,
view: 'logs',
});
}
return {
op: 'AND',
@@ -262,9 +276,14 @@ function JobDetails({
),
);
logEvent('Infra Monitoring: Jobs list details traces filters applied', {
job: job?.jobName,
});
if (value.items.length > 0) {
logEvent(InfraMonitoringEvents.FilterApplied, {
entity: InfraMonitoringEvents.K8sEntity,
page: InfraMonitoringEvents.DetailedPage,
category: InfraMonitoringEvents.Job,
view: 'traces',
});
}
return {
op: 'AND',
@@ -291,9 +310,14 @@ function JobDetails({
(item) => item.key?.key === QUERY_KEYS.K8S_OBJECT_NAME,
);
logEvent('Infra Monitoring: Jobs list details events filters applied', {
job: job?.jobName,
});
if (value.items.length > 0) {
logEvent(InfraMonitoringEvents.FilterApplied, {
entity: InfraMonitoringEvents.K8sEntity,
page: InfraMonitoringEvents.DetailedPage,
category: InfraMonitoringEvents.Job,
view: 'events',
});
}
return {
op: 'AND',
@@ -322,8 +346,10 @@ function JobDetails({
urlQuery.set(QueryParams.endTime, modalTimeRange.endTime.toString());
}
logEvent('Infra Monitoring: Jobs list details explore clicked', {
job: job?.jobName,
logEvent(InfraMonitoringEvents.ExploreClicked, {
entity: InfraMonitoringEvents.K8sEntity,
page: InfraMonitoringEvents.DetailedPage,
category: InfraMonitoringEvents.Job,
view: selectedView,
});

View File

@@ -16,6 +16,7 @@ import { ColumnType, SorterResult } from 'antd/es/table/interface';
import logEvent from 'api/common/logEvent';
import { K8sJobsListPayload } from 'api/infraMonitoring/getK8sJobsList';
import classNames from 'classnames';
import { InfraMonitoringEvents } from 'constants/events';
import { useGetK8sJobsList } from 'hooks/infraMonitoring/useGetK8sJobsList';
import { useGetAggregateKeys } from 'hooks/queryBuilder/useGetAggregateKeys';
import { useQueryBuilder } from 'hooks/queryBuilder/useQueryBuilder';
@@ -234,11 +235,6 @@ function K8sJobsList({
}
}, [selectedRowData, fetchGroupedByRowData]);
const numberOfPages = useMemo(() => Math.ceil(totalCount / pageSize), [
totalCount,
pageSize,
]);
const handleTableChange: TableProps<K8sJobsRowData>['onChange'] = useCallback(
(
pagination: TablePaginationConfig,
@@ -247,10 +243,10 @@ function K8sJobsList({
): void => {
if (pagination.current) {
setCurrentPage(pagination.current);
logEvent('Infra Monitoring: K8s jobs list page number changed', {
page: pagination.current,
pageSize,
numberOfPages,
logEvent(InfraMonitoringEvents.PageNumberChanged, {
entity: InfraMonitoringEvents.K8sEntity,
page: InfraMonitoringEvents.ListPage,
category: InfraMonitoringEvents.Job,
});
}
@@ -263,7 +259,7 @@ function K8sJobsList({
setOrderBy(null);
}
},
[numberOfPages, pageSize],
[],
);
const { handleChangeQueryData } = useQueryOperations({
@@ -277,14 +273,25 @@ function K8sJobsList({
handleChangeQueryData('filters', value);
setCurrentPage(1);
logEvent('Infra Monitoring: K8s jobs list filters applied', {});
if (value.items.length > 0) {
logEvent(InfraMonitoringEvents.FilterApplied, {
entity: InfraMonitoringEvents.K8sEntity,
page: InfraMonitoringEvents.ListPage,
category: InfraMonitoringEvents.Job,
});
}
},
[handleChangeQueryData],
);
useEffect(() => {
logEvent('Infra Monitoring: K8s jobs list page visited', {});
}, []);
logEvent(InfraMonitoringEvents.PageVisited, {
entity: InfraMonitoringEvents.K8sEntity,
page: InfraMonitoringEvents.ListPage,
category: InfraMonitoringEvents.Job,
total: data?.payload?.data?.total,
});
}, [data?.payload?.data?.total]);
const selectedJobData = useMemo(() => {
if (groupBy.length > 0) {
@@ -303,8 +310,10 @@ function K8sJobsList({
handleGroupByRowClick(record);
}
logEvent('Infra Monitoring: K8s job list item clicked', {
jobName: record.jobName,
logEvent(InfraMonitoringEvents.ItemClicked, {
entity: InfraMonitoringEvents.K8sEntity,
page: InfraMonitoringEvents.ListPage,
category: InfraMonitoringEvents.Job,
});
};
@@ -433,7 +442,11 @@ function K8sJobsList({
setGroupBy(groupBy);
setExpandedRowKeys([]);
logEvent('Infra Monitoring: K8s jobs list group by changed', {});
logEvent(InfraMonitoringEvents.GroupByChanged, {
entity: InfraMonitoringEvents.K8sEntity,
page: InfraMonitoringEvents.ListPage,
category: InfraMonitoringEvents.Job,
});
},
[groupByFiltersData],
);
@@ -452,10 +465,10 @@ function K8sJobsList({
const onPaginationChange = (page: number, pageSize: number): void => {
setCurrentPage(page);
setPageSize(pageSize);
logEvent('Infra Monitoring: K8s jobs list page number changed', {
page,
pageSize,
numberOfPages,
logEvent(InfraMonitoringEvents.PageNumberChanged, {
entity: InfraMonitoringEvents.K8sEntity,
page: InfraMonitoringEvents.ListPage,
category: InfraMonitoringEvents.Job,
});
};

View File

@@ -15,6 +15,7 @@ import {
import { ColumnType, SorterResult } from 'antd/es/table/interface';
import logEvent from 'api/common/logEvent';
import { K8sNamespacesListPayload } from 'api/infraMonitoring/getK8sNamespacesList';
import { InfraMonitoringEvents } from 'constants/events';
import { useGetK8sNamespacesList } from 'hooks/infraMonitoring/useGetK8sNamespacesList';
import { useGetAggregateKeys } from 'hooks/queryBuilder/useGetAggregateKeys';
import { useQueryBuilder } from 'hooks/queryBuilder/useQueryBuilder';
@@ -242,11 +243,6 @@ function K8sNamespacesList({
}
}, [selectedRowData, fetchGroupedByRowData]);
const numberOfPages = useMemo(() => Math.ceil(totalCount / pageSize), [
totalCount,
pageSize,
]);
const handleTableChange: TableProps<K8sNamespacesRowData>['onChange'] = useCallback(
(
pagination: TablePaginationConfig,
@@ -257,10 +253,10 @@ function K8sNamespacesList({
): void => {
if (pagination.current) {
setCurrentPage(pagination.current);
logEvent('Infra Monitoring: K8s namespaces list page number changed', {
page: pagination.current,
pageSize,
numberOfPages,
logEvent(InfraMonitoringEvents.PageNumberChanged, {
entity: InfraMonitoringEvents.K8sEntity,
page: InfraMonitoringEvents.ListPage,
category: InfraMonitoringEvents.Namespace,
});
}
@@ -273,7 +269,7 @@ function K8sNamespacesList({
setOrderBy(null);
}
},
[numberOfPages, pageSize],
[],
);
const { handleChangeQueryData } = useQueryOperations({
@@ -287,14 +283,25 @@ function K8sNamespacesList({
handleChangeQueryData('filters', value);
setCurrentPage(1);
logEvent('Infra Monitoring: K8s namespaces list filters applied', {});
if (value.items.length > 0) {
logEvent(InfraMonitoringEvents.FilterApplied, {
entity: InfraMonitoringEvents.K8sEntity,
page: InfraMonitoringEvents.ListPage,
category: InfraMonitoringEvents.Namespace,
});
}
},
[handleChangeQueryData],
);
useEffect(() => {
logEvent('Infra Monitoring: K8s namespaces list page visited', {});
}, []);
logEvent(InfraMonitoringEvents.PageVisited, {
entity: InfraMonitoringEvents.K8sEntity,
page: InfraMonitoringEvents.ListPage,
category: InfraMonitoringEvents.Namespace,
total: data?.payload?.data?.total,
});
}, [data?.payload?.data?.total]);
const selectedNamespaceData = useMemo(() => {
if (!selectedNamespaceUID) return null;
@@ -327,8 +334,10 @@ function K8sNamespacesList({
handleGroupByRowClick(record);
}
logEvent('Infra Monitoring: K8s namespace list item clicked', {
namespaceUID: record.namespaceUID,
logEvent(InfraMonitoringEvents.ItemClicked, {
entity: InfraMonitoringEvents.K8sEntity,
page: InfraMonitoringEvents.ListPage,
category: InfraMonitoringEvents.Namespace,
});
};
@@ -458,7 +467,11 @@ function K8sNamespacesList({
setGroupBy(groupBy);
setExpandedRowKeys([]);
logEvent('Infra Monitoring: K8s namespaces list group by changed', {});
logEvent(InfraMonitoringEvents.GroupByChanged, {
entity: InfraMonitoringEvents.K8sEntity,
page: InfraMonitoringEvents.ListPage,
category: InfraMonitoringEvents.Namespace,
});
},
[groupByFiltersData],
);
@@ -477,10 +490,10 @@ function K8sNamespacesList({
const onPaginationChange = (page: number, pageSize: number): void => {
setCurrentPage(page);
setPageSize(pageSize);
logEvent('Infra Monitoring: K8s namespaces list page number changed', {
page,
pageSize,
numberOfPages,
logEvent(InfraMonitoringEvents.PageNumberChanged, {
entity: InfraMonitoringEvents.K8sEntity,
page: InfraMonitoringEvents.ListPage,
category: InfraMonitoringEvents.Namespace,
});
};

View File

@@ -7,6 +7,7 @@ import { RadioChangeEvent } from 'antd/lib';
import logEvent from 'api/common/logEvent';
import { K8sNamespacesData } from 'api/infraMonitoring/getK8sNamespacesList';
import { VIEW_TYPES, VIEWS } from 'components/HostMetricsDetail/constants';
import { InfraMonitoringEvents } from 'constants/events';
import { QueryParams } from 'constants/query';
import {
initialQueryBuilderFormValuesMap,
@@ -152,11 +153,15 @@ function NamespaceDetails({
);
useEffect(() => {
logEvent('Infra Monitoring: Namespaces list details page visited', {
namespace: namespace?.namespaceName,
});
if (namespace) {
logEvent(InfraMonitoringEvents.PageVisited, {
entity: InfraMonitoringEvents.K8sEntity,
page: InfraMonitoringEvents.DetailedPage,
category: InfraMonitoringEvents.Namespace,
});
}
// eslint-disable-next-line react-hooks/exhaustive-deps
}, []);
}, [namespace]);
useEffect(() => {
setLogAndTracesFilters(initialFilters);
@@ -178,8 +183,10 @@ function NamespaceDetails({
const handleTabChange = (e: RadioChangeEvent): void => {
setSelectedView(e.target.value);
logEvent('Infra Monitoring: Namespaces list details tab changed', {
namespace: namespace?.namespaceName,
logEvent(InfraMonitoringEvents.TabChanged, {
entity: InfraMonitoringEvents.K8sEntity,
page: InfraMonitoringEvents.DetailedPage,
category: InfraMonitoringEvents.Namespace,
view: e.target.value,
});
};
@@ -202,8 +209,10 @@ function NamespaceDetails({
});
}
logEvent('Infra Monitoring: Namespaces list details time updated', {
namespace: namespace?.namespaceName,
logEvent(InfraMonitoringEvents.TimeUpdated, {
entity: InfraMonitoringEvents.K8sEntity,
page: InfraMonitoringEvents.DetailedPage,
category: InfraMonitoringEvents.Namespace,
interval,
view: selectedView,
});
@@ -226,9 +235,14 @@ function NamespaceDetails({
item.key?.key !== 'id' && item.key?.key !== QUERY_KEYS.K8S_NAMESPACE_NAME,
);
logEvent('Infra Monitoring: Namespaces list details logs filters applied', {
namespace: namespace?.namespaceName,
});
if (newFilters.length > 0) {
logEvent(InfraMonitoringEvents.FilterApplied, {
entity: InfraMonitoringEvents.K8sEntity,
page: InfraMonitoringEvents.DetailedPage,
category: InfraMonitoringEvents.Namespace,
view: InfraMonitoringEvents.LogsView,
});
}
return {
op: 'AND',
@@ -253,12 +267,14 @@ function NamespaceDetails({
),
);
logEvent(
'Infra Monitoring: Namespaces list details traces filters applied',
{
namespace: namespace?.namespaceName,
},
);
if (value.items.length > 0) {
logEvent(InfraMonitoringEvents.FilterApplied, {
entity: InfraMonitoringEvents.K8sEntity,
page: InfraMonitoringEvents.DetailedPage,
category: InfraMonitoringEvents.Namespace,
view: InfraMonitoringEvents.TracesView,
});
}
return {
op: 'AND',
@@ -285,12 +301,14 @@ function NamespaceDetails({
(item) => item.key?.key === QUERY_KEYS.K8S_OBJECT_NAME,
);
logEvent(
'Infra Monitoring: Namespaces list details events filters applied',
{
namespace: namespace?.namespaceName,
},
);
if (value.items.length > 0) {
logEvent(InfraMonitoringEvents.FilterApplied, {
entity: InfraMonitoringEvents.K8sEntity,
page: InfraMonitoringEvents.DetailedPage,
category: InfraMonitoringEvents.Namespace,
view: InfraMonitoringEvents.EventsView,
});
}
return {
op: 'AND',
@@ -319,8 +337,10 @@ function NamespaceDetails({
urlQuery.set(QueryParams.endTime, modalTimeRange.endTime.toString());
}
logEvent('Infra Monitoring: Namespaces list details explore clicked', {
namespace: namespace?.namespaceName,
logEvent(InfraMonitoringEvents.ExploreClicked, {
entity: InfraMonitoringEvents.K8sEntity,
page: InfraMonitoringEvents.DetailedPage,
category: InfraMonitoringEvents.Namespace,
view: selectedView,
});

View File

@@ -15,6 +15,7 @@ import {
import { ColumnType, SorterResult } from 'antd/es/table/interface';
import logEvent from 'api/common/logEvent';
import { K8sNodesListPayload } from 'api/infraMonitoring/getK8sNodesList';
import { InfraMonitoringEvents } from 'constants/events';
import { useGetK8sNodesList } from 'hooks/infraMonitoring/useGetK8sNodesList';
import { useGetAggregateKeys } from 'hooks/queryBuilder/useGetAggregateKeys';
import { useQueryBuilder } from 'hooks/queryBuilder/useQueryBuilder';
@@ -233,11 +234,6 @@ function K8sNodesList({
}
}, [selectedRowData, fetchGroupedByRowData]);
const numberOfPages = useMemo(() => Math.ceil(totalCount / pageSize), [
totalCount,
pageSize,
]);
const handleTableChange: TableProps<K8sNodesRowData>['onChange'] = useCallback(
(
pagination: TablePaginationConfig,
@@ -246,10 +242,10 @@ function K8sNodesList({
): void => {
if (pagination.current) {
setCurrentPage(pagination.current);
logEvent('Infra Monitoring: K8s nodes list page number changed', {
page: pagination.current,
pageSize,
numberOfPages,
logEvent(InfraMonitoringEvents.PageNumberChanged, {
entity: InfraMonitoringEvents.K8sEntity,
page: InfraMonitoringEvents.ListPage,
category: InfraMonitoringEvents.Node,
});
}
@@ -262,7 +258,7 @@ function K8sNodesList({
setOrderBy(null);
}
},
[numberOfPages, pageSize],
[],
);
const { handleChangeQueryData } = useQueryOperations({
@@ -276,14 +272,25 @@ function K8sNodesList({
handleChangeQueryData('filters', value);
setCurrentPage(1);
logEvent('Infra Monitoring: K8s nodes list filters applied', {});
if (value.items.length > 0) {
logEvent(InfraMonitoringEvents.FilterApplied, {
entity: InfraMonitoringEvents.K8sEntity,
page: InfraMonitoringEvents.ListPage,
category: InfraMonitoringEvents.Node,
});
}
},
[handleChangeQueryData],
);
useEffect(() => {
logEvent('Infra Monitoring: K8s nodes list page visited', {});
}, []);
logEvent(InfraMonitoringEvents.PageVisited, {
entity: InfraMonitoringEvents.K8sEntity,
page: InfraMonitoringEvents.ListPage,
category: InfraMonitoringEvents.Node,
total: data?.payload?.data?.total,
});
}, [data?.payload?.data?.total]);
const selectedNodeData = useMemo(() => {
if (!selectedNodeUID) return null;
@@ -305,8 +312,10 @@ function K8sNodesList({
handleGroupByRowClick(record);
}
logEvent('Infra Monitoring: K8s node list item clicked', {
nodeUID: record.nodeUID,
logEvent(InfraMonitoringEvents.ItemClicked, {
entity: InfraMonitoringEvents.K8sEntity,
page: InfraMonitoringEvents.ListPage,
category: InfraMonitoringEvents.Node,
});
};
@@ -436,7 +445,11 @@ function K8sNodesList({
setGroupBy(groupBy);
setExpandedRowKeys([]);
logEvent('Infra Monitoring: K8s nodes list group by changed', {});
logEvent(InfraMonitoringEvents.GroupByChanged, {
entity: InfraMonitoringEvents.K8sEntity,
page: InfraMonitoringEvents.ListPage,
category: InfraMonitoringEvents.Node,
});
},
[groupByFiltersData],
);
@@ -455,10 +468,10 @@ function K8sNodesList({
const onPaginationChange = (page: number, pageSize: number): void => {
setCurrentPage(page);
setPageSize(pageSize);
logEvent('Infra Monitoring: K8s nodes list page number changed', {
page,
pageSize,
numberOfPages,
logEvent(InfraMonitoringEvents.PageNumberChanged, {
entity: InfraMonitoringEvents.K8sEntity,
page: InfraMonitoringEvents.ListPage,
category: InfraMonitoringEvents.Node,
});
};

View File

@@ -7,6 +7,7 @@ import { RadioChangeEvent } from 'antd/lib';
import logEvent from 'api/common/logEvent';
import { K8sNodesData } from 'api/infraMonitoring/getK8sNodesList';
import { VIEW_TYPES, VIEWS } from 'components/HostMetricsDetail/constants';
import { InfraMonitoringEvents } from 'constants/events';
import { QueryParams } from 'constants/query';
import {
initialQueryBuilderFormValuesMap,
@@ -150,11 +151,15 @@ function NodeDetails({
);
useEffect(() => {
logEvent('Infra Monitoring: Nodes list details page visited', {
node: node?.nodeUID,
});
if (node) {
logEvent(InfraMonitoringEvents.PageVisited, {
entity: InfraMonitoringEvents.K8sEntity,
page: InfraMonitoringEvents.DetailedPage,
category: InfraMonitoringEvents.Node,
});
}
// eslint-disable-next-line react-hooks/exhaustive-deps
}, []);
}, [node]);
useEffect(() => {
setLogAndTracesFilters(initialFilters);
@@ -176,8 +181,10 @@ function NodeDetails({
const handleTabChange = (e: RadioChangeEvent): void => {
setSelectedView(e.target.value);
logEvent('Infra Monitoring: Nodes list details tab changed', {
node: node?.nodeUID,
logEvent(InfraMonitoringEvents.TabChanged, {
entity: InfraMonitoringEvents.K8sEntity,
page: InfraMonitoringEvents.DetailedPage,
category: InfraMonitoringEvents.Node,
view: e.target.value,
});
};
@@ -200,8 +207,10 @@ function NodeDetails({
});
}
logEvent('Infra Monitoring: Nodes list details time updated', {
node: node?.nodeUID,
logEvent(InfraMonitoringEvents.TimeUpdated, {
entity: InfraMonitoringEvents.K8sEntity,
page: InfraMonitoringEvents.DetailedPage,
category: InfraMonitoringEvents.Node,
interval,
view: selectedView,
});
@@ -224,9 +233,14 @@ function NodeDetails({
item.key?.key !== 'id' && item.key?.key !== QUERY_KEYS.K8S_NODE_NAME,
);
logEvent('Infra Monitoring: Nodes list details logs filters applied', {
node: node?.nodeUID,
});
if (newFilters.length > 0) {
logEvent(InfraMonitoringEvents.FilterApplied, {
entity: InfraMonitoringEvents.K8sEntity,
page: InfraMonitoringEvents.DetailedPage,
category: InfraMonitoringEvents.Node,
view: InfraMonitoringEvents.LogsView,
});
}
return {
op: 'AND',
@@ -253,9 +267,14 @@ function NodeDetails({
),
);
logEvent('Infra Monitoring: Nodes list details traces filters applied', {
node: node?.nodeUID,
});
if (value.items.length > 0) {
logEvent(InfraMonitoringEvents.FilterApplied, {
entity: InfraMonitoringEvents.K8sEntity,
page: InfraMonitoringEvents.DetailedPage,
category: InfraMonitoringEvents.Node,
view: InfraMonitoringEvents.TracesView,
});
}
return {
op: 'AND',
@@ -284,9 +303,14 @@ function NodeDetails({
(item) => item.key?.key === QUERY_KEYS.K8S_OBJECT_NAME,
);
logEvent('Infra Monitoring: Nodes list details events filters applied', {
node: node?.nodeUID,
});
if (value.items.length > 0) {
logEvent(InfraMonitoringEvents.FilterApplied, {
entity: InfraMonitoringEvents.K8sEntity,
page: InfraMonitoringEvents.DetailedPage,
category: InfraMonitoringEvents.Node,
view: InfraMonitoringEvents.EventsView,
});
}
return {
op: 'AND',
@@ -315,8 +339,10 @@ function NodeDetails({
urlQuery.set(QueryParams.endTime, modalTimeRange.endTime.toString());
}
logEvent('Infra Monitoring: Nodes list details explore clicked', {
node: node?.nodeUID,
logEvent(InfraMonitoringEvents.ExploreClicked, {
entity: InfraMonitoringEvents.K8sEntity,
page: InfraMonitoringEvents.DetailedPage,
category: InfraMonitoringEvents.Node,
view: selectedView,
});

View File

@@ -16,6 +16,7 @@ import set from 'api/browser/localstorage/set';
import logEvent from 'api/common/logEvent';
import { K8sPodsListPayload } from 'api/infraMonitoring/getK8sPodsList';
import classNames from 'classnames';
import { InfraMonitoringEvents } from 'constants/events';
import { useGetK8sPodsList } from 'hooks/infraMonitoring/useGetK8sPodsList';
import { useGetAggregateKeys } from 'hooks/queryBuilder/useGetAggregateKeys';
import { useQueryBuilder } from 'hooks/queryBuilder/useQueryBuilder';
@@ -248,11 +249,6 @@ function K8sPodsList({
groupBy,
]);
const numberOfPages = useMemo(() => Math.ceil(totalCount / pageSize), [
totalCount,
pageSize,
]);
const handleTableChange: TableProps<K8sPodsRowData>['onChange'] = useCallback(
(
pagination: TablePaginationConfig,
@@ -261,10 +257,10 @@ function K8sPodsList({
): void => {
if (pagination.current) {
setCurrentPage(pagination.current);
logEvent('Infra Monitoring: K8s pods list page number changed', {
page: pagination.current,
pageSize,
numberOfPages,
logEvent(InfraMonitoringEvents.PageNumberChanged, {
entity: InfraMonitoringEvents.K8sEntity,
page: InfraMonitoringEvents.ListPage,
category: InfraMonitoringEvents.Pod,
});
}
@@ -277,7 +273,7 @@ function K8sPodsList({
setOrderBy(null);
}
},
[numberOfPages, pageSize],
[],
);
const { handleChangeQueryData } = useQueryOperations({
@@ -291,7 +287,13 @@ function K8sPodsList({
handleChangeQueryData('filters', value);
setCurrentPage(1);
logEvent('Infra Monitoring: K8s pods list filters applied', {});
if (value.items.length > 0) {
logEvent(InfraMonitoringEvents.FilterApplied, {
entity: InfraMonitoringEvents.K8sEntity,
page: InfraMonitoringEvents.ListPage,
category: InfraMonitoringEvents.Pod,
});
}
},
[handleChangeQueryData],
);
@@ -317,14 +319,23 @@ function K8sPodsList({
setGroupBy(groupBy);
setExpandedRowKeys([]);
logEvent('Infra Monitoring: K8s pods list group by changed', {});
logEvent(InfraMonitoringEvents.GroupByChanged, {
entity: InfraMonitoringEvents.K8sEntity,
page: InfraMonitoringEvents.ListPage,
category: InfraMonitoringEvents.Pod,
});
},
[groupByFiltersData],
);
useEffect(() => {
logEvent('Infra Monitoring: K8s pods list page visited', {});
}, []);
logEvent(InfraMonitoringEvents.PageVisited, {
entity: InfraMonitoringEvents.K8sEntity,
page: InfraMonitoringEvents.ListPage,
category: InfraMonitoringEvents.Pod,
total: data?.payload?.data?.total,
});
}, [data?.payload?.data?.total]);
const selectedPodData = useMemo(() => {
if (!selectedPodUID) return null;
@@ -360,8 +371,10 @@ function K8sPodsList({
handleGroupByRowClick(record);
}
logEvent('Infra Monitoring: K8s pods list item clicked', {
podUID: record.podUID,
logEvent(InfraMonitoringEvents.ItemClicked, {
entity: InfraMonitoringEvents.K8sEntity,
page: InfraMonitoringEvents.ListPage,
category: InfraMonitoringEvents.Pod,
});
};
@@ -512,10 +525,10 @@ function K8sPodsList({
const onPaginationChange = (page: number, pageSize: number): void => {
setCurrentPage(page);
setPageSize(pageSize);
logEvent('Infra Monitoring: K8s pods list page number changed', {
page,
pageSize,
numberOfPages,
logEvent(InfraMonitoringEvents.PageNumberChanged, {
entity: InfraMonitoringEvents.K8sEntity,
page: InfraMonitoringEvents.ListPage,
category: InfraMonitoringEvents.Pod,
});
};

View File

@@ -8,6 +8,7 @@ import { RadioChangeEvent } from 'antd/lib';
import logEvent from 'api/common/logEvent';
import { K8sPodsData } from 'api/infraMonitoring/getK8sPodsList';
import { VIEW_TYPES, VIEWS } from 'components/HostMetricsDetail/constants';
import { InfraMonitoringEvents } from 'constants/events';
import { QueryParams } from 'constants/query';
import {
initialQueryBuilderFormValuesMap,
@@ -167,11 +168,15 @@ function PodDetails({
);
useEffect(() => {
logEvent('Infra Monitoring: Pods list details page visited', {
pod: pod?.podUID,
});
if (pod) {
logEvent(InfraMonitoringEvents.PageVisited, {
entity: InfraMonitoringEvents.K8sEntity,
page: InfraMonitoringEvents.DetailedPage,
category: InfraMonitoringEvents.Pod,
});
}
// eslint-disable-next-line react-hooks/exhaustive-deps
}, []);
}, [pod]);
useEffect(() => {
setLogsAndTracesFilters(initialFilters);
@@ -193,8 +198,10 @@ function PodDetails({
const handleTabChange = (e: RadioChangeEvent): void => {
setSelectedView(e.target.value);
logEvent('Infra Monitoring: Pods list details tab changed', {
pod: pod?.podUID,
logEvent(InfraMonitoringEvents.TabChanged, {
entity: InfraMonitoringEvents.K8sEntity,
page: InfraMonitoringEvents.DetailedPage,
category: InfraMonitoringEvents.Pod,
view: e.target.value,
});
};
@@ -217,8 +224,10 @@ function PodDetails({
});
}
logEvent('Infra Monitoring: Pods list details time updated', {
pod: pod?.podUID,
logEvent(InfraMonitoringEvents.TimeUpdated, {
entity: InfraMonitoringEvents.K8sEntity,
page: InfraMonitoringEvents.DetailedPage,
category: InfraMonitoringEvents.Pod,
interval,
view: selectedView,
});
@@ -243,9 +252,14 @@ function PodDetails({
item.key?.key !== 'id' && item.key?.key !== QUERY_KEYS.K8S_CLUSTER_NAME,
);
logEvent('Infra Monitoring: Pods list details logs filters applied', {
pod: pod?.podUID,
});
if (newFilters.length > 0) {
logEvent(InfraMonitoringEvents.FilterApplied, {
entity: InfraMonitoringEvents.K8sEntity,
page: InfraMonitoringEvents.DetailedPage,
category: InfraMonitoringEvents.Pod,
view: selectedView,
});
}
return {
op: 'AND',
@@ -274,9 +288,14 @@ function PodDetails({
].includes(item.key?.key ?? ''),
);
logEvent('Infra Monitoring: Pods list details traces filters applied', {
pod: pod?.podUID,
});
if (value.items.length > 0) {
logEvent(InfraMonitoringEvents.FilterApplied, {
entity: InfraMonitoringEvents.K8sEntity,
page: InfraMonitoringEvents.DetailedPage,
category: InfraMonitoringEvents.Pod,
view: selectedView,
});
}
return {
op: 'AND',
@@ -305,9 +324,14 @@ function PodDetails({
(item) => item.key?.key === QUERY_KEYS.K8S_OBJECT_NAME,
);
logEvent('Infra Monitoring: Pods list details events filters applied', {
pod: pod?.podUID,
});
if (value.items.length > 0) {
logEvent(InfraMonitoringEvents.FilterApplied, {
entity: InfraMonitoringEvents.K8sEntity,
page: InfraMonitoringEvents.DetailedPage,
category: InfraMonitoringEvents.Pod,
view: selectedView,
});
}
return {
op: 'AND',
@@ -336,8 +360,10 @@ function PodDetails({
urlQuery.set(QueryParams.endTime, modalTimeRange.endTime.toString());
}
logEvent('Infra Monitoring: Pods list details explore clicked', {
pod: pod?.podUID,
logEvent(InfraMonitoringEvents.ExploreClicked, {
entity: InfraMonitoringEvents.K8sEntity,
page: InfraMonitoringEvents.DetailedPage,
category: InfraMonitoringEvents.Pod,
view: selectedView,
});

View File

@@ -16,6 +16,7 @@ import { ColumnType, SorterResult } from 'antd/es/table/interface';
import logEvent from 'api/common/logEvent';
import { K8sStatefulSetsListPayload } from 'api/infraMonitoring/getsK8sStatefulSetsList';
import classNames from 'classnames';
import { InfraMonitoringEvents } from 'constants/events';
import { useGetK8sStatefulSetsList } from 'hooks/infraMonitoring/useGetK8sStatefulSetsList';
import { useGetAggregateKeys } from 'hooks/queryBuilder/useGetAggregateKeys';
import { useQueryBuilder } from 'hooks/queryBuilder/useQueryBuilder';
@@ -42,7 +43,6 @@ import {
getK8sStatefulSetsListQuery,
K8sStatefulSetsRowData,
} from './utils';
// eslint-disable-next-line sonarjs/cognitive-complexity
function K8sStatefulSetsList({
isFiltersVisible,
@@ -245,11 +245,6 @@ function K8sStatefulSetsList({
}
}, [selectedRowData, fetchGroupedByRowData]);
const numberOfPages = useMemo(() => Math.ceil(totalCount / pageSize), [
totalCount,
pageSize,
]);
const handleTableChange: TableProps<K8sStatefulSetsRowData>['onChange'] = useCallback(
(
pagination: TablePaginationConfig,
@@ -260,10 +255,10 @@ function K8sStatefulSetsList({
): void => {
if (pagination.current) {
setCurrentPage(pagination.current);
logEvent('Infra Monitoring: K8s statefulSets list page number changed', {
page: pagination.current,
pageSize,
numberOfPages,
logEvent(InfraMonitoringEvents.PageNumberChanged, {
entity: InfraMonitoringEvents.K8sEntity,
page: InfraMonitoringEvents.ListPage,
category: InfraMonitoringEvents.StatefulSet,
});
}
@@ -276,7 +271,7 @@ function K8sStatefulSetsList({
setOrderBy(null);
}
},
[numberOfPages, pageSize],
[],
);
const { handleChangeQueryData } = useQueryOperations({
@@ -290,14 +285,25 @@ function K8sStatefulSetsList({
handleChangeQueryData('filters', value);
setCurrentPage(1);
logEvent('Infra Monitoring: K8s statefulSets list filters applied', {});
if (value.items.length > 0) {
logEvent(InfraMonitoringEvents.FilterApplied, {
entity: InfraMonitoringEvents.K8sEntity,
page: InfraMonitoringEvents.ListPage,
category: InfraMonitoringEvents.StatefulSet,
});
}
},
[handleChangeQueryData],
);
useEffect(() => {
logEvent('Infra Monitoring: K8s statefulSets list page visited', {});
}, []);
logEvent(InfraMonitoringEvents.PageVisited, {
entity: InfraMonitoringEvents.K8sEntity,
page: InfraMonitoringEvents.ListPage,
category: InfraMonitoringEvents.StatefulSet,
total: data?.payload?.data?.total,
});
}, [data?.payload?.data?.total]);
const selectedStatefulSetData = useMemo(() => {
if (!selectedStatefulSetUID) return null;
@@ -328,8 +334,10 @@ function K8sStatefulSetsList({
handleGroupByRowClick(record);
}
logEvent('Infra Monitoring: K8s statefulSet list item clicked', {
statefulSetName: record.statefulsetName,
logEvent(InfraMonitoringEvents.ItemClicked, {
entity: InfraMonitoringEvents.K8sEntity,
page: InfraMonitoringEvents.ListPage,
category: InfraMonitoringEvents.StatefulSet,
});
};
@@ -458,7 +466,11 @@ function K8sStatefulSetsList({
setGroupBy(groupBy);
setExpandedRowKeys([]);
logEvent('Infra Monitoring: K8s statefulSets list group by changed', {});
logEvent(InfraMonitoringEvents.GroupByChanged, {
entity: InfraMonitoringEvents.K8sEntity,
page: InfraMonitoringEvents.ListPage,
category: InfraMonitoringEvents.StatefulSet,
});
},
[groupByFiltersData],
);
@@ -477,10 +489,10 @@ function K8sStatefulSetsList({
const onPaginationChange = (page: number, pageSize: number): void => {
setCurrentPage(page);
setPageSize(pageSize);
logEvent('Infra Monitoring: K8s statefulSets list page number changed', {
page,
pageSize,
numberOfPages,
logEvent(InfraMonitoringEvents.PageNumberChanged, {
entity: InfraMonitoringEvents.K8sEntity,
page: InfraMonitoringEvents.ListPage,
category: InfraMonitoringEvents.StatefulSet,
});
};

View File

@@ -6,6 +6,7 @@ import { Button, Divider, Drawer, Radio, Tooltip, Typography } from 'antd';
import { RadioChangeEvent } from 'antd/lib';
import logEvent from 'api/common/logEvent';
import { VIEW_TYPES, VIEWS } from 'components/HostMetricsDetail/constants';
import { InfraMonitoringEvents } from 'constants/events';
import { QueryParams } from 'constants/query';
import {
initialQueryBuilderFormValuesMap,
@@ -167,11 +168,15 @@ function StatefulSetDetails({
);
useEffect(() => {
logEvent('Infra Monitoring: StatefulSets list details page visited', {
statefulSet: statefulSet?.statefulSetName,
});
if (statefulSet) {
logEvent(InfraMonitoringEvents.PageVisited, {
entity: InfraMonitoringEvents.K8sEntity,
page: InfraMonitoringEvents.DetailedPage,
category: InfraMonitoringEvents.StatefulSet,
});
}
// eslint-disable-next-line react-hooks/exhaustive-deps
}, []);
}, [statefulSet]);
useEffect(() => {
setLogAndTracesFilters(initialFilters);
@@ -193,8 +198,10 @@ function StatefulSetDetails({
const handleTabChange = (e: RadioChangeEvent): void => {
setSelectedView(e.target.value);
logEvent('Infra Monitoring: StatefulSets list details tab changed', {
statefulSet: statefulSet?.statefulSetName,
logEvent(InfraMonitoringEvents.TabChanged, {
entity: InfraMonitoringEvents.K8sEntity,
page: InfraMonitoringEvents.DetailedPage,
category: InfraMonitoringEvents.StatefulSet,
view: e.target.value,
});
};
@@ -217,8 +224,10 @@ function StatefulSetDetails({
});
}
logEvent('Infra Monitoring: StatefulSets list details time updated', {
statefulSet: statefulSet?.statefulSetName,
logEvent(InfraMonitoringEvents.TimeUpdated, {
entity: InfraMonitoringEvents.K8sEntity,
page: InfraMonitoringEvents.DetailedPage,
category: InfraMonitoringEvents.StatefulSet,
interval,
view: selectedView,
});
@@ -242,12 +251,14 @@ function StatefulSetDetails({
item.key?.key !== QUERY_KEYS.K8S_STATEFUL_SET_NAME,
);
logEvent(
'Infra Monitoring: StatefulSets list details logs filters applied',
{
statefulSet: statefulSet?.statefulSetName,
},
);
if (newFilters.length > 0) {
logEvent(InfraMonitoringEvents.FilterApplied, {
entity: InfraMonitoringEvents.K8sEntity,
page: InfraMonitoringEvents.DetailedPage,
category: InfraMonitoringEvents.StatefulSet,
view: 'logs',
});
}
return {
op: 'AND',
@@ -272,12 +283,14 @@ function StatefulSetDetails({
),
);
logEvent(
'Infra Monitoring: StatefulSets list details traces filters applied',
{
statefulSet: statefulSet?.statefulSetName,
},
);
if (value.items.length > 0) {
logEvent(InfraMonitoringEvents.FilterApplied, {
entity: InfraMonitoringEvents.K8sEntity,
page: InfraMonitoringEvents.DetailedPage,
category: InfraMonitoringEvents.StatefulSet,
view: 'traces',
});
}
return {
op: 'AND',
@@ -304,12 +317,14 @@ function StatefulSetDetails({
(item) => item.key?.key === QUERY_KEYS.K8S_OBJECT_NAME,
);
logEvent(
'Infra Monitoring: StatefulSets list details events filters applied',
{
statefulSet: statefulSet?.statefulSetName,
},
);
if (value.items.length > 0) {
logEvent(InfraMonitoringEvents.FilterApplied, {
entity: InfraMonitoringEvents.K8sEntity,
page: InfraMonitoringEvents.DetailedPage,
category: InfraMonitoringEvents.StatefulSet,
view: 'events',
});
}
return {
op: 'AND',
@@ -338,8 +353,10 @@ function StatefulSetDetails({
urlQuery.set(QueryParams.endTime, modalTimeRange.endTime.toString());
}
logEvent('Infra Monitoring: StatefulSets list details explore clicked', {
statefulSet: statefulSet?.statefulSetName,
logEvent(InfraMonitoringEvents.ExploreClicked, {
entity: InfraMonitoringEvents.K8sEntity,
page: InfraMonitoringEvents.DetailedPage,
category: InfraMonitoringEvents.StatefulSet,
view: selectedView,
});

View File

@@ -16,6 +16,7 @@ import { ColumnType, SorterResult } from 'antd/es/table/interface';
import logEvent from 'api/common/logEvent';
import { K8sVolumesListPayload } from 'api/infraMonitoring/getK8sVolumesList';
import classNames from 'classnames';
import { InfraMonitoringEvents } from 'constants/events';
import { useGetK8sVolumesList } from 'hooks/infraMonitoring/useGetK8sVolumesList';
import { useGetAggregateKeys } from 'hooks/queryBuilder/useGetAggregateKeys';
import { useQueryBuilder } from 'hooks/queryBuilder/useQueryBuilder';
@@ -42,7 +43,6 @@ import {
K8sVolumesRowData,
} from './utils';
import VolumeDetails from './VolumeDetails';
// eslint-disable-next-line sonarjs/cognitive-complexity
function K8sVolumesList({
isFiltersVisible,
@@ -237,11 +237,6 @@ function K8sVolumesList({
}
}, [selectedRowData, fetchGroupedByRowData]);
const numberOfPages = useMemo(() => Math.ceil(totalCount / pageSize), [
totalCount,
pageSize,
]);
const handleTableChange: TableProps<K8sVolumesRowData>['onChange'] = useCallback(
(
pagination: TablePaginationConfig,
@@ -250,10 +245,10 @@ function K8sVolumesList({
): void => {
if (pagination.current) {
setCurrentPage(pagination.current);
logEvent('Infra Monitoring: K8s volumes list page number changed', {
page: pagination.current,
pageSize,
numberOfPages,
logEvent(InfraMonitoringEvents.PageNumberChanged, {
entity: InfraMonitoringEvents.K8sEntity,
page: InfraMonitoringEvents.ListPage,
category: InfraMonitoringEvents.Volumes,
});
}
@@ -266,7 +261,7 @@ function K8sVolumesList({
setOrderBy(null);
}
},
[numberOfPages, pageSize],
[],
);
const { handleChangeQueryData } = useQueryOperations({
@@ -280,14 +275,25 @@ function K8sVolumesList({
handleChangeQueryData('filters', value);
setCurrentPage(1);
logEvent('Infra Monitoring: K8s volumes list filters applied', {});
if (value.items.length > 0) {
logEvent(InfraMonitoringEvents.FilterApplied, {
entity: InfraMonitoringEvents.K8sEntity,
page: InfraMonitoringEvents.ListPage,
category: InfraMonitoringEvents.Volumes,
});
}
},
[handleChangeQueryData],
);
useEffect(() => {
logEvent('Infra Monitoring: K8s volumes list page visited', {});
}, []);
logEvent(InfraMonitoringEvents.PageVisited, {
entity: InfraMonitoringEvents.K8sEntity,
page: InfraMonitoringEvents.ListPage,
category: InfraMonitoringEvents.Volumes,
total: data?.payload?.data?.total,
});
}, [data?.payload?.data?.total]);
const selectedVolumeData = useMemo(() => {
if (!selectedVolumeUID) return null;
@@ -313,8 +319,10 @@ function K8sVolumesList({
handleGroupByRowClick(record);
}
logEvent('Infra Monitoring: K8s volume list item clicked', {
volumeUID: record.volumeUID,
logEvent(InfraMonitoringEvents.ItemClicked, {
entity: InfraMonitoringEvents.K8sEntity,
page: InfraMonitoringEvents.ListPage,
category: InfraMonitoringEvents.Volumes,
});
};
@@ -443,7 +451,11 @@ function K8sVolumesList({
setGroupBy(groupBy);
setExpandedRowKeys([]);
logEvent('Infra Monitoring: K8s volumes list group by changed', {});
logEvent(InfraMonitoringEvents.GroupByChanged, {
entity: InfraMonitoringEvents.K8sEntity,
page: InfraMonitoringEvents.ListPage,
category: InfraMonitoringEvents.Volumes,
});
},
[groupByFiltersData],
);
@@ -462,10 +474,10 @@ function K8sVolumesList({
const onPaginationChange = (page: number, pageSize: number): void => {
setCurrentPage(page);
setPageSize(pageSize);
logEvent('Infra Monitoring: K8s volumes list page number changed', {
page,
pageSize,
numberOfPages,
logEvent(InfraMonitoringEvents.PageNumberChanged, {
entity: InfraMonitoringEvents.K8sEntity,
page: InfraMonitoringEvents.ListPage,
category: InfraMonitoringEvents.Volumes,
});
};

View File

@@ -4,6 +4,7 @@ import '../../EntityDetailsUtils/entityDetails.styles.scss';
import { Color, Spacing } from '@signozhq/design-tokens';
import { Divider, Drawer, Tooltip, Typography } from 'antd';
import logEvent from 'api/common/logEvent';
import { InfraMonitoringEvents } from 'constants/events';
import { K8sCategory } from 'container/InfraMonitoringK8s/constants';
import {
CustomTimeType,
@@ -50,11 +51,15 @@ function VolumeDetails({
const isDarkMode = useIsDarkMode();
useEffect(() => {
logEvent('Infra Monitoring: Volumes list details page visited', {
volume: volume?.persistentVolumeClaimName,
});
if (volume) {
logEvent(InfraMonitoringEvents.PageVisited, {
entity: InfraMonitoringEvents.K8sEntity,
page: InfraMonitoringEvents.DetailedPage,
category: InfraMonitoringEvents.Volume,
});
}
// eslint-disable-next-line react-hooks/exhaustive-deps
}, []);
}, [volume]);
useEffect(() => {
setSelectedInterval(selectedTime as Time);
@@ -87,8 +92,10 @@ function VolumeDetails({
});
}
logEvent('Infra Monitoring: Volumes list details time updated', {
volume: volume?.persistentVolumeClaimName,
logEvent(InfraMonitoringEvents.TimeUpdated, {
entity: InfraMonitoringEvents.K8sEntity,
page: InfraMonitoringEvents.DetailedPage,
category: InfraMonitoringEvents.Volume,
interval,
});
},

View File

@@ -8,10 +8,10 @@ describe('executeSearchQueries', () => {
const firstDashboard: Dashboard = {
id: 11111,
uuid: uuid(),
created_at: '',
updated_at: '',
created_by: '',
updated_by: '',
createdAt: '',
updatedAt: '',
createdBy: '',
updatedBy: '',
data: {
title: 'first dashboard',
variables: {},
@@ -20,10 +20,10 @@ describe('executeSearchQueries', () => {
const secondDashboard: Dashboard = {
id: 22222,
uuid: uuid(),
created_at: '',
updated_at: '',
created_by: '',
updated_by: '',
createdAt: '',
updatedAt: '',
createdBy: '',
updatedBy: '',
data: {
title: 'second dashboard',
variables: {},
@@ -32,10 +32,10 @@ describe('executeSearchQueries', () => {
const thirdDashboard: Dashboard = {
id: 333333,
uuid: uuid(),
created_at: '',
updated_at: '',
created_by: '',
updated_by: '',
createdAt: '',
updatedAt: '',
createdBy: '',
updatedBy: '',
data: {
title: 'third dashboard (with special characters +?\\)',
variables: {},

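This test-fixture update tracks a rename of the dashboard audit fields from snake_case to camelCase. The relevant slice of the `Dashboard` type, as implied by the fixtures (assumed — the full type lives with the dashboard API definitions and carries more fields):

```typescript
// Assumed slice of the Dashboard type after the rename, inferred from the
// fixtures above.
interface Dashboard {
  id: number;
  uuid: string;
  createdAt: string;
  updatedAt: string;
  createdBy: string;
  updatedBy: string;
  data: {
    title: string;
    variables: Record<string, unknown>;
  };
}
```

Keeping fixtures aligned with the type is what makes the rename compile under strict TypeScript: stale snake_case fields in an object literal would fail the excess-property check.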
View File

@@ -67,7 +67,6 @@
.related-metrics-container {
width: 100%;
min-height: 300px;
max-height: 450px;
display: flex;
flex-direction: column;
gap: 10px;
@@ -102,19 +101,23 @@
}
.related-metrics-body {
padding: 10px 0;
margin-top: 20px;
max-height: 650px;
overflow-y: scroll;
.related-metrics-card-container {
min-height: 300px;
margin-bottom: 25px;
margin-bottom: 20px;
min-height: 640px;
.related-metrics-card {
// height: 400px;
display: flex;
flex-direction: column;
gap: 16px;
.related-metrics-card-error {
padding-top: 10px;
height: fit-content;
width: fit-content;
}
}
}

View File

@@ -38,8 +38,8 @@ function Explorer(): JSX.Element {
const { notifications } = useNotifications();
const { mutate: updateDashboard, isLoading } = useUpdateDashboard();
const { options } = useOptionsMenu({
storageKey: LOCALSTORAGE.TRACES_LIST_OPTIONS,
dataSource: DataSource.TRACES,
storageKey: LOCALSTORAGE.METRICS_LIST_OPTIONS,
dataSource: DataSource.METRICS,
aggregateOperator: 'noop',
});
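A one-line fix with user-visible impact: the metrics Explorer was initializing its options menu with the traces list's localStorage key and data source, so column preferences leaked between the two explorers and attribute suggestions came from the wrong source. The corrected call, with the roles of the two options spelled out (the `useOptionsMenu` internals are assumed from usage):

```typescript
const { options } = useOptionsMenu({
  // Persists the user's column selection; a key shared with another page
  // would let the two pages overwrite each other's preferences.
  storageKey: LOCALSTORAGE.METRICS_LIST_OPTIONS,
  // Drives which attribute keys are offered in the column picker.
  dataSource: DataSource.METRICS,
  aggregateOperator: 'noop',
});
```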

View File

@@ -1,9 +1,5 @@
import { Color } from '@signozhq/design-tokens';
import { Card, Col, Input, Row, Select, Skeleton } from 'antd';
import { useIsDarkMode } from 'hooks/useDarkMode';
import { useResizeObserver } from 'hooks/useDimensions';
import { getUPlotChartOptions } from 'lib/uPlotLib/getUplotChartOptions';
import { getUPlotChartData } from 'lib/uPlotLib/utils/getUplotChartData';
import { Card, Col, Empty, Input, Row, Select, Skeleton } from 'antd';
import { Gauge } from 'lucide-react';
import { useEffect, useMemo, useRef, useState } from 'react';
import { useSelector } from 'react-redux';
@@ -15,9 +11,7 @@ import { RelatedMetricsProps, RelatedMetricWithQueryResult } from './types';
import { useGetRelatedMetricsGraphs } from './useGetRelatedMetricsGraphs';
function RelatedMetrics({ metricNames }: RelatedMetricsProps): JSX.Element {
const isDarkMode = useIsDarkMode();
const graphRef = useRef<HTMLDivElement>(null);
const dimensions = useResizeObserver(graphRef);
const { maxTime, minTime } = useSelector<AppState, GlobalReducer>(
(state) => state.globalTime,
);
@@ -41,13 +35,15 @@ function RelatedMetrics({ metricNames }: RelatedMetricsProps): JSX.Element {
}
}, [metricNames]);
const { relatedMetrics, isRelatedMetricsLoading } = useGetRelatedMetricsGraphs(
{
selectedMetricName,
startMs,
endMs,
},
);
const {
relatedMetrics,
isRelatedMetricsLoading,
isRelatedMetricsError,
} = useGetRelatedMetricsGraphs({
selectedMetricName,
startMs,
endMs,
});
const metricNamesSelectOptions = useMemo(
() =>
@@ -91,31 +87,6 @@ function RelatedMetrics({ metricNames }: RelatedMetricsProps): JSX.Element {
return filteredMetrics;
}, [relatedMetrics, selectedRelatedMetric, searchValue]);
const chartData = useMemo(
() =>
filteredRelatedMetrics.map(({ queryResult }) =>
getUPlotChartData(queryResult.data?.payload),
),
[filteredRelatedMetrics],
);
const options = useMemo(
() =>
filteredRelatedMetrics.map(({ queryResult }) =>
getUPlotChartOptions({
apiResponse: queryResult.data?.payload,
isDarkMode,
dimensions,
yAxisUnit: '',
softMax: null,
softMin: null,
minTimeScale: startMs,
maxTimeScale: endMs,
}),
),
[filteredRelatedMetrics, isDarkMode, dimensions, startMs, endMs],
);
return (
<div className="related-metrics-container">
<div className="related-metrics-header">
@@ -145,20 +116,34 @@ function RelatedMetrics({ metricNames }: RelatedMetricsProps): JSX.Element {
</div>
<div className="related-metrics-body">
{isRelatedMetricsLoading && <Skeleton active />}
<Row gutter={24}>
{filteredRelatedMetrics.map((relatedMetricWithQueryResult, index) => (
<Col span={8} key={relatedMetricWithQueryResult.name}>
<Card bordered ref={graphRef} className="related-metrics-card-container">
<RelatedMetricsCard
key={relatedMetricWithQueryResult.name}
metric={relatedMetricWithQueryResult}
options={options[index]}
chartData={chartData[index]}
/>
</Card>
</Col>
))}
</Row>
{isRelatedMetricsError && (
<Empty description="Error fetching related metrics" />
)}
{!isRelatedMetricsLoading &&
!isRelatedMetricsError &&
filteredRelatedMetrics.length === 0 && (
<Empty description="No related metrics found" />
)}
{!isRelatedMetricsLoading &&
!isRelatedMetricsError &&
filteredRelatedMetrics.length > 0 && (
<Row gutter={24}>
{filteredRelatedMetrics.map((relatedMetricWithQueryResult) => (
<Col span={12} key={relatedMetricWithQueryResult.name}>
<Card
bordered
ref={graphRef}
className="related-metrics-card-container"
>
<RelatedMetricsCard
key={relatedMetricWithQueryResult.name}
metric={relatedMetricWithQueryResult}
/>
</Card>
</Col>
))}
</Row>
)}
</div>
</div>
);
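RelatedMetrics drops its hand-rolled uPlot wiring — per-card `chartData`/`options` memos sized by one shared `ResizeObserver` ref — and delegates charting to `TimeSeriesView`, while the body now renders distinct loading, error, empty, and data states (cards widen from `span={8}` to `span={12}`). The JSX interleaves those branches with layout; condensed, the branching is equivalent to:

```tsx
import { Col, Empty, Row, Skeleton } from 'antd';

import RelatedMetricsCard from './RelatedMetricsCard'; // path assumed
import { RelatedMetricWithQueryResult } from './types';

// Condensed equivalent of the render branching added above (layout omitted).
function renderBody(
  isLoading: boolean,
  isError: boolean,
  metrics: RelatedMetricWithQueryResult[],
): JSX.Element {
  if (isLoading) return <Skeleton active />;
  if (isError) return <Empty description="Error fetching related metrics" />;
  if (metrics.length === 0) {
    return <Empty description="No related metrics found" />;
  }
  return (
    <Row gutter={24}>
      {metrics.map((metric) => (
        <Col span={12} key={metric.name}>
          <RelatedMetricsCard metric={metric} />
        </Col>
      ))}
    </Row>
  );
}
```

Two small caveats: in the diff, loading and error are independent `&&` guards rather than strictly exclusive branches like this if/else, and `ref={graphRef}` survives on the Card even though the resize observer that consumed it was removed — which looks like leftover wiring.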

View File

@@ -1,14 +1,11 @@
import { Skeleton, Typography } from 'antd';
import Uplot from 'components/Uplot';
import { Empty, Skeleton, Typography } from 'antd';
import TimeSeriesView from 'container/TimeSeriesView/TimeSeriesView';
import { DataSource } from 'types/common/queryBuilder';
import DashboardsAndAlertsPopover from '../MetricDetails/DashboardsAndAlertsPopover';
import { RelatedMetricsCardProps } from './types';
function RelatedMetricsCard({
metric,
options,
chartData,
}: RelatedMetricsCardProps): JSX.Element {
function RelatedMetricsCard({ metric }: RelatedMetricsCardProps): JSX.Element {
const { queryResult } = metric;
if (queryResult.isLoading) {
@@ -27,13 +24,20 @@ function RelatedMetricsCard({
{metric.name}
</Typography.Text>
{queryResult.isLoading ? <Skeleton /> : null}
{queryResult.error ? (
{queryResult.isError ? (
<div className="related-metrics-card-error">
<Typography.Text>Something went wrong</Typography.Text>
<Empty description="Error fetching metric data" />
</div>
) : null}
{!queryResult.isLoading && !queryResult.error && (
<Uplot options={options} data={chartData} />
<TimeSeriesView
isFilterApplied={false}
isError={queryResult.isError}
isLoading={queryResult.isLoading}
data={queryResult.data}
yAxisUnit="ms"
dataSource={DataSource.METRICS}
/>
)}
<DashboardsAndAlertsPopover
dashboards={metric.dashboards}

View File

@@ -18,8 +18,6 @@ export interface RelatedMetricsProps {
export interface RelatedMetricsCardProps {
metric: RelatedMetricWithQueryResult;
options: uPlot.Options;
chartData: any[];
}
export interface UseGetRelatedMetricsGraphsProps {

View File

@@ -1,11 +1,14 @@
import { Button, Collapse, Input, Typography } from 'antd';
import { ColumnsType } from 'antd/es/table';
import { ResizeTable } from 'components/ResizeTable';
import ROUTES from 'constants/routes';
import { DataType } from 'container/LogDetailedView/TableView';
import { useSafeNavigate } from 'hooks/useSafeNavigate';
import { Search } from 'lucide-react';
import { useCallback, useMemo, useState } from 'react';
import { AllAttributesProps } from './types';
import { getMetricDetailsQuery } from './utils';
function AllAttributes({
attributes,
@@ -16,12 +19,17 @@ function AllAttributes({
'all-attributes',
);
const { safeNavigate } = useSafeNavigate();
const goToMetricsExploreWithAppliedAttribute = useCallback(
(attribute: string) => {
// TODO: Implement this when explore page is ready
console.log(metricName, attribute);
(key: string, value: string) => {
const compositeQuery = getMetricDetailsQuery(metricName, { key, value });
const encodedCompositeQuery = JSON.stringify(compositeQuery);
safeNavigate(
`${ROUTES.METRICS_EXPLORER_EXPLORER}?compositeQuery=${encodedCompositeQuery}`,
);
},
[metricName],
[metricName, safeNavigate],
);
const filteredAttributes = useMemo(
@@ -40,7 +48,10 @@ function AllAttributes({
label: attribute.key,
contribution: attribute.valueCount,
},
value: attribute.value,
value: {
key: attribute.key,
value: attribute.value,
},
}))
: [],
[filteredAttributes],
@@ -70,14 +81,14 @@ function AllAttributes({
align: 'left',
ellipsis: true,
className: 'metric-metadata-value',
render: (attributes: string[]): JSX.Element => (
render: (field: { key: string; value: string[] }): JSX.Element => (
<div className="all-attributes-value">
{attributes.map((attribute) => (
{field.value.map((attribute) => (
<Button
key={attribute}
type="text"
onClick={(): void => {
goToMetricsExploreWithAppliedAttribute(attribute);
goToMetricsExploreWithAppliedAttribute(field.key, attribute);
}}
>
<Typography.Text>{attribute}</Typography.Text>
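`goToMetricsExploreWithAppliedAttribute` serializes the composite query with `JSON.stringify` and drops it straight into the query string; if the filter values ever contain reserved URL characters, an extra `encodeURIComponent` pass is the defensive option. A hedged sketch of that variant (the helper name is ours, not the codebase's):

```typescript
import { Query } from 'types/api/queryBuilder/queryBuilderData';

// Hypothetical helper: JSON-encode, then percent-encode so characters
// like '&', '=' and spaces survive the query-string round trip.
function buildExplorerUrl(route: string, compositeQuery: Query): string {
	const encoded = encodeURIComponent(JSON.stringify(compositeQuery));
	return `${route}?compositeQuery=${encoded}`;
}
```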

View File

@@ -1,5 +1,6 @@
import { Button, Collapse, Input, Select, Typography } from 'antd';
import { ColumnsType } from 'antd/es/table';
import { Temporality } from 'api/metricsExplorer/getMetricDetails';
import { MetricType } from 'api/metricsExplorer/getMetricsList';
import { UpdateMetricMetadataProps } from 'api/metricsExplorer/updateMetricMetadata';
import { ResizeTable } from 'components/ResizeTable';
@@ -14,6 +15,7 @@ import { METRIC_TYPE_LABEL_MAP } from '../Summary/constants';
import { MetricTypeRenderer } from '../Summary/utils';
import { METRIC_METADATA_KEYS } from './constants';
import { MetadataProps } from './types';
import { determineIsMonotonic } from './utils';
function Metadata({
metricName,
@@ -25,9 +27,10 @@ function Metadata({
metricMetadata,
setMetricMetadata,
] = useState<UpdateMetricMetadataProps>({
type: metadata.metric_type,
description: metadata.description,
unit: metadata.unit,
metricType: metadata?.metric_type || MetricType.SUM,
description: metadata?.description || '',
unit: metadata?.unit || '',
temporality: metadata?.temporality || Temporality.CUMULATIVE,
});
const { notifications } = useNotifications();
const {
@@ -41,13 +44,16 @@ function Metadata({
const tableData = useMemo(
() =>
metadata
? Object.keys(metadata).map((key) => ({
key,
value: {
value: metadata[key as keyof typeof metadata],
? Object.keys(metadata)
// Filter out isMonotonic since it is derived at save time, not edited by the user
.filter((key) => key !== 'isMonotonic')
.map((key) => ({
key,
},
}))
value: {
value: metadata[key as keyof typeof metadata],
key,
},
}))
: [],
[metadata],
);
@@ -93,11 +99,28 @@ function Metadata({
value: key,
label: value,
}))}
value={metricMetadata.type}
value={metricMetadata.metricType}
onChange={(value): void => {
setMetricMetadata({
...metricMetadata,
type: value as MetricType,
metricType: value as MetricType,
});
}}
/>
);
}
if (field.key === 'temporality') {
return (
<Select
options={Object.values(Temporality).map((key) => ({
value: key,
label: key,
}))}
value={metricMetadata.temporality}
onChange={(value): void => {
setMetricMetadata({
...metricMetadata,
temporality: value as Temporality,
});
}}
/>
@@ -106,7 +129,11 @@ function Metadata({
return (
<Input
name={field.key}
value={metricMetadata[field.key as keyof UpdateMetricMetadataProps]}
value={
metricMetadata[
field.key as Exclude<keyof UpdateMetricMetadataProps, 'isMonotonic'>
]
}
onChange={(e): void => {
setMetricMetadata({ ...metricMetadata, [field.key]: e.target.value });
}}
@@ -122,7 +149,13 @@ function Metadata({
updateMetricMetadata(
{
metricName,
payload: metricMetadata,
payload: {
...metricMetadata,
isMonotonic: determineIsMonotonic(
metricMetadata.metricType,
metricMetadata.temporality,
),
},
},
{
onSuccess: (response): void => {
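On save, the mutation payload is the edited state plus a derived `isMonotonic` flag, so the flag can never drift out of sync with the chosen type/temporality pair. A compact sketch of that assembly (the interface is approximated from this diff; the real one lives in `api/metricsExplorer/updateMetricMetadata`):

```typescript
import { Temporality } from 'api/metricsExplorer/getMetricDetails';
import { MetricType } from 'api/metricsExplorer/getMetricsList';
import { determineIsMonotonic } from './utils';

// Approximated from the diff: the editable fields plus the derived flag.
interface UpdateMetricMetadataProps {
	metricType: MetricType;
	description: string;
	unit: string;
	temporality: Temporality;
	isMonotonic?: boolean;
}

// isMonotonic is computed at submit time rather than kept in state,
// which is also why the metadata table filters the key out above.
function buildPayload(
	state: UpdateMetricMetadataProps,
): UpdateMetricMetadataProps {
	return {
		...state,
		isMonotonic: determineIsMonotonic(state.metricType, state.temporality),
	};
}
```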

View File

@@ -2,9 +2,19 @@ import './MetricDetails.styles.scss';
import '../Summary/Summary.styles.scss';
import { Color } from '@signozhq/design-tokens';
import { Button, Divider, Drawer, Skeleton, Tooltip, Typography } from 'antd';
import {
Button,
Divider,
Drawer,
Empty,
Skeleton,
Tooltip,
Typography,
} from 'antd';
import ROUTES from 'constants/routes';
import { useGetMetricDetails } from 'hooks/metricsExplorer/useGetMetricDetails';
import { useIsDarkMode } from 'hooks/useDarkMode';
import { useSafeNavigate } from 'hooks/useSafeNavigate';
import { Compass, X } from 'lucide-react';
import { useCallback, useMemo } from 'react';
@@ -16,6 +26,7 @@ import { MetricDetailsProps } from './types';
import {
formatNumberToCompactFormat,
formatTimestampToReadableDate,
getMetricDetailsQuery,
} from './utils';
function MetricDetails({
@@ -24,10 +35,13 @@ function MetricDetails({
metricName,
}: MetricDetailsProps): JSX.Element {
const isDarkMode = useIsDarkMode();
const { safeNavigate } = useSafeNavigate();
const {
data,
isLoading,
isFetching,
error: metricDetailsError,
refetch: refetchMetricDetails,
} = useGetMetricDetails(metricName ?? '', {
enabled: !!metricName,
@@ -40,7 +54,7 @@ function MetricDetails({
return formatTimestampToReadableDate(metric.lastReceived);
}, [metric]);
const isMetricDetailsLoading = isLoading || isFetching || !metric;
const isMetricDetailsLoading = isLoading || isFetching;
const timeSeries = useMemo(() => {
if (!metric) return null;
@@ -50,21 +64,28 @@ function MetricDetails({
}, [metric]);
const goToMetricsExplorerwithSelectedMetric = useCallback(() => {
// TODO: Implement this when explore page is ready
console.log(metricName);
}, [metricName]);
if (metricName) {
const compositeQuery = getMetricDetailsQuery(metricName);
const encodedCompositeQuery = JSON.stringify(compositeQuery);
safeNavigate(
`${ROUTES.METRICS_EXPLORER_EXPLORER}?compositeQuery=${encodedCompositeQuery}`,
);
}
}, [metricName, safeNavigate]);
const top5Attributes = useMemo(() => {
if (!metric) return [];
const totalSum =
metric?.attributes.reduce((acc, curr) => acc + curr.valueCount, 0) || 0;
if (!metric) return [];
return metric.attributes.slice(0, 5).map((attr) => ({
return metric?.attributes.slice(0, 5).map((attr) => ({
key: attr.key,
count: attr.valueCount,
percentage: totalSum === 0 ? 0 : (attr.valueCount / totalSum) * 100,
}));
}, [metric]);
const isMetricDetailsError = metricDetailsError || !metric;
return (
<Drawer
width="60%"
@@ -77,6 +98,7 @@ function MetricDetails({
<Button
onClick={goToMetricsExplorerwithSelectedMetric}
icon={<Compass size={16} />}
disabled={!metricName}
>
Open in Explorer
</Button>
@@ -93,9 +115,11 @@ function MetricDetails({
destroyOnClose
closeIcon={<X size={16} />}
>
{isMetricDetailsLoading ? (
<Skeleton active />
) : (
{isMetricDetailsLoading && <Skeleton active />}
{isMetricDetailsError && !isMetricDetailsLoading && (
<Empty description="Error fetching metric details" />
)}
{!isMetricDetailsLoading && !isMetricDetailsError && (
<div className="metric-details-content">
<div className="metric-details-content-grid">
<div className="labels-row">

View File

@@ -2,4 +2,5 @@ export const METRIC_METADATA_KEYS = {
description: 'Description',
unit: 'Unit',
metric_type: 'Metric Type',
temporality: 'Temporality',
};

View File

@@ -19,7 +19,7 @@ export interface DashboardsAndAlertsPopoverProps {
export interface MetadataProps {
metricName: string;
metadata: MetricDetails['metadata'];
metadata: MetricDetails['metadata'] | undefined;
refetchMetricDetails: () => void;
}

View File

@@ -1,3 +1,10 @@
import { Temporality } from 'api/metricsExplorer/getMetricDetails';
import { MetricType } from 'api/metricsExplorer/getMetricsList';
import { initialQueriesMap } from 'constants/queryBuilder';
import { DataTypes } from 'types/api/queryBuilder/queryAutocompleteResponse';
import { Query } from 'types/api/queryBuilder/queryBuilderData';
import { DataSource } from 'types/common/queryBuilder';
export function formatTimestampToReadableDate(timestamp: string): string {
const date = new Date(timestamp);
// Extracting date components
@@ -19,3 +26,59 @@ export function formatNumberToCompactFormat(num: number): string {
maximumFractionDigits: 1,
}).format(num);
}
export function determineIsMonotonic(
metricType: MetricType,
temporality: Temporality,
): boolean {
if (metricType === MetricType.HISTOGRAM) {
return true;
}
if (metricType === MetricType.GAUGE || metricType === MetricType.SUMMARY) {
return false;
}
if (metricType === MetricType.SUM) {
return temporality === Temporality.CUMULATIVE;
}
return false;
}
export function getMetricDetailsQuery(
metricName: string,
filter?: { key: string; value: string },
): Query {
return {
...initialQueriesMap[DataSource.METRICS],
builder: {
queryData: [
{
...initialQueriesMap[DataSource.METRICS].builder.queryData[0],
aggregateAttribute: {
key: metricName,
type: DataTypes.String,
id: `${metricName}----string--`,
},
timeAggregation: 'rate',
spaceAggregation: 'sum',
filters: {
op: 'AND',
items: filter
? [
{
op: '=',
id: filter.key,
value: filter.value,
key: {
key: filter.key,
type: DataTypes.String,
},
},
]
: [],
},
},
],
queryFormulas: [],
},
};
}
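The table encoded by `determineIsMonotonic` is small enough to spell out: histograms are always monotonic, gauges and summaries never are, and sums are monotonic only when cumulative. A quick spot check of each branch (`Temporality.DELTA` is assumed here; only `CUMULATIVE` appears in this diff):

```typescript
import { Temporality } from 'api/metricsExplorer/getMetricDetails';
import { MetricType } from 'api/metricsExplorer/getMetricsList';
import { determineIsMonotonic } from './utils';

// One assertion per branch; DELTA is an assumed enum member.
console.assert(
	determineIsMonotonic(MetricType.HISTOGRAM, Temporality.CUMULATIVE) === true,
);
console.assert(
	determineIsMonotonic(MetricType.GAUGE, Temporality.CUMULATIVE) === false,
);
console.assert(
	determineIsMonotonic(MetricType.SUM, Temporality.CUMULATIVE) === true,
);
console.assert(
	determineIsMonotonic(MetricType.SUM, Temporality.DELTA) === false,
);
```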

View File

@@ -0,0 +1,19 @@
.loading-metrics {
padding: 24px 0;
height: 240px;
display: flex;
justify-content: center;
align-items: flex-start;
.loading-metrics-content {
display: flex;
align-items: flex-start;
flex-direction: column;
.loading-gif {
height: 72px;
margin-left: -24px;
}
}
}

View File

@@ -0,0 +1,24 @@
import './MetricsLoading.styles.scss';
import { Typography } from 'antd';
import { useTranslation } from 'react-i18next';
import { DataSource } from 'types/common/queryBuilder';
export function MetricsLoading(): JSX.Element {
const { t } = useTranslation('common');
return (
<div className="loading-metrics">
<div className="loading-metrics-content">
<img
className="loading-gif"
src="/Icons/loading-plane.gif"
alt="wait-icon"
/>
<Typography>
{t('pending_data_placeholder', { dataSource: DataSource.METRICS })}
</Typography>
</div>
</div>
);
}

View File

@@ -7,7 +7,7 @@ import { useGetMetricsTreeMap } from 'hooks/metricsExplorer/useGetMetricsTreeMap
import { useQueryBuilder } from 'hooks/queryBuilder/useQueryBuilder';
import { useQueryOperations } from 'hooks/queryBuilder/useQueryBuilderOperations';
import ErrorBoundaryFallback from 'pages/ErrorBoundaryFallback/ErrorBoundaryFallback';
import { useCallback, useMemo, useState } from 'react';
import { useCallback, useEffect, useMemo, useState } from 'react';
import { useSelector } from 'react-redux';
import { AppState } from 'store/reducers';
import { TagFilter } from 'types/api/queryBuilder/queryBuilderData';
@@ -99,6 +99,15 @@ function Summary(): JSX.Element {
enabled: !!metricsTreemapQuery,
});
// Reset the filters when the component mounts
useEffect(() => {
handleChangeQueryData('filters', {
op: 'AND',
items: [],
});
// eslint-disable-next-line react-hooks/exhaustive-deps
}, []);
const handleFilterChange = useCallback(
(value: TagFilter) => {
handleChangeQueryData('filters', value);
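Resetting the filters in a mount-only effect keeps stale filters from a previous session out of the Summary view. Extracted into a reusable hook, the same idea looks roughly like this (the hook name is ours):

```typescript
import { useEffect } from 'react';
import { TagFilter } from 'types/api/queryBuilder/queryBuilderData';

// Hypothetical hook: clear query-builder filters exactly once on mount.
export function useResetFiltersOnMount(
	handleChangeQueryData: (key: 'filters', value: TagFilter) => void,
): void {
	useEffect(() => {
		handleChangeQueryData('filters', { op: 'AND', items: [] });
		// eslint-disable-next-line react-hooks/exhaustive-deps
	}, []);
}
```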

View File

@@ -4,6 +4,7 @@ import './QueryBuilderSearchV2.styles.scss';
import { Select, Spin, Tag, Tooltip } from 'antd';
import cx from 'classnames';
import {
DATA_TYPE_VS_ATTRIBUTE_VALUES_KEY,
OPERATORS,
QUERY_BUILDER_OPERATORS_BY_TYPES,
QUERY_BUILDER_SEARCH_VALUES,
@@ -737,9 +738,11 @@ function QueryBuilderSearchV2(
values.push(tagValue[tagValue.length - 1]);
} else if (!isEmpty(tagValue)) values.push(tagValue);
values.push(
...(Object.values(attributeValues?.payload || {}).find((el) => !!el) || []),
);
if (attributeValues?.payload) {
const dataType = currentFilterItem?.key?.dataType || DataTypes.String;
const key = DATA_TYPE_VS_ATTRIBUTE_VALUES_KEY[dataType];
values.push(...(attributeValues?.payload?.[key] || []));
}
setDropdownOptions(
values.map((val) => ({
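`DATA_TYPE_VS_ATTRIBUTE_VALUES_KEY` replaces the old "first non-empty array wins" lookup with an explicit data-type-to-field mapping. Its definition is not part of this diff, but from the call sites it plausibly looks like the sketch below (enum members other than `DataTypes.String` are assumptions):

```typescript
import { DataTypes } from 'types/api/queryBuilder/queryAutocompleteResponse';

// Assumed shape, inferred from usage: map an attribute's data type to
// the matching field of the attribute-values payload.
export const DATA_TYPE_VS_ATTRIBUTE_VALUES_KEY: Partial<
	Record<
		DataTypes,
		'stringAttributeValues' | 'numberAttributeValues' | 'boolAttributeValues'
	>
> = {
	[DataTypes.String]: 'stringAttributeValues',
	[DataTypes.Float64]: 'numberAttributeValues',
	[DataTypes.Int64]: 'numberAttributeValues',
	[DataTypes.bool]: 'boolAttributeValues',
};
```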

View File

@@ -6,6 +6,7 @@ import { QueryParams } from 'constants/query';
import EmptyLogsSearch from 'container/EmptyLogsSearch/EmptyLogsSearch';
import LogsError from 'container/LogsError/LogsError';
import { LogsLoading } from 'container/LogsLoading/LogsLoading';
import { MetricsLoading } from 'container/MetricsExplorer/MetricsLoading/MetricsLoading';
import NoLogs from 'container/NoLogs/NoLogs';
import { CustomTimeType } from 'container/TopNav/DateTimeSelectionV2/config';
import { TracesLoading } from 'container/TracesExplorer/TraceLoading/TraceLoading';
@@ -131,6 +132,10 @@ function TimeSeriesView({
logEvent('Logs Explorer: Data present', {
panelType: 'TIME_SERIES',
});
} else if (dataSource === DataSource.METRICS) {
logEvent('Metrics Explorer: Data present', {
panelType: 'TIME_SERIES',
});
}
}
}, [isLoading, isError, chartData, dataSource]);
@@ -164,8 +169,9 @@ function TimeSeriesView({
ref={graphRef}
data-testid="time-series-graph"
>
{isLoading &&
(dataSource === DataSource.LOGS ? <LogsLoading /> : <TracesLoading />)}
{isLoading && dataSource === DataSource.LOGS && <LogsLoading />}
{isLoading && dataSource === DataSource.TRACES && <TracesLoading />}
{isLoading && dataSource === DataSource.METRICS && <MetricsLoading />}
{chartData &&
chartData[0] &&
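With three data sources each owning a loading component, the chained conditionals could also be a lookup table; a sketch of that alternative, reusing the imports already added in this file:

```tsx
import { LogsLoading } from 'container/LogsLoading/LogsLoading';
import { MetricsLoading } from 'container/MetricsExplorer/MetricsLoading/MetricsLoading';
import { TracesLoading } from 'container/TracesExplorer/TraceLoading/TraceLoading';
import { DataSource } from 'types/common/queryBuilder';

// One loader per data source keeps the render path to a single lookup.
const LOADING_COMPONENTS: Record<DataSource, () => JSX.Element> = {
	[DataSource.LOGS]: LogsLoading,
	[DataSource.TRACES]: TracesLoading,
	[DataSource.METRICS]: MetricsLoading,
};

// Usage inside the component body:
// const Loader = LOADING_COMPONENTS[dataSource];
// {isLoading && <Loader />}
```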

View File

@@ -1,6 +1,7 @@
/* eslint-disable sonarjs/cognitive-complexity */
import { getMetricsListFilterValues } from 'api/metricsExplorer/getMetricsListFilterValues';
import { getAttributesValues } from 'api/queryBuilder/getAttributesValues';
import { DATA_TYPE_VS_ATTRIBUTE_VALUES_KEY } from 'constants/queryBuilder';
import { DEBOUNCE_DELAY } from 'constants/queryBuilderFilterConfig';
import {
K8sCategory,
@@ -16,6 +17,7 @@ import useDebounceValue from 'hooks/useDebounce';
import { cloneDeep, isEqual, uniqWith, unset } from 'lodash-es';
import { useCallback, useEffect, useMemo, useRef, useState } from 'react';
import { useDebounce } from 'react-use';
import { IAttributeValuesResponse } from 'types/api/queryBuilder/getAttributesValues';
import {
BaseAutocompleteData,
DataTypes,
@@ -157,6 +159,28 @@ export const useFetchKeysAndValues = (
enabled: isMetricsExplorer && isQueryEnabled && !shouldUseSuggestions,
});
function isAttributeValuesResponse(
payload: any,
): payload is IAttributeValuesResponse {
return (
payload &&
(Array.isArray(payload.stringAttributeValues) ||
payload.stringAttributeValues === null ||
Array.isArray(payload.numberAttributeValues) ||
payload.numberAttributeValues === null ||
Array.isArray(payload.boolAttributeValues) ||
payload.boolAttributeValues === null)
);
}
function isMetricsListFilterValuesData(
payload: any,
): payload is { filterValues: string[] } {
return (
payload && 'filterValues' in payload && Array.isArray(payload.filterValues)
);
}
/**
* Fetches the options to be displayed based on the selected value
* @param value - the selected value
@@ -226,8 +250,17 @@ export const useFetchKeysAndValues = (
}
if (payload) {
const values = Object.values(payload).find((el) => !!el) || [];
setResults(values);
if (isAttributeValuesResponse(payload)) {
const dataType = filterAttributeKey?.dataType ?? DataTypes.String;
const key = DATA_TYPE_VS_ATTRIBUTE_VALUES_KEY[dataType];
setResults(key ? payload[key] || [] : []);
return;
}
if (isMetricsExplorer && isMetricsListFilterValuesData(payload)) {
setResults(payload.filterValues || []);
return;
}
}
} catch (e) {
console.error(e);
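The two `payload is …` predicates let TypeScript narrow the union returned by the two backends before any field is indexed, so each branch only sees the fields it actually has. The technique, reduced to its core:

```typescript
interface AttributeValuesPayload {
	stringAttributeValues: string[] | null;
}

interface FilterValuesPayload {
	filterValues: string[];
}

// Type predicate: when this returns true, TypeScript treats payload as
// FilterValuesPayload inside the guarded branch.
function isFilterValuesPayload(
	payload: AttributeValuesPayload | FilterValuesPayload,
): payload is FilterValuesPayload {
	return 'filterValues' in payload;
}

function extractValues(
	payload: AttributeValuesPayload | FilterValuesPayload,
): string[] {
	if (isFilterValuesPayload(payload)) return payload.filterValues;
	return payload.stringAttributeValues ?? [];
}
```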

View File

@@ -236,7 +236,7 @@ describe('dashboard list page', () => {
expect.objectContaining({
id: firstDashboardData.uuid,
title: firstDashboardData.data.title,
createdAt: firstDashboardData.created_at,
createdAt: firstDashboardData.createdAt,
}),
);
});

View File

@@ -34,7 +34,7 @@ export function useGetAllConfigOptions(
});
if (payload) {
const values = Object.values(payload).find((el) => !!el) || [];
const values = payload.stringAttributeValues || [];
const options: DefaultOptionType[] = values.map((val: string) => ({
label: val,
value: val,

View File

@@ -5,12 +5,12 @@ import { TabRoutes } from 'components/RouteTab/types';
import history from 'lib/history';
import { useLocation } from 'react-use';
import { Explorer, Summary, Views } from './constants';
import { Explorer, Summary } from './constants';
function MetricsExplorerPage(): JSX.Element {
const { pathname } = useLocation();
const routes: TabRoutes[] = [Summary, Explorer, Views];
const routes: TabRoutes[] = [Summary, Explorer];
return (
<div className="metrics-explorer-page">

View File

@@ -1,5 +1,6 @@
/* eslint-disable react-hooks/exhaustive-deps */
import { getAttributesValues } from 'api/queryBuilder/getAttributesValues';
import { DATA_TYPE_VS_ATTRIBUTE_VALUES_KEY } from 'constants/queryBuilder';
import { Dispatch, SetStateAction, useEffect, useState } from 'react';
import {
BaseAutocompleteData,
@@ -327,8 +328,9 @@ export function useGetAggregateValues(
});
if (payload) {
const values = Object.values(payload).find((el) => !!el) || [];
setResults(values);
const key =
DATA_TYPE_VS_ATTRIBUTE_VALUES_KEY[keyData.dataType as Partial<DataTypes>];
setResults(key ? payload[key] || [] : []);
}
} catch (e) {
console.error(e);
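This is the third call site repeating the "map the data type to a payload key, then index with a fallback" dance (QueryBuilderSearchV2 and useFetchKeysAndValues above do the same). A shared helper would keep the fallback in one place; a possible extraction (the helper name is ours):

```typescript
import { DATA_TYPE_VS_ATTRIBUTE_VALUES_KEY } from 'constants/queryBuilder';
import { IAttributeValuesResponse } from 'types/api/queryBuilder/getAttributesValues';
import { DataTypes } from 'types/api/queryBuilder/queryAutocompleteResponse';

// Hypothetical helper: resolve the payload field for a data type and
// return its values, defaulting to an empty list when absent.
export function getValuesForDataType(
	payload: IAttributeValuesResponse,
	dataType: DataTypes,
): (string | number | boolean)[] {
	const key = DATA_TYPE_VS_ATTRIBUTE_VALUES_KEY[dataType];
	return key ? payload[key] ?? [] : [];
}
```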

View File

@@ -0,0 +1,68 @@
# yaml-language-server: $schema=https://goreleaser.com/static/schema-pro.json
# vim: set ts=2 sw=2 tw=0 fo=cnqoj
version: 2
project_name: signoz-community
before:
hooks:
- go mod tidy
builds:
- id: signoz
binary: bin/signoz
main: pkg/query-service/main.go
env:
- CGO_ENABLED=1
- >-
{{- if eq .Os "linux" }}
{{- if eq .Arch "arm64" }}CC=aarch64-linux-gnu-gcc{{- end }}
{{- end }}
goos:
- linux
- darwin
goarch:
- amd64
- arm64
goamd64:
- v1
goarm64:
- v8.0
ldflags:
- -s -w
- -X github.com/SigNoz/signoz/pkg/query-service/version.version={{ .Version }}
- -X main.commit={{ .Commit }} -X main.date={{ .CommitDate }}
- -X main.builtBy=goreleaser
- -X go.signoz.io/signoz/pkg/query-service/version.buildVersion={{ .Version }}
- -X go.signoz.io/signoz/pkg/query-service/version.buildHash={{ .ShortCommit }}
- -X go.signoz.io/signoz/pkg/query-service/version.buildTime={{ .Date }}
- -X go.signoz.io/signoz/pkg/query-service/version.gitBranch={{ .Branch }}
- >-
{{- if eq .Os "linux" }}-linkmode external -extldflags '-static'{{- end }}
mod_timestamp: "{{ .CommitTimestamp }}"
tags:
- timetzdata
archives:
- formats:
- tar.gz
name_template: >-
{{ .ProjectName }}_{{- .Os }}_{{- .Arch }}
wrap_in_directory: true
strip_binary_directory: false
files:
- src: README.md
dst: README.md
- src: LICENSE
dst: LICENSE
- src: frontend/build
dst: web
- src: conf
dst: conf
- src: templates
dst: templates
release:
name_template: "v{{ .Version }}"
draft: false
prerelease: auto

View File

@@ -13,18 +13,24 @@ RUN apk update && apk add ca-certificates && rm -rf /var/cache/apk/*
# set working directory
WORKDIR /root
# copy the query-service binary
COPY pkg/query-service/bin/query-service-${TARGETOS}-${TARGETARCH} /root/query-service
# copy the signoz binary
COPY pkg/query-service/bin/signoz-${TARGETOS}-${TARGETARCH} /root/signoz
# copy prometheus YAML config
COPY pkg/query-service/config/prometheus.yml /root/config/prometheus.yml
COPY pkg/query-service/templates /root/templates
# Make query-service executable for non-root users
RUN chmod 755 /root /root/query-service
# Make signoz executable for non-root users
RUN chmod 755 /root /root/signoz
# Copy frontend
COPY frontend/build/ /etc/signoz/web/
# run the binary
ENTRYPOINT ["./query-service"]
ENTRYPOINT ["./signoz"]
CMD ["-config", "/root/config/prometheus.yml"]

View File

@@ -1,80 +0,0 @@
# Query Service
Query service is the interface between the frontend and the databases. It is written in **Golang** and has modules for all supported databases. Query service is responsible for:
- parsing the request from the frontend
- creating the relevant ClickHouse queries (and queries for all other supported databases)
- parsing responses from the databases and handling any errors
- returning the ClickHouse response in the format the frontend accepts
# Complete the ClickHouse setup locally
https://github.com/SigNoz/signoz/blob/main/CONTRIBUTING.md#42-to-run-clickhouse-setup-recommended-for-local-development
- Comment out the query-service and frontend sections in `signoz/deploy/docker/docker-compose.yaml`
- Change the alertmanager section in `signoz/deploy/docker/docker-compose.yaml` as follows:
```yaml
alertmanager:
image: signoz/alertmanager:0.23.7
volumes:
- ./data/alertmanager:/data
expose:
- "9093"
ports:
- "8080:9093"
# depends_on:
# query-service:
# condition: service_healthy
restart: unless-stopped
command:
- --queryService.url=http://172.17.0.1:8085
- --storage.path=/data
```
- Run the following:
```console
cd deploy/docker
docker compose up -d
```
#### Backend Configuration
- Open ./constants/constants.go
- Replace `const RELATIONAL_DATASOURCE_PATH = "/var/lib/signoz/signoz.db"` with `const RELATIONAL_DATASOURCE_PATH = "./signoz.db"`.
- Query Service needs the following `env` variables to run:
```
export ClickHouseUrl=tcp://localhost:9001
export STORAGE=clickhouse
export ALERTMANAGER_API_PREFIX=http://localhost:9093/api/
```
<!-- The above values are the default ones used by SigNoz and are kept at `deploy/kubernetes/platform/signoz-charts/query-service/values.yaml` -->
#### Build and Run locally
```console
cd pkg/query-service
go build -o build/query-service main.go
ClickHouseUrl=tcp://localhost:9001 STORAGE=clickhouse build/query-service --prefer-delta=true
```
# Frontend Configuration for local query-service
- Set the following environment variables
```console
export FRONTEND_API_ENDPOINT=http://localhost:8080
```
- Run the following
```console
cd signoz/frontend
yarn install
yarn dev
```
## Note:
If you use Go version 1.18 for development and contributions, please check out the following issue:
https://github.com/SigNoz/signoz/issues/1371
#### Docker Images
The Docker images of query-service are available at https://hub.docker.com/r/signoz/query-service

Some files were not shown because too many files have changed in this diff Show More