Compare commits


4 Commits

Author             SHA1        Message                                                                Date
Nityananda Gohain  9daec90260  Merge branch 'main' into issue_6584                                    2025-02-11 17:58:38 +05:30
Nityananda Gohain  9cec3b9e4a  Merge branch 'develop' into issue_6584                                 2025-02-11 17:13:34 +05:30
nityanandagohain   2df8d40bcc  fix: add comments                                                      2024-12-20 11:04:01 +07:00
nityanandagohain   161f5fd856  fix: handle exists and nexists for mat columns and top level columns  2024-12-19 13:19:02 +07:00
676 changed files with 7160 additions and 38644 deletions

View File

@@ -1,73 +0,0 @@
services:
clickhouse:
image: clickhouse/clickhouse-server:24.1.2-alpine
container_name: clickhouse
volumes:
- ${PWD}/fs/etc/clickhouse-server/config.d/config.xml:/etc/clickhouse-server/config.d/config.xml
- ${PWD}/fs/etc/clickhouse-server/users.d/users.xml:/etc/clickhouse-server/users.d/users.xml
- ${PWD}/fs/tmp/var/lib/clickhouse/:/var/lib/clickhouse/
- ${PWD}/fs/tmp/var/lib/clickhouse/user_scripts/:/var/lib/clickhouse/user_scripts/
ports:
- '127.0.0.1:8123:8123'
- '127.0.0.1:9000:9000'
tty: true
healthcheck:
test:
- CMD
- wget
- --spider
- -q
- 0.0.0.0:8123/ping
interval: 30s
timeout: 5s
retries: 3
depends_on:
- zookeeper
zookeeper:
image: bitnami/zookeeper:3.7.1
container_name: zookeeper
volumes:
- ${PWD}/fs/tmp/zookeeper:/bitnami/zookeeper
ports:
- '127.0.0.1:2181:2181'
environment:
- ALLOW_ANONYMOUS_LOGIN=yes
healthcheck:
test:
- CMD-SHELL
- curl -s -m 2 http://localhost:8080/commands/ruok | grep error | grep null
interval: 30s
timeout: 5s
retries: 3
schema-migrator-sync:
image: signoz/signoz-schema-migrator:0.111.29
container_name: schema-migrator-sync
command:
- sync
- --cluster-name=cluster
- --dsn=tcp://clickhouse:9000
- --replication=true
- --up=
depends_on:
clickhouse:
condition: service_healthy
restart: on-failure
schema-migrator-async:
image: signoz/signoz-schema-migrator:0.111.29
container_name: schema-migrator-async
command:
- async
- --cluster-name=cluster
- --dsn=tcp://clickhouse:9000
- --replication=true
- --up=
depends_on:
clickhouse:
condition: service_healthy
schema-migrator-sync:
condition: service_completed_successfully
restart: on-failure
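
For a quick sanity check of the compose stack above once it is up, the published ports can be probed directly from the host. A rough sketch, not part of the deleted file; it assumes the stack is running locally and that the bitnami ZooKeeper image keeps the `ruok` four-letter word whitelisted (its default):

```bash
# ClickHouse answers "Ok." on its HTTP ping endpoint (same check the healthcheck uses):
curl -s http://127.0.0.1:8123/ping

# ZooKeeper answers "imok" on the published client port (assumes ruok is whitelisted):
echo ruok | nc 127.0.0.1 2181
```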

View File

@@ -1,47 +0,0 @@
<clickhouse replace="true">
<logger>
<level>information</level>
<formatting>
<type>json</type>
</formatting>
<log>/var/log/clickhouse-server/clickhouse-server.log</log>
<errorlog>/var/log/clickhouse-server/clickhouse-server.err.log</errorlog>
<size>1000M</size>
<count>3</count>
</logger>
<display_name>cluster</display_name>
<listen_host>0.0.0.0</listen_host>
<http_port>8123</http_port>
<tcp_port>9000</tcp_port>
<user_directories>
<users_xml>
<path>users.xml</path>
</users_xml>
<local_directory>
<path>/var/lib/clickhouse/access/</path>
</local_directory>
</user_directories>
<distributed_ddl>
<path>/clickhouse/task_queue/ddl</path>
</distributed_ddl>
<remote_servers>
<cluster>
<shard>
<replica>
<host>clickhouse</host>
<port>9000</port>
</replica>
</shard>
</cluster>
</remote_servers>
<zookeeper>
<node>
<host>zookeeper</host>
<port>2181</port>
</node>
</zookeeper>
<macros>
<shard>01</shard>
<replica>01</replica>
</macros>
</clickhouse>
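
To confirm that the `<remote_servers>`, `<zookeeper>` and `<macros>` blocks above are picked up, the ClickHouse system tables can be read back. A hedged sketch, assuming the compose stack is running and the container is named `clickhouse` as in the compose file:

```bash
# Cluster topology as ClickHouse sees it (shard/replica per the <remote_servers> block):
docker exec clickhouse clickhouse-client -q "SELECT cluster, host_name, port FROM system.clusters"

# Macro substitutions available for replicated-table DDL (per the <macros> block):
docker exec clickhouse clickhouse-client -q "SELECT * FROM system.macros"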

View File

@@ -1,36 +0,0 @@
<?xml version="1.0"?>
<clickhouse replace="true">
<profiles>
<default>
<max_memory_usage>10000000000</max_memory_usage>
<use_uncompressed_cache>0</use_uncompressed_cache>
<load_balancing>in_order</load_balancing>
<log_queries>1</log_queries>
</default>
</profiles>
<users>
<default>
<profile>default</profile>
<networks>
<ip>::/0</ip>
</networks>
<quota>default</quota>
<access_management>1</access_management>
<named_collection_control>1</named_collection_control>
<show_named_collections>1</show_named_collections>
<show_named_collections_secrets>1</show_named_collections_secrets>
</default>
</users>
<quotas>
<default>
<interval>
<duration>3600</duration>
<queries>0</queries>
<errors>0</errors>
<result_rows>0</result_rows>
<read_rows>0</read_rows>
<execution_time>0</execution_time>
</interval>
</default>
</quotas>
</clickhouse>
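
A similar check for the user profile above: the effective settings for the `default` user can be read back. A sketch, assuming the same running container as before:

```bash
# Settings applied through the default profile in users.xml:
docker exec clickhouse clickhouse-client -q \
  "SELECT name, value FROM system.settings WHERE name IN ('max_memory_usage', 'load_balancing', 'log_queries')"
```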

View File

@@ -7,6 +7,13 @@ on:
- release/v*
jobs:
check-no-ee-references:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v4
- name: Run check
run: make check-no-ee-references
build-frontend:
runs-on: ubuntu-latest
steps:
@@ -14,12 +21,42 @@ jobs:
uses: actions/checkout@v4
- name: Install dependencies
run: cd frontend && yarn install
- name: Build frontend static files
- name: Run ESLint
run: cd frontend && npm run lint
- name: Run Jest
run: cd frontend && npm run jest
- name: TSC
run: yarn tsc
working-directory: ./frontend
- name: Build frontend docker image
shell: bash
run: |
make build-frontend-static
make build-frontend-amd64
build-signoz:
build-frontend-ee:
runs-on: ubuntu-latest
steps:
- name: Checkout code
uses: actions/checkout@v4
- name: Create .env file
run: |
echo 'INTERCOM_APP_ID="${{ secrets.INTERCOM_APP_ID }}"' > frontend/.env
echo 'SEGMENT_ID="${{ secrets.SEGMENT_ID }}"' >> frontend/.env
- name: Install dependencies
run: cd frontend && yarn install
- name: Run ESLint
run: cd frontend && npm run lint
- name: Run Jest
run: cd frontend && npm run jest
- name: TSC
run: yarn tsc
working-directory: ./frontend
- name: Build frontend docker image
shell: bash
run: |
make build-frontend-amd64
build-query-service:
runs-on: ubuntu-latest
steps:
- name: Checkout code
@@ -27,13 +64,17 @@ jobs:
- name: Setup golang
uses: actions/setup-go@v4
with:
go-version: "1.22"
- name: Build signoz image
go-version: "1.21"
- name: Run tests
shell: bash
run: |
make build-signoz-amd64
make test
- name: Build query-service image
shell: bash
run: |
make build-query-service-amd64
build-signoz-community:
build-ee-query-service:
runs-on: ubuntu-latest
steps:
- name: Checkout code
@@ -41,8 +82,8 @@ jobs:
- name: Setup golang
uses: actions/setup-go@v4
with:
go-version: "1.22"
- name: Build signoz community image
go-version: "1.21"
- name: Build EE query-service image
shell: bash
run: |
make build-signoz-community-amd64
make build-ee-query-service-amd64
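
The frontend checks in the jobs above can be reproduced locally with the same commands. A minimal sketch, assuming Node.js and Yarn are installed and you start from the repository root:

```bash
cd frontend
yarn install      # install dependencies
npm run lint      # ESLint, as in the workflow
npm run jest      # unit tests
yarn tsc          # type check
```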

17
.github/workflows/codeball.yml vendored Normal file
View File

@@ -0,0 +1,17 @@
name: Codeball
on: [pull_request]
jobs:
codeball_job:
runs-on: ubuntu-latest
name: Codeball
steps:
# Run Codeball on all new Pull Requests 🚀
# For customizations and more documentation, see https://github.com/sturdy-dev/codeball-action
- name: Codeball
uses: sturdy-dev/codeball-action@v2
with:
approvePullRequests: "true"
labelPullRequestsWhenApproved: "true"
labelPullRequestsWhenReviewNeeded: "false"
failJobsWhenReviewNeeded: "false"

71
.github/workflows/codeql.yaml vendored Normal file
View File

@@ -0,0 +1,71 @@
# For most projects, this workflow file will not need changing; you simply need
# to commit it to your repository.
#
# You may wish to alter this file to override the set of languages analyzed,
# or to provide custom queries or build logic.
#
# ******** NOTE ********
# We have attempted to detect the languages in your repository. Please check
# the `language` matrix defined below to confirm you have the correct set of
# supported CodeQL languages.
#
name: "CodeQL"
on:
push:
branches: [ main, v* ]
pull_request:
# The branches below must be a subset of the branches above
branches: [ main ]
schedule:
- cron: '32 5 * * 5'
jobs:
analyze:
name: Analyze
runs-on: ubuntu-latest
permissions:
actions: read
contents: read
security-events: write
strategy:
fail-fast: false
matrix:
language: [ 'go', 'javascript', 'python' ]
# CodeQL supports [ 'cpp', 'csharp', 'go', 'java', 'javascript', 'python' ]
# Learn more:
# https://docs.github.com/en/free-pro-team@latest/github/finding-security-vulnerabilities-and-errors-in-your-code/configuring-code-scanning#changing-the-languages-that-are-analyzed
steps:
- name: Checkout repository
uses: actions/checkout@v4
# Initializes the CodeQL tools for scanning.
- name: Initialize CodeQL
uses: github/codeql-action/init@v2
with:
languages: ${{ matrix.language }}
# If you wish to specify custom queries, you can do so here or in a config file.
# By default, queries listed here will override any specified in a config file.
# Prefix the list here with "+" to use these queries and those in the config file.
# queries: ./path/to/local/query, your-org/your-repo/queries@main
# Autobuild attempts to build any compiled languages (C/C++, C#, or Java).
# If this step fails, then you should remove it and run the build manually (see below)
- name: Autobuild
uses: github/codeql-action/autobuild@v2
# Command-line programs to run using the OS shell.
# 📚 https://git.io/JvXDl
# ✏️ If the Autobuild fails above, remove it and uncomment the following three lines
# and modify them (or add more) to build your code if your project
# uses a compiled language
#- run: |
# make bootstrap
# make release
- name: Perform CodeQL Analysis
uses: github/codeql-action/analyze@v2

View File

@@ -1,39 +0,0 @@
name: commitci
on:
pull_request:
branches:
- main
pull_request_target:
types:
- labeled
jobs:
refcheck:
if: |
(github.event_name == 'pull_request' && ! github.event.pull_request.head.repo.fork && github.event.pull_request.user.login != 'dependabot[bot]' && ! contains(github.event.pull_request.labels.*.name, 'safe-to-test')) ||
(github.event_name == 'pull_request_target' && contains(github.event.pull_request.labels.*.name, 'safe-to-test'))
runs-on: ubuntu-latest
steps:
- name: checkout
uses: actions/checkout@v4
- name: check
run: |
if grep -R --include="*.go" '.*/ee/.*' pkg/; then
echo "Error: Found references to 'ee' packages in 'pkg' directory"
exit 1
else
echo "No references to 'ee' packages found in 'pkg' directory"
fi
lint:
if: |
(github.event_name == 'pull_request' && ! github.event.pull_request.head.repo.fork && github.event.pull_request.user.login != 'dependabot[bot]' && ! contains(github.event.pull_request.labels.*.name, 'safe-to-test')) ||
(github.event_name == 'pull_request_target' && contains(github.event.pull_request.labels.*.name, 'safe-to-test'))
runs-on: ubuntu-latest
steps:
- name: checkout
uses: actions/checkout@v4
with:
fetch-depth: 0
- name: lint
uses: wagoid/commitlint-github-action@v5

13
.github/workflows/commitlint.yml vendored Normal file
View File

@@ -0,0 +1,13 @@
name: commitlint
on: [pull_request]
defaults:
run:
working-directory: frontend
jobs:
lint-commits:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v4
with:
fetch-depth: 0
- uses: wagoid/commitlint-github-action@v5

View File

@@ -0,0 +1,27 @@
on:
pull_request_target:
types:
- closed
env:
GITHUB_ACCESS_TOKEN: ${{ secrets.CI_BOT_TOKEN }}
PR_NUMBER: ${{ github.event.number }}
jobs:
create_issue_on_merge:
if: github.event.pull_request.merged == true
runs-on: ubuntu-latest
steps:
- name: Checkout Codebase
uses: actions/checkout@v4
with:
repository: signoz/gh-bot
- name: Use Node v16
uses: actions/setup-node@v4
with:
node-version: 16
- name: Setup Cache & Install Dependencies
uses: bahmutov/npm-install@v1
with:
install-command: yarn --frozen-lockfile
- name: Comment on PR
run: node create-issue.js

22
.github/workflows/dependency-review.yml vendored Normal file
View File

@@ -0,0 +1,22 @@
# Dependency Review Action
#
# This Action will scan dependency manifest files that change as part of a Pull Request, surfacing known-vulnerable versions of the packages declared or updated in the PR. Once installed, if the workflow run is marked as required, PRs introducing known-vulnerable packages will be blocked from merging.
#
# Source repository: https://github.com/actions/dependency-review-action
# Public documentation: https://docs.github.com/en/code-security/supply-chain-security/understanding-your-software-supply-chain/about-dependency-review#dependency-review-enforcement
name: 'Dependency Review'
on: [pull_request]
permissions:
contents: read
jobs:
dependency-review:
runs-on: ubuntu-latest
steps:
- name: 'Checkout Repository'
uses: actions/checkout@v4
- name: 'Dependency Review'
with:
fail-on-severity: high
uses: actions/dependency-review-action@v3

93
.github/workflows/e2e-k3s.yaml vendored Normal file
View File

@@ -0,0 +1,93 @@
name: e2e-k3s
on:
pull_request:
types: [labeled]
jobs:
e2e-k3s:
runs-on: ubuntu-latest
if: ${{ github.event.label.name == 'ok-to-test' }}
env:
DOCKER_TAG: pull-${{ github.event.number }}
steps:
- name: Checkout code
uses: actions/checkout@v4
- name: Setup golang
uses: actions/setup-go@v4
with:
go-version: "1.21"
- name: Build query-service image
env:
DEV_BUILD: 1
run: make build-ee-query-service-amd64
- name: Build frontend image
run: make build-frontend-amd64
- name: Create a k3s cluster
uses: AbsaOSS/k3d-action@v2
with:
cluster-name: "signoz"
- name: Inject the images to the cluster
run: k3d image import signoz/query-service:$DOCKER_TAG signoz/frontend:$DOCKER_TAG -c signoz
- name: Set up HotROD sample-app
run: |
# create sample-application namespace
kubectl create ns sample-application
# apply hotrod k8s manifest file
kubectl -n sample-application apply -f https://raw.githubusercontent.com/SigNoz/signoz/main/sample-apps/hotrod/hotrod.yaml
# wait for all deployments in sample-application namespace to be READY
kubectl -n sample-application get deploy --output name | xargs -r -n1 -t kubectl -n sample-application rollout status --timeout=300s
- name: Deploy the app
run: |
# add signoz helm repository
helm repo add signoz https://charts.signoz.io
# create platform namespace
kubectl create ns platform
# installing signoz using helm
helm install my-release signoz/signoz -n platform \
--wait \
--timeout 10m0s \
--set frontend.service.type=LoadBalancer \
--set queryService.image.tag=$DOCKER_TAG \
--set frontend.image.tag=$DOCKER_TAG
# get pods, services and the container images
kubectl get pods -n platform
kubectl get svc -n platform
- name: Kick off a sample-app workload
run: |
# start the locust swarm
kubectl --namespace sample-application run strzal --image=djbingham/curl \
--restart='OnFailure' -i --tty --rm --command -- curl -X POST -F \
'user_count=6' -F 'spawn_rate=2' http://locust-master:8089/swarm
- name: Get short commit SHA, display tunnel URL and IP Address of the worker node
id: get-subdomain
run: |
subdomain="pr-$(git rev-parse --short HEAD)"
echo "URL for tunnelling: https://$subdomain.loca.lt"
echo "subdomain=$subdomain" >> $GITHUB_OUTPUT
worker_ip="$(curl -4 -s ipconfig.io/ip)"
echo "Worker node IP address: $worker_ip"
- name: Start tunnel
env:
SUBDOMAIN: ${{ steps.get-subdomain.outputs.subdomain }}
run: |
npm install -g localtunnel
host=$(kubectl get svc -n platform | grep frontend | tr -s ' ' | cut -d" " -f4)
port=$(kubectl get svc -n platform | grep frontend | tr -s ' ' | cut -d" " -f5 | cut -d":" -f1)
lt -p $port -l $host -s $SUBDOMAIN
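
Once the job above has deployed the chart, the environment can be inspected with the same tooling it uses. A sketch, assuming `kubectl` and `helm` point at the k3d cluster the workflow creates:

```bash
# Pods and services of the SigNoz release installed into the platform namespace:
kubectl get pods -n platform
kubectl get svc -n platform

# Helm release status for my-release (the release name used by the workflow):
helm status my-release -n platform
```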

View File

@@ -1,36 +0,0 @@
name: goci
on:
pull_request:
branches:
- main
pull_request_target:
types:
- labeled
jobs:
test:
if: |
(github.event_name == 'pull_request' && ! github.event.pull_request.head.repo.fork && github.event.pull_request.user.login != 'dependabot[bot]' && ! contains(github.event.pull_request.labels.*.name, 'safe-to-test')) ||
(github.event_name == 'pull_request_target' && contains(github.event.pull_request.labels.*.name, 'safe-to-test'))
uses: signoz/primus.workflows/.github/workflows/go-test.yaml@main
secrets: inherit
with:
PRIMUS_REF: main
GO_TEST_CONTEXT: ./...
fmt:
if: |
(github.event_name == 'pull_request' && ! github.event.pull_request.head.repo.fork && github.event.pull_request.user.login != 'dependabot[bot]' && ! contains(github.event.pull_request.labels.*.name, 'safe-to-test')) ||
(github.event_name == 'pull_request_target' && contains(github.event.pull_request.labels.*.name, 'safe-to-test'))
uses: signoz/primus.workflows/.github/workflows/go-fmt.yaml@main
secrets: inherit
with:
PRIMUS_REF: main
lint:
if: |
(github.event_name == 'pull_request' && ! github.event.pull_request.head.repo.fork && github.event.pull_request.user.login != 'dependabot[bot]' && ! contains(github.event.pull_request.labels.*.name, 'safe-to-test')) ||
(github.event_name == 'pull_request_target' && contains(github.event.pull_request.labels.*.name, 'safe-to-test'))
uses: signoz/primus.workflows/.github/workflows/go-lint.yaml@main
secrets: inherit
with:
PRIMUS_REF: main

View File

@@ -1,155 +0,0 @@
name: gor-signoz-community
on:
push:
tags:
- 'v[0-9]+.[0-9]+.[0-9]+'
- 'v[0-9]+.[0-9]+.[0-9]+-rc.[0-9]+'
permissions:
contents: write
jobs:
prepare:
runs-on: ubuntu-latest
steps:
- name: checkout
uses: actions/checkout@v4
with:
fetch-depth: 0
- name: get-sha
shell: bash
run: |
echo "sha_short=$(git rev-parse --short HEAD)" >> $GITHUB_ENV
- name: build-frontend
run: make build-frontend-static
- name: upload-frontend-artifact
uses: actions/upload-artifact@v4
with:
name: community-frontend-build-${{ env.sha_short }}
path: frontend/build
build:
needs: prepare
strategy:
matrix:
os:
- ubuntu-latest
- macos-latest
env:
CONFIG_PATH: pkg/query-service/.goreleaser.yaml
runs-on: ${{ matrix.os }}
steps:
- name: checkout
uses: actions/checkout@v4
with:
fetch-depth: 0
- name: setup-qemu
uses: docker/setup-qemu-action@v3
if: matrix.os == 'ubuntu-latest'
- name: setup-buildx
uses: docker/setup-buildx-action@v3
if: matrix.os == 'ubuntu-latest'
- name: ghcr-login
uses: docker/login-action@v3
if: matrix.os != 'macos-latest'
with:
username: ${{ secrets.DOCKERHUB_USERNAME }}
password: ${{ secrets.DOCKERHUB_TOKEN }}
- name: setup-go
uses: actions/setup-go@v5
with:
go-version: "1.22"
- name: cross-compilation-tools
if: matrix.os == 'ubuntu-latest'
run: |
sudo apt-get update
sudo apt-get install -y gcc-aarch64-linux-gnu musl-tools
- name: get-sha
shell: bash
run: |
echo "sha_short=$(git rev-parse --short HEAD)" >> $GITHUB_ENV
- name: download-frontend-artifact
uses: actions/download-artifact@v4
with:
name: community-frontend-build-${{ env.sha_short }}
path: frontend/build
- name: cache-linux
uses: actions/cache@v4
if: matrix.os == 'ubuntu-latest'
with:
path: dist/linux
key: signoz-community-linux-${{ env.sha_short }}
- name: cache-darwin
uses: actions/cache@v4
if: matrix.os == 'macos-latest'
with:
path: dist/darwin
key: signoz-community-darwin-${{ env.sha_short }}
- name: release
uses: goreleaser/goreleaser-action@v6
with:
distribution: goreleaser-pro
version: '~> v2'
args: release --config ${{ env.CONFIG_PATH }} --clean --split
workdir: .
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
GORELEASER_KEY: ${{ secrets.GORELEASER_KEY }}
release:
runs-on: ubuntu-latest
needs: build
env:
DOCKER_CLI_EXPERIMENTAL: "enabled"
WORKDIR: pkg/query-service
steps:
- name: checkout
uses: actions/checkout@v4
with:
fetch-depth: 0
- name: setup-qemu
uses: docker/setup-qemu-action@v3
- name: setup-buildx
uses: docker/setup-buildx-action@v3
- name: cosign-installer
uses: sigstore/cosign-installer@v3.8.1
- name: download-syft
uses: anchore/sbom-action/download-syft@v0.18.0
- name: ghcr-login
uses: docker/login-action@v3
with:
username: ${{ secrets.DOCKERHUB_USERNAME }}
password: ${{ secrets.DOCKERHUB_TOKEN }}
- name: setup-go
uses: actions/setup-go@v5
with:
go-version: "1.22"
# copy the caches from build
- name: get-sha
shell: bash
run: |
echo "sha_short=$(git rev-parse --short HEAD)" >> $GITHUB_ENV
- name: cache-linux
id: cache-linux
uses: actions/cache@v4
with:
path: dist/linux
key: signoz-community-linux-${{ env.sha_short }}
- name: cache-darwin
id: cache-darwin
uses: actions/cache@v4
with:
path: dist/darwin
key: signoz-community-darwin-${{ env.sha_short }}
# release
- uses: goreleaser/goreleaser-action@v6
if: steps.cache-linux.outputs.cache-hit == 'true' && steps.cache-darwin.outputs.cache-hit == 'true' # only run if caches hit
with:
distribution: goreleaser-pro
version: '~> v2'
args: continue --merge
workdir: .
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
GORELEASER_KEY: ${{ secrets.GORELEASER_KEY }}

View File

@@ -1,168 +0,0 @@
name: gor-signoz
on:
push:
tags:
- 'v[0-9]+.[0-9]+.[0-9]+'
- 'v[0-9]+.[0-9]+.[0-9]+-rc.[0-9]+'
permissions:
contents: write
jobs:
prepare:
runs-on: ubuntu-latest
steps:
- name: checkout
uses: actions/checkout@v4
with:
fetch-depth: 0
- name: get-sha
shell: bash
run: |
echo "sha_short=$(git rev-parse --short HEAD)" >> $GITHUB_ENV
- name: dotenv-frontend
working-directory: frontend
run: |
echo 'INTERCOM_APP_ID="${{ secrets.INTERCOM_APP_ID }}"' > .env
echo 'SEGMENT_ID="${{ secrets.SEGMENT_ID }}"' >> .env
echo 'SENTRY_AUTH_TOKEN="${{ secrets.SENTRY_AUTH_TOKEN }}"' >> .env
echo 'SENTRY_ORG="${{ secrets.SENTRY_ORG }}"' >> .env
echo 'SENTRY_PROJECT_ID="${{ secrets.SENTRY_PROJECT_ID }}"' >> .env
echo 'SENTRY_DSN="${{ secrets.SENTRY_DSN }}"' >> .env
echo 'TUNNEL_URL="${{ secrets.TUNNEL_URL }}"' >> .env
echo 'TUNNEL_DOMAIN="${{ secrets.TUNNEL_DOMAIN }}"' >> .env
echo 'POSTHOG_KEY="${{ secrets.POSTHOG_KEY }}"' >> .env
echo 'CUSTOMERIO_ID="${{ secrets.CUSTOMERIO_ID }}"' >> .env
echo 'CUSTOMERIO_SITE_ID="${{ secrets.CUSTOMERIO_SITE_ID }}"' >> .env
- name: build-frontend
run: make build-frontend-static
- name: upload-frontend-artifact
uses: actions/upload-artifact@v4
with:
name: frontend-build-${{ env.sha_short }}
path: frontend/build
build:
needs: prepare
strategy:
matrix:
os:
- ubuntu-latest
- macos-latest
env:
CONFIG_PATH: ee/query-service/.goreleaser.yaml
runs-on: ${{ matrix.os }}
steps:
- name: checkout
uses: actions/checkout@v4
with:
fetch-depth: 0
- name: setup-qemu
uses: docker/setup-qemu-action@v3
if: matrix.os == 'ubuntu-latest'
- name: setup-buildx
uses: docker/setup-buildx-action@v3
if: matrix.os == 'ubuntu-latest'
- name: ghcr-login
uses: docker/login-action@v3
if: matrix.os != 'macos-latest'
with:
username: ${{ secrets.DOCKERHUB_USERNAME }}
password: ${{ secrets.DOCKERHUB_TOKEN }}
- name: setup-go
uses: actions/setup-go@v5
with:
go-version: "1.22"
- name: cross-compilation-tools
if: matrix.os == 'ubuntu-latest'
run: |
sudo apt-get update
sudo apt-get install -y gcc-aarch64-linux-gnu musl-tools
- name: get-sha
shell: bash
run: |
echo "sha_short=$(git rev-parse --short HEAD)" >> $GITHUB_ENV
- name: download-frontend-artifact
uses: actions/download-artifact@v4
with:
name: frontend-build-${{ env.sha_short }}
path: frontend/build
- name: cache-linux
uses: actions/cache@v4
if: matrix.os == 'ubuntu-latest'
with:
path: dist/linux
key: signoz-linux-${{ env.sha_short }}
- name: cache-darwin
uses: actions/cache@v4
if: matrix.os == 'macos-latest'
with:
path: dist/darwin
key: signoz-darwin-${{ env.sha_short }}
- name: release
uses: goreleaser/goreleaser-action@v6
with:
distribution: goreleaser-pro
version: '~> v2'
args: release --config ${{ env.CONFIG_PATH }} --clean --split
workdir: .
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
GORELEASER_KEY: ${{ secrets.GORELEASER_KEY }}
release:
runs-on: ubuntu-latest
needs: build
env:
DOCKER_CLI_EXPERIMENTAL: "enabled"
steps:
- name: checkout
uses: actions/checkout@v4
with:
fetch-depth: 0
- name: setup-qemu
uses: docker/setup-qemu-action@v3
- name: setup-buildx
uses: docker/setup-buildx-action@v3
- name: cosign-installer
uses: sigstore/cosign-installer@v3.8.1
- name: download-syft
uses: anchore/sbom-action/download-syft@v0.18.0
- name: ghcr-login
uses: docker/login-action@v3
with:
username: ${{ secrets.DOCKERHUB_USERNAME }}
password: ${{ secrets.DOCKERHUB_TOKEN }}
- name: setup-go
uses: actions/setup-go@v5
with:
go-version: "1.22"
# copy the caches from build
- name: get-sha
shell: bash
run: |
echo "sha_short=$(git rev-parse --short HEAD)" >> $GITHUB_ENV
- name: cache-linux
id: cache-linux
uses: actions/cache@v4
with:
path: dist/linux
key: signoz-linux-${{ env.sha_short }}
- name: cache-darwin
id: cache-darwin
uses: actions/cache@v4
with:
path: dist/darwin
key: signoz-darwin-${{ env.sha_short }}
# release
- uses: goreleaser/goreleaser-action@v6
if: steps.cache-linux.outputs.cache-hit == 'true' && steps.cache-darwin.outputs.cache-hit == 'true' # only run if caches hit
with:
distribution: goreleaser-pro
version: '~> v2'
args: continue --merge
workdir: .
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
GORELEASER_KEY: ${{ secrets.GORELEASER_KEY }}
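
The release pipelines above run GoReleaser Pro in split mode against tags. For a local dry run of the same config without tagging or publishing, a snapshot build is a common approach. A hedged sketch, assuming GoReleaser is installed (the Pro distribution would still be required if the config relies on Pro-only features such as split builds):

```bash
# Build a snapshot from the EE config used by this workflow, without publishing anything:
goreleaser release --config ee/query-service/.goreleaser.yaml --snapshot --clean
```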

View File

@@ -1,8 +1,9 @@
name: gor-histogramquantile
name: goreleaser
on:
push:
tags:
- v*
- histogram-quantile/v*
permissions:

View File

@@ -0,0 +1,32 @@
name: Jest Coverage - changed files
on:
pull_request:
branches:
- main
jobs:
build:
runs-on: ubuntu-latest
steps:
- name: Checkout
uses: actions/checkout@v4
with:
ref: "refs/heads/main"
token: ${{ secrets.GITHUB_TOKEN }} # Provide the GitHub token for authentication
- name: Fetch branch
run: git fetch origin ${{ github.event.pull_request.head.ref }}
- run: |
git checkout ${{ github.event.pull_request.head.sha }}
- uses: actions/setup-node@v4
with:
node-version: lts/*
- name: Install dependencies
run: cd frontend && npm install -g yarn && yarn
- name: npm run test:changedsince
run: cd frontend && npm run i18n:generate-hash && npm run test:changedsince
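
The coverage job above only tests files changed since `main`; the same run can be reproduced locally. A sketch, assuming Yarn is installed and `origin/main` has been fetched so Jest can compute the changed set:

```bash
cd frontend
yarn install
npm run i18n:generate-hash   # regenerate i18n hashes, as the workflow does
npm run test:changedsince    # Jest limited to files changed since main
```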

View File

@@ -1,50 +0,0 @@
name: jsci
on:
pull_request:
branches:
- main
pull_request_target:
types:
- labeled
jobs:
tsc:
if: |
(github.event_name == 'pull_request' && ! github.event.pull_request.head.repo.fork && github.event.pull_request.user.login != 'dependabot[bot]' && ! contains(github.event.pull_request.labels.*.name, 'safe-to-test')) ||
(github.event_name == 'pull_request_target' && contains(github.event.pull_request.labels.*.name, 'safe-to-test'))
runs-on: ubuntu-latest
steps:
- name: checkout
uses: actions/checkout@v4
- name: install
run: cd frontend && yarn install
- name: tsc
run: cd frontend && yarn tsc
test:
if: |
(github.event_name == 'pull_request' && ! github.event.pull_request.head.repo.fork && github.event.pull_request.user.login != 'dependabot[bot]' && ! contains(github.event.pull_request.labels.*.name, 'safe-to-test')) ||
(github.event_name == 'pull_request_target' && contains(github.event.pull_request.labels.*.name, 'safe-to-test'))
uses: signoz/primus.workflows/.github/workflows/js-test.yaml@main
secrets: inherit
with:
PRIMUS_REF: main
JS_SRC: frontend
fmt:
if: |
(github.event_name == 'pull_request' && ! github.event.pull_request.head.repo.fork && github.event.pull_request.user.login != 'dependabot[bot]' && ! contains(github.event.pull_request.labels.*.name, 'safe-to-test')) ||
(github.event_name == 'pull_request_target' && contains(github.event.pull_request.labels.*.name, 'safe-to-test'))
uses: signoz/primus.workflows/.github/workflows/js-fmt.yaml@main
secrets: inherit
with:
PRIMUS_REF: main
JS_SRC: frontend
lint:
if: |
(github.event_name == 'pull_request' && ! github.event.pull_request.head.repo.fork && github.event.pull_request.user.login != 'dependabot[bot]' && ! contains(github.event.pull_request.labels.*.name, 'safe-to-test')) ||
(github.event_name == 'pull_request_target' && contains(github.event.pull_request.labels.*.name, 'safe-to-test'))
uses: signoz/primus.workflows/.github/workflows/js-lint.yaml@main
secrets: inherit
with:
PRIMUS_REF: main
JS_SRC: frontend

24
.github/workflows/playwright.yaml vendored Normal file
View File

@@ -0,0 +1,24 @@
name: Playwright Tests
on: [pull_request]
jobs:
playwright:
defaults:
run:
working-directory: frontend
timeout-minutes: 60
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v4
- uses: actions/setup-node@v4
with:
node-version: "16.x"
- name: Install dependencies
run: CI=1 yarn install
- name: Install Playwright
run: npx playwright install --with-deps
- name: Run Playwright tests
run: yarn playwright
env:
# This might depend on your test-runner/language binding
PLAYWRIGHT_TEST_BASE_URL: ${{ secrets.PLAYWRIGHT_TEST_BASE_URL }}
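
The same Playwright suite can be pointed at any reachable SigNoz UI when run locally. A sketch where the base URL is a placeholder (the secret used in CI is not known here); `http://localhost:3301` is a hypothetical local instance:

```bash
cd frontend
CI=1 yarn install
npx playwright install --with-deps
# Substitute your own SigNoz UI URL for the placeholder below:
PLAYWRIGHT_TEST_BASE_URL=http://localhost:3301 yarn playwright
```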

View File

@@ -1,18 +0,0 @@
name: postci
on:
pull_request_target:
branches:
- main
types:
- synchronize
jobs:
remove:
if: github.event.pull_request.head.repo.fork || github.event.pull_request.user.login == 'dependabot[bot]'
uses: signoz/primus.workflows/.github/workflows/github-label.yaml@main
secrets: inherit
with:
PRIMUS_REF: main
GITHUB_LABEL_ACTION: remove
GITHUB_LABEL_NAME: safe-to-test

View File

@@ -8,27 +8,56 @@ on:
- v*
jobs:
image-build-and-push-signoz:
image-build-and-push-query-service:
runs-on: ubuntu-latest
steps:
- name: checkout
- name: Checkout code
uses: actions/checkout@v4
- name: setup
- name: Setup golang
uses: actions/setup-go@v4
with:
go-version: "1.22"
- name: setup-qemu
go-version: "1.21"
- name: Set up QEMU
uses: docker/setup-qemu-action@v3
- name: setup-buildx
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v3
with:
version: latest
- name: docker-login
- name: Login to DockerHub
uses: docker/login-action@v3
with:
username: ${{ secrets.DOCKERHUB_USERNAME }}
password: ${{ secrets.DOCKERHUB_TOKEN }}
- name: create-env-file
- uses: benjlevesque/short-sha@v2.2
id: short-sha
- name: Get branch name
id: branch-name
uses: tj-actions/branch-names@v7.0.7
- name: Set docker tag environment
run: |
if [ '${{ steps.branch-name.outputs.is_tag }}' == 'true' ]; then
tag="${{ steps.branch-name.outputs.tag }}"
tag="${tag:1}"
echo "DOCKER_TAG=${tag}-oss" >> $GITHUB_ENV
elif [ '${{ steps.branch-name.outputs.current_branch }}' == 'main' ]; then
echo "DOCKER_TAG=latest-oss" >> $GITHUB_ENV
else
echo "DOCKER_TAG=${{ steps.branch-name.outputs.current_branch }}-oss" >> $GITHUB_ENV
fi
- name: Install cross-compilation tools
run: |
set -ex
sudo apt-get update
sudo apt-get install -y gcc-aarch64-linux-gnu musl-tools
- name: Build and push docker image
run: make build-push-query-service
image-build-and-push-ee-query-service:
runs-on: ubuntu-latest
steps:
- name: Checkout code
uses: actions/checkout@v4
- name: Create .env file
run: |
echo 'INTERCOM_APP_ID="${{ secrets.INTERCOM_APP_ID }}"' > frontend/.env
echo 'SEGMENT_ID="${{ secrets.SEGMENT_ID }}"' >> frontend/.env
@@ -41,94 +70,140 @@ jobs:
echo 'POSTHOG_KEY="${{ secrets.POSTHOG_KEY }}"' >> frontend/.env
echo 'CUSTOMERIO_ID="${{ secrets.CUSTOMERIO_ID }}"' >> frontend/.env
echo 'CUSTOMERIO_SITE_ID="${{ secrets.CUSTOMERIO_SITE_ID }}"' >> frontend/.env
- uses: benjlevesque/short-sha@v2.2
id: short-sha
- name: branch-name
id: branch-name
uses: tj-actions/branch-names@v7.0.7
- name: docker-tag
run: |
if [ '${{ steps.branch-name.outputs.is_tag }}' == 'true' ]; then
echo "DOCKER_TAG=${{ steps.branch-name.outputs.tag }}" >> $GITHUB_ENV
elif [ '${{ steps.branch-name.outputs.current_branch }}' == 'main' ]; then
echo "DOCKER_TAG=latest" >> $GITHUB_ENV
else
echo "DOCKER_TAG=${{ steps.branch-name.outputs.current_branch }}" >> $GITHUB_ENV
fi
- name: cross-compilation-tools
run: |
set -ex
sudo apt-get update
sudo apt-get install -y gcc-aarch64-linux-gnu musl-tools
- name: publish-signoz
run: make build-push-signoz
- name: qs-docker-tag
run: |
if [ '${{ steps.branch-name.outputs.is_tag }}' == 'true' ]; then
tag="${{ steps.branch-name.outputs.tag }}"
tag="${tag:1}"
echo "DOCKER_TAG=${tag}" >> $GITHUB_ENV
elif [ '${{ steps.branch-name.outputs.current_branch }}' == 'main' ]; then
echo "DOCKER_TAG=latest" >> $GITHUB_ENV
else
echo "DOCKER_TAG=${{ steps.branch-name.outputs.current_branch }}" >> $GITHUB_ENV
fi
- name: publish-query-service
run: |
SIGNOZ_DOCKER_IMAGE=query-service make build-push-signoz
image-build-and-push-signoz-community:
runs-on: ubuntu-latest
steps:
- name: checkout
uses: actions/checkout@v4
- name: setup-go
- name: Setup golang
uses: actions/setup-go@v4
with:
go-version: "1.22"
- name: setup-qemu
go-version: "1.21"
- name: Set up QEMU
uses: docker/setup-qemu-action@v3
- name: setup-buildx
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v3
with:
version: latest
- name: docker-login
- name: Login to DockerHub
uses: docker/login-action@v3
with:
username: ${{ secrets.DOCKERHUB_USERNAME }}
password: ${{ secrets.DOCKERHUB_TOKEN }}
- uses: benjlevesque/short-sha@v2.2
id: short-sha
- name: branch-name
- name: Get branch name
id: branch-name
uses: tj-actions/branch-names@v7.0.7
- name: docker-tag
- name: Set docker tag environment
run: |
if [ '${{ steps.branch-name.outputs.is_tag }}' == 'true' ]; then
echo "DOCKER_TAG=${{ steps.branch-name.outputs.tag }}" >> $GITHUB_ENV
tag="${{ steps.branch-name.outputs.tag }}"
tag="${tag:1}"
echo "DOCKER_TAG=$tag" >> $GITHUB_ENV
elif [ '${{ steps.branch-name.outputs.current_branch }}' == 'main' ]; then
echo "DOCKER_TAG=latest" >> $GITHUB_ENV
else
echo "DOCKER_TAG=${{ steps.branch-name.outputs.current_branch }}" >> $GITHUB_ENV
fi
- name: cross-compilation-tools
- name: Install cross-compilation tools
run: |
set -ex
sudo apt-get update
sudo apt-get install -y gcc-aarch64-linux-gnu musl-tools
- name: publish-signoz-community
run: make build-push-signoz-community
- name: qs-docker-tag
- name: Build and push docker image
run: make build-push-ee-query-service
image-build-and-push-frontend:
runs-on: ubuntu-latest
steps:
- name: Checkout code
uses: actions/checkout@v4
- name: Install dependencies
working-directory: frontend
run: yarn install
- name: Run Prettier
working-directory: frontend
run: npm run prettify
continue-on-error: true
- name: Run ESLint
working-directory: frontend
run: npm run lint
continue-on-error: true
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v3
with:
version: latest
- name: Login to DockerHub
uses: docker/login-action@v3
with:
username: ${{ secrets.DOCKERHUB_USERNAME }}
password: ${{ secrets.DOCKERHUB_TOKEN }}
- uses: benjlevesque/short-sha@v2.2
id: short-sha
- name: Get branch name
id: branch-name
uses: tj-actions/branch-names@v7.0.7
- name: Set docker tag environment
run: |
if [ '${{ steps.branch-name.outputs.is_tag }}' == 'true' ]; then
tag="${{ steps.branch-name.outputs.tag }}"
tag="${tag:1}"
echo "DOCKER_TAG=${tag}-oss" >> $GITHUB_ENV
echo "DOCKER_TAG=$tag" >> $GITHUB_ENV
elif [ '${{ steps.branch-name.outputs.current_branch }}' == 'main' ]; then
echo "DOCKER_TAG=latest-oss" >> $GITHUB_ENV
echo "DOCKER_TAG=latest" >> $GITHUB_ENV
else
echo "DOCKER_TAG=${{ steps.branch-name.outputs.current_branch }}-oss" >> $GITHUB_ENV
echo "DOCKER_TAG=${{ steps.branch-name.outputs.current_branch }}" >> $GITHUB_ENV
fi
- name: publish-query-service-oss
- name: Build and push docker image
run: make build-push-frontend
image-build-and-push-frontend-ee:
runs-on: ubuntu-latest
steps:
- name: Checkout code
uses: actions/checkout@v4
- name: Create .env file
run: |
SIGNOZ_COMMUNITY_DOCKER_IMAGE=query-service make build-push-signoz-community
echo 'INTERCOM_APP_ID="${{ secrets.INTERCOM_APP_ID }}"' > frontend/.env
echo 'SEGMENT_ID="${{ secrets.SEGMENT_ID }}"' >> frontend/.env
echo 'SENTRY_AUTH_TOKEN="${{ secrets.SENTRY_AUTH_TOKEN }}"' >> frontend/.env
echo 'SENTRY_ORG="${{ secrets.SENTRY_ORG }}"' >> frontend/.env
echo 'SENTRY_PROJECT_ID="${{ secrets.SENTRY_PROJECT_ID }}"' >> frontend/.env
echo 'SENTRY_DSN="${{ secrets.SENTRY_DSN }}"' >> frontend/.env
echo 'TUNNEL_URL="${{ secrets.TUNNEL_URL }}"' >> frontend/.env
echo 'TUNNEL_DOMAIN="${{ secrets.TUNNEL_DOMAIN }}"' >> frontend/.env
echo 'POSTHOG_KEY="${{ secrets.POSTHOG_KEY }}"' >> frontend/.env
- name: Install dependencies
working-directory: frontend
run: yarn install
- name: Run Prettier
working-directory: frontend
run: npm run prettify
continue-on-error: true
- name: Run ESLint
working-directory: frontend
run: npm run lint
continue-on-error: true
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v3
with:
version: latest
- name: Login to DockerHub
uses: docker/login-action@v3
with:
username: ${{ secrets.DOCKERHUB_USERNAME }}
password: ${{ secrets.DOCKERHUB_TOKEN }}
- uses: benjlevesque/short-sha@v2.2
id: short-sha
- name: Get branch name
id: branch-name
uses: tj-actions/branch-names@v7.0.7
- name: Set docker tag environment
run: |
if [ '${{ steps.branch-name.outputs.is_tag }}' == 'true' ]; then
tag="${{ steps.branch-name.outputs.tag }}"
tag="${tag:1}"
echo "DOCKER_TAG=${tag}-ee" >> $GITHUB_ENV
elif [ '${{ steps.branch-name.outputs.current_branch }}' == 'main' ]; then
echo "DOCKER_TAG=latest-ee" >> $GITHUB_ENV
else
echo "DOCKER_TAG=${{ steps.branch-name.outputs.current_branch }}-ee" >> $GITHUB_ENV
fi
- name: Build and push docker image
run: make build-push-frontend

View File

@@ -8,6 +8,12 @@ jobs:
remove:
runs-on: ubuntu-latest
steps:
- name: Remove label ok-to-test from PR
uses: buildsville/add-remove-label@v2.0.0
with:
label: ok-to-test
type: remove
token: ${{ secrets.GITHUB_TOKEN }}
- name: Remove label testing-deploy from PR
uses: buildsville/add-remove-label@v2.0.0
with:

25
.github/workflows/sonar.yml vendored Normal file
View File

@@ -0,0 +1,25 @@
name: sonar
on:
pull_request:
branches:
- main
paths:
- 'frontend/**'
defaults:
run:
working-directory: frontend
jobs:
sonar-analysis:
runs-on: ubuntu-latest
steps:
- name: Checkout code
uses: actions/checkout@v4
with:
fetch-depth: 0
- name: Sonar analysis
uses: sonarsource/sonarcloud-github-action@master
with:
projectBaseDir: frontend
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
SONAR_TOKEN: ${{ secrets.SONAR_TOKEN }}

View File

@@ -49,7 +49,8 @@ jobs:
git fetch origin
git checkout ${GITHUB_BRANCH}
git pull
make build-signoz-amd64
make build-ee-query-service-amd64
make build-frontend-amd64
make run-testing
EOF
gcloud beta compute ssh ${GCP_INSTANCE} --zone ${GCP_ZONE} --ssh-key-expire-after=15m --tunnel-through-iap --project ${GCP_PROJECT} --command "${COMMAND}"

View File

@@ -49,7 +49,8 @@ jobs:
# This is added to include the scenario when new commit in PR is force-pushed
git branch -D ${GITHUB_BRANCH}
git checkout --track origin/${GITHUB_BRANCH}
make build-signoz-amd64
make build-ee-query-service-amd64
make build-frontend-amd64
make run-testing
EOF
gcloud beta compute ssh ${GCP_INSTANCE} --zone ${GCP_ZONE} --ssh-key-expire-after=15m --tunnel-through-iap --project ${GCP_PROJECT} --command "${COMMAND}"

6
.gitignore vendored
View File

@@ -76,9 +76,3 @@ dist/
# ignore user_scripts that is fetched by init-clickhouse
deploy/common/clickhouse/user_scripts/
queries.active
# .devenv tmp files
.devenv/**/tmp/**

View File

@@ -16,8 +16,10 @@ tasks:
yarn dev
ports:
- port: 8080
- port: 3301
onOpen: open-browser
- port: 8080
onOpen: ignore
- port: 9000
onOpen: ignore
- port: 8123

View File

@@ -1,79 +1,389 @@
# Contributing Guidelines
Thank you for your interest in contributing to our project! We greatly value feedback and contributions from our community. This document will guide you through the contribution process.
## Welcome to SigNoz Contributing section 🎉
## How can I contribute?
Hi there! We're thrilled that you'd like to contribute to this project, thank you for your interest. Whether it's a bug report, new feature, correction, or additional documentation, we greatly value feedback and contributions from our community.
### Finding Issues to Work On
- Check our [existing open issues](https://github.com/SigNoz/signoz/issues?q=is%3Aopen+is%3Aissue)
- Look for [good first issues](https://github.com/SigNoz/signoz/issues?q=is%3Aissue+is%3Aopen+label%3A%22good+first+issue%22) to start with
- Review [recently closed issues](https://github.com/SigNoz/signoz/issues?q=is%3Aissue+is%3Aclosed) to avoid duplicates
Please read through this document before submitting any issues or pull requests to ensure we have all the necessary information to effectively respond to your bug report or contribution.
### Types of Contributions
- We accept contributions made to the [SigNoz `develop` branch]()
- Find all SigNoz Docker Hub images here
- [signoz/frontend](https://hub.docker.com/r/signoz/frontend)
- [signoz/query-service](https://hub.docker.com/r/signoz/query-service)
- [signoz/otelcontribcol](https://hub.docker.com/r/signoz/otelcontribcol)
1. **Report Bugs**: Use our [Bug Report template](https://github.com/SigNoz/signoz/issues/new?assignees=&labels=&template=bug_report.md&title=)
2. **Request Features**: Submit using [Feature Request template](https://github.com/SigNoz/signoz/issues/new?assignees=&labels=&template=feature_request.md&title=)
3. **Improve Documentation**: Create an issue with the `documentation` label
4. **Report Performance Issues**: Use our [Performance Issue template](https://github.com/SigNoz/signoz/issues/new?assignees=&labels=&template=performance-issue-report.md&title=)
5. **Request Dashboards**: Submit using [Dashboard Request template](https://github.com/SigNoz/signoz/issues/new?assignees=&labels=dashboard-template&projects=&template=request_dashboard.md&title=%5BDashboard+Request%5D+)
6. **Report Security Issues**: Follow our [Security Policy](https://github.com/SigNoz/signoz/security/policy)
7. **Join Discussions**: Participate in [project discussions](https://github.com/SigNoz/signoz/discussions)
## Finding contributions to work on 💬
### Creating Helpful Issues
Looking at the existing issues is a great way to find something to contribute on.
Also, have a look at these [good first issues label](https://github.com/SigNoz/signoz/issues?q=is%3Aissue+is%3Aopen+label%3A%22good+first+issue%22) to start with.
When creating issues, include:
- **For Feature Requests**:
- Clear use case and requirements
- Proposed solution or improvement
- Any open questions or considerations
## Sections:
- [General Instructions](#1-general-instructions-)
- [For Creating Issue(s)](#11-for-creating-issues)
- [For Pull Requests(s)](#12-for-pull-requests)
- [How to Contribute](#2-how-to-contribute-%EF%B8%8F)
- [Develop Frontend](#3-develop-frontend-)
- [Contribute to Frontend with Docker installation of SigNoz](#31-contribute-to-frontend-with-docker-installation-of-signoz)
- [Contribute to Frontend without installing SigNoz backend](#32-contribute-to-frontend-without-installing-signoz-backend)
- [Contribute to Backend (Query-Service)](#4-contribute-to-backend-query-service-)
- [To run ClickHouse setup](#41-to-run-clickhouse-setup-recommended-for-local-development)
- [Contribute to SigNoz Helm Chart](#5-contribute-to-signoz-helm-chart-)
- [To run helm chart for local development](#51-to-run-helm-chart-for-local-development)
- [Contribute to Dashboards](#6-contribute-to-dashboards-)
- [Other Ways to Contribute](#other-ways-to-contribute)
- **For Bug Reports**:
- Step-by-step reproduction steps
- Version information
- Relevant environment details
- Any modifications you've made
- Expected vs actual behavior
# 1. General Instructions 📝
### Submitting Pull Requests
## 1.1 For Creating Issue(s)
Before making any significant changes and before filing a new issue, please check [existing open](https://github.com/SigNoz/signoz/issues?q=is%3Aopen+is%3Aissue), or [recently closed](https://github.com/SigNoz/signoz/issues?q=is%3Aissue+is%3Aclosed) issues to make sure somebody else hasn't already reported the issue. Please try to include as much information as you can.
1. **Development**:
- Setup your [development environment](docs/contributing/development.md)
- Work against the latest `main` branch
- Focus on specific changes
- Ensure all tests pass locally
- Follow our [commit convention](#commit-convention)
**Issue Types** - [Bug Report](https://github.com/SigNoz/signoz/issues/new?assignees=&labels=&template=bug_report.md&title=) | [Feature Request](https://github.com/SigNoz/signoz/issues/new?assignees=&labels=&template=feature_request.md&title=) | [Performance Issue Report](https://github.com/SigNoz/signoz/issues/new?assignees=&labels=&template=performance-issue-report.md&title=) | [Request Dashboard](https://github.com/SigNoz/signoz/issues/new?assignees=&labels=dashboard-template&projects=&template=request_dashboard.md&title=%5BDashboard+Request%5D+) | [Report a Security Vulnerability](https://github.com/SigNoz/signoz/security/policy)
2. **Submit PR**:
- Ensure your branch can be auto-merged
- Address any CI failures
- Respond to review comments promptly
#### Details like these are incredibly useful:
For substantial changes, please split your contribution into multiple PRs:
- **Requirement** - what kind of use case are you trying to solve?
- **Proposal** - what do you suggest to solve the problem or improve the existing
situation?
- Any open questions to address❓
1. First PR: Overall structure (README, configurations, interfaces)
2. Second PR: Core implementation (split further if needed)
3. Final PR: Documentation updates and end-to-end tests
#### If you are reporting a bug, details like these are incredibly useful:
### Commit Convention
- A reproducible test case or series of steps.
- The version of our code being used.
- Any modifications you've made relevant to the bug🐞.
- Anything unusual about your environment or deployment.
We follow [Conventional Commits](https://www.conventionalcommits.org/en/v1.0.0/). All commits and PRs should include type specifiers (e.g., `feat:`, `fix:`, `docs:`, etc.).
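
For illustration, hypothetical commit messages that satisfy this convention (the `FE` scope mirrors the `fix(FE):` example used elsewhere in this guide):

```bash
# Hypothetical examples only:
git commit -m "feat(FE): add latency chart to the services page"
git commit -m "docs: clarify local ClickHouse setup"
```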
Discussing your proposed changes ahead of time will make the contribution
process smooth for everyone 🙌.
## How can I contribute to other repositories?
**[`^top^`](#contributing-guidelines)**
<hr>
You can find other repositories in the [SigNoz](https://github.com/SigNoz) organization to contribute to. Here is a list of **highlighted** repositories:
## 1.2 For Pull Request(s)
- [charts](https://github.com/SigNoz/charts)
- [dashboards](https://github.com/SigNoz/dashboards)
Contributions via pull requests are much appreciated. Once the approach is agreed upon ✅, make your changes and open a Pull Request(s).
Before sending us a pull request, please ensure that,
Each repository has its own contributing guidelines. Please refer to the guidelines of the repository you want to contribute to.
- Fork the SigNoz repo on GitHub, clone it on your machine.
- Create a branch with your changes.
- You are working against the latest source on the `develop` branch.
- Modify the source; please focus only on the specific change you are contributing.
- Ensure local tests pass.
- Commit to your fork using clear commit messages.
- Send us a pull request, answering any default questions in the pull request interface.
- Pay attention to any automated CI failures reported in the pull request, and stay involved in the conversation
- Once you've pushed your commits to GitHub, make sure that your branch can be auto-merged (there are no merge conflicts). If not, on your computer, merge main into your branch, resolve any merge conflicts, make sure everything still runs correctly and passes all the tests, and then push up those changes.
- Once the change has been approved and merged, we will inform you in a comment.
## How can I get help?
Need assistance? Join our Slack community:
- [`#contributing`](https://signoz-community.slack.com/archives/C01LWQ8KS7M)
- [`#contributing-frontend`](https://signoz-community.slack.com/archives/C027134DM8B)
GitHub provides additional document on [forking a repository](https://help.github.com/articles/fork-a-repo/) and
[creating a pull request](https://help.github.com/articles/creating-a-pull-request/).
## Where do I go from here?
**Note:** Unless your change is small, **please** consider submitting different Pull Request(s):
- Set up your [development environment](docs/contributing/development.md)
* 1⃣ First PR should include the overall structure of the new component:
* Readme, configuration, interfaces or base classes, etc...
* This PR is usually trivial to review, so the size limit does not apply to
it.
* 2⃣ Second PR should include the concrete implementation of the component. If the
size of this PR is larger than the recommended size, consider **splitting** ⚔️ it into
multiple PRs.
* If there are multiple sub-component then ideally each one should be implemented as
a **separate** pull request.
* Last PR should include changes to **any user-facing documentation.** And should include
end-to-end tests if applicable. The component must be enabled
only after sufficient testing, and there is enough confidence in the
stability and quality of the component.
You can always reach out to `ankit@signoz.io` to understand more about the repo and product. We are very responsive over email and [slack community](https://signoz.io/slack).
### Pointers:
- If you find any **bugs** → please create an [**issue.**](https://github.com/SigNoz/signoz/issues/new?assignees=&labels=&template=bug_report.md&title=)
- If you find anything **missing** in documentation → you can create an issue with the label **`documentation`**.
- If you want to build any **new feature** → please create an [issue with the label **`enhancement`**.](https://github.com/SigNoz/signoz/issues/new?assignees=&labels=&template=feature_request.md&title=)
- If you want to **discuss** something about the product, start a new [**discussion**.](https://github.com/SigNoz/signoz/discussions)
- If you want to request a new **dashboard template** → please create an issue [here](https://github.com/SigNoz/signoz/issues/new?assignees=&labels=dashboard-template&projects=&template=request_dashboard.md&title=%5BDashboard+Request%5D+).
<hr>
### Conventions to follow when submitting Commits and Pull Request(s).
We try to follow [Conventional Commits](https://www.conventionalcommits.org/en/v1.0.0/), more specifically the commits and PRs **should have type specifiers** prefixed in the name. [This](https://www.conventionalcommits.org/en/v1.0.0/#specification) should give you a better idea.
e.g. If you are submitting a fix for an issue in frontend, the PR name should be prefixed with **`fix(FE):`**
- Follow [GitHub Flow](https://guides.github.com/introduction/flow/) guidelines for your contribution flows.
- Feel free to ping us on [`#contributing`](https://signoz-community.slack.com/archives/C01LWQ8KS7M) or [`#contributing-frontend`](https://signoz-community.slack.com/archives/C027134DM8B) on our slack community if you need any help on this :)
**[`^top^`](#contributing-guidelines)**
<hr>
# 2. How to Contribute 🙋🏻‍♂️
#### There are primarily 2 areas in which you can contribute to SigNoz
- [**Frontend**](#3-develop-frontend-) (Written in Typescript, React)
- [**Backend**](#4-contribute-to-backend-query-service-) (Query Service, written in Go)
- [**Dashboard Templates**](#6-contribute-to-dashboards-) (JSON dashboard templates built with SigNoz)
Depending upon your area of expertise & interest, you can choose one or more to contribute. Below are detailed instructions to contribute in each area.
**Please note:** If you want to work on an issue, please add a brief description of your solution on the issue before starting work on it.
**[`^top^`](#contributing-guidelines)**
<hr>
# 3. Develop Frontend 🌚
**Need to Update: [https://github.com/SigNoz/signoz/tree/main/frontend](https://github.com/SigNoz/signoz/tree/main/frontend)**
Also, have a look at [Frontend README.md](https://github.com/SigNoz/signoz/blob/main/frontend/README.md) sections for more info on how to setup SigNoz frontend locally (with and without Docker).
## 3.1 Contribute to Frontend with Docker installation of SigNoz
- Clone the SigNoz repository and cd into signoz directory,
```
git clone https://github.com/SigNoz/signoz.git && cd signoz
```
- Comment out `frontend` service section at [`deploy/docker/docker-compose.yaml#L68`](https://github.com/SigNoz/signoz/blob/main/deploy/docker/docker-compose.yaml#L68)
![develop-frontend](https://user-images.githubusercontent.com/52788043/179009217-6692616b-17dc-4d27-b587-9d007098d739.jpeg)
- run `cd deploy` to move to deploy directory,
- Install signoz locally **without** the frontend,
- Add / Uncomment the below configuration to query-service section at [`deploy/docker/docker-compose.yaml#L47`](https://github.com/SigNoz/signoz/blob/main/deploy/docker/docker-compose.yaml#L47)
```
ports:
- "8080:8080"
```
<img width="869" alt="query service" src="https://user-images.githubusercontent.com/52788043/179010251-8489be31-04ca-42f8-b30d-ef0bb6accb6b.png">
- Next run,
```
cd deploy/docker
sudo docker compose up -d
```
- `cd ../frontend` and change baseURL in file [`frontend/src/constants/env.ts#L2`](https://github.com/SigNoz/signoz/blob/main/frontend/src/constants/env.ts#L2) and for that, you need to create a `.env` file in the `frontend` directory with the following environment variable (`FRONTEND_API_ENDPOINT`) matching your configuration.
If you have backend api exposed via frontend nginx:
```
FRONTEND_API_ENDPOINT=http://localhost:3301
```
If not:
```
FRONTEND_API_ENDPOINT=http://localhost:8080
```
- Next,
```
yarn install
yarn dev
```
## 3.2 Contribute to Frontend without installing SigNoz backend
If you don't want to install the SigNoz backend just for doing frontend development, we can provide you with test environments that you can use as the backend.
- Clone the SigNoz repository and cd into signoz/frontend directory,
```
git clone https://github.com/SigNoz/signoz.git && cd signoz/frontend
```
- Create a file `.env` in the `frontend` directory with `FRONTEND_API_ENDPOINT=<test environment URL>`
- Next,
```
yarn install
yarn dev
```
Please ping us in the [`#contributing`](https://signoz-community.slack.com/archives/C01LWQ8KS7M) channel or ask `@Prashant Shahi` in our [Slack Community](https://signoz.io/slack) and we will DM you with `<test environment URL>`.
**Frontend should now be accessible at** [`http://localhost:3301/services`](http://localhost:3301/services)
**[`^top^`](#contributing-guidelines)**
<hr>
# 4. Contribute to Backend (Query-Service) 🌑
**Need to Update: [https://github.com/SigNoz/signoz/tree/main/pkg/query-service](https://github.com/SigNoz/signoz/tree/main/pkg/query-service)**
## 4.1 Prerequisites
### 4.1.1 Install SQLite3
- Run `sqlite3` command to check if you already have SQLite3 installed on your machine.
- If not installed already, Install using below command
- on Linux
- on Debian / Ubuntu
```
sudo apt install sqlite3
```
- on CentOS / Fedora / RedHat
```
sudo yum install sqlite3
```
## 4.2 To run ClickHouse setup (recommended for local development)
- Clone the SigNoz repository and cd into signoz directory,
```
git clone https://github.com/SigNoz/signoz.git && cd signoz
```
- run `sudo make dev-setup` to configure local setup to run query-service,
- Comment out `frontend` service section at [`deploy/docker/docker-compose.yaml#L68`](https://github.com/SigNoz/signoz/blob/main/deploy/docker/docker-compose.yaml#L68)
<img width="982" alt="develop-frontend" src="https://user-images.githubusercontent.com/52788043/179043977-012be8b0-a2ed-40d1-b2e6-2ab72d7989c0.png">
- Comment out `query-service` section at [`deploy/docker/docker-compose.yaml#L41`,](https://github.com/SigNoz/signoz/blob/main/deploy/docker/docker-compose.yaml#L41)
<img width="1068" alt="Screenshot 2022-07-14 at 22 48 07" src="https://user-images.githubusercontent.com/52788043/179044151-a65ba571-db0b-4a16-b64b-ca3fadcf3af0.png">
- add below configuration to `clickhouse` section at [`deploy/docker/docker-compose.yaml`,](https://github.com/SigNoz/signoz/blob/main/deploy/docker/docker-compose.yaml)
```
ports:
- 9001:9000
```
<img width="1013" alt="Screenshot 2022-07-14 at 22 50 37" src="https://user-images.githubusercontent.com/52788043/179044544-a293d3bc-4c4f-49ea-a276-505a381de67d.png">
- run `cd pkg/query-service/` to move to `query-service` directory,
- Then, you need to create a `.env` file with the following environment variable
```
SIGNOZ_SQLSTORE_SQLITE_PATH="./signoz.db"
```
to set your local environment with the right `RELATIONAL_DATASOURCE_PATH` as mentioned in [`./constants/constants.go#L38`,](https://github.com/SigNoz/signoz/blob/main/pkg/query-service/constants/constants.go#L38)
- Now, install SigNoz locally **without** the `frontend` and `query-service`,
- If you are using `x86_64` processors (All Intel/AMD processors) run `sudo make run-x86`
- If you are on `arm64` processors (Apple M1 Macs) run `sudo make run-arm`
#### Run locally,
```
ClickHouseUrl=tcp://localhost:9001 STORAGE=clickhouse go run main.go
```
#### Build and Run locally
```
cd pkg/query-service
go build -o build/query-service main.go
ClickHouseUrl=tcp://localhost:9001 STORAGE=clickhouse build/query-service
```
#### Docker Images
The docker images of query-service is available at https://hub.docker.com/r/signoz/query-service
```
docker pull signoz/query-service
```
```
docker pull signoz/query-service:latest
```
```
docker pull signoz/query-service:develop
```
### Important Note:
**Query Service should now be available at** [`http://localhost:8080`](http://localhost:8080)
If you want to see how the frontend plays with query service, you can run the frontend also in your local env with the baseURL changed to `http://localhost:8080` in file [`frontend/src/constants/env.ts`](https://github.com/SigNoz/signoz/blob/main/frontend/src/constants/env.ts) as the `query-service` is now running at port `8080`.
<!-- Instead of configuring a local setup, you can also use [Gitpod](https://www.gitpod.io/), a VSCode-based Web IDE.
Click the button below. A workspace with all required environments will be created.
[![Open in Gitpod](https://gitpod.io/button/open-in-gitpod.svg)](https://gitpod.io/#https://github.com/SigNoz/signoz)
> To use it on your forked repo, edit the 'Open in Gitpod' button URL to `https://gitpod.io/#https://github.com/<your-github-username>/signoz` -->
**[`^top^`](#contributing-guidelines)**
<hr>
# 5. Contribute to SigNoz Helm Chart 📊
**Need to Update: [https://github.com/SigNoz/charts](https://github.com/SigNoz/charts).**
## 5.1 To run helm chart for local development
- Clone the SigNoz repository and cd into charts directory,
```
git clone https://github.com/SigNoz/charts.git && cd charts
```
- It is recommended to use lightweight kubernetes (k8s) cluster for local development:
- [kind](https://kind.sigs.k8s.io/docs/user/quick-start/#installation)
- [k3d](https://k3d.io/#installation)
- [minikube](https://minikube.sigs.k8s.io/docs/start/)
- Create a k8s cluster and make sure `kubectl` points to it,
- Run `make dev-install` to install the SigNoz chart with the release name `my-release` in the `platform` namespace,
- Next, run
```
kubectl -n platform port-forward svc/my-release-signoz-frontend 3301:3301
```
to make the SigNoz UI available at [localhost:3301](http://localhost:3301)
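To confirm the release came up correctly, you can also list its pods (a generic `kubectl` check, using the same `platform` namespace as above):
```bash
kubectl -n platform get pods
```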
**5.1.1 To install the HotROD sample app:**
```bash
curl -sL https://github.com/SigNoz/signoz/raw/main/sample-apps/hotrod/hotrod-install.sh \
| HELM_RELEASE=my-release SIGNOZ_NAMESPACE=platform bash
```
**5.1.2 To load data with the HotROD sample app:**
```bash
kubectl -n sample-application run strzal --image=djbingham/curl \
--restart='OnFailure' -i --tty --rm --command -- curl -X POST -F \
'user_count=6' -F 'spawn_rate=2' http://locust-master:8089/swarm
```
**5.1.3 To stop the load generation:**
```bash
kubectl -n sample-application run strzal --image=djbingham/curl \
--restart='OnFailure' -i --tty --rm --command -- curl \
http://locust-master:8089/stop
```
**5.1.4 To delete the HotROD sample app:**
```bash
curl -sL https://github.com/SigNoz/signoz/raw/main/sample-apps/hotrod/hotrod-delete.sh \
| HOTROD_NAMESPACE=sample-application bash
```
**[`^top^`](#contributing-guidelines)**
---
# 6. Contribute to Dashboards 📈
**Need to Update: [https://github.com/SigNoz/dashboards](https://github.com/SigNoz/dashboards)**
To contribute a new dashboard template for any service, follow the contribution guidelines in the [Dashboard Contributing Guide](https://github.com/SigNoz/dashboards/blob/main/CONTRIBUTING.md). In brief (see the layout sketch after this list):
1. Create a dashboard JSON file.
2. Add a README file explaining the dashboard, the metrics ingested, and the configurations needed.
3. Include screenshots of the dashboard in the `assets/` directory.
4. Submit a pull request for review.
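A typical submission therefore looks roughly like the layout below (illustrative only; the exact directory structure is defined by the dashboards repository's contributing guide):
```
<dashboard-name>/
├── <dashboard-name>.json   # the dashboard template
├── README.md               # metrics ingested and required configuration
└── assets/                 # screenshots of the dashboard
```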
## Other Ways to Contribute
There are many other ways to get involved with the community and to participate in this project:
- Use the product, submitting GitHub issues when a problem is found.
- Help review pull requests and participate in issue threads.
- Submit a new feature request as an issue.
- Help answer questions on forums such as Stack Overflow and [SigNoz Community Slack Channel](https://signoz.io/slack).
- Tell others about the project on Twitter, your blog, etc.
Again, feel free to ping us on [`#contributing`](https://signoz-community.slack.com/archives/C01LWQ8KS7M) or [`#contributing-frontend`](https://signoz-community.slack.com/archives/C027134DM8B) in our Slack community if you need any help with this :)
Thank You!
Makefile
@@ -7,10 +7,8 @@ BUILD_VERSION ?= $(shell git describe --always --tags)
BUILD_HASH ?= $(shell git rev-parse --short HEAD)
BUILD_TIME ?= $(shell date -u +"%Y-%m-%dT%H:%M:%SZ")
BUILD_BRANCH ?= $(shell git rev-parse --abbrev-ref HEAD)
LICENSE_SIGNOZ_IO ?= https://license.signoz.io/api/v1
DEV_LICENSE_SIGNOZ_IO ?= https://staging-license.signoz.io/api/v1
ZEUS_URL ?= https://api.signoz.cloud
DEV_ZEUS_URL ?= https://api.staging.signoz.cloud
DEV_BUILD ?= "" # set to any non-empty value to enable dev build
# Internal variables or constants.
@@ -20,16 +18,15 @@ EE_QUERY_SERVICE_DIRECTORY ?= ee/query-service
STANDALONE_DIRECTORY ?= deploy/docker
SWARM_DIRECTORY ?= deploy/docker-swarm
CH_HISTOGRAM_QUANTILE_DIRECTORY ?= scripts/clickhouse/histogramquantile
GORELEASER_BIN ?= goreleaser
GOOS ?= $(shell go env GOOS)
GOARCH ?= $(shell go env GOARCH)
GOPATH ?= $(shell go env GOPATH)
REPONAME ?= signoz
DOCKER_TAG ?= $(BUILD_VERSION)
SIGNOZ_DOCKER_IMAGE ?= signoz
SIGNOZ_COMMUNITY_DOCKER_IMAGE ?= signoz-community
DOCKER_TAG ?= $(subst v,,$(BUILD_VERSION))
FRONTEND_DOCKER_IMAGE ?= frontend
QUERY_SERVICE_DOCKER_IMAGE ?= query-service
# Build-time Go variables
PACKAGE?=go.signoz.io/signoz
@@ -40,46 +37,10 @@ gitBranch=${PACKAGE}/pkg/query-service/version.gitBranch
licenseSignozIo=${PACKAGE}/ee/query-service/constants.LicenseSignozIo
zeusURL=${PACKAGE}/ee/query-service/constants.ZeusURL
LD_FLAGS=-X ${buildHash}=${BUILD_HASH} -X ${buildTime}=${BUILD_TIME} -X ${buildVersion}=${BUILD_VERSION} -X ${gitBranch}=${BUILD_BRANCH}
PROD_LD_FLAGS=-X ${zeusURL}=${ZEUS_URL} -X ${licenseSignozIo}=${LICENSE_SIGNOZ_IO}
DEV_LD_FLAGS=-X ${zeusURL}=${DEV_ZEUS_URL} -X ${licenseSignozIo}=${DEV_LICENSE_SIGNOZ_IO}
LD_FLAGS=-X ${buildHash}=${BUILD_HASH} -X ${buildTime}=${BUILD_TIME} -X ${buildVersion}=${BUILD_VERSION} -X ${gitBranch}=${BUILD_BRANCH} -X ${zeusURL}=${ZEUS_URL}
DEV_LD_FLAGS=-X ${licenseSignozIo}=${DEV_LICENSE_SIGNOZ_IO}
##############################################################
# common commands
##############################################################
.PHONY: help
help: ## Displays help.
@awk 'BEGIN {FS = ":.*##"; printf "\nUsage:\n make \033[36m<target>\033[0m\n\nTargets:\n"} /^[a-z0-9A-Z_-]+:.*?##/ { printf " \033[36m%-40s\033[0m %s\n", $$1, $$2 }' $(MAKEFILE_LIST)
##############################################################
# devenv commands
##############################################################
.PHONY: devenv-clickhouse
devenv-clickhouse: ## Run clickhouse in devenv
@cd .devenv/docker/clickhouse; \
docker compose -f compose.yaml up -d
##############################################################
# run commands
##############################################################
.PHONY: run-go
run-go: ## Runs the go backend server
@SIGNOZ_INSTRUMENTATION_LOGS_LEVEL=debug \
SIGNOZ_SQLSTORE_SQLITE_PATH=signoz.db \
SIGNOZ_WEB_ENABLED=false \
SIGNOZ_JWT_SECRET=secret \
SIGNOZ_ALERTMANAGER_PROVIDER=signoz \
SIGNOZ_TELEMETRYSTORE_PROVIDER=clickhouse \
SIGNOZ_TELEMETRYSTORE_CLICKHOUSE_DSN=tcp://127.0.0.1:9000 \
go run -race \
./ee/query-service/main.go \
--config ./pkg/query-service/config/prometheus.yml \
--cluster cluster \
--use-logs-new-schema true \
--use-trace-new-schema true
all: build-push-frontend build-push-signoz
all: build-push-frontend build-push-query-service
# Steps to build static files of frontend
build-frontend-static:
@@ -92,67 +53,95 @@ build-frontend-static:
yarn build && \
ls -l build
# Steps to build static binary of signoz
.PHONY: build-signoz-static
build-signoz-static:
# Steps to build and push docker image of frontend
.PHONY: build-frontend-amd64 build-push-frontend
# Step to build docker image of frontend in amd64 (used in build pipeline)
build-frontend-amd64: build-frontend-static
@echo "------------------"
@echo "--> Building signoz static binary"
@echo "--> Building frontend docker image for amd64"
@echo "------------------"
@cd $(FRONTEND_DIRECTORY) && \
docker build --file Dockerfile -t $(REPONAME)/$(FRONTEND_DOCKER_IMAGE):$(DOCKER_TAG) \
--build-arg TARGETPLATFORM="linux/amd64" .
# Step to build and push docker image of frontend(used in push pipeline)
build-push-frontend: build-frontend-static
@echo "------------------"
@echo "--> Building and pushing frontend docker image"
@echo "------------------"
@cd $(FRONTEND_DIRECTORY) && \
docker buildx build --file Dockerfile --progress plain --push --platform linux/arm64,linux/amd64 \
--tag $(REPONAME)/$(FRONTEND_DOCKER_IMAGE):$(DOCKER_TAG) .
# Steps to build static binary of query service
.PHONY: build-query-service-static
build-query-service-static:
@echo "------------------"
@echo "--> Building query-service static binary"
@echo "------------------"
@if [ $(DEV_BUILD) != "" ]; then \
cd $(EE_QUERY_SERVICE_DIRECTORY) && \
CGO_ENABLED=1 go build -tags timetzdata -a -o ./bin/signoz-${GOOS}-${GOARCH} \
cd $(QUERY_SERVICE_DIRECTORY) && \
CGO_ENABLED=1 go build -tags timetzdata -a -o ./bin/query-service-${GOOS}-${GOARCH} \
-ldflags "-linkmode external -extldflags '-static' -s -w ${LD_FLAGS} ${DEV_LD_FLAGS}"; \
else \
cd $(EE_QUERY_SERVICE_DIRECTORY) && \
CGO_ENABLED=1 go build -tags timetzdata -a -o ./bin/signoz-${GOOS}-${GOARCH} \
-ldflags "-linkmode external -extldflags '-static' -s -w ${LD_FLAGS} ${PROD_LD_FLAGS}"; \
cd $(QUERY_SERVICE_DIRECTORY) && \
CGO_ENABLED=1 go build -tags timetzdata -a -o ./bin/query-service-${GOOS}-${GOARCH} \
-ldflags "-linkmode external -extldflags '-static' -s -w ${LD_FLAGS}"; \
fi
.PHONY: build-signoz-static-amd64
build-signoz-static-amd64:
make GOARCH=amd64 build-signoz-static
.PHONY: build-query-service-static-amd64
build-query-service-static-amd64:
make GOARCH=amd64 build-query-service-static
.PHONY: build-signoz-static-arm64
build-signoz-static-arm64:
make CC=aarch64-linux-gnu-gcc GOARCH=arm64 build-signoz-static
.PHONY: build-query-service-static-arm64
build-query-service-static-arm64:
make CC=aarch64-linux-gnu-gcc GOARCH=arm64 build-query-service-static
# Steps to build static binary of signoz for all platforms
.PHONY: build-signoz-static-all
build-signoz-static-all: build-signoz-static-amd64 build-signoz-static-arm64 build-frontend-static
# Steps to build static binary of query service for all platforms
.PHONY: build-query-service-static-all
build-query-service-static-all: build-query-service-static-amd64 build-query-service-static-arm64 build-frontend-static
# Steps to build and push docker image of signoz
.PHONY: build-signoz-amd64 build-push-signoz
# Step to build docker image of signoz in amd64 (used in build pipeline)
build-signoz-amd64: build-signoz-static-amd64 build-frontend-static
# Steps to build and push docker image of query service
.PHONY: build-query-service-amd64 build-push-query-service
# Step to build docker image of query service in amd64 (used in build pipeline)
build-query-service-amd64: build-query-service-static-amd64 build-frontend-static
@echo "------------------"
@echo "--> Building signoz docker image for amd64"
@echo "--> Building query-service docker image for amd64"
@echo "------------------"
@docker build --file $(EE_QUERY_SERVICE_DIRECTORY)/Dockerfile \
--tag $(REPONAME)/$(SIGNOZ_DOCKER_IMAGE):$(DOCKER_TAG) \
@docker build --file $(QUERY_SERVICE_DIRECTORY)/Dockerfile \
--tag $(REPONAME)/$(QUERY_SERVICE_DOCKER_IMAGE):$(DOCKER_TAG) \
--build-arg TARGETPLATFORM="linux/amd64" .
# Step to build and push docker image of query in amd64 and arm64 (used in push pipeline)
build-push-signoz: build-signoz-static-all
build-push-query-service: build-query-service-static-all
@echo "------------------"
@echo "--> Building and pushing signoz docker image"
@echo "--> Building and pushing query-service docker image"
@echo "------------------"
@docker buildx build --file $(EE_QUERY_SERVICE_DIRECTORY)/Dockerfile --progress plain \
@docker buildx build --file $(QUERY_SERVICE_DIRECTORY)/Dockerfile --progress plain \
--push --platform linux/arm64,linux/amd64 \
--tag $(REPONAME)/$(SIGNOZ_DOCKER_IMAGE):$(DOCKER_TAG) .
--tag $(REPONAME)/$(QUERY_SERVICE_DOCKER_IMAGE):$(DOCKER_TAG) .
# Step to build docker image of signoz community in amd64 (used in build pipeline)
build-signoz-community-amd64:
# Step to build EE docker image of query service in amd64 (used in build pipeline)
build-ee-query-service-amd64:
@echo "------------------"
@echo "--> Building signoz docker image for amd64"
@echo "--> Building query-service docker image for amd64"
@echo "------------------"
make EE_QUERY_SERVICE_DIRECTORY=${QUERY_SERVICE_DIRECTORY} SIGNOZ_DOCKER_IMAGE=${SIGNOZ_COMMUNITY_DOCKER_IMAGE} build-signoz-amd64
make QUERY_SERVICE_DIRECTORY=${EE_QUERY_SERVICE_DIRECTORY} build-query-service-amd64
# Step to build and push docker image of signoz community in amd64 and arm64 (used in push pipeline)
build-push-signoz-community:
# Step to build and push EE docker image of query in amd64 and arm64 (used in push pipeline)
build-push-ee-query-service:
@echo "------------------"
@echo "--> Building and pushing signoz community docker image"
@echo "--> Building and pushing query-service docker image"
@echo "------------------"
make QUERY_SERVICE_DIRECTORY=${EE_QUERY_SERVICE_DIRECTORY} build-push-query-service
dev-setup:
mkdir -p /var/lib/signoz
sqlite3 /var/lib/signoz/signoz.db "VACUUM";
mkdir -p pkg/query-service/config/dashboards
@echo "------------------"
@echo "--> Local Setup completed"
@echo "------------------"
make EE_QUERY_SERVICE_DIRECTORY=${QUERY_SERVICE_DIRECTORY} SIGNOZ_DOCKER_IMAGE=${SIGNOZ_COMMUNITY_DOCKER_IMAGE} build-push-signoz
pull-signoz:
@docker-compose -f $(STANDALONE_DIRECTORY)/docker-compose.yaml pull
@@ -166,6 +155,22 @@ run-testing:
down-signoz:
@docker-compose -f $(STANDALONE_DIRECTORY)/docker-compose.yaml down -v
clear-standalone-data:
@docker run --rm -v "$(PWD)/$(STANDALONE_DIRECTORY)/data:/pwd" busybox \
sh -c "cd /pwd && rm -rf alertmanager/* clickhouse*/* signoz/* zookeeper-*/*"
clear-swarm-data:
@docker run --rm -v "$(PWD)/$(SWARM_DIRECTORY)/data:/pwd" busybox \
sh -c "cd /pwd && rm -rf alertmanager/* clickhouse*/* signoz/* zookeeper-*/*"
clear-standalone-ch:
@docker run --rm -v "$(PWD)/$(STANDALONE_DIRECTORY)/data:/pwd" busybox \
sh -c "cd /pwd && rm -rf clickhouse*/* zookeeper-*/*"
clear-swarm-ch:
@docker run --rm -v "$(PWD)/$(SWARM_DIRECTORY)/data:/pwd" busybox \
sh -c "cd /pwd && rm -rf clickhouse*/* zookeeper-*/*"
check-no-ee-references:
@echo "Checking for 'ee' package references in 'pkg' directory..."
@if grep -R --include="*.go" '.*/ee/.*' pkg/; then \
@@ -178,42 +183,14 @@ check-no-ee-references:
test:
go test ./pkg/...
########################################################
# Goreleaser
########################################################
.PHONY: gor-snapshot gor-snapshot-histogram-quantile gor-snapshot-signoz gor-snapshot-signoz-community gor-split gor-split-histogram-quantile gor-split-signoz gor-split-signoz-community gor-merge
gor-snapshot:
goreleaser-snapshot:
@if [[ ${GORELEASER_WORKDIR} ]]; then \
${GORELEASER_BIN} release --config ${GORELEASER_WORKDIR}/.goreleaser.yaml --clean --snapshot; \
cd ${GORELEASER_WORKDIR} && \
goreleaser release --clean --snapshot; \
cd -; \
else \
${GORELEASER_BIN} release --clean --snapshot; \
goreleaser release --clean --snapshot; \
fi
gor-snapshot-histogram-quantile:
goreleaser-snapshot-histogram-quantile:
make GORELEASER_WORKDIR=$(CH_HISTOGRAM_QUANTILE_DIRECTORY) goreleaser-snapshot
gor-snapshot-signoz: build-frontend-static
make GORELEASER_WORKDIR=$(EE_QUERY_SERVICE_DIRECTORY) goreleaser-snapshot
gor-snapshot-signoz-community: build-frontend-static
make GORELEASER_WORKDIR=$(QUERY_SERVICE_DIRECTORY) goreleaser-snapshot
gor-split:
@if [[ ${GORELEASER_WORKDIR} ]]; then \
${GORELEASER_BIN} release --config ${GORELEASER_WORKDIR}/.goreleaser.yaml --clean --split; \
else \
${GORELEASER_BIN} release --clean --split; \
fi
gor-split-histogram-quantile:
make GORELEASER_WORKDIR=$(CH_HISTOGRAM_QUANTILE_DIRECTORY) goreleaser-split
gor-split-signoz: build-frontend-static
make GORELEASER_WORKDIR=$(EE_QUERY_SERVICE_DIRECTORY) goreleaser-split
gor-split-signoz-community: build-frontend-static
make GORELEASER_WORKDIR=$(QUERY_SERVICE_DIRECTORY) goreleaser-split
gor-merge:
${GORELEASER_BIN} continue --merge

View File

@@ -219,18 +219,12 @@ Not sure how to get started? Just ping us on `#contributing` in our [slack commu
- [Nityananda Gohain](https://github.com/nityanandagohain)
- [Srikanth Chekuri](https://github.com/srikanthccv)
- [Vishal Sharma](https://github.com/makeavish)
- [Shivanshu Raj Shrivastava](https://github.com/shivanshuraj1333)
- [Ekansh Gupta](https://github.com/eKuG)
- [Aniket Agarwal](https://github.com/aniketio-ctrl)
#### Frontend
- [Yunus M](https://github.com/YounixM)
- [Vikrant Gupta](https://github.com/vikrantgupta25)
- [Sagar Rajput](https://github.com/SagarRajput-7)
- [Shaheer Kochai](https://github.com/ahmadshaheer)
- [Amlan Kumar Nandy](https://github.com/amlannandy)
- [Sahil Khan](https://github.com/sawhil)
#### DevOps

View File

@@ -1,4 +0,0 @@
provider: "inmemory"
inmemory:
ttl: 60m
cleanupInterval: 10m

View File

@@ -70,74 +70,26 @@ sqlstore:
##################### APIServer #####################
apiserver:
timeout:
# Default request timeout.
default: 60s
# Maximum request timeout.
max: 600s
# List of routes to exclude from request timeout.
excluded_routes:
- /api/v1/logs/tail
- /api/v3/logs/livetail
logging:
# List of routes to exclude from request responselogging.
excluded_routes:
- /api/v1/health
##################### TelemetryStore #####################
telemetrystore:
# Specifies the telemetrystore provider to use.
# specifies the telemetrystore provider to use.
provider: clickhouse
clickhouse:
# The DSN to use for ClickHouse.
dsn: http://localhost:9000
# Maximum number of idle connections in the connection pool.
max_idle_conns: 50
# Maximum number of open connections to the database.
max_open_conns: 100
# Maximum time to wait for a connection to be established.
dial_timeout: 5s
clickhouse:
# The DSN to use for ClickHouse.
dsn: http://localhost:9000
##################### Alertmanager #####################
alertmanager:
# Specifies the alertmanager provider to use.
provider: legacy
legacy:
# The API URL (with prefix) of the legacy Alertmanager instance.
api_url: http://localhost:9093/api
signoz:
# The poll interval for periodically syncing the alertmanager with the config in the store.
poll_interval: 1m
# The URL under which Alertmanager is externally reachable (for example, if Alertmanager is served via a reverse proxy). Used for generating relative and absolute links back to Alertmanager itself.
external_url: http://localhost:9093
# The global configuration for the alertmanager. All the exahustive fields can be found in the upstream: https://github.com/prometheus/alertmanager/blob/efa05feffd644ba4accb526e98a8c6545d26a783/config/config.go#L833
global:
# ResolveTimeout is the time after which an alert is declared resolved if it has not been updated.
resolve_timeout: 5m
route:
# GroupByStr is the list of labels to group alerts by.
group_by:
- alertname
# GroupInterval is the interval at which alerts are grouped.
group_interval: 1m
# GroupWait is the time to wait before sending alerts to receivers.
group_wait: 1m
# RepeatInterval is the interval at which alerts are repeated.
repeat_interval: 1h
alerts:
# Interval between garbage collection of alerts.
gc_interval: 30m
silences:
# Maximum number of silences, including expired silences. If negative or zero, no limit is set.
max: 0
# Maximum size of the silences in bytes. If negative or zero, no limit is set.
max_size_bytes: 0
# Interval between garbage collection and snapshotting of the silences. The snapshot will be stored in the state store.
maintenance_interval: 15m
# Retention of the silences.
retention: 120h
nflog:
# Interval between garbage collection and snapshotting of the notification logs. The snapshot will be stored in the state store.
maintenance_interval: 15m
# Retention of the notification logs.
retention: 120h
dial_timeout: 5s

View File

@@ -1,25 +0,0 @@
# my global config
global:
scrape_interval: 5s # Set the scrape interval to every 15 seconds. Default is every 1 minute.
evaluation_interval: 15s # Evaluate rules every 15 seconds. The default is every 1 minute.
# scrape_timeout is set to the global default (10s).
# Alertmanager configuration
alerting:
alertmanagers:
- static_configs:
- targets:
- 127.0.0.1:9093
# Load rules once and periodically evaluate them according to the global 'evaluation_interval'.
rule_files:
# - "first_rules.yml"
# - "second_rules.yml"
- 'alerts.yml'
# A scrape configuration containing exactly one endpoint to scrape:
# Here it's Prometheus itself.
scrape_configs: []
remote_read:
- url: tcp://localhost:9000/signoz_metrics

View File

@@ -26,7 +26,7 @@ cd deploy/docker
docker compose up -d
```
Open http://localhost:8080 in your favourite browser.
Open http://localhost:3301 in your favourite browser.
To start collecting logs and metrics from your infrastructure, run the following command:
@@ -55,7 +55,7 @@ cd deploy/docker-swarm
docker stack deploy -c docker-compose.yaml signoz
```
Open http://localhost:8080 in your favourite browser.
Open http://localhost:3301 in your favourite browser.
To start collecting logs and metrics from your infrastructure, run the following command:

View File

@@ -0,0 +1,64 @@
map $http_upgrade $connection_upgrade {
default upgrade;
'' close;
}
server {
listen 3301;
server_name _;
gzip on;
gzip_static on;
gzip_types text/plain text/css application/json application/x-javascript text/xml application/xml application/xml+rss text/javascript;
gzip_proxied any;
gzip_vary on;
gzip_comp_level 6;
gzip_buffers 16 8k;
gzip_http_version 1.1;
# to handle uri issue 414 from nginx
client_max_body_size 24M;
large_client_header_buffers 8 128k;
location / {
if ( $uri = '/index.html' ) {
add_header Cache-Control no-store always;
}
root /usr/share/nginx/html;
index index.html index.htm;
try_files $uri $uri/ /index.html;
}
location ~ ^/api/(v1|v3)/logs/(tail|livetail){
proxy_pass http://query-service:8080;
proxy_http_version 1.1;
# connection will be closed if no data is read for 600s between successive read operations
proxy_read_timeout 600s;
# dont buffer the data send it directly to client.
proxy_buffering off;
proxy_cache off;
}
location /api {
proxy_pass http://query-service:8080/api;
# connection will be closed if no data is read for 600s between successive read operations
proxy_read_timeout 600s;
}
location /ws {
proxy_pass http://query-service:8080/ws;
proxy_http_version 1.1;
proxy_set_header Upgrade "websocket";
proxy_set_header Connection "upgrade";
proxy_read_timeout 86400;
}
# redirect server error pages to the static page /50x.html
#
error_page 500 502 503 504 /50x.html;
location = /50x.html {
root /usr/share/nginx/html;
}
}

View File

@@ -1 +1 @@
server_endpoint: ws://signoz:4320/v1/opamp
server_endpoint: ws://query-service:4320/v1/opamp

View File

@@ -76,9 +76,6 @@ services:
wget -O histogram-quantile.tar.gz "https://github.com/SigNoz/signoz/releases/download/histogram-quantile%2F$${version}/histogram-quantile_$${node_os}_$${node_arch}.tar.gz"
tar -xvzf histogram-quantile.tar.gz
mv histogram-quantile /var/lib/clickhouse/user_scripts/histogramQuantile
deploy:
restart_policy:
condition: on-failure
volumes:
- ../common/clickhouse/user_scripts:/var/lib/clickhouse/user_scripts/
zookeeper-1:
@@ -172,9 +169,19 @@ services:
- ../common/clickhouse/cluster.ha.xml:/etc/clickhouse-server/config.d/cluster.xml
- ./clickhouse-setup/data/clickhouse-3/:/var/lib/clickhouse/
# - ../common/clickhouse/storage.xml:/etc/clickhouse-server/config.d/storage.xml
signoz:
alertmanager:
!!merge <<: *common
image: signoz/alertmanager:0.23.7
command:
- --queryService.url=http://query-service:8085
- --storage.path=/data
volumes:
- ./clickhouse-setup/data/alertmanager:/data
depends_on:
- query-service
query-service:
!!merge <<: *db-depend
image: signoz/signoz:v0.76.0
image: signoz/query-service:0.71.0
command:
- --config=/root/config/prometheus.yml
- --use-logs-new-schema=true
@@ -187,15 +194,14 @@ services:
- ../common/dashboards:/root/config/dashboards
- ./clickhouse-setup/data/signoz/:/var/lib/signoz/
environment:
- SIGNOZ_ALERTMANAGER_PROVIDER=signoz
- SIGNOZ_TELEMETRYSTORE_CLICKHOUSE_DSN=tcp://clickhouse:9000
- ClickHouseUrl=tcp://clickhouse:9000
- ALERTMANAGER_API_PREFIX=http://alertmanager:9093/api/
- SIGNOZ_SQLSTORE_SQLITE_PATH=/var/lib/signoz/signoz.db
- DASHBOARDS_PATH=/root/config/dashboards
- STORAGE=clickhouse
- GODEBUG=netdns=go
- TELEMETRY_ENABLED=true
- DEPLOYMENT_TYPE=docker-swarm
- SIGNOZ_JWT_SECRET=secret
healthcheck:
test:
- CMD
@@ -206,9 +212,19 @@ services:
interval: 30s
timeout: 5s
retries: 3
frontend:
!!merge <<: *common
image: signoz/frontend:0.71.0
depends_on:
- alertmanager
- query-service
ports:
- "3301:3301"
volumes:
- ../common/signoz/nginx-config.conf:/etc/nginx/conf.d/default.conf
otel-collector:
!!merge <<: *db-depend
image: signoz/signoz-otel-collector:0.111.30
image: signoz/signoz-otel-collector:0.111.26
command:
- --config=/etc/otel-collector-config.yaml
- --manager-config=/etc/manager-config.yaml
@@ -229,10 +245,10 @@ services:
depends_on:
- clickhouse
- schema-migrator
- signoz
- query-service
schema-migrator:
!!merge <<: *common
image: signoz/signoz-schema-migrator:0.111.30
image: signoz/signoz-schema-migrator:0.111.24
deploy:
restart_policy:
condition: on-failure
@@ -247,6 +263,8 @@ networks:
signoz-net:
name: signoz-net
volumes:
alertmanager:
name: signoz-alertmanager
clickhouse:
name: signoz-clickhouse
clickhouse-2:

View File

@@ -73,9 +73,6 @@ services:
wget -O histogram-quantile.tar.gz "https://github.com/SigNoz/signoz/releases/download/histogram-quantile%2F$${version}/histogram-quantile_$${node_os}_$${node_arch}.tar.gz"
tar -xvzf histogram-quantile.tar.gz
mv histogram-quantile /var/lib/clickhouse/user_scripts/histogramQuantile
deploy:
restart_policy:
condition: on-failure
volumes:
- ../common/clickhouse/user_scripts:/var/lib/clickhouse/user_scripts/
zookeeper-1:
@@ -108,9 +105,19 @@ services:
- ../common/clickhouse/cluster.xml:/etc/clickhouse-server/config.d/cluster.xml
- clickhouse:/var/lib/clickhouse/
# - ../common/clickhouse/storage.xml:/etc/clickhouse-server/config.d/storage.xml
signoz:
alertmanager:
!!merge <<: *common
image: signoz/alertmanager:0.23.7
command:
- --queryService.url=http://query-service:8085
- --storage.path=/data
volumes:
- alertmanager:/data
depends_on:
- query-service
query-service:
!!merge <<: *db-depend
image: signoz/signoz:v0.76.0
image: signoz/query-service:0.71.0
command:
- --config=/root/config/prometheus.yml
- --use-logs-new-schema=true
@@ -123,8 +130,8 @@ services:
- ../common/dashboards:/root/config/dashboards
- sqlite:/var/lib/signoz/
environment:
- SIGNOZ_ALERTMANAGER_PROVIDER=signoz
- SIGNOZ_TELEMETRYSTORE_CLICKHOUSE_DSN=tcp://clickhouse:9000
- ClickHouseUrl=tcp://clickhouse:9000
- ALERTMANAGER_API_PREFIX=http://alertmanager:9093/api/
- SIGNOZ_SQLSTORE_SQLITE_PATH=/var/lib/signoz/signoz.db
- DASHBOARDS_PATH=/root/config/dashboards
- STORAGE=clickhouse
@@ -141,9 +148,19 @@ services:
interval: 30s
timeout: 5s
retries: 3
frontend:
!!merge <<: *common
image: signoz/frontend:0.71.0
depends_on:
- alertmanager
- query-service
ports:
- "3301:3301"
volumes:
- ../common/signoz/nginx-config.conf:/etc/nginx/conf.d/default.conf
otel-collector:
!!merge <<: *db-depend
image: signoz/signoz-otel-collector:0.111.30
image: signoz/signoz-otel-collector:0.111.26
command:
- --config=/etc/otel-collector-config.yaml
- --manager-config=/etc/manager-config.yaml
@@ -164,10 +181,10 @@ services:
depends_on:
- clickhouse
- schema-migrator
- signoz
- query-service
schema-migrator:
!!merge <<: *common
image: signoz/signoz-schema-migrator:0.111.30
image: signoz/signoz-schema-migrator:0.111.24
deploy:
restart_policy:
condition: on-failure
@@ -182,6 +199,8 @@ networks:
signoz-net:
name: signoz-net
volumes:
alertmanager:
name: signoz-alertmanager
clickhouse:
name: signoz-clickhouse
sqlite:

View File

@@ -42,7 +42,7 @@ receivers:
# please remove names from below if you want to collect logs from them
- type: filter
id: signoz_logs_filter
expr: 'attributes.container_name matches "^(signoz_(logspout|signoz|otel-collector|clickhouse|zookeeper))|(infra_(logspout|otel-agent|otel-metrics)).*"'
expr: 'attributes.container_name matches "^(signoz_(logspout|alertmanager|query-service|otel-collector|clickhouse|zookeeper))|(infra_(logspout|otel-agent|otel-metrics)).*"'
processors:
batch:
send_batch_size: 10000

View File

@@ -66,7 +66,7 @@ exporters:
enabled: true
clickhousemetricswrite/prometheus:
endpoint: tcp://clickhouse:9000/signoz_metrics
signozclickhousemetrics:
clickhousemetricswritev2:
dsn: tcp://clickhouse:9000/signoz_metrics
clickhouselogsexporter:
dsn: tcp://clickhouse:9000/signoz_logs
@@ -90,11 +90,11 @@ service:
metrics:
receivers: [otlp]
processors: [batch]
exporters: [clickhousemetricswrite, signozclickhousemetrics]
exporters: [clickhousemetricswrite, clickhousemetricswritev2]
metrics/prometheus:
receivers: [prometheus]
processors: [batch]
exporters: [clickhousemetricswrite/prometheus, signozclickhousemetrics]
exporters: [clickhousemetricswrite/prometheus, clickhousemetricswritev2]
logs:
receivers: [otlp]
processors: [batch]

View File

@@ -2,7 +2,7 @@ version: "3"
x-common: &common
networks:
- signoz-net
restart: unless-stopped
restart: on-failure
logging:
options:
max-size: 50m
@@ -79,7 +79,6 @@ services:
wget -O histogram-quantile.tar.gz "https://github.com/SigNoz/signoz/releases/download/histogram-quantile%2F$${version}/histogram-quantile_$${node_os}_$${node_arch}.tar.gz"
tar -xvzf histogram-quantile.tar.gz
mv histogram-quantile /var/lib/clickhouse/user_scripts/histogramQuantile
restart: on-failure
volumes:
- ../common/clickhouse/user_scripts:/var/lib/clickhouse/user_scripts/
zookeeper-1:
@@ -175,24 +174,36 @@ services:
- ../common/clickhouse/cluster.ha.xml:/etc/clickhouse-server/config.d/cluster.xml
- clickhouse-3:/var/lib/clickhouse/
# - ../common/clickhouse/storage.xml:/etc/clickhouse-server/config.d/storage.xml
signoz:
alertmanager:
!!merge <<: *common
image: signoz/alertmanager:${ALERTMANAGER_TAG:-0.23.7}
container_name: signoz-alertmanager
command:
- --queryService.url=http://query-service:8085
- --storage.path=/data
volumes:
- alertmanager:/data
depends_on:
query-service:
condition: service_healthy
query-service:
!!merge <<: *db-depend
image: signoz/signoz:${DOCKER_TAG:-v0.76.0}
container_name: signoz
image: signoz/query-service:${DOCKER_TAG:-0.71.0}
container_name: signoz-query-service
command:
- --config=/root/config/prometheus.yml
- --use-logs-new-schema=true
- --use-trace-new-schema=true
ports:
- "8080:8080" # signoz port
# ports:
# - "3301:8080" # signoz port
# - "6060:6060" # pprof port
volumes:
- ../common/signoz/prometheus.yml:/root/config/prometheus.yml
- ../common/dashboards:/root/config/dashboards
- sqlite:/var/lib/signoz/
environment:
- SIGNOZ_ALERTMANAGER_PROVIDER=signoz
- SIGNOZ_TELEMETRYSTORE_CLICKHOUSE_DSN=tcp://clickhouse:9000
- ClickHouseUrl=tcp://clickhouse:9000
- ALERTMANAGER_API_PREFIX=http://alertmanager:9093/api/
- SIGNOZ_SQLSTORE_SQLITE_PATH=/var/lib/signoz/signoz.db
- DASHBOARDS_PATH=/root/config/dashboards
- STORAGE=clickhouse
@@ -209,10 +220,21 @@ services:
interval: 30s
timeout: 5s
retries: 3
frontend:
!!merge <<: *common
image: signoz/frontend:${DOCKER_TAG:-0.71.0}
container_name: signoz-frontend
depends_on:
- alertmanager
- query-service
ports:
- "3301:3301"
volumes:
- ../common/signoz/nginx-config.conf:/etc/nginx/conf.d/default.conf
# TODO: support otel-collector multiple replicas. Nginx/Traefik for loadbalancing?
otel-collector:
!!merge <<: *db-depend
image: signoz/signoz-otel-collector:${OTELCOL_TAG:-0.111.30}
image: signoz/signoz-otel-collector:${OTELCOL_TAG:-0.111.26}
container_name: signoz-otel-collector
command:
- --config=/etc/otel-collector-config.yaml
@@ -234,11 +256,11 @@ services:
condition: service_healthy
schema-migrator-sync:
condition: service_completed_successfully
signoz:
query-service:
condition: service_healthy
schema-migrator-sync:
!!merge <<: *common
image: signoz/signoz-schema-migrator:${OTELCOL_TAG:-0.111.30}
image: signoz/signoz-schema-migrator:${OTELCOL_TAG:-0.111.24}
container_name: schema-migrator-sync
command:
- sync
@@ -249,17 +271,18 @@ services:
condition: service_healthy
schema-migrator-async:
!!merge <<: *db-depend
image: signoz/signoz-schema-migrator:${OTELCOL_TAG:-0.111.30}
image: signoz/signoz-schema-migrator:${OTELCOL_TAG:-0.111.24}
container_name: schema-migrator-async
command:
- async
- --dsn=tcp://clickhouse:9000
- --up=
restart: on-failure
networks:
signoz-net:
name: signoz-net
volumes:
alertmanager:
name: signoz-alertmanager
clickhouse:
name: signoz-clickhouse
clickhouse-2:

View File

@@ -2,7 +2,7 @@ version: "3"
x-common: &common
networks:
- signoz-net
restart: unless-stopped
restart: on-failure
logging:
options:
max-size: 50m
@@ -75,7 +75,6 @@ services:
wget -O histogram-quantile.tar.gz "https://github.com/SigNoz/signoz/releases/download/histogram-quantile%2F$${version}/histogram-quantile_$${node_os}_$${node_arch}.tar.gz"
tar -xvzf histogram-quantile.tar.gz
mv histogram-quantile /var/lib/clickhouse/user_scripts/histogramQuantile
restart: on-failure
volumes:
- ../common/clickhouse/user_scripts:/var/lib/clickhouse/user_scripts/
zookeeper-1:
@@ -108,10 +107,22 @@ services:
- ../common/clickhouse/cluster.xml:/etc/clickhouse-server/config.d/cluster.xml
- clickhouse:/var/lib/clickhouse/
# - ../common/clickhouse/storage.xml:/etc/clickhouse-server/config.d/storage.xml
signoz:
alertmanager:
!!merge <<: *common
image: signoz/alertmanager:${ALERTMANAGER_TAG:-0.23.7}
container_name: signoz-alertmanager
command:
- --queryService.url=http://query-service:8085
- --storage.path=/data
volumes:
- alertmanager:/data
depends_on:
query-service:
condition: service_healthy
query-service:
!!merge <<: *db-depend
image: signoz/signoz:${DOCKER_TAG:-v0.76.0}
container_name: signoz
image: signoz/query-service:${DOCKER_TAG:-0.71.0}
container_name: signoz-query-service
command:
- --config=/root/config/prometheus.yml
- --gateway-url=https://api.staging.signoz.cloud
@@ -125,8 +136,8 @@ services:
- ../common/dashboards:/root/config/dashboards
- sqlite:/var/lib/signoz/
environment:
- SIGNOZ_ALERTMANAGER_PROVIDER=signoz
- SIGNOZ_TELEMETRYSTORE_CLICKHOUSE_DSN=tcp://clickhouse:9000
- ClickHouseUrl=tcp://clickhouse:9000
- ALERTMANAGER_API_PREFIX=http://alertmanager:9093/api/
- SIGNOZ_SQLSTORE_SQLITE_PATH=/var/lib/signoz/signoz.db
- DASHBOARDS_PATH=/root/config/dashboards
- STORAGE=clickhouse
@@ -144,9 +155,20 @@ services:
interval: 30s
timeout: 5s
retries: 3
frontend:
!!merge <<: *common
image: signoz/frontend:${DOCKER_TAG:-0.71.0}
container_name: signoz-frontend
depends_on:
- alertmanager
- query-service
ports:
- "3301:3301"
volumes:
- ../common/signoz/nginx-config.conf:/etc/nginx/conf.d/default.conf
otel-collector:
!!merge <<: *db-depend
image: signoz/signoz-otel-collector:${OTELCOL_TAG:-0.111.30}
image: signoz/signoz-otel-collector:${OTELCOL_TAG:-0.111.26}
container_name: signoz-otel-collector
command:
- --config=/etc/otel-collector-config.yaml
@@ -164,11 +186,11 @@ services:
- "4317:4317" # OTLP gRPC receiver
- "4318:4318" # OTLP HTTP receiver
depends_on:
signoz:
query-service:
condition: service_healthy
schema-migrator-sync:
!!merge <<: *common
image: signoz/signoz-schema-migrator:${OTELCOL_TAG:-0.111.30}
image: signoz/signoz-schema-migrator:${OTELCOL_TAG:-0.111.24}
container_name: schema-migrator-sync
command:
- sync
@@ -177,20 +199,20 @@ services:
depends_on:
clickhouse:
condition: service_healthy
restart: on-failure
schema-migrator-async:
!!merge <<: *db-depend
image: signoz/signoz-schema-migrator:${OTELCOL_TAG:-0.111.30}
image: signoz/signoz-schema-migrator:${OTELCOL_TAG:-0.111.24}
container_name: schema-migrator-async
command:
- async
- --dsn=tcp://clickhouse:9000
- --up=
restart: on-failure
networks:
signoz-net:
name: signoz-net
volumes:
alertmanager:
name: signoz-alertmanager
clickhouse:
name: signoz-clickhouse
sqlite:

View File

@@ -2,7 +2,7 @@ version: "3"
x-common: &common
networks:
- signoz-net
restart: unless-stopped
restart: on-failure
logging:
options:
max-size: 50m
@@ -75,7 +75,6 @@ services:
wget -O histogram-quantile.tar.gz "https://github.com/SigNoz/signoz/releases/download/histogram-quantile%2F$${version}/histogram-quantile_$${node_os}_$${node_arch}.tar.gz"
tar -xvzf histogram-quantile.tar.gz
mv histogram-quantile /var/lib/clickhouse/user_scripts/histogramQuantile
restart: on-failure
volumes:
- ../common/clickhouse/user_scripts:/var/lib/clickhouse/user_scripts/
zookeeper-1:
@@ -108,24 +107,36 @@ services:
- ../common/clickhouse/cluster.xml:/etc/clickhouse-server/config.d/cluster.xml
- clickhouse:/var/lib/clickhouse/
# - ../common/clickhouse/storage.xml:/etc/clickhouse-server/config.d/storage.xml
signoz:
alertmanager:
!!merge <<: *common
image: signoz/alertmanager:${ALERTMANAGER_TAG:-0.23.7}
container_name: signoz-alertmanager
command:
- --queryService.url=http://query-service:8085
- --storage.path=/data
volumes:
- alertmanager:/data
depends_on:
query-service:
condition: service_healthy
query-service:
!!merge <<: *db-depend
image: signoz/signoz:${DOCKER_TAG:-v0.76.0}
container_name: signoz
image: signoz/query-service:${DOCKER_TAG:-0.71.0}
container_name: signoz-query-service
command:
- --config=/root/config/prometheus.yml
- --use-logs-new-schema=true
- --use-trace-new-schema=true
ports:
- "8080:8080" # signoz port
# ports:
# - "3301:8080" # signoz port
# - "6060:6060" # pprof port
volumes:
- ../common/signoz/prometheus.yml:/root/config/prometheus.yml
- ../common/dashboards:/root/config/dashboards
- sqlite:/var/lib/signoz/
environment:
- SIGNOZ_ALERTMANAGER_PROVIDER=signoz
- SIGNOZ_TELEMETRYSTORE_CLICKHOUSE_DSN=tcp://clickhouse:9000
- ClickHouseUrl=tcp://clickhouse:9000
- ALERTMANAGER_API_PREFIX=http://alertmanager:9093/api/
- SIGNOZ_SQLSTORE_SQLITE_PATH=/var/lib/signoz/signoz.db
- DASHBOARDS_PATH=/root/config/dashboards
- STORAGE=clickhouse
@@ -142,9 +153,20 @@ services:
interval: 30s
timeout: 5s
retries: 3
frontend:
!!merge <<: *common
image: signoz/frontend:${DOCKER_TAG:-0.71.0}
container_name: signoz-frontend
depends_on:
- alertmanager
- query-service
ports:
- "3301:3301"
volumes:
- ../common/signoz/nginx-config.conf:/etc/nginx/conf.d/default.conf
otel-collector:
!!merge <<: *db-depend
image: signoz/signoz-otel-collector:${OTELCOL_TAG:-0.111.30}
image: signoz/signoz-otel-collector:${OTELCOL_TAG:-0.111.26}
container_name: signoz-otel-collector
command:
- --config=/etc/otel-collector-config.yaml
@@ -162,11 +184,11 @@ services:
- "4317:4317" # OTLP gRPC receiver
- "4318:4318" # OTLP HTTP receiver
depends_on:
signoz:
query-service:
condition: service_healthy
schema-migrator-sync:
!!merge <<: *common
image: signoz/signoz-schema-migrator:${OTELCOL_TAG:-0.111.30}
image: signoz/signoz-schema-migrator:${OTELCOL_TAG:-0.111.24}
container_name: schema-migrator-sync
command:
- sync
@@ -175,20 +197,20 @@ services:
depends_on:
clickhouse:
condition: service_healthy
restart: on-failure
schema-migrator-async:
!!merge <<: *db-depend
image: signoz/signoz-schema-migrator:${OTELCOL_TAG:-0.111.30}
image: signoz/signoz-schema-migrator:${OTELCOL_TAG:-0.111.24}
container_name: schema-migrator-async
command:
- async
- --dsn=tcp://clickhouse:9000
- --up=
restart: on-failure
networks:
signoz-net:
name: signoz-net
volumes:
alertmanager:
name: signoz-alertmanager
clickhouse:
name: signoz-clickhouse
sqlite:

View File

@@ -8,7 +8,7 @@ x-common: &common
options:
max-size: 50m
max-file: "3"
restart: unless-stopped
restart: on-failure
services:
hotrod:
<<: *common

View File

@@ -8,7 +8,7 @@ x-common: &common
options:
max-size: 50m
max-file: "3"
restart: unless-stopped
restart: on-failure
services:
otel-agent:
<<: *common

View File

@@ -79,7 +79,7 @@ receivers:
# please remove names from below if you want to collect logs from them
- type: filter
id: signoz_logs_filter
expr: 'attributes.container_name matches "^signoz|(signoz-(|otel-collector|clickhouse|zookeeper))|(infra-(logspout|otel-agent)-.*)"'
expr: 'attributes.container_name matches "^(signoz-(|alertmanager|query-service|otel-collector|clickhouse|zookeeper))|(infra-(logspout|otel-agent)-.*)"'
processors:
batch:
send_batch_size: 10000

View File

@@ -66,7 +66,7 @@ exporters:
enabled: true
clickhousemetricswrite/prometheus:
endpoint: tcp://clickhouse:9000/signoz_metrics
signozclickhousemetrics:
clickhousemetricswritev2:
dsn: tcp://clickhouse:9000/signoz_metrics
clickhouselogsexporter:
dsn: tcp://clickhouse:9000/signoz_logs
@@ -90,11 +90,11 @@ service:
metrics:
receivers: [otlp]
processors: [batch]
exporters: [clickhousemetricswrite, signozclickhousemetrics]
exporters: [clickhousemetricswrite, clickhousemetricswritev2]
metrics/prometheus:
receivers: [prometheus]
processors: [batch]
exporters: [clickhousemetricswrite/prometheus, signozclickhousemetrics]
exporters: [clickhousemetricswrite/prometheus, clickhousemetricswritev2]
logs:
receivers: [otlp]
processors: [batch]

View File

@@ -127,7 +127,7 @@ check_os() {
# The script should error out in case they aren't available
check_ports_occupied() {
local port_check_output
local ports_pattern="8080|4317"
local ports_pattern="3301|4317"
if is_mac; then
port_check_output="$(netstat -anp tcp | awk '$6 == "LISTEN" && $4 ~ /^.*\.('"$ports_pattern"')$/')"
@@ -144,7 +144,7 @@ check_ports_occupied() {
send_event "port_not_available"
echo "+++++++++++ ERROR ++++++++++++++++++++++"
echo "SigNoz requires ports 8080 & 4317 to be open. Please shut down any other service(s) that may be running on these ports."
echo "SigNoz requires ports 3301 & 4317 to be open. Please shut down any other service(s) that may be running on these ports."
echo "You can run SigNoz on another port following this guide https://signoz.io/docs/install/troubleshooting/"
echo "++++++++++++++++++++++++++++++++++++++++"
echo ""
@@ -248,7 +248,7 @@ wait_for_containers_start() {
# The while loop is important because for-loops don't work for dynamic values
while [[ $timeout -gt 0 ]]; do
status_code="$(curl -s -o /dev/null -w "%{http_code}" "http://localhost:8080/api/v1/health?live=1" || true)"
status_code="$(curl -s -o /dev/null -w "%{http_code}" "http://localhost:3301/api/v1/health?live=1" || true)"
if [[ status_code -eq 200 ]]; then
break
else
@@ -484,7 +484,7 @@ pushd "${BASE_DIR}/${DOCKER_STANDALONE_DIR}" > /dev/null 2>&1
# check for open ports, if signoz is not installed
if is_command_present docker-compose; then
if $sudo_cmd $docker_compose_cmd ps | grep "signoz" | grep -q "healthy" > /dev/null 2>&1; then
if $sudo_cmd $docker_compose_cmd ps | grep "signoz-query-service" | grep -q "healthy" > /dev/null 2>&1; then
echo "SigNoz already installed, skipping the occupied ports check"
else
check_ports_occupied
@@ -533,7 +533,7 @@ else
echo ""
echo "🟢 Your installation is complete!"
echo ""
echo -e "🟢 SigNoz is running on http://localhost:8080"
echo -e "🟢 Your frontend is running on http://localhost:3301"
echo ""
echo " By default, retention period is set to 15 days for logs and traces, and 30 days for metrics."
echo -e "To change this, navigate to the General tab on the Settings page of SigNoz UI. For more details, refer to https://signoz.io/docs/userguide/retention-period \n"

View File

@@ -1,95 +0,0 @@
# Development Guide
Welcome! This guide will help you set up your local development environment for SigNoz. Let's get you started! 🚀
## What do I need?
Before diving in, make sure you have these tools installed:
- **Git** - Our version control system
- Download from [git-scm.com](https://git-scm.com/)
- **Go** - Powers our backend
- Download from [go.dev/dl](https://go.dev/dl/)
- Check [go.mod](../../go.mod#L3) for the minimum version
- **GCC** - Required for CGO dependencies
- Download from [gcc.gnu.org](https://gcc.gnu.org/)
- **Node** - Powers our frontend
- Download from [nodejs.org](https://nodejs.org)
- Check [.nvmrc](../../frontend/.nvmrc) for the version
- **Yarn** - Our frontend package manager
- Follow the [installation guide](https://yarnpkg.com/getting-started/install)
- **Docker** - For running Clickhouse and Postgres locally
- Get it from [docs.docker.com/get-docker](https://docs.docker.com/get-docker/)
> 💡 **Tip**: Run `make help` to see all available commands with descriptions
## How do I get the code?
1. Open your terminal
2. Clone the repository:
```bash
git clone https://github.com/SigNoz/signoz.git
```
3. Navigate to the project:
```bash
cd signoz
```
## How do I run it locally?
SigNoz has three main components: Clickhouse, Backend, and Frontend. Let's set them up one by one.
### 1. Setting up Clickhouse
First, we need to get Clickhouse running:
```bash
make devenv-clickhouse
```
This command:
- Starts Clickhouse in a single-shard, single-replica cluster
- Sets up Zookeeper
- Runs the latest schema migrations
### 2. Starting the Backend
1. Run the backend server:
```bash
make run-go
```
2. Verify it's working:
```bash
curl http://localhost:8080/api/v1/health
```
You should see: `{"status":"ok"}`
> 💡 **Tip**: The API server runs at `http://localhost:8080/` by default
### 3. Setting up the Frontend
1. Install dependencies:
```bash
yarn install
```
2. Create a `.env` file in the `frontend` directory:
```env
FRONTEND_API_ENDPOINT=http://localhost:8080
```
3. Start the development server:
```bash
yarn dev
```
> 💡 **Tip**: `yarn dev` will automatically rebuild when you make changes to the code
Now you're all set to start developing! Happy coding! 🎉
e2e/package.json
@@ -0,0 +1,14 @@
{
"name": "e2e",
"version": "1.0.0",
"main": "index.js",
"license": "MIT",
"devDependencies": {
"@playwright/test": "^1.22.0",
"@types/node": "^20.9.2"
},
"scripts": {},
"dependencies": {
"dotenv": "8.2.0"
}
}
e2e/playwright.config.ts
@@ -0,0 +1,46 @@
import { defineConfig, devices } from "@playwright/test";
import dotenv from "dotenv";
dotenv.config();
export default defineConfig({
testDir: "./tests",
fullyParallel: true,
forbidOnly: !!process.env.CI,
name: "Signoz E2E",
retries: process.env.CI ? 2 : 0,
reporter: process.env.CI ? "github" : "list",
preserveOutput: "always",
updateSnapshots: "all",
quiet: false,
testMatch: ["**/*.spec.ts"],
use: {
trace: "on-first-retry",
baseURL:
process.env.PLAYWRIGHT_TEST_BASE_URL || "https://stagingapp.signoz.io/",
},
projects: [
{ name: "setup", testMatch: /.*\.setup\.ts/ },
{
name: "chromium",
use: {
...devices["Desktop Chrome"],
// Use prepared auth state.
storageState: ".auth/user.json",
},
dependencies: ["setup"],
},
],
});
e2e/tests/auth.setup.ts
@@ -0,0 +1,37 @@
import { test, expect } from "@playwright/test";
import ROUTES from "../../frontend/src/constants/routes";
import dotenv from "dotenv";
dotenv.config();
const authFile = ".auth/user.json";
test("E2E Login Test", async ({ page }) => {
await Promise.all([page.goto("/"), page.waitForRequest("**/version")]);
const signup = "Monitor your applications. Find what is causing issues.";
const el = await page.locator(`text=${signup}`);
expect(el).toBeVisible();
await page
.locator("id=loginEmail")
.type(
process.env.PLAYWRIGHT_USERNAME ? process.env.PLAYWRIGHT_USERNAME : ""
);
await page.getByText("Next").click();
await page
.locator('input[id="currentPassword"]')
.fill(
process.env.PLAYWRIGHT_PASSWORD ? process.env.PLAYWRIGHT_PASSWORD : ""
);
await page.locator('button[data-attr="signup"]').click();
await expect(page).toHaveURL(ROUTES.APPLICATION);
await page.context().storageState({ path: authFile });
});
e2e/tests/contants.ts
@@ -0,0 +1,10 @@
export const SERVICE_TABLE_HEADERS = {
APPLICATION: "Applicaton",
P99LATENCY: "P99 latency (in ms)",
ERROR_RATE: "Error Rate (% of total)",
OPS_PER_SECOND: "Operations Per Second",
};
export const DATA_TEST_IDS = {
NEW_DASHBOARD_BTN: "create-new-dashboard",
};

View File

@@ -0,0 +1,40 @@
import { test, expect } from "@playwright/test";
import ROUTES from "../../frontend/src/constants/routes";
import { DATA_TEST_IDS, SERVICE_TABLE_HEADERS } from "./contants";
test("Basic Navigation Check across different resources", async ({ page }) => {
// route to services page and check if the page renders fine with BE contract
await Promise.all([
page.goto(ROUTES.APPLICATION),
page.waitForRequest("**/v1/services"),
]);
const p99Latency = page.locator(
`th:has-text("${SERVICE_TABLE_HEADERS.P99LATENCY}")`
);
await expect(p99Latency).toBeVisible();
// route to the new trace explorer page and check if the page renders fine
await page.goto(ROUTES.TRACES_EXPLORER);
await page.waitForLoadState("networkidle");
const listViewTable = await page
.locator('div[role="presentation"]')
.isVisible();
expect(listViewTable).toBeTruthy();
// route to the dashboards page and check if the page renders fine
await Promise.all([
page.goto(ROUTES.ALL_DASHBOARD),
page.waitForRequest("**/v1/dashboards"),
]);
const newDashboardBtn = await page
.locator(`data-testid=${DATA_TEST_IDS.NEW_DASHBOARD_BTN}`)
.isVisible();
expect(newDashboardBtn).toBeTruthy();
});
e2e/yarn.lock
@@ -0,0 +1,46 @@
# THIS IS AN AUTOGENERATED FILE. DO NOT EDIT THIS FILE DIRECTLY.
# yarn lockfile v1
"@playwright/test@^1.22.0":
version "1.40.0"
resolved "https://registry.yarnpkg.com/@playwright/test/-/test-1.40.0.tgz#d06c506977dd7863aa16e07f2136351ecc1be6ed"
integrity sha512-PdW+kn4eV99iP5gxWNSDQCbhMaDVej+RXL5xr6t04nbKLCBwYtA046t7ofoczHOm8u6c+45hpDKQVZqtqwkeQg==
dependencies:
playwright "1.40.0"
"@types/node@^20.9.2":
version "20.9.2"
resolved "https://registry.yarnpkg.com/@types/node/-/node-20.9.2.tgz#002815c8e87fe0c9369121c78b52e800fadc0ac6"
integrity sha512-WHZXKFCEyIUJzAwh3NyyTHYSR35SevJ6mZ1nWwJafKtiQbqRTIKSRcw3Ma3acqgsent3RRDqeVwpHntMk+9irg==
dependencies:
undici-types "~5.26.4"
dotenv@8.2.0:
version "8.2.0"
resolved "https://registry.yarnpkg.com/dotenv/-/dotenv-8.2.0.tgz#97e619259ada750eea3e4ea3e26bceea5424b16a"
integrity sha512-8sJ78ElpbDJBHNeBzUbUVLsqKdccaa/BXF1uPTw3GrvQTBgrQrtObr2mUrE38vzYd8cEv+m/JBfDLioYcfXoaw==
fsevents@2.3.2:
version "2.3.2"
resolved "https://registry.yarnpkg.com/fsevents/-/fsevents-2.3.2.tgz#8a526f78b8fdf4623b709e0b975c52c24c02fd1a"
integrity sha512-xiqMQR4xAeHTuB9uWm+fFRcIOgKBMiOBP+eXiyT7jsgVCq1bkVygt00oASowB7EdtpOHaaPgKt812P9ab+DDKA==
playwright-core@1.40.0:
version "1.40.0"
resolved "https://registry.yarnpkg.com/playwright-core/-/playwright-core-1.40.0.tgz#82f61e5504cb3097803b6f8bbd98190dd34bdf14"
integrity sha512-fvKewVJpGeca8t0ipM56jkVSU6Eo0RmFvQ/MaCQNDYm+sdvKkMBBWTE1FdeMqIdumRaXXjZChWHvIzCGM/tA/Q==
playwright@1.40.0:
version "1.40.0"
resolved "https://registry.yarnpkg.com/playwright/-/playwright-1.40.0.tgz#2a1824b9fe5c4fe52ed53db9ea68003543a99df0"
integrity sha512-gyHAgQjiDf1m34Xpwzaqb76KgfzYrhK7iih+2IzcOCoZWr/8ZqmdBw+t0RU85ZmfJMgtgAiNtBQ/KS2325INXw==
dependencies:
playwright-core "1.40.0"
optionalDependencies:
fsevents "2.3.2"
undici-types@~5.26.4:
version "5.26.5"
resolved "https://registry.yarnpkg.com/undici-types/-/undici-types-5.26.5.tgz#bcd539893d00b56e964fd2657a4866b221a65617"
integrity sha512-JlCMO+ehdEIKqlFxk6IfVoAUVmgz7cU7zD/h9XZ0qzeosSHmUJVOzSQvvYSYWXkFXC+IfLKSIffhv0sVZup6pA==

View File

@@ -1,36 +0,0 @@
package middleware
import (
"net/http"
"go.signoz.io/signoz/pkg/types/authtypes"
)
type Pat struct {
uuid *authtypes.UUID
headers []string
}
func NewPat(headers []string) *Pat {
return &Pat{uuid: authtypes.NewUUID(), headers: headers}
}
func (p *Pat) Wrap(next http.Handler) http.Handler {
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
var values []string
for _, header := range p.headers {
values = append(values, r.Header.Get(header))
}
ctx, err := p.uuid.ContextFromRequest(r.Context(), values...)
if err != nil {
next.ServeHTTP(w, r)
return
}
r = r.WithContext(ctx)
next.ServeHTTP(w, r)
})
}

View File

@@ -1,70 +0,0 @@
# yaml-language-server: $schema=https://goreleaser.com/static/schema-pro.json
# vim: set ts=2 sw=2 tw=0 fo=cnqoj
version: 2
project_name: signoz
before:
hooks:
- go mod tidy
builds:
- id: signoz
binary: bin/signoz
main: ee/query-service/main.go
env:
- CGO_ENABLED=1
- >-
{{- if eq .Os "linux" }}
{{- if eq .Arch "arm64" }}CC=aarch64-linux-gnu-gcc{{- end }}
{{- end }}
goos:
- linux
- darwin
goarch:
- amd64
- arm64
goamd64:
- v1
goarm64:
- v8.0
ldflags:
- -s -w
- -X github.com/SigNoz/signoz/pkg/query-service/version.version={{ .Version }}
- -X main.commit={{ .Commit }} -X main.date={{ .CommitDate }}
- -X main.builtBy=goreleaser
- -X go.signoz.io/signoz/pkg/query-service/version.buildVersion={{ .Version }}
- -X go.signoz.io/signoz/pkg/query-service/version.buildHash={{ .ShortCommit }}
- -X go.signoz.io/signoz/pkg/query-service/version.buildTime={{ .Date }}
- -X go.signoz.io/signoz/pkg/query-service/version.gitBranch={{ .Branch }}
- -X go.signoz.io/signoz/ee/query-service/constants.ZeusURL=https://api.signoz.cloud
- -X go.signoz.io/signoz/ee/query-service/constants.LicenseSignozIo=https://license.signoz.io/api/v1
- >-
{{- if eq .Os "linux" }}-linkmode external -extldflags '-static'{{- end }}
mod_timestamp: "{{ .CommitTimestamp }}"
tags:
- timetzdata
archives:
- formats:
- tar.gz
name_template: >-
{{ .ProjectName }}_{{- .Os }}_{{- .Arch }}
wrap_in_directory: true
strip_binary_directory: false
files:
- src: README.md
dst: README.md
- src: LICENSE
dst: LICENSE
- src: frontend/build
dst: web
- src: conf
dst: conf
- src: templates
dst: templates
release:
name_template: "v{{ .Version }}"
draft: false
prerelease: auto

View File

@@ -13,21 +13,21 @@ RUN apk update && apk add ca-certificates && rm -rf /var/cache/apk/*
# set working directory
WORKDIR /root
# copy the signoz binary
COPY ee/query-service/bin/signoz-${TARGETOS}-${TARGETARCH} /root/signoz
# copy the query-service binary
COPY ee/query-service/bin/query-service-${TARGETOS}-${TARGETARCH} /root/query-service
# copy prometheus YAML config
COPY pkg/query-service/config/prometheus.yml /root/config/prometheus.yml
COPY pkg/query-service/templates /root/templates
# Make signoz executable for non-root users
RUN chmod 755 /root /root/signoz
# Make query-service executable for non-root users
RUN chmod 755 /root /root/query-service
# Copy frontend
COPY frontend/build/ /etc/signoz/web/
# run the binary
ENTRYPOINT ["./signoz"]
ENTRYPOINT ["./query-service"]
CMD ["-config", "/root/config/prometheus.yml"]

View File

@@ -11,7 +11,6 @@ import (
"go.signoz.io/signoz/ee/query-service/interfaces"
"go.signoz.io/signoz/ee/query-service/license"
"go.signoz.io/signoz/ee/query-service/usage"
"go.signoz.io/signoz/pkg/alertmanager"
baseapp "go.signoz.io/signoz/pkg/query-service/app"
"go.signoz.io/signoz/pkg/query-service/app/cloudintegrations"
"go.signoz.io/signoz/pkg/query-service/app/integrations"
@@ -21,8 +20,6 @@ import (
basemodel "go.signoz.io/signoz/pkg/query-service/model"
rules "go.signoz.io/signoz/pkg/query-service/rules"
"go.signoz.io/signoz/pkg/query-service/version"
"go.signoz.io/signoz/pkg/signoz"
"go.signoz.io/signoz/pkg/types/authtypes"
)
type APIHandlerOptions struct {
@@ -44,7 +41,6 @@ type APIHandlerOptions struct {
FluxInterval time.Duration
UseLogsNewSchema bool
UseTraceNewSchema bool
JWT *authtypes.JWT
}
type APIHandler struct {
@@ -53,7 +49,7 @@ type APIHandler struct {
}
// NewAPIHandler returns an APIHandler
func NewAPIHandler(opts APIHandlerOptions, signoz *signoz.SigNoz) (*APIHandler, error) {
func NewAPIHandler(opts APIHandlerOptions) (*APIHandler, error) {
baseHandler, err := baseapp.NewAPIHandler(baseapp.APIHandlerOpts{
Reader: opts.DataConnector,
@@ -69,8 +65,6 @@ func NewAPIHandler(opts APIHandlerOptions, signoz *signoz.SigNoz) (*APIHandler,
FluxInterval: opts.FluxInterval,
UseLogsNewSchema: opts.UseLogsNewSchema,
UseTraceNewSchema: opts.UseTraceNewSchema,
AlertmanagerAPI: alertmanager.NewAPI(signoz.Alertmanager),
Signoz: signoz,
})
if err != nil {

View File

@@ -50,7 +50,7 @@ func (ah *APIHandler) loginUser(w http.ResponseWriter, r *http.Request) {
}
// if all looks good, call auth
resp, err := baseauth.Login(ctx, &req, ah.opts.JWT)
resp, err := baseauth.Login(ctx, &req)
if ah.HandleError(w, err, http.StatusUnauthorized) {
return
}
@@ -134,7 +134,7 @@ func (ah *APIHandler) registerUser(w http.ResponseWriter, r *http.Request) {
return
}
_, registerError := baseauth.Register(ctx, req, ah.Signoz.Alertmanager)
_, registerError := baseauth.Register(ctx, req)
if !registerError.IsNil() {
RespondError(w, apierr, nil)
return
@@ -253,7 +253,7 @@ func (ah *APIHandler) receiveGoogleAuth(w http.ResponseWriter, r *http.Request)
return
}
nextPage, err := ah.AppDao().PrepareSsoRedirect(ctx, redirectUri, identity.Email, ah.opts.JWT)
nextPage, err := ah.AppDao().PrepareSsoRedirect(ctx, redirectUri, identity.Email)
if err != nil {
zap.L().Error("[receiveGoogleAuth] failed to generate redirect URI after successful login ", zap.String("domain", domain.String()), zap.Error(err))
handleSsoError(w, r, redirectUri)
@@ -331,7 +331,7 @@ func (ah *APIHandler) receiveSAML(w http.ResponseWriter, r *http.Request) {
return
}
nextPage, err := ah.AppDao().PrepareSsoRedirect(ctx, redirectUri, email, ah.opts.JWT)
nextPage, err := ah.AppDao().PrepareSsoRedirect(ctx, redirectUri, email)
if err != nil {
zap.L().Error("[receiveSAML] failed to generate redirect URI after successful login ", zap.String("domain", domain.String()), zap.Error(err))
handleSsoError(w, r, redirectUri)

View File

@@ -18,7 +18,6 @@ import (
baseconstants "go.signoz.io/signoz/pkg/query-service/constants"
"go.signoz.io/signoz/pkg/query-service/dao"
basemodel "go.signoz.io/signoz/pkg/query-service/model"
"go.signoz.io/signoz/pkg/types"
"go.uber.org/zap"
)
@@ -38,7 +37,7 @@ func (ah *APIHandler) CloudIntegrationsGenerateConnectionParams(w http.ResponseW
return
}
currentUser, err := auth.GetUserFromReqContext(r.Context())
currentUser, err := auth.GetUserFromRequest(r)
if err != nil {
RespondError(w, basemodel.UnauthorizedError(fmt.Errorf(
"couldn't deduce current user: %w", err,
@@ -46,7 +45,7 @@ func (ah *APIHandler) CloudIntegrationsGenerateConnectionParams(w http.ResponseW
return
}
apiKey, apiErr := ah.getOrCreateCloudIntegrationPAT(r.Context(), currentUser.OrgID, cloudProvider)
apiKey, apiErr := ah.getOrCreateCloudIntegrationPAT(r.Context(), currentUser.OrgId, cloudProvider)
if apiErr != nil {
RespondError(w, basemodel.WrapApiError(
apiErr, "couldn't provision PAT for cloud integration:",
@@ -121,11 +120,11 @@ func (ah *APIHandler) getOrCreateCloudIntegrationPAT(ctx context.Context, orgId
allPats, err := ah.AppDao().ListPATs(ctx)
if err != nil {
return "", basemodel.InternalError(fmt.Errorf(
"couldn't list PATs: %w", err,
"couldn't list PATs: %w", err.Error(),
))
}
for _, p := range allPats {
if p.UserID == integrationUser.ID && p.Name == integrationPATName {
if p.UserID == integrationUser.Id && p.Name == integrationPATName {
return p.Token, nil
}
}
@@ -137,7 +136,7 @@ func (ah *APIHandler) getOrCreateCloudIntegrationPAT(ctx context.Context, orgId
newPAT := model.PAT{
Token: generatePATToken(),
UserID: integrationUser.ID,
UserID: integrationUser.Id,
Name: integrationPATName,
Role: baseconstants.ViewerGroup,
ExpiresAt: 0,
@@ -147,7 +146,7 @@ func (ah *APIHandler) getOrCreateCloudIntegrationPAT(ctx context.Context, orgId
integrationPAT, err := ah.AppDao().CreatePAT(ctx, newPAT)
if err != nil {
return "", basemodel.InternalError(fmt.Errorf(
"couldn't create cloud integration PAT: %w", err,
"couldn't create cloud integration PAT: %w", err.Error(),
))
}
return integrationPAT.Token, nil
@@ -155,7 +154,7 @@ func (ah *APIHandler) getOrCreateCloudIntegrationPAT(ctx context.Context, orgId
func (ah *APIHandler) getOrCreateCloudIntegrationUser(
ctx context.Context, orgId string, cloudProvider string,
) (*types.User, *basemodel.ApiError) {
) (*basemodel.User, *basemodel.ApiError) {
cloudIntegrationUserId := fmt.Sprintf("%s-integration", cloudProvider)
integrationUserResult, apiErr := ah.AppDao().GetUser(ctx, cloudIntegrationUserId)
@@ -172,21 +171,19 @@ func (ah *APIHandler) getOrCreateCloudIntegrationUser(
zap.String("cloudProvider", cloudProvider),
)
newUser := &types.User{
ID: cloudIntegrationUserId,
Name: fmt.Sprintf("%s integration", cloudProvider),
Email: fmt.Sprintf("%s@signoz.io", cloudIntegrationUserId),
TimeAuditable: types.TimeAuditable{
CreatedAt: time.Now(),
},
OrgID: orgId,
newUser := &basemodel.User{
Id: cloudIntegrationUserId,
Name: fmt.Sprintf("%s integration", cloudProvider),
Email: fmt.Sprintf("%s@signoz.io", cloudIntegrationUserId),
CreatedAt: time.Now().Unix(),
OrgId: orgId,
}
viewerGroup, apiErr := dao.DB().GetGroupByName(ctx, baseconstants.ViewerGroup)
if apiErr != nil {
return nil, basemodel.WrapApiError(apiErr, "couldn't get viewer group for creating integration user")
}
newUser.GroupID = viewerGroup.ID
newUser.GroupId = viewerGroup.Id
passwordHash, err := auth.PasswordHash(uuid.NewString())
if err != nil {


@@ -1,15 +1,15 @@
package api
import (
"errors"
"net/http"
"strings"
"github.com/gorilla/mux"
"go.signoz.io/signoz/pkg/errors"
"go.signoz.io/signoz/pkg/http/render"
"go.signoz.io/signoz/pkg/query-service/app/dashboards"
"go.signoz.io/signoz/pkg/query-service/auth"
"go.signoz.io/signoz/pkg/types/authtypes"
"go.signoz.io/signoz/pkg/query-service/common"
"go.signoz.io/signoz/pkg/query-service/model"
)
func (ah *APIHandler) lockDashboard(w http.ResponseWriter, r *http.Request) {
@@ -31,31 +31,26 @@ func (ah *APIHandler) lockUnlockDashboard(w http.ResponseWriter, r *http.Request
// Get the dashboard UUID from the request
uuid := mux.Vars(r)["uuid"]
if strings.HasPrefix(uuid, "integration") {
render.Error(w, errors.Newf(errors.TypeForbidden, errors.CodeForbidden, "dashboards created by integrations cannot be modified"))
return
if strings.HasPrefix(uuid,"integration") {
RespondError(w, &model.ApiError{Typ: model.ErrorForbidden, Err: errors.New("dashboards created by integrations cannot be unlocked")}, "You are not authorized to lock/unlock this dashboard")
return
}
claims, ok := authtypes.ClaimsFromContext(r.Context())
if !ok {
render.Error(w, errors.Newf(errors.TypeUnauthenticated, errors.CodeUnauthenticated, "unauthenticated"))
return
}
dashboard, err := dashboards.GetDashboard(r.Context(), claims.OrgID, uuid)
dashboard, err := dashboards.GetDashboard(r.Context(), uuid)
if err != nil {
render.Error(w, errors.Wrapf(err, errors.TypeInternal, errors.CodeInternal, "failed to get dashboard"))
RespondError(w, &model.ApiError{Typ: model.ErrorInternal, Err: err}, err.Error())
return
}
if !auth.IsAdminV2(claims) && (dashboard.CreatedBy != claims.Email) {
render.Error(w, errors.Newf(errors.TypeForbidden, errors.CodeForbidden, "You are not authorized to lock/unlock this dashboard"))
user := common.GetUserFromContext(r.Context())
if !auth.IsAdmin(user) && (dashboard.CreateBy != nil && *dashboard.CreateBy != user.Email) {
RespondError(w, &model.ApiError{Typ: model.ErrorForbidden, Err: err}, "You are not authorized to lock/unlock this dashboard")
return
}
// Lock/Unlock the dashboard
err = dashboards.LockUnlockDashboard(r.Context(), claims.OrgID, uuid, lock)
err = dashboards.LockUnlockDashboard(r.Context(), uuid, lock)
if err != nil {
render.Error(w, errors.Wrapf(err, errors.TypeInternal, errors.CodeInternal, "failed to lock/unlock dashboard"))
RespondError(w, &model.ApiError{Typ: model.ErrorInternal, Err: err}, err.Error())
return
}


@@ -34,7 +34,7 @@ func (ah *APIHandler) createPAT(w http.ResponseWriter, r *http.Request) {
RespondError(w, model.BadRequest(err), nil)
return
}
user, err := auth.GetUserFromReqContext(r.Context())
user, err := auth.GetUserFromRequest(r)
if err != nil {
RespondError(w, &model.ApiError{
Typ: model.ErrorUnauthorized,
@@ -54,7 +54,7 @@ func (ah *APIHandler) createPAT(w http.ResponseWriter, r *http.Request) {
}
// All the PATs are associated with the user creating the PAT.
pat.UserID = user.ID
pat.UserID = user.Id
pat.CreatedAt = time.Now().Unix()
pat.UpdatedAt = time.Now().Unix()
pat.LastUsed = 0
@@ -97,7 +97,7 @@ func (ah *APIHandler) updatePAT(w http.ResponseWriter, r *http.Request) {
return
}
user, err := auth.GetUserFromReqContext(r.Context())
user, err := auth.GetUserFromRequest(r)
if err != nil {
RespondError(w, &model.ApiError{
Typ: model.ErrorUnauthorized,
@@ -112,7 +112,7 @@ func (ah *APIHandler) updatePAT(w http.ResponseWriter, r *http.Request) {
return
}
req.UpdatedByUserID = user.ID
req.UpdatedByUserID = user.Id
id := mux.Vars(r)["id"]
req.UpdatedAt = time.Now().Unix()
zap.L().Info("Got Update PAT request", zap.Any("pat", req))
@@ -127,7 +127,7 @@ func (ah *APIHandler) updatePAT(w http.ResponseWriter, r *http.Request) {
func (ah *APIHandler) getPATs(w http.ResponseWriter, r *http.Request) {
ctx := context.Background()
user, err := auth.GetUserFromReqContext(r.Context())
user, err := auth.GetUserFromRequest(r)
if err != nil {
RespondError(w, &model.ApiError{
Typ: model.ErrorUnauthorized,
@@ -135,7 +135,7 @@ func (ah *APIHandler) getPATs(w http.ResponseWriter, r *http.Request) {
}, nil)
return
}
zap.L().Info("Get PATs for user", zap.String("user_id", user.ID))
zap.L().Info("Get PATs for user", zap.String("user_id", user.Id))
pats, apierr := ah.AppDao().ListPATs(ctx)
if apierr != nil {
RespondError(w, apierr, nil)
@@ -147,7 +147,7 @@ func (ah *APIHandler) getPATs(w http.ResponseWriter, r *http.Request) {
func (ah *APIHandler) revokePAT(w http.ResponseWriter, r *http.Request) {
ctx := context.Background()
id := mux.Vars(r)["id"]
user, err := auth.GetUserFromReqContext(r.Context())
user, err := auth.GetUserFromRequest(r)
if err != nil {
RespondError(w, &model.ApiError{
Typ: model.ErrorUnauthorized,
@@ -157,7 +157,7 @@ func (ah *APIHandler) revokePAT(w http.ResponseWriter, r *http.Request) {
}
zap.L().Info("Revoke PAT with id", zap.String("id", id))
if apierr := ah.AppDao().RevokePAT(ctx, id, user.ID); apierr != nil {
if apierr := ah.AppDao().RevokePAT(ctx, id, user.Id); apierr != nil {
RespondError(w, apierr, nil)
return
}


@@ -1,20 +1,25 @@
package app
import (
"bufio"
"bytes"
"context"
"encoding/json"
"errors"
"fmt"
"io"
"net"
"net/http"
_ "net/http/pprof" // http profiler
"regexp"
"time"
"github.com/gorilla/handlers"
"github.com/gorilla/mux"
"github.com/jmoiron/sqlx"
"github.com/rs/cors"
"github.com/soheilhy/cmux"
eemiddleware "go.signoz.io/signoz/ee/http/middleware"
"go.signoz.io/signoz/ee/query-service/app/api"
"go.signoz.io/signoz/ee/query-service/app/db"
"go.signoz.io/signoz/ee/query-service/auth"
@@ -23,12 +28,10 @@ import (
"go.signoz.io/signoz/ee/query-service/integrations/gateway"
"go.signoz.io/signoz/ee/query-service/interfaces"
"go.signoz.io/signoz/ee/query-service/rules"
"go.signoz.io/signoz/pkg/alertmanager"
"go.signoz.io/signoz/pkg/http/middleware"
baseauth "go.signoz.io/signoz/pkg/query-service/auth"
v3 "go.signoz.io/signoz/pkg/query-service/model/v3"
"go.signoz.io/signoz/pkg/signoz"
"go.signoz.io/signoz/pkg/sqlstore"
"go.signoz.io/signoz/pkg/types"
"go.signoz.io/signoz/pkg/types/authtypes"
"go.signoz.io/signoz/pkg/web"
licensepkg "go.signoz.io/signoz/ee/query-service/license"
@@ -47,6 +50,7 @@ import (
"go.signoz.io/signoz/pkg/query-service/cache"
baseconst "go.signoz.io/signoz/pkg/query-service/constants"
"go.signoz.io/signoz/pkg/query-service/healthcheck"
basealm "go.signoz.io/signoz/pkg/query-service/integrations/alertManager"
baseint "go.signoz.io/signoz/pkg/query-service/interfaces"
basemodel "go.signoz.io/signoz/pkg/query-service/model"
pqle "go.signoz.io/signoz/pkg/query-service/pqlEngine"
@@ -76,7 +80,6 @@ type ServerOptions struct {
GatewayUrl string
UseLogsNewSchema bool
UseTraceNewSchema bool
Jwt *authtypes.JWT
}
// Server runs HTTP api service
@@ -107,12 +110,12 @@ func (s Server) HealthCheckStatus() chan healthcheck.Status {
// NewServer creates and initializes Server
func NewServer(serverOptions *ServerOptions) (*Server, error) {
modelDao, err := dao.InitDao(serverOptions.SigNoz.SQLStore)
modelDao, err := dao.InitDao(serverOptions.SigNoz.SQLStore.SQLxDB())
if err != nil {
return nil, err
}
if err := baseexplorer.InitWithDSN(serverOptions.SigNoz.SQLStore.BunDB()); err != nil {
if err := baseexplorer.InitWithDSN(serverOptions.SigNoz.SQLStore.SQLxDB()); err != nil {
return nil, err
}
@@ -120,7 +123,7 @@ func NewServer(serverOptions *ServerOptions) (*Server, error) {
return nil, err
}
if err := dashboards.InitDB(serverOptions.SigNoz.SQLStore.BunDB()); err != nil {
if err := dashboards.InitDB(serverOptions.SigNoz.SQLStore.SQLxDB()); err != nil {
return nil, err
}
@@ -130,7 +133,7 @@ func NewServer(serverOptions *ServerOptions) (*Server, error) {
}
// initiate license manager
lm, err := licensepkg.StartManager(serverOptions.SigNoz.SQLStore.SQLxDB(), serverOptions.SigNoz.SQLStore.BunDB())
lm, err := licensepkg.StartManager(serverOptions.SigNoz.SQLStore.SQLxDB())
if err != nil {
return nil, err
}
@@ -177,8 +180,8 @@ func NewServer(serverOptions *ServerOptions) (*Server, error) {
}
<-readerReady
rm, err := makeRulesManager(
serverOptions.PromConfigPath,
rm, err := makeRulesManager(serverOptions.PromConfigPath,
baseconst.GetAlertManagerApiPrefix(),
serverOptions.RuleRepoURL,
serverOptions.SigNoz.SQLStore.SQLxDB(),
reader,
@@ -187,8 +190,6 @@ func NewServer(serverOptions *ServerOptions) (*Server, error) {
lm,
serverOptions.UseLogsNewSchema,
serverOptions.UseTraceNewSchema,
serverOptions.SigNoz.Alertmanager,
serverOptions.SigNoz.SQLStore,
)
if err != nil {
@@ -201,14 +202,14 @@ func NewServer(serverOptions *ServerOptions) (*Server, error) {
return nil, err
}
integrationsController, err := integrations.NewController(serverOptions.SigNoz.SQLStore)
integrationsController, err := integrations.NewController(serverOptions.SigNoz.SQLStore.SQLxDB())
if err != nil {
return nil, fmt.Errorf(
"couldn't create integrations controller: %w", err,
)
}
cloudIntegrationsController, err := cloudintegrations.NewController(serverOptions.SigNoz.SQLStore)
cloudIntegrationsController, err := cloudintegrations.NewController(serverOptions.SigNoz.SQLStore.SQLxDB())
if err != nil {
return nil, fmt.Errorf(
"couldn't create cloud provider integrations controller: %w", err,
@@ -268,10 +269,9 @@ func NewServer(serverOptions *ServerOptions) (*Server, error) {
GatewayUrl: serverOptions.GatewayUrl,
UseLogsNewSchema: serverOptions.UseLogsNewSchema,
UseTraceNewSchema: serverOptions.UseTraceNewSchema,
JWT: serverOptions.Jwt,
}
apiHandler, err := api.NewAPIHandler(apiOpts, serverOptions.SigNoz)
apiHandler, err := api.NewAPIHandler(apiOpts)
if err != nil {
return nil, err
}
@@ -311,8 +311,6 @@ func (s *Server) createPrivateServer(apiHandler *api.APIHandler) (*http.Server,
r := baseapp.NewRouter()
r.Use(middleware.NewAuth(zap.L(), s.serverOptions.Jwt, []string{"Authorization", "Sec-WebSocket-Protocol"}).Wrap)
r.Use(eemiddleware.NewPat([]string{"SIGNOZ-API-KEY"}).Wrap)
r.Use(middleware.NewTimeout(zap.L(),
s.serverOptions.Config.APIServer.Timeout.ExcludedRoutes,
s.serverOptions.Config.APIServer.Timeout.Default,
@@ -344,14 +342,14 @@ func (s *Server) createPublicServer(apiHandler *api.APIHandler, web web.Web) (*h
r := baseapp.NewRouter()
// add auth middleware
getUserFromRequest := func(ctx context.Context) (*types.GettableUser, error) {
user, err := auth.GetUserFromRequestContext(ctx, apiHandler)
getUserFromRequest := func(r *http.Request) (*basemodel.UserPayload, error) {
user, err := auth.GetUserFromRequest(r, apiHandler)
if err != nil {
return nil, err
}
if user.User.OrgID == "" {
if user.User.OrgId == "" {
return nil, basemodel.UnauthorizedError(errors.New("orgId is missing in the claims"))
}
@@ -359,8 +357,6 @@ func (s *Server) createPublicServer(apiHandler *api.APIHandler, web web.Web) (*h
}
am := baseapp.NewAuthMiddleware(getUserFromRequest)
r.Use(middleware.NewAuth(zap.L(), s.serverOptions.Jwt, []string{"Authorization", "Sec-WebSocket-Protocol"}).Wrap)
r.Use(eemiddleware.NewPat([]string{"SIGNOZ-API-KEY"}).Wrap)
r.Use(middleware.NewTimeout(zap.L(),
s.serverOptions.Config.APIServer.Timeout.ExcludedRoutes,
s.serverOptions.Config.APIServer.Timeout.Default,
@@ -378,9 +374,6 @@ func (s *Server) createPublicServer(apiHandler *api.APIHandler, web web.Web) (*h
apiHandler.RegisterQueryRangeV4Routes(r, am)
apiHandler.RegisterWebSocketPaths(r, am)
apiHandler.RegisterMessagingQueuesRoutes(r, am)
apiHandler.RegisterThirdPartyApiRoutes(r, am)
apiHandler.MetricExplorerRoutes(r, am)
apiHandler.RegisterTraceFunnelsRoutes(r, am)
c := cors.New(cors.Options{
AllowedOrigins: []string{"*"},
@@ -402,6 +395,174 @@ func (s *Server) createPublicServer(apiHandler *api.APIHandler, web web.Web) (*h
}, nil
}
// TODO(remove): Implemented at pkg/http/middleware/logging.go
type loggingResponseWriter struct {
http.ResponseWriter
statusCode int
}
// TODO(remove): Implemented at pkg/http/middleware/logging.go
func NewLoggingResponseWriter(w http.ResponseWriter) *loggingResponseWriter {
// WriteHeader(int) is not called if our response implicitly returns 200 OK, so
// we default to that status code.
return &loggingResponseWriter{w, http.StatusOK}
}
// TODO(remove): Implemented at pkg/http/middleware/logging.go
func (lrw *loggingResponseWriter) WriteHeader(code int) {
lrw.statusCode = code
lrw.ResponseWriter.WriteHeader(code)
}
// TODO(remove): Implemented at pkg/http/middleware/logging.go
// Flush implements the http.Flush interface.
func (lrw *loggingResponseWriter) Flush() {
lrw.ResponseWriter.(http.Flusher).Flush()
}
// TODO(remove): Implemented at pkg/http/middleware/logging.go
// Support websockets
func (lrw *loggingResponseWriter) Hijack() (net.Conn, *bufio.ReadWriter, error) {
h, ok := lrw.ResponseWriter.(http.Hijacker)
if !ok {
return nil, nil, errors.New("hijack not supported")
}
return h.Hijack()
}
func extractQueryRangeData(path string, r *http.Request) (map[string]interface{}, bool) {
pathToExtractBodyFromV3 := "/api/v3/query_range"
pathToExtractBodyFromV4 := "/api/v4/query_range"
data := map[string]interface{}{}
var postData *v3.QueryRangeParamsV3
if (r.Method == "POST") && ((path == pathToExtractBodyFromV3) || (path == pathToExtractBodyFromV4)) {
if r.Body != nil {
bodyBytes, err := io.ReadAll(r.Body)
if err != nil {
return nil, false
}
r.Body.Close() // must close
r.Body = io.NopCloser(bytes.NewBuffer(bodyBytes))
json.Unmarshal(bodyBytes, &postData)
} else {
return nil, false
}
} else {
return nil, false
}
referrer := r.Header.Get("Referer")
dashboardMatched, err := regexp.MatchString(`/dashboard/[a-zA-Z0-9\-]+/(new|edit)(?:\?.*)?$`, referrer)
if err != nil {
zap.L().Error("error while matching the referrer", zap.Error(err))
}
alertMatched, err := regexp.MatchString(`/alerts/(new|edit)(?:\?.*)?$`, referrer)
if err != nil {
zap.L().Error("error while matching the alert: ", zap.Error(err))
}
logsExplorerMatched, err := regexp.MatchString(`/logs/logs-explorer(?:\?.*)?$`, referrer)
if err != nil {
zap.L().Error("error while matching the logs explorer: ", zap.Error(err))
}
traceExplorerMatched, err := regexp.MatchString(`/traces-explorer(?:\?.*)?$`, referrer)
if err != nil {
zap.L().Error("error while matching the trace explorer: ", zap.Error(err))
}
queryInfoResult := telemetry.GetInstance().CheckQueryInfo(postData)
if (queryInfoResult.MetricsUsed || queryInfoResult.LogsUsed || queryInfoResult.TracesUsed) && (queryInfoResult.FilterApplied) {
if queryInfoResult.MetricsUsed {
telemetry.GetInstance().AddActiveMetricsUser()
}
if queryInfoResult.LogsUsed {
telemetry.GetInstance().AddActiveLogsUser()
}
if queryInfoResult.TracesUsed {
telemetry.GetInstance().AddActiveTracesUser()
}
data["metricsUsed"] = queryInfoResult.MetricsUsed
data["logsUsed"] = queryInfoResult.LogsUsed
data["tracesUsed"] = queryInfoResult.TracesUsed
data["filterApplied"] = queryInfoResult.FilterApplied
data["groupByApplied"] = queryInfoResult.GroupByApplied
data["aggregateOperator"] = queryInfoResult.AggregateOperator
data["aggregateAttributeKey"] = queryInfoResult.AggregateAttributeKey
data["numberOfQueries"] = queryInfoResult.NumberOfQueries
data["queryType"] = queryInfoResult.QueryType
data["panelType"] = queryInfoResult.PanelType
userEmail, err := baseauth.GetEmailFromJwt(r.Context())
if err == nil {
// switch case to set data["screen"] based on the referrer
switch {
case dashboardMatched:
data["screen"] = "panel"
case alertMatched:
data["screen"] = "alert"
case logsExplorerMatched:
data["screen"] = "logs-explorer"
case traceExplorerMatched:
data["screen"] = "traces-explorer"
default:
data["screen"] = "unknown"
return data, true
}
telemetry.GetInstance().SendEvent(telemetry.TELEMETRY_EVENT_QUERY_RANGE_API, data, userEmail, true, false)
}
}
return data, true
}
func getActiveLogs(path string, r *http.Request) {
// if path == "/api/v1/dashboards/{uuid}" {
// telemetry.GetInstance().AddActiveMetricsUser()
// }
if path == "/api/v1/logs" {
hasFilters := len(r.URL.Query().Get("q"))
if hasFilters > 0 {
telemetry.GetInstance().AddActiveLogsUser()
}
}
}
func (s *Server) analyticsMiddleware(next http.Handler) http.Handler {
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
ctx := baseauth.AttachJwtToContext(r.Context(), r)
r = r.WithContext(ctx)
route := mux.CurrentRoute(r)
path, _ := route.GetPathTemplate()
queryRangeData, metadataExists := extractQueryRangeData(path, r)
getActiveLogs(path, r)
lrw := NewLoggingResponseWriter(w)
next.ServeHTTP(lrw, r)
data := map[string]interface{}{"path": path, "statusCode": lrw.statusCode}
if metadataExists {
for key, value := range queryRangeData {
data[key] = value
}
}
if _, ok := telemetry.EnabledPaths()[path]; ok {
userEmail, err := baseauth.GetEmailFromJwt(r.Context())
if err == nil {
telemetry.GetInstance().SendEvent(telemetry.TELEMETRY_EVENT_PATH, data, userEmail, true, false)
}
}
})
}
// initListeners initialises listeners of the server
func (s *Server) initListeners() error {
// listen on public port
@@ -534,6 +695,7 @@ func (s *Server) Stop() error {
func makeRulesManager(
promConfigPath,
alertManagerURL string,
ruleRepoURL string,
db *sqlx.DB,
ch baseint.Reader,
@@ -541,34 +703,39 @@ func makeRulesManager(
disableRules bool,
fm baseint.FeatureLookup,
useLogsNewSchema bool,
useTraceNewSchema bool,
alertmanager alertmanager.Alertmanager,
sqlstore sqlstore.SQLStore,
) (*baserules.Manager, error) {
useTraceNewSchema bool) (*baserules.Manager, error) {
// create engine
pqle, err := pqle.FromConfigPath(promConfigPath)
if err != nil {
return nil, fmt.Errorf("failed to create pql engine : %v", err)
}
// notifier opts
notifierOpts := basealm.NotifierOptions{
QueueCapacity: 10000,
Timeout: 1 * time.Second,
AlertManagerURLs: []string{alertManagerURL},
}
// create manager opts
managerOpts := &baserules.ManagerOptions{
PqlEngine: pqle,
RepoURL: ruleRepoURL,
DBConn: db,
Context: context.Background(),
Logger: zap.L(),
DisableRules: disableRules,
FeatureFlags: fm,
Reader: ch,
Cache: cache,
EvalDelay: baseconst.GetEvalDelay(),
NotifierOpts: notifierOpts,
PqlEngine: pqle,
RepoURL: ruleRepoURL,
DBConn: db,
Context: context.Background(),
Logger: zap.L(),
DisableRules: disableRules,
FeatureFlags: fm,
Reader: ch,
Cache: cache,
EvalDelay: baseconst.GetEvalDelay(),
PrepareTaskFunc: rules.PrepareTaskFunc,
UseLogsNewSchema: useLogsNewSchema,
UseTraceNewSchema: useTraceNewSchema,
PrepareTestRuleFunc: rules.TestNotification,
Alertmanager: alertmanager,
SQLStore: sqlstore,
}
// create Manager


@@ -3,20 +3,20 @@ package auth
import (
"context"
"fmt"
"net/http"
"time"
"go.signoz.io/signoz/ee/query-service/app/api"
baseauth "go.signoz.io/signoz/pkg/query-service/auth"
basemodel "go.signoz.io/signoz/pkg/query-service/model"
"go.signoz.io/signoz/pkg/query-service/telemetry"
"go.signoz.io/signoz/pkg/types"
"go.signoz.io/signoz/pkg/types/authtypes"
"go.uber.org/zap"
)
func GetUserFromRequestContext(ctx context.Context, apiHandler *api.APIHandler) (*types.GettableUser, error) {
patToken, ok := authtypes.UUIDFromContext(ctx)
if ok && patToken != "" {
func GetUserFromRequest(r *http.Request, apiHandler *api.APIHandler) (*basemodel.UserPayload, error) {
patToken := r.Header.Get("SIGNOZ-API-KEY")
if len(patToken) > 0 {
zap.L().Debug("Received a non-zero length PAT token")
ctx := context.Background()
dao := apiHandler.AppDao()
@@ -40,9 +40,9 @@ func GetUserFromRequestContext(ctx context.Context, apiHandler *api.APIHandler)
}
telemetry.GetInstance().SetPatTokenUser()
dao.UpdatePATLastUsed(ctx, patToken, time.Now().Unix())
user.User.GroupID = group.ID
user.User.ID = pat.Id
return &types.GettableUser{
user.User.GroupId = group.Id
user.User.Id = pat.Id
return &basemodel.UserPayload{
User: user.User,
Role: pat.Role,
}, nil
@@ -52,5 +52,5 @@ func GetUserFromRequestContext(ctx context.Context, apiHandler *api.APIHandler)
return nil, err
}
}
return baseauth.GetUserFromReqContext(ctx)
return baseauth.GetUserFromRequest(r)
}


@@ -5,7 +5,7 @@ import (
)
const (
DefaultSiteURL = "https://localhost:8080"
DefaultSiteURL = "https://localhost:3301"
)
var LicenseSignozIo = "https://license.signoz.io/api/v1"


@@ -1,10 +1,10 @@
package dao
import (
"github.com/jmoiron/sqlx"
"go.signoz.io/signoz/ee/query-service/dao/sqlite"
"go.signoz.io/signoz/pkg/sqlstore"
)
func InitDao(sqlStore sqlstore.SQLStore) (ModelDao, error) {
return sqlite.InitDB(sqlStore)
func InitDao(inputDB *sqlx.DB) (ModelDao, error) {
return sqlite.InitDB(inputDB)
}


@@ -10,8 +10,6 @@ import (
basedao "go.signoz.io/signoz/pkg/query-service/dao"
baseint "go.signoz.io/signoz/pkg/query-service/interfaces"
basemodel "go.signoz.io/signoz/pkg/query-service/model"
"go.signoz.io/signoz/pkg/types"
"go.signoz.io/signoz/pkg/types/authtypes"
)
type ModelDao interface {
@@ -24,7 +22,7 @@ type ModelDao interface {
// auth methods
CanUsePassword(ctx context.Context, email string) (bool, basemodel.BaseApiError)
PrepareSsoRedirect(ctx context.Context, redirectUri, email string, jwt *authtypes.JWT) (redirectURL string, apierr basemodel.BaseApiError)
PrepareSsoRedirect(ctx context.Context, redirectUri, email string) (redirectURL string, apierr basemodel.BaseApiError)
GetDomainFromSsoResponse(ctx context.Context, relayState *url.URL) (*model.OrgDomain, error)
// org domain (auth domains) CRUD ops
@@ -40,7 +38,7 @@ type ModelDao interface {
GetPAT(ctx context.Context, pat string) (*model.PAT, basemodel.BaseApiError)
UpdatePATLastUsed(ctx context.Context, pat string, lastUsed int64) basemodel.BaseApiError
GetPATByID(ctx context.Context, id string) (*model.PAT, basemodel.BaseApiError)
GetUserByPAT(ctx context.Context, token string) (*types.GettableUser, basemodel.BaseApiError)
GetUserByPAT(ctx context.Context, token string) (*basemodel.UserPayload, basemodel.BaseApiError)
ListPATs(ctx context.Context) ([]model.PAT, basemodel.BaseApiError)
RevokePAT(ctx context.Context, id string, userID string) basemodel.BaseApiError
}


@@ -14,12 +14,10 @@ import (
baseconst "go.signoz.io/signoz/pkg/query-service/constants"
basemodel "go.signoz.io/signoz/pkg/query-service/model"
"go.signoz.io/signoz/pkg/query-service/utils"
"go.signoz.io/signoz/pkg/types"
"go.signoz.io/signoz/pkg/types/authtypes"
"go.uber.org/zap"
)
func (m *modelDao) createUserForSAMLRequest(ctx context.Context, email string) (*types.User, basemodel.BaseApiError) {
func (m *modelDao) createUserForSAMLRequest(ctx context.Context, email string) (*basemodel.User, basemodel.BaseApiError) {
// get auth domain from email domain
domain, apierr := m.GetDomainByEmail(ctx, email)
if apierr != nil {
@@ -43,17 +41,15 @@ func (m *modelDao) createUserForSAMLRequest(ctx context.Context, email string) (
return nil, apiErr
}
user := &types.User{
ID: uuid.NewString(),
Name: "",
Email: email,
Password: hash,
TimeAuditable: types.TimeAuditable{
CreatedAt: time.Now(),
},
user := &basemodel.User{
Id: uuid.NewString(),
Name: "",
Email: email,
Password: hash,
CreatedAt: time.Now().Unix(),
ProfilePictureURL: "", // Currently unused
GroupID: group.ID,
OrgID: domain.OrgId,
GroupId: group.Id,
OrgId: domain.OrgId,
}
user, apiErr = m.CreateUser(ctx, user, false)
@@ -68,7 +64,7 @@ func (m *modelDao) createUserForSAMLRequest(ctx context.Context, email string) (
// PrepareSsoRedirect prepares redirect page link after SSO response
// is successfully parsed (i.e. valid email is available)
func (m *modelDao) PrepareSsoRedirect(ctx context.Context, redirectUri, email string, jwt *authtypes.JWT) (redirectURL string, apierr basemodel.BaseApiError) {
func (m *modelDao) PrepareSsoRedirect(ctx context.Context, redirectUri, email string) (redirectURL string, apierr basemodel.BaseApiError) {
userPayload, apierr := m.GetUserByEmail(ctx, email)
if !apierr.IsNil() {
@@ -76,7 +72,7 @@ func (m *modelDao) PrepareSsoRedirect(ctx context.Context, redirectUri, email st
return "", model.BadRequestStr("invalid user email received from the auth provider")
}
user := &types.User{}
user := &basemodel.User{}
if userPayload == nil {
newUser, apiErr := m.createUserForSAMLRequest(ctx, email)
@@ -89,7 +85,7 @@ func (m *modelDao) PrepareSsoRedirect(ctx context.Context, redirectUri, email st
user = &userPayload.User
}
tokenStore, err := baseauth.GenerateJWTForUser(user, jwt)
tokenStore, err := baseauth.GenerateJWTForUser(user)
if err != nil {
zap.L().Error("failed to generate token for SSO login user", zap.Error(err))
return "", model.InternalErrorStr("failed to generate token for the user")
@@ -98,7 +94,7 @@ func (m *modelDao) PrepareSsoRedirect(ctx context.Context, redirectUri, email st
return fmt.Sprintf("%s?jwt=%s&usr=%s&refreshjwt=%s",
redirectUri,
tokenStore.AccessJwt,
user.ID,
user.Id,
tokenStore.RefreshJwt), nil
}


@@ -7,7 +7,6 @@ import (
basedao "go.signoz.io/signoz/pkg/query-service/dao"
basedsql "go.signoz.io/signoz/pkg/query-service/dao/sqlite"
baseint "go.signoz.io/signoz/pkg/query-service/interfaces"
"go.signoz.io/signoz/pkg/sqlstore"
)
type modelDao struct {
@@ -30,8 +29,8 @@ func (m *modelDao) checkFeature(key string) error {
}
// InitDB creates and extends base model DB repository
func InitDB(sqlStore sqlstore.SQLStore) (*modelDao, error) {
dao, err := basedsql.InitDB(sqlStore)
func InitDB(inputDB *sqlx.DB) (*modelDao, error) {
dao, err := basedsql.InitDB(inputDB)
if err != nil {
return nil, err
}


@@ -8,7 +8,6 @@ import (
"go.signoz.io/signoz/ee/query-service/model"
basemodel "go.signoz.io/signoz/pkg/query-service/model"
"go.signoz.io/signoz/pkg/types"
"go.uber.org/zap"
)
@@ -43,10 +42,10 @@ func (m *modelDao) CreatePAT(ctx context.Context, p model.PAT) (model.PAT, basem
}
} else {
p.CreatedByUser = model.User{
Id: createdByUser.ID,
Id: createdByUser.Id,
Name: createdByUser.Name,
Email: createdByUser.Email,
CreatedAt: createdByUser.CreatedAt.Unix(),
CreatedAt: createdByUser.CreatedAt,
ProfilePictureURL: createdByUser.ProfilePictureURL,
NotFound: false,
}
@@ -96,10 +95,10 @@ func (m *modelDao) ListPATs(ctx context.Context) ([]model.PAT, basemodel.BaseApi
}
} else {
pats[i].CreatedByUser = model.User{
Id: createdByUser.ID,
Id: createdByUser.Id,
Name: createdByUser.Name,
Email: createdByUser.Email,
CreatedAt: createdByUser.CreatedAt.Unix(),
CreatedAt: createdByUser.CreatedAt,
ProfilePictureURL: createdByUser.ProfilePictureURL,
NotFound: false,
}
@@ -112,10 +111,10 @@ func (m *modelDao) ListPATs(ctx context.Context) ([]model.PAT, basemodel.BaseApi
}
} else {
pats[i].UpdatedByUser = model.User{
Id: updatedByUser.ID,
Id: updatedByUser.Id,
Name: updatedByUser.Name,
Email: updatedByUser.Email,
CreatedAt: updatedByUser.CreatedAt.Unix(),
CreatedAt: updatedByUser.CreatedAt,
ProfilePictureURL: updatedByUser.ProfilePictureURL,
NotFound: false,
}
@@ -171,8 +170,8 @@ func (m *modelDao) GetPATByID(ctx context.Context, id string) (*model.PAT, basem
}
// deprecated
func (m *modelDao) GetUserByPAT(ctx context.Context, token string) (*types.GettableUser, basemodel.BaseApiError) {
users := []types.GettableUser{}
func (m *modelDao) GetUserByPAT(ctx context.Context, token string) (*basemodel.UserPayload, basemodel.BaseApiError) {
users := []basemodel.UserPayload{}
query := `SELECT
u.id,


@@ -47,7 +47,7 @@ func ValidateLicenseV3(licenseKey string) (*model.LicenseV3, *model.ApiError) {
req, err := http.NewRequest("GET", C.GatewayUrl+"/v2/licenses/me", nil)
if err != nil {
return nil, model.BadRequest(errors.Wrap(err, "failed to create request"))
return nil, model.BadRequest(errors.Wrap(err, fmt.Sprintf("failed to create request: %w", err)))
}
// Setting the custom header
@@ -55,7 +55,7 @@ func ValidateLicenseV3(licenseKey string) (*model.LicenseV3, *model.ApiError) {
response, err := client.Do(req)
if err != nil {
return nil, model.BadRequest(errors.Wrap(err, "failed to make post request"))
return nil, model.BadRequest(errors.Wrap(err, fmt.Sprintf("failed to make post request: %w", err)))
}
body, err := io.ReadAll(response.Body)


@@ -9,25 +9,21 @@ import (
"github.com/jmoiron/sqlx"
"github.com/mattn/go-sqlite3"
"github.com/uptrace/bun"
"go.signoz.io/signoz/ee/query-service/model"
basemodel "go.signoz.io/signoz/pkg/query-service/model"
"go.signoz.io/signoz/pkg/types"
"go.uber.org/zap"
)
// Repo is license repo. stores license keys in a secured DB
type Repo struct {
db *sqlx.DB
bundb *bun.DB
db *sqlx.DB
}
// NewLicenseRepo initiates a new license repo
func NewLicenseRepo(db *sqlx.DB, bundb *bun.DB) Repo {
func NewLicenseRepo(db *sqlx.DB) Repo {
return Repo{
db: db,
bundb: bundb,
db: db,
}
}
@@ -169,25 +165,24 @@ func (r *Repo) UpdateLicenseV3(ctx context.Context, l *model.LicenseV3) error {
return nil
}
func (r *Repo) CreateFeature(req *types.FeatureStatus) *basemodel.ApiError {
func (r *Repo) CreateFeature(req *basemodel.Feature) *basemodel.ApiError {
_, err := r.bundb.NewInsert().
Model(req).
Exec(context.Background())
_, err := r.db.Exec(
`INSERT INTO feature_status (name, active, usage, usage_limit, route)
VALUES (?, ?, ?, ?, ?);`,
req.Name, req.Active, req.Usage, req.UsageLimit, req.Route)
if err != nil {
return &basemodel.ApiError{Typ: basemodel.ErrorInternal, Err: err}
}
return nil
}
func (r *Repo) GetFeature(featureName string) (types.FeatureStatus, error) {
var feature types.FeatureStatus
func (r *Repo) GetFeature(featureName string) (basemodel.Feature, error) {
err := r.bundb.NewSelect().
Model(&feature).
Where("name = ?", featureName).
Scan(context.Background())
var feature basemodel.Feature
err := r.db.Get(&feature,
`SELECT * FROM feature_status WHERE name = ?;`, featureName)
if err != nil {
return feature, err
}
@@ -210,19 +205,18 @@ func (r *Repo) GetAllFeatures() ([]basemodel.Feature, error) {
return feature, nil
}
func (r *Repo) UpdateFeature(req types.FeatureStatus) error {
func (r *Repo) UpdateFeature(req basemodel.Feature) error {
_, err := r.bundb.NewUpdate().
Model(&req).
Where("name = ?", req.Name).
Exec(context.Background())
_, err := r.db.Exec(
`UPDATE feature_status SET active = ?, usage = ?, usage_limit = ?, route = ? WHERE name = ?;`,
req.Active, req.Usage, req.UsageLimit, req.Route, req.Name)
if err != nil {
return err
}
return nil
}
func (r *Repo) InitFeatures(req []types.FeatureStatus) error {
func (r *Repo) InitFeatures(req basemodel.FeatureSet) error {
// get a feature by name, if it doesn't exist, create it. If it does exist, update it.
for _, feature := range req {
currentFeature, err := r.GetFeature(feature.Name)
@@ -235,7 +229,7 @@ func (r *Repo) InitFeatures(req []types.FeatureStatus) error {
} else if err != nil {
return err
}
feature.Usage = int(currentFeature.Usage)
feature.Usage = currentFeature.Usage
if feature.Usage >= feature.UsageLimit && feature.UsageLimit != -1 {
feature.Active = false
}


@@ -7,13 +7,11 @@ import (
"github.com/jmoiron/sqlx"
"github.com/pkg/errors"
"github.com/uptrace/bun"
"sync"
"go.signoz.io/signoz/pkg/query-service/auth"
baseconstants "go.signoz.io/signoz/pkg/query-service/constants"
"go.signoz.io/signoz/pkg/types"
"go.signoz.io/signoz/pkg/types/authtypes"
validate "go.signoz.io/signoz/ee/query-service/integrations/signozio"
"go.signoz.io/signoz/ee/query-service/model"
@@ -45,12 +43,12 @@ type Manager struct {
activeFeatures basemodel.FeatureSet
}
func StartManager(db *sqlx.DB, bundb *bun.DB, features ...basemodel.Feature) (*Manager, error) {
func StartManager(db *sqlx.DB, features ...basemodel.Feature) (*Manager, error) {
if LM != nil {
return LM, nil
}
repo := NewLicenseRepo(db, bundb)
repo := NewLicenseRepo(db)
m := &Manager{
repo: &repo,
}
@@ -239,10 +237,10 @@ func (lm *Manager) ValidateV3(ctx context.Context) (reterr error) {
func (lm *Manager) ActivateV3(ctx context.Context, licenseKey string) (licenseResponse *model.LicenseV3, errResponse *model.ApiError) {
defer func() {
if errResponse != nil {
claims, ok := authtypes.ClaimsFromContext(ctx)
if ok {
userEmail, err := auth.GetEmailFromJwt(ctx)
if err == nil {
telemetry.GetInstance().SendEvent(telemetry.TELEMETRY_LICENSE_ACT_FAILED,
map[string]interface{}{"err": errResponse.Err.Error()}, claims.Email, true, false)
map[string]interface{}{"err": errResponse.Err.Error()}, userEmail, true, false)
}
}
}()
@@ -284,41 +282,15 @@ func (lm *Manager) GetFeatureFlags() (basemodel.FeatureSet, error) {
}
func (lm *Manager) InitFeatures(features basemodel.FeatureSet) error {
featureStatus := make([]types.FeatureStatus, len(features))
for i, f := range features {
featureStatus[i] = types.FeatureStatus{
Name: f.Name,
Active: f.Active,
Usage: int(f.Usage),
UsageLimit: int(f.UsageLimit),
Route: f.Route,
}
}
return lm.repo.InitFeatures(featureStatus)
return lm.repo.InitFeatures(features)
}
func (lm *Manager) UpdateFeatureFlag(feature basemodel.Feature) error {
return lm.repo.UpdateFeature(types.FeatureStatus{
Name: feature.Name,
Active: feature.Active,
Usage: int(feature.Usage),
UsageLimit: int(feature.UsageLimit),
Route: feature.Route,
})
return lm.repo.UpdateFeature(feature)
}
func (lm *Manager) GetFeatureFlag(key string) (basemodel.Feature, error) {
featureStatus, err := lm.repo.GetFeature(key)
if err != nil {
return basemodel.Feature{}, err
}
return basemodel.Feature{
Name: featureStatus.Name,
Active: featureStatus.Active,
Usage: int64(featureStatus.Usage),
UsageLimit: int64(featureStatus.UsageLimit),
Route: featureStatus.Route,
}, nil
return lm.repo.GetFeature(key)
}
// GetRepo return the license repo


@@ -7,6 +7,7 @@ import (
"os"
"os/signal"
"strconv"
"syscall"
"time"
"go.opentelemetry.io/otel/sdk/resource"
@@ -19,7 +20,6 @@ import (
baseconst "go.signoz.io/signoz/pkg/query-service/constants"
"go.signoz.io/signoz/pkg/query-service/version"
"go.signoz.io/signoz/pkg/signoz"
"go.signoz.io/signoz/pkg/types/authtypes"
"google.golang.org/grpc"
"google.golang.org/grpc/credentials/insecure"
@@ -149,28 +149,11 @@ func main() {
zap.L().Fatal("Failed to create config", zap.Error(err))
}
signoz, err := signoz.New(
context.Background(),
config,
signoz.NewCacheProviderFactories(),
signoz.NewWebProviderFactories(),
signoz.NewSQLStoreProviderFactories(),
signoz.NewTelemetryStoreProviderFactories(),
)
signoz, err := signoz.New(context.Background(), config, signoz.NewProviderConfig())
if err != nil {
zap.L().Fatal("Failed to create signoz struct", zap.Error(err))
}
jwtSecret := os.Getenv("SIGNOZ_JWT_SECRET")
if len(jwtSecret) == 0 {
zap.L().Warn("No JWT secret key is specified.")
} else {
zap.L().Info("JWT secret key set successfully.")
}
jwt := authtypes.NewJWT(jwtSecret, 30*time.Minute, 30*24*time.Hour)
serverOptions := &app.ServerOptions{
Config: config,
SigNoz: signoz,
@@ -188,7 +171,15 @@ func main() {
GatewayUrl: gatewayUrl,
UseLogsNewSchema: useLogsNewSchema,
UseTraceNewSchema: useTraceNewSchema,
Jwt: jwt,
}
// Read the jwt secret key
auth.JwtSecret = os.Getenv("SIGNOZ_JWT_SECRET")
if len(auth.JwtSecret) == 0 {
zap.L().Warn("No JWT secret key is specified.")
} else {
zap.L().Info("JWT secret key set successfully.")
}
server, err := app.NewServer(serverOptions)
@@ -204,19 +195,16 @@ func main() {
zap.L().Fatal("Failed to initialize auth cache", zap.Error(err))
}
signoz.Start(context.Background())
signalsChannel := make(chan os.Signal, 1)
signal.Notify(signalsChannel, os.Interrupt, syscall.SIGTERM)
if err := signoz.Wait(context.Background()); err != nil {
zap.L().Fatal("Failed to start signoz", zap.Error(err))
}
err = server.Stop()
if err != nil {
zap.L().Fatal("Failed to stop server", zap.Error(err))
}
err = signoz.Stop(context.Background())
if err != nil {
zap.L().Fatal("Failed to stop signoz", zap.Error(err))
for {
select {
case status := <-server.HealthCheckStatus():
zap.L().Info("Received HealthCheck status: ", zap.Int("status", int(status)))
case <-signalsChannel:
zap.L().Fatal("Received OS Interrupt Signal ... ")
server.Stop()
}
}
}


@@ -11,7 +11,7 @@ import (
saml2 "github.com/russellhaering/gosaml2"
"go.signoz.io/signoz/ee/query-service/sso"
"go.signoz.io/signoz/ee/query-service/sso/saml"
"go.signoz.io/signoz/pkg/types"
basemodel "go.signoz.io/signoz/pkg/query-service/model"
"go.uber.org/zap"
)
@@ -33,7 +33,7 @@ type OrgDomain struct {
SamlConfig *SamlConfig `json:"samlConfig"`
GoogleAuthConfig *GoogleOAuthConfig `json:"googleAuthConfig"`
Org *types.Organization
Org *basemodel.Organization
}
func (od *OrgDomain) String() string {

View File

@@ -97,8 +97,8 @@ func TestNewLicenseV3(t *testing.T) {
},
},
{
name: "Fallback to basic plan if license status is invalid",
data: []byte(`{"id":"does-not-matter","key":"does-not-matter-key","category":"FREE","status":"INVALID","plan":{"name":"TEAMS"},"valid_from": 1730899309,"valid_until": -1}`),
name: "Fallback to basic plan if license status is inactive",
data: []byte(`{"id":"does-not-matter","key":"does-not-matter-key","category":"FREE","status":"INACTIVE","plan":{"name":"TEAMS"},"valid_from": 1730899309,"valid_until": -1}`),
pass: true,
expected: &LicenseV3{
ID: "does-not-matter",
@@ -108,14 +108,14 @@ func TestNewLicenseV3(t *testing.T) {
"name": "TEAMS",
},
"category": "FREE",
"status": "INVALID",
"status": "INACTIVE",
"valid_from": float64(1730899309),
"valid_until": float64(-1),
},
PlanName: PlanNameBasic,
ValidFrom: 1730899309,
ValidUntil: -1,
Status: "INVALID",
Status: "INACTIVE",
IsCurrent: false,
Features: model.FeatureSet{},
},


@@ -157,6 +157,13 @@ var BasicPlan = basemodel.FeatureSet{
UsageLimit: -1,
Route: "",
},
basemodel.Feature{
Name: basemodel.AWSIntegration,
Active: false,
Usage: 0,
UsageLimit: -1,
Route: "",
},
}
var ProPlan = basemodel.FeatureSet{
@@ -279,6 +286,13 @@ var ProPlan = basemodel.FeatureSet{
UsageLimit: -1,
Route: "",
},
basemodel.Feature{
Name: basemodel.AWSIntegration,
Active: false,
Usage: 0,
UsageLimit: -1,
Route: "",
},
}
var EnterprisePlan = basemodel.FeatureSet{
@@ -415,4 +429,11 @@ var EnterprisePlan = basemodel.FeatureSet{
UsageLimit: -1,
Route: "",
},
basemodel.Feature{
Name: basemodel.AWSIntegration,
Active: false,
Usage: 0,
UsageLimit: -1,
Route: "",
},
}


@@ -28,7 +28,6 @@ func PrepareTaskFunc(opts baserules.PrepareTaskOptions) (baserules.Task, error)
opts.UseLogsNewSchema,
opts.UseTraceNewSchema,
baserules.WithEvalDelay(opts.ManagerOpts.EvalDelay),
baserules.WithSQLStore(opts.SQLStore),
)
if err != nil {
@@ -49,7 +48,6 @@ func PrepareTaskFunc(opts baserules.PrepareTaskOptions) (baserules.Task, error)
opts.Logger,
opts.Reader,
opts.ManagerOpts.PqlEngine,
baserules.WithSQLStore(opts.SQLStore),
)
if err != nil {
@@ -70,7 +68,6 @@ func PrepareTaskFunc(opts baserules.PrepareTaskOptions) (baserules.Task, error)
opts.Reader,
opts.Cache,
baserules.WithEvalDelay(opts.ManagerOpts.EvalDelay),
baserules.WithSQLStore(opts.SQLStore),
)
if err != nil {
return task, err
@@ -129,7 +126,6 @@ func TestNotification(opts baserules.PrepareTestRuleOptions) (int, *basemodel.Ap
opts.UseTraceNewSchema,
baserules.WithSendAlways(),
baserules.WithSendUnmatched(),
baserules.WithSQLStore(opts.SQLStore),
)
if err != nil {
@@ -148,7 +144,6 @@ func TestNotification(opts baserules.PrepareTestRuleOptions) (int, *basemodel.Ap
opts.ManagerOpts.PqlEngine,
baserules.WithSendAlways(),
baserules.WithSendUnmatched(),
baserules.WithSQLStore(opts.SQLStore),
)
if err != nil {
@@ -165,7 +160,6 @@ func TestNotification(opts baserules.PrepareTestRuleOptions) (int, *basemodel.Ap
opts.Cache,
baserules.WithSendAlways(),
baserules.WithSendUnmatched(),
baserules.WithSQLStore(opts.SQLStore),
)
if err != nil {
zap.L().Error("failed to prepare a new anomaly rule for test", zap.String("name", rule.Name()), zap.Error(err))

frontend/.dockerignore Normal file

@@ -0,0 +1,3 @@
node_modules
.vscode
.git

frontend/Dockerfile Normal file

@@ -0,0 +1,18 @@
FROM nginx:1.26-alpine
# Add Maintainer Info
LABEL maintainer="signoz"
# Set working directory
WORKDIR /frontend
# Remove default nginx index page
RUN rm -rf /usr/share/nginx/html/*
# Copy custom nginx config and static files
COPY conf/default.conf /etc/nginx/conf.d/default.conf
COPY build /usr/share/nginx/html
EXPOSE 3301
ENTRYPOINT ["nginx", "-g", "daemon off;"]


@@ -0,0 +1,33 @@
server {
listen 3301;
server_name _;
gzip on;
gzip_static on;
gzip_types text/plain text/css application/json application/x-javascript text/xml application/xml application/xml+rss text/javascript;
gzip_proxied any;
gzip_vary on;
gzip_comp_level 6;
gzip_buffers 16 8k;
gzip_http_version 1.1;
# to handle uri issue 414 from nginx
client_max_body_size 24M;
large_client_header_buffers 8 128k;
location / {
root /usr/share/nginx/html;
index index.html index.htm;
try_files $uri $uri/ /index.html;
}
location = /api {
proxy_pass http://signoz-query-service:8080/api;
}
# redirect server error pages to the static page /50x.html
#
error_page 500 502 503 504 /50x.html;
location = /50x.html {
root /usr/share/nginx/html;
}
}


@@ -0,0 +1,7 @@
version: "3.9"
services:
web:
build: .
image: signoz/frontend:latest
ports:
- "3301:3301"


@@ -1,7 +1,7 @@
NODE_ENV="development"
BUNDLE_ANALYSER="true"
FRONTEND_API_ENDPOINT="http://localhost:8080/"
FRONTEND_API_ENDPOINT="http://localhost:3301/"
INTERCOM_APP_ID="intercom-app-id"
PLAYWRIGHT_TEST_BASE_URL="http://localhost:8080"
PLAYWRIGHT_TEST_BASE_URL="http://localhost:3301"
CI="1"


@@ -5,23 +5,22 @@
"main": "webpack.config.js",
"scripts": {
"i18n:generate-hash": "node ./i18-generate-hash.js",
"dev": "cross-env NODE_ENV=development webpack serve --progress",
"build": "webpack --config=webpack.config.prod.js --progress",
"dev": "npm run i18n:generate-hash && cross-env NODE_ENV=development webpack serve --progress",
"build": "npm run i18n:generate-hash && webpack --config=webpack.config.prod.js --progress",
"prettify": "prettier --write .",
"fmt": "prettier --check .",
"lint": "eslint ./src",
"lint:fix": "eslint ./src --fix",
"lint": "npm run i18n:generate-hash && eslint ./src",
"lint:fix": "npm run i18n:generate-hash && eslint ./src --fix",
"jest": "jest",
"jest:coverage": "jest --coverage",
"jest:watch": "jest --watch",
"postinstall": "yarn i18n:generate-hash && (is-ci || yarn husky:configure)",
"playwright": "NODE_ENV=testing playwright test --config=./playwright.config.ts",
"postinstall": "is-ci || yarn husky:configure",
"playwright": "npm run i18n:generate-hash && NODE_ENV=testing playwright test --config=./playwright.config.ts",
"playwright:local:debug": "PWDEBUG=console yarn playwright --headed --browser=chromium",
"playwright:codegen:local": "playwright codegen http://localhost:3301",
"playwright:codegen:local:auth": "yarn playwright:codegen:local --load-storage=tests/auth.json",
"husky:configure": "cd .. && husky install frontend/.husky && cd frontend && chmod ug+x .husky/*",
"commitlint": "commitlint --edit $1",
"test": "jest",
"test": "jest --coverage",
"test:changedsince": "jest --changedSince=main --coverage --silent"
},
"engines": {
@@ -48,7 +47,6 @@
"@tanstack/react-virtual": "3.11.2",
"@uiw/react-md-editor": "3.23.5",
"@visx/group": "3.3.0",
"@visx/hierarchy": "3.12.0",
"@visx/shape": "3.5.0",
"@visx/tooltip": "3.3.0",
"@xstate/react": "^3.0.0",
@@ -71,9 +69,8 @@
"cross-env": "^7.0.3",
"css-loader": "5.0.0",
"css-minimizer-webpack-plugin": "5.0.1",
"d3-hierarchy": "3.1.2",
"dayjs": "^1.10.7",
"dompurify": "3.2.4",
"dompurify": "3.1.3",
"dotenv": "8.2.0",
"event-source-polyfill": "1.0.31",
"eventemitter3": "5.0.1",


@@ -1 +0,0 @@
<svg xmlns="http://www.w3.org/2000/svg" width="800" height="800" viewBox="19.933 68.509 228.155 228.155"><path fill="#a4c639" d="M101.885 207.092c7.865 0 14.241 6.376 14.241 14.241v61.09c0 7.865-6.376 14.24-14.241 14.24s-14.24-6.375-14.24-14.24v-61.09c0-7.864 6.376-14.24 14.24-14.24z"/><path fill="#a4c639" d="M69.374 133.645c-.047.54-.088 1.086-.088 1.638v92.557c0 9.954 7.879 17.973 17.66 17.973h94.124c9.782 0 17.661-8.02 17.661-17.973v-92.557q.001-.83-.066-1.638z"/><path fill="#a4c639" d="M166.133 207.092c7.865 0 14.241 6.376 14.241 14.241v61.09c0 7.865-6.376 14.24-14.241 14.24s-14.24-6.375-14.24-14.24v-61.09c0-7.864 6.376-14.24 14.24-14.24zm-119.728-65.21c7.864 0 14.24 6.376 14.24 14.241v61.09c0 7.865-6.376 14.241-14.24 14.241-7.865 0-14.241-6.376-14.241-14.24v-61.09c-.001-7.865 6.375-14.242 14.241-14.242m175.209 0c7.864 0 14.24 6.376 14.24 14.241v61.09c0 7.865-6.376 14.241-14.24 14.241-7.865 0-14.241-6.376-14.241-14.24v-61.09c0-7.865 6.376-14.242 14.241-14.242M69.79 127.565c.396-28.43 25.21-51.74 57.062-54.812h14.312c31.854 3.073 56.666 26.384 57.062 54.812z"/><path fill="none" stroke="#a4c639" stroke-linecap="round" stroke-linejoin="round" stroke-width="3" d="m74.743 70.009 15.022 26.02m103.511-26.02-15.023 26.02"/><path fill="#fff" d="M114.878 102.087c.012 3.974-3.277 7.205-7.347 7.216-4.068.01-7.376-3.202-7.388-7.176v-.04c-.011-3.975 3.278-7.205 7.347-7.216 4.068-.011 7.376 3.2 7.388 7.176zm54.996 0c.012 3.974-3.277 7.205-7.347 7.216-4.068.01-7.376-3.202-7.388-7.176v-.04c-.011-3.975 3.278-7.205 7.347-7.216 4.068-.011 7.376 3.2 7.388 7.176z"/></svg>


@@ -1 +0,0 @@
<svg xmlns="http://www.w3.org/2000/svg" xml:space="preserve" viewBox="0 0 60 60"><linearGradient id="a" x1="15.959" x2="44.307" y1="-13.014" y2="15.333" gradientTransform="matrix(1 0 0 -1 0 61)" gradientUnits="userSpaceOnUse"><stop offset=".097" style="stop-color:#0095d5"/><stop offset=".301" style="stop-color:#238ad9"/><stop offset=".621" style="stop-color:#557bde"/><stop offset=".864" style="stop-color:#7472e2"/><stop offset="1" style="stop-color:#806ee3"/></linearGradient><path d="m0 60 30.1-30.1L60 60z" style="fill:url(#a)"/><linearGradient id="b" x1="4.209" x2="20.673" y1="48.941" y2="65.405" gradientTransform="matrix(1 0 0 -1 0 61)" gradientUnits="userSpaceOnUse"><stop offset=".118" style="stop-color:#0095d5"/><stop offset=".418" style="stop-color:#3c83dc"/><stop offset=".696" style="stop-color:#6d74e1"/><stop offset=".833" style="stop-color:#806ee3"/></linearGradient><path d="M0 0h30.1L0 32.5z" style="fill:url(#b)"/><linearGradient id="c" x1="-10.102" x2="45.731" y1="5.836" y2="61.669" gradientTransform="matrix(1 0 0 -1 0 61)" gradientUnits="userSpaceOnUse"><stop offset=".107" style="stop-color:#c757bc"/><stop offset=".214" style="stop-color:#d0609a"/><stop offset=".425" style="stop-color:#e1725c"/><stop offset=".605" style="stop-color:#ee7e2f"/><stop offset=".743" style="stop-color:#f58613"/><stop offset=".823" style="stop-color:#f88909"/></linearGradient><path d="M30.1 0 0 31.7V60l30.1-30.1L60 0z" style="fill:url(#c)"/></svg>


@@ -1 +0,0 @@
<svg xmlns="http://www.w3.org/2000/svg" width="2370" height="2500" preserveAspectRatio="xMinYMin meet" viewBox="0 0 256 270"><path fill="#B3B3B3" d="M127.606.341.849 44.95 20.88 211.022l106.86 58.732 107.412-59.528L255.175 44.16z"/><path fill="#A6120D" d="M242.532 53.758 127.31 14.466v241.256l96.561-53.441 18.66-148.523z"/><path fill="#DD1B16" d="m15.073 54.466 17.165 148.525 95.07 52.731V14.462z"/><path fill="#F2F2F2" d="M159.027 142.898 127.31 157.73H93.881l-15.714 39.305-29.228.54L127.31 23.227l31.717 119.672zm-3.066-7.467-28.44-56.303-23.329 55.334h23.117z"/><path fill="#B3B3B3" d="m127.309 23.226.21 55.902 26.47 55.377h-26.62l-.06 23.189 36.81.035 17.204 39.852 27.967.518z"/></svg>


@@ -1 +0,0 @@
<svg xmlns="http://www.w3.org/2000/svg" xml:space="preserve" viewBox="0 0 122.88 98.18"><path d="M3.42 0h116.05a3.42 3.42 0 0 1 3.41 3.41v91.36a3.42 3.42 0 0 1-3.41 3.41H3.42C1.54 98.18 0 96.65 0 94.77V3.41C0 1.53 1.54 0 3.42 0m22.47 8.19c2.05 0 3.72 1.67 3.72 3.72s-1.67 3.72-3.72 3.72-3.72-1.67-3.72-3.72c0-2.06 1.66-3.72 3.72-3.72m77.18-.5 2.52 2.77 2.52-2.77 1.97 1.79-2.69 2.96 2.69 2.96-1.97 1.79-2.52-2.77-2.52 2.77-1.97-1.79 2.69-2.96-2.69-2.96zm-88.55.5c2.05 0 3.72 1.67 3.72 3.72s-1.67 3.72-3.72 3.72-3.72-1.67-3.72-3.72a3.707 3.707 0 0 1 3.72-3.72m22.74 0c2.05 0 3.72 1.67 3.72 3.72s-1.67 3.72-3.72 3.72-3.72-1.67-3.72-3.72a3.72 3.72 0 0 1 3.72-3.72M14.05 22.75h93.33c1.77 0 3.22 1.49 3.22 3.22v59.2c0 1.73-1.49 3.22-3.22 3.22H14.05c-1.73 0-3.22-1.45-3.22-3.22v-59.2c.01-1.77 1.46-3.22 3.22-3.22" style="fill-rule:evenodd;clip-rule:evenodd;fill:#1668dc"/></svg>

File diff suppressed because one or more lines are too long (image: 6.1 KiB before, 12 KiB after)

@@ -1 +1,9 @@
<svg xmlns="http://www.w3.org/2000/svg" width="800" height="800" fill="none" viewBox="0 0 16 16"><path fill="#252F3E" d="M4.51 7.687q0 .297.058.475.061.175.17.384a.23.23 0 0 1 .037.123q.001.08-.1.16l-.336.224a.26.26 0 0 1-.138.048q-.08-.001-.16-.074a1.7 1.7 0 0 1-.192-.251 4 4 0 0 1-.165-.315q-.622.737-1.564.737-.672 0-1.064-.385-.393-.384-.394-1.025-.001-.682.484-1.1c.325-.278.756-.416 1.304-.416q.27.002.564.042c.197.027.4.07.612.118v-.39q-.001-.608-.25-.854-.254-.248-.868-.246-.279-.001-.574.07a4 4 0 0 0-.575.181 2 2 0 0 1-.186.07.3.3 0 0 1-.085.016q-.112.001-.112-.166v-.262c0-.085.01-.15.037-.186a.4.4 0 0 1 .15-.113q.278-.144.67-.24.39-.103.83-.101.948 0 1.394.432.44.432.442 1.314v1.73h.01zm-2.161.811q.261 0 .548-.096c.191-.064.362-.182.505-.342a.85.85 0 0 0 .181-.341c.032-.129.054-.283.054-.465V7.03a4 4 0 0 0-.49-.09 4 4 0 0 0-.5-.033c-.357 0-.618.07-.793.214q-.262.215-.26.614-.002.374.196.566.192.198.559.197m4.273.577c-.096 0-.16-.016-.202-.054-.043-.032-.08-.106-.112-.208l-1.25-4.127a1 1 0 0 1-.049-.214c0-.085.043-.133.128-.133h.522q.15-.001.207.053c.043.032.075.107.107.208l.894 3.535.83-3.535q.038-.16.1-.208a.37.37 0 0 1 .214-.053h.425c.102 0 .17.016.213.053.043.032.08.107.101.208l.841 3.578.92-3.578a.46.46 0 0 1 .107-.208.35.35 0 0 1 .208-.053h.495c.085 0 .133.043.133.133q-.002.04-.01.086a1 1 0 0 1-.038.133l-1.283 4.127q-.048.16-.111.209a.34.34 0 0 1-.203.053h-.457q-.15.001-.213-.053c-.043-.038-.08-.107-.101-.214L8.213 5.37l-.82 3.439q-.038.159-.1.213c-.043.038-.118.054-.213.054h-.458zm6.838.144a3.5 3.5 0 0 1-.82-.096c-.266-.064-.473-.134-.612-.214-.085-.048-.143-.101-.165-.15a.4.4 0 0 1-.031-.149v-.272q.001-.167.122-.166a.3.3 0 0 1 .096.016c.032.011.08.032.133.054.18.08.378.144.585.187.213.042.42.064.633.064q.505 0 .777-.176a.57.57 0 0 0 .277-.508.52.52 0 0 0-.144-.373q-.144-.152-.537-.278l-.772-.24c-.388-.123-.676-.305-.851-.545a1.27 1.27 0 0 1-.266-.774q0-.336.143-.593c.096-.17.224-.32.384-.438a1.7 1.7 0 0 1 .553-.277c.213-.064.436-.091.67-.091.118 0 .24.005.357.021a3.4 3.4 0 0 1 .649.145q.143.048.224.096a.5.5 0 0 1 .16.133.3.3 0 0 1 .047.176v.251q-.001.17-.122.171a.6.6 0 0 1-.202-.064 2.4 2.4 0 0 0-1.022-.208c-.303 0-.543.048-.708.15q-.249.148-.25.475 0 .225.16.379c.106.101.303.202.585.293l.756.24q.576.184.825.513.246.33.244.748 0 .345-.138.619a1.4 1.4 0 0 1-.388.47q-.247.197-.591.299a2.5 2.5 0 0 1-.761.112"/><g fill="#F90" fill-rule="evenodd" clip-rule="evenodd"><path d="M14.465 11.813c-1.75 1.297-4.294 1.986-6.481 1.986-3.065 0-5.827-1.137-7.913-3.027-.165-.15-.016-.353.18-.235 2.257 1.313 5.04 2.109 7.92 2.109 1.941 0 4.075-.406 6.039-1.239.293-.133.543.192.255.406"/><path d="M15.194 10.98c-.223-.287-1.479-.138-2.048-.069-.17.022-.197-.128-.043-.24 1-.705 2.645-.502 2.836-.267.192.24-.053 1.89-.99 2.68-.143.123-.281.06-.218-.1.213-.53.687-1.72.463-2.003z"/></g></svg>
<?xml version="1.0" encoding="utf-8"?>
<!-- Uploaded to: SVG Repo, www.svgrepo.com, Generator: SVG Repo Mixer Tools -->
<svg width="800px" height="800px" viewBox="0 0 16 16" xmlns="http://www.w3.org/2000/svg" fill="none">
<path fill="#252F3E" d="M4.51 7.687c0 .197.02.357.058.475.042.117.096.245.17.384a.233.233 0 01.037.123c0 .053-.032.107-.1.16l-.336.224a.255.255 0 01-.138.048c-.054 0-.107-.026-.16-.074a1.652 1.652 0 01-.192-.251 4.137 4.137 0 01-.165-.315c-.415.491-.936.737-1.564.737-.447 0-.804-.129-1.064-.385-.261-.256-.394-.598-.394-1.025 0-.454.16-.822.484-1.1.325-.278.756-.416 1.304-.416.18 0 .367.016.564.042.197.027.4.07.612.118v-.39c0-.406-.085-.689-.25-.854-.17-.166-.458-.246-.868-.246-.186 0-.377.022-.574.07a4.23 4.23 0 00-.575.181 1.525 1.525 0 01-.186.07.326.326 0 01-.085.016c-.075 0-.112-.054-.112-.166v-.262c0-.085.01-.15.037-.186a.399.399 0 01.15-.113c.185-.096.409-.176.67-.24.26-.07.537-.101.83-.101.633 0 1.096.144 1.394.432.293.288.442.726.442 1.314v1.73h.01zm-2.161.811c.175 0 .356-.032.548-.096.191-.064.362-.182.505-.342a.848.848 0 00.181-.341c.032-.129.054-.283.054-.465V7.03a4.43 4.43 0 00-.49-.09 3.996 3.996 0 00-.5-.033c-.357 0-.618.07-.793.214-.176.144-.26.347-.26.614 0 .25.063.437.196.566.128.133.314.197.559.197zm4.273.577c-.096 0-.16-.016-.202-.054-.043-.032-.08-.106-.112-.208l-1.25-4.127a.938.938 0 01-.049-.214c0-.085.043-.133.128-.133h.522c.1 0 .17.016.207.053.043.032.075.107.107.208l.894 3.535.83-3.535c.026-.106.058-.176.1-.208a.365.365 0 01.214-.053h.425c.102 0 .17.016.213.053.043.032.08.107.101.208l.841 3.578.92-3.578a.458.458 0 01.107-.208.346.346 0 01.208-.053h.495c.085 0 .133.043.133.133 0 .027-.006.054-.01.086a.76.76 0 01-.038.133l-1.283 4.127c-.032.107-.069.177-.111.209a.34.34 0 01-.203.053h-.457c-.101 0-.17-.016-.213-.053-.043-.038-.08-.107-.101-.214L8.213 5.37l-.82 3.439c-.026.107-.058.176-.1.213-.043.038-.118.054-.213.054h-.458zm6.838.144a3.51 3.51 0 01-.82-.096c-.266-.064-.473-.134-.612-.214-.085-.048-.143-.101-.165-.15a.378.378 0 01-.031-.149v-.272c0-.112.042-.166.122-.166a.3.3 0 01.096.016c.032.011.08.032.133.054.18.08.378.144.585.187.213.042.42.064.633.064.336 0 .596-.059.777-.176a.575.575 0 00.277-.508.52.52 0 00-.144-.373c-.095-.102-.276-.193-.537-.278l-.772-.24c-.388-.123-.676-.305-.851-.545a1.275 1.275 0 01-.266-.774c0-.224.048-.422.143-.593.096-.17.224-.32.384-.438.16-.122.34-.213.553-.277.213-.064.436-.091.67-.091.118 0 .24.005.357.021.122.016.234.038.346.06.106.026.208.052.303.085.096.032.17.064.224.096a.46.46 0 01.16.133.289.289 0 01.047.176v.251c0 .112-.042.171-.122.171a.552.552 0 01-.202-.064 2.427 2.427 0 00-1.022-.208c-.303 0-.543.048-.708.15-.165.1-.25.256-.25.475 0 .149.053.277.16.379.106.101.303.202.585.293l.756.24c.383.123.66.294.825.513.165.219.244.47.244.748 0 .23-.047.437-.138.619a1.436 1.436 0 01-.388.47c-.165.133-.362.23-.591.299-.24.075-.49.112-.761.112z"/>
<g fill="#F90" fill-rule="evenodd" clip-rule="evenodd">


@@ -1 +1 @@
<svg xmlns="http://www.w3.org/2000/svg" viewBox="0 0 18 18"><defs><linearGradient id="a" x1="2.94" x2="8.67" y1="3.74" y2="3.74" gradientUnits="userSpaceOnUse"><stop offset="0" stop-color="#b77af4"/><stop offset="1" stop-color="#773adc"/></linearGradient><linearGradient id="b" x1="9.13" x2="14.85" y1="3.79" y2="3.79" gradientUnits="userSpaceOnUse"><stop offset="0" stop-color="#b77af4"/><stop offset="1" stop-color="#773adc"/></linearGradient><linearGradient id="c" x1=".01" x2="5.73" y1="9.12" y2="9.12" gradientUnits="userSpaceOnUse"><stop offset="0" stop-color="#b77af4"/><stop offset="1" stop-color="#773adc"/></linearGradient><linearGradient id="d" x1="6.18" x2="11.9" y1="9.08" y2="9.08" gradientUnits="userSpaceOnUse"><stop offset="0" stop-color="#b77af4"/><stop offset="1" stop-color="#773adc"/></linearGradient><linearGradient id="e" x1="12.35" x2="18.08" y1="9.13" y2="9.13" gradientUnits="userSpaceOnUse"><stop offset="0" stop-color="#b77af4"/><stop offset="1" stop-color="#773adc"/></linearGradient><linearGradient id="f" x1="2.87" x2="8.6" y1="14.56" y2="14.56" gradientUnits="userSpaceOnUse"><stop offset="0" stop-color="#b77af4"/><stop offset="1" stop-color="#773adc"/></linearGradient><linearGradient id="g" x1="9.05" x2="14.78" y1="14.6" y2="14.6" gradientUnits="userSpaceOnUse"><stop offset="0" stop-color="#b77af4"/><stop offset="1" stop-color="#773adc"/></linearGradient></defs><path fill="url(#a)" d="m5.8 1.22-2.86.53v3.9l2.86.61 2.87-1.15V2.2z"/><path fill="none" d="m5.91 6.2 2.62-1.06A.2.2 0 0 0 8.65 5V2.36a.21.21 0 0 0-.13-.18l-2.65-.9h-.12l-2.6.48a.2.2 0 0 0-.15.18v3.53a.19.19 0 0 0 .15.19l2.63.55a.3.3 0 0 0 .13-.01"/><path fill="#341a6e" d="M2.94 1.75v3.9l2.89.61v-5zm1.22 3.6-.81-.16v-3l.81-.13zm1.26.23-.93-.15V2l.93-.16z"/><path fill="url(#b)" d="m11.99 1.27-2.86.53v3.9l2.86.61 2.86-1.16v-2.9z"/><path fill="#341a6e" d="M9.13 1.8v3.9l2.87.61v-5zm1.21 3.6-.81-.16v-3l.81-.13zm1.26.23-.93-.15V2.05l.93-.17z"/><path fill="url(#c)" d="m2.87 6.6-2.86.53v3.9l2.86.61 2.87-1.15V7.58z"/><path fill="#341a6e" d="M0 7.13V11l2.89.61v-5zm1.21 3.61-.81-.17v-3l.81-.14zm1.27.26-.93-.15V7.38l.93-.16z"/><path fill="url(#d)" d="m9.04 6.56-2.86.53v3.9l2.86.62 2.86-1.16V7.54z"/><path fill="#341a6e" d="M6.18 7.09V11l2.88.61v-5zm1.21 3.61-.81-.17v-3l.81-.14zm1.26.22-.93-.15V7.34l.93-.16z"/><path fill="url(#e)" d="m15.21 6.61-2.86.53v3.9l2.86.61 2.87-1.15V7.59z"/><path fill="#341a6e" d="M12.35 7.14V11l2.89.61v-5zm1.22 3.61-.81-.17v-3l.81-.14zm1.26.22-.93-.15V7.39l.93-.16z"/><path fill="url(#f)" d="m5.73 12.04-2.86.52v3.9l2.86.62 2.87-1.16v-2.9z"/><path fill="none" d="m5.84 17 2.61-1a.18.18 0 0 0 .12-.18v-2.6a.2.2 0 0 0-.13-.22l-2.64-.9a.17.17 0 0 0-.12 0l-2.6.47a.19.19 0 0 0-.16.19v3.54a.19.19 0 0 0 .15.19L5.7 17a.23.23 0 0 0 .14 0"/><path fill="#341a6e" d="M2.87 12.56v3.9l2.89.62V12zm1.22 3.61L3.28 16v-3l.81-.14zm1.26.23-.93-.15v-3.44l.93-.16z"/><path fill="url(#g)" d="m11.91 12.08-2.86.53v3.9l2.86.61 2.87-1.15v-2.91z"/><path fill="#341a6e" d="M9.05 12.61v3.9l2.89.61v-5zm1.22 3.61-.81-.17v-3l.81-.14zm1.26.22-.93-.15v-3.43l.93-.16z"/></svg>
<svg xmlns="http://www.w3.org/2000/svg" viewBox="0 0 18 18"><defs><linearGradient id="a" x1="2.94" y1="3.74" x2="8.67" y2="3.74" gradientUnits="userSpaceOnUse"><stop offset="0" stop-color="#b77af4"/><stop offset="1" stop-color="#773adc"/></linearGradient><linearGradient id="b" x1="9.13" y1="3.79" x2="14.85" y2="3.79" gradientUnits="userSpaceOnUse"><stop offset="0" stop-color="#b77af4"/><stop offset="1" stop-color="#773adc"/></linearGradient><linearGradient id="c" x1=".01" y1="9.12" x2="5.73" y2="9.12" gradientUnits="userSpaceOnUse"><stop offset="0" stop-color="#b77af4"/><stop offset="1" stop-color="#773adc"/></linearGradient><linearGradient id="d" x1="6.18" y1="9.08" x2="11.9" y2="9.08" gradientUnits="userSpaceOnUse"><stop offset="0" stop-color="#b77af4"/><stop offset="1" stop-color="#773adc"/></linearGradient><linearGradient id="e" x1="12.35" y1="9.13" x2="18.08" y2="9.13" gradientUnits="userSpaceOnUse"><stop offset="0" stop-color="#b77af4"/><stop offset="1" stop-color="#773adc"/></linearGradient><linearGradient id="f" x1="2.87" y1="14.56" x2="8.6" y2="14.56" gradientUnits="userSpaceOnUse"><stop offset="0" stop-color="#b77af4"/><stop offset="1" stop-color="#773adc"/></linearGradient><linearGradient id="g" x1="9.05" y1="14.6" x2="14.78" y2="14.6" gradientUnits="userSpaceOnUse"><stop offset="0" stop-color="#b77af4"/><stop offset="1" stop-color="#773adc"/></linearGradient></defs><path fill="url(#a)" d="M5.8 1.22l-2.86.53v3.9l2.86.61 2.87-1.15V2.2L5.8 1.22z"/><path d="M5.91 6.2l2.62-1.06A.2.2 0 008.65 5V2.36a.21.21 0 00-.13-.18l-2.65-.9h-.12l-2.6.48a.2.2 0 00-.15.18v3.53a.19.19 0 00.15.19l2.63.55a.32.32 0 00.13-.01z" fill="none"/><path d="M2.94 1.75v3.9l2.89.61v-5zm1.22 3.6l-.81-.16v-3l.81-.13zm1.26.23l-.93-.15V2l.93-.16z" fill="#341a6e"/><path fill="url(#b)" d="M11.99 1.27l-2.86.53v3.9l2.86.61 2.86-1.16v-2.9l-2.86-.98z"/><path d="M9.13 1.8v3.9l2.87.61v-5zm1.21 3.6l-.81-.16v-3l.81-.13zm1.26.23l-.93-.15V2.05l.93-.17z" fill="#341a6e"/><path fill="url(#c)" d="M2.87 6.6l-2.86.53v3.9l2.86.61 2.87-1.15V7.58L2.87 6.6z"/><path d="M0 7.13V11l2.89.61v-5zm1.21 3.61l-.81-.17v-3l.81-.14zm1.27.26l-.93-.15V7.38l.93-.16z" fill="#341a6e"/><path fill="url(#d)" d="M9.04 6.56l-2.86.53v3.9l2.86.62 2.86-1.16V7.54l-2.86-.98z"/><path d="M6.18 7.09V11l2.88.61v-5zm1.21 3.61l-.81-.17v-3l.81-.14zm1.26.22l-.93-.15V7.34l.93-.16z" fill="#341a6e"/><path fill="url(#e)" d="M15.21 6.61l-2.86.53v3.9l2.86.61 2.87-1.15V7.59l-2.87-.98z"/><path d="M12.35 7.14V11l2.89.61v-5zm1.22 3.61l-.81-.17v-3l.81-.14zm1.26.22l-.93-.15V7.39l.93-.16z" fill="#341a6e"/><path fill="url(#f)" d="M5.73 12.04l-2.86.52v3.9l2.86.62 2.87-1.16v-2.9l-2.87-.98z"/><path d="M5.84 17l2.61-1a.18.18 0 00.12-.18v-2.6a.2.2 0 00-.13-.22l-2.64-.9a.17.17 0 00-.12 0l-2.6.47a.19.19 0 00-.16.19v3.54a.19.19 0 00.15.19L5.7 17a.23.23 0 00.14 0z" fill="none"/><path d="M2.87 12.56v3.9l2.89.62V12zm1.22 3.61L3.28 16v-3l.81-.14zm1.26.23l-.93-.15v-3.44l.93-.16z" fill="#341a6e"/><path fill="url(#g)" d="M11.91 12.08l-2.86.53v3.9l2.86.61 2.87-1.15v-2.91l-2.87-.98z"/><path d="M9.05 12.61v3.9l2.89.61v-5zm1.22 3.61l-.81-.17v-3l.81-.14zm1.26.22l-.93-.15v-3.43l.93-.16z" fill="#341a6e"/></svg>

View File

@@ -1 +1 @@
<svg xmlns="http://www.w3.org/2000/svg" viewBox="0 0 18 18"><defs><linearGradient id="b" x1="4.4" x2="4.37" y1="11.48" y2="7.53" gradientUnits="userSpaceOnUse"><stop offset="0" stop-color="#ccc"/><stop offset="1" stop-color="#fcfcfc"/></linearGradient><linearGradient id="c" x1="10.13" x2="10.13" y1="15.45" y2="11.9" gradientUnits="userSpaceOnUse"><stop offset="0" stop-color="#ccc"/><stop offset="1" stop-color="#fcfcfc"/></linearGradient><linearGradient id="d" x1="14.18" x2="14.18" y1="11.15" y2="7.38" gradientUnits="userSpaceOnUse"><stop offset="0" stop-color="#ccc"/><stop offset="1" stop-color="#fcfcfc"/></linearGradient><radialGradient id="a" cx="13428.81" cy="3518.86" r="56.67" gradientTransform="matrix(.15 0 0 .15 -2005.33 -518.83)" gradientUnits="userSpaceOnUse"><stop offset=".18" stop-color="#5ea0ef"/><stop offset="1" stop-color="#0078d4"/></radialGradient></defs><path fill="url(#a)" d="M14.21 15.72A8.5 8.5 0 0 1 3.79 2.28l.09-.06a8.5 8.5 0 0 1 10.33 13.5"/><path fill="#fff" d="M6.69 7.23a13 13 0 0 1 8.91-3.58 8.5 8.5 0 0 0-1.49-1.44 14.3 14.3 0 0 0-4.69 1.1 12.5 12.5 0 0 0-4.08 2.82 2.76 2.76 0 0 1 1.35 1.1m-4.21 3.42a18 18 0 0 0-.83 2.62 8 8 0 0 0 .62.92c.18.23.35.44.55.65a18 18 0 0 1 1.08-3.47 2.76 2.76 0 0 1-1.42-.72" opacity=".6"/><path fill="#f2f2f2" d="M3.46 6.11a12 12 0 0 1-.69-2.94 8 8 0 0 0-1.1 1.45A12.7 12.7 0 0 0 2.24 7a2.7 2.7 0 0 1 1.22-.89" opacity=".55"/><circle cx="4.38" cy="8.68" r="2.73" fill="url(#b)"/><path fill="#f2f2f2" d="M8.36 13.67a1.77 1.77 0 0 1 .54-1.27 11.9 11.9 0 0 1-2.53-1.86 2.74 2.74 0 0 1-1.49.83 13 13 0 0 0 1.45 1.28 12 12 0 0 0 2.05 1.25 2 2 0 0 1-.02-.23m6.3.21a12 12 0 0 1-2.76-.32.4.4 0 0 1 0 .11 1.75 1.75 0 0 1-.51 1.24 13.7 13.7 0 0 0 3.42.24A8.2 8.2 0 0 0 16 13.81a12 12 0 0 1-1.34.07" opacity=".55"/><circle cx="10.13" cy="13.67" r="1.78" fill="url(#c)"/><path fill="#f2f2f2" d="M12.32 8.93a1.83 1.83 0 0 1 .61-1 25.5 25.5 0 0 1-4.46-4.14 17 17 0 0 1-2-2.92 7.6 7.6 0 0 0-1.09.42 18 18 0 0 0 2.15 3.18 26.4 26.4 0 0 0 4.79 4.46" opacity=".7"/><circle cx="14.18" cy="9.27" r="1.89" fill="url(#d)"/><path fill="#f2f2f2" d="m17.35 10.54-.35-.17-.3-.16h-.06l-.26-.21h-.07L16 9.8a1.76 1.76 0 0 1-.64.92c.12.08.25.15.38.22l.08.05.35.19.86.45a9 9 0 0 0 .29-1.11z" opacity=".55"/><circle cx="4.38" cy="8.68" r="2.73" fill="url(#b)"/><circle cx="10.13" cy="13.67" r="1.78" fill="url(#c)"/></svg>
<svg xmlns="http://www.w3.org/2000/svg" viewBox="0 0 18 18"><defs><linearGradient id="b" x1="4.4" y1="11.48" x2="4.37" y2="7.53" gradientUnits="userSpaceOnUse"><stop offset="0" stop-color="#ccc"/><stop offset="1" stop-color="#fcfcfc"/></linearGradient><linearGradient id="c" x1="10.13" y1="15.45" x2="10.13" y2="11.9" gradientUnits="userSpaceOnUse"><stop offset="0" stop-color="#ccc"/><stop offset="1" stop-color="#fcfcfc"/></linearGradient><linearGradient id="d" x1="14.18" y1="11.15" x2="14.18" y2="7.38" gradientUnits="userSpaceOnUse"><stop offset="0" stop-color="#ccc"/><stop offset="1" stop-color="#fcfcfc"/></linearGradient><radialGradient id="a" cx="13428.81" cy="3518.86" r="56.67" gradientTransform="matrix(.15 0 0 .15 -2005.33 -518.83)" gradientUnits="userSpaceOnUse"><stop offset=".18" stop-color="#5ea0ef"/><stop offset="1" stop-color="#0078d4"/></radialGradient></defs><path d="M14.21 15.72A8.5 8.5 0 013.79 2.28l.09-.06a8.5 8.5 0 0110.33 13.5" fill="url(#a)"/><path d="M6.69 7.23a13 13 0 018.91-3.58 8.47 8.47 0 00-1.49-1.44 14.34 14.34 0 00-4.69 1.1 12.54 12.54 0 00-4.08 2.82 2.76 2.76 0 011.35 1.1zM2.48 10.65a17.86 17.86 0 00-.83 2.62 7.82 7.82 0 00.62.92c.18.23.35.44.55.65a17.94 17.94 0 011.08-3.47 2.76 2.76 0 01-1.42-.72z" fill="#fff" opacity=".6"/><path d="M3.46 6.11a12 12 0 01-.69-2.94 8.15 8.15 0 00-1.1 1.45A12.69 12.69 0 002.24 7a2.69 2.69 0 011.22-.89z" fill="#f2f2f2" opacity=".55"/><circle cx="4.38" cy="8.68" r="2.73" fill="url(#b)"/><path d="M8.36 13.67a1.77 1.77 0 01.54-1.27 11.88 11.88 0 01-2.53-1.86 2.74 2.74 0 01-1.49.83 13.1 13.1 0 001.45 1.28 12.12 12.12 0 002.05 1.25 1.79 1.79 0 01-.02-.23zM14.66 13.88a12 12 0 01-2.76-.32.41.41 0 010 .11 1.75 1.75 0 01-.51 1.24 13.69 13.69 0 003.42.24A8.21 8.21 0 0016 13.81a11.5 11.5 0 01-1.34.07z" fill="#f2f2f2" opacity=".55"/><circle cx="10.13" cy="13.67" r="1.78" fill="url(#c)"/><path d="M12.32 8.93a1.83 1.83 0 01.61-1 25.5 25.5 0 01-4.46-4.14 16.91 16.91 0 01-2-2.92 7.64 7.64 0 00-1.09.42 18.14 18.14 0 002.15 3.18 26.44 26.44 0 004.79 4.46z" fill="#f2f2f2" opacity=".7"/><circle cx="14.18" cy="9.27" r="1.89" fill="url(#d)"/><path d="M17.35 10.54l-.35-.17-.3-.16h-.06l-.26-.21h-.07L16 9.8a1.76 1.76 0 01-.64.92c.12.08.25.15.38.22l.08.05.35.19.86.45a8.63 8.63 0 00.29-1.11z" fill="#f2f2f2" opacity=".55"/><circle cx="4.38" cy="8.68" r="2.73" fill="url(#b)"/><circle cx="10.13" cy="13.67" r="1.78" fill="url(#c)"/></svg>

File diff suppressed because one or more lines are too long

View File

@@ -1 +1 @@
<svg xmlns="http://www.w3.org/2000/svg" width="18" height="18"><defs><linearGradient id="a" x1="8.798" x2="14.683" y1="8.703" y2="8.703" gradientUnits="userSpaceOnUse"><stop offset=".001" stop-color="#773adc"/><stop offset="1" stop-color="#552f99"/></linearGradient><linearGradient id="b" x1="5.764" x2="5.764" y1="3.777" y2="13.78" gradientUnits="userSpaceOnUse"><stop offset="0" stop-color="#a67af4"/><stop offset=".999" stop-color="#773adc"/></linearGradient></defs><path fill="#32bedd" d="M16.932 11.578a8.45 8.45 0 0 1-7.95 5.59 8.2 8.2 0 0 1-2.33-.33 2.1 2.1 0 0 0 .18-.83c.01 0 .03.01.04.01a7.4 7.4 0 0 0 2.11.3 7.65 7.65 0 0 0 6.85-4.28l.01-.01ZM3.582 14.068a2 2 0 0 0-.64.56 8.6 8.6 0 0 1-1.67-2.44l1.04.23v.26a.6.6 0 0 0 .47.59l.14.03a6 6 0 0 0 .62.73ZM12.352.958a2.3 2.3 0 0 0-.27.81c-.02-.01-.05-.02-.07-.03a7.5 7.5 0 0 0-3.03-.63 7.64 7.64 0 0 0-5.9 2.8l-.29.06a.6.6 0 0 0-.48.58v.46l-1.02.19a8.45 8.45 0 0 1 7.69-4.93 8.6 8.6 0 0 1 3.37.69M16.872 5.7l-1.09-.38a6.6 6.6 0 0 0-.72-1.16c-.02-.03-.04-.05-.05-.07a2.1 2.1 0 0 0 .72-.45 7.8 7.8 0 0 1 1.14 2.06"/><path fill="#fff" d="m10.072 11.908 2.54.56-3.94 1.632c-.02 0-.03.01-.05.01a.154.154 0 0 1-.15-.15V3.448a.154.154 0 0 1 .15-.15.1.1 0 0 1 .05.01l4.46 1.56-3.05.57a.565.565 0 0 0-.44.54v5.4a.54.54 0 0 0 .43.53"/><path fill="#a67af4" d="m1.1 5.668 1.21-.23v6.55l-1.23-.27-.99-.22a.11.11 0 0 1-.09-.12v-5.4a.12.12 0 0 1 .09-.12Z"/><path fill="url(#a)" d="m10.072 11.908 2.54.56-3.94 1.632c-.02 0-.03.01-.05.01a.154.154 0 0 1-.15-.15V3.448a.154.154 0 0 1 .15-.15.1.1 0 0 1 .05.01l4.46 1.56-3.05.57a.565.565 0 0 0-.44.54v5.4a.54.54 0 0 0 .43.53"/><path fill="url(#b)" d="M8.586 3.3 2.878 4.378a.18.18 0 0 0-.14.175v8.127a.18.18 0 0 0 .137.174L8.581 14.1a.176.176 0 0 0 .21-.174V3.478a.175.175 0 0 0-.172-.178Z"/><path fill="#b796f9" d="M5.948 4.921v7.562l1.986.331v-8.25zM3.509 5.329v6.625l1.729.363V5.031z" opacity=".5"/><path fill="#32bedd" d="M16 2.048a1.755 1.755 0 1 1-1.76-1.76A1.756 1.756 0 0 1 16 2.048"/><circle cx="4.65" cy="15.973" r="1.759" fill="#32bedd"/><path fill="#773adc" d="M18 6.689v3.844a.22.22 0 0 1-.133.2l-.766.316-3.07 1.268h-.011a.1.1 0 0 1-.038 0 .1.1 0 0 1-.1-.1V5.234a.1.1 0 0 1 .054-.088h.019a.03.03 0 0 1 .019 0 .06.06 0 0 1 .034.008h.023L17.05 6.2l.8.282a.21.21 0 0 1 .15.207"/><path fill="#a67af4" d="m13.959 5.14-3.8.715a.12.12 0 0 0-.093.117v5.409a.12.12 0 0 0 .091.116l3.8.831a.115.115 0 0 0 .137-.09V5.256a.117.117 0 0 0-.115-.118z"/></svg>
<svg xmlns="http://www.w3.org/2000/svg" width="18" height="18" viewBox="0 0 18 18"><defs><linearGradient id="b27f1ad0-7d11-4247-9da3-91bce6211f32" x1="8.798" y1="8.703" x2="14.683" y2="8.703" gradientUnits="userSpaceOnUse"><stop offset="0.001" stop-color="#773adc" /><stop offset="1" stop-color="#552f99" /></linearGradient><linearGradient id="b2f92112-4ca9-4b17-a019-c9f26c1a4a8f" x1="5.764" y1="3.777" x2="5.764" y2="13.78" gradientUnits="userSpaceOnUse"><stop offset="0" stop-color="#a67af4" /><stop offset="0.999" stop-color="#773adc" /></linearGradient></defs><g id="b8a0486a-5501-4d92-b540-a766c4b3b548"><g><g><g><path d="M16.932,11.578a8.448,8.448,0,0,1-7.95,5.59,8.15,8.15,0,0,1-2.33-.33,2.133,2.133,0,0,0,.18-.83c.01,0,.03.01.04.01a7.422,7.422,0,0,0,2.11.3,7.646,7.646,0,0,0,6.85-4.28l.01-.01Z" fill="#32bedd" /><path d="M3.582,14.068a2.025,2.025,0,0,0-.64.56,8.6,8.6,0,0,1-1.67-2.44l1.04.23v.26a.6.6,0,0,0,.47.59l.14.03a6.136,6.136,0,0,0,.62.73Z" fill="#32bedd" /><path d="M12.352.958a2.28,2.28,0,0,0-.27.81c-.02-.01-.05-.02-.07-.03a7.479,7.479,0,0,0-3.03-.63,7.643,7.643,0,0,0-5.9,2.8l-.29.06a.6.6,0,0,0-.48.58v.46l-1.02.19A8.454,8.454,0,0,1,8.982.268,8.6,8.6,0,0,1,12.352.958Z" fill="#32bedd" /><path d="M16.872,5.7l-1.09-.38a6.6,6.6,0,0,0-.72-1.16c-.02-.03-.04-.05-.05-.07a2.083,2.083,0,0,0,.72-.45A7.81,7.81,0,0,1,16.872,5.7Z" fill="#32bedd" /><path d="M10.072,11.908l2.54.56L8.672,14.1c-.02,0-.03.01-.05.01a.154.154,0,0,1-.15-.15V3.448a.154.154,0,0,1,.15-.15.09.09,0,0,1,.05.01l4.46,1.56-3.05.57a.565.565,0,0,0-.44.54v5.4A.537.537,0,0,0,10.072,11.908Z" fill="#fff" /><g><g id="e918f286-5032-4942-ad29-ea17e6f1cc90"><path d="M1.1,5.668l1.21-.23v6.55l-1.23-.27-.99-.22a.111.111,0,0,1-.09-.12v-5.4a.12.12,0,0,1,.09-.12Z" fill="#a67af4" /></g><g><g id="a47a99dd-4d47-4c70-8c42-c5ac274ce496"><g><path d="M10.072,11.908l2.54.56L8.672,14.1c-.02,0-.03.01-.05.01a.154.154,0,0,1-.15-.15V3.448a.154.154,0,0,1,.15-.15.09.09,0,0,1,.05.01l4.46,1.56-3.05.57a.565.565,0,0,0-.44.54v5.4A.537.537,0,0,0,10.072,11.908Z" fill="url(#b27f1ad0-7d11-4247-9da3-91bce6211f32)" /><path d="M8.586,3.3,2.878,4.378a.177.177,0,0,0-.14.175V12.68a.177.177,0,0,0,.137.174L8.581,14.1a.176.176,0,0,0,.21-.174V3.478A.175.175,0,0,0,8.619,3.3Z" fill="url(#b2f92112-4ca9-4b17-a019-c9f26c1a4a8f)" /></g></g><polygon points="5.948 4.921 5.948 12.483 7.934 12.814 7.934 4.564 5.948 4.921" fill="#b796f9" opacity="0.5" /><polygon points="3.509 5.329 3.509 11.954 5.238 12.317 5.238 5.031 3.509 5.329" fill="#b796f9" opacity="0.5" /></g></g></g><path d="M16,2.048a1.755,1.755,0,1,1-1.76-1.76A1.756,1.756,0,0,1,16,2.048Z" fill="#32bedd" /><circle cx="4.65" cy="15.973" r="1.759" fill="#32bedd" /></g><path d="M18,6.689v3.844a.222.222,0,0,1-.133.2l-.766.316-3.07,1.268-.011,0a.126.126,0,0,1-.038,0,.1.1,0,0,1-.1-.1V5.234a.1.1,0,0,1,.054-.088l0,0,.019,0a.031.031,0,0,1,.019,0,.055.055,0,0,1,.034.008l.011,0,.012,0L17.05,6.2l.8.282A.213.213,0,0,1,18,6.689Z" fill="#773adc" /><path d="M13.959,5.14l-3.8.715a.118.118,0,0,0-.093.117v5.409a.118.118,0,0,0,.091.116l3.8.831a.115.115,0,0,0,.137-.09.109.109,0,0,0,0-.026V5.256a.117.117,0,0,0-.115-.118A.082.082,0,0,0,13.959,5.14Z" fill="#a67af4" /></g></g></svg>

View File

@@ -1 +1 @@
<svg xmlns="http://www.w3.org/2000/svg" viewBox="0 0 18 18"><defs><linearGradient id="a" x1="-175.993" x2="-175.993" y1="-343.723" y2="-359.232" gradientTransform="matrix(1.156 0 0 1.029 212.573 370.548)" gradientUnits="userSpaceOnUse"><stop offset="0" stop-color="#fea11b"/><stop offset=".284" stop-color="#fea51a"/><stop offset=".547" stop-color="#feb018"/><stop offset=".8" stop-color="#ffc314"/><stop offset="1" stop-color="#ffd70f"/></linearGradient></defs><path fill="#50e6ff" d="m5.54 13.105-.586.588a.267.267 0 0 1-.377 0L.223 9.353a.533.533 0 0 1 0-.755l.588-.59 4.732 4.718a.267.267 0 0 1 0 .378z"/><path fill="#1490df" d="m4.863 4.305.59.588a.267.267 0 0 1 0 .378L.806 9.932l-.59-.589a.533.533 0 0 1-.001-.754l4.273-4.285a.267.267 0 0 1 .376 0z"/><path fill="#50e6ff" d="m17.19 8.012.588.59a.533.533 0 0 1-.001.754l-4.354 4.34a.267.267 0 0 1-.377 0l-.586-.587a.267.267 0 0 1 0-.377l4.732-4.718z"/><path fill="#1490df" d="m17.782 9.34-.59.589-4.648-4.662a.267.267 0 0 1 0-.377l.59-.588a.267.267 0 0 1 .378 0l4.273 4.286a.533.533 0 0 1 0 .753z"/><path fill="url(#a)" d="M8.459 9.9H4.87a.193.193 0 0 1-.2-.181.2.2 0 0 1 .018-.075L8.991 1.13a.21.21 0 0 1 .186-.106h4.245a.193.193 0 0 1 .2.181.17.17 0 0 1-.035.1L8.534 7.966h4.928a.193.193 0 0 1 .2.181.18.18 0 0 1-.052.122l-8.189 8.519c-.077.046-.624.5-.356-.189z"/></svg>
<svg xmlns="http://www.w3.org/2000/svg" viewBox="0 0 18 18"><defs><linearGradient id="a" x1="-175.993" y1="-343.723" x2="-175.993" y2="-359.232" gradientTransform="matrix(1.156 0 0 1.029 212.573 370.548)" gradientUnits="userSpaceOnUse"><stop offset="0" stop-color="#fea11b"/><stop offset=".284" stop-color="#fea51a"/><stop offset=".547" stop-color="#feb018"/><stop offset=".8" stop-color="#ffc314"/><stop offset="1" stop-color="#ffd70f"/></linearGradient></defs><path d="M5.54 13.105l-.586.588a.267.267 0 01-.377 0L.223 9.353a.533.533 0 010-.755l.588-.59 4.732 4.718a.267.267 0 010 .378z" fill="#50e6ff"/><path d="M4.863 4.305l.59.588a.267.267 0 010 .378L.806 9.932l-.59-.589a.533.533 0 01-.001-.754l4.273-4.285a.267.267 0 01.376 0z" fill="#1490df"/><path d="M17.19 8.012l.588.59a.533.533 0 01-.001.754l-4.354 4.34a.267.267 0 01-.377 0l-.586-.587a.267.267 0 010-.377l4.732-4.718z" fill="#50e6ff"/><path d="M17.782 9.34l-.59.589-4.648-4.662a.267.267 0 010-.377l.59-.588a.267.267 0 01.378 0l4.273 4.286a.533.533 0 010 .753z" fill="#1490df"/><path d="M8.459 9.9H4.87a.193.193 0 01-.2-.181.166.166 0 01.018-.075L8.991 1.13a.206.206 0 01.186-.106h4.245a.193.193 0 01.2.181.165.165 0 01-.035.1L8.534 7.966h4.928a.193.193 0 01.2.181.176.176 0 01-.052.122l-8.189 8.519c-.077.046-.624.5-.356-.189z" fill="url(#a)"/></svg>

View File

@@ -1 +1 @@
<svg xmlns="http://www.w3.org/2000/svg" viewBox="0 0 18 18"><defs><radialGradient id="b" cx="9.36" cy="10.57" r="7.07" gradientUnits="userSpaceOnUse"><stop offset="0" stop-color="#f2f2f2"/><stop offset=".58" stop-color="#eee"/><stop offset="1" stop-color="#e6e6e6"/></radialGradient><linearGradient id="a" x1="2.59" x2="15.41" y1="10.16" y2="10.16" gradientUnits="userSpaceOnUse"><stop offset="0" stop-color="#005ba1"/><stop offset=".07" stop-color="#0060a9"/><stop offset=".36" stop-color="#0071c8"/><stop offset=".52" stop-color="#0078d4"/><stop offset=".64" stop-color="#0074cd"/><stop offset=".82" stop-color="#006abb"/><stop offset="1" stop-color="#005ba1"/></linearGradient></defs><path fill="url(#a)" d="M9 5.14c-3.54 0-6.41-1-6.41-2.32v12.36c0 1.27 2.82 2.3 6.32 2.32H9c3.54 0 6.41-1 6.41-2.32V2.82c0 1.29-2.87 2.32-6.41 2.32"/><path fill="#e8e8e8" d="M15.41 2.82c0 1.29-2.87 2.32-6.41 2.32s-6.41-1-6.41-2.32S5.46.5 9 .5s6.41 1 6.41 2.32"/><path fill="#50e6ff" d="M13.92 2.63c0 .82-2.21 1.48-4.92 1.48s-4.92-.66-4.92-1.48S6.29 1.16 9 1.16s4.92.66 4.92 1.47"/><path fill="#198ab3" d="M9 3a11.6 11.6 0 0 0-3.89.57A11.4 11.4 0 0 0 9 4.11a11.2 11.2 0 0 0 3.89-.58A11.8 11.8 0 0 0 9 3"/><path fill="url(#b)" d="M12.9 11.4V8H12v4.13h2.46v-.73zM5.76 9.73a1.8 1.8 0 0 1-.51-.31.44.44 0 0 1-.12-.32.34.34 0 0 1 .15-.3.68.68 0 0 1 .42-.12 1.62 1.62 0 0 1 1 .29v-.86a2.6 2.6 0 0 0-1-.16 1.64 1.64 0 0 0-1.09.34 1.08 1.08 0 0 0-.42.89c0 .51.32.91 1 1.21a3 3 0 0 1 .62.36.42.42 0 0 1 .15.32.38.38 0 0 1-.16.31.8.8 0 0 1-.45.11 1.66 1.66 0 0 1-1.09-.42V12a2.2 2.2 0 0 0 1.07.24 1.88 1.88 0 0 0 1.18-.33 1.08 1.08 0 0 0 .33-.91 1.05 1.05 0 0 0-.25-.7 2.4 2.4 0 0 0-.83-.57M11 11.32a2.34 2.34 0 0 0 .33-1.26A2.3 2.3 0 0 0 11 9a1.8 1.8 0 0 0-.7-.75 2 2 0 0 0-1-.26 2.1 2.1 0 0 0-1.08.27 1.86 1.86 0 0 0-.73.74 2.46 2.46 0 0 0-.26 1.14 2.26 2.26 0 0 0 .24 1 1.76 1.76 0 0 0 .69.74 2.06 2.06 0 0 0 1 .3l.86 1h1.21L10 12.08a1.8 1.8 0 0 0 1-.76m-1-.25a.94.94 0 0 1-.76.35.92.92 0 0 1-.76-.36 1.52 1.52 0 0 1-.29-1 1.53 1.53 0 0 1 .29-1 1 1 0 0 1 .78-.37.87.87 0 0 1 .75.37 1.62 1.62 0 0 1 .27 1 1.46 1.46 0 0 1-.28 1.01"/></svg>
<svg xmlns="http://www.w3.org/2000/svg" viewBox="0 0 18 18"><defs><radialGradient id="b" cx="9.36" cy="10.57" r="7.07" gradientUnits="userSpaceOnUse"><stop offset="0" stop-color="#f2f2f2"/><stop offset=".58" stop-color="#eee"/><stop offset="1" stop-color="#e6e6e6"/></radialGradient><linearGradient id="a" x1="2.59" y1="10.16" x2="15.41" y2="10.16" gradientUnits="userSpaceOnUse"><stop offset="0" stop-color="#005ba1"/><stop offset=".07" stop-color="#0060a9"/><stop offset=".36" stop-color="#0071c8"/><stop offset=".52" stop-color="#0078d4"/><stop offset=".64" stop-color="#0074cd"/><stop offset=".82" stop-color="#006abb"/><stop offset="1" stop-color="#005ba1"/></linearGradient></defs><path d="M9 5.14c-3.54 0-6.41-1-6.41-2.32v12.36c0 1.27 2.82 2.3 6.32 2.32H9c3.54 0 6.41-1 6.41-2.32V2.82c0 1.29-2.87 2.32-6.41 2.32z" fill="url(#a)"/><path d="M15.41 2.82c0 1.29-2.87 2.32-6.41 2.32s-6.41-1-6.41-2.32S5.46.5 9 .5s6.41 1 6.41 2.32" fill="#e8e8e8"/><path d="M13.92 2.63c0 .82-2.21 1.48-4.92 1.48s-4.92-.66-4.92-1.48S6.29 1.16 9 1.16s4.92.66 4.92 1.47" fill="#50e6ff"/><path d="M9 3a11.55 11.55 0 00-3.89.57A11.42 11.42 0 009 4.11a11.15 11.15 0 003.89-.58A11.84 11.84 0 009 3z" fill="#198ab3"/><path d="M12.9 11.4V8H12v4.13h2.46v-.73zM5.76 9.73a1.83 1.83 0 01-.51-.31.44.44 0 01-.12-.32.34.34 0 01.15-.3.68.68 0 01.42-.12 1.62 1.62 0 011 .29v-.86a2.58 2.58 0 00-1-.16 1.64 1.64 0 00-1.09.34 1.08 1.08 0 00-.42.89c0 .51.32.91 1 1.21a2.88 2.88 0 01.62.36.42.42 0 01.15.32.38.38 0 01-.16.31.81.81 0 01-.45.11 1.66 1.66 0 01-1.09-.42V12a2.17 2.17 0 001.07.24 1.88 1.88 0 001.18-.33 1.08 1.08 0 00.33-.91 1.05 1.05 0 00-.25-.7 2.42 2.42 0 00-.83-.57zM11 11.32a2.34 2.34 0 00.33-1.26A2.32 2.32 0 0011 9a1.81 1.81 0 00-.7-.75 2 2 0 00-1-.26 2.11 2.11 0 00-1.08.27 1.86 1.86 0 00-.73.74 2.46 2.46 0 00-.26 1.14 2.26 2.26 0 00.24 1 1.76 1.76 0 00.69.74 2.06 2.06 0 001 .3l.86 1h1.21L10 12.08a1.79 1.79 0 001-.76zm-1-.25a.94.94 0 01-.76.35.92.92 0 01-.76-.36 1.52 1.52 0 01-.29-1 1.53 1.53 0 01.29-1 1 1 0 01.78-.37.87.87 0 01.75.37 1.62 1.62 0 01.27 1 1.46 1.46 0 01-.28 1.01z" fill="url(#b)"/></svg>

View File

@@ -1 +1 @@
<svg xmlns="http://www.w3.org/2000/svg" viewBox="0 0 18 18"><defs><linearGradient id="a" x1="8.88" x2="8.88" y1="12.21" y2=".21" gradientUnits="userSpaceOnUse"><stop offset="0" stop-color="#0078d4"/><stop offset=".82" stop-color="#5ea0ef"/></linearGradient><linearGradient id="b" x1="8.88" x2="8.88" y1="16.84" y2="12.21" gradientUnits="userSpaceOnUse"><stop offset=".15" stop-color="#ccc"/><stop offset="1" stop-color="#707070"/></linearGradient></defs><rect width="18" height="12" x="-.12" y=".21" fill="url(#a)" rx=".6"/><path fill="#50e6ff" d="M11.88 4.46v3.49l-3 1.76v-3.5z"/><path fill="#c3f1ff" d="m11.88 4.46-3 1.76-3-1.76 3-1.75z"/><path fill="#9cebff" d="M8.88 6.22v3.49l-3-1.76V4.46z"/><path fill="#c3f1ff" d="m5.88 7.95 3-1.74v3.5z"/><path fill="#9cebff" d="m11.88 7.95-3-1.74v3.5z"/><path fill="url(#b)" d="M12.49 15.84c-1.78-.28-1.85-1.56-1.85-3.63H7.11c0 2.07-.06 3.35-1.84 3.63a1 1 0 0 0-.89 1h9a1 1 0 0 0-.89-1"/></svg>
<svg xmlns="http://www.w3.org/2000/svg" viewBox="0 0 18 18"><defs><linearGradient id="a" x1="8.88" y1="12.21" x2="8.88" y2=".21" gradientUnits="userSpaceOnUse"><stop offset="0" stop-color="#0078d4"/><stop offset=".82" stop-color="#5ea0ef"/></linearGradient><linearGradient id="b" x1="8.88" y1="16.84" x2="8.88" y2="12.21" gradientUnits="userSpaceOnUse"><stop offset=".15" stop-color="#ccc"/><stop offset="1" stop-color="#707070"/></linearGradient></defs><rect x="-.12" y=".21" width="18" height="12" rx=".6" fill="url(#a)"/><path fill="#50e6ff" d="M11.88 4.46v3.49l-3 1.76v-3.5l3-1.75z"/><path fill="#c3f1ff" d="M11.88 4.46l-3 1.76-3-1.76 3-1.75 3 1.75z"/><path fill="#9cebff" d="M8.88 6.22v3.49l-3-1.76V4.46l3 1.76z"/><path fill="#c3f1ff" d="M5.88 7.95l3-1.74v3.5l-3-1.76z"/><path fill="#9cebff" d="M11.88 7.95l-3-1.74v3.5l3-1.76z"/><path d="M12.49 15.84c-1.78-.28-1.85-1.56-1.85-3.63H7.11c0 2.07-.06 3.35-1.84 3.63a1 1 0 00-.89 1h9a1 1 0 00-.89-1z" fill="url(#b)"/></svg>

View File

@@ -1 +0,0 @@
<svg xmlns="http://www.w3.org/2000/svg" width="2207" height="2500" preserveAspectRatio="xMidYMid" viewBox="0 0 256 290"><path fill="#B7CA9D" d="m256 199.305-127.957-18.797L0 199.329l128.01 47.439z"/><path fill="#4B612C" d="m25.621 197.113 21.63 6.761 1.971-2.238V50.284l-1.971-2.585-21.63 8.274z"/><path fill="#759C3E" d="m123.832 190.423-76.581 13.451V47.703l76.581 17.222z"/><path fill="#4B612C" d="m89.686 216.889-29.848-9.201V14.928L89.686.004l2.612 2.845v210.858z"/><path fill="#759C3E" d="M191.967 192.894 89.686 216.889V0l102.281 39.866z"/><path fill="#4B612C" d="M127.965 244.714 0 199.329v26.324l127.965 63.983z"/><path fill="#759C3E" d="m256 225.622-128.035 64.014v-44.922L256 199.305z"/><path fill="#B7CA9D" d="M220.039 155.692h-31.026l-88.445 6.026L128 166.775z"/><path fill="#4B612C" d="m100.568 219.906 27.42 8.226.789-.849-.023-61.849-.789-.758-27.397-2.958z"/><path fill="#759C3E" d="m220.039 155.692-92.074 8.98.023 63.46 92.051-27.711z"/></svg>

View File

@@ -1 +1 @@
<svg xmlns="http://www.w3.org/2000/svg" xml:space="preserve" viewBox="0 0 122.88 103.53"><path d="M5.47 0H117.4c3.01 0 5.47 2.46 5.47 5.47v92.58c0 3.01-2.46 5.47-5.47 5.47H5.47c-3.01 0-5.47-2.46-5.47-5.47V5.47C0 2.46 2.46 0 5.47 0m26.37 38.55 17.79 18.42 2.14 2.13-2.12 2.16-17.97 19.05-5.07-5 15.85-16.15L26.81 43.6zM94.1 79.41H54.69v-6.84H94.1zM38.19 9.83c3.19 0 5.78 2.59 5.78 5.78s-2.59 5.78-5.78 5.78-5.78-2.59-5.78-5.78S35 9.83 38.19 9.83m-19.24 0c3.19 0 5.78 2.59 5.78 5.78s-2.59 5.78-5.78 5.78-5.78-2.59-5.78-5.78 2.58-5.78 5.78-5.78M7.49 5.41H115.4c1.15 0 2.09.94 2.09 2.09v18.32H5.4V7.5c0-1.15.94-2.09 2.09-2.09" style="fill-rule:evenodd;clip-rule:evenodd;fill:#1668dc"/></svg>
<?xml version="1.0" encoding="utf-8"?><svg version="1.1" id="Layer_1" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" x="0px" y="0px" viewBox="0 0 122.88 103.53" style="enable-background:new 0 0 122.88 103.53" xml:space="preserve"><style type="text/css">.st0{fill-rule:evenodd;clip-rule:evenodd;} .st0{fill:#1668dc;} .st1{fill:#FFFFFF;}</style><g><path class="st0" d="M5.47,0h111.93c3.01,0,5.47,2.46,5.47,5.47v92.58c0,3.01-2.46,5.47-5.47,5.47H5.47 c-3.01,0-5.47-2.46-5.47-5.47V5.47C0,2.46,2.46,0,5.47,0L5.47,0z M31.84,38.55l17.79,18.42l2.14,2.13l-2.12,2.16L31.68,80.31 l-5.07-5l15.85-16.15L26.81,43.6L31.84,38.55L31.84,38.55z M94.1,79.41H54.69v-6.84H94.1V79.41L94.1,79.41z M38.19,9.83 c3.19,0,5.78,2.59,5.78,5.78s-2.59,5.78-5.78,5.78c-3.19,0-5.78-2.59-5.78-5.78S35,9.83,38.19,9.83L38.19,9.83z M18.95,9.83 c3.19,0,5.78,2.59,5.78,5.78s-2.59,5.78-5.78,5.78c-3.19,0-5.78-2.59-5.78-5.78S15.75,9.83,18.95,9.83L18.95,9.83z M7.49,5.41 h107.91c1.15,0,2.09,0.94,2.09,2.09v18.32H5.4V7.5C5.4,6.35,6.34,5.41,7.49,5.41L7.49,5.41z"/></g></svg>

View File

@@ -1 +0,0 @@
<svg xmlns="http://www.w3.org/2000/svg" width="2222" height="2500" preserveAspectRatio="xMinYMin meet" viewBox="0 0 256 288"><path fill="#5C8DBC" d="M255.569 84.72c-.002-4.83-1.035-9.098-3.124-12.761-2.052-3.602-5.125-6.621-9.247-9.008-34.025-19.619-68.083-39.178-102.097-58.817-9.17-5.294-18.061-5.101-27.163.269C100.395 12.39 32.59 51.237 12.385 62.94 4.064 67.757.015 75.129.013 84.711 0 124.166.013 163.62 0 203.076c.002 4.724.991 8.909 2.988 12.517 2.053 3.711 5.169 6.813 9.386 9.254 20.206 11.703 88.02 50.547 101.56 58.536 9.106 5.373 17.997 5.565 27.17.269 34.015-19.64 68.075-39.198 102.105-58.817 4.217-2.44 7.333-5.544 9.386-9.252 1.994-3.608 2.985-7.793 2.987-12.518 0 0 0-78.889-.013-118.345"/><path fill="#1A4674" d="M128.182 143.509 2.988 215.593c2.053 3.711 5.169 6.813 9.386 9.254 20.206 11.703 88.02 50.547 101.56 58.536 9.106 5.373 17.997 5.565 27.17.269 34.015-19.64 68.075-39.198 102.105-58.817 4.217-2.44 7.333-5.544 9.386-9.252z"/><path fill="#1A4674" d="M91.101 164.861c7.285 12.718 20.98 21.296 36.69 21.296 15.807 0 29.58-8.687 36.828-21.541l-36.437-21.107z"/><path fill="#1B598E" d="M255.569 84.72c-.002-4.83-1.035-9.098-3.124-12.761l-124.263 71.55 124.413 72.074c1.994-3.608 2.985-7.793 2.987-12.518 0 0 0-78.889-.013-118.345"/><path fill="#FFF" d="M248.728 148.661h-9.722v9.724h-9.724v-9.724h-9.721v-9.721h9.721v-9.722h9.724v9.722h9.722zm-35.475 0h-9.721v9.724h-9.722v-9.724h-9.722v-9.721h9.722v-9.722h9.722v9.722h9.721z"/><path fill="#FFF" d="M164.619 164.616c-7.248 12.854-21.021 21.541-36.828 21.541-15.71 0-29.405-8.578-36.69-21.296a42.06 42.06 0 0 1-5.574-20.968c0-23.341 18.923-42.263 42.264-42.263 15.609 0 29.232 8.471 36.553 21.059l36.941-21.272c-14.683-25.346-42.096-42.398-73.494-42.398-46.876 0-84.875 38-84.875 84.874 0 15.378 4.091 29.799 11.241 42.238 14.646 25.48 42.137 42.637 73.634 42.637 31.555 0 59.089-17.226 73.714-42.781z"/></svg>

Some files were not shown because too many files have changed in this diff.