Compare commits
34 Commits
v0.78.0 ...
| SHA1 |
|---|
| b6445bc0ad |
| ce81ab73b1 |
| 20f7615a80 |
| 9777b020c5 |
| be72e2ea1d |
| 38a5a21ff0 |
| ca6f90926c |
| b0d19035a4 |
| 054dea366e |
| aaf0b597dc |
| 19372c8194 |
| eb74adad44 |
| d5c04e1342 |
| 2b9632c8fd |
| 24920ae903 |
| 6f096632a2 |
| a42eacec4b |
| e723399f7f |
| 48936bed9b |
| ee70474cc7 |
| c3fa7144ee |
| 5dd02a5b8e |
| c0f01e4cb9 |
| fed84cb50a |
| 80545c4d07 |
| 0b1faec092 |
| ba6f31b1c3 |
| eed92978a4 |
| 41cbd316b5 |
| 8d7d33393d |
| 8d143b44b1 |
| 423aebd6eb |
| 8d630707af |
| a5b52431b7 |
@@ -1,6 +1,7 @@
 .git
 .github
 .vscode
+.devenv
 README.md
 deploy
 sample-apps
.github/workflows/build-community.yaml (58 changes, vendored)
@@ -2,25 +2,49 @@ name: build-community

 on:
   push:
     branches:
       - main
     tags:
-      - v*
+      - 'v[0-9]+.[0-9]+.[0-9]+'
+      - 'v[0-9]+.[0-9]+.[0-9]+-rc.[0-9]+'

 defaults:
   run:
     shell: bash

 env:
   PRIMUS_HOME: .primus
   MAKE: make --no-print-directory --makefile=.primus/src/make/main.mk

 jobs:
   prepare:
     runs-on: ubuntu-latest
     outputs:
       docker_providers: ${{ steps.set-docker-providers.outputs.providers }}
       version: ${{ steps.build-info.outputs.version }}
       hash: ${{ steps.build-info.outputs.hash }}
       time: ${{ steps.build-info.outputs.time }}
       branch: ${{ steps.build-info.outputs.branch }}
     steps:
       - name: self-checkout
         uses: actions/checkout@v4
       - id: token
         name: github-token-gen
         uses: actions/create-github-app-token@v1
         with:
           app-id: ${{ secrets.PRIMUS_APP_ID }}
           private-key: ${{ secrets.PRIMUS_PRIVATE_KEY }}
           owner: ${{ github.repository_owner }}
       - name: primus-checkout
         uses: actions/checkout@v4
         with:
           repository: signoz/primus
           ref: main
           path: .primus
           token: ${{ steps.token.outputs.token }}
       - name: build-info
         id: build-info
         run: |
           echo "version=$($MAKE info-version)" >> $GITHUB_OUTPUT
           echo "hash=$($MAKE info-commit-short)" >> $GITHUB_OUTPUT
           echo "time=$($MAKE info-timestamp)" >> $GITHUB_OUTPUT
           echo "branch=$($MAKE info-branch)" >> $GITHUB_OUTPUT
       - name: set-docker-providers
         id: set-docker-providers
         run: |
           if [[ ${{ github.event.ref }} =~ ^refs/tags/v[0-9]+\.[0-9]+\.[0-9]+$ || ${{ github.event.ref }} =~ ^refs/tags/v[0-9]+\.[0-9]+\.[0-9]+-rc\.[0-9]+$ ]]; then
             echo "providers=dockerhub gcp" >> $GITHUB_OUTPUT
           else
             echo "providers=gcp" >> $GITHUB_OUTPUT
           fi
   js-build:
     uses: signoz/primus.workflows/.github/workflows/js-build.yaml@main
     needs: prepare
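The set-docker-providers step classifies the pushed ref with bash regular expressions: release and release-candidate tags publish to both registries, everything else only to GCP. The same classification can be sanity-checked offline; the following Go sketch is illustrative only and is not code from this repository:

```go
package main

import (
	"fmt"
	"regexp"
)

// Patterns mirroring the workflow's tag filters: stable releases and
// release candidates. Taken from the diff above; the surrounding
// program is an assumption for illustration.
var (
	release = regexp.MustCompile(`^refs/tags/v[0-9]+\.[0-9]+\.[0-9]+$`)
	rc      = regexp.MustCompile(`^refs/tags/v[0-9]+\.[0-9]+\.[0-9]+-rc\.[0-9]+$`)
)

func providers(ref string) string {
	// Tagged releases and RCs push to Docker Hub and GCP; everything
	// else (e.g. pushes to main) only pushes to GCP.
	if release.MatchString(ref) || rc.MatchString(ref) {
		return "dockerhub gcp"
	}
	return "gcp"
}

func main() {
	for _, ref := range []string{
		"refs/tags/v0.79.1",
		"refs/tags/v0.79.1-rc.2",
		"refs/heads/main",
		"refs/tags/v0.79.1-beta.1", // no longer matched after dropping the broad v* filter
	} {
		fmt.Printf("%-32s -> %s\n", ref, providers(ref))
	}
}
```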
@@ -45,13 +69,13 @@ jobs:
       GO_BUILD_FLAGS: >-
         -tags timetzdata
         -ldflags='-linkmode external -extldflags \"-static\" -s -w
-        -X github.com/signoz/zeus/pkg/version.Version=\$($MAKE info-version)
-        -X github.com/signoz/zeus/pkg/version.variant=community
-        -X github.com/signoz/zeus/pkg/version.hash=\$($MAKE info-commit-short)
-        -X github.com/signoz/zeus/pkg/version.time=\$($MAKE info-timestamp)
-        -X github.com/signoz/zeus/pkg/version.branch=\$($MAKE info-branch)'
+        -X github.com/SigNoz/signoz/pkg/version.version=${{ needs.prepare.outputs.version }}
+        -X github.com/SigNoz/signoz/pkg/version.variant=community
+        -X github.com/SigNoz/signoz/pkg/version.hash=${{ needs.prepare.outputs.hash }}
+        -X github.com/SigNoz/signoz/pkg/version.time=${{ needs.prepare.outputs.time }}
+        -X github.com/SigNoz/signoz/pkg/version.branch=${{ needs.prepare.outputs.branch }}'
       GO_CGO_ENABLED: 1
       DOCKER_BASE_IMAGES: '{"alpine": "alpine:3.20.3"}'
       DOCKER_DOCKERFILE_PATH: ./pkg/query-service/Dockerfile.multi-arch
       DOCKER_MANIFEST: true
-      DOCKER_PROVIDERS: dockerhub
+      DOCKER_PROVIDERS: ${{ needs.prepare.outputs.docker_providers }}
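The `-X` flags above rely on the Go linker overwriting package-level string variables at link time. A minimal sketch of the receiving side, assuming a `pkg/version` package shaped roughly like this (variable names come from the flags in the diff; everything else is illustrative):

```go
// Package version is a sketch of the pattern the -X flags target:
// package-level string variables that the linker overwrites at build
// time. -X also works on unexported variables, which is why the flag
// uses the lowercase name "version.version". Defaults are assumptions.
package version

var (
	version = "dev"       // -X github.com/SigNoz/signoz/pkg/version.version=...
	variant = "community" // community or enterprise
	hash    = "unknown"   // short commit SHA
	time    = "unknown"   // build timestamp
	branch  = "unknown"   // git branch
)

// Info returns the stamped build metadata as a single string.
func Info() string {
	return version + " (" + variant + ", " + hash + ", " + branch + ", " + time + ")"
}
```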
.github/workflows/build-enterprise.yaml (53 changes, vendored)
@@ -2,17 +2,50 @@ name: build-enterprise

 on:
   push:
     branches:
       - main
     tags:
       - v*

 defaults:
   run:
     shell: bash

 env:
   PRIMUS_HOME: .primus
   MAKE: make --no-print-directory --makefile=.primus/src/make/main.mk

 jobs:
   prepare:
     runs-on: ubuntu-latest
     outputs:
       docker_providers: ${{ steps.set-docker-providers.outputs.providers }}
       version: ${{ steps.build-info.outputs.version }}
       hash: ${{ steps.build-info.outputs.hash }}
       time: ${{ steps.build-info.outputs.time }}
       branch: ${{ steps.build-info.outputs.branch }}
     steps:
       - name: self-checkout
         uses: actions/checkout@v4
       - id: token
         name: github-token-gen
         uses: actions/create-github-app-token@v1
         with:
           app-id: ${{ secrets.PRIMUS_APP_ID }}
           private-key: ${{ secrets.PRIMUS_PRIVATE_KEY }}
           owner: ${{ github.repository_owner }}
       - name: primus-checkout
         uses: actions/checkout@v4
         with:
           repository: signoz/primus
           ref: main
           path: .primus
           token: ${{ steps.token.outputs.token }}
       - name: build-info
         id: build-info
         run: |
           echo "version=$($MAKE info-version)" >> $GITHUB_OUTPUT
           echo "hash=$($MAKE info-commit-short)" >> $GITHUB_OUTPUT
           echo "time=$($MAKE info-timestamp)" >> $GITHUB_OUTPUT
           echo "branch=$($MAKE info-branch)" >> $GITHUB_OUTPUT
       - name: set-docker-providers
         id: set-docker-providers
         run: |
@@ -51,7 +84,7 @@ jobs:
       JS_INPUT_ARTIFACT_CACHE_KEY: enterprise-dotenv-${{ github.sha }}
       JS_INPUT_ARTIFACT_PATH: frontend/.env
       JS_OUTPUT_ARTIFACT_CACHE_KEY: enterprise-jsbuild-${{ github.sha }}
       JS_OUTPUT_ARTIFACT_PATH: frontend/build
       DOCKER_BUILD: false
       DOCKER_MANIFEST: false
   go-build:
@@ -66,13 +99,13 @@ jobs:
       GO_BUILD_FLAGS: >-
         -tags timetzdata
         -ldflags='-linkmode external -extldflags \"-static\" -s -w
-        -X github.com/signoz/zeus/pkg/version.Version=\$($MAKE info-version)
-        -X github.com/signoz/zeus/pkg/version.variant=enterprise
-        -X github.com/signoz/zeus/pkg/version.hash=\$($MAKE info-commit-short)
-        -X github.com/signoz/zeus/pkg/version.time=\$($MAKE info-timestamp)
-        -X github.com/signoz/zeus/pkg/version.branch=\$($MAKE info-branch)
-        -X github.com/signoz/zeus/ee/query-service/constants.ZeusURL=https://api.signoz.cloud
-        -X github.com/signoz/zeus/ee/query-service/constants.LicenseSignozIo=https://license.signoz.io/api/v1'
+        -X github.com/SigNoz/signoz/pkg/version.version=${{ needs.prepare.outputs.version }}
+        -X github.com/SigNoz/signoz/pkg/version.variant=enterprise
+        -X github.com/SigNoz/signoz/pkg/version.hash=${{ needs.prepare.outputs.hash }}
+        -X github.com/SigNoz/signoz/pkg/version.time=${{ needs.prepare.outputs.time }}
+        -X github.com/SigNoz/signoz/pkg/version.branch=${{ needs.prepare.outputs.branch }}
+        -X github.com/SigNoz/signoz/ee/query-service/constants.ZeusURL=https://api.signoz.cloud
+        -X github.com/SigNoz/signoz/ee/query-service/constants.LicenseSignozIo=https://license.signoz.io/api/v1'
       GO_CGO_ENABLED: 1
       DOCKER_BASE_IMAGES: '{"alpine": "alpine:3.20.3"}'
       DOCKER_DOCKERFILE_PATH: ./ee/query-service/Dockerfile.multi-arch
.github/workflows/build-staging.yaml (122 changes, vendored, new file)
@@ -0,0 +1,122 @@
name: build-staging

on:
  push:
    branches:
      - main
  pull_request:
    types: [labeled]

defaults:
  run:
    shell: bash

env:
  PRIMUS_HOME: .primus
  MAKE: make --no-print-directory --makefile=.primus/src/make/main.mk

jobs:
  prepare:
    runs-on: ubuntu-latest
    if: ${{ contains(github.event.label.name, 'staging:') || github.event.ref == 'refs/heads/main' }}
    outputs:
      version: ${{ steps.build-info.outputs.version }}
      hash: ${{ steps.build-info.outputs.hash }}
      time: ${{ steps.build-info.outputs.time }}
      branch: ${{ steps.build-info.outputs.branch }}
      deployment: ${{ steps.build-info.outputs.deployment }}
    steps:
      - name: self-checkout
        uses: actions/checkout@v4
      - id: token
        name: github-token-gen
        uses: actions/create-github-app-token@v1
        with:
          app-id: ${{ secrets.PRIMUS_APP_ID }}
          private-key: ${{ secrets.PRIMUS_PRIVATE_KEY }}
          owner: ${{ github.repository_owner }}
      - name: primus-checkout
        uses: actions/checkout@v4
        with:
          repository: signoz/primus
          ref: main
          path: .primus
          token: ${{ steps.token.outputs.token }}
      - name: build-info
        id: build-info
        run: |
          echo "version=$($MAKE info-version)" >> $GITHUB_OUTPUT
          echo "hash=$($MAKE info-commit-short)" >> $GITHUB_OUTPUT
          echo "time=$($MAKE info-timestamp)" >> $GITHUB_OUTPUT
          echo "branch=$($MAKE info-branch)" >> $GITHUB_OUTPUT

          staging_label="${{ github.event.label.name }}"
          if [[ "${staging_label}" == "staging:"* ]]; then
            deployment=${staging_label#"staging:"}
          elif [[ "${{ github.event.ref }}" == "refs/heads/main" ]]; then
            deployment="staging"
          else
            echo "error: not able to determine deployment - please verify the PR label or the branch"
            exit 1
          fi
          echo "deployment=${deployment}" >> $GITHUB_OUTPUT
      - name: create-dotenv
        run: |
          mkdir -p frontend
          echo 'CI=1' > frontend/.env
          echo 'TUNNEL_URL=https://telemetry.staging.signoz.cloud/tunnel' >> frontend/.env
          echo 'TUNNEL_DOMAIN=https://telemetry.staging.signoz.cloud' >> frontend/.env
      - name: cache-dotenv
        uses: actions/cache@v4
        with:
          path: frontend/.env
          key: staging-dotenv-${{ github.sha }}
  js-build:
    uses: signoz/primus.workflows/.github/workflows/js-build.yaml@main
    needs: prepare
    secrets: inherit
    with:
      PRIMUS_REF: main
      JS_SRC: frontend
      JS_INPUT_ARTIFACT_CACHE_KEY: staging-dotenv-${{ github.sha }}
      JS_INPUT_ARTIFACT_PATH: frontend/.env
      JS_OUTPUT_ARTIFACT_CACHE_KEY: staging-jsbuild-${{ github.sha }}
      JS_OUTPUT_ARTIFACT_PATH: frontend/build
      DOCKER_BUILD: false
      DOCKER_MANIFEST: false
  go-build:
    uses: signoz/primus.workflows/.github/workflows/go-build.yaml@main
    needs: [prepare, js-build]
    secrets: inherit
    with:
      PRIMUS_REF: main
      GO_INPUT_ARTIFACT_CACHE_KEY: staging-jsbuild-${{ github.sha }}
      GO_INPUT_ARTIFACT_PATH: frontend/build
      GO_BUILD_CONTEXT: ./ee/query-service
      GO_BUILD_FLAGS: >-
        -tags timetzdata
        -ldflags='-linkmode external -extldflags \"-static\" -s -w
        -X github.com/SigNoz/signoz/pkg/version.version=${{ needs.prepare.outputs.version }}
        -X github.com/SigNoz/signoz/pkg/version.variant=enterprise
        -X github.com/SigNoz/signoz/pkg/version.hash=${{ needs.prepare.outputs.hash }}
        -X github.com/SigNoz/signoz/pkg/version.time=${{ needs.prepare.outputs.time }}
        -X github.com/SigNoz/signoz/pkg/version.branch=${{ needs.prepare.outputs.branch }}
        -X github.com/SigNoz/signoz/ee/query-service/constants.ZeusURL=https://api.staging.signoz.cloud
        -X github.com/SigNoz/signoz/ee/query-service/constants.LicenseSignozIo=https://license.staging.signoz.cloud/api/v1'
      GO_CGO_ENABLED: 1
      DOCKER_BASE_IMAGES: '{"alpine": "alpine:3.20.3"}'
      DOCKER_DOCKERFILE_PATH: ./ee/query-service/Dockerfile.multi-arch
      DOCKER_MANIFEST: true
      DOCKER_PROVIDERS: gcp
  staging:
    if: ${{ contains(github.event.label.name, 'staging:') || github.event.ref == 'refs/heads/main' }}
    uses: signoz/primus.workflows/.github/workflows/github-trigger.yaml@main
    secrets: inherit
    needs: [prepare, go-build]
    with:
      PRIMUS_REF: main
      GITHUB_ENVIRONMENT: staging
      GITHUB_SILENT: true
      GITHUB_REPOSITORY_NAME: charts-saas-v3-staging
      GITHUB_EVENT_NAME: releaser
      GITHUB_EVENT_PAYLOAD: "{\"deployment\": \"${{ needs.prepare.outputs.deployment }}\", \"signoz_version\": \"${{ needs.prepare.outputs.version }}\"}"
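The build-info step derives the target deployment from either a `staging:<name>` PR label or a push to main. The same decision logic, as a hedged Go sketch (names and structure are illustrative, not repository code):

```go
package main

import (
	"fmt"
	"strings"
)

// deployment mirrors the shell in the build-info step above: a PR
// label "staging:<name>" selects a named deployment, a push to main
// falls back to "staging", anything else is an error.
func deployment(label, ref string) (string, error) {
	if name, ok := strings.CutPrefix(label, "staging:"); ok {
		return name, nil
	}
	if ref == "refs/heads/main" {
		return "staging", nil
	}
	return "", fmt.Errorf("not able to determine deployment - please verify the PR label or the branch")
}

func main() {
	for _, c := range []struct{ label, ref string }{
		{"staging:eu-1", "refs/pull/123/merge"}, // labeled PR
		{"", "refs/heads/main"},                 // push to main
		{"bugfix", "refs/heads/feature"},        // neither: error
	} {
		d, err := deployment(c.label, c.ref)
		fmt.Println(d, err)
	}
}
```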
.github/workflows/integrationci.yaml (55 changes, vendored, new file)
@@ -0,0 +1,55 @@
name: integrationci

on:
  pull_request:
    types:
      - labeled
  pull_request_target:
    types:
      - labeled

jobs:
  test:
    strategy:
      fail-fast: false
      matrix:
        src:
          - bootstrap
        sqlstore-provider:
          - postgres
          - sqlite
        clickhouse-version:
          - 24.1.2-alpine
          - 24.12-alpine
        schema-migrator-version:
          - v0.111.38
        postgres-version:
          - 15
    if: |
      ((github.event_name == 'pull_request' && ! github.event.pull_request.head.repo.fork && github.event.pull_request.user.login != 'dependabot[bot]' && ! contains(github.event.pull_request.labels.*.name, 'safe-to-test')) ||
      (github.event_name == 'pull_request_target' && contains(github.event.pull_request.labels.*.name, 'safe-to-test'))) && contains(github.event.pull_request.labels.*.name, 'safe-to-integrate')
    runs-on: ubuntu-latest
    steps:
      - name: checkout
        uses: actions/checkout@v4
      - name: python
        uses: actions/setup-python@v5
        with:
          python-version: 3.13
      - name: poetry
        run: |
          python -m pip install poetry==2.1.2
          python -m poetry config virtualenvs.in-project true
          cd tests/integration && poetry install --no-root
      - name: run
        run: |
          cd tests/integration && \
          poetry run pytest -ra \
            --basetemp=./tmp/ \
            -vv \
            --capture=no \
            src/${{matrix.src}} \
            --sqlstore-provider ${{matrix.sqlstore-provider}} \
            --postgres-version ${{matrix.postgres-version}} \
            --clickhouse-version ${{matrix.clickhouse-version}} \
            --schema-migrator-version ${{matrix.schema-migrator-version}}
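A GitHub Actions matrix expands to the cross-product of its axes, so the configuration above runs four jobs (1 src x 2 sqlstore providers x 2 ClickHouse versions x 1 schema-migrator version x 1 Postgres version). A tiny Go sketch of that expansion, for illustration only:

```go
package main

import "fmt"

// Enumerate the matrix combinations the workflow above would run.
// The axis values are copied from the diff; the expansion logic is
// just the standard cross-product GitHub Actions performs.
func main() {
	srcs := []string{"bootstrap"}
	sqlstores := []string{"postgres", "sqlite"}
	clickhouses := []string{"24.1.2-alpine", "24.12-alpine"}

	n := 0
	for _, s := range srcs {
		for _, db := range sqlstores {
			for _, ch := range clickhouses {
				n++
				fmt.Printf("job %d: src=%s sqlstore=%s clickhouse=%s\n", n, s, db, ch)
			}
		}
	}
}
```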
.github/workflows/prereleaser.yaml (4 changes, vendored)
@@ -1,9 +1,9 @@
 name: prereleaser

 on:
-  # schedule every wednesday 9:30 AM UTC (3pm IST)
+  # schedule every wednesday 6:30 AM UTC (12:00 PM IST)
   schedule:
-    - cron: '30 9 * * 3'
+    - cron: '30 6 * * 3'

   # allow manual triggering of the workflow by a maintainer
   workflow_dispatch:
.github/workflows/staging-deployment.yaml (13 changes, vendored)
@@ -36,12 +36,17 @@ jobs:
           echo "GITHUB_BRANCH: ${GITHUB_BRANCH}"
           echo "GITHUB_SHA: ${GITHUB_SHA}"
           export VERSION="${GITHUB_SHA:0:7}" # needed for child process to access it
-          export OTELCOL_TAG="main"
           export PATH="/usr/local/go/bin/:$PATH" # needed for Golang to work
           export KAFKA_SPAN_EVAL="true"
-          docker system prune --force
-          docker pull signoz/signoz-otel-collector:main
-          docker pull signoz/signoz-schema-migrator:main
+          docker system prune --force --all
+          OTELCOL_TAG=$(curl -s https://api.github.com/repos/SigNoz/signoz-otel-collector/releases/latest | jq -r '.tag_name // "not-found"')
+          if [[ "${OTELCOL_TAG}" == "not-found" ]]; then
+            echo "warning: unable to determine latest SigNoz OtelCollector release tag, skipping latest otelcol deployment"
+          else
+            export OTELCOL_TAG=${OTELCOL_TAG}
+            docker pull signoz/signoz-otel-collector:${OTELCOL_TAG}
+            docker pull signoz/signoz-schema-migrator:${OTELCOL_TAG}
+          fi
           cd ~/signoz
           git status
           git add .
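The curl and jq pipeline above asks the GitHub releases API for the collector's latest tag and falls back to a sentinel when none is found. The equivalent lookup in Go, as an illustrative sketch rather than the workflow's actual implementation:

```go
package main

import (
	"encoding/json"
	"fmt"
	"net/http"
)

// latestTag fetches the latest release tag for a repository, mirroring
// `curl ... | jq -r '.tag_name // "not-found"'` from the diff above.
func latestTag(repo string) (string, error) {
	resp, err := http.Get("https://api.github.com/repos/" + repo + "/releases/latest")
	if err != nil {
		return "", err
	}
	defer resp.Body.Close()

	var release struct {
		TagName string `json:"tag_name"`
	}
	if err := json.NewDecoder(resp.Body).Decode(&release); err != nil {
		return "", err
	}
	if release.TagName == "" {
		return "not-found", nil // mirrors jq's `// "not-found"` fallback
	}
	return release.TagName, nil
}

func main() {
	tag, err := latestTag("SigNoz/signoz-otel-collector")
	fmt.Println(tag, err)
}
```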
.github/workflows/testing-deployment.yaml (2 changes, vendored)
@@ -38,7 +38,7 @@ jobs:
           export VERSION="${GITHUB_SHA:0:7}" # needed for child process to access it
           export DEV_BUILD="1"
           export PATH="/usr/local/go/bin/:$PATH" # needed for Golang to work
-          docker system prune --force
+          docker system prune --force --all
           cd ~/signoz
           git status
           git add .
.gitignore (147 changes, vendored)
@@ -80,6 +80,153 @@ deploy/common/clickhouse/user_scripts/

queries.active

# tmp
**/tmp/**

# .devenv tmp files
.devenv/**/tmp/**
.qodo

### Python ###
# Byte-compiled / optimized / DLL files
__pycache__/
*.py[cod]
*$py.class

# C extensions
*.so

# Distribution / packaging
.Python
build/
develop-eggs/
dist/
downloads/
eggs/
.eggs/
lib/
lib64/
parts/
sdist/
var/
wheels/
share/python-wheels/
*.egg-info/
.installed.cfg
*.egg
MANIFEST

# PyInstaller
# Usually these files are written by a python script from a template
# before PyInstaller builds the exe, so as to inject date/other infos into it.
*.manifest
*.spec

# Installer logs
pip-log.txt
pip-delete-this-directory.txt

# Unit test / coverage reports
htmlcov/
.tox/
.nox/
.coverage
.coverage.*
.cache
nosetests.xml
coverage.xml
*.cover
*.py,cover
.hypothesis/
.pytest_cache/
cover/

# Translations
*.mo
*.pot

# Django stuff:
*.log
local_settings.py
db.sqlite3
db.sqlite3-journal

# Flask stuff:
instance/
.webassets-cache

# Scrapy stuff:
.scrapy

# Sphinx documentation
docs/_build/

# PyBuilder
.pybuilder/
target/

# Jupyter Notebook
.ipynb_checkpoints

# IPython
profile_default/
ipython_config.py

# Celery stuff
celerybeat-schedule
celerybeat.pid

# SageMath parsed files
*.sage.py

# Environments
.env
.venv
env/
venv/
ENV/
env.bak/
venv.bak/

# Spyder project settings
.spyderproject
.spyproject

# Rope project settings
.ropeproject

# mkdocs documentation
/site

# mypy
.mypy_cache/
.dmypy.json
dmypy.json

# Pyre type checker
.pyre/

# pytype static type analyzer
.pytype/

# Cython debug symbols
cython_debug/

# PyCharm
# JetBrains specific template is maintained in a separate JetBrains.gitignore that can
# be found at https://github.com/github/gitignore/blob/main/Global/JetBrains.gitignore
# and can be added to the global gitignore or merged into this file. For a more nuclear
# option (not recommended) you can uncomment the following to ignore the entire idea folder.
#.idea/

### Python Patch ###
# Poetry local configuration file - https://python-poetry.org/docs/configuration/#local-configuration
poetry.toml

# ruff
.ruff_cache/

# LSP config files
pyrightconfig.json

# End of https://www.toptal.com/developers/gitignore/api/python
Makefile (32 changes)
@@ -10,7 +10,7 @@ COMMIT_SHORT_SHA ?= $(shell git rev-parse --short HEAD)
 BRANCH_NAME ?= $(subst /,-,$(shell git rev-parse --abbrev-ref HEAD))
 VERSION ?= $(BRANCH_NAME)-$(COMMIT_SHORT_SHA)
 TIMESTAMP ?= $(shell date -u +"%Y-%m-%dT%H:%M:%SZ")
-ARCHS = amd64 arm64
+ARCHS ?= amd64 arm64
 TARGET_DIR ?= $(shell pwd)/target

 ZEUS_URL ?= https://api.signoz.cloud
@@ -23,6 +23,7 @@ GO_BUILD_ARCHS_COMMUNITY = $(addprefix go-build-community-,$(ARCHS))
 GO_BUILD_CONTEXT_COMMUNITY = $(SRC)/pkg/query-service
 GO_BUILD_LDFLAGS_COMMUNITY = $(GO_BUILD_VERSION_LDFLAGS) -X github.com/SigNoz/signoz/pkg/version.variant=community
 GO_BUILD_ARCHS_ENTERPRISE = $(addprefix go-build-enterprise-,$(ARCHS))
+GO_BUILD_ARCHS_ENTERPRISE_RACE = $(addprefix go-build-enterprise-race-,$(ARCHS))
 GO_BUILD_CONTEXT_ENTERPRISE = $(SRC)/ee/query-service
 GO_BUILD_LDFLAGS_ENTERPRISE = $(GO_BUILD_VERSION_LDFLAGS) -X github.com/SigNoz/signoz/pkg/version.variant=enterprise $(GO_BUILD_LDFLAG_ZEUS_URL) $(GO_BUILD_LDFLAG_LICENSE_SIGNOZ_IO)
@@ -119,6 +120,18 @@ $(GO_BUILD_ARCHS_ENTERPRISE): go-build-enterprise-%: $(TARGET_DIR)
 		CGO_ENABLED=1 GOARCH=$* GOOS=$(OS) go build -C $(GO_BUILD_CONTEXT_ENTERPRISE) -tags timetzdata -o $(TARGET_DIR)/$(OS)-$*/$(NAME) -ldflags "-linkmode external -extldflags '-static' -s -w $(GO_BUILD_LDFLAGS_ENTERPRISE)"; \
 	fi

+.PHONY: go-build-enterprise-race $(GO_BUILD_ARCHS_ENTERPRISE_RACE)
+go-build-enterprise-race: ## Builds the go backend server for enterprise with race
+go-build-enterprise-race: $(GO_BUILD_ARCHS_ENTERPRISE_RACE)
+$(GO_BUILD_ARCHS_ENTERPRISE_RACE): go-build-enterprise-race-%: $(TARGET_DIR)
+	@mkdir -p $(TARGET_DIR)/$(OS)-$*
+	@echo ">> building binary $(TARGET_DIR)/$(OS)-$*/$(NAME)"
+	@if [ $* = "arm64" ]; then \
+		CC=aarch64-linux-gnu-gcc CGO_ENABLED=1 GOARCH=$* GOOS=$(OS) go build -C $(GO_BUILD_CONTEXT_ENTERPRISE) -race -tags timetzdata -o $(TARGET_DIR)/$(OS)-$*/$(NAME) -ldflags "-linkmode external -extldflags '-static' -s -w $(GO_BUILD_LDFLAGS_ENTERPRISE)"; \
+	else \
+		CGO_ENABLED=1 GOARCH=$* GOOS=$(OS) go build -C $(GO_BUILD_CONTEXT_ENTERPRISE) -race -tags timetzdata -o $(TARGET_DIR)/$(OS)-$*/$(NAME) -ldflags "-linkmode external -extldflags '-static' -s -w $(GO_BUILD_LDFLAGS_ENTERPRISE)"; \
+	fi
+
 ##############################################################
 # js commands
 ##############################################################
@@ -167,3 +180,20 @@ docker-buildx-enterprise: go-build-enterprise js-build
 		--platform linux/arm64,linux/amd64 \
 		--push \
 		--tag $(DOCKER_REGISTRY_ENTERPRISE):$(VERSION) $(SRC)
+
+##############################################################
+# python commands
+##############################################################
+.PHONY: py-fmt
+py-fmt: ## Run black for integration tests
+	@cd tests/integration && poetry run black .
+
+.PHONY: py-lint
+py-lint: ## Run lint for integration tests
+	@cd tests/integration && poetry run isort .
+	@cd tests/integration && poetry run autoflake .
+	@cd tests/integration && poetry run pylint .
+
+.PHONY: py-test
+py-test: ## Runs integration tests
+	@cd tests/integration && poetry run pytest --basetemp=./tmp/ -vv --capture=no src/
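The new go-build-enterprise-race target compiles the enterprise binary with Go's race detector enabled. A minimal, self-contained demonstration of the kind of bug `-race` surfaces at runtime (an unsynchronized counter written from two goroutines); this is illustrative only, not repository code:

```go
package main

import (
	"fmt"
	"sync"
)

func main() {
	counter := 0
	var wg sync.WaitGroup
	for i := 0; i < 2; i++ {
		wg.Add(1)
		go func() {
			defer wg.Done()
			for j := 0; j < 1000; j++ {
				counter++ // racy write; `go run -race` reports it
			}
		}()
	}
	wg.Wait()
	fmt.Println(counter) // nondeterministic without synchronization
}
```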
@@ -174,7 +174,7 @@ services:
   #   - ../common/clickhouse/storage.xml:/etc/clickhouse-server/config.d/storage.xml
   signoz:
     !!merge <<: *db-depend
-    image: signoz/signoz:v0.78.0
+    image: signoz/signoz:v0.79.1
     command:
       - --config=/root/config/prometheus.yml
       - --use-logs-new-schema=true
@@ -208,7 +208,7 @@ services:
       retries: 3
   otel-collector:
     !!merge <<: *db-depend
-    image: signoz/signoz-otel-collector:v0.111.38
+    image: signoz/signoz-otel-collector:v0.111.39
     command:
       - --config=/etc/otel-collector-config.yaml
       - --manager-config=/etc/manager-config.yaml
@@ -232,7 +232,7 @@ services:
       - signoz
   schema-migrator:
     !!merge <<: *common
-    image: signoz/signoz-schema-migrator:v0.111.38
+    image: signoz/signoz-schema-migrator:v0.111.39
     deploy:
       restart_policy:
         condition: on-failure
@@ -110,7 +110,7 @@ services:
   #   - ../common/clickhouse/storage.xml:/etc/clickhouse-server/config.d/storage.xml
   signoz:
     !!merge <<: *db-depend
-    image: signoz/signoz:v0.78.0
+    image: signoz/signoz:v0.79.1
     command:
       - --config=/root/config/prometheus.yml
       - --use-logs-new-schema=true
@@ -143,7 +143,7 @@ services:
       retries: 3
   otel-collector:
     !!merge <<: *db-depend
-    image: signoz/signoz-otel-collector:v0.111.38
+    image: signoz/signoz-otel-collector:v0.111.39
     command:
       - --config=/etc/otel-collector-config.yaml
       - --manager-config=/etc/manager-config.yaml
@@ -167,7 +167,7 @@ services:
       - signoz
   schema-migrator:
     !!merge <<: *common
-    image: signoz/signoz-schema-migrator:v0.111.38
+    image: signoz/signoz-schema-migrator:v0.111.39
     deploy:
       restart_policy:
         condition: on-failure
@@ -177,7 +177,7 @@ services:
   #   - ../common/clickhouse/storage.xml:/etc/clickhouse-server/config.d/storage.xml
   signoz:
     !!merge <<: *db-depend
-    image: signoz/signoz:${VERSION:-v0.78.0}
+    image: signoz/signoz:${VERSION:-v0.79.1}
     container_name: signoz
     command:
       - --config=/root/config/prometheus.yml
@@ -212,7 +212,7 @@ services:
   # TODO: support otel-collector multiple replicas. Nginx/Traefik for loadbalancing?
   otel-collector:
     !!merge <<: *db-depend
-    image: signoz/signoz-otel-collector:${OTELCOL_TAG:-v0.111.38}
+    image: signoz/signoz-otel-collector:${OTELCOL_TAG:-v0.111.39}
     container_name: signoz-otel-collector
     command:
       - --config=/etc/otel-collector-config.yaml
@@ -238,7 +238,7 @@ services:
         condition: service_healthy
   schema-migrator-sync:
     !!merge <<: *common
-    image: signoz/signoz-schema-migrator:${OTELCOL_TAG:-v0.111.38}
+    image: signoz/signoz-schema-migrator:${OTELCOL_TAG:-v0.111.39}
     container_name: schema-migrator-sync
     command:
       - sync
@@ -249,7 +249,7 @@ services:
        condition: service_healthy
   schema-migrator-async:
     !!merge <<: *db-depend
-    image: signoz/signoz-schema-migrator:${OTELCOL_TAG:-v0.111.38}
+    image: signoz/signoz-schema-migrator:${OTELCOL_TAG:-v0.111.39}
     container_name: schema-migrator-async
     command:
       - async
@@ -110,7 +110,7 @@ services:
   #   - ../common/clickhouse/storage.xml:/etc/clickhouse-server/config.d/storage.xml
   signoz:
     !!merge <<: *db-depend
-    image: signoz/signoz:${VERSION:-v0.78.0}
+    image: signoz/signoz:${VERSION:-v0.79.1}
     container_name: signoz
     command:
       - --config=/root/config/prometheus.yml
@@ -146,7 +146,7 @@ services:
       retries: 3
   otel-collector:
     !!merge <<: *db-depend
-    image: signoz/signoz-otel-collector:${OTELCOL_TAG:-v0.111.38}
+    image: signoz/signoz-otel-collector:${OTELCOL_TAG:-v0.111.39}
     container_name: signoz-otel-collector
     command:
       - --config=/etc/otel-collector-config.yaml
@@ -168,7 +168,7 @@ services:
        condition: service_healthy
   schema-migrator-sync:
     !!merge <<: *common
-    image: signoz/signoz-schema-migrator:${OTELCOL_TAG:-v0.111.38}
+    image: signoz/signoz-schema-migrator:${OTELCOL_TAG:-v0.111.39}
     container_name: schema-migrator-sync
     command:
       - sync
@@ -180,7 +180,7 @@ services:
     restart: on-failure
   schema-migrator-async:
     !!merge <<: *db-depend
-    image: signoz/signoz-schema-migrator:${OTELCOL_TAG:-v0.111.38}
+    image: signoz/signoz-schema-migrator:${OTELCOL_TAG:-v0.111.39}
     container_name: schema-migrator-async
     command:
      - async
@@ -110,7 +110,7 @@ services:
   #   - ../common/clickhouse/storage.xml:/etc/clickhouse-server/config.d/storage.xml
   signoz:
     !!merge <<: *db-depend
-    image: signoz/signoz:${VERSION:-v0.78.0}
+    image: signoz/signoz:${VERSION:-v0.79.1}
     container_name: signoz
     command:
       - --config=/root/config/prometheus.yml
@@ -144,7 +144,7 @@ services:
       retries: 3
   otel-collector:
     !!merge <<: *db-depend
-    image: signoz/signoz-otel-collector:${OTELCOL_TAG:-v0.111.38}
+    image: signoz/signoz-otel-collector:${OTELCOL_TAG:-v0.111.39}
     container_name: signoz-otel-collector
     command:
       - --config=/etc/otel-collector-config.yaml
@@ -166,7 +166,7 @@ services:
        condition: service_healthy
   schema-migrator-sync:
     !!merge <<: *common
-    image: signoz/signoz-schema-migrator:${OTELCOL_TAG:-v0.111.38}
+    image: signoz/signoz-schema-migrator:${OTELCOL_TAG:-v0.111.39}
     container_name: schema-migrator-sync
     command:
       - sync
@@ -178,7 +178,7 @@ services:
     restart: on-failure
   schema-migrator-async:
     !!merge <<: *db-depend
-    image: signoz/signoz-schema-migrator:${OTELCOL_TAG:-v0.111.38}
+    image: signoz/signoz-schema-migrator:${OTELCOL_TAG:-v0.111.39}
     container_name: schema-migrator-async
     command:
       - async
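Several of the bumped image tags use compose's `${VAR:-default}` substitution: the environment variable wins when it is set and non-empty, otherwise the pinned default applies. The same rule expressed in Go, as a small illustrative sketch:

```go
package main

import (
	"fmt"
	"os"
)

// envOr mirrors docker compose's "${VERSION:-v0.79.1}": use the
// environment value when set and non-empty, else the literal default.
func envOr(key, fallback string) string {
	if v := os.Getenv(key); v != "" {
		return v
	}
	return fallback
}

func main() {
	fmt.Println("signoz/signoz:" + envOr("VERSION", "v0.79.1"))
	fmt.Println("signoz/signoz-otel-collector:" + envOr("OTELCOL_TAG", "v0.111.39"))
}
```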
ee/query-service/Dockerfile.integration (36 changes, new file)
@@ -0,0 +1,36 @@
FROM golang:1.22-bullseye

ARG OS="linux"
ARG TARGETARCH
ARG ZEUSURL

# This path is important for stacktraces
WORKDIR $GOPATH/src/github.com/signoz/signoz
WORKDIR /root

RUN set -eux; \
	apt-get update; \
	apt-get install -y --no-install-recommends \
		g++ \
		gcc \
		libc6-dev \
		make \
		pkg-config \
	; \
	rm -rf /var/lib/apt/lists/*

COPY go.mod go.sum ./

RUN go mod download

COPY ./ee/ ./ee/
COPY ./pkg/ ./pkg/
COPY ./templates/email /root/templates

COPY Makefile Makefile
RUN TARGET_DIR=/root ARCHS=${TARGETARCH} ZEUS_URL=${ZEUSURL} LICENSE_URL=${ZEUSURL}/api/v1 make go-build-enterprise-race
RUN mv /root/linux-${TARGETARCH}/signoz /root/signoz

RUN chmod 755 /root /root/signoz

ENTRYPOINT ["/root/signoz"]
@@ -153,9 +153,11 @@ func (ah *APIHandler) getOrCreateCloudIntegrationPAT(ctx context.Context, orgId
 func (ah *APIHandler) getOrCreateCloudIntegrationUser(
 	ctx context.Context, orgId string, cloudProvider string,
 ) (*types.User, *basemodel.ApiError) {
-	cloudIntegrationUserId := fmt.Sprintf("%s-integration", cloudProvider)
+	cloudIntegrationUser := fmt.Sprintf("%s-integration", cloudProvider)
+	email := fmt.Sprintf("%s@signoz.io", cloudIntegrationUser)

-	integrationUserResult, apiErr := ah.AppDao().GetUser(ctx, cloudIntegrationUserId)
+	// TODO(nitya): there should be orgId here
+	integrationUserResult, apiErr := ah.AppDao().GetUserByEmail(ctx, email)
 	if apiErr != nil {
 		return nil, basemodel.WrapApiError(apiErr, "couldn't look for integration user")
 	}
@@ -170,9 +172,9 @@ func (ah *APIHandler) getOrCreateCloudIntegrationUser(
 	)

 	newUser := &types.User{
-		ID:    cloudIntegrationUserId,
-		Name:  fmt.Sprintf("%s integration", cloudProvider),
-		Email: fmt.Sprintf("%s@signoz.io", cloudIntegrationUserId),
+		ID:    uuid.New().String(),
+		Name:  cloudIntegrationUser,
+		Email: email,
 		TimeAuditable: types.TimeAuditable{
 			CreatedAt: time.Now(),
 		},
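The change above switches the integration user from a deterministic ID ("<provider>-integration") to a random UUID, with the stable e-mail address becoming the lookup key. A generic get-or-create sketch of that pattern; the types and store interface here are assumptions for illustration, not the repository's DAO API:

```go
package integrationuser

import (
	"context"
	"time"

	"github.com/google/uuid"
)

// Illustrative types; the real ones live in pkg/types and the DAO layer.
type User struct {
	ID        string
	Name      string
	Email     string
	CreatedAt time.Time
}

type UserStore interface {
	GetUserByEmail(ctx context.Context, email string) (*User, error) // (nil, nil) when absent
	CreateUser(ctx context.Context, u *User) error
}

// GetOrCreate looks the user up by its stable e-mail address and only
// mints a fresh random UUID when the user does not exist yet.
func GetOrCreate(ctx context.Context, store UserStore, email, name string) (*User, error) {
	u, err := store.GetUserByEmail(ctx, email)
	if err != nil {
		return nil, err
	}
	if u != nil {
		return u, nil
	}
	u = &User{
		ID:        uuid.New().String(),
		Name:      name,
		Email:     email,
		CreatedAt: time.Now(),
	}
	return u, store.CreateUser(ctx, u)
}
```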
@@ -5,16 +5,18 @@ import (
 	"encoding/json"
 	"fmt"
 	"net/http"
+	"slices"
 	"time"

 	"github.com/SigNoz/signoz/ee/query-service/model"
-	"github.com/SigNoz/signoz/ee/types"
+	eeTypes "github.com/SigNoz/signoz/ee/types"
 	"github.com/SigNoz/signoz/pkg/errors"
+	errorsV2 "github.com/SigNoz/signoz/pkg/errors"
 	"github.com/SigNoz/signoz/pkg/http/render"
 	"github.com/SigNoz/signoz/pkg/query-service/auth"
 	baseconstants "github.com/SigNoz/signoz/pkg/query-service/constants"
 	basemodel "github.com/SigNoz/signoz/pkg/query-service/model"
+	"github.com/SigNoz/signoz/pkg/types"
 	"github.com/SigNoz/signoz/pkg/valuer"
 	"github.com/gorilla/mux"
 	"go.uber.org/zap"
@@ -58,7 +60,7 @@ func (ah *APIHandler) createPAT(w http.ResponseWriter, r *http.Request) {
 	ah.Respond(w, &pat)
 }

-func validatePATRequest(req types.GettablePAT) error {
+func validatePATRequest(req eeTypes.GettablePAT) error {
 	if req.Role == "" || (req.Role != baseconstants.ViewerGroup && req.Role != baseconstants.EditorGroup && req.Role != baseconstants.AdminGroup) {
 		return fmt.Errorf("valid role is required")
 	}
@@ -74,12 +76,19 @@ func validatePATRequest(req types.GettablePAT) error {
 func (ah *APIHandler) updatePAT(w http.ResponseWriter, r *http.Request) {
 	ctx := context.Background()

-	req := types.GettablePAT{}
+	req := eeTypes.GettablePAT{}
 	if err := json.NewDecoder(r.Body).Decode(&req); err != nil {
 		RespondError(w, model.BadRequest(err), nil)
 		return
 	}

+	idStr := mux.Vars(r)["id"]
+	id, err := valuer.NewUUID(idStr)
+	if err != nil {
+		render.Error(w, errors.Newf(errors.TypeInvalidInput, errors.CodeInvalidInput, "id is not a valid uuid-v7"))
+		return
+	}
+
 	user, err := auth.GetUserFromReqContext(r.Context())
 	if err != nil {
 		RespondError(w, &model.ApiError{
@@ -89,6 +98,25 @@ func (ah *APIHandler) updatePAT(w http.ResponseWriter, r *http.Request) {
 		return
 	}

+	// get the pat
+	existingPAT, paterr := ah.AppDao().GetPATByID(ctx, user.OrgID, id)
+	if paterr != nil {
+		render.Error(w, errorsV2.Newf(errorsV2.TypeInvalidInput, errorsV2.CodeInvalidInput, paterr.Error()))
+		return
+	}
+
+	// get the user
+	createdByUser, usererr := ah.AppDao().GetUser(ctx, existingPAT.UserID)
+	if usererr != nil {
+		render.Error(w, errorsV2.Newf(errorsV2.TypeInvalidInput, errorsV2.CodeInvalidInput, usererr.Error()))
+		return
+	}
+
+	if slices.Contains(types.AllIntegrationUserEmails, types.IntegrationUserEmail(createdByUser.Email)) {
+		render.Error(w, errorsV2.Newf(errorsV2.TypeInvalidInput, errorsV2.CodeInvalidInput, "integration user pat cannot be updated"))
+		return
+	}
+
 	err = validatePATRequest(req)
 	if err != nil {
 		RespondError(w, model.BadRequest(err), nil)
@@ -96,12 +124,6 @@ func (ah *APIHandler) updatePAT(w http.ResponseWriter, r *http.Request) {
 	}

 	req.UpdatedByUserID = user.ID
-	idStr := mux.Vars(r)["id"]
-	id, err := valuer.NewUUID(idStr)
-	if err != nil {
-		render.Error(w, errors.Newf(errors.TypeInvalidInput, errors.CodeInvalidInput, "id is not a valid uuid-v7"))
-		return
-	}
 	req.UpdatedAt = time.Now()
 	zap.L().Info("Got Update PAT request", zap.Any("pat", req))
 	var apierr basemodel.BaseApiError
@@ -149,6 +171,25 @@ func (ah *APIHandler) revokePAT(w http.ResponseWriter, r *http.Request) {
 		return
 	}

+	// get the pat
+	existingPAT, paterr := ah.AppDao().GetPATByID(ctx, user.OrgID, id)
+	if paterr != nil {
+		render.Error(w, errorsV2.Newf(errorsV2.TypeInvalidInput, errorsV2.CodeInvalidInput, paterr.Error()))
+		return
+	}
+
+	// get the user
+	createdByUser, usererr := ah.AppDao().GetUser(ctx, existingPAT.UserID)
+	if usererr != nil {
+		render.Error(w, errorsV2.Newf(errorsV2.TypeInvalidInput, errorsV2.CodeInvalidInput, usererr.Error()))
+		return
+	}
+
+	if slices.Contains(types.AllIntegrationUserEmails, types.IntegrationUserEmail(createdByUser.Email)) {
+		render.Error(w, errorsV2.Newf(errorsV2.TypeInvalidInput, errorsV2.CodeInvalidInput, "integration user pat cannot be updated"))
+		return
+	}
+
 	zap.L().Info("Revoke PAT with id", zap.String("id", id.StringValue()))
 	if apierr := ah.AppDao().RevokePAT(ctx, user.OrgID, id, user.ID); apierr != nil {
 		RespondError(w, apierr, nil)
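Both handlers now refuse to touch PATs owned by integration (machine) users, detected with a typed-string membership test. A self-contained sketch of that guard; the type and list definitions below are assumptions modeled on the diff, with a hypothetical example address derived from the "<provider>-integration@signoz.io" pattern shown earlier:

```go
package patguard

import "slices"

// IntegrationUserEmail is a defined string type so protected machine
// identities cannot be confused with arbitrary strings.
type IntegrationUserEmail string

// AllIntegrationUserEmails lists protected identities. The entry here
// is hypothetical, following the "<provider>-integration@signoz.io"
// convention from the cloud-integration handler.
var AllIntegrationUserEmails = []IntegrationUserEmail{
	"aws-integration@signoz.io",
}

// IsIntegrationUser reports whether a PAT belongs to a protected
// machine user and therefore must not be updated or revoked via the API.
func IsIntegrationUser(email string) bool {
	return slices.Contains(AllIntegrationUserEmails, IntegrationUserEmail(email))
}
```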
@@ -8,7 +8,6 @@ import (
 	basedao "github.com/SigNoz/signoz/pkg/query-service/dao"
 	baseint "github.com/SigNoz/signoz/pkg/query-service/interfaces"
 	basemodel "github.com/SigNoz/signoz/pkg/query-service/model"
-	ossTypes "github.com/SigNoz/signoz/pkg/types"
 	"github.com/SigNoz/signoz/pkg/types/authtypes"
 	"github.com/SigNoz/signoz/pkg/valuer"
 	"github.com/google/uuid"
@@ -40,7 +39,6 @@ type ModelDao interface {
 	UpdatePAT(ctx context.Context, orgID string, p types.GettablePAT, id valuer.UUID) basemodel.BaseApiError
 	GetPAT(ctx context.Context, pat string) (*types.GettablePAT, basemodel.BaseApiError)
 	GetPATByID(ctx context.Context, orgID string, id valuer.UUID) (*types.GettablePAT, basemodel.BaseApiError)
-	GetUserByPAT(ctx context.Context, orgID string, token string) (*ossTypes.GettableUser, basemodel.BaseApiError)
 	ListPATs(ctx context.Context, orgID string) ([]types.GettablePAT, basemodel.BaseApiError)
 	RevokePAT(ctx context.Context, orgID string, id valuer.UUID, userID string) basemodel.BaseApiError
 }
@@ -4,7 +4,6 @@ import (
 	"context"
 	"fmt"
 	"net/url"
-	"strings"
 	"time"

 	"github.com/SigNoz/signoz/ee/query-service/constants"
@@ -44,7 +43,7 @@ func (m *modelDao) createUserForSAMLRequest(ctx context.Context, email string) (
 	}

 	user := &types.User{
-		ID:       uuid.NewString(),
+		ID:       uuid.New().String(),
 		Name:     "",
 		Email:    email,
 		Password: hash,
@@ -162,12 +161,7 @@ func (m *modelDao) PrecheckLogin(ctx context.Context, email, sourceUrl string) (
 	// find domain from email
 	orgDomain, apierr := m.GetDomainByEmail(ctx, email)
 	if apierr != nil {
-		var emailDomain string
-		emailComponents := strings.Split(email, "@")
-		if len(emailComponents) > 0 {
-			emailDomain = emailComponents[1]
-		}
-		zap.L().Error("failed to get org domain from email", zap.String("emailDomain", emailDomain), zap.Error(apierr.ToError()))
+		zap.L().Error("failed to get org domain from email", zap.String("email", email), zap.Error(apierr.ToError()))
 		return resp, apierr
 	}
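The removed block checked len(emailComponents) > 0 but then indexed element 1, which panics for any address without an "@". The new code sidesteps the parsing entirely by logging the full address. For cases where the domain really is needed, a safe extraction looks like this (illustrative sketch, not repository code):

```go
package emailutil

import "strings"

// Domain returns the part after the first "@" and reports whether an
// "@" was present at all, so callers never index past the split result.
func Domain(email string) (string, bool) {
	_, domain, ok := strings.Cut(email, "@")
	return domain, ok
}
```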
@@ -196,27 +196,3 @@ func (m *modelDao) GetPATByID(ctx context.Context, orgID string, id valuer.UUID)

 	return &patWithUser, nil
 }
-
-// deprecated
-func (m *modelDao) GetUserByPAT(ctx context.Context, orgID string, token string) (*ossTypes.GettableUser, basemodel.BaseApiError) {
-	users := []ossTypes.GettableUser{}
-
-	if err := m.DB().NewSelect().
-		Model(&users).
-		Column("u.id", "u.name", "u.email", "u.password", "u.created_at", "u.profile_picture_url", "u.org_id", "u.group_id").
-		Join("JOIN personal_access_tokens p ON u.id = p.user_id").
-		Where("p.token = ?", token).
-		Where("p.expires_at >= strftime('%s', 'now')").
-		Where("p.org_id = ?", orgID).
-		Scan(ctx); err != nil {
-		return nil, model.InternalError(fmt.Errorf("failed to fetch user from PAT, err: %v", err))
-	}
-
-	if len(users) != 1 {
-		return nil, &model.ApiError{
-			Typ: model.ErrorInternal,
-			Err: fmt.Errorf("found zero or multiple users with same PAT token"),
-		}
-	}
-	return &users[0], nil
-}
@@ -17,13 +17,15 @@ var (
 )

 var (
-	Org  = "org"
-	User = "user"
+	Org              = "org"
+	User             = "user"
+	CloudIntegration = "cloud_integration"
 )

 var (
-	OrgReference  = `("org_id") REFERENCES "organizations" ("id")`
-	UserReference = `("user_id") REFERENCES "users" ("id") ON DELETE CASCADE ON UPDATE CASCADE`
+	OrgReference              = `("org_id") REFERENCES "organizations" ("id")`
+	UserReference             = `("user_id") REFERENCES "users" ("id") ON DELETE CASCADE ON UPDATE CASCADE`
+	CloudIntegrationReference = `("cloud_integration_id") REFERENCES "cloud_integration" ("id") ON DELETE CASCADE`
 )

 type dialect struct {
@@ -211,6 +213,8 @@ func (dialect *dialect) RenameTableAndModifyModel(ctx context.Context, bun bun.I
 			fkReferences = append(fkReferences, OrgReference)
 		} else if reference == User && !slices.Contains(fkReferences, UserReference) {
 			fkReferences = append(fkReferences, UserReference)
+		} else if reference == CloudIntegration && !slices.Contains(fkReferences, CloudIntegrationReference) {
+			fkReferences = append(fkReferences, CloudIntegrationReference)
 		}
 	}
@@ -11,9 +11,12 @@ const logEvent = async (
 	rateLimited?: boolean,
 ): Promise<SuccessResponse<EventSuccessPayloadProps> | ErrorResponse> => {
 	try {
+		// add tenant_url to attributes
+		const { hostname } = window.location;
+		const updatedAttributes = { ...attributes, tenant_url: hostname };
 		const response = await axios.post('/event', {
 			eventName,
-			attributes,
+			attributes: updatedAttributes,
 			eventType: eventType || 'track',
 			rateLimited: rateLimited || false, // TODO: Update this once we have a proper way to handle rate limiting
 		});
@@ -8,6 +8,5 @@ export enum FeatureKeys {
 	PREMIUM_SUPPORT = 'PREMIUM_SUPPORT',
 	ANOMALY_DETECTION = 'ANOMALY_DETECTION',
 	ONBOARDING_V3 = 'ONBOARDING_V3',
-	THIRD_PARTY_API = 'THIRD_PARTY_API',
 	TRACE_FUNNELS = 'TRACE_FUNNELS',
 }
@@ -284,16 +284,6 @@ function SideNav(): JSX.Element {
 		manageLicenseMenuItem,
 	];

-	const isApiMonitoringEnabled = featureFlags?.find(
-		(flag) => flag.name === FeatureKeys.THIRD_PARTY_API,
-	)?.active;
-
-	if (!isApiMonitoringEnabled) {
-		updatedMenuItems = updatedMenuItems.filter(
-			(item) => item.key !== ROUTES.API_MONITORING,
-		);
-	}
-
 	if (isCloudUser || isEnterpriseSelfHostedUser) {
 		const isOnboardingEnabled =
 			featureFlags?.find((feature) => feature.name === FeatureKeys.ONBOARDING)
go.mod (6 changes)
@@ -10,7 +10,8 @@ require (
 	github.com/ClickHouse/clickhouse-go/v2 v2.30.0
 	github.com/DATA-DOG/go-sqlmock v1.5.2
 	github.com/SigNoz/govaluate v0.0.0-20240203125216-988004ccc7fd
-	github.com/SigNoz/signoz-otel-collector v0.111.16
+	github.com/SigNoz/signoz-otel-collector v0.111.39
+	github.com/antlr4-go/antlr/v4 v4.13.1
 	github.com/antonmedv/expr v1.15.3
 	github.com/cespare/xxhash/v2 v2.3.0
 	github.com/coreos/go-oidc/v3 v3.11.0
@@ -89,10 +90,9 @@ require (
 	github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.8.0 // indirect
 	github.com/Azure/azure-sdk-for-go/sdk/internal v1.10.0 // indirect
 	github.com/AzureAD/microsoft-authentication-library-for-go v1.2.2 // indirect
-	github.com/ClickHouse/ch-go v0.61.5 // indirect
+	github.com/ClickHouse/ch-go v0.63.1 // indirect
 	github.com/alecthomas/units v0.0.0-20240927000941-0f3dac36c52b // indirect
 	github.com/andybalholm/brotli v1.1.1 // indirect
-	github.com/antlr4-go/antlr/v4 v4.13.1 // indirect
 	github.com/armon/go-metrics v0.4.1 // indirect
 	github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2 // indirect
 	github.com/aws/aws-sdk-go v1.55.5 // indirect
go.sum (14 changes)
@@ -85,8 +85,8 @@ github.com/AzureAD/microsoft-authentication-library-for-go v1.2.2 h1:XHOnouVk1mx
 github.com/AzureAD/microsoft-authentication-library-for-go v1.2.2/go.mod h1:wP83P5OoQ5p6ip3ScPr0BAq0BvuPAvacpEuSzyouqAI=
 github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
 github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo=
-github.com/ClickHouse/ch-go v0.61.5 h1:zwR8QbYI0tsMiEcze/uIMK+Tz1D3XZXLdNrlaOpeEI4=
-github.com/ClickHouse/ch-go v0.61.5/go.mod h1:s1LJW/F/LcFs5HJnuogFMta50kKDO0lf9zzfrbl0RQg=
+github.com/ClickHouse/ch-go v0.63.1 h1:s2JyZvWLTCSAGdtjMBBmAgQQHMco6pawLJMOXi0FODM=
+github.com/ClickHouse/ch-go v0.63.1/go.mod h1:I1kJJCL3WJcBMGe1m+HVK0+nREaG+JOYYBWjrDrF3R0=
 github.com/ClickHouse/clickhouse-go/v2 v2.30.0 h1:AG4D/hW39qa58+JHQIFOSnxyL46H6h2lrmGGk17dhFo=
 github.com/ClickHouse/clickhouse-go/v2 v2.30.0/go.mod h1:i9ZQAojcayW3RsdCb3YR+n+wC2h65eJsZCscZ1Z1wyo=
 github.com/Code-Hex/go-generics-cache v1.5.1 h1:6vhZGc5M7Y/YD8cIUcY8kcuQLB4cHR7U+0KMqAA0KcU=
@@ -100,8 +100,10 @@ github.com/Microsoft/go-winio v0.6.2/go.mod h1:yd8OoFMLzJbo9gZq8j5qaps8bJ9aShtEA
 github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU=
 github.com/SigNoz/govaluate v0.0.0-20240203125216-988004ccc7fd h1:Bk43AsDYe0fhkbj57eGXx8H3ZJ4zhmQXBnrW523ktj8=
 github.com/SigNoz/govaluate v0.0.0-20240203125216-988004ccc7fd/go.mod h1:nxRcH/OEdM8QxzH37xkGzomr1O0JpYBRS6pwjsWW6Pc=
-github.com/SigNoz/signoz-otel-collector v0.111.16 h1:535uKH5Oux+35EsI+L3C6pnAP/Ye0PTCbVizXoL+VqE=
-github.com/SigNoz/signoz-otel-collector v0.111.16/go.mod h1:HJ4m0LY1MPsuZmuRF7Ixb+bY8rxgRzI0VXzOedESsjg=
+github.com/SigNoz/signoz-otel-collector v0.111.39-beta.1 h1:ZpSNrOZBOH2iCJIPeER5X0mfxOe64yP3JRX7FzBNfwY=
+github.com/SigNoz/signoz-otel-collector v0.111.39-beta.1/go.mod h1:DCu/D+lqhsPNSGS4IMD+4gn7q06TGzOCKazSy+GURVc=
+github.com/SigNoz/signoz-otel-collector v0.111.39 h1:Dl8QqZNAsj2atxP572OzsszPK0XPpd3LLPNPRAUJ5wo=
+github.com/SigNoz/signoz-otel-collector v0.111.39/go.mod h1:DCu/D+lqhsPNSGS4IMD+4gn7q06TGzOCKazSy+GURVc=
 github.com/afex/hystrix-go v0.0.0-20180502004556-fa1af6a1f4f5/go.mod h1:SkGFH1ia65gfNATL8TAiHDNxPzPdmEL5uirI2Uyuz6c=
 github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc=
 github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc=
@@ -820,8 +822,8 @@ github.com/prometheus/prometheus v0.300.1/go.mod h1:gtTPY/XVyCdqqnjA3NzDMb0/nc5H
 github.com/puzpuzpuz/xsync/v3 v3.5.0 h1:i+cMcpEDY1BkNm7lPDkCtE4oElsYLn+EKF8kAu2vXT4=
 github.com/puzpuzpuz/xsync/v3 v3.5.0/go.mod h1:VjzYrABPabuM4KyBh1Ftq6u8nhwY5tBPKP9jpmh0nnA=
 github.com/rcrowley/go-metrics v0.0.0-20201227073835-cf1acfcdf475/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4=
-github.com/redis/go-redis/v9 v9.6.1 h1:HHDteefn6ZkTtY5fGUE8tj8uy85AHk6zP7CpzIAM0y4=
-github.com/redis/go-redis/v9 v9.6.1/go.mod h1:0C0c6ycQsdpVNQpxb1njEQIqkx5UcsM8FJCQLgE9+RA=
+github.com/redis/go-redis/v9 v9.6.3 h1:8Dr5ygF1QFXRxIH/m3Xg9MMG1rS8YCtAgosrsewT6i0=
+github.com/redis/go-redis/v9 v9.6.3/go.mod h1:0C0c6ycQsdpVNQpxb1njEQIqkx5UcsM8FJCQLgE9+RA=
 github.com/rhnvrm/simples3 v0.6.1/go.mod h1:Y+3vYm2V7Y4VijFoJHHTrja6OgPrJ2cBti8dPGkC3sA=
 github.com/robfig/cron/v3 v3.0.1 h1:WdRxkvbJztn8LMz/QEvLN5sBU+xKpSqwwUO1Pjr4qDs=
 github.com/robfig/cron/v3 v3.0.1/go.mod h1:eQICP3HwyT7UooqI/z+Ov+PtYAWygg1TEWWzGIFLtro=
@@ -104,7 +104,7 @@ fullText
  * ...
  */
 functionCall
-    : (HAS | HASANY | HASALL | HASNONE) LPAREN functionParamList RPAREN
+    : (HAS | HASANY | HASALL) LPAREN functionParamList RPAREN
     ;

 // Function parameters can be keys, single scalar values, or arrays
@@ -182,7 +182,6 @@ OR : [Oo][Rr] ;
 HAS : [Hh][Aa][Ss] ;
 HASANY : [Hh][Aa][Ss][Aa][Nn][Yy] ;
 HASALL : [Hh][Aa][Ss][Aa][Ll][Ll] ;
-HASNONE : [Hh][Aa][Ss][Nn][Oo][Nn][Ee] ;

 // Potential boolean constants
 BOOL
@@ -205,7 +204,7 @@ QUOTED_TEXT
 // Keys can have letters, digits, underscores, dots, and bracket pairs
 // e.g. service.name, service.namespace, db.queries[].query_duration
 KEY
-    : [a-zA-Z0-9_] [a-zA-Z0-9_.[\]]*
+    : [a-zA-Z0-9_] [a-zA-Z0-9_.*[\]]*
     ;

 // Ignore whitespace
@@ -218,4 +217,4 @@ fragment DIGIT
     : [0-9]
    ;

-FREETEXT : (~[ \t\r\n=()'"<>![\]])+ ;
+FREETEXT : (~[ \t\r\n=()'"<>!,[\]])+ ;
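The KEY rule change adds '*' to the tail character class, so wildcard keys lex as a single token. A Go-regexp approximation of the rule before and after, useful for checking examples offline; the authoritative definition is the ANTLR grammar above, and this program is illustrative only:

```go
package main

import (
	"fmt"
	"regexp"
)

// keyOld and keyNew approximate the lexer's KEY rule before and after
// the change; the only difference is that the new class admits '*'.
var (
	keyOld = regexp.MustCompile(`^[a-zA-Z0-9_][a-zA-Z0-9_.\[\]]*$`)
	keyNew = regexp.MustCompile(`^[a-zA-Z0-9_][a-zA-Z0-9_.*\[\]]*$`)
)

func main() {
	for _, k := range []string{"service.name", "db.queries[].query_duration", "service.*"} {
		fmt.Printf("%-28s old=%v new=%v\n", k, keyOld.MatchString(k), keyNew.MatchString(k))
	}
}
```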
@@ -25,6 +25,25 @@ type postableAlert struct {
 	Receivers []string `json:"receivers"`
 }

+func (pa *postableAlert) MarshalJSON() ([]byte, error) {
+	// Marshal the embedded PostableAlert to get its JSON representation.
+	alertJSON, err := json.Marshal(pa.PostableAlert)
+	if err != nil {
+		return nil, err
+	}
+
+	// Unmarshal that JSON into a map so we can add extra fields.
+	var m map[string]interface{}
+	if err := json.Unmarshal(alertJSON, &m); err != nil {
+		return nil, err
+	}
+
+	// Add the Receivers field.
+	m["receivers"] = pa.Receivers
+
+	return json.Marshal(m)
+}
+
 const (
 	alertsPath string = "/v1/alerts"
 	routesPath string = "/v1/routes"
pkg/alertmanager/legacyalertmanager/provider_test.go (35 changes, new file)
@@ -0,0 +1,35 @@
package legacyalertmanager

import (
	"encoding/json"
	"testing"

	"github.com/SigNoz/signoz/pkg/types/alertmanagertypes"
	"github.com/prometheus/alertmanager/api/v2/models"
	"github.com/stretchr/testify/assert"
)

func TestProvider_TestAlert(t *testing.T) {
	pa := &postableAlert{
		PostableAlert: &alertmanagertypes.PostableAlert{
			Alert: models.Alert{
				Labels: models.LabelSet{
					"alertname": "test",
				},
				GeneratorURL: "http://localhost:9090/graph?g0.expr=up&g0.tab=1",
			},
			Annotations: models.LabelSet{
				"summary": "test",
			},
		},
		Receivers: []string{"receiver1", "receiver2"},
	}

	body, err := json.Marshal(pa)
	if err != nil {
		t.Fatalf("failed to marshal postable alert: %v", err)
	}

	assert.Contains(t, string(body), "receiver1")
	assert.Contains(t, string(body), "receiver2")
}
File diff suppressed because one or more lines are too long
@@ -25,13 +25,12 @@ OR=24
 HAS=25
 HASANY=26
 HASALL=27
-HASNONE=28
-BOOL=29
-NUMBER=30
-QUOTED_TEXT=31
-KEY=32
-WS=33
-FREETEXT=34
+BOOL=28
+NUMBER=29
+QUOTED_TEXT=30
+KEY=31
+WS=32
+FREETEXT=33
 '('=1
 ')'=2
 '['=3
File diff suppressed because one or more lines are too long
@@ -25,13 +25,12 @@ OR=24
 HAS=25
 HASANY=26
 HASALL=27
-HASNONE=28
-BOOL=29
-NUMBER=30
-QUOTED_TEXT=31
-KEY=32
-WS=33
-FREETEXT=34
+BOOL=28
+NUMBER=29
+QUOTED_TEXT=30
+KEY=31
+WS=32
+FREETEXT=33
 '('=1
 ')'=2
 '['=3
@@ -50,150 +50,146 @@ func filterquerylexerLexerInit() {
 		"", "LPAREN", "RPAREN", "LBRACK", "RBRACK", "COMMA", "EQUALS", "NOT_EQUALS",
 		"NEQ", "LT", "LE", "GT", "GE", "LIKE", "NOT_LIKE", "ILIKE", "NOT_ILIKE",
 		"BETWEEN", "EXISTS", "REGEXP", "CONTAINS", "IN", "NOT", "AND", "OR",
-		"HAS", "HASANY", "HASALL", "HASNONE", "BOOL", "NUMBER", "QUOTED_TEXT",
-		"KEY", "WS", "FREETEXT",
+		"HAS", "HASANY", "HASALL", "BOOL", "NUMBER", "QUOTED_TEXT", "KEY", "WS",
+		"FREETEXT",
 	}
 	staticData.RuleNames = []string{
 		"LPAREN", "RPAREN", "LBRACK", "RBRACK", "COMMA", "EQUALS", "NOT_EQUALS",
 		"NEQ", "LT", "LE", "GT", "GE", "LIKE", "NOT_LIKE", "ILIKE", "NOT_ILIKE",
 		"BETWEEN", "EXISTS", "REGEXP", "CONTAINS", "IN", "NOT", "AND", "OR",
-		"HAS", "HASANY", "HASALL", "HASNONE", "BOOL", "NUMBER", "QUOTED_TEXT",
-		"KEY", "WS", "DIGIT", "FREETEXT",
+		"HAS", "HASANY", "HASALL", "BOOL", "NUMBER", "QUOTED_TEXT", "KEY", "WS",
+		"DIGIT", "FREETEXT",
 	}
 	staticData.PredictionContextCache = antlr.NewPredictionContextCache()
 	staticData.serializedATN = []int32{
-		4, 0, 34, 280, 6, -1, 2, 0, 7, 0, 2, 1, 7, 1, 2, 2, 7, 2, 2, 3, 7, 3, 2,
+		4, 0, 33, 270, 6, -1, 2, 0, 7, 0, 2, 1, 7, 1, 2, 2, 7, 2, 2, 3, 7, 3, 2,
 		4, 7, 4, 2, 5, 7, 5, 2, 6, 7, 6, 2, 7, 7, 7, 2, 8, 7, 8, 2, 9, 7, 9, 2,
 		10, 7, 10, 2, 11, 7, 11, 2, 12, 7, 12, 2, 13, 7, 13, 2, 14, 7, 14, 2, 15,
 		7, 15, 2, 16, 7, 16, 2, 17, 7, 17, 2, 18, 7, 18, 2, 19, 7, 19, 2, 20, 7,
 		20, 2, 21, 7, 21, 2, 22, 7, 22, 2, 23, 7, 23, 2, 24, 7, 24, 2, 25, 7, 25,
 		2, 26, 7, 26, 2, 27, 7, 27, 2, 28, 7, 28, 2, 29, 7, 29, 2, 30, 7, 30, 2,
-		31, 7, 31, 2, 32, 7, 32, 2, 33, 7, 33, 2, 34, 7, 34, 1, 0, 1, 0, 1, 1,
-		1, 1, 1, 2, 1, 2, 1, 3, 1, 3, 1, 4, 1, 4, 1, 5, 1, 5, 1, 5, 3, 5, 85, 8,
-		5, 1, 6, 1, 6, 1, 6, 1, 7, 1, 7, 1, 7, 1, 8, 1, 8, 1, 9, 1, 9, 1, 9, 1,
-		10, 1, 10, 1, 11, 1, 11, 1, 11, 1, 12, 1, 12, 1, 12, 1, 12, 1, 12, 1, 13,
-		1, 13, 1, 13, 1, 13, 4, 13, 112, 8, 13, 11, 13, 12, 13, 113, 1, 13, 1,
-		13, 1, 13, 1, 13, 1, 13, 1, 14, 1, 14, 1, 14, 1, 14, 1, 14, 1, 14, 1, 15,
-		1, 15, 1, 15, 1, 15, 4, 15, 131, 8, 15, 11, 15, 12, 15, 132, 1, 15, 1,
-		15, 1, 15, 1, 15, 1, 15, 1, 15, 1, 16, 1, 16, 1, 16, 1, 16, 1, 16, 1, 16,
-		1, 16, 1, 16, 1, 17, 1, 17, 1, 17, 1, 17, 1, 17, 1, 17, 3, 17, 155, 8,
-		17, 1, 18, 1, 18, 1, 18, 1, 18, 1, 18, 1, 18, 1, 18, 1, 19, 1, 19, 1, 19,
-		1, 19, 1, 19, 1, 19, 1, 19, 1, 19, 3, 19, 172, 8, 19, 1, 20, 1, 20, 1,
-		20, 1, 21, 1, 21, 1, 21, 1, 21, 1, 22, 1, 22, 1, 22, 1, 22, 1, 23, 1, 23,
-		1, 23, 1, 24, 1, 24, 1, 24, 1, 24, 1, 25, 1, 25, 1, 25, 1, 25, 1, 25, 1,
-		25, 1, 25, 1, 26, 1, 26, 1, 26, 1, 26, 1, 26, 1, 26, 1, 26, 1, 27, 1, 27,
-		1, 27, 1, 27, 1, 27, 1, 27, 1, 27, 1, 27, 1, 28, 1, 28, 1, 28, 1, 28, 1,
-		28, 1, 28, 1, 28, 1, 28, 1, 28, 3, 28, 223, 8, 28, 1, 29, 4, 29, 226, 8,
-		29, 11, 29, 12, 29, 227, 1, 29, 1, 29, 4, 29, 232, 8, 29, 11, 29, 12, 29,
-		233, 3, 29, 236, 8, 29, 1, 30, 1, 30, 1, 30, 1, 30, 5, 30, 242, 8, 30,
-		10, 30, 12, 30, 245, 9, 30, 1, 30, 1, 30, 1, 30, 1, 30, 1, 30, 5, 30, 252,
-		8, 30, 10, 30, 12, 30, 255, 9, 30, 1, 30, 3, 30, 258, 8, 30, 1, 31, 1,
-		31, 5, 31, 262, 8, 31, 10, 31, 12, 31, 265, 9, 31, 1, 32, 4, 32, 268, 8,
-		32, 11, 32, 12, 32, 269, 1, 32, 1, 32, 1, 33, 1, 33, 1, 34, 4, 34, 277,
-		8, 34, 11, 34, 12, 34, 278, 0, 0, 35, 1, 1, 3, 2, 5, 3, 7, 4, 9, 5, 11,
+		31, 7, 31, 2, 32, 7, 32, 2, 33, 7, 33, 1, 0, 1, 0, 1, 1, 1, 1, 1, 2, 1,
+		2, 1, 3, 1, 3, 1, 4, 1, 4, 1, 5, 1, 5, 1, 5, 3, 5, 83, 8, 5, 1, 6, 1, 6,
+		1, 6, 1, 7, 1, 7, 1, 7, 1, 8, 1, 8, 1, 9, 1, 9, 1, 9, 1, 10, 1, 10, 1,
+		11, 1, 11, 1, 11, 1, 12, 1, 12, 1, 12, 1, 12, 1, 12, 1, 13, 1, 13, 1, 13,
+		1, 13, 4, 13, 110, 8, 13, 11, 13, 12, 13, 111, 1, 13, 1, 13, 1, 13, 1,
+		13, 1, 13, 1, 14, 1, 14, 1, 14, 1, 14, 1, 14, 1, 14, 1, 15, 1, 15, 1, 15,
+		1, 15, 4, 15, 129, 8, 15, 11, 15, 12, 15, 130, 1, 15, 1, 15, 1, 15, 1,
+		15, 1, 15, 1, 15, 1, 16, 1, 16, 1, 16, 1, 16, 1, 16, 1, 16, 1, 16, 1, 16,
+		1, 17, 1, 17, 1, 17, 1, 17, 1, 17, 1, 17, 3, 17, 153, 8, 17, 1, 18, 1,
+		18, 1, 18, 1, 18, 1, 18, 1, 18, 1, 18, 1, 19, 1, 19, 1, 19, 1, 19, 1, 19,
+		1, 19, 1, 19, 1, 19, 3, 19, 170, 8, 19, 1, 20, 1, 20, 1, 20, 1, 21, 1,
+		21, 1, 21, 1, 21, 1, 22, 1, 22, 1, 22, 1, 22, 1, 23, 1, 23, 1, 23, 1, 24,
+		1, 24, 1, 24, 1, 24, 1, 25, 1, 25, 1, 25, 1, 25, 1, 25, 1, 25, 1, 25, 1,
+		26, 1, 26, 1, 26, 1, 26, 1, 26, 1, 26, 1, 26, 1, 27, 1, 27, 1, 27, 1, 27,
+		1, 27, 1, 27, 1, 27, 1, 27, 1, 27, 3, 27, 213, 8, 27, 1, 28, 4, 28, 216,
+		8, 28, 11, 28, 12, 28, 217, 1, 28, 1, 28, 4, 28, 222, 8, 28, 11, 28, 12,
+		28, 223, 3, 28, 226, 8, 28, 1, 29, 1, 29, 1, 29, 1, 29, 5, 29, 232, 8,
+		29, 10, 29, 12, 29, 235, 9, 29, 1, 29, 1, 29, 1, 29, 1, 29, 1, 29, 5, 29,
+		242, 8, 29, 10, 29, 12, 29, 245, 9, 29, 1, 29, 3, 29, 248, 8, 29, 1, 30,
+		1, 30, 5, 30, 252, 8, 30, 10, 30, 12, 30, 255, 9, 30, 1, 31, 4, 31, 258,
+		8, 31, 11, 31, 12, 31, 259, 1, 31, 1, 31, 1, 32, 1, 32, 1, 33, 4, 33, 267,
+		8, 33, 11, 33, 12, 33, 268, 0, 0, 34, 1, 1, 3, 2, 5, 3, 7, 4, 9, 5, 11,
 		6, 13, 7, 15, 8, 17, 9, 19, 10, 21, 11, 23, 12, 25, 13, 27, 14, 29, 15,
 		31, 16, 33, 17, 35, 18, 37, 19, 39, 20, 41, 21, 43, 22, 45, 23, 47, 24,
-		49, 25, 51, 26, 53, 27, 55, 28, 57, 29, 59, 30, 61, 31, 63, 32, 65, 33,
-		67, 0, 69, 34, 1, 0, 29, 2, 0, 76, 76, 108, 108, 2, 0, 73, 73, 105, 105,
-		2, 0, 75, 75, 107, 107, 2, 0, 69, 69, 101, 101, 2, 0, 78, 78, 110, 110,
-		2, 0, 79, 79, 111, 111, 2, 0, 84, 84, 116, 116, 2, 0, 9, 9, 32, 32, 2,
-		0, 66, 66, 98, 98, 2, 0, 87, 87, 119, 119, 2, 0, 88, 88, 120, 120, 2, 0,
-		83, 83, 115, 115, 2, 0, 82, 82, 114, 114, 2, 0, 71, 71, 103, 103, 2, 0,
-		80, 80, 112, 112, 2, 0, 67, 67, 99, 99, 2, 0, 65, 65, 97, 97, 2, 0, 68,
-		68, 100, 100, 2, 0, 72, 72, 104, 104, 2, 0, 89, 89, 121, 121, 2, 0, 85,
-		85, 117, 117, 2, 0, 70, 70, 102, 102, 2, 0, 34, 34, 92, 92, 2, 0, 39, 39,
-		92, 92, 4, 0, 48, 57, 65, 90, 95, 95, 97, 122, 6, 0, 46, 46, 48, 57, 65,
+		49, 25, 51, 26, 53, 27, 55, 28, 57, 29, 59, 30, 61, 31, 63, 32, 65, 33,
+		49, 25, 51, 26, 53, 27, 55, 28, 57, 29, 59, 30, 61, 31, 63, 32, 65, 0,
+		67, 33, 1, 0, 29, 2, 0, 76, 76, 108, 108, 2, 0, 73, 73, 105, 105, 2, 0,
+		75, 75, 107, 107, 2, 0, 69, 69, 101, 101, 2, 0, 78, 78, 110, 110, 2, 0,
+		79, 79, 111, 111, 2, 0, 84, 84, 116, 116, 2, 0, 9, 9, 32, 32, 2, 0, 66,
+		66, 98, 98, 2, 0, 87, 87, 119, 119, 2, 0, 88, 88, 120, 120, 2, 0, 83, 83,
+		115, 115, 2, 0, 82, 82, 114, 114, 2, 0, 71, 71, 103, 103, 2, 0, 80, 80,
+		112, 112, 2, 0, 67, 67, 99, 99, 2, 0, 65, 65, 97, 97, 2, 0, 68, 68, 100,
+		100, 2, 0, 72, 72, 104, 104, 2, 0, 89, 89, 121, 121, 2, 0, 85, 85, 117,
+		117, 2, 0, 70, 70, 102, 102, 2, 0, 34, 34, 92, 92, 2, 0, 39, 39, 92, 92,
+		4, 0, 48, 57, 65, 90, 95, 95, 97, 122, 7, 0, 42, 42, 46, 46, 48, 57, 65,
 		91, 93, 93, 95, 95, 97, 122, 3, 0, 9, 10, 13, 13, 32, 32, 1, 0, 48, 57,
 		7, 0, 9, 10, 13, 13, 32, 34, 39, 41, 60, 62, 91, 91, 93, 93, 295, 0, 1,
 		1, 0, 0, 0, 0, 3, 1, 0, 0, 0, 0, 5, 1, 0, 0, 0, 0, 7, 1, 0, 0, 0, 0, 9,
 		1, 0, 0, 0, 0, 11, 1, 0, 0, 0, 0, 13, 1, 0, 0, 0, 0, 15, 1, 0, 0, 0, 0,
 		17, 1, 0, 0, 0, 0, 19, 1, 0, 0, 0, 0, 21, 1, 0, 0, 0, 0, 23, 1, 0, 0, 0,
 		0, 25, 1, 0, 0, 0, 0, 27, 1, 0, 0, 0, 0, 29, 1, 0, 0, 0, 0, 31, 1, 0, 0,
 		0, 0, 33, 1, 0, 0, 0, 0, 35, 1, 0, 0, 0, 0, 37, 1, 0, 0, 0, 0, 39, 1, 0,
 		0, 0, 0, 41, 1, 0, 0, 0, 0, 43, 1, 0, 0, 0, 0, 45, 1, 0, 0, 0, 0, 47, 1,
 		0, 0, 0, 0, 49, 1, 0, 0, 0, 0, 51, 1, 0, 0, 0, 0, 53, 1, 0, 0, 0, 0, 55,
 		1, 0, 0, 0, 0, 57, 1, 0, 0, 0, 0, 59, 1, 0, 0, 0, 0, 61, 1, 0, 0, 0, 0,
 		63, 1, 0, 0, 0, 0, 65, 1, 0, 0, 0, 0, 69, 1, 0, 0, 0, 1, 71, 1, 0, 0, 0,
 		3, 73, 1, 0, 0, 0, 5, 75, 1, 0, 0, 0, 7, 77, 1, 0, 0, 0, 9, 79, 1, 0, 0,
 		0, 11, 84, 1, 0, 0, 0, 13, 86, 1, 0, 0, 0, 15, 89, 1, 0, 0, 0, 17, 92,
 		1, 0, 0, 0, 19, 94, 1, 0, 0, 0, 21, 97, 1, 0, 0, 0, 23, 99, 1, 0, 0, 0,
 		25, 102, 1, 0, 0, 0, 27, 107, 1, 0, 0, 0, 29, 120, 1, 0, 0, 0, 31, 126,
 		1, 0, 0, 0, 33, 140, 1, 0, 0, 0, 35, 148, 1, 0, 0, 0, 37, 156, 1, 0, 0,
 		0, 39, 163, 1, 0, 0, 0, 41, 173, 1, 0, 0, 0, 43, 176, 1, 0, 0, 0, 45, 180,
0, 39, 163, 1, 0, 0, 0, 41, 173, 1, 0, 0, 0, 43, 176, 1, 0, 0, 0, 45, 180,
|
||||
1, 0, 0, 0, 47, 184, 1, 0, 0, 0, 49, 187, 1, 0, 0, 0, 51, 191, 1, 0, 0,
|
||||
0, 53, 198, 1, 0, 0, 0, 55, 205, 1, 0, 0, 0, 57, 222, 1, 0, 0, 0, 59, 225,
|
||||
1, 0, 0, 0, 61, 257, 1, 0, 0, 0, 63, 259, 1, 0, 0, 0, 65, 267, 1, 0, 0,
|
||||
0, 67, 273, 1, 0, 0, 0, 69, 276, 1, 0, 0, 0, 71, 72, 5, 40, 0, 0, 72, 2,
|
||||
1, 0, 0, 0, 73, 74, 5, 41, 0, 0, 74, 4, 1, 0, 0, 0, 75, 76, 5, 91, 0, 0,
|
||||
76, 6, 1, 0, 0, 0, 77, 78, 5, 93, 0, 0, 78, 8, 1, 0, 0, 0, 79, 80, 5, 44,
|
||||
0, 0, 80, 10, 1, 0, 0, 0, 81, 85, 5, 61, 0, 0, 82, 83, 5, 61, 0, 0, 83,
|
||||
85, 5, 61, 0, 0, 84, 81, 1, 0, 0, 0, 84, 82, 1, 0, 0, 0, 85, 12, 1, 0,
|
||||
0, 0, 86, 87, 5, 33, 0, 0, 87, 88, 5, 61, 0, 0, 88, 14, 1, 0, 0, 0, 89,
|
||||
90, 5, 60, 0, 0, 90, 91, 5, 62, 0, 0, 91, 16, 1, 0, 0, 0, 92, 93, 5, 60,
|
||||
0, 0, 93, 18, 1, 0, 0, 0, 94, 95, 5, 60, 0, 0, 95, 96, 5, 61, 0, 0, 96,
|
||||
20, 1, 0, 0, 0, 97, 98, 5, 62, 0, 0, 98, 22, 1, 0, 0, 0, 99, 100, 5, 62,
|
||||
0, 0, 100, 101, 5, 61, 0, 0, 101, 24, 1, 0, 0, 0, 102, 103, 7, 0, 0, 0,
|
||||
103, 104, 7, 1, 0, 0, 104, 105, 7, 2, 0, 0, 105, 106, 7, 3, 0, 0, 106,
|
||||
26, 1, 0, 0, 0, 107, 108, 7, 4, 0, 0, 108, 109, 7, 5, 0, 0, 109, 111, 7,
|
||||
6, 0, 0, 110, 112, 7, 7, 0, 0, 111, 110, 1, 0, 0, 0, 112, 113, 1, 0, 0,
|
||||
0, 113, 111, 1, 0, 0, 0, 113, 114, 1, 0, 0, 0, 114, 115, 1, 0, 0, 0, 115,
|
||||
116, 7, 0, 0, 0, 116, 117, 7, 1, 0, 0, 117, 118, 7, 2, 0, 0, 118, 119,
|
||||
7, 3, 0, 0, 119, 28, 1, 0, 0, 0, 120, 121, 7, 1, 0, 0, 121, 122, 7, 0,
|
||||
0, 0, 122, 123, 7, 1, 0, 0, 123, 124, 7, 2, 0, 0, 124, 125, 7, 3, 0, 0,
|
||||
125, 30, 1, 0, 0, 0, 126, 127, 7, 4, 0, 0, 127, 128, 7, 5, 0, 0, 128, 130,
|
||||
7, 6, 0, 0, 129, 131, 7, 7, 0, 0, 130, 129, 1, 0, 0, 0, 131, 132, 1, 0,
|
||||
0, 0, 132, 130, 1, 0, 0, 0, 132, 133, 1, 0, 0, 0, 133, 134, 1, 0, 0, 0,
|
||||
134, 135, 7, 1, 0, 0, 135, 136, 7, 0, 0, 0, 136, 137, 7, 1, 0, 0, 137,
|
||||
138, 7, 2, 0, 0, 138, 139, 7, 3, 0, 0, 139, 32, 1, 0, 0, 0, 140, 141, 7,
|
||||
8, 0, 0, 141, 142, 7, 3, 0, 0, 142, 143, 7, 6, 0, 0, 143, 144, 7, 9, 0,
|
||||
0, 144, 145, 7, 3, 0, 0, 145, 146, 7, 3, 0, 0, 146, 147, 7, 4, 0, 0, 147,
|
||||
34, 1, 0, 0, 0, 148, 149, 7, 3, 0, 0, 149, 150, 7, 10, 0, 0, 150, 151,
|
||||
7, 1, 0, 0, 151, 152, 7, 11, 0, 0, 152, 154, 7, 6, 0, 0, 153, 155, 7, 11,
|
||||
0, 0, 154, 153, 1, 0, 0, 0, 154, 155, 1, 0, 0, 0, 155, 36, 1, 0, 0, 0,
|
||||
156, 157, 7, 12, 0, 0, 157, 158, 7, 3, 0, 0, 158, 159, 7, 13, 0, 0, 159,
|
||||
160, 7, 3, 0, 0, 160, 161, 7, 10, 0, 0, 161, 162, 7, 14, 0, 0, 162, 38,
|
||||
1, 0, 0, 0, 163, 164, 7, 15, 0, 0, 164, 165, 7, 5, 0, 0, 165, 166, 7, 4,
|
||||
0, 0, 166, 167, 7, 6, 0, 0, 167, 168, 7, 16, 0, 0, 168, 169, 7, 1, 0, 0,
|
||||
169, 171, 7, 4, 0, 0, 170, 172, 7, 11, 0, 0, 171, 170, 1, 0, 0, 0, 171,
|
||||
172, 1, 0, 0, 0, 172, 40, 1, 0, 0, 0, 173, 174, 7, 1, 0, 0, 174, 175, 7,
|
||||
4, 0, 0, 175, 42, 1, 0, 0, 0, 176, 177, 7, 4, 0, 0, 177, 178, 7, 5, 0,
|
||||
0, 178, 179, 7, 6, 0, 0, 179, 44, 1, 0, 0, 0, 180, 181, 7, 16, 0, 0, 181,
|
||||
182, 7, 4, 0, 0, 182, 183, 7, 17, 0, 0, 183, 46, 1, 0, 0, 0, 184, 185,
|
||||
7, 5, 0, 0, 185, 186, 7, 12, 0, 0, 186, 48, 1, 0, 0, 0, 187, 188, 7, 18,
|
||||
0, 0, 188, 189, 7, 16, 0, 0, 189, 190, 7, 11, 0, 0, 190, 50, 1, 0, 0, 0,
|
||||
191, 192, 7, 18, 0, 0, 192, 193, 7, 16, 0, 0, 193, 194, 7, 11, 0, 0, 194,
|
||||
195, 7, 16, 0, 0, 195, 196, 7, 4, 0, 0, 196, 197, 7, 19, 0, 0, 197, 52,
|
||||
1, 0, 0, 0, 198, 199, 7, 18, 0, 0, 199, 200, 7, 16, 0, 0, 200, 201, 7,
|
||||
11, 0, 0, 201, 202, 7, 16, 0, 0, 202, 203, 7, 0, 0, 0, 203, 204, 7, 0,
|
||||
0, 0, 204, 54, 1, 0, 0, 0, 205, 206, 7, 18, 0, 0, 206, 207, 7, 16, 0, 0,
|
||||
207, 208, 7, 11, 0, 0, 208, 209, 7, 4, 0, 0, 209, 210, 7, 5, 0, 0, 210,
|
||||
211, 7, 4, 0, 0, 211, 212, 7, 3, 0, 0, 212, 56, 1, 0, 0, 0, 213, 214, 7,
|
||||
6, 0, 0, 214, 215, 7, 12, 0, 0, 215, 216, 7, 20, 0, 0, 216, 223, 7, 3,
|
||||
0, 0, 217, 218, 7, 21, 0, 0, 218, 219, 7, 16, 0, 0, 219, 220, 7, 0, 0,
|
||||
0, 220, 221, 7, 11, 0, 0, 221, 223, 7, 3, 0, 0, 222, 213, 1, 0, 0, 0, 222,
|
||||
217, 1, 0, 0, 0, 223, 58, 1, 0, 0, 0, 224, 226, 3, 67, 33, 0, 225, 224,
|
||||
1, 0, 0, 0, 226, 227, 1, 0, 0, 0, 227, 225, 1, 0, 0, 0, 227, 228, 1, 0,
|
||||
0, 0, 228, 235, 1, 0, 0, 0, 229, 231, 5, 46, 0, 0, 230, 232, 3, 67, 33,
|
||||
0, 231, 230, 1, 0, 0, 0, 232, 233, 1, 0, 0, 0, 233, 231, 1, 0, 0, 0, 233,
|
||||
234, 1, 0, 0, 0, 234, 236, 1, 0, 0, 0, 235, 229, 1, 0, 0, 0, 235, 236,
|
||||
1, 0, 0, 0, 236, 60, 1, 0, 0, 0, 237, 243, 5, 34, 0, 0, 238, 242, 8, 22,
|
||||
0, 0, 239, 240, 5, 92, 0, 0, 240, 242, 9, 0, 0, 0, 241, 238, 1, 0, 0, 0,
|
||||
241, 239, 1, 0, 0, 0, 242, 245, 1, 0, 0, 0, 243, 241, 1, 0, 0, 0, 243,
|
||||
244, 1, 0, 0, 0, 244, 246, 1, 0, 0, 0, 245, 243, 1, 0, 0, 0, 246, 258,
|
||||
5, 34, 0, 0, 247, 253, 5, 39, 0, 0, 248, 252, 8, 23, 0, 0, 249, 250, 5,
|
||||
92, 0, 0, 250, 252, 9, 0, 0, 0, 251, 248, 1, 0, 0, 0, 251, 249, 1, 0, 0,
|
||||
0, 252, 255, 1, 0, 0, 0, 253, 251, 1, 0, 0, 0, 253, 254, 1, 0, 0, 0, 254,
|
||||
256, 1, 0, 0, 0, 255, 253, 1, 0, 0, 0, 256, 258, 5, 39, 0, 0, 257, 237,
|
||||
1, 0, 0, 0, 257, 247, 1, 0, 0, 0, 258, 62, 1, 0, 0, 0, 259, 263, 7, 24,
|
||||
0, 0, 260, 262, 7, 25, 0, 0, 261, 260, 1, 0, 0, 0, 262, 265, 1, 0, 0, 0,
|
||||
263, 261, 1, 0, 0, 0, 263, 264, 1, 0, 0, 0, 264, 64, 1, 0, 0, 0, 265, 263,
|
||||
1, 0, 0, 0, 266, 268, 7, 26, 0, 0, 267, 266, 1, 0, 0, 0, 268, 269, 1, 0,
|
||||
0, 0, 269, 267, 1, 0, 0, 0, 269, 270, 1, 0, 0, 0, 270, 271, 1, 0, 0, 0,
|
||||
271, 272, 6, 32, 0, 0, 272, 66, 1, 0, 0, 0, 273, 274, 7, 27, 0, 0, 274,
|
||||
68, 1, 0, 0, 0, 275, 277, 8, 28, 0, 0, 276, 275, 1, 0, 0, 0, 277, 278,
|
||||
1, 0, 0, 0, 278, 276, 1, 0, 0, 0, 278, 279, 1, 0, 0, 0, 279, 70, 1, 0,
|
||||
0, 0, 18, 0, 84, 113, 132, 154, 171, 222, 227, 233, 235, 241, 243, 251,
|
||||
253, 257, 263, 269, 278, 1, 6, 0, 0,
|
||||
8, 0, 9, 10, 13, 13, 32, 34, 39, 41, 44, 44, 60, 62, 91, 91, 93, 93, 285,
|
||||
0, 1, 1, 0, 0, 0, 0, 3, 1, 0, 0, 0, 0, 5, 1, 0, 0, 0, 0, 7, 1, 0, 0, 0,
|
||||
0, 9, 1, 0, 0, 0, 0, 11, 1, 0, 0, 0, 0, 13, 1, 0, 0, 0, 0, 15, 1, 0, 0,
|
||||
0, 0, 17, 1, 0, 0, 0, 0, 19, 1, 0, 0, 0, 0, 21, 1, 0, 0, 0, 0, 23, 1, 0,
|
||||
0, 0, 0, 25, 1, 0, 0, 0, 0, 27, 1, 0, 0, 0, 0, 29, 1, 0, 0, 0, 0, 31, 1,
|
||||
0, 0, 0, 0, 33, 1, 0, 0, 0, 0, 35, 1, 0, 0, 0, 0, 37, 1, 0, 0, 0, 0, 39,
|
||||
1, 0, 0, 0, 0, 41, 1, 0, 0, 0, 0, 43, 1, 0, 0, 0, 0, 45, 1, 0, 0, 0, 0,
|
||||
47, 1, 0, 0, 0, 0, 49, 1, 0, 0, 0, 0, 51, 1, 0, 0, 0, 0, 53, 1, 0, 0, 0,
|
||||
0, 55, 1, 0, 0, 0, 0, 57, 1, 0, 0, 0, 0, 59, 1, 0, 0, 0, 0, 61, 1, 0, 0,
|
||||
0, 0, 63, 1, 0, 0, 0, 0, 67, 1, 0, 0, 0, 1, 69, 1, 0, 0, 0, 3, 71, 1, 0,
|
||||
0, 0, 5, 73, 1, 0, 0, 0, 7, 75, 1, 0, 0, 0, 9, 77, 1, 0, 0, 0, 11, 82,
|
||||
1, 0, 0, 0, 13, 84, 1, 0, 0, 0, 15, 87, 1, 0, 0, 0, 17, 90, 1, 0, 0, 0,
|
||||
19, 92, 1, 0, 0, 0, 21, 95, 1, 0, 0, 0, 23, 97, 1, 0, 0, 0, 25, 100, 1,
|
||||
0, 0, 0, 27, 105, 1, 0, 0, 0, 29, 118, 1, 0, 0, 0, 31, 124, 1, 0, 0, 0,
|
||||
33, 138, 1, 0, 0, 0, 35, 146, 1, 0, 0, 0, 37, 154, 1, 0, 0, 0, 39, 161,
|
||||
1, 0, 0, 0, 41, 171, 1, 0, 0, 0, 43, 174, 1, 0, 0, 0, 45, 178, 1, 0, 0,
|
||||
0, 47, 182, 1, 0, 0, 0, 49, 185, 1, 0, 0, 0, 51, 189, 1, 0, 0, 0, 53, 196,
|
||||
1, 0, 0, 0, 55, 212, 1, 0, 0, 0, 57, 215, 1, 0, 0, 0, 59, 247, 1, 0, 0,
|
||||
0, 61, 249, 1, 0, 0, 0, 63, 257, 1, 0, 0, 0, 65, 263, 1, 0, 0, 0, 67, 266,
|
||||
1, 0, 0, 0, 69, 70, 5, 40, 0, 0, 70, 2, 1, 0, 0, 0, 71, 72, 5, 41, 0, 0,
|
||||
72, 4, 1, 0, 0, 0, 73, 74, 5, 91, 0, 0, 74, 6, 1, 0, 0, 0, 75, 76, 5, 93,
|
||||
0, 0, 76, 8, 1, 0, 0, 0, 77, 78, 5, 44, 0, 0, 78, 10, 1, 0, 0, 0, 79, 83,
|
||||
5, 61, 0, 0, 80, 81, 5, 61, 0, 0, 81, 83, 5, 61, 0, 0, 82, 79, 1, 0, 0,
|
||||
0, 82, 80, 1, 0, 0, 0, 83, 12, 1, 0, 0, 0, 84, 85, 5, 33, 0, 0, 85, 86,
|
||||
5, 61, 0, 0, 86, 14, 1, 0, 0, 0, 87, 88, 5, 60, 0, 0, 88, 89, 5, 62, 0,
|
||||
0, 89, 16, 1, 0, 0, 0, 90, 91, 5, 60, 0, 0, 91, 18, 1, 0, 0, 0, 92, 93,
|
||||
5, 60, 0, 0, 93, 94, 5, 61, 0, 0, 94, 20, 1, 0, 0, 0, 95, 96, 5, 62, 0,
|
||||
0, 96, 22, 1, 0, 0, 0, 97, 98, 5, 62, 0, 0, 98, 99, 5, 61, 0, 0, 99, 24,
|
||||
1, 0, 0, 0, 100, 101, 7, 0, 0, 0, 101, 102, 7, 1, 0, 0, 102, 103, 7, 2,
|
||||
0, 0, 103, 104, 7, 3, 0, 0, 104, 26, 1, 0, 0, 0, 105, 106, 7, 4, 0, 0,
|
||||
106, 107, 7, 5, 0, 0, 107, 109, 7, 6, 0, 0, 108, 110, 7, 7, 0, 0, 109,
|
||||
108, 1, 0, 0, 0, 110, 111, 1, 0, 0, 0, 111, 109, 1, 0, 0, 0, 111, 112,
|
||||
1, 0, 0, 0, 112, 113, 1, 0, 0, 0, 113, 114, 7, 0, 0, 0, 114, 115, 7, 1,
|
||||
0, 0, 115, 116, 7, 2, 0, 0, 116, 117, 7, 3, 0, 0, 117, 28, 1, 0, 0, 0,
|
||||
118, 119, 7, 1, 0, 0, 119, 120, 7, 0, 0, 0, 120, 121, 7, 1, 0, 0, 121,
|
||||
122, 7, 2, 0, 0, 122, 123, 7, 3, 0, 0, 123, 30, 1, 0, 0, 0, 124, 125, 7,
|
||||
4, 0, 0, 125, 126, 7, 5, 0, 0, 126, 128, 7, 6, 0, 0, 127, 129, 7, 7, 0,
|
||||
0, 128, 127, 1, 0, 0, 0, 129, 130, 1, 0, 0, 0, 130, 128, 1, 0, 0, 0, 130,
|
||||
131, 1, 0, 0, 0, 131, 132, 1, 0, 0, 0, 132, 133, 7, 1, 0, 0, 133, 134,
|
||||
7, 0, 0, 0, 134, 135, 7, 1, 0, 0, 135, 136, 7, 2, 0, 0, 136, 137, 7, 3,
|
||||
0, 0, 137, 32, 1, 0, 0, 0, 138, 139, 7, 8, 0, 0, 139, 140, 7, 3, 0, 0,
|
||||
140, 141, 7, 6, 0, 0, 141, 142, 7, 9, 0, 0, 142, 143, 7, 3, 0, 0, 143,
|
||||
144, 7, 3, 0, 0, 144, 145, 7, 4, 0, 0, 145, 34, 1, 0, 0, 0, 146, 147, 7,
|
||||
3, 0, 0, 147, 148, 7, 10, 0, 0, 148, 149, 7, 1, 0, 0, 149, 150, 7, 11,
|
||||
0, 0, 150, 152, 7, 6, 0, 0, 151, 153, 7, 11, 0, 0, 152, 151, 1, 0, 0, 0,
|
||||
152, 153, 1, 0, 0, 0, 153, 36, 1, 0, 0, 0, 154, 155, 7, 12, 0, 0, 155,
|
||||
156, 7, 3, 0, 0, 156, 157, 7, 13, 0, 0, 157, 158, 7, 3, 0, 0, 158, 159,
|
||||
7, 10, 0, 0, 159, 160, 7, 14, 0, 0, 160, 38, 1, 0, 0, 0, 161, 162, 7, 15,
|
||||
0, 0, 162, 163, 7, 5, 0, 0, 163, 164, 7, 4, 0, 0, 164, 165, 7, 6, 0, 0,
|
||||
165, 166, 7, 16, 0, 0, 166, 167, 7, 1, 0, 0, 167, 169, 7, 4, 0, 0, 168,
|
||||
170, 7, 11, 0, 0, 169, 168, 1, 0, 0, 0, 169, 170, 1, 0, 0, 0, 170, 40,
|
||||
1, 0, 0, 0, 171, 172, 7, 1, 0, 0, 172, 173, 7, 4, 0, 0, 173, 42, 1, 0,
|
||||
0, 0, 174, 175, 7, 4, 0, 0, 175, 176, 7, 5, 0, 0, 176, 177, 7, 6, 0, 0,
|
||||
177, 44, 1, 0, 0, 0, 178, 179, 7, 16, 0, 0, 179, 180, 7, 4, 0, 0, 180,
|
||||
181, 7, 17, 0, 0, 181, 46, 1, 0, 0, 0, 182, 183, 7, 5, 0, 0, 183, 184,
|
||||
7, 12, 0, 0, 184, 48, 1, 0, 0, 0, 185, 186, 7, 18, 0, 0, 186, 187, 7, 16,
|
||||
0, 0, 187, 188, 7, 11, 0, 0, 188, 50, 1, 0, 0, 0, 189, 190, 7, 18, 0, 0,
|
||||
190, 191, 7, 16, 0, 0, 191, 192, 7, 11, 0, 0, 192, 193, 7, 16, 0, 0, 193,
|
||||
194, 7, 4, 0, 0, 194, 195, 7, 19, 0, 0, 195, 52, 1, 0, 0, 0, 196, 197,
|
||||
7, 18, 0, 0, 197, 198, 7, 16, 0, 0, 198, 199, 7, 11, 0, 0, 199, 200, 7,
|
||||
16, 0, 0, 200, 201, 7, 0, 0, 0, 201, 202, 7, 0, 0, 0, 202, 54, 1, 0, 0,
|
||||
0, 203, 204, 7, 6, 0, 0, 204, 205, 7, 12, 0, 0, 205, 206, 7, 20, 0, 0,
|
||||
206, 213, 7, 3, 0, 0, 207, 208, 7, 21, 0, 0, 208, 209, 7, 16, 0, 0, 209,
|
||||
210, 7, 0, 0, 0, 210, 211, 7, 11, 0, 0, 211, 213, 7, 3, 0, 0, 212, 203,
|
||||
1, 0, 0, 0, 212, 207, 1, 0, 0, 0, 213, 56, 1, 0, 0, 0, 214, 216, 3, 65,
|
||||
32, 0, 215, 214, 1, 0, 0, 0, 216, 217, 1, 0, 0, 0, 217, 215, 1, 0, 0, 0,
|
||||
217, 218, 1, 0, 0, 0, 218, 225, 1, 0, 0, 0, 219, 221, 5, 46, 0, 0, 220,
|
||||
222, 3, 65, 32, 0, 221, 220, 1, 0, 0, 0, 222, 223, 1, 0, 0, 0, 223, 221,
|
||||
1, 0, 0, 0, 223, 224, 1, 0, 0, 0, 224, 226, 1, 0, 0, 0, 225, 219, 1, 0,
|
||||
0, 0, 225, 226, 1, 0, 0, 0, 226, 58, 1, 0, 0, 0, 227, 233, 5, 34, 0, 0,
|
||||
228, 232, 8, 22, 0, 0, 229, 230, 5, 92, 0, 0, 230, 232, 9, 0, 0, 0, 231,
|
||||
228, 1, 0, 0, 0, 231, 229, 1, 0, 0, 0, 232, 235, 1, 0, 0, 0, 233, 231,
|
||||
1, 0, 0, 0, 233, 234, 1, 0, 0, 0, 234, 236, 1, 0, 0, 0, 235, 233, 1, 0,
|
||||
0, 0, 236, 248, 5, 34, 0, 0, 237, 243, 5, 39, 0, 0, 238, 242, 8, 23, 0,
|
||||
0, 239, 240, 5, 92, 0, 0, 240, 242, 9, 0, 0, 0, 241, 238, 1, 0, 0, 0, 241,
|
||||
239, 1, 0, 0, 0, 242, 245, 1, 0, 0, 0, 243, 241, 1, 0, 0, 0, 243, 244,
|
||||
1, 0, 0, 0, 244, 246, 1, 0, 0, 0, 245, 243, 1, 0, 0, 0, 246, 248, 5, 39,
|
||||
0, 0, 247, 227, 1, 0, 0, 0, 247, 237, 1, 0, 0, 0, 248, 60, 1, 0, 0, 0,
|
||||
249, 253, 7, 24, 0, 0, 250, 252, 7, 25, 0, 0, 251, 250, 1, 0, 0, 0, 252,
|
||||
255, 1, 0, 0, 0, 253, 251, 1, 0, 0, 0, 253, 254, 1, 0, 0, 0, 254, 62, 1,
|
||||
0, 0, 0, 255, 253, 1, 0, 0, 0, 256, 258, 7, 26, 0, 0, 257, 256, 1, 0, 0,
|
||||
0, 258, 259, 1, 0, 0, 0, 259, 257, 1, 0, 0, 0, 259, 260, 1, 0, 0, 0, 260,
|
||||
261, 1, 0, 0, 0, 261, 262, 6, 31, 0, 0, 262, 64, 1, 0, 0, 0, 263, 264,
|
||||
7, 27, 0, 0, 264, 66, 1, 0, 0, 0, 265, 267, 8, 28, 0, 0, 266, 265, 1, 0,
|
||||
0, 0, 267, 268, 1, 0, 0, 0, 268, 266, 1, 0, 0, 0, 268, 269, 1, 0, 0, 0,
|
||||
269, 68, 1, 0, 0, 0, 18, 0, 82, 111, 130, 152, 169, 212, 217, 223, 225,
|
||||
231, 233, 241, 243, 247, 253, 259, 268, 1, 6, 0, 0,
|
||||
}
deserializer := antlr.NewATNDeserializer(nil)
staticData.atn = deserializer.Deserialize(staticData.serializedATN)
@@ -261,11 +257,10 @@ const (
 FilterQueryLexerHAS = 25
 FilterQueryLexerHASANY = 26
 FilterQueryLexerHASALL = 27
-FilterQueryLexerHASNONE = 28
-FilterQueryLexerBOOL = 29
-FilterQueryLexerNUMBER = 30
-FilterQueryLexerQUOTED_TEXT = 31
-FilterQueryLexerKEY = 32
-FilterQueryLexerWS = 33
-FilterQueryLexerFREETEXT = 34
+FilterQueryLexerBOOL = 28
+FilterQueryLexerNUMBER = 29
+FilterQueryLexerQUOTED_TEXT = 30
+FilterQueryLexerKEY = 31
+FilterQueryLexerWS = 32
+FilterQueryLexerFREETEXT = 33
 )

@@ -40,8 +40,8 @@ func filterqueryParserInit() {
 "", "LPAREN", "RPAREN", "LBRACK", "RBRACK", "COMMA", "EQUALS", "NOT_EQUALS",
 "NEQ", "LT", "LE", "GT", "GE", "LIKE", "NOT_LIKE", "ILIKE", "NOT_ILIKE",
 "BETWEEN", "EXISTS", "REGEXP", "CONTAINS", "IN", "NOT", "AND", "OR",
-"HAS", "HASANY", "HASALL", "HASNONE", "BOOL", "NUMBER", "QUOTED_TEXT",
-"KEY", "WS", "FREETEXT",
+"HAS", "HASANY", "HASALL", "BOOL", "NUMBER", "QUOTED_TEXT", "KEY", "WS",
+"FREETEXT",
 }
 staticData.RuleNames = []string{
 "query", "expression", "orExpression", "andExpression", "unaryExpression",
@@ -51,7 +51,7 @@ func filterqueryParserInit() {
 }
 staticData.PredictionContextCache = antlr.NewPredictionContextCache()
 staticData.serializedATN = []int32{
-4, 1, 34, 212, 2, 0, 7, 0, 2, 1, 7, 1, 2, 2, 7, 2, 2, 3, 7, 3, 2, 4, 7,
+4, 1, 33, 212, 2, 0, 7, 0, 2, 1, 7, 1, 2, 2, 7, 2, 2, 3, 7, 3, 2, 4, 7,
 4, 2, 5, 7, 5, 2, 6, 7, 6, 2, 7, 7, 7, 2, 8, 7, 8, 2, 9, 7, 9, 2, 10, 7,
 10, 2, 11, 7, 11, 2, 12, 7, 12, 2, 13, 7, 13, 2, 14, 7, 14, 2, 15, 7, 15,
 2, 16, 7, 16, 1, 0, 1, 0, 1, 0, 1, 1, 1, 1, 1, 2, 1, 2, 1, 2, 5, 2, 43,
@@ -72,7 +72,7 @@ func filterqueryParserInit() {
 13, 1, 13, 1, 13, 3, 13, 202, 8, 13, 1, 14, 1, 14, 1, 14, 1, 14, 1, 15,
 1, 15, 1, 16, 1, 16, 1, 16, 0, 0, 17, 0, 2, 4, 6, 8, 10, 12, 14, 16, 18,
 20, 22, 24, 26, 28, 30, 32, 0, 6, 1, 0, 7, 8, 2, 0, 13, 13, 15, 15, 2,
-0, 14, 14, 16, 16, 2, 0, 31, 31, 34, 34, 1, 0, 25, 28, 1, 0, 29, 32, 225,
+0, 14, 14, 16, 16, 2, 0, 30, 30, 33, 33, 1, 0, 25, 27, 1, 0, 28, 31, 225,
 0, 34, 1, 0, 0, 0, 2, 37, 1, 0, 0, 0, 4, 39, 1, 0, 0, 0, 6, 47, 1, 0, 0,
 0, 8, 57, 1, 0, 0, 0, 10, 69, 1, 0, 0, 0, 12, 147, 1, 0, 0, 0, 14, 159,
 1, 0, 0, 0, 16, 173, 1, 0, 0, 0, 18, 175, 1, 0, 0, 0, 20, 183, 1, 0, 0,
@@ -140,7 +140,7 @@ func filterqueryParserInit() {
 201, 198, 1, 0, 0, 0, 201, 199, 1, 0, 0, 0, 201, 200, 1, 0, 0, 0, 202,
 27, 1, 0, 0, 0, 203, 204, 5, 3, 0, 0, 204, 205, 3, 18, 9, 0, 205, 206,
 5, 4, 0, 0, 206, 29, 1, 0, 0, 0, 207, 208, 7, 5, 0, 0, 208, 31, 1, 0, 0,
-0, 209, 210, 5, 32, 0, 0, 210, 33, 1, 0, 0, 0, 11, 44, 51, 53, 57, 69,
+0, 209, 210, 5, 31, 0, 0, 210, 33, 1, 0, 0, 0, 11, 44, 51, 53, 57, 69,
 147, 159, 173, 180, 195, 201,
 }
 deserializer := antlr.NewATNDeserializer(nil)
@@ -207,13 +207,12 @@ const (
 FilterQueryParserHAS = 25
 FilterQueryParserHASANY = 26
 FilterQueryParserHASALL = 27
-FilterQueryParserHASNONE = 28
-FilterQueryParserBOOL = 29
-FilterQueryParserNUMBER = 30
-FilterQueryParserQUOTED_TEXT = 31
-FilterQueryParserKEY = 32
-FilterQueryParserWS = 33
-FilterQueryParserFREETEXT = 34
+FilterQueryParserBOOL = 28
+FilterQueryParserNUMBER = 29
+FilterQueryParserQUOTED_TEXT = 30
+FilterQueryParserKEY = 31
+FilterQueryParserWS = 32
+FilterQueryParserFREETEXT = 33
 )

 // FilterQueryParser rules.
@@ -803,7 +802,7 @@ func (p *FilterQueryParser) AndExpression() (localctx IAndExpressionContext) {
 }
 _la = p.GetTokenStream().LA(1)

-for (int64(_la) & ^0x3f) == 0 && ((int64(1)<<_la)&24138219522) != 0 {
+for (int64(_la) & ^0x3f) == 0 && ((int64(1)<<_la)&12058624002) != 0 {
 p.SetState(51)
 p.GetErrorHandler().Sync(p)
 if p.HasError() {
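
The changed literal here is not hand-edited: ANTLR emits this loop guard as a 64-bit bitmask over token types, so dropping HASNONE (previously token 28) renumbers every later token and rewrites the constant. A small sketch, using the regenerated token values from the constants above, reproduces the new mask (AND is in the set, presumably because the grammar lets an explicit AND join terms inside this loop):

package main

import "fmt"

func main() {
	// Tokens that can continue an andExpression after renumbering:
	// LPAREN(1), NOT(22), AND(23), HAS(25), HASANY(26), HASALL(27),
	// QUOTED_TEXT(30), KEY(31), FREETEXT(33).
	var mask int64
	for _, tok := range []int{1, 22, 23, 25, 26, 27, 30, 31, 33} {
		mask |= int64(1) << tok
	}
	fmt.Println(mask) // 12058624002, matching the regenerated guard
}
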
@@ -825,7 +824,7 @@ func (p *FilterQueryParser) AndExpression() (localctx IAndExpressionContext) {
 p.UnaryExpression()
 }

-case FilterQueryParserLPAREN, FilterQueryParserNOT, FilterQueryParserHAS, FilterQueryParserHASANY, FilterQueryParserHASALL, FilterQueryParserHASNONE, FilterQueryParserQUOTED_TEXT, FilterQueryParserKEY, FilterQueryParserFREETEXT:
+case FilterQueryParserLPAREN, FilterQueryParserNOT, FilterQueryParserHAS, FilterQueryParserHASANY, FilterQueryParserHASALL, FilterQueryParserQUOTED_TEXT, FilterQueryParserKEY, FilterQueryParserFREETEXT:
 {
 p.SetState(50)
 p.UnaryExpression()
@@ -2653,7 +2652,6 @@ type IFunctionCallContext interface {
 HAS() antlr.TerminalNode
 HASANY() antlr.TerminalNode
 HASALL() antlr.TerminalNode
-HASNONE() antlr.TerminalNode

 // IsFunctionCallContext differentiates from other interfaces.
 IsFunctionCallContext()
@@ -2727,10 +2725,6 @@ func (s *FunctionCallContext) HASALL() antlr.TerminalNode {
 return s.GetToken(FilterQueryParserHASALL, 0)
 }

-func (s *FunctionCallContext) HASNONE() antlr.TerminalNode {
-return s.GetToken(FilterQueryParserHASNONE, 0)
-}
-
 func (s *FunctionCallContext) GetRuleContext() antlr.RuleContext {
 return s
 }
@@ -2771,7 +2765,7 @@ func (p *FilterQueryParser) FunctionCall() (localctx IFunctionCallContext) {
 p.SetState(185)
 _la = p.GetTokenStream().LA(1)

-if !((int64(_la) & ^0x3f) == 0 && ((int64(1)<<_la)&503316480) != 0) {
+if !((int64(_la) & ^0x3f) == 0 && ((int64(1)<<_la)&234881024) != 0) {
 p.GetErrorHandler().RecoverInline(p)
 } else {
 p.GetErrorHandler().ReportMatch(p)
@@ -3411,7 +3405,7 @@ func (p *FilterQueryParser) Value() (localctx IValueContext) {
 p.SetState(207)
 _la = p.GetTokenStream().LA(1)

-if !((int64(_la) & ^0x3f) == 0 && ((int64(1)<<_la)&8053063680) != 0) {
+if !((int64(_la) & ^0x3f) == 0 && ((int64(1)<<_la)&4026531840) != 0) {
 p.GetErrorHandler().RecoverInline(p)
 } else {
 p.GetErrorHandler().ReportMatch(p)
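
The other two changed literals, here and in the FunctionCall() hunk above, are the same mechanical shift over contiguous token ranges, small enough to verify by hand:

// FunctionCall matches HAS(25) | HASANY(26) | HASALL(27), HASNONE gone:
//	1<<25 + 1<<26 + 1<<27 = 33554432 + 67108864 + 134217728 = 234881024
// Value matches BOOL(28) | NUMBER(29) | QUOTED_TEXT(30) | KEY(31):
//	1<<28 + 1<<29 + 1<<30 + 1<<31 = 4026531840
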
48 pkg/parser/grammar/query_to_keys.go Normal file
@@ -0,0 +1,48 @@
package parser

import (
	"github.com/SigNoz/signoz/pkg/types/telemetrytypes"
	"github.com/antlr4-go/antlr/v4"
)

// QueryStringToKeysSelectors converts a query string to a list of field key selectors
//
// e.g. `service.name="query-service" AND http.status_code=200 AND resource.k8s.namespace.name="application"` -> []*telemetrytypes.FieldKeySelector{
//	{
//		Name: "service.name",
//		FieldContext: telemetrytypes.FieldContextUnspecified,
//		FieldDataType: telemetrytypes.FieldDataTypeUnspecified,
//	},
//	{
//		Name: "http.status_code",
//		FieldContext: telemetrytypes.FieldContextUnspecified,
//		FieldDataType: telemetrytypes.FieldDataTypeUnspecified,
//	},
//	{
//		Name: "k8s.namespace.name",
//		FieldContext: telemetrytypes.FieldContextResource,
//		FieldDataType: telemetrytypes.FieldDataTypeUnspecified,
//	},
// }
func QueryStringToKeysSelectors(query string) ([]*telemetrytypes.FieldKeySelector, error) {
	lexer := NewFilterQueryLexer(antlr.NewInputStream(query))
	keys := []*telemetrytypes.FieldKeySelector{}
	for {
		tok := lexer.NextToken()
		if tok.GetTokenType() == antlr.TokenEOF {
			break
		}

		if tok.GetTokenType() == FilterQueryLexerKEY {
			key := telemetrytypes.GetFieldKeyFromKeyText(tok.GetText())
			keys = append(keys, &telemetrytypes.FieldKeySelector{
				Name: key.Name,
				Signal: key.Signal,
				FieldContext: key.FieldContext,
				FieldDataType: key.FieldDataType,
			})
		}
	}

	return keys, nil
}
101 pkg/parser/grammar/query_to_keys_test.go Normal file
@@ -0,0 +1,101 @@
package parser

import (
	"testing"

	"github.com/SigNoz/signoz/pkg/types/telemetrytypes"
)

func TestQueryToKeys(t *testing.T) {
	testCases := []struct {
		query string
		expectedKeys []telemetrytypes.FieldKeySelector
	}{
		{
			query: `service.name="redis"`,
			expectedKeys: []telemetrytypes.FieldKeySelector{
				{
					Name: "service.name",
					Signal: telemetrytypes.SignalUnspecified,
					FieldContext: telemetrytypes.FieldContextUnspecified,
					FieldDataType: telemetrytypes.FieldDataTypeUnspecified,
				},
			},
		},
		{
			query: `resource.service.name="redis"`,
			expectedKeys: []telemetrytypes.FieldKeySelector{
				{
					Name: "service.name",
					Signal: telemetrytypes.SignalUnspecified,
					FieldContext: telemetrytypes.FieldContextResource,
					FieldDataType: telemetrytypes.FieldDataTypeUnspecified,
				},
			},
		},
		{
			query: `service.name="redis" AND http.status_code=200`,
			expectedKeys: []telemetrytypes.FieldKeySelector{
				{
					Name: "service.name",
					Signal: telemetrytypes.SignalUnspecified,
					FieldContext: telemetrytypes.FieldContextUnspecified,
					FieldDataType: telemetrytypes.FieldDataTypeUnspecified,
				},
				{
					Name: "http.status_code",
					Signal: telemetrytypes.SignalUnspecified,
					FieldContext: telemetrytypes.FieldContextUnspecified,
					FieldDataType: telemetrytypes.FieldDataTypeUnspecified,
				},
			},
		},
		{
			query: `has(payload.user_ids, 123)`,
			expectedKeys: []telemetrytypes.FieldKeySelector{
				{
					Name: "payload.user_ids",
					Signal: telemetrytypes.SignalUnspecified,
					FieldContext: telemetrytypes.FieldContextUnspecified,
					FieldDataType: telemetrytypes.FieldDataTypeUnspecified,
				},
			},
		},
		{
			query: `body.user_ids[*] = 123`,
			expectedKeys: []telemetrytypes.FieldKeySelector{
				{
					Name: "body.user_ids[*]",
					Signal: telemetrytypes.SignalUnspecified,
					FieldContext: telemetrytypes.FieldContextUnspecified,
					FieldDataType: telemetrytypes.FieldDataTypeUnspecified,
				},
			},
		},
	}

	for _, testCase := range testCases {
		keys, err := QueryStringToKeysSelectors(testCase.query)
		if err != nil {
			t.Fatalf("Error: %v", err)
		}
		if len(keys) != len(testCase.expectedKeys) {
			t.Fatalf("Expected %d keys, got %d", len(testCase.expectedKeys), len(keys))
		}
		for i, key := range keys {
			if key.Name != testCase.expectedKeys[i].Name {
				t.Fatalf("Expected key %v, got %v", testCase.expectedKeys[i], key)
			}
			if key.Signal != testCase.expectedKeys[i].Signal {
				t.Fatalf("Expected signal %v, got %v", testCase.expectedKeys[i].Signal, key.Signal)
			}
			if key.FieldContext != testCase.expectedKeys[i].FieldContext {
				t.Fatalf("Expected field context %v, got %v", testCase.expectedKeys[i].FieldContext, key.FieldContext)
			}
			if key.FieldDataType != testCase.expectedKeys[i].FieldDataType {
				t.Fatalf("Expected field data type %v, got %v", testCase.expectedKeys[i].FieldDataType, key.FieldDataType)
			}
		}
	}
}
587 pkg/parser/grammar/where_clause_visitor.go Normal file
@@ -0,0 +1,587 @@
package parser

import (
	"context"
	"fmt"
	"strconv"
	"strings"

	"github.com/SigNoz/signoz/pkg/errors"
	"github.com/SigNoz/signoz/pkg/telemetrylogs"
	qbtypes "github.com/SigNoz/signoz/pkg/types/querybuildertypes/querybuildertypesv5"
	"github.com/SigNoz/signoz/pkg/types/telemetrytypes"
	"github.com/antlr4-go/antlr/v4"

	sqlbuilder "github.com/huandu/go-sqlbuilder"
)

// WhereClauseVisitor implements the FilterQueryVisitor interface
// to convert the parsed filter expressions into a ClickHouse WHERE clause
type WhereClauseVisitor struct {
	conditionBuilder qbtypes.ConditionBuilder
	warnings []error
	fieldKeys map[string][]telemetrytypes.TelemetryFieldKey
	errors []error
	builder *sqlbuilder.SelectBuilder
	fullTextColumn telemetrytypes.TelemetryFieldKey
}

// NewWhereClauseVisitor creates a new WhereClauseVisitor
func NewWhereClauseVisitor(
	conditionBuilder qbtypes.ConditionBuilder,
	fieldKeys map[string][]telemetrytypes.TelemetryFieldKey,
	builder *sqlbuilder.SelectBuilder,
	fullTextColumn telemetrytypes.TelemetryFieldKey,
) *WhereClauseVisitor {
	return &WhereClauseVisitor{
		conditionBuilder: conditionBuilder,
		fieldKeys: fieldKeys,
		builder: builder,
		fullTextColumn: fullTextColumn,
	}
}

type SyntaxError struct {
	line, column int
	msg string
}

func (e *SyntaxError) Error() string {
	return fmt.Sprintf("line %d:%d %s", e.line, e.column, e.msg)
}

// ErrorListener is a custom error listener to capture syntax errors
type ErrorListener struct {
	*antlr.DefaultErrorListener
	Errors []error
}

// NewErrorListener creates a new error listener
func NewErrorListener() *ErrorListener {
	return &ErrorListener{
		DefaultErrorListener: antlr.NewDefaultErrorListener(),
		Errors: []error{},
	}
}

// SyntaxError captures syntax errors during parsing
func (l *ErrorListener) SyntaxError(recognizer antlr.Recognizer, offendingSymbol any, line, column int, msg string, e antlr.RecognitionException) {
	l.Errors = append(l.Errors, &SyntaxError{line: line, column: column, msg: msg})
}

// PrepareWhereClause generates a ClickHouse-compatible WHERE clause from the filter query
func PrepareWhereClause(
	query string,
	fieldKeys map[string][]telemetrytypes.TelemetryFieldKey,
	conditionBuilder qbtypes.ConditionBuilder,
	fullTextColumn telemetrytypes.TelemetryFieldKey,
) (string, []any, []error, error) {
	// Set up the ANTLR parsing pipeline
	input := antlr.NewInputStream(query)
	lexer := NewFilterQueryLexer(input)

	sb := sqlbuilder.NewSelectBuilder()

	visitor := NewWhereClauseVisitor(conditionBuilder, fieldKeys, sb, fullTextColumn)

	// Set up error handling
	lexerErrorListener := NewErrorListener()
	lexer.RemoveErrorListeners()
	lexer.AddErrorListener(lexerErrorListener)

	tokens := antlr.NewCommonTokenStream(lexer, 0)
	parserErrorListener := NewErrorListener()
	parser := NewFilterQueryParser(tokens)
	parser.RemoveErrorListeners()
	parser.AddErrorListener(parserErrorListener)

	// Parse the query
	tree := parser.Query()

	// Handle syntax errors
	if len(parserErrorListener.Errors) > 0 {
		combinedErrors := errors.Newf(
			errors.TypeInvalidInput,
			errors.CodeInvalidInput,
			"found %d syntax errors while parsing the search expression: %v",
			len(parserErrorListener.Errors),
			parserErrorListener.Errors,
		)
		return "", nil, nil, combinedErrors
	}

	// Visit the parse tree with our ClickHouse visitor
	cond := visitor.Visit(tree).(string)

	if len(visitor.errors) > 0 {
		// combine all errors into a single error
		combinedErrors := errors.Newf(
			errors.TypeInvalidInput,
			errors.CodeInvalidInput,
			"found %d errors while parsing the search expression: %v",
			len(visitor.errors),
			visitor.errors,
		)
		return "", nil, nil, combinedErrors
	}

	whereClause, args := visitor.builder.Where(cond).BuildWithFlavor(sqlbuilder.ClickHouse)

	return whereClause, args, visitor.warnings, nil
}
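
For orientation, a minimal caller sketch; `cb`, `keys`, and `bodyColumn` are hypothetical names standing in for wiring that lives elsewhere in the codebase, not part of this diff:

// a hypothetical in-package caller; the ConditionBuilder implementation and
// the key-metadata map are assumptions constructed elsewhere
func buildFilter(cb qbtypes.ConditionBuilder, keys map[string][]telemetrytypes.TelemetryFieldKey, bodyColumn telemetrytypes.TelemetryFieldKey) {
	whereClause, args, warnings, err := PrepareWhereClause(
		`service.name="redis" AND http.status_code=200`, keys, cb, bodyColumn)
	if err != nil {
		fmt.Println("invalid filter:", err) // syntax errors or unknown keys
		return
	}
	for _, w := range warnings {
		fmt.Println("warning:", w) // e.g. a key found in several field contexts
	}
	fmt.Println(whereClause, args) // WHERE ... plus ClickHouse placeholder args
}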
|
// Visit dispatches to the specific visit method based on node type
func (v *WhereClauseVisitor) Visit(tree antlr.ParseTree) any {
	// Handle nil nodes to prevent panic
	if tree == nil {
		return ""
	}

	switch t := tree.(type) {
	case *QueryContext:
		return v.VisitQuery(t)
	case *ExpressionContext:
		return v.VisitExpression(t)
	case *OrExpressionContext:
		return v.VisitOrExpression(t)
	case *AndExpressionContext:
		return v.VisitAndExpression(t)
	case *UnaryExpressionContext:
		return v.VisitUnaryExpression(t)
	case *PrimaryContext:
		return v.VisitPrimary(t)
	case *ComparisonContext:
		return v.VisitComparison(t)
	case *InClauseContext:
		return v.VisitInClause(t)
	case *NotInClauseContext:
		return v.VisitNotInClause(t)
	case *ValueListContext:
		return v.VisitValueList(t)
	case *FullTextContext:
		return v.VisitFullText(t)
	case *FunctionCallContext:
		return v.VisitFunctionCall(t)
	case *FunctionParamListContext:
		return v.VisitFunctionParamList(t)
	case *FunctionParamContext:
		return v.VisitFunctionParam(t)
	case *ArrayContext:
		return v.VisitArray(t)
	case *ValueContext:
		return v.VisitValue(t)
	case *KeyContext:
		return v.VisitKey(t)
	default:
		return ""
	}
}

func (v *WhereClauseVisitor) VisitQuery(ctx *QueryContext) any {
	return v.Visit(ctx.Expression())
}

// VisitExpression passes through to the orExpression
func (v *WhereClauseVisitor) VisitExpression(ctx *ExpressionContext) any {
	return v.Visit(ctx.OrExpression())
}

// VisitOrExpression handles OR expressions
func (v *WhereClauseVisitor) VisitOrExpression(ctx *OrExpressionContext) any {
	andExpressions := ctx.AllAndExpression()

	andExpressionConditions := make([]string, len(andExpressions))
	for i, expr := range andExpressions {
		andExpressionConditions[i] = v.Visit(expr).(string)
	}

	if len(andExpressionConditions) == 1 {
		return andExpressionConditions[0]
	}

	return v.builder.Or(andExpressionConditions...)
}

// VisitAndExpression handles AND expressions
func (v *WhereClauseVisitor) VisitAndExpression(ctx *AndExpressionContext) any {
	unaryExpressions := ctx.AllUnaryExpression()

	unaryExpressionConditions := make([]string, len(unaryExpressions))
	for i, expr := range unaryExpressions {
		unaryExpressionConditions[i] = v.Visit(expr).(string)
	}

	if len(unaryExpressionConditions) == 1 {
		return unaryExpressionConditions[0]
	}

	return v.builder.And(unaryExpressionConditions...)
}

// VisitUnaryExpression handles NOT expressions
func (v *WhereClauseVisitor) VisitUnaryExpression(ctx *UnaryExpressionContext) any {
	result := v.Visit(ctx.Primary()).(string)

	// Check if this is a NOT expression
	if ctx.NOT() != nil {
		return fmt.Sprintf("NOT (%s)", result)
	}

	return result
}

// VisitPrimary handles grouped expressions, comparisons, function calls, and full-text search
func (v *WhereClauseVisitor) VisitPrimary(ctx *PrimaryContext) any {
	if ctx.OrExpression() != nil {
		// This is a parenthesized expression
		return fmt.Sprintf("(%s)", v.Visit(ctx.OrExpression()).(string))
	} else if ctx.Comparison() != nil {
		return v.Visit(ctx.Comparison())
	} else if ctx.FunctionCall() != nil {
		return v.Visit(ctx.FunctionCall())
	} else if ctx.FullText() != nil {
		return v.Visit(ctx.FullText())
	}

	// Handle a standalone key as a full-text search term
	if ctx.GetChildCount() == 1 {
		child := ctx.GetChild(0)
		if keyCtx, ok := child.(*KeyContext); ok {
			// create a full-text search condition on the body field
			keyText := keyCtx.GetText()
			cond, err := v.conditionBuilder.GetCondition(context.Background(), &v.fullTextColumn, qbtypes.FilterOperatorRegexp, keyText, v.builder)
			if err != nil {
				return ""
			}
			return cond
		}
	}

	return "" // Should not happen with valid input
}
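
This bare-key fallback is what turns unquoted terms into full-text search: each term becomes a regexp match on the body column, joined by the grammar's implicit AND. From the expectations in where_clause_visitor_test.go later in this diff:

// query: waiting for response
// where: WHERE ((match(body, ?)) AND (match(body, ?)) AND (match(body, ?)))
// args:  "waiting", "for", "response"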

// VisitComparison handles all comparison operators
func (v *WhereClauseVisitor) VisitComparison(ctx *ComparisonContext) any {
	keys := v.Visit(ctx.Key()).([]telemetrytypes.TelemetryFieldKey)

	// Handle EXISTS specially
	if ctx.EXISTS() != nil {
		op := qbtypes.FilterOperatorExists
		if ctx.NOT() != nil {
			op = qbtypes.FilterOperatorNotExists
		}
		var conds []string
		for _, key := range keys {
			condition, err := v.conditionBuilder.GetCondition(context.Background(), &key, op, nil, v.builder)
			if err != nil {
				return ""
			}
			conds = append(conds, condition)
		}
		return v.builder.Or(conds...)
	}

	// Handle IN clause
	if ctx.InClause() != nil || ctx.NotInClause() != nil {
		// visit whichever clause is present; visiting the absent one would
		// hand a nil context to Visit and the []any assertion would panic
		var values []any
		op := qbtypes.FilterOperatorIn
		if ctx.InClause() != nil {
			values = v.Visit(ctx.InClause()).([]any)
		} else {
			values = v.Visit(ctx.NotInClause()).([]any)
			op = qbtypes.FilterOperatorNotIn
		}
		var conds []string
		for _, key := range keys {
			condition, err := v.conditionBuilder.GetCondition(context.Background(), &key, op, values, v.builder)
			if err != nil {
				return ""
			}
			conds = append(conds, condition)
		}
		return v.builder.Or(conds...)
	}

	// Handle BETWEEN
	if ctx.BETWEEN() != nil {
		op := qbtypes.FilterOperatorBetween
		if ctx.NOT() != nil {
			op = qbtypes.FilterOperatorNotBetween
		}

		values := ctx.AllValue()
		if len(values) != 2 {
			return ""
		}

		value1 := v.Visit(values[0])
		value2 := v.Visit(values[1])

		var conds []string
		for _, key := range keys {
			condition, err := v.conditionBuilder.GetCondition(context.Background(), &key, op, []any{value1, value2}, v.builder)
			if err != nil {
				return ""
			}
			conds = append(conds, condition)
		}
		return v.builder.Or(conds...)
	}

	// Get all values for operations that need them
	values := ctx.AllValue()
	if len(values) > 0 {
		value := v.Visit(values[0])

		var op qbtypes.FilterOperator

		// Handle each type of comparison; the negated REGEXP/CONTAINS forms
		// must be checked before their plain counterparts, otherwise those
		// branches would be unreachable
		if ctx.EQUALS() != nil {
			op = qbtypes.FilterOperatorEqual
		} else if ctx.NOT_EQUALS() != nil || ctx.NEQ() != nil {
			op = qbtypes.FilterOperatorNotEqual
		} else if ctx.LT() != nil {
			op = qbtypes.FilterOperatorLessThan
		} else if ctx.LE() != nil {
			op = qbtypes.FilterOperatorLessThanOrEq
		} else if ctx.GT() != nil {
			op = qbtypes.FilterOperatorGreaterThan
		} else if ctx.GE() != nil {
			op = qbtypes.FilterOperatorGreaterThanOrEq
		} else if ctx.LIKE() != nil {
			op = qbtypes.FilterOperatorLike
		} else if ctx.ILIKE() != nil {
			op = qbtypes.FilterOperatorLike
		} else if ctx.NOT_LIKE() != nil {
			op = qbtypes.FilterOperatorNotLike
		} else if ctx.NOT_ILIKE() != nil {
			op = qbtypes.FilterOperatorNotLike
		} else if ctx.NOT() != nil && ctx.REGEXP() != nil {
			op = qbtypes.FilterOperatorNotRegexp
		} else if ctx.REGEXP() != nil {
			op = qbtypes.FilterOperatorRegexp
		} else if ctx.NOT() != nil && ctx.CONTAINS() != nil {
			op = qbtypes.FilterOperatorNotContains
		} else if ctx.CONTAINS() != nil {
			op = qbtypes.FilterOperatorContains
		}

		var conds []string
		for _, key := range keys {
			condition, err := v.conditionBuilder.GetCondition(context.Background(), &key, op, value, v.builder)
			if err != nil {
				return ""
			}
			conds = append(conds, condition)
		}
		return v.builder.Or(conds...)
	}

	return "" // Should not happen with valid input
}
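
Because the visitor receives every candidate key for a name, a single comparison can fan out into several ORed conditions, one per known data type. The test file pins this down:

// query: http.status_code=200
// where: WHERE (attributes_number['http.status_code'] = ?
//        OR toFloat64OrNull(attributes_string['http.status_code']) = ?)
// args:  200, 200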

// VisitInClause handles IN expressions
func (v *WhereClauseVisitor) VisitInClause(ctx *InClauseContext) any {
	return v.Visit(ctx.ValueList())
}

// VisitNotInClause handles NOT IN expressions
func (v *WhereClauseVisitor) VisitNotInClause(ctx *NotInClauseContext) any {
	return v.Visit(ctx.ValueList())
}

// VisitValueList handles comma-separated value lists
func (v *WhereClauseVisitor) VisitValueList(ctx *ValueListContext) any {
	values := ctx.AllValue()

	parts := []any{}
	for _, val := range values {
		parts = append(parts, v.Visit(val))
	}

	return parts
}

// VisitFullText handles standalone quoted strings for full-text search
func (v *WhereClauseVisitor) VisitFullText(ctx *FullTextContext) any {
	// remove the surrounding quotes from the quoted text
	quotedText := strings.Trim(ctx.QUOTED_TEXT().GetText(), "\"'")
	cond, err := v.conditionBuilder.GetCondition(context.Background(), &v.fullTextColumn, qbtypes.FilterOperatorRegexp, quotedText, v.builder)
	if err != nil {
		return ""
	}
	return cond
}

// VisitFunctionCall handles function calls like has(), hasAny(), etc.
func (v *WhereClauseVisitor) VisitFunctionCall(ctx *FunctionCallContext) any {
	// Get function name based on which token is present
	var functionName string
	if ctx.HAS() != nil {
		functionName = "has"
	} else if ctx.HASANY() != nil {
		functionName = "hasAny"
	} else if ctx.HASALL() != nil {
		functionName = "hasAll"
	} else {
		// Unknown function name
		v.errors = append(v.errors, errors.Newf(
			errors.TypeInvalidInput,
			errors.CodeInvalidInput,
			"unknown function `%s`",
			ctx.GetText(),
		))
		return ""
	}
	params := v.Visit(ctx.FunctionParamList()).([]any)

	if len(params) < 2 {
		v.errors = append(v.errors, errors.Newf(
			errors.TypeInvalidInput,
			errors.CodeInvalidInput,
			"function `%s` expects key and value parameters",
			functionName,
		))
		return ""
	}

	keys, ok := params[0].([]telemetrytypes.TelemetryFieldKey)
	if !ok {
		v.errors = append(v.errors, errors.Newf(
			errors.TypeInvalidInput,
			errors.CodeInvalidInput,
			"function `%s` expects key parameter to be a field key",
			functionName,
		))
		return ""
	}
	value := params[1:]
	var conds []string
	for _, key := range keys {
		var fieldName string

		if strings.HasPrefix(key.Name, telemetrylogs.BodyJSONStringSearchPrefix) {
			fieldName, _ = telemetrylogs.GetBodyJSONKey(context.Background(), &key, qbtypes.FilterOperatorUnknown, value)
		} else {
			fieldName, _ = v.conditionBuilder.GetTableFieldName(context.Background(), &key)
		}

		var cond string
		// Map our functions to ClickHouse equivalents
		switch functionName {
		case "has":
			cond = fmt.Sprintf("has(%s, %s)", fieldName, v.builder.Var(value[0]))
		case "hasAny":
			cond = fmt.Sprintf("hasAny(%s, %s)", fieldName, v.builder.Var(value))
		case "hasAll":
			cond = fmt.Sprintf("hasAll(%s, %s)", fieldName, v.builder.Var(value))
		}
		conds = append(conds, cond)
	}

	return v.builder.Or(conds...)
}
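
The mapping is direct: has/hasAny/hasAll pass through to the ClickHouse array functions of the same name, with the key first resolved to its column expression. From the test expectations:

// query: has(service.name, 'redis')
// where: WHERE (has(resources_string['service.name'], ?))
// args:  "redis"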

// VisitFunctionParamList handles the parameter list for function calls
func (v *WhereClauseVisitor) VisitFunctionParamList(ctx *FunctionParamListContext) any {
	params := ctx.AllFunctionParam()
	parts := make([]any, len(params))

	for i, param := range params {
		parts[i] = v.Visit(param)
	}

	return parts
}

// VisitFunctionParam handles individual parameters in function calls
func (v *WhereClauseVisitor) VisitFunctionParam(ctx *FunctionParamContext) any {
	if ctx.Key() != nil {
		return v.Visit(ctx.Key())
	} else if ctx.Value() != nil {
		return v.Visit(ctx.Value())
	} else if ctx.Array() != nil {
		return v.Visit(ctx.Array())
	}

	return "" // Should not happen with valid input
}

// VisitArray handles array literals
func (v *WhereClauseVisitor) VisitArray(ctx *ArrayContext) any {
	return v.Visit(ctx.ValueList())
}

// VisitValue handles literal values: strings, numbers, booleans
func (v *WhereClauseVisitor) VisitValue(ctx *ValueContext) any {
	if ctx.QUOTED_TEXT() != nil {
		txt := ctx.QUOTED_TEXT().GetText()
		// trim quotes and return the value
		return strings.Trim(txt, "\"'")
	} else if ctx.NUMBER() != nil {
		number, err := strconv.ParseFloat(ctx.NUMBER().GetText(), 64)
		if err != nil {
			v.errors = append(v.errors, errors.Newf(
				errors.TypeInvalidInput,
				errors.CodeInvalidInput,
				"failed to parse number %s",
				ctx.NUMBER().GetText(),
			))
			return ""
		}
		return number
	} else if ctx.BOOL() != nil {
		// Parse the boolean literal
		boolText := strings.ToLower(ctx.BOOL().GetText())
		return boolText == "true"
	} else if ctx.KEY() != nil {
		// Why do we have a KEY context here?
		// When the user writes an expression like `service.name=redis`,
		// the `redis` part is a VALUE context but is parsed as a KEY token,
		// so we return the text as is
		return ctx.KEY().GetText()
	}

	return "" // Should not happen with valid input
}

// VisitKey handles field/column references
func (v *WhereClauseVisitor) VisitKey(ctx *KeyContext) any {
	fieldKey := telemetrytypes.GetFieldKeyFromKeyText(ctx.KEY().GetText())

	keyName := strings.TrimPrefix(fieldKey.Name, telemetrylogs.BodyJSONStringSearchPrefix)

	fieldKeysForName := v.fieldKeys[keyName]

	// for the body JSON search, we need to search on the body field even
	// if there is a field with the same name as an attribute/resource attribute.
	// Since it will be ORed with fieldKeysForName, the result will not be empty
	// when either of them has values
	if strings.HasPrefix(fieldKey.Name, telemetrylogs.BodyJSONStringSearchPrefix) {
		fieldKeysForName = append(fieldKeysForName, fieldKey)
	}

	// TODO(srikanthccv): do we want to return an error here?
	// should we infer the type and auto-magically build a key for expression?
	if len(fieldKeysForName) == 0 {
		v.errors = append(v.errors, errors.Newf(
			errors.TypeInvalidInput,
			errors.CodeInvalidInput,
			"key `%s` not found",
			fieldKey.Name,
		))
	}

	if len(fieldKeysForName) > 1 {
		// this is a warning state; we should have an unambiguous key
		v.warnings = append(v.warnings, errors.Newf(
			errors.TypeInvalidInput,
			errors.CodeInvalidInput,
			"key `%s` is ambiguous, found %d different combinations of field context and data type: %v",
			fieldKey.Name,
			len(fieldKeysForName),
			fieldKeysForName,
		))
	}

	return fieldKeysForName
}
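
Ambiguity is therefore non-fatal: the visitor records a warning and queries every matching field, as the tests below confirm:

// query: severity_text=error
// where: WHERE (severity_text = ? OR attributes_string['severity_text'] = ?)
// args:  "error", "error"
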
638 pkg/parser/grammar/where_clause_visitor_test.go Normal file
@@ -0,0 +1,638 @@
package parser

import (
	"reflect"
	"strings"
	"testing"

	"github.com/SigNoz/signoz/pkg/telemetrylogs"
	"github.com/SigNoz/signoz/pkg/telemetrytraces"
	"github.com/SigNoz/signoz/pkg/types/telemetrytypes"
)

func TestConvertToClickHouseLogsQuery(t *testing.T) {
	cases := []struct {
		name string
		fieldKeys map[string][]telemetrytypes.TelemetryFieldKey
		query string
		expectedSearchString string
		expectedSearchArgs []any
	}{
		{
			name: "test-simple-service-name-filter",
			fieldKeys: map[string][]telemetrytypes.TelemetryFieldKey{
				"service.name": {
					{
						Name: "service.name",
						Signal: telemetrytypes.SignalLogs,
						FieldContext: telemetrytypes.FieldContextResource,
						FieldDataType: telemetrytypes.FieldDataTypeString,
					},
				},
			},
			query: "service.name=redis",
			expectedSearchString: "WHERE (resources_string['service.name'] = ?)",
			expectedSearchArgs: []any{"redis"},
		},
		{
			name: "test-simple-service-name-filter-with-materialised-column",
			fieldKeys: map[string][]telemetrytypes.TelemetryFieldKey{
				"service.name": {
					{
						Name: "service.name",
						Signal: telemetrytypes.SignalLogs,
						FieldContext: telemetrytypes.FieldContextResource,
						FieldDataType: telemetrytypes.FieldDataTypeString,
						Materialized: true,
					},
				},
			},
			query: "service.name=redis",
			expectedSearchString: "WHERE (resource_string_service$$name = ?)",
			expectedSearchArgs: []any{"redis"},
		},
		{
			name: "http-status-code-multiple-data-types",
			fieldKeys: map[string][]telemetrytypes.TelemetryFieldKey{
				"http.status_code": {
					{
						Name: "http.status_code",
						Signal: telemetrytypes.SignalLogs,
						FieldContext: telemetrytypes.FieldContextAttribute,
						FieldDataType: telemetrytypes.FieldDataTypeFloat64,
					},
					{
						Name: "http.status_code",
						Signal: telemetrytypes.SignalLogs,
						FieldContext: telemetrytypes.FieldContextAttribute,
						FieldDataType: telemetrytypes.FieldDataTypeString,
					},
				},
			},
			query: "http.status_code=200",
			expectedSearchString: "WHERE (attributes_number['http.status_code'] = ? OR toFloat64OrNull(attributes_string['http.status_code']) = ?)",
			expectedSearchArgs: []any{float64(200), float64(200)},
		},
		{
			name: "http-status-code-multiple-data-types-between-operator",
			fieldKeys: map[string][]telemetrytypes.TelemetryFieldKey{
				"http.status_code": {
					{
						Name: "http.status_code",
						Signal: telemetrytypes.SignalLogs,
						FieldContext: telemetrytypes.FieldContextAttribute,
						FieldDataType: telemetrytypes.FieldDataTypeFloat64,
					},
					{
						Name: "http.status_code",
						Signal: telemetrytypes.SignalLogs,
						FieldContext: telemetrytypes.FieldContextAttribute,
						FieldDataType: telemetrytypes.FieldDataTypeString,
					},
				},
			},
			query: "http.status_code between 200 and 300",
			expectedSearchString: "WHERE (attributes_number['http.status_code'] BETWEEN ? AND ? OR toFloat64OrNull(attributes_string['http.status_code']) BETWEEN ? AND ?)",
			expectedSearchArgs: []any{float64(200), float64(300), float64(200), float64(300)},
		},
		{
			name: "response-body-multiple-data-types-string-contains",
			fieldKeys: map[string][]telemetrytypes.TelemetryFieldKey{
				"response.body": {
					{
						Name: "response.body",
						Signal: telemetrytypes.SignalLogs,
						FieldContext: telemetrytypes.FieldContextAttribute,
						FieldDataType: telemetrytypes.FieldDataTypeFloat64,
					},
					{
						Name: "response.body",
						Signal: telemetrytypes.SignalLogs,
						FieldContext: telemetrytypes.FieldContextAttribute,
						FieldDataType: telemetrytypes.FieldDataTypeString,
					},
				},
			},
			query: "response.body contains error",
			expectedSearchString: "WHERE (LOWER(toString(attributes_number['response.body'])) LIKE LOWER(?) OR LOWER(attributes_string['response.body']) LIKE LOWER(?))",
			expectedSearchArgs: []any{"%error%", "%error%"},
		},
		{
			name: "search-on-top-level-key",
			fieldKeys: map[string][]telemetrytypes.TelemetryFieldKey{
				"severity_text": {
					{
						Name: "severity_text",
						Signal: telemetrytypes.SignalLogs,
						FieldContext: telemetrytypes.FieldContextLog,
						FieldDataType: telemetrytypes.FieldDataTypeString,
					},
				},
			},
			query: "severity_text=error",
			expectedSearchString: "WHERE (severity_text = ?)",
			expectedSearchArgs: []any{"error"},
		},
		{
			name: "search-on-top-level-key-conflict-with-attribute",
			fieldKeys: map[string][]telemetrytypes.TelemetryFieldKey{
				"severity_text": {
					{
						Name: "severity_text",
						Signal: telemetrytypes.SignalLogs,
						FieldContext: telemetrytypes.FieldContextLog,
						FieldDataType: telemetrytypes.FieldDataTypeString,
					},
					{
						Name: "severity_text",
						Signal: telemetrytypes.SignalLogs,
						FieldContext: telemetrytypes.FieldContextAttribute,
						FieldDataType: telemetrytypes.FieldDataTypeString,
					},
				},
			},
			query: "severity_text=error",
			expectedSearchString: "WHERE (severity_text = ? OR attributes_string['severity_text'] = ?)",
			expectedSearchArgs: []any{"error", "error"},
		},
		{
			name: "collision-with-attribute-field-and-resource-attribute",
			fieldKeys: map[string][]telemetrytypes.TelemetryFieldKey{
				"k8s.namespace.name": {
					{
						Name: "k8s.namespace.name",
						Signal: telemetrytypes.SignalLogs,
						FieldContext: telemetrytypes.FieldContextResource,
						FieldDataType: telemetrytypes.FieldDataTypeString,
					},
					{
						Name: "k8s.namespace.name",
						Signal: telemetrytypes.SignalLogs,
						FieldContext: telemetrytypes.FieldContextAttribute,
						FieldDataType: telemetrytypes.FieldDataTypeString,
					},
				},
			},
			query: "k8s.namespace.name=test",
			expectedSearchString: "WHERE (resources_string['k8s.namespace.name'] = ? OR attributes_string['k8s.namespace.name'] = ?)",
			expectedSearchArgs: []any{"test", "test"},
		},
		{
			name: "collision-with-attribute-field-and-resource-attribute-materialised-column",
			fieldKeys: map[string][]telemetrytypes.TelemetryFieldKey{
				"k8s.namespace.name": {
					{
						Name: "k8s.namespace.name",
						Signal: telemetrytypes.SignalLogs,
						FieldContext: telemetrytypes.FieldContextResource,
						FieldDataType: telemetrytypes.FieldDataTypeString,
						Materialized: true,
					},
					{
						Name: "k8s.namespace.name",
						Signal: telemetrytypes.SignalLogs,
						FieldContext: telemetrytypes.FieldContextAttribute,
						FieldDataType: telemetrytypes.FieldDataTypeString,
					},
				},
			},
			query: "k8s.namespace.name=test",
			expectedSearchString: "WHERE (resource_string_k8s$$namespace$$name = ? OR attributes_string['k8s.namespace.name'] = ?)",
			expectedSearchArgs: []any{"test", "test"},
		},
		{
			name: "boolean-collision-with-attribute-field-and-data-type-boolean",
			fieldKeys: map[string][]telemetrytypes.TelemetryFieldKey{
				"did_user_login": {
					{
						Name: "did_user_login",
						Signal: telemetrytypes.SignalLogs,
						FieldContext: telemetrytypes.FieldContextAttribute,
						FieldDataType: telemetrytypes.FieldDataTypeBool,
					},
					{
						Name: "did_user_login",
						Signal: telemetrytypes.SignalLogs,
						FieldContext: telemetrytypes.FieldContextAttribute,
						FieldDataType: telemetrytypes.FieldDataTypeString,
					},
				},
			},
			query: "did_user_login=true",
			expectedSearchString: "WHERE (attributes_bool['did_user_login'] = ? OR attributes_string['did_user_login'] = ?)",
			expectedSearchArgs: []any{true, "true"},
		},
		{
			name: "regexp-search",
			fieldKeys: map[string][]telemetrytypes.TelemetryFieldKey{
				"k8s.namespace.name": {
					{
						Name: "k8s.namespace.name",
						Signal: telemetrytypes.SignalLogs,
						FieldContext: telemetrytypes.FieldContextAttribute,
						FieldDataType: telemetrytypes.FieldDataTypeString,
					},
				},
				"service.name": {
					{
						Name: "service.name",
						Signal: telemetrytypes.SignalLogs,
						FieldContext: telemetrytypes.FieldContextResource,
						FieldDataType: telemetrytypes.FieldDataTypeString,
					},
				},
			},
			query: "k8s.namespace.name REGEXP 'test' OR service.name='redis'",
			expectedSearchString: "WHERE (((match(attributes_string['k8s.namespace.name'], ?))) OR (resources_string['service.name'] = ?))",
			expectedSearchArgs: []any{"test", "redis"},
		},
		{
			name: "full-text-search-multiple-words",
			fieldKeys: map[string][]telemetrytypes.TelemetryFieldKey{},
			query: "waiting for response",
			expectedSearchString: "WHERE ((match(body, ?)) AND (match(body, ?)) AND (match(body, ?)))",
			expectedSearchArgs: []any{"waiting", "for", "response"},
		},
		{
			name: "full-text-search-with-phrase-search",
			fieldKeys: map[string][]telemetrytypes.TelemetryFieldKey{},
			query: `"waiting for response"`,
			expectedSearchString: "WHERE (match(body, ?))",
			expectedSearchArgs: []any{"waiting for response"},
		},
		{
			name: "full-text-search-with-word-and-not-word",
			fieldKeys: map[string][]telemetrytypes.TelemetryFieldKey{},
			query: "error NOT buggy_app",
			expectedSearchString: "WHERE ((match(body, ?)) AND NOT ((match(body, ?))))",
			expectedSearchArgs: []any{"error", "buggy_app"},
		},
		{
			name: "full-text-search-with-word-and-not-word-and-not-word",
			fieldKeys: map[string][]telemetrytypes.TelemetryFieldKey{},
			query: "error NOT buggy_app NOT redis",
			expectedSearchString: "WHERE ((match(body, ?)) AND NOT ((match(body, ?))) AND NOT ((match(body, ?))))",
			expectedSearchArgs: []any{"error", "buggy_app", "redis"},
		},
		{
			name: "full-text-search-with-word-and-not-word-and-not-word-tricky",
			fieldKeys: map[string][]telemetrytypes.TelemetryFieldKey{},
			query: "error NOT buggy_app OR redis",
			expectedSearchString: "WHERE (((match(body, ?)) AND NOT ((match(body, ?)))) OR (match(body, ?)))",
			expectedSearchArgs: []any{"error", "buggy_app", "redis"},
		},
		{
			name: "has-function",
			fieldKeys: map[string][]telemetrytypes.TelemetryFieldKey{
				"service.name": {
					{
						Name: "service.name",
						Signal: telemetrytypes.SignalLogs,
						FieldContext: telemetrytypes.FieldContextResource,
					},
				},
				"payload.user_ids": {
					{
						Name: "payload.user_ids",
						Signal: telemetrytypes.SignalLogs,
						FieldContext: telemetrytypes.FieldContextAttribute,
					},
				},
			},
			query: "has(service.name, 'redis')",
			expectedSearchString: "WHERE (has(resources_string['service.name'], ?))",
			expectedSearchArgs: []any{"redis"},
		},
		{
			name: "has-from-list-of-values",
			fieldKeys: map[string][]telemetrytypes.TelemetryFieldKey{},
			query: "has(body.payload.user_ids[*], 'u1292')",
			expectedSearchString: "WHERE (has(JSONExtract(JSON_QUERY(body, '$.payload.user_ids[*]'), 'Array(String)'), ?))"
|
||||
expectedSearchArgs: []any{"u1292"},
|
||||
},
|
||||
{
|
||||
name: "body-json-search-that-also-has-attribute-with-same-name",
|
||||
fieldKeys: map[string][]telemetrytypes.TelemetryFieldKey{
|
||||
"http.status_code": {
|
||||
{
|
||||
Name: "http.status_code",
|
||||
Signal: telemetrytypes.SignalLogs,
|
||||
FieldContext: telemetrytypes.FieldContextAttribute,
|
||||
FieldDataType: telemetrytypes.FieldDataTypeFloat64,
|
||||
Materialized: true,
|
||||
},
|
||||
},
|
||||
},
|
||||
query: "body.http.status_code=200",
|
||||
expectedSearchString: "WHERE (attribute_number_http$$status_code = ? OR JSONExtract(JSON_VALUE(body, '$.http.status_code'), 'Float64') = ?)",
|
||||
expectedSearchArgs: []any{float64(200), float64(200)},
|
||||
},
|
||||
}
|
||||
|
||||
for _, c := range cases {
|
||||
t.Logf("running test %s", c.name)
|
||||
chQuery, chQueryArgs, _, err := PrepareWhereClause(c.query, c.fieldKeys, telemetrylogs.NewConditionBuilder(), telemetrytypes.TelemetryFieldKey{
|
||||
Name: "body",
|
||||
Signal: telemetrytypes.SignalLogs,
|
||||
FieldContext: telemetrytypes.FieldContextLog,
|
||||
FieldDataType: telemetrytypes.FieldDataTypeString,
|
||||
})
|
||||
if err != nil {
|
||||
t.Errorf("Error converting query to ClickHouse: %v", err)
|
||||
}
|
||||
if chQuery != c.expectedSearchString {
|
||||
t.Errorf("Expected %s, got %s", c.expectedSearchString, chQuery)
|
||||
}
|
||||
if !reflect.DeepEqual(chQueryArgs, c.expectedSearchArgs) {
|
||||
for i, arg := range chQueryArgs {
|
||||
t.Logf("Expected %v with type %T, got %v with type %T\n", c.expectedSearchArgs[i], c.expectedSearchArgs[i], arg, arg)
|
||||
}
|
||||
t.Errorf("Expected %v, got %v", c.expectedSearchArgs, chQueryArgs)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
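The `?` placeholders in each `expectedSearchString` bind positionally to the values in `expectedSearchArgs`, so a caller passes the generated clause and its arguments to the driver together. A minimal sketch of that flow (the `conn` value, `ctx`, and the table name are illustrative assumptions, not part of this change; `conn` is assumed to be a clickhouse-go `driver.Conn`):

	// Sketch only: consuming a clause produced by PrepareWhereClause.
	fieldKeys := map[string][]telemetrytypes.TelemetryFieldKey{
		"severity_text": {{
			Name: "severity_text",
			Signal: telemetrytypes.SignalLogs,
			FieldContext: telemetrytypes.FieldContextLog,
			FieldDataType: telemetrytypes.FieldDataTypeString,
		}},
	}
	whereClause, args, _, err := PrepareWhereClause(
		"severity_text=error",
		fieldKeys,
		telemetrylogs.NewConditionBuilder(),
		telemetrytypes.TelemetryFieldKey{Name: "body", Signal: telemetrytypes.SignalLogs},
	)
	if err != nil {
		return err
	}
	// Table name below is hypothetical; args fill the ?-placeholders in order.
	rows, err := conn.Query(ctx, "SELECT timestamp, body FROM signoz_logs.logs_v2 "+whereClause, args...)
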
func TestConvertToClickHouseSpansQuery(t *testing.T) {
	cases := []struct {
		name string
		fieldKeys map[string][]telemetrytypes.TelemetryFieldKey
		query string
		expectedSearchString string
		expectedSearchArgs []any
	}{
		{
			name: "test-simple-service-name-filter",
			fieldKeys: map[string][]telemetrytypes.TelemetryFieldKey{
				"service.name": {
					{
						Name: "service.name",
						Signal: telemetrytypes.SignalTraces,
						FieldContext: telemetrytypes.FieldContextResource,
						FieldDataType: telemetrytypes.FieldDataTypeString,
					},
				},
			},
			query: "service.name=redis",
			expectedSearchString: "WHERE (resources_string['service.name'] = ?)",
			expectedSearchArgs: []any{"redis"},
		},
		{
			name: "test-simple-service-name-filter-with-materialised-column",
			fieldKeys: map[string][]telemetrytypes.TelemetryFieldKey{
				"service.name": {
					{
						Name: "service.name",
						Signal: telemetrytypes.SignalTraces,
						FieldContext: telemetrytypes.FieldContextResource,
						FieldDataType: telemetrytypes.FieldDataTypeString,
						Materialized: true,
					},
				},
			},
			query: "service.name=redis",
			expectedSearchString: "WHERE (resource_string_service$$name = ?)",
			expectedSearchArgs: []any{"redis"},
		},
		{
			name: "http-status-code-multiple-data-types",
			fieldKeys: map[string][]telemetrytypes.TelemetryFieldKey{
				"http.status_code": {
					{
						Name: "http.status_code",
						Signal: telemetrytypes.SignalTraces,
						FieldContext: telemetrytypes.FieldContextAttribute,
						FieldDataType: telemetrytypes.FieldDataTypeFloat64,
					},
					{
						Name: "http.status_code",
						Signal: telemetrytypes.SignalTraces,
						FieldContext: telemetrytypes.FieldContextAttribute,
						FieldDataType: telemetrytypes.FieldDataTypeString,
					},
				},
			},
			query: "http.status_code=200",
			expectedSearchString: "WHERE (attributes_number['http.status_code'] = ? OR toFloat64OrNull(attributes_string['http.status_code']) = ?)",
			expectedSearchArgs: []any{float64(200), float64(200)},
		},
		{
			name: "http-status-code-multiple-data-types-between-operator",
			fieldKeys: map[string][]telemetrytypes.TelemetryFieldKey{
				"http.status_code": {
					{
						Name: "http.status_code",
						Signal: telemetrytypes.SignalTraces,
						FieldContext: telemetrytypes.FieldContextAttribute,
						FieldDataType: telemetrytypes.FieldDataTypeFloat64,
					},
					{
						Name: "http.status_code",
						Signal: telemetrytypes.SignalTraces,
						FieldContext: telemetrytypes.FieldContextAttribute,
						FieldDataType: telemetrytypes.FieldDataTypeString,
					},
				},
			},
			query: "http.status_code between 200 and 300",
			expectedSearchString: "WHERE (attributes_number['http.status_code'] BETWEEN ? AND ? OR toFloat64OrNull(attributes_string['http.status_code']) BETWEEN ? AND ?)",
			expectedSearchArgs: []any{float64(200), float64(300), float64(200), float64(300)},
		},
		{
			name: "response-body-multiple-data-types-string-contains",
			fieldKeys: map[string][]telemetrytypes.TelemetryFieldKey{
				"response.body": {
					{
						Name: "response.body",
						Signal: telemetrytypes.SignalTraces,
						FieldContext: telemetrytypes.FieldContextAttribute,
						FieldDataType: telemetrytypes.FieldDataTypeFloat64,
					},
					{
						Name: "response.body",
						Signal: telemetrytypes.SignalTraces,
						FieldContext: telemetrytypes.FieldContextAttribute,
						FieldDataType: telemetrytypes.FieldDataTypeString,
					},
				},
			},
			query: "response.body contains error",
			expectedSearchString: "WHERE (LOWER(toString(attributes_number['response.body'])) LIKE LOWER(?) OR LOWER(attributes_string['response.body']) LIKE LOWER(?))",
			expectedSearchArgs: []any{"%error%", "%error%"},
		},
		{
			name: "collision-with-attribute-field-and-resource-attribute",
			fieldKeys: map[string][]telemetrytypes.TelemetryFieldKey{
				"k8s.namespace.name": {
					{
						Name: "k8s.namespace.name",
						Signal: telemetrytypes.SignalTraces,
						FieldContext: telemetrytypes.FieldContextResource,
						FieldDataType: telemetrytypes.FieldDataTypeString,
					},
					{
						Name: "k8s.namespace.name",
						Signal: telemetrytypes.SignalTraces,
						FieldContext: telemetrytypes.FieldContextAttribute,
						FieldDataType: telemetrytypes.FieldDataTypeString,
					},
				},
			},
			query: "k8s.namespace.name=test",
			expectedSearchString: "WHERE (resources_string['k8s.namespace.name'] = ? OR attributes_string['k8s.namespace.name'] = ?)",
			expectedSearchArgs: []any{"test", "test"},
		},
		{
			name: "collision-with-attribute-field-and-resource-attribute-materialised-column",
			fieldKeys: map[string][]telemetrytypes.TelemetryFieldKey{
				"k8s.namespace.name": {
					{
						Name: "k8s.namespace.name",
						Signal: telemetrytypes.SignalTraces,
						FieldContext: telemetrytypes.FieldContextResource,
						FieldDataType: telemetrytypes.FieldDataTypeString,
						Materialized: true,
					},
					{
						Name: "k8s.namespace.name",
						Signal: telemetrytypes.SignalTraces,
						FieldContext: telemetrytypes.FieldContextAttribute,
						FieldDataType: telemetrytypes.FieldDataTypeString,
					},
				},
			},
			query: "k8s.namespace.name=test",
			expectedSearchString: "WHERE (resource_string_k8s$$namespace$$name = ? OR attributes_string['k8s.namespace.name'] = ?)",
			expectedSearchArgs: []any{"test", "test"},
		},
		{
			name: "boolean-collision-with-attribute-field-and-data-type-boolean",
			fieldKeys: map[string][]telemetrytypes.TelemetryFieldKey{
				"did_user_login": {
					{
						Name: "did_user_login",
						Signal: telemetrytypes.SignalTraces,
						FieldContext: telemetrytypes.FieldContextAttribute,
						FieldDataType: telemetrytypes.FieldDataTypeBool,
					},
					{
						Name: "did_user_login",
						Signal: telemetrytypes.SignalTraces,
						FieldContext: telemetrytypes.FieldContextAttribute,
						FieldDataType: telemetrytypes.FieldDataTypeString,
					},
				},
			},
			query: "did_user_login=true",
			expectedSearchString: "WHERE (attributes_bool['did_user_login'] = ? OR attributes_string['did_user_login'] = ?)",
			expectedSearchArgs: []any{true, "true"},
		},
		{
			name: "regexp-search",
			fieldKeys: map[string][]telemetrytypes.TelemetryFieldKey{
				"k8s.namespace.name": {
					{
						Name: "k8s.namespace.name",
						Signal: telemetrytypes.SignalTraces,
						FieldContext: telemetrytypes.FieldContextAttribute,
						FieldDataType: telemetrytypes.FieldDataTypeString,
					},
				},
				"service.name": {
					{
						Name: "service.name",
						Signal: telemetrytypes.SignalTraces,
						FieldContext: telemetrytypes.FieldContextResource,
						FieldDataType: telemetrytypes.FieldDataTypeString,
					},
				},
			},
			query: "k8s.namespace.name REGEXP 'test' OR service.name='redis'",
			expectedSearchString: "WHERE (((match(attributes_string['k8s.namespace.name'], ?))) OR (resources_string['service.name'] = ?))",
			expectedSearchArgs: []any{"test", "redis"},
		},
	}

	for _, c := range cases {
		chQuery, chQueryArgs, _, err := PrepareWhereClause(c.query, c.fieldKeys, telemetrytraces.NewConditionBuilder(), telemetrytypes.TelemetryFieldKey{
			Name: "dummy",
			Signal: telemetrytypes.SignalTraces,
			FieldContext: telemetrytypes.FieldContextSpan,
			FieldDataType: telemetrytypes.FieldDataTypeString,
		})
		if err != nil {
			t.Errorf("Error converting query to ClickHouse: %v", err)
		}
		if chQuery != c.expectedSearchString {
			t.Errorf("Expected %s, got %s", c.expectedSearchString, chQuery)
		}
		if !reflect.DeepEqual(chQueryArgs, c.expectedSearchArgs) {
			for i, arg := range chQueryArgs {
				t.Logf("Expected %v with type %T, got %v with type %T\n", c.expectedSearchArgs[i], c.expectedSearchArgs[i], arg, arg)
			}
			t.Errorf("Expected %v, got %v", c.expectedSearchArgs, chQueryArgs)
		}
	}
}
func TestConvertToClickHouseSpansQueryWithErrors(t *testing.T) {
	cases := []struct {
		name string
		fieldKeys map[string][]telemetrytypes.TelemetryFieldKey
		query string
		expectedSearchString string
		expectedSearchArgs []any
		expectedErrorSubString string
		expectedWarnings []error
	}{
		{
			name: "has-function-with-multiple-values",
			fieldKeys: map[string][]telemetrytypes.TelemetryFieldKey{},
			query: "key.that.does.not.exist = 'redis'",
			expectedSearchString: "",
			expectedSearchArgs: []any{},
			expectedErrorSubString: "key `key.that.does.not.exist` not found",
			expectedWarnings: []error{},
		},
		{
			name: "unknown-function",
			fieldKeys: map[string][]telemetrytypes.TelemetryFieldKey{},
			query: "unknown.function()",
			expectedSearchString: "",
			expectedSearchArgs: []any{},
			expectedErrorSubString: "expecting {'(', NOT, HAS, HASANY, HASALL, QUOTED_TEXT, KEY, FREETEXT}",
			expectedWarnings: []error{},
		},
		{
			name: "has-function-not-enough-params",
			fieldKeys: map[string][]telemetrytypes.TelemetryFieldKey{},
			query: "has(key.that.does.not.exist)",
			expectedSearchString: "",
			expectedSearchArgs: []any{},
			expectedErrorSubString: "function `has` expects key and value parameters",
			expectedWarnings: []error{},
		},
	}

	for _, c := range cases {
		_, _, warnings, err := PrepareWhereClause(c.query, c.fieldKeys, telemetrytraces.NewConditionBuilder(), telemetrytypes.TelemetryFieldKey{
			Name: "dummy",
			Signal: telemetrytypes.SignalTraces,
			FieldContext: telemetrytypes.FieldContextSpan,
			FieldDataType: telemetrytypes.FieldDataTypeString,
		})
		if err != nil {
			if !strings.Contains(err.Error(), c.expectedErrorSubString) {
				t.Errorf("Expected error %v, got %v", c.expectedErrorSubString, err)
			}
		}

		if len(warnings) != len(c.expectedWarnings) {
			t.Errorf("Expected %d warnings, got %d", len(c.expectedWarnings), len(warnings))
		}
		for i, warning := range warnings {
			if warning.Error() != c.expectedWarnings[i].Error() {
				t.Errorf("Expected warning %d to be %v, got %v", i, c.expectedWarnings[i], warning)
			}
		}
	}
}
@@ -1085,7 +1085,7 @@ func (r *ClickHouseReader) GetWaterfallSpansForTraceWithMetadata(ctx context.Con
			item.Attributes_string[k] = fmt.Sprintf("%v", v)
		}
		for k, v := range item.Attributes_number {
			item.Attributes_string[k] = fmt.Sprintf("%v", v)
			item.Attributes_string[k] = strconv.FormatFloat(v, 'f', -1, 64)
		}
		for k, v := range item.Resources_string {
			item.Attributes_string[k] = v
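The move from `fmt.Sprintf("%v", v)` to `strconv.FormatFloat(v, 'f', -1, 64)` changes how large-magnitude values render: `%v` falls back to scientific notation, while the `'f'` verb always emits plain decimal. A self-contained illustration:

	package main

	import (
		"fmt"
		"strconv"
	)

	func main() {
		v := 1e21 // e.g. a large numeric attribute value
		fmt.Printf("%v\n", v)                            // prints: 1e+21 (scientific notation)
		fmt.Println(strconv.FormatFloat(v, 'f', -1, 64)) // prints: 1000000000000000000000
	}
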
@@ -3928,11 +3928,16 @@ func (r *ClickHouseReader) GetLogAttributeKeys(ctx context.Context, req *v3.Filt
	var rows driver.Rows
	var response v3.FilterAttributeKeyResponse

	tagTypeFilter := `tag_type != 'logfield'`
	if req.TagType != "" {
		tagTypeFilter = fmt.Sprintf(`tag_type != 'logfield' and tag_type = '%s'`, req.TagType)
	}

	if len(req.SearchText) != 0 {
		query = fmt.Sprintf("select distinct tag_key, tag_type, tag_data_type from %s.%s where tag_type != 'logfield' and tag_key ILIKE $1 limit $2", r.logsDB, r.logsTagAttributeTableV2)
		query = fmt.Sprintf("select distinct tag_key, tag_type, tag_data_type from %s.%s where %s and tag_key ILIKE $1 limit $2", r.logsDB, r.logsTagAttributeTableV2, tagTypeFilter)
		rows, err = r.db.Query(ctx, query, fmt.Sprintf("%%%s%%", req.SearchText), req.Limit)
	} else {
		query = fmt.Sprintf("select distinct tag_key, tag_type, tag_data_type from %s.%s where tag_type != 'logfield' limit $1", r.logsDB, r.logsTagAttributeTableV2)
		query = fmt.Sprintf("select distinct tag_key, tag_type, tag_data_type from %s.%s where %s limit $1", r.logsDB, r.logsTagAttributeTableV2, tagTypeFilter)
		rows, err = r.db.Query(ctx, query, req.Limit)
	}

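For a concrete reading of the change: the tag-type predicate is now built once and spliced into both query branches. With `req.TagType == "resource"` the search branch renders roughly as below; the database and table names come from `r.logsDB` and `r.logsTagAttributeTableV2`, so the concrete names shown here are assumptions. Note that `req.TagType` is interpolated via `Sprintf` rather than bound as a parameter, so callers are expected to pass a trusted enum value:

	// Approximate rendered query for req.TagType == "resource" (db/table names assumed):
	// select distinct tag_key, tag_type, tag_data_type
	// from signoz_logs.distributed_tag_attributes_v2
	// where tag_type != 'logfield' and tag_type = 'resource' and tag_key ILIKE $1 limit $2
	//
	// With an empty req.TagType the filter stays `tag_type != 'logfield'`,
	// preserving the previous behavior.
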
@@ -3967,13 +3972,16 @@ func (r *ClickHouseReader) GetLogAttributeKeys(ctx context.Context, req *v3.Filt
		response.AttributeKeys = append(response.AttributeKeys, key)
	}

	// add other attributes
	for _, f := range constants.StaticFieldsLogsV3 {
		if (v3.AttributeKey{} == f) {
			continue
		}
		if len(req.SearchText) == 0 || strings.Contains(f.Key, req.SearchText) {
			response.AttributeKeys = append(response.AttributeKeys, f)
	// add other attributes only when the tagType is not specified,
	// i.e. retrieve all attributes
	if req.TagType == "" {
		for _, f := range constants.StaticFieldsLogsV3 {
			if (v3.AttributeKey{} == f) {
				continue
			}
			if len(req.SearchText) == 0 || strings.Contains(f.Key, req.SearchText) {
				response.AttributeKeys = append(response.AttributeKeys, f)
			}
		}
	}

@@ -4233,11 +4241,12 @@ func readRow(vars []interface{}, columnNames []string, countOfNumberCols int) ([
			isValidPoint = true
			point.Value = float64(reflect.ValueOf(v).Elem().Float())
		} else {
			groupBy = append(groupBy, fmt.Sprintf("%v", reflect.ValueOf(v).Elem().Float()))
			val := strconv.FormatFloat(reflect.ValueOf(v).Elem().Float(), 'f', -1, 64)
			groupBy = append(groupBy, val)
			if _, ok := groupAttributes[colName]; !ok {
				groupAttributesArray = append(groupAttributesArray, map[string]string{colName: fmt.Sprintf("%v", reflect.ValueOf(v).Elem().Float())})
				groupAttributesArray = append(groupAttributesArray, map[string]string{colName: val})
			}
			groupAttributes[colName] = fmt.Sprintf("%v", reflect.ValueOf(v).Elem().Float())
			groupAttributes[colName] = val
		}
	case **float64, **float32:
		val := reflect.ValueOf(v)
@@ -4247,11 +4256,12 @@ func readRow(vars []interface{}, columnNames []string, countOfNumberCols int) ([
			isValidPoint = true
			point.Value = value
		} else {
			groupBy = append(groupBy, fmt.Sprintf("%v", value))
			val := strconv.FormatFloat(value, 'f', -1, 64)
			groupBy = append(groupBy, val)
			if _, ok := groupAttributes[colName]; !ok {
				groupAttributesArray = append(groupAttributesArray, map[string]string{colName: fmt.Sprintf("%v", value)})
				groupAttributesArray = append(groupAttributesArray, map[string]string{colName: val})
			}
			groupAttributes[colName] = fmt.Sprintf("%v", value)
			groupAttributes[colName] = val
		}
	}
	case *uint, *uint8, *uint64, *uint16, *uint32:
@@ -4715,7 +4725,12 @@ func (r *ClickHouseReader) GetTraceAttributeKeys(ctx context.Context, req *v3.Fi
	var rows driver.Rows
	var response v3.FilterAttributeKeyResponse

	query = fmt.Sprintf("SELECT DISTINCT(tag_key), tag_type, tag_data_type FROM %s.%s WHERE tag_key ILIKE $1 and tag_type != 'spanfield' LIMIT $2", r.TraceDB, r.spanAttributeTableV2)
	tagTypeFilter := `tag_type != 'spanfield'`
	if req.TagType != "" {
		tagTypeFilter = fmt.Sprintf(`tag_type != 'spanfield' and tag_type = '%s'`, req.TagType)
	}

	query = fmt.Sprintf("SELECT DISTINCT(tag_key), tag_type, tag_data_type FROM %s.%s WHERE tag_key ILIKE $1 and %s LIMIT $2", r.TraceDB, r.spanAttributeTableV2, tagTypeFilter)

	rows, err = r.db.Query(ctx, query, fmt.Sprintf("%%%s%%", req.SearchText), req.Limit)

@@ -4760,13 +4775,16 @@ func (r *ClickHouseReader) GetTraceAttributeKeys(ctx context.Context, req *v3.Fi
		fields = constants.DeprecatedStaticFieldsTraces
	}

	// add the new static fields
	for _, f := range fields {
		if (v3.AttributeKey{} == f) {
			continue
		}
		if len(req.SearchText) == 0 || strings.Contains(f.Key, req.SearchText) {
			response.AttributeKeys = append(response.AttributeKeys, f)
	// add the new static fields only when the tagType is not specified,
	// i.e. retrieve all attributes
	if req.TagType == "" {
		for _, f := range fields {
			if (v3.AttributeKey{} == f) {
				continue
			}
			if len(req.SearchText) == 0 || strings.Contains(f.Key, req.SearchText) {
				response.AttributeKeys = append(response.AttributeKeys, f)
			}
		}
	}

@@ -6873,7 +6891,7 @@ func (r *ClickHouseReader) SearchTracesV2(ctx context.Context, params *model.Sea
			item.Attributes_string[k] = fmt.Sprintf("%v", v)
		}
		for k, v := range item.Attributes_number {
			item.Attributes_string[k] = fmt.Sprintf("%v", v)
			item.Attributes_string[k] = strconv.FormatFloat(v, 'f', -1, 64)
		}
		for k, v := range item.Resources_string {
			item.Attributes_string[k] = v

@@ -8,68 +8,59 @@ import (
	"time"

	"github.com/SigNoz/signoz/pkg/query-service/model"
	"github.com/google/uuid"
	"github.com/jmoiron/sqlx"
	"github.com/SigNoz/signoz/pkg/sqlstore"
	"github.com/SigNoz/signoz/pkg/types"
	"github.com/SigNoz/signoz/pkg/valuer"
)

type cloudProviderAccountsRepository interface {
	listConnected(ctx context.Context, cloudProvider string) ([]AccountRecord, *model.ApiError)
	listConnected(ctx context.Context, orgId string, provider string) ([]types.CloudIntegration, *model.ApiError)

	get(ctx context.Context, cloudProvider string, id string) (*AccountRecord, *model.ApiError)
	get(ctx context.Context, orgId string, provider string, id string) (*types.CloudIntegration, *model.ApiError)

	getConnectedCloudAccount(
		ctx context.Context, cloudProvider string, cloudAccountId string,
	) (*AccountRecord, *model.ApiError)
	getConnectedCloudAccount(ctx context.Context, orgId string, provider string, accountID string) (*types.CloudIntegration, *model.ApiError)

	// Insert an account or update it by (cloudProvider, id)
	// for specified non-empty fields
	upsert(
		ctx context.Context,
		cloudProvider string,
		orgId string,
		provider string,
		id *string,
		config *AccountConfig,
		cloudAccountId *string,
		agentReport *AgentReport,
		config *types.AccountConfig,
		accountId *string,
		agentReport *types.AgentReport,
		removedAt *time.Time,
	) (*AccountRecord, *model.ApiError)
	) (*types.CloudIntegration, *model.ApiError)
}

func newCloudProviderAccountsRepository(db *sqlx.DB) (
func newCloudProviderAccountsRepository(store sqlstore.SQLStore) (
	*cloudProviderAccountsSQLRepository, error,
) {
	return &cloudProviderAccountsSQLRepository{
		db: db,
		store: store,
	}, nil
}

type cloudProviderAccountsSQLRepository struct {
	db *sqlx.DB
	store sqlstore.SQLStore
}

func (r *cloudProviderAccountsSQLRepository) listConnected(
	ctx context.Context, cloudProvider string,
) ([]AccountRecord, *model.ApiError) {
	accounts := []AccountRecord{}
	ctx context.Context, orgId string, cloudProvider string,
) ([]types.CloudIntegration, *model.ApiError) {
	accounts := []types.CloudIntegration{}

	err := r.store.BunDB().NewSelect().
		Model(&accounts).
		Where("org_id = ?", orgId).
		Where("provider = ?", cloudProvider).
		Where("removed_at is NULL").
		Where("account_id is not NULL").
		Where("last_agent_report is not NULL").
		Order("created_at").
		Scan(ctx)

	err := r.db.SelectContext(
		ctx, &accounts, `
			select
				cloud_provider,
				id,
				config_json,
				cloud_account_id,
				last_agent_report_json,
				created_at,
				removed_at
			from cloud_integrations_accounts
			where
				cloud_provider=$1
				and removed_at is NULL
				and cloud_account_id is not NULL
				and last_agent_report_json is not NULL
			order by created_at
		`, cloudProvider,
	)
	if err != nil {
		return nil, model.InternalError(fmt.Errorf(
			"could not query connected cloud accounts: %w", err,
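For readers new to bun, the builder chain above replaces the hand-written SQL retained below it in this diff. It should expand to roughly the following; the table name is dictated by the `types.CloudIntegration` model definition, which is not shown here, so treat the concrete name as an assumption:

	// Approximate SQL produced by the bun SELECT above (table name assumed):
	// SELECT * FROM cloud_integration
	// WHERE org_id = ? AND provider = ? AND removed_at IS NULL
	//   AND account_id IS NOT NULL AND last_agent_report IS NOT NULL
	// ORDER BY created_at
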
@@ -80,27 +71,16 @@ func (r *cloudProviderAccountsSQLRepository) listConnected(
}

func (r *cloudProviderAccountsSQLRepository) get(
	ctx context.Context, cloudProvider string, id string,
) (*AccountRecord, *model.ApiError) {
	var result AccountRecord
	ctx context.Context, orgId string, provider string, id string,
) (*types.CloudIntegration, *model.ApiError) {
	var result types.CloudIntegration

	err := r.db.GetContext(
		ctx, &result, `
			select
				cloud_provider,
				id,
				config_json,
				cloud_account_id,
				last_agent_report_json,
				created_at,
				removed_at
			from cloud_integrations_accounts
			where
				cloud_provider=$1
				and id=$2
		`,
		cloudProvider, id,
	)
	err := r.store.BunDB().NewSelect().
		Model(&result).
		Where("org_id = ?", orgId).
		Where("provider = ?", provider).
		Where("id = ?", id).
		Scan(ctx)

	if err == sql.ErrNoRows {
		return nil, model.NotFoundError(fmt.Errorf(
@@ -116,33 +96,22 @@ func (r *cloudProviderAccountsSQLRepository) get(
}

func (r *cloudProviderAccountsSQLRepository) getConnectedCloudAccount(
	ctx context.Context, cloudProvider string, cloudAccountId string,
) (*AccountRecord, *model.ApiError) {
	var result AccountRecord
	ctx context.Context, orgId string, provider string, accountId string,
) (*types.CloudIntegration, *model.ApiError) {
	var result types.CloudIntegration

	err := r.db.GetContext(
		ctx, &result, `
			select
				cloud_provider,
				id,
				config_json,
				cloud_account_id,
				last_agent_report_json,
				created_at,
				removed_at
			from cloud_integrations_accounts
			where
				cloud_provider=$1
				and cloud_account_id=$2
				and last_agent_report_json is not NULL
				and removed_at is NULL
		`,
		cloudProvider, cloudAccountId,
	)
	err := r.store.BunDB().NewSelect().
		Model(&result).
		Where("org_id = ?", orgId).
		Where("provider = ?", provider).
		Where("account_id = ?", accountId).
		Where("last_agent_report is not NULL").
		Where("removed_at is NULL").
		Scan(ctx)

	if err == sql.ErrNoRows {
		return nil, model.NotFoundError(fmt.Errorf(
			"couldn't find connected cloud account %s", cloudAccountId,
			"couldn't find connected cloud account %s", accountId,
		))
	} else if err != nil {
		return nil, model.InternalError(fmt.Errorf(
@@ -155,17 +124,18 @@ func (r *cloudProviderAccountsSQLRepository) getConnectedCloudAccount(

func (r *cloudProviderAccountsSQLRepository) upsert(
	ctx context.Context,
	cloudProvider string,
	orgId string,
	provider string,
	id *string,
	config *AccountConfig,
	cloudAccountId *string,
	agentReport *AgentReport,
	config *types.AccountConfig,
	accountId *string,
	agentReport *types.AgentReport,
	removedAt *time.Time,
) (*AccountRecord, *model.ApiError) {
) (*types.CloudIntegration, *model.ApiError) {
	// Insert
	if id == nil {
		newId := uuid.NewString()
		id = &newId
		temp := valuer.GenerateUUID().StringValue()
		id = &temp
	}

	// Prepare clause for setting values in `on conflict do update`
@@ -176,19 +146,19 @@ func (r *cloudProviderAccountsSQLRepository) upsert(

	if config != nil {
		onConflictSetStmts = append(
			onConflictSetStmts, setColStatement("config_json"),
			onConflictSetStmts, setColStatement("config"),
		)
	}

	if cloudAccountId != nil {
	if accountId != nil {
		onConflictSetStmts = append(
			onConflictSetStmts, setColStatement("cloud_account_id"),
			onConflictSetStmts, setColStatement("account_id"),
		)
	}

	if agentReport != nil {
		onConflictSetStmts = append(
			onConflictSetStmts, setColStatement("last_agent_report_json"),
			onConflictSetStmts, setColStatement("last_agent_report"),
		)
	}

@@ -198,37 +168,45 @@ func (r *cloudProviderAccountsSQLRepository) upsert(
		)
	}

	// set updated_at to current timestamp if it's an upsert
	onConflictSetStmts = append(
		onConflictSetStmts, setColStatement("updated_at"),
	)

	onConflictClause := ""
	if len(onConflictSetStmts) > 0 {
		onConflictClause = fmt.Sprintf(
			"on conflict(cloud_provider, id) do update SET\n%s",
			"conflict(id, provider, org_id) do update SET\n%s",
			strings.Join(onConflictSetStmts, ",\n"),
		)
	}

	insertQuery := fmt.Sprintf(`
		INSERT INTO cloud_integrations_accounts (
			cloud_provider,
			id,
			config_json,
			cloud_account_id,
			last_agent_report_json,
			removed_at
		) values ($1, $2, $3, $4, $5, $6)
		%s`, onConflictClause,
	)
	integration := types.CloudIntegration{
		OrgID: orgId,
		Provider: provider,
		Identifiable: types.Identifiable{ID: valuer.MustNewUUID(*id)},
		TimeAuditable: types.TimeAuditable{
			CreatedAt: time.Now(),
			UpdatedAt: time.Now(),
		},
		Config: config,
		AccountID: accountId,
		LastAgentReport: agentReport,
		RemovedAt: removedAt,
	}

	_, dbErr := r.store.BunDB().NewInsert().
		Model(&integration).
		On(onConflictClause).
		Exec(ctx)

	_, dbErr := r.db.ExecContext(
		ctx, insertQuery,
		cloudProvider, id, config, cloudAccountId, agentReport, removedAt,
	)
	if dbErr != nil {
		return nil, model.InternalError(fmt.Errorf(
			"could not upsert cloud account record: %w", dbErr,
		))
	}

	upsertedAccount, apiErr := r.get(ctx, cloudProvider, *id)
	upsertedAccount, apiErr := r.get(ctx, orgId, provider, *id)
	if apiErr != nil {
		return nil, model.InternalError(fmt.Errorf(
			"couldn't fetch upserted account by id: %w", apiErr.ToError(),
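Assuming `setColStatement(col)` renders `col = excluded.col` (its definition is outside this diff), a check-in that carries a config, an account id, and an agent report would hand bun an ON CONFLICT clause along these lines; bun's `.On(...)` prefixes the string with `ON `:

	// Hypothetical rendering of onConflictClause for that case
	// (setColStatement behavior assumed, not shown in this diff):
	// conflict(id, provider, org_id) do update SET
	// config = excluded.config,
	// account_id = excluded.account_id,
	// last_agent_report = excluded.last_agent_report,
	// updated_at = excluded.updated_at
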
@@ -33,12 +33,12 @@ type Controller struct {
func NewController(sqlStore sqlstore.SQLStore) (
	*Controller, error,
) {
	accountsRepo, err := newCloudProviderAccountsRepository(sqlStore.SQLxDB())
	accountsRepo, err := newCloudProviderAccountsRepository(sqlStore)
	if err != nil {
		return nil, fmt.Errorf("couldn't create cloud provider accounts repo: %w", err)
	}

	serviceConfigRepo, err := newServiceConfigRepository(sqlStore.SQLxDB())
	serviceConfigRepo, err := newServiceConfigRepository(sqlStore)
	if err != nil {
		return nil, fmt.Errorf("couldn't create cloud provider service config repo: %w", err)
	}
@@ -49,19 +49,12 @@ func NewController(sqlStore sqlstore.SQLStore) (
	}, nil
}

type Account struct {
	Id string `json:"id"`
	CloudAccountId string `json:"cloud_account_id"`
	Config AccountConfig `json:"config"`
	Status AccountStatus `json:"status"`
}

type ConnectedAccountsListResponse struct {
	Accounts []Account `json:"accounts"`
	Accounts []types.Account `json:"accounts"`
}

func (c *Controller) ListConnectedAccounts(
	ctx context.Context, cloudProvider string,
	ctx context.Context, orgId string, cloudProvider string,
) (
	*ConnectedAccountsListResponse, *model.ApiError,
) {
@@ -69,14 +62,14 @@ func (c *Controller) ListConnectedAccounts(
		return nil, apiErr
	}

	accountRecords, apiErr := c.accountsRepo.listConnected(ctx, cloudProvider)
	accountRecords, apiErr := c.accountsRepo.listConnected(ctx, orgId, cloudProvider)
	if apiErr != nil {
		return nil, model.WrapApiError(apiErr, "couldn't list cloud accounts")
	}

	connectedAccounts := []Account{}
	connectedAccounts := []types.Account{}
	for _, a := range accountRecords {
		connectedAccounts = append(connectedAccounts, a.account())
		connectedAccounts = append(connectedAccounts, a.Account())
	}

	return &ConnectedAccountsListResponse{
@@ -88,7 +81,7 @@ type GenerateConnectionUrlRequest struct {
	// Optional. To be specified for updates.
	AccountId *string `json:"account_id,omitempty"`

	AccountConfig AccountConfig `json:"account_config"`
	AccountConfig types.AccountConfig `json:"account_config"`

	AgentConfig SigNozAgentConfig `json:"agent_config"`
}
@@ -109,7 +102,7 @@ type GenerateConnectionUrlResponse struct {
}

func (c *Controller) GenerateConnectionUrl(
	ctx context.Context, cloudProvider string, req GenerateConnectionUrlRequest,
	ctx context.Context, orgId string, cloudProvider string, req GenerateConnectionUrlRequest,
) (*GenerateConnectionUrlResponse, *model.ApiError) {
	// Account connection with a simple connection URL may not be available for all providers.
	if cloudProvider != "aws" {
@@ -117,7 +110,7 @@ func (c *Controller) GenerateConnectionUrl(
	}

	account, apiErr := c.accountsRepo.upsert(
		ctx, cloudProvider, req.AccountId, &req.AccountConfig, nil, nil, nil,
		ctx, orgId, cloudProvider, req.AccountId, &req.AccountConfig, nil, nil, nil,
	)
	if apiErr != nil {
		return nil, model.WrapApiError(apiErr, "couldn't upsert cloud account")
@@ -135,7 +128,7 @@ func (c *Controller) GenerateConnectionUrl(
		"param_SigNozIntegrationAgentVersion": agentVersion,
		"param_SigNozApiUrl": req.AgentConfig.SigNozAPIUrl,
		"param_SigNozApiKey": req.AgentConfig.SigNozAPIKey,
		"param_SigNozAccountId": account.Id,
		"param_SigNozAccountId": account.ID.StringValue(),
		"param_IngestionUrl": req.AgentConfig.IngestionUrl,
		"param_IngestionKey": req.AgentConfig.IngestionKey,
		"stackName": "signoz-integration",
@@ -148,19 +141,19 @@ func (c *Controller) GenerateConnectionUrl(
	}

	return &GenerateConnectionUrlResponse{
		AccountId: account.Id,
		AccountId: account.ID.StringValue(),
		ConnectionUrl: connectionUrl,
	}, nil
}

type AccountStatusResponse struct {
	Id string `json:"id"`
	CloudAccountId *string `json:"cloud_account_id,omitempty"`
	Status AccountStatus `json:"status"`
	Id string `json:"id"`
	CloudAccountId *string `json:"cloud_account_id,omitempty"`
	Status types.AccountStatus `json:"status"`
}

func (c *Controller) GetAccountStatus(
	ctx context.Context, cloudProvider string, accountId string,
	ctx context.Context, orgId string, cloudProvider string, accountId string,
) (
	*AccountStatusResponse, *model.ApiError,
) {
@@ -168,23 +161,23 @@ func (c *Controller) GetAccountStatus(
		return nil, apiErr
	}

	account, apiErr := c.accountsRepo.get(ctx, cloudProvider, accountId)
	account, apiErr := c.accountsRepo.get(ctx, orgId, cloudProvider, accountId)
	if apiErr != nil {
		return nil, apiErr
	}

	resp := AccountStatusResponse{
		Id: account.Id,
		CloudAccountId: account.CloudAccountId,
		Status: account.status(),
		Id: account.ID.StringValue(),
		CloudAccountId: account.AccountID,
		Status: account.Status(),
	}

	return &resp, nil
}

type AgentCheckInRequest struct {
	AccountId string `json:"account_id"`
	CloudAccountId string `json:"cloud_account_id"`
	ID string `json:"account_id"`
	AccountID string `json:"cloud_account_id"`
	// Arbitrary cloud specific Agent data
	Data map[string]any `json:"data,omitempty"`
}
@@ -204,35 +197,35 @@ type IntegrationConfigForAgent struct {
}

func (c *Controller) CheckInAsAgent(
	ctx context.Context, cloudProvider string, req AgentCheckInRequest,
	ctx context.Context, orgId string, cloudProvider string, req AgentCheckInRequest,
) (*AgentCheckInResponse, *model.ApiError) {
	if apiErr := validateCloudProviderName(cloudProvider); apiErr != nil {
		return nil, apiErr
	}

	existingAccount, apiErr := c.accountsRepo.get(ctx, cloudProvider, req.AccountId)
	if existingAccount != nil && existingAccount.CloudAccountId != nil && *existingAccount.CloudAccountId != req.CloudAccountId {
	existingAccount, apiErr := c.accountsRepo.get(ctx, orgId, cloudProvider, req.ID)
	if existingAccount != nil && existingAccount.AccountID != nil && *existingAccount.AccountID != req.AccountID {
		return nil, model.BadRequest(fmt.Errorf(
			"can't check in with new %s account id %s for account %s with existing %s id %s",
			cloudProvider, req.CloudAccountId, existingAccount.Id, cloudProvider, *existingAccount.CloudAccountId,
			cloudProvider, req.AccountID, existingAccount.ID.StringValue(), cloudProvider, *existingAccount.AccountID,
		))
	}

	existingAccount, apiErr = c.accountsRepo.getConnectedCloudAccount(ctx, cloudProvider, req.CloudAccountId)
	if existingAccount != nil && existingAccount.Id != req.AccountId {
	existingAccount, apiErr = c.accountsRepo.getConnectedCloudAccount(ctx, orgId, cloudProvider, req.AccountID)
	if existingAccount != nil && existingAccount.ID.StringValue() != req.ID {
		return nil, model.BadRequest(fmt.Errorf(
			"can't check in to %s account %s with id %s. already connected with id %s",
			cloudProvider, req.CloudAccountId, req.AccountId, existingAccount.Id,
			cloudProvider, req.AccountID, req.ID, existingAccount.ID.StringValue(),
		))
	}

	agentReport := AgentReport{
	agentReport := types.AgentReport{
		TimestampMillis: time.Now().UnixMilli(),
		Data: req.Data,
	}

	account, apiErr := c.accountsRepo.upsert(
		ctx, cloudProvider, &req.AccountId, nil, &req.CloudAccountId, &agentReport, nil,
		ctx, orgId, cloudProvider, &req.ID, nil, &req.AccountID, &agentReport, nil,
	)
	if apiErr != nil {
		return nil, model.WrapApiError(apiErr, "couldn't upsert cloud account")
@@ -265,7 +258,7 @@ func (c *Controller) CheckInAsAgent(
	}

	svcConfigs, apiErr := c.serviceConfigRepo.getAllForAccount(
		ctx, cloudProvider, *account.CloudAccountId,
		ctx, orgId, account.ID.StringValue(),
	)
	if apiErr != nil {
		return nil, model.WrapApiError(
@@ -298,54 +291,55 @@ func (c *Controller) CheckInAsAgent(
	}

	return &AgentCheckInResponse{
		AccountId: account.Id,
		CloudAccountId: *account.CloudAccountId,
		AccountId: account.ID.StringValue(),
		CloudAccountId: *account.AccountID,
		RemovedAt: account.RemovedAt,
		IntegrationConfig: agentConfig,
	}, nil
}

type UpdateAccountConfigRequest struct {
	Config AccountConfig `json:"config"`
	Config types.AccountConfig `json:"config"`
}

func (c *Controller) UpdateAccountConfig(
	ctx context.Context,
	orgId string,
	cloudProvider string,
	accountId string,
	req UpdateAccountConfigRequest,
) (*Account, *model.ApiError) {
) (*types.Account, *model.ApiError) {
	if apiErr := validateCloudProviderName(cloudProvider); apiErr != nil {
		return nil, apiErr
	}

	accountRecord, apiErr := c.accountsRepo.upsert(
		ctx, cloudProvider, &accountId, &req.Config, nil, nil, nil,
		ctx, orgId, cloudProvider, &accountId, &req.Config, nil, nil, nil,
	)
	if apiErr != nil {
		return nil, model.WrapApiError(apiErr, "couldn't upsert cloud account")
	}

	account := accountRecord.account()
	account := accountRecord.Account()

	return &account, nil
}

func (c *Controller) DisconnectAccount(
	ctx context.Context, cloudProvider string, accountId string,
) (*AccountRecord, *model.ApiError) {
	ctx context.Context, orgId string, cloudProvider string, accountId string,
) (*types.CloudIntegration, *model.ApiError) {
	if apiErr := validateCloudProviderName(cloudProvider); apiErr != nil {
		return nil, apiErr
	}

	account, apiErr := c.accountsRepo.get(ctx, cloudProvider, accountId)
	account, apiErr := c.accountsRepo.get(ctx, orgId, cloudProvider, accountId)
	if apiErr != nil {
		return nil, model.WrapApiError(apiErr, "couldn't disconnect account")
	}

	tsNow := time.Now()
	account, apiErr = c.accountsRepo.upsert(
		ctx, cloudProvider, &accountId, nil, nil, nil, &tsNow,
		ctx, orgId, cloudProvider, &accountId, nil, nil, nil, &tsNow,
	)
	if apiErr != nil {
		return nil, model.WrapApiError(apiErr, "couldn't disconnect account")
@@ -360,6 +354,7 @@ type ListServicesResponse struct {

func (c *Controller) ListServices(
	ctx context.Context,
	orgID string,
	cloudProvider string,
	cloudAccountId *string,
) (*ListServicesResponse, *model.ApiError) {
@@ -373,10 +368,16 @@ func (c *Controller) ListServices(
		return nil, model.WrapApiError(apiErr, "couldn't list cloud services")
	}

	svcConfigs := map[string]*CloudServiceConfig{}
	svcConfigs := map[string]*types.CloudServiceConfig{}
	if cloudAccountId != nil {
		activeAccount, apiErr := c.accountsRepo.getConnectedCloudAccount(
			ctx, orgID, cloudProvider, *cloudAccountId,
		)
		if apiErr != nil {
			return nil, model.WrapApiError(apiErr, "couldn't get active account")
		}
		svcConfigs, apiErr = c.serviceConfigRepo.getAllForAccount(
			ctx, cloudProvider, *cloudAccountId,
			ctx, orgID, activeAccount.ID.StringValue(),
		)
		if apiErr != nil {
			return nil, model.WrapApiError(
@@ -400,6 +401,7 @@ func (c *Controller) ListServices(

func (c *Controller) GetServiceDetails(
	ctx context.Context,
	orgID string,
	cloudProvider string,
	serviceId string,
	cloudAccountId *string,
@@ -415,8 +417,16 @@ func (c *Controller) GetServiceDetails(
	}

	if cloudAccountId != nil {

		activeAccount, apiErr := c.accountsRepo.getConnectedCloudAccount(
			ctx, orgID, cloudProvider, *cloudAccountId,
		)
		if apiErr != nil {
			return nil, model.WrapApiError(apiErr, "couldn't get active account")
		}

		config, apiErr := c.serviceConfigRepo.get(
			ctx, cloudProvider, *cloudAccountId, serviceId,
			ctx, orgID, activeAccount.ID.StringValue(), serviceId,
		)
		if apiErr != nil && apiErr.Type() != model.ErrorNotFound {
			return nil, model.WrapApiError(apiErr, "couldn't fetch service config")
@@ -425,15 +435,22 @@ func (c *Controller) GetServiceDetails(
		if config != nil {
			service.Config = config

			enabled := false
			if config.Metrics != nil && config.Metrics.Enabled {
			// add links to service dashboards, making them clickable.
			for i, d := range service.Assets.Dashboards {
				dashboardUuid := c.dashboardUuid(
					cloudProvider, serviceId, d.Id,
				)
				enabled = true
			}

			// add links to service dashboards, making them clickable.
			for i, d := range service.Assets.Dashboards {
				dashboardUuid := c.dashboardUuid(
					cloudProvider, serviceId, d.Id,
				)
				if enabled {
					service.Assets.Dashboards[i].Url = fmt.Sprintf(
						"/dashboard/%s", dashboardUuid,
					)
				} else {
					service.Assets.Dashboards[i].Url = ""
				}
			}
		}
@@ -443,17 +460,18 @@ func (c *Controller) GetServiceDetails(
}

type UpdateServiceConfigRequest struct {
	CloudAccountId string `json:"cloud_account_id"`
	Config CloudServiceConfig `json:"config"`
	CloudAccountId string `json:"cloud_account_id"`
	Config types.CloudServiceConfig `json:"config"`
}

type UpdateServiceConfigResponse struct {
	Id string `json:"id"`
	Config CloudServiceConfig `json:"config"`
	Id string `json:"id"`
	Config types.CloudServiceConfig `json:"config"`
}

func (c *Controller) UpdateServiceConfig(
	ctx context.Context,
	orgID string,
	cloudProvider string,
	serviceId string,
	req UpdateServiceConfigRequest,
@@ -465,7 +483,7 @@ func (c *Controller) UpdateServiceConfig(

	// can only update config for a connected cloud account id
	_, apiErr := c.accountsRepo.getConnectedCloudAccount(
		ctx, cloudProvider, req.CloudAccountId,
		ctx, orgID, cloudProvider, req.CloudAccountId,
	)
	if apiErr != nil {
		return nil, model.WrapApiError(apiErr, "couldn't find connected cloud account")
@@ -478,7 +496,7 @@ func (c *Controller) UpdateServiceConfig(
	}

	updatedConfig, apiErr := c.serviceConfigRepo.upsert(
		ctx, cloudProvider, req.CloudAccountId, serviceId, req.Config,
		ctx, orgID, cloudProvider, req.CloudAccountId, serviceId, req.Config,
	)
	if apiErr != nil {
		return nil, model.WrapApiError(apiErr, "couldn't update service config")
@@ -492,13 +510,13 @@ func (c *Controller) UpdateServiceConfig(

// All dashboards that are available based on cloud integrations configuration
// across all cloud providers
func (c *Controller) AvailableDashboards(ctx context.Context) (
func (c *Controller) AvailableDashboards(ctx context.Context, orgId string) (
	[]types.Dashboard, *model.ApiError,
) {
	allDashboards := []types.Dashboard{}

	for _, provider := range []string{"aws"} {
		providerDashboards, apiErr := c.AvailableDashboardsForCloudProvider(ctx, provider)
		providerDashboards, apiErr := c.AvailableDashboardsForCloudProvider(ctx, orgId, provider)
		if apiErr != nil {
			return nil, model.WrapApiError(
				apiErr, fmt.Sprintf("couldn't get available dashboards for %s", provider),
@@ -512,10 +530,10 @@ func (c *Controller) AvailableDashboards(ctx context.Context) (
}

func (c *Controller) AvailableDashboardsForCloudProvider(
	ctx context.Context, cloudProvider string,
	ctx context.Context, orgID string, cloudProvider string,
) ([]types.Dashboard, *model.ApiError) {

	accountRecords, apiErr := c.accountsRepo.listConnected(ctx, cloudProvider)
	accountRecords, apiErr := c.accountsRepo.listConnected(ctx, orgID, cloudProvider)
	if apiErr != nil {
		return nil, model.WrapApiError(apiErr, "couldn't list connected cloud accounts")
	}
@@ -524,9 +542,9 @@ func (c *Controller) AvailableDashboardsForCloudProvider(
	servicesWithAvailableMetrics := map[string]*time.Time{}

	for _, ar := range accountRecords {
		if ar.CloudAccountId != nil {
		if ar.AccountID != nil {
			configsBySvcId, apiErr := c.serviceConfigRepo.getAllForAccount(
				ctx, cloudProvider, *ar.CloudAccountId,
				ctx, orgID, ar.ID.StringValue(),
			)
			if apiErr != nil {
				return nil, apiErr
@@ -574,6 +592,7 @@ func (c *Controller) AvailableDashboardsForCloudProvider(
}
func (c *Controller) GetDashboardById(
	ctx context.Context,
	orgId string,
	dashboardUuid string,
) (*types.Dashboard, *model.ApiError) {
	cloudProvider, _, _, apiErr := c.parseDashboardUuid(dashboardUuid)
@@ -581,7 +600,7 @@ func (c *Controller) GetDashboardById(
		return nil, apiErr
	}

	allDashboards, apiErr := c.AvailableDashboardsForCloudProvider(ctx, cloudProvider)
	allDashboards, apiErr := c.AvailableDashboardsForCloudProvider(ctx, orgId, cloudProvider)
	if apiErr != nil {
		return nil, model.WrapApiError(
			apiErr, fmt.Sprintf("couldn't list available dashboards"),

@@ -4,23 +4,30 @@ import (
|
||||
"context"
|
||||
"testing"
|
||||
|
||||
"github.com/SigNoz/signoz/pkg/query-service/auth"
|
||||
"github.com/SigNoz/signoz/pkg/query-service/constants"
|
||||
"github.com/SigNoz/signoz/pkg/query-service/dao"
|
||||
"github.com/SigNoz/signoz/pkg/query-service/model"
|
||||
"github.com/SigNoz/signoz/pkg/query-service/utils"
|
||||
"github.com/SigNoz/signoz/pkg/types"
|
||||
"github.com/google/uuid"
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
func TestRegenerateConnectionUrlWithUpdatedConfig(t *testing.T) {
|
||||
require := require.New(t)
|
||||
sqlStore, _ := utils.NewTestSqliteDB(t)
|
||||
sqlStore := utils.NewQueryServiceDBForTests(t)
|
||||
controller, err := NewController(sqlStore)
|
||||
require.NoError(err)
|
||||
|
||||
user, apiErr := createTestUser()
|
||||
require.Nil(apiErr)
|
||||
|
||||
// should be able to generate connection url for
|
||||
// same account id again with updated config
|
||||
testAccountConfig1 := AccountConfig{EnabledRegions: []string{"us-east-1", "us-west-1"}}
|
||||
testAccountConfig1 := types.AccountConfig{EnabledRegions: []string{"us-east-1", "us-west-1"}}
|
||||
resp1, apiErr := controller.GenerateConnectionUrl(
|
||||
context.TODO(), "aws", GenerateConnectionUrlRequest{
|
||||
context.TODO(), user.OrgID, "aws", GenerateConnectionUrlRequest{
|
||||
AccountConfig: testAccountConfig1,
|
||||
AgentConfig: SigNozAgentConfig{Region: "us-east-2"},
|
||||
},
|
||||
@@ -31,14 +38,14 @@ func TestRegenerateConnectionUrlWithUpdatedConfig(t *testing.T) {
|
||||
|
||||
testAccountId := resp1.AccountId
|
||||
account, apiErr := controller.accountsRepo.get(
|
||||
context.TODO(), "aws", testAccountId,
|
||||
context.TODO(), user.OrgID, "aws", testAccountId,
|
||||
)
|
||||
require.Nil(apiErr)
|
||||
require.Equal(testAccountConfig1, *account.Config)
|
||||
|
||||
testAccountConfig2 := AccountConfig{EnabledRegions: []string{"us-east-2", "us-west-2"}}
|
||||
testAccountConfig2 := types.AccountConfig{EnabledRegions: []string{"us-east-2", "us-west-2"}}
|
||||
resp2, apiErr := controller.GenerateConnectionUrl(
|
||||
context.TODO(), "aws", GenerateConnectionUrlRequest{
|
||||
context.TODO(), user.OrgID, "aws", GenerateConnectionUrlRequest{
|
||||
AccountId: &testAccountId,
|
||||
AccountConfig: testAccountConfig2,
|
||||
AgentConfig: SigNozAgentConfig{Region: "us-east-2"},
|
||||
@@ -48,7 +55,7 @@ func TestRegenerateConnectionUrlWithUpdatedConfig(t *testing.T) {
|
||||
require.Equal(testAccountId, resp2.AccountId)
|
||||
|
||||
account, apiErr = controller.accountsRepo.get(
|
||||
context.TODO(), "aws", testAccountId,
|
||||
context.TODO(), user.OrgID, "aws", testAccountId,
|
||||
)
|
||||
require.Nil(apiErr)
|
||||
require.Equal(testAccountConfig2, *account.Config)
|
||||
@@ -56,18 +63,21 @@ func TestRegenerateConnectionUrlWithUpdatedConfig(t *testing.T) {
|
||||
|
||||
func TestAgentCheckIns(t *testing.T) {
|
||||
require := require.New(t)
|
||||
sqlStore, _ := utils.NewTestSqliteDB(t)
|
||||
sqlStore := utils.NewQueryServiceDBForTests(t)
|
||||
controller, err := NewController(sqlStore)
|
||||
require.NoError(err)
|
||||
|
||||
user, apiErr := createTestUser()
|
||||
require.Nil(apiErr)
|
||||
|
||||
// An agent should be able to check in from a cloud account even
|
||||
// if no connection url was requested (no account with agent's account id exists)
|
||||
testAccountId1 := uuid.NewString()
|
||||
testCloudAccountId1 := "546311234"
|
||||
resp1, apiErr := controller.CheckInAsAgent(
|
||||
context.TODO(), "aws", AgentCheckInRequest{
|
||||
AccountId: testAccountId1,
|
||||
CloudAccountId: testCloudAccountId1,
|
||||
context.TODO(), user.OrgID, "aws", AgentCheckInRequest{
|
||||
ID: testAccountId1,
|
||||
AccountID: testCloudAccountId1,
|
||||
},
|
||||
)
|
||||
require.Nil(apiErr)
|
||||
@@ -78,9 +88,9 @@ func TestAgentCheckIns(t *testing.T) {
|
||||
// cloud account id for the same account.
|
||||
testCloudAccountId2 := "99999999"
|
||||
_, apiErr = controller.CheckInAsAgent(
|
||||
context.TODO(), "aws", AgentCheckInRequest{
|
||||
AccountId: testAccountId1,
|
||||
CloudAccountId: testCloudAccountId2,
|
||||
context.TODO(), user.OrgID, "aws", AgentCheckInRequest{
|
||||
ID: testAccountId1,
|
||||
AccountID: testCloudAccountId2,
|
||||
},
|
||||
)
|
||||
require.NotNil(apiErr)
|
||||
@@ -90,18 +100,18 @@ func TestAgentCheckIns(t *testing.T) {
|
||||
// i.e. there can't be 2 connected account records for the same cloud account id
|
||||
// at any point in time.
|
||||
existingConnected, apiErr := controller.accountsRepo.getConnectedCloudAccount(
|
||||
context.TODO(), "aws", testCloudAccountId1,
|
||||
context.TODO(), user.OrgID, "aws", testCloudAccountId1,
|
||||
)
|
||||
require.Nil(apiErr)
|
||||
require.NotNil(existingConnected)
|
||||
require.Equal(testCloudAccountId1, *existingConnected.CloudAccountId)
|
||||
require.Equal(testCloudAccountId1, *existingConnected.AccountID)
|
||||
require.Nil(existingConnected.RemovedAt)
|
||||
|
||||
testAccountId2 := uuid.NewString()
|
||||
_, apiErr = controller.CheckInAsAgent(
|
||||
context.TODO(), "aws", AgentCheckInRequest{
|
||||
AccountId: testAccountId2,
|
||||
CloudAccountId: testCloudAccountId1,
|
||||
context.TODO(), user.OrgID, "aws", AgentCheckInRequest{
|
||||
ID: testAccountId2,
|
||||
AccountID: testCloudAccountId1,
|
||||
},
|
||||
)
|
||||
require.NotNil(apiErr)
|
||||
@@ -109,29 +119,29 @@ func TestAgentCheckIns(t *testing.T) {
|
||||
// After disconnecting existing account record, the agent should be able to
|
||||
// connected for a particular cloud account id
|
||||
    _, apiErr = controller.DisconnectAccount(
        context.TODO(), "aws", testAccountId1,
        context.TODO(), user.OrgID, "aws", testAccountId1,
    )

    existingConnected, apiErr = controller.accountsRepo.getConnectedCloudAccount(
        context.TODO(), "aws", testCloudAccountId1,
        context.TODO(), user.OrgID, "aws", testCloudAccountId1,
    )
    require.Nil(existingConnected)
    require.NotNil(apiErr)
    require.Equal(model.ErrorNotFound, apiErr.Type())

    _, apiErr = controller.CheckInAsAgent(
        context.TODO(), "aws", AgentCheckInRequest{
            AccountId: testAccountId2,
            CloudAccountId: testCloudAccountId1,
        context.TODO(), user.OrgID, "aws", AgentCheckInRequest{
            ID: testAccountId2,
            AccountID: testCloudAccountId1,
        },
    )
    require.Nil(apiErr)

    // should be able to keep checking in
    _, apiErr = controller.CheckInAsAgent(
        context.TODO(), "aws", AgentCheckInRequest{
            AccountId: testAccountId2,
            CloudAccountId: testCloudAccountId1,
        context.TODO(), user.OrgID, "aws", AgentCheckInRequest{
            ID: testAccountId2,
            AccountID: testCloudAccountId1,
        },
    )
    require.Nil(apiErr)
@@ -139,13 +149,16 @@ func TestAgentCheckIns(t *testing.T) {

func TestCantDisconnectNonExistentAccount(t *testing.T) {
    require := require.New(t)
    sqlStore, _ := utils.NewTestSqliteDB(t)
    sqlStore := utils.NewQueryServiceDBForTests(t)
    controller, err := NewController(sqlStore)
    require.NoError(err)

    user, apiErr := createTestUser()
    require.Nil(apiErr)

    // Attempting to disconnect a non-existent account should return error
    account, apiErr := controller.DisconnectAccount(
        context.TODO(), "aws", uuid.NewString(),
        context.TODO(), user.OrgID, "aws", uuid.NewString(),
    )
    require.NotNil(apiErr)
    require.Equal(model.ErrorNotFound, apiErr.Type())
@@ -154,15 +167,23 @@ func TestCantDisconnectNonExistentAccount(t *testing.T) {

func TestConfigureService(t *testing.T) {
    require := require.New(t)
    sqlStore, _ := utils.NewTestSqliteDB(t)
    sqlStore := utils.NewQueryServiceDBForTests(t)
    controller, err := NewController(sqlStore)
    require.NoError(err)

    user, apiErr := createTestUser()
    require.Nil(apiErr)

    // create a connected account
    testCloudAccountId := "546311234"
    testConnectedAccount := makeTestConnectedAccount(t, user.OrgID, controller, testCloudAccountId)
    require.Nil(testConnectedAccount.RemovedAt)
    require.NotEmpty(testConnectedAccount.AccountID)
    require.Equal(testCloudAccountId, *testConnectedAccount.AccountID)

    // should start out without any service config
    svcListResp, apiErr := controller.ListServices(
        context.TODO(), "aws", &testCloudAccountId,
        context.TODO(), user.OrgID, "aws", &testCloudAccountId,
    )
    require.Nil(apiErr)

@@ -170,25 +191,20 @@ func TestConfigureService(t *testing.T) {
    require.Nil(svcListResp.Services[0].Config)

    svcDetails, apiErr := controller.GetServiceDetails(
        context.TODO(), "aws", testSvcId, &testCloudAccountId,
        context.TODO(), user.OrgID, "aws", testSvcId, &testCloudAccountId,
    )
    require.Nil(apiErr)
    require.Equal(testSvcId, svcDetails.Id)
    require.Nil(svcDetails.Config)

    // should be able to configure a service for a connected account
    testConnectedAccount := makeTestConnectedAccount(t, controller, testCloudAccountId)
    require.Nil(testConnectedAccount.RemovedAt)
    require.NotNil(testConnectedAccount.CloudAccountId)
    require.Equal(testCloudAccountId, *testConnectedAccount.CloudAccountId)

    testSvcConfig := CloudServiceConfig{
        Metrics: &CloudServiceMetricsConfig{
    testSvcConfig := types.CloudServiceConfig{
        Metrics: &types.CloudServiceMetricsConfig{
            Enabled: true,
        },
    }
    updateSvcConfigResp, apiErr := controller.UpdateServiceConfig(
        context.TODO(), "aws", testSvcId, UpdateServiceConfigRequest{
        context.TODO(), user.OrgID, "aws", testSvcId, UpdateServiceConfigRequest{
            CloudAccountId: testCloudAccountId,
            Config: testSvcConfig,
        },
@@ -198,14 +214,14 @@ func TestConfigureService(t *testing.T) {
    require.Equal(testSvcConfig, updateSvcConfigResp.Config)

    svcDetails, apiErr = controller.GetServiceDetails(
        context.TODO(), "aws", testSvcId, &testCloudAccountId,
        context.TODO(), user.OrgID, "aws", testSvcId, &testCloudAccountId,
    )
    require.Nil(apiErr)
    require.Equal(testSvcId, svcDetails.Id)
    require.Equal(testSvcConfig, *svcDetails.Config)

    svcListResp, apiErr = controller.ListServices(
        context.TODO(), "aws", &testCloudAccountId,
        context.TODO(), user.OrgID, "aws", &testCloudAccountId,
    )
    require.Nil(apiErr)
    for _, svc := range svcListResp.Services {
@@ -216,12 +232,12 @@ func TestConfigureService(t *testing.T) {

    // should not be able to configure service after cloud account has been disconnected
    _, apiErr = controller.DisconnectAccount(
        context.TODO(), "aws", testConnectedAccount.Id,
        context.TODO(), user.OrgID, "aws", testConnectedAccount.ID.StringValue(),
    )
    require.Nil(apiErr)

    _, apiErr = controller.UpdateServiceConfig(
        context.TODO(), "aws", testSvcId,
        context.TODO(), user.OrgID, "aws", testSvcId,
        UpdateServiceConfigRequest{
            CloudAccountId: testCloudAccountId,
            Config: testSvcConfig,
@@ -231,7 +247,7 @@ func TestConfigureService(t *testing.T) {

    // should not be able to configure a service for a cloud account id that is not connected yet
    _, apiErr = controller.UpdateServiceConfig(
        context.TODO(), "aws", testSvcId,
        context.TODO(), user.OrgID, "aws", testSvcId,
        UpdateServiceConfigRequest{
            CloudAccountId: "9999999999",
            Config: testSvcConfig,
@@ -241,7 +257,7 @@ func TestConfigureService(t *testing.T) {

    // should not be able to set config for an unsupported service
    _, apiErr = controller.UpdateServiceConfig(
        context.TODO(), "aws", "bad-service", UpdateServiceConfigRequest{
        context.TODO(), user.OrgID, "aws", "bad-service", UpdateServiceConfigRequest{
            CloudAccountId: testCloudAccountId,
            Config: testSvcConfig,
        },
@@ -250,22 +266,54 @@ func TestConfigureService(t *testing.T) {

}

func makeTestConnectedAccount(t *testing.T, controller *Controller, cloudAccountId string) *AccountRecord {
func makeTestConnectedAccount(t *testing.T, orgId string, controller *Controller, cloudAccountId string) *types.CloudIntegration {
    require := require.New(t)

    // a check in from SigNoz agent creates or updates a connected account.
    testAccountId := uuid.NewString()
    resp, apiErr := controller.CheckInAsAgent(
        context.TODO(), "aws", AgentCheckInRequest{
            AccountId: testAccountId,
            CloudAccountId: cloudAccountId,
        context.TODO(), orgId, "aws", AgentCheckInRequest{
            ID: testAccountId,
            AccountID: cloudAccountId,
        },
    )
    require.Nil(apiErr)
    require.Equal(testAccountId, resp.AccountId)
    require.Equal(cloudAccountId, resp.CloudAccountId)

    acc, err := controller.accountsRepo.get(context.TODO(), "aws", resp.AccountId)
    acc, err := controller.accountsRepo.get(context.TODO(), orgId, "aws", resp.AccountId)
    require.Nil(err)
    return acc
}

func createTestUser() (*types.User, *model.ApiError) {
    // Create a test user for auth
    ctx := context.Background()
    org, apiErr := dao.DB().CreateOrg(ctx, &types.Organization{
        Name: "test",
    })
    if apiErr != nil {
        return nil, apiErr
    }

    group, apiErr := dao.DB().GetGroupByName(ctx, constants.AdminGroup)
    if apiErr != nil {
        return nil, apiErr
    }

    auth.InitAuthCache(ctx)

    userId := uuid.NewString()
    return dao.DB().CreateUser(
        ctx,
        &types.User{
            ID: userId,
            Name: "test",
            Email: userId[:8] + "test@test.com",
            Password: "test",
            OrgID: org.ID,
            GroupID: group.ID,
        },
        true,
    )
}
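The thread running through the test changes above is multi-tenancy: every controller and repository call now takes the caller's org ID immediately after the context, and records are looked up by (org, id) rather than by id alone. A minimal sketch of that scoping idea using bun — `orgScopedGet` and the pared-down `CloudIntegration` struct are illustrative assumptions, not the actual SigNoz API:

```go
package main

import (
	"context"
	"database/sql"
	"errors"

	"github.com/uptrace/bun"
)

// CloudIntegration is a stand-in for the org-scoped record type.
type CloudIntegration struct {
	bun.BaseModel `bun:"table:cloud_integration"`

	ID    string `bun:"id,pk"`
	OrgID string `bun:"org_id"`
}

// orgScopedGet filters by org_id as well as id, so a caller can never
// read a record that belongs to another organization.
func orgScopedGet(ctx context.Context, db *bun.DB, orgID, id string) (*CloudIntegration, error) {
	record := new(CloudIntegration)
	err := db.NewSelect().
		Model(record).
		Where("org_id = ?", orgID). // tenant isolation happens here
		Where("id = ?", id).
		Scan(ctx)
	if errors.Is(err, sql.ErrNoRows) {
		return nil, errors.New("cloud integration not found")
	}
	return record, err
}
```

Keeping the org filter inside the repository, rather than in each caller, means a forgotten `Where` clause can't silently leak another tenant's rows.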
@@ -1,123 +1,11 @@
package cloudintegrations

import (
    "database/sql/driver"
    "encoding/json"
    "fmt"
    "time"

    "github.com/SigNoz/signoz/pkg/types"
)

// Represents a cloud provider account for cloud integrations
type AccountRecord struct {
    CloudProvider string `json:"cloud_provider" db:"cloud_provider"`
    Id string `json:"id" db:"id"`
    Config *AccountConfig `json:"config" db:"config_json"`
    CloudAccountId *string `json:"cloud_account_id" db:"cloud_account_id"`
    LastAgentReport *AgentReport `json:"last_agent_report" db:"last_agent_report_json"`
    CreatedAt time.Time `json:"created_at" db:"created_at"`
    RemovedAt *time.Time `json:"removed_at" db:"removed_at"`
}

type AccountConfig struct {
    EnabledRegions []string `json:"regions"`
}

func DefaultAccountConfig() AccountConfig {
    return AccountConfig{
        EnabledRegions: []string{},
    }
}

// For serializing from db
func (c *AccountConfig) Scan(src any) error {
    data, ok := src.([]byte)
    if !ok {
        return fmt.Errorf("tried to scan from %T instead of bytes", src)
    }

    return json.Unmarshal(data, &c)
}

// For serializing to db
func (c *AccountConfig) Value() (driver.Value, error) {
    if c == nil {
        return nil, nil
    }

    serialized, err := json.Marshal(c)
    if err != nil {
        return nil, fmt.Errorf(
            "couldn't serialize cloud account config to JSON: %w", err,
        )
    }
    return serialized, nil
}

type AgentReport struct {
    TimestampMillis int64 `json:"timestamp_millis"`
    Data map[string]any `json:"data"`
}

// For serializing from db
func (r *AgentReport) Scan(src any) error {
    data, ok := src.([]byte)
    if !ok {
        return fmt.Errorf("tried to scan from %T instead of bytes", src)
    }

    return json.Unmarshal(data, &r)
}

// For serializing to db
func (r *AgentReport) Value() (driver.Value, error) {
    if r == nil {
        return nil, nil
    }

    serialized, err := json.Marshal(r)
    if err != nil {
        return nil, fmt.Errorf(
            "couldn't serialize agent report to JSON: %w", err,
        )
    }
    return serialized, nil
}

type AccountStatus struct {
    Integration AccountIntegrationStatus `json:"integration"`
}

type AccountIntegrationStatus struct {
    LastHeartbeatTsMillis *int64 `json:"last_heartbeat_ts_ms"`
}

func (a *AccountRecord) status() AccountStatus {
    status := AccountStatus{}
    if a.LastAgentReport != nil {
        lastHeartbeat := a.LastAgentReport.TimestampMillis
        status.Integration.LastHeartbeatTsMillis = &lastHeartbeat
    }
    return status
}

func (a *AccountRecord) account() Account {
    ca := Account{Id: a.Id, Status: a.status()}

    if a.CloudAccountId != nil {
        ca.CloudAccountId = *a.CloudAccountId
    }

    if a.Config != nil {
        ca.Config = *a.Config
    } else {
        ca.Config = DefaultAccountConfig()
    }

    return ca
}

type CloudServiceSummary struct {
    Id string `json:"id"`
    Title string `json:"title"`
@@ -125,7 +13,7 @@ type CloudServiceSummary struct {

    // Present only if the service has been configured in the
    // context of a cloud provider account.
    Config *CloudServiceConfig `json:"config,omitempty"`
    Config *types.CloudServiceConfig `json:"config,omitempty"`
}

type CloudServiceDetails struct {
@@ -144,44 +32,6 @@ type CloudServiceDetails struct {
    TelemetryCollectionStrategy *CloudTelemetryCollectionStrategy `json:"telemetry_collection_strategy"`
}

type CloudServiceConfig struct {
    Logs *CloudServiceLogsConfig `json:"logs,omitempty"`
    Metrics *CloudServiceMetricsConfig `json:"metrics,omitempty"`
}

// For serializing from db
func (c *CloudServiceConfig) Scan(src any) error {
    data, ok := src.([]byte)
    if !ok {
        return fmt.Errorf("tried to scan from %T instead of bytes", src)
    }

    return json.Unmarshal(data, &c)
}

// For serializing to db
func (c *CloudServiceConfig) Value() (driver.Value, error) {
    if c == nil {
        return nil, nil
    }

    serialized, err := json.Marshal(c)
    if err != nil {
        return nil, fmt.Errorf(
            "couldn't serialize cloud service config to JSON: %w", err,
        )
    }
    return serialized, nil
}

type CloudServiceLogsConfig struct {
    Enabled bool `json:"enabled"`
}

type CloudServiceMetricsConfig struct {
    Enabled bool `json:"enabled"`
}

type CloudServiceAssets struct {
    Dashboards []CloudServiceDashboard `json:"dashboards"`
}
@@ -4,161 +4,161 @@ import (
    "context"
    "database/sql"
    "fmt"
    "time"

    "github.com/SigNoz/signoz/pkg/query-service/model"
    "github.com/jmoiron/sqlx"
    "github.com/SigNoz/signoz/pkg/sqlstore"
    "github.com/SigNoz/signoz/pkg/types"
    "github.com/SigNoz/signoz/pkg/valuer"
)

type serviceConfigRepository interface {
    get(
        ctx context.Context,
        cloudProvider string,
        orgID string,
        cloudAccountId string,
        serviceId string,
    ) (*CloudServiceConfig, *model.ApiError)
        serviceType string,
    ) (*types.CloudServiceConfig, *model.ApiError)

    upsert(
        ctx context.Context,
        orgID string,
        cloudProvider string,
        cloudAccountId string,
        serviceId string,
        config CloudServiceConfig,
    ) (*CloudServiceConfig, *model.ApiError)
        config types.CloudServiceConfig,
    ) (*types.CloudServiceConfig, *model.ApiError)

    getAllForAccount(
        ctx context.Context,
        cloudProvider string,
        orgID string,
        cloudAccountId string,
    ) (
        configsBySvcId map[string]*CloudServiceConfig,
        configsBySvcId map[string]*types.CloudServiceConfig,
        apiErr *model.ApiError,
    )
}

func newServiceConfigRepository(db *sqlx.DB) (
func newServiceConfigRepository(store sqlstore.SQLStore) (
    *serviceConfigSQLRepository, error,
) {
    return &serviceConfigSQLRepository{
        db: db,
        store: store,
    }, nil
}

type serviceConfigSQLRepository struct {
    db *sqlx.DB
    store sqlstore.SQLStore
}

func (r *serviceConfigSQLRepository) get(
    ctx context.Context,
    cloudProvider string,
    orgID string,
    cloudAccountId string,
    serviceId string,
) (*CloudServiceConfig, *model.ApiError) {
    serviceType string,
) (*types.CloudServiceConfig, *model.ApiError) {

    var result CloudServiceConfig
    var result types.CloudIntegrationService

    err := r.db.GetContext(
        ctx, &result, `
            select
                config_json
            from cloud_integrations_service_configs
            where
                cloud_provider=$1
                and cloud_account_id=$2
                and service_id=$3
        `,
        cloudProvider, cloudAccountId, serviceId,
    )
    err := r.store.BunDB().NewSelect().
        Model(&result).
        Join("JOIN cloud_integration ci ON ci.id = cis.cloud_integration_id").
        Where("ci.org_id = ?", orgID).
        Where("ci.id = ?", cloudAccountId).
        Where("cis.type = ?", serviceType).
        Scan(ctx)

    if err == sql.ErrNoRows {
        return nil, model.NotFoundError(fmt.Errorf(
            "couldn't find %s %s config for %s",
            cloudProvider, serviceId, cloudAccountId,
            "couldn't find config for cloud account %s",
            cloudAccountId,
        ))

    } else if err != nil {
        return nil, model.InternalError(fmt.Errorf(
            "couldn't query cloud service config: %w", err,
        ))
    }

    return &result, nil
    return &result.Config, nil

}

func (r *serviceConfigSQLRepository) upsert(
    ctx context.Context,
    orgID string,
    cloudProvider string,
    cloudAccountId string,
    serviceId string,
    config CloudServiceConfig,
) (*CloudServiceConfig, *model.ApiError) {
    config types.CloudServiceConfig,
) (*types.CloudServiceConfig, *model.ApiError) {

    query := `
        INSERT INTO cloud_integrations_service_configs (
            cloud_provider,
            cloud_account_id,
            service_id,
            config_json
        ) values ($1, $2, $3, $4)
        on conflict(cloud_provider, cloud_account_id, service_id)
            do update set config_json=excluded.config_json
    `
    _, dbErr := r.db.ExecContext(
        ctx, query,
        cloudProvider, cloudAccountId, serviceId, &config,
    )
    if dbErr != nil {
    // get cloud integration id from account id
    // if the account is not connected, we don't need to upsert the config
    var cloudIntegrationId string
    err := r.store.BunDB().NewSelect().
        Model((*types.CloudIntegration)(nil)).
        Column("id").
        Where("provider = ?", cloudProvider).
        Where("account_id = ?", cloudAccountId).
        Where("org_id = ?", orgID).
        Where("removed_at is NULL").
        Where("last_agent_report is not NULL").
        Scan(ctx, &cloudIntegrationId)

    if err != nil {
        return nil, model.InternalError(fmt.Errorf(
            "could not upsert cloud service config: %w", dbErr,
            "couldn't query cloud integration id: %w", err,
        ))
    }

    upsertedConfig, apiErr := r.get(ctx, cloudProvider, cloudAccountId, serviceId)
    if apiErr != nil {
    serviceConfig := types.CloudIntegrationService{
        Identifiable: types.Identifiable{ID: valuer.GenerateUUID()},
        TimeAuditable: types.TimeAuditable{
            CreatedAt: time.Now(),
            UpdatedAt: time.Now(),
        },
        Config: config,
        Type: serviceId,
        CloudIntegrationID: cloudIntegrationId,
    }
    _, err = r.store.BunDB().NewInsert().
        Model(&serviceConfig).
        On("conflict(cloud_integration_id, type) do update set config=excluded.config, updated_at=excluded.updated_at").
        Exec(ctx)
    if err != nil {
        return nil, model.InternalError(fmt.Errorf(
            "couldn't fetch upserted service config: %w", apiErr.ToError(),
            "could not upsert cloud service config: %w", err,
        ))
    }

    return upsertedConfig, nil
    return &serviceConfig.Config, nil

}

func (r *serviceConfigSQLRepository) getAllForAccount(
    ctx context.Context,
    cloudProvider string,
    orgID string,
    cloudAccountId string,
) (map[string]*CloudServiceConfig, *model.ApiError) {
) (map[string]*types.CloudServiceConfig, *model.ApiError) {

    type ScannedServiceConfigRecord struct {
        ServiceId string `db:"service_id"`
        Config CloudServiceConfig `db:"config_json"`
    }
    serviceConfigs := []types.CloudIntegrationService{}

    records := []ScannedServiceConfigRecord{}

    err := r.db.SelectContext(
        ctx, &records, `
            select
                service_id,
                config_json
            from cloud_integrations_service_configs
            where
                cloud_provider=$1
                and cloud_account_id=$2
        `,
        cloudProvider, cloudAccountId,
    )
    err := r.store.BunDB().NewSelect().
        Model(&serviceConfigs).
        Join("JOIN cloud_integration ci ON ci.id = cis.cloud_integration_id").
        Where("ci.id = ?", cloudAccountId).
        Where("ci.org_id = ?", orgID).
        Scan(ctx)
    if err != nil {
        return nil, model.InternalError(fmt.Errorf(
            "could not query service configs from db: %w", err,
        ))
    }

    result := map[string]*CloudServiceConfig{}
    result := map[string]*types.CloudServiceConfig{}

    for _, r := range records {
        result[r.ServiceId] = &r.Config
    for _, r := range serviceConfigs {
        result[r.Type] = &r.Config
    }

    return result, nil
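The rewritten `upsert` above drops the hand-written `INSERT ... ON CONFLICT` SQL in favour of bun's `On(...)` clause with `Set(...)`. A self-contained sketch of that idiom; the table and struct here are illustrative, not the actual SigNoz schema:

```go
package main

import (
	"context"

	"github.com/uptrace/bun"
)

// ServiceConfig is an illustrative model for the upsert pattern.
type ServiceConfig struct {
	bun.BaseModel `bun:"table:service_config"`

	CloudIntegrationID string `bun:"cloud_integration_id"`
	Type               string `bun:"type"`
	Config             string `bun:"config"`
}

// upsertConfig inserts a row, or updates config in place when a row with
// the same (cloud_integration_id, type) already exists — mirroring the
// conflict clause used by the repository above.
func upsertConfig(ctx context.Context, db *bun.DB, cfg *ServiceConfig) error {
	_, err := db.NewInsert().
		Model(cfg).
		On("CONFLICT (cloud_integration_id, type) DO UPDATE").
		Set("config = EXCLUDED.config").
		Exec(ctx)
	return err
}
```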
@@ -22,6 +22,7 @@ import (
    errorsV2 "github.com/SigNoz/signoz/pkg/errors"
    "github.com/SigNoz/signoz/pkg/http/render"
    "github.com/SigNoz/signoz/pkg/modules/preference"
    "github.com/SigNoz/signoz/pkg/query-service/app/integrations"
    "github.com/SigNoz/signoz/pkg/query-service/app/metricsexplorer"
    "github.com/SigNoz/signoz/pkg/signoz"
    "github.com/SigNoz/signoz/pkg/valuer"
@@ -37,7 +38,6 @@ import (
    "github.com/SigNoz/signoz/pkg/query-service/app/dashboards"
    "github.com/SigNoz/signoz/pkg/query-service/app/explorer"
    "github.com/SigNoz/signoz/pkg/query-service/app/inframetrics"
    "github.com/SigNoz/signoz/pkg/query-service/app/integrations"
    queues2 "github.com/SigNoz/signoz/pkg/query-service/app/integrations/messagingQueues/queues"
    "github.com/SigNoz/signoz/pkg/query-service/app/integrations/thirdPartyApi"
    "github.com/SigNoz/signoz/pkg/query-service/app/logs"
@@ -1082,14 +1082,14 @@ func (aH *APIHandler) getDashboards(w http.ResponseWriter, r *http.Request) {
    }

    ic := aH.IntegrationsController
    installedIntegrationDashboards, err := ic.GetDashboardsForInstalledIntegrations(r.Context())
    installedIntegrationDashboards, err := ic.GetDashboardsForInstalledIntegrations(r.Context(), claims.OrgID)
    if err != nil {
        zap.L().Error("failed to get dashboards for installed integrations", zap.Error(err))
    } else {
        allDashboards = append(allDashboards, installedIntegrationDashboards...)
    }

    cloudIntegrationDashboards, err := aH.CloudIntegrationsController.AvailableDashboards(r.Context())
    cloudIntegrationDashboards, err := aH.CloudIntegrationsController.AvailableDashboards(r.Context(), claims.OrgID)
    if err != nil {
        zap.L().Error("failed to get cloud dashboards", zap.Error(err))
    } else {
@@ -1267,7 +1267,7 @@ func (aH *APIHandler) getDashboard(w http.ResponseWriter, r *http.Request) {

    if aH.CloudIntegrationsController.IsCloudIntegrationDashboardUuid(uuid) {
        dashboard, apiError = aH.CloudIntegrationsController.GetDashboardById(
            r.Context(), uuid,
            r.Context(), claims.OrgID, uuid,
        )
        if apiError != nil {
            RespondError(w, apiError, nil)
@@ -1276,7 +1276,7 @@ func (aH *APIHandler) getDashboard(w http.ResponseWriter, r *http.Request) {

    } else {
        dashboard, apiError = aH.IntegrationsController.GetInstalledIntegrationDashboardById(
            r.Context(), uuid,
            r.Context(), claims.OrgID, uuid,
        )
        if apiError != nil {
            RespondError(w, apiError, nil)
@@ -2207,6 +2207,11 @@ func (aH *APIHandler) editUser(w http.ResponseWriter, r *http.Request) {
        old.ProfilePictureURL = update.ProfilePictureURL
    }

    if slices.Contains(types.AllIntegrationUserEmails, types.IntegrationUserEmail(old.Email)) {
        render.Error(w, errorsV2.Newf(errorsV2.TypeInvalidInput, errorsV2.CodeInvalidInput, "integration user cannot be updated"))
        return
    }

    _, apiErr = dao.DB().EditUser(ctx, &types.User{
        ID: old.ID,
        Name: old.Name,
@@ -2238,6 +2243,11 @@ func (aH *APIHandler) deleteUser(w http.ResponseWriter, r *http.Request) {
        return
    }

    if slices.Contains(types.AllIntegrationUserEmails, types.IntegrationUserEmail(user.Email)) {
        render.Error(w, errorsV2.Newf(errorsV2.TypeInvalidInput, errorsV2.CodeInvalidInput, "integration user cannot be deleted"))
        return
    }

    if user == nil {
        RespondError(w, &model.ApiError{
            Typ: model.ErrorNotFound,
@@ -3497,9 +3507,14 @@ func (aH *APIHandler) ListIntegrations(
    for k, values := range r.URL.Query() {
        params[k] = values[0]
    }
    claims, ok := authtypes.ClaimsFromContext(r.Context())
    if !ok {
        render.Error(w, errorsV2.Newf(errorsV2.TypeUnauthenticated, errorsV2.CodeUnauthenticated, "unauthenticated"))
        return
    }

    resp, apiErr := aH.IntegrationsController.ListIntegrations(
        r.Context(), params,
        r.Context(), claims.OrgID, params,
    )
    if apiErr != nil {
        RespondError(w, apiErr, "Failed to fetch integrations")
@@ -3512,8 +3527,13 @@ func (aH *APIHandler) GetIntegration(
    w http.ResponseWriter, r *http.Request,
) {
    integrationId := mux.Vars(r)["integrationId"]
    claims, ok := authtypes.ClaimsFromContext(r.Context())
    if !ok {
        render.Error(w, errorsV2.Newf(errorsV2.TypeUnauthenticated, errorsV2.CodeUnauthenticated, "unauthenticated"))
        return
    }
    integration, apiErr := aH.IntegrationsController.GetIntegration(
        r.Context(), integrationId,
        r.Context(), claims.OrgID, integrationId,
    )
    if apiErr != nil {
        RespondError(w, apiErr, "Failed to fetch integration details")
@@ -3527,8 +3547,13 @@ func (aH *APIHandler) GetIntegrationConnectionStatus(
    w http.ResponseWriter, r *http.Request,
) {
    integrationId := mux.Vars(r)["integrationId"]
    claims, ok := authtypes.ClaimsFromContext(r.Context())
    if !ok {
        render.Error(w, errorsV2.Newf(errorsV2.TypeUnauthenticated, errorsV2.CodeUnauthenticated, "unauthenticated"))
        return
    }
    isInstalled, apiErr := aH.IntegrationsController.IsIntegrationInstalled(
        r.Context(), integrationId,
        r.Context(), claims.OrgID, integrationId,
    )
    if apiErr != nil {
        RespondError(w, apiErr, "failed to check if integration is installed")
@@ -3542,7 +3567,7 @@ func (aH *APIHandler) GetIntegrationConnectionStatus(
    }

    connectionTests, apiErr := aH.IntegrationsController.GetIntegrationConnectionTests(
        r.Context(), integrationId,
        r.Context(), claims.OrgID, integrationId,
    )
    if apiErr != nil {
        RespondError(w, apiErr, "failed to fetch integration connection tests")
@@ -3741,8 +3766,14 @@ func (aH *APIHandler) InstallIntegration(
        return
    }

    claims, ok := authtypes.ClaimsFromContext(r.Context())
    if !ok {
        render.Error(w, errorsV2.Newf(errorsV2.TypeUnauthenticated, errorsV2.CodeUnauthenticated, "unauthenticated"))
        return
    }

    integration, apiErr := aH.IntegrationsController.Install(
        r.Context(), &req,
        r.Context(), claims.OrgID, &req,
    )
    if apiErr != nil {
        RespondError(w, apiErr, nil)
@@ -3763,7 +3794,13 @@ func (aH *APIHandler) UninstallIntegration(
        return
    }

    apiErr := aH.IntegrationsController.Uninstall(r.Context(), &req)
    claims, ok := authtypes.ClaimsFromContext(r.Context())
    if !ok {
        render.Error(w, errorsV2.Newf(errorsV2.TypeUnauthenticated, errorsV2.CodeUnauthenticated, "unauthenticated"))
        return
    }

    apiErr := aH.IntegrationsController.Uninstall(r.Context(), claims.OrgID, &req)
    if apiErr != nil {
        RespondError(w, apiErr, nil)
        return
@@ -3819,8 +3856,14 @@ func (aH *APIHandler) CloudIntegrationsListConnectedAccounts(
) {
    cloudProvider := mux.Vars(r)["cloudProvider"]

    claims, ok := authtypes.ClaimsFromContext(r.Context())
    if !ok {
        render.Error(w, errorsV2.Newf(errorsV2.TypeUnauthenticated, errorsV2.CodeUnauthenticated, "unauthenticated"))
        return
    }

    resp, apiErr := aH.CloudIntegrationsController.ListConnectedAccounts(
        r.Context(), cloudProvider,
        r.Context(), claims.OrgID, cloudProvider,
    )

    if apiErr != nil {
@@ -3841,8 +3884,14 @@ func (aH *APIHandler) CloudIntegrationsGenerateConnectionUrl(
        return
    }

    claims, ok := authtypes.ClaimsFromContext(r.Context())
    if !ok {
        render.Error(w, errorsV2.Newf(errorsV2.TypeUnauthenticated, errorsV2.CodeUnauthenticated, "unauthenticated"))
        return
    }

    result, apiErr := aH.CloudIntegrationsController.GenerateConnectionUrl(
        r.Context(), cloudProvider, req,
        r.Context(), claims.OrgID, cloudProvider, req,
    )

    if apiErr != nil {
@@ -3859,8 +3908,14 @@ func (aH *APIHandler) CloudIntegrationsGetAccountStatus(
    cloudProvider := mux.Vars(r)["cloudProvider"]
    accountId := mux.Vars(r)["accountId"]

    claims, ok := authtypes.ClaimsFromContext(r.Context())
    if !ok {
        render.Error(w, errorsV2.Newf(errorsV2.TypeUnauthenticated, errorsV2.CodeUnauthenticated, "unauthenticated"))
        return
    }

    resp, apiErr := aH.CloudIntegrationsController.GetAccountStatus(
        r.Context(), cloudProvider, accountId,
        r.Context(), claims.OrgID, cloudProvider, accountId,
    )

    if apiErr != nil {
@@ -3881,8 +3936,14 @@ func (aH *APIHandler) CloudIntegrationsAgentCheckIn(
        return
    }

    claims, ok := authtypes.ClaimsFromContext(r.Context())
    if !ok {
        render.Error(w, errorsV2.Newf(errorsV2.TypeUnauthenticated, errorsV2.CodeUnauthenticated, "unauthenticated"))
        return
    }

    result, apiErr := aH.CloudIntegrationsController.CheckInAsAgent(
        r.Context(), cloudProvider, req,
        r.Context(), claims.OrgID, cloudProvider, req,
    )

    if apiErr != nil {
@@ -3905,8 +3966,14 @@ func (aH *APIHandler) CloudIntegrationsUpdateAccountConfig(
        return
    }

    claims, ok := authtypes.ClaimsFromContext(r.Context())
    if !ok {
        render.Error(w, errorsV2.Newf(errorsV2.TypeUnauthenticated, errorsV2.CodeUnauthenticated, "unauthenticated"))
        return
    }

    result, apiErr := aH.CloudIntegrationsController.UpdateAccountConfig(
        r.Context(), cloudProvider, accountId, req,
        r.Context(), claims.OrgID, cloudProvider, accountId, req,
    )

    if apiErr != nil {
@@ -3923,8 +3990,14 @@ func (aH *APIHandler) CloudIntegrationsDisconnectAccount(
    cloudProvider := mux.Vars(r)["cloudProvider"]
    accountId := mux.Vars(r)["accountId"]

    claims, ok := authtypes.ClaimsFromContext(r.Context())
    if !ok {
        render.Error(w, errorsV2.Newf(errorsV2.TypeUnauthenticated, errorsV2.CodeUnauthenticated, "unauthenticated"))
        return
    }

    result, apiErr := aH.CloudIntegrationsController.DisconnectAccount(
        r.Context(), cloudProvider, accountId,
        r.Context(), claims.OrgID, cloudProvider, accountId,
    )

    if apiErr != nil {
@@ -3947,8 +4020,14 @@ func (aH *APIHandler) CloudIntegrationsListServices(
        cloudAccountId = &cloudAccountIdQP
    }

    claims, ok := authtypes.ClaimsFromContext(r.Context())
    if !ok {
        render.Error(w, errorsV2.Newf(errorsV2.TypeUnauthenticated, errorsV2.CodeUnauthenticated, "unauthenticated"))
        return
    }

    resp, apiErr := aH.CloudIntegrationsController.ListServices(
        r.Context(), cloudProvider, cloudAccountId,
        r.Context(), claims.OrgID, cloudProvider, cloudAccountId,
    )

    if apiErr != nil {
@@ -3971,8 +4050,14 @@ func (aH *APIHandler) CloudIntegrationsGetServiceDetails(
        cloudAccountId = &cloudAccountIdQP
    }

    claims, ok := authtypes.ClaimsFromContext(r.Context())
    if !ok {
        render.Error(w, errorsV2.Newf(errorsV2.TypeUnauthenticated, errorsV2.CodeUnauthenticated, "unauthenticated"))
        return
    }

    resp, apiErr := aH.CloudIntegrationsController.GetServiceDetails(
        r.Context(), cloudProvider, serviceId, cloudAccountId,
        r.Context(), claims.OrgID, cloudProvider, serviceId, cloudAccountId,
    )
    if apiErr != nil {
        RespondError(w, apiErr, nil)
@@ -4211,8 +4296,14 @@ func (aH *APIHandler) CloudIntegrationsUpdateServiceConfig(
        return
    }

    claims, ok := authtypes.ClaimsFromContext(r.Context())
    if !ok {
        render.Error(w, errorsV2.Newf(errorsV2.TypeUnauthenticated, errorsV2.CodeUnauthenticated, "unauthenticated"))
        return
    }

    result, apiErr := aH.CloudIntegrationsController.UpdateServiceConfig(
        r.Context(), cloudProvider, serviceId, req,
        r.Context(), claims.OrgID, cloudProvider, serviceId, req,
    )

    if apiErr != nil {
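Every handler above repeats the same three-step guard: read the claims from the request context, respond 401 if they are absent, then pass `claims.OrgID` down to the controller. A sketch of how that guard could be factored into a helper — `orgIDFromRequest` is hypothetical, not part of this diff; only `authtypes.ClaimsFromContext` comes from the code above, and its import path is assumed from the repository layout:

```go
package main

import (
	"net/http"

	"github.com/SigNoz/signoz/pkg/types/authtypes"
)

// orgIDFromRequest wraps the claims lookup repeated in each handler above.
// It returns the authenticated caller's org ID, or ok=false when the
// request context carries no claims (the handlers then render a 401).
func orgIDFromRequest(r *http.Request) (orgID string, ok bool) {
	claims, ok := authtypes.ClaimsFromContext(r.Context())
	if !ok {
		return "", false
	}
	return claims.OrgID, true
}
```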
@@ -18,7 +18,7 @@ type Controller struct {
func NewController(sqlStore sqlstore.SQLStore) (
    *Controller, error,
) {
    mgr, err := NewManager(sqlStore.SQLxDB())
    mgr, err := NewManager(sqlStore)
    if err != nil {
        return nil, fmt.Errorf("couldn't create integrations manager: %w", err)
    }
@@ -35,7 +35,7 @@ type IntegrationsListResponse struct {
}

func (c *Controller) ListIntegrations(
    ctx context.Context, params map[string]string,
    ctx context.Context, orgId string, params map[string]string,
) (
    *IntegrationsListResponse, *model.ApiError,
) {
@@ -47,7 +47,7 @@ func (c *Controller) ListIntegrations(
        }
    }

    integrations, apiErr := c.mgr.ListIntegrations(ctx, filters)
    integrations, apiErr := c.mgr.ListIntegrations(ctx, orgId, filters)
    if apiErr != nil {
        return nil, apiErr
    }
@@ -58,16 +58,15 @@ func (c *Controller) ListIntegrations(
}

func (c *Controller) GetIntegration(
    ctx context.Context, integrationId string,
    ctx context.Context, orgId string, integrationId string,
) (*Integration, *model.ApiError) {
    return c.mgr.GetIntegration(ctx, integrationId)
    return c.mgr.GetIntegration(ctx, orgId, integrationId)
}

func (c *Controller) IsIntegrationInstalled(
    ctx context.Context,
    integrationId string,
    ctx context.Context, orgId string, integrationId string,
) (bool, *model.ApiError) {
    installation, apiErr := c.mgr.getInstalledIntegration(ctx, integrationId)
    installation, apiErr := c.mgr.getInstalledIntegration(ctx, orgId, integrationId)
    if apiErr != nil {
        return false, apiErr
    }
@@ -76,9 +75,9 @@ func (c *Controller) IsIntegrationInstalled(
}

func (c *Controller) GetIntegrationConnectionTests(
    ctx context.Context, integrationId string,
    ctx context.Context, orgId string, integrationId string,
) (*IntegrationConnectionTests, *model.ApiError) {
    return c.mgr.GetIntegrationConnectionTests(ctx, integrationId)
    return c.mgr.GetIntegrationConnectionTests(ctx, orgId, integrationId)
}

type InstallIntegrationRequest struct {
@@ -87,10 +86,10 @@ type InstallIntegrationRequest struct {
}

func (c *Controller) Install(
    ctx context.Context, req *InstallIntegrationRequest,
    ctx context.Context, orgId string, req *InstallIntegrationRequest,
) (*IntegrationsListItem, *model.ApiError) {
    res, apiErr := c.mgr.InstallIntegration(
        ctx, req.IntegrationId, req.Config,
        ctx, orgId, req.IntegrationId, req.Config,
    )
    if apiErr != nil {
        return nil, apiErr
@@ -104,7 +103,7 @@ type UninstallIntegrationRequest struct {
}

func (c *Controller) Uninstall(
    ctx context.Context, req *UninstallIntegrationRequest,
    ctx context.Context, orgId string, req *UninstallIntegrationRequest,
) *model.ApiError {
    if len(req.IntegrationId) < 1 {
        return model.BadRequest(fmt.Errorf(
@@ -113,7 +112,7 @@ func (c *Controller) Uninstall(
    }

    apiErr := c.mgr.UninstallIntegration(
        ctx, req.IntegrationId,
        ctx, orgId, req.IntegrationId,
    )
    if apiErr != nil {
        return apiErr
@@ -123,19 +122,19 @@ func (c *Controller) Uninstall(
}

func (c *Controller) GetPipelinesForInstalledIntegrations(
    ctx context.Context,
    ctx context.Context, orgId string,
) ([]pipelinetypes.GettablePipeline, *model.ApiError) {
    return c.mgr.GetPipelinesForInstalledIntegrations(ctx)
    return c.mgr.GetPipelinesForInstalledIntegrations(ctx, orgId)
}

func (c *Controller) GetDashboardsForInstalledIntegrations(
    ctx context.Context,
    ctx context.Context, orgId string,
) ([]types.Dashboard, *model.ApiError) {
    return c.mgr.GetDashboardsForInstalledIntegrations(ctx)
    return c.mgr.GetDashboardsForInstalledIntegrations(ctx, orgId)
}

func (c *Controller) GetInstalledIntegrationDashboardById(
    ctx context.Context, dashboardUuid string,
    ctx context.Context, orgId string, dashboardUuid string,
) (*types.Dashboard, *model.ApiError) {
    return c.mgr.GetInstalledIntegrationDashboardById(ctx, dashboardUuid)
    return c.mgr.GetInstalledIntegrationDashboardById(ctx, orgId, dashboardUuid)
}
@@ -5,15 +5,14 @@ import (
    "fmt"
    "slices"
    "strings"
    "time"

    "github.com/SigNoz/signoz/pkg/query-service/model"
    "github.com/SigNoz/signoz/pkg/query-service/rules"
    "github.com/SigNoz/signoz/pkg/query-service/utils"
    "github.com/SigNoz/signoz/pkg/sqlstore"
    "github.com/SigNoz/signoz/pkg/types"
    "github.com/SigNoz/signoz/pkg/types/pipelinetypes"
    "github.com/SigNoz/signoz/pkg/valuer"
    "github.com/jmoiron/sqlx"
)

type IntegrationAuthor struct {
@@ -105,16 +104,9 @@ type IntegrationsListItem struct {
    IsInstalled bool `json:"is_installed"`
}

type InstalledIntegration struct {
    IntegrationId string `json:"integration_id" db:"integration_id"`
    Config InstalledIntegrationConfig `json:"config_json" db:"config_json"`
    InstalledAt time.Time `json:"installed_at" db:"installed_at"`
}
type InstalledIntegrationConfig map[string]interface{}

type Integration struct {
    IntegrationDetails
    Installation *InstalledIntegration `json:"installation"`
    Installation *types.InstalledIntegration `json:"installation"`
}

type Manager struct {
@@ -122,8 +114,8 @@ type Manager struct {
    installedIntegrationsRepo InstalledIntegrationsRepo
}

func NewManager(db *sqlx.DB) (*Manager, error) {
    iiRepo, err := NewInstalledIntegrationsSqliteRepo(db)
func NewManager(store sqlstore.SQLStore) (*Manager, error) {
    iiRepo, err := NewInstalledIntegrationsSqliteRepo(store)
    if err != nil {
        return nil, fmt.Errorf(
            "could not init sqlite DB for installed integrations: %w", err,
@@ -142,6 +134,7 @@ type IntegrationsFilter struct {

func (m *Manager) ListIntegrations(
    ctx context.Context,
    orgId string,
    filter *IntegrationsFilter,
    // Expected to have pagination over time.
) ([]IntegrationsListItem, *model.ApiError) {
@@ -152,22 +145,22 @@ func (m *Manager) ListIntegrations(
        )
    }

    installed, apiErr := m.installedIntegrationsRepo.list(ctx)
    installed, apiErr := m.installedIntegrationsRepo.list(ctx, orgId)
    if apiErr != nil {
        return nil, model.WrapApiError(
            apiErr, "could not fetch installed integrations",
        )
    }
    installedIds := []string{}
    installedTypes := []string{}
    for _, ii := range installed {
        installedIds = append(installedIds, ii.IntegrationId)
        installedTypes = append(installedTypes, ii.Type)
    }

    result := []IntegrationsListItem{}
    for _, ai := range available {
        result = append(result, IntegrationsListItem{
            IntegrationSummary: ai.IntegrationSummary,
            IsInstalled: slices.Contains(installedIds, ai.Id),
            IsInstalled: slices.Contains(installedTypes, ai.Id),
        })
    }

@@ -188,6 +181,7 @@ func (m *Manager) ListIntegrations(

func (m *Manager) GetIntegration(
    ctx context.Context,
    orgId string,
    integrationId string,
) (*Integration, *model.ApiError) {
    integrationDetails, apiErr := m.getIntegrationDetails(
@@ -198,7 +192,7 @@ func (m *Manager) GetIntegration(
    }

    installation, apiErr := m.getInstalledIntegration(
        ctx, integrationId,
        ctx, orgId, integrationId,
    )
    if apiErr != nil {
        return nil, apiErr
@@ -212,6 +206,7 @@ func (m *Manager) GetIntegration(

func (m *Manager) GetIntegrationConnectionTests(
    ctx context.Context,
    orgId string,
    integrationId string,
) (*IntegrationConnectionTests, *model.ApiError) {
    integrationDetails, apiErr := m.getIntegrationDetails(
@@ -225,8 +220,9 @@ func (m *Manager) GetIntegrationConnectionTests(

func (m *Manager) InstallIntegration(
    ctx context.Context,
    orgId string,
    integrationId string,
    config InstalledIntegrationConfig,
    config types.InstalledIntegrationConfig,
) (*IntegrationsListItem, *model.ApiError) {
    integrationDetails, apiErr := m.getIntegrationDetails(ctx, integrationId)
    if apiErr != nil {
@@ -234,7 +230,7 @@ func (m *Manager) InstallIntegration(
    }

    _, apiErr = m.installedIntegrationsRepo.upsert(
        ctx, integrationId, config,
        ctx, orgId, integrationId, config,
    )
    if apiErr != nil {
        return nil, model.WrapApiError(
@@ -250,15 +246,17 @@ func (m *Manager) InstallIntegration(

func (m *Manager) UninstallIntegration(
    ctx context.Context,
    orgId string,
    integrationId string,
) *model.ApiError {
    return m.installedIntegrationsRepo.delete(ctx, integrationId)
    return m.installedIntegrationsRepo.delete(ctx, orgId, integrationId)
}

func (m *Manager) GetPipelinesForInstalledIntegrations(
    ctx context.Context,
    orgId string,
) ([]pipelinetypes.GettablePipeline, *model.ApiError) {
    installedIntegrations, apiErr := m.getInstalledIntegrations(ctx)
    installedIntegrations, apiErr := m.getInstalledIntegrations(ctx, orgId)
    if apiErr != nil {
        return nil, apiErr
    }
@@ -308,6 +306,7 @@ func (m *Manager) parseDashboardUuid(dashboardUuid string) (

func (m *Manager) GetInstalledIntegrationDashboardById(
    ctx context.Context,
    orgId string,
    dashboardUuid string,
) (*types.Dashboard, *model.ApiError) {
    integrationId, dashboardId, apiErr := m.parseDashboardUuid(dashboardUuid)
@@ -315,7 +314,7 @@ func (m *Manager) GetInstalledIntegrationDashboardById(
        return nil, apiErr
    }

    integration, apiErr := m.GetIntegration(ctx, integrationId)
    integration, apiErr := m.GetIntegration(ctx, orgId, integrationId)
    if apiErr != nil {
        return nil, apiErr
    }
@@ -355,8 +354,9 @@ func (m *Manager) GetInstalledIntegrationDashboardById(

func (m *Manager) GetDashboardsForInstalledIntegrations(
    ctx context.Context,
    orgId string,
) ([]types.Dashboard, *model.ApiError) {
    installedIntegrations, apiErr := m.getInstalledIntegrations(ctx)
    installedIntegrations, apiErr := m.getInstalledIntegrations(ctx, orgId)
    if apiErr != nil {
        return nil, apiErr
    }
@@ -421,10 +421,11 @@ func (m *Manager) getIntegrationDetails(

func (m *Manager) getInstalledIntegration(
    ctx context.Context,
    orgId string,
    integrationId string,
) (*InstalledIntegration, *model.ApiError) {
) (*types.InstalledIntegration, *model.ApiError) {
    iis, apiErr := m.installedIntegrationsRepo.get(
        ctx, []string{integrationId},
        ctx, orgId, []string{integrationId},
    )
    if apiErr != nil {
        return nil, model.WrapApiError(apiErr, fmt.Sprintf(
@@ -441,32 +442,33 @@ func (m *Manager) getInstalledIntegration(

func (m *Manager) getInstalledIntegrations(
    ctx context.Context,
    orgId string,
) (
    map[string]Integration, *model.ApiError,
) {
    installations, apiErr := m.installedIntegrationsRepo.list(ctx)
    installations, apiErr := m.installedIntegrationsRepo.list(ctx, orgId)
    if apiErr != nil {
        return nil, apiErr
    }

    installedIds := utils.MapSlice(installations, func(i InstalledIntegration) string {
        return i.IntegrationId
    installedTypes := utils.MapSlice(installations, func(i types.InstalledIntegration) string {
        return i.Type
    })
    integrationDetails, apiErr := m.availableIntegrationsRepo.get(ctx, installedIds)
    integrationDetails, apiErr := m.availableIntegrationsRepo.get(ctx, installedTypes)
    if apiErr != nil {
        return nil, apiErr
    }

    result := map[string]Integration{}
    for _, ii := range installations {
        iDetails, exists := integrationDetails[ii.IntegrationId]
        iDetails, exists := integrationDetails[ii.Type]
        if !exists {
            return nil, model.InternalError(fmt.Errorf(
                "couldn't find integration details for %s", ii.IntegrationId,
                "couldn't find integration details for %s", ii.Type,
            ))
        }

        result[ii.IntegrationId] = Integration{
        result[ii.Type] = Integration{
            Installation: &ii,
            IntegrationDetails: iDetails,
        }
@@ -14,18 +14,23 @@ func TestIntegrationLifecycle(t *testing.T) {
    mgr := NewTestIntegrationsManager(t)
    ctx := context.Background()

    user, apiErr := createTestUser()
    if apiErr != nil {
        t.Fatalf("could not create test user: %v", apiErr)
    }

    ii := true
    installedIntegrationsFilter := &IntegrationsFilter{
        IsInstalled: &ii,
    }

    installedIntegrations, apiErr := mgr.ListIntegrations(
        ctx, installedIntegrationsFilter,
        ctx, user.OrgID, installedIntegrationsFilter,
    )
    require.Nil(apiErr)
    require.Equal([]IntegrationsListItem{}, installedIntegrations)

    availableIntegrations, apiErr := mgr.ListIntegrations(ctx, nil)
    availableIntegrations, apiErr := mgr.ListIntegrations(ctx, user.OrgID, nil)
    require.Nil(apiErr)
    require.Equal(2, len(availableIntegrations))
    require.False(availableIntegrations[0].IsInstalled)
@@ -33,44 +38,44 @@ func TestIntegrationLifecycle(t *testing.T) {

    testIntegrationConfig := map[string]interface{}{}
    installed, apiErr := mgr.InstallIntegration(
        ctx, availableIntegrations[1].Id, testIntegrationConfig,
        ctx, user.OrgID, availableIntegrations[1].Id, testIntegrationConfig,
    )
    require.Nil(apiErr)
    require.Equal(installed.Id, availableIntegrations[1].Id)

    integration, apiErr := mgr.GetIntegration(ctx, availableIntegrations[1].Id)
    integration, apiErr := mgr.GetIntegration(ctx, user.OrgID, availableIntegrations[1].Id)
    require.Nil(apiErr)
    require.Equal(integration.Id, availableIntegrations[1].Id)
    require.NotNil(integration.Installation)

    installedIntegrations, apiErr = mgr.ListIntegrations(
        ctx, installedIntegrationsFilter,
        ctx, user.OrgID, installedIntegrationsFilter,
    )
    require.Nil(apiErr)
    require.Equal(1, len(installedIntegrations))
    require.Equal(availableIntegrations[1].Id, installedIntegrations[0].Id)

    availableIntegrations, apiErr = mgr.ListIntegrations(ctx, nil)
    availableIntegrations, apiErr = mgr.ListIntegrations(ctx, user.OrgID, nil)
    require.Nil(apiErr)
    require.Equal(2, len(availableIntegrations))
    require.False(availableIntegrations[0].IsInstalled)
    require.True(availableIntegrations[1].IsInstalled)

    apiErr = mgr.UninstallIntegration(ctx, installed.Id)
    apiErr = mgr.UninstallIntegration(ctx, user.OrgID, installed.Id)
    require.Nil(apiErr)

    integration, apiErr = mgr.GetIntegration(ctx, availableIntegrations[1].Id)
    integration, apiErr = mgr.GetIntegration(ctx, user.OrgID, availableIntegrations[1].Id)
    require.Nil(apiErr)
    require.Equal(integration.Id, availableIntegrations[1].Id)
    require.Nil(integration.Installation)

    installedIntegrations, apiErr = mgr.ListIntegrations(
        ctx, installedIntegrationsFilter,
        ctx, user.OrgID, installedIntegrationsFilter,
    )
    require.Nil(apiErr)
    require.Equal(0, len(installedIntegrations))

    availableIntegrations, apiErr = mgr.ListIntegrations(ctx, nil)
    availableIntegrations, apiErr = mgr.ListIntegrations(ctx, user.OrgID, nil)
    require.Nil(apiErr)
    require.Equal(2, len(availableIntegrations))
    require.False(availableIntegrations[0].IsInstalled)
@@ -2,51 +2,33 @@ package integrations

import (
    "context"
    "database/sql/driver"
    "encoding/json"

    "github.com/SigNoz/signoz/pkg/query-service/model"
    "github.com/pkg/errors"
    "github.com/SigNoz/signoz/pkg/types"
)

// For serializing from db
func (c *InstalledIntegrationConfig) Scan(src interface{}) error {
    if data, ok := src.([]byte); ok {
        return json.Unmarshal(data, &c)
    }
    return nil
}

// For serializing to db
func (c *InstalledIntegrationConfig) Value() (driver.Value, error) {
    filterSetJson, err := json.Marshal(c)
    if err != nil {
        return nil, errors.Wrap(err, "could not serialize integration config to JSON")
    }
    return filterSetJson, nil
}

type InstalledIntegrationsRepo interface {
    list(context.Context) ([]InstalledIntegration, *model.ApiError)
    list(ctx context.Context, orgId string) ([]types.InstalledIntegration, *model.ApiError)

    get(
        ctx context.Context, integrationIds []string,
    ) (map[string]InstalledIntegration, *model.ApiError)
        ctx context.Context, orgId string, integrationTypes []string,
    ) (map[string]types.InstalledIntegration, *model.ApiError)

    upsert(
        ctx context.Context,
        integrationId string,
        config InstalledIntegrationConfig,
    ) (*InstalledIntegration, *model.ApiError)
        orgId string,
        integrationType string,
        config types.InstalledIntegrationConfig,
    ) (*types.InstalledIntegration, *model.ApiError)

    delete(ctx context.Context, integrationId string) *model.ApiError
    delete(ctx context.Context, orgId string, integrationType string) *model.ApiError
}

type AvailableIntegrationsRepo interface {
    list(context.Context) ([]IntegrationDetails, *model.ApiError)

    get(
        ctx context.Context, integrationIds []string,
        ctx context.Context, integrationTypes []string,
    ) (map[string]IntegrationDetails, *model.ApiError)

    // AvailableIntegrationsRepo implementations are expected to cache
@@ -3,39 +3,37 @@ package integrations
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"strings"
|
||||
|
||||
"github.com/SigNoz/signoz/pkg/query-service/model"
|
||||
"github.com/jmoiron/sqlx"
|
||||
"github.com/SigNoz/signoz/pkg/sqlstore"
|
||||
"github.com/SigNoz/signoz/pkg/types"
|
||||
"github.com/SigNoz/signoz/pkg/valuer"
|
||||
"github.com/uptrace/bun"
|
||||
)
|
||||
|
||||
type InstalledIntegrationsSqliteRepo struct {
|
||||
db *sqlx.DB
|
||||
store sqlstore.SQLStore
|
||||
}
|
||||
|
||||
func NewInstalledIntegrationsSqliteRepo(db *sqlx.DB) (
|
||||
func NewInstalledIntegrationsSqliteRepo(store sqlstore.SQLStore) (
|
||||
*InstalledIntegrationsSqliteRepo, error,
|
||||
) {
|
||||
return &InstalledIntegrationsSqliteRepo{
|
||||
db: db,
|
||||
store: store,
|
||||
}, nil
|
||||
}
|
||||
|
||||
func (r *InstalledIntegrationsSqliteRepo) list(
|
||||
ctx context.Context,
|
||||
) ([]InstalledIntegration, *model.ApiError) {
|
||||
integrations := []InstalledIntegration{}
|
||||
orgId string,
|
 ) ([]types.InstalledIntegration, *model.ApiError) {
 	integrations := []types.InstalledIntegration{}

-	err := r.db.SelectContext(
-		ctx, &integrations, `
-			select
-				integration_id,
-				config_json,
-				installed_at
-			from integrations_installed
-			order by installed_at
-		`,
-	)
+	err := r.store.BunDB().NewSelect().
+		Model(&integrations).
+		Where("org_id = ?", orgId).
+		Order("installed_at").
+		Scan(ctx)
 	if err != nil {
 		return nil, model.InternalError(fmt.Errorf(
 			"could not query installed integrations: %w", err,
@@ -45,38 +43,28 @@ func (r *InstalledIntegrationsSqliteRepo) list(
 }

 func (r *InstalledIntegrationsSqliteRepo) get(
-	ctx context.Context, integrationIds []string,
-) (map[string]InstalledIntegration, *model.ApiError) {
-	integrations := []InstalledIntegration{}
+	ctx context.Context, orgId string, integrationTypes []string,
+) (map[string]types.InstalledIntegration, *model.ApiError) {
+	integrations := []types.InstalledIntegration{}

-	idPlaceholders := []string{}
-	idValues := []interface{}{}
-	for _, id := range integrationIds {
-		idPlaceholders = append(idPlaceholders, "?")
-		idValues = append(idValues, id)
+	typeValues := []interface{}{}
+	for _, integrationType := range integrationTypes {
+		typeValues = append(typeValues, integrationType)
 	}

-	err := r.db.SelectContext(
-		ctx, &integrations, fmt.Sprintf(`
-			select
-				integration_id,
-				config_json,
-				installed_at
-			from integrations_installed
-			where integration_id in (%s)`,
-			strings.Join(idPlaceholders, ", "),
-		),
-		idValues...,
-	)
+	err := r.store.BunDB().NewSelect().Model(&integrations).
+		Where("org_id = ?", orgId).
+		Where("type IN (?)", bun.In(typeValues)).
+		Scan(ctx)
 	if err != nil {
 		return nil, model.InternalError(fmt.Errorf(
 			"could not query installed integrations: %w", err,
 		))
 	}

-	result := map[string]InstalledIntegration{}
+	result := map[string]types.InstalledIntegration{}
 	for _, ii := range integrations {
-		result[ii.IntegrationId] = ii
+		result[ii.Type] = ii
 	}

 	return result, nil
@@ -84,55 +72,57 @@ func (r *InstalledIntegrationsSqliteRepo) get(

 func (r *InstalledIntegrationsSqliteRepo) upsert(
 	ctx context.Context,
-	integrationId string,
-	config InstalledIntegrationConfig,
-) (*InstalledIntegration, *model.ApiError) {
-	serializedConfig, err := config.Value()
-	if err != nil {
-		return nil, model.BadRequest(fmt.Errorf(
-			"could not serialize integration config: %w", err,
-		))
-	}
+	orgId string,
+	integrationType string,
+	config types.InstalledIntegrationConfig,
+) (*types.InstalledIntegration, *model.ApiError) {
+
+	integration := types.InstalledIntegration{
+		Identifiable: types.Identifiable{
+			ID: valuer.GenerateUUID(),
+		},
+		OrgID:  orgId,
+		Type:   integrationType,
+		Config: config,
+	}

-	_, dbErr := r.db.ExecContext(
-		ctx, `
-			INSERT INTO integrations_installed (
-				integration_id,
-				config_json
-			) values ($1, $2)
-			on conflict(integration_id) do update
-			set config_json=excluded.config_json
-		`, integrationId, serializedConfig,
-	)
+	_, dbErr := r.store.BunDB().NewInsert().
+		Model(&integration).
+		On("conflict (type, org_id) DO UPDATE").
+		Set("config = EXCLUDED.config").
+		Exec(ctx)

 	if dbErr != nil {
 		return nil, model.InternalError(fmt.Errorf(
 			"could not insert record for integration installation: %w", dbErr,
 		))
 	}

-	res, apiErr := r.get(ctx, []string{integrationId})
+	res, apiErr := r.get(ctx, orgId, []string{integrationType})
 	if apiErr != nil || len(res) < 1 {
 		return nil, model.WrapApiError(
 			apiErr, "could not fetch installed integration",
 		)
 	}

-	installed := res[integrationId]
+	installed := res[integrationType]

 	return &installed, nil
 }

 func (r *InstalledIntegrationsSqliteRepo) delete(
-	ctx context.Context, integrationId string,
+	ctx context.Context, orgId string, integrationType string,
 ) *model.ApiError {
-	_, dbErr := r.db.ExecContext(ctx, `
-		DELETE FROM integrations_installed where integration_id = ?
-	`, integrationId)
+	_, dbErr := r.store.BunDB().NewDelete().
+		Model(&types.InstalledIntegration{}).
+		Where("type = ?", integrationType).
+		Where("org_id = ?", orgId).
+		Exec(ctx)

 	if dbErr != nil {
 		return model.InternalError(fmt.Errorf(
 			"could not delete installed integration record for %s: %w",
-			integrationId, dbErr,
+			integrationType, dbErr,
 		))
 	}
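Note: the rewritten repo methods lean on bun's conflict-upsert support instead of hand-written SQL. A minimal standalone sketch of the same pattern, assuming a configured *bun.DB and the unique (type, org_id) index created by migration 026 further below; the package and model names here are illustrative, not part of this changeset:

	package example

	import (
		"context"

		"github.com/uptrace/bun"
	)

	// illustrative mirror of the installed_integration row
	type installedIntegration struct {
		bun.BaseModel `bun:"table:installed_integration"`

		ID     string `bun:"id,pk"`
		Type   string `bun:"type"`
		OrgID  string `bun:"org_id"`
		Config string `bun:"config"`
	}

	// upsertIntegration inserts a row, or updates config when a row with
	// the same (type, org_id) already exists.
	func upsertIntegration(ctx context.Context, db *bun.DB, row *installedIntegration) error {
		_, err := db.NewInsert().
			Model(row).
			On("CONFLICT (type, org_id) DO UPDATE").
			Set("config = EXCLUDED.config").
			Exec(ctx)
		return err
	}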
@@ -5,18 +5,22 @@ import (
 	"slices"
 	"testing"

+	"github.com/SigNoz/signoz/pkg/query-service/auth"
+	"github.com/SigNoz/signoz/pkg/query-service/constants"
+	"github.com/SigNoz/signoz/pkg/query-service/dao"
 	"github.com/SigNoz/signoz/pkg/query-service/model"
 	v3 "github.com/SigNoz/signoz/pkg/query-service/model/v3"
 	"github.com/SigNoz/signoz/pkg/query-service/rules"
 	"github.com/SigNoz/signoz/pkg/query-service/utils"
 	"github.com/SigNoz/signoz/pkg/types"
 	"github.com/SigNoz/signoz/pkg/types/pipelinetypes"
+	"github.com/google/uuid"
 )

 func NewTestIntegrationsManager(t *testing.T) *Manager {
 	testDB := utils.NewQueryServiceDBForTests(t)

-	installedIntegrationsRepo, err := NewInstalledIntegrationsSqliteRepo(testDB.SQLxDB())
+	installedIntegrationsRepo, err := NewInstalledIntegrationsSqliteRepo(testDB)
 	if err != nil {
 		t.Fatalf("could not init sqlite DB for installed integrations: %v", err)
 	}
@@ -27,6 +31,38 @@ func NewTestIntegrationsManager(t *testing.T) *Manager {
 	}
 }

+func createTestUser() (*types.User, *model.ApiError) {
+	// Create a test user for auth
+	ctx := context.Background()
+	org, apiErr := dao.DB().CreateOrg(ctx, &types.Organization{
+		Name: "test",
+	})
+	if apiErr != nil {
+		return nil, apiErr
+	}
+
+	group, apiErr := dao.DB().GetGroupByName(ctx, constants.AdminGroup)
+	if apiErr != nil {
+		return nil, apiErr
+	}
+
+	auth.InitAuthCache(ctx)
+
+	userId := uuid.NewString()
+	return dao.DB().CreateUser(
+		ctx,
+		&types.User{
+			ID:       userId,
+			Name:     "test",
+			Email:    userId[:8] + "test@test.com",
+			Password: "test",
+			OrgID:    org.ID,
+			GroupID:  group.ID,
+		},
+		true,
+	)
+}
+
 type TestAvailableIntegrationsRepo struct{}

 func (t *TestAvailableIntegrationsRepo) list(
@@ -25,12 +25,12 @@ import (
 type LogParsingPipelineController struct {
 	Repo

-	GetIntegrationPipelines func(context.Context) ([]pipelinetypes.GettablePipeline, *model.ApiError)
+	GetIntegrationPipelines func(context.Context, string) ([]pipelinetypes.GettablePipeline, *model.ApiError)
 }

 func NewLogParsingPipelinesController(
 	sqlStore sqlstore.SQLStore,
-	getIntegrationPipelines func(context.Context) ([]pipelinetypes.GettablePipeline, *model.ApiError),
+	getIntegrationPipelines func(context.Context, string) ([]pipelinetypes.GettablePipeline, *model.ApiError),
 ) (*LogParsingPipelineController, error) {
 	repo := NewRepo(sqlStore)
 	return &LogParsingPipelineController{
@@ -164,7 +164,7 @@ func (ic *LogParsingPipelineController) getEffectivePipelinesByVersion(
 		result = savedPipelines
 	}

-	integrationPipelines, apiErr := ic.GetIntegrationPipelines(ctx)
+	integrationPipelines, apiErr := ic.GetIntegrationPipelines(ctx, defaultOrgID)
 	if apiErr != nil {
 		return nil, model.WrapApiError(
 			apiErr, "could not get pipelines for installed integrations",
@@ -131,9 +131,11 @@ func getOperators(ops []pipelinetypes.PipelineOperator) ([]pipelinetypes.Pipelin
 			)
 		}
 		operator.If = fmt.Sprintf(
-			`%s && %s matches "^\\s*{.*}\\s*$"`, parseFromNotNilCheck, operator.ParseFrom,
+			`%s && (
+				(typeOf(%s) == "string" && %s matches "^\\s*{.*}\\s*$" ) ||
+				typeOf(%s) == "map[string]any"
+			)`, parseFromNotNilCheck, operator.ParseFrom, operator.ParseFrom, operator.ParseFrom,
 		)

 	} else if operator.Type == "add" {
 		if strings.HasPrefix(operator.Value, "EXPR(") && strings.HasSuffix(operator.Value, ")") {
 			expression := strings.TrimSuffix(strings.TrimPrefix(operator.Value, "EXPR("), ")")
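Note: for a json_parser operator whose ParseFrom is, say, `attributes.payload` (a hypothetical field), and assuming parseFromNotNilCheck renders as `attributes.payload != nil`, the new format string produces a guard along these lines:

	attributes.payload != nil && (
		(typeOf(attributes.payload) == "string" && attributes.payload matches "^\\s*{.*}\\s*$" ) ||
		typeOf(attributes.payload) == "map[string]any"
	)

In other words, the parser now also fires when an upstream processor has already turned the field into a map, rather than matching only on JSON-looking strings.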
@@ -646,7 +646,7 @@ func TestMembershipOpInProcessorFieldExpressions(t *testing.T) {
 	require := require.New(t)

 	testLogs := []model.SignozLog{
-		makeTestSignozLog("test log", map[string]interface{}{
+		makeTestSignozLog("test log", map[string]any{
 			"http.method":    "GET",
 			"order.products": `{"ids": ["pid0", "pid1"]}`,
 		}),
@@ -719,6 +719,21 @@ func parseFilterAttributeKeyRequest(r *http.Request) (*v3.FilterAttributeKeyRequ
 	aggregateOperator := v3.AggregateOperator(r.URL.Query().Get("aggregateOperator"))
 	aggregateAttribute := r.URL.Query().Get("aggregateAttribute")
 	limit, err := strconv.Atoi(r.URL.Query().Get("limit"))
+	tagType := v3.TagType(r.URL.Query().Get("tagType"))
+
+	// an empty string is a valid tagType, i.e. retrieve all attributes
+	if tagType != "" {
+		// if tagType is "undefined" (a value a JavaScript frontend can send)
+		// or any other invalid value, set it to empty string instead of
+		// failing the request. Ideally we would reject the request, but we
+		// don't, to maintain backward compatibility.
+		if err := tagType.Validate(); err != nil {
+			tagType = ""
+		}
+	}

 	if err != nil {
 		limit = 50
 	}
@@ -739,6 +754,7 @@ func parseFilterAttributeKeyRequest(r *http.Request) (*v3.FilterAttributeKeyRequ
 		AggregateAttribute: aggregateAttribute,
 		Limit:              limit,
 		SearchText:         r.URL.Query().Get("searchText"),
+		TagType:            tagType,
 	}
 	return &req, nil
 }
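A small illustration of the resulting fallback behavior, in the same package and assuming fmt is imported; the mapping is consistent with the test cases further below:

	// sketch: how raw tagType query values behave after the Validate() fallback
	func tagTypeFallbackExamples() {
		for _, raw := range []string{"resource", "scope", "tag", "undefined", ""} {
			tagType := v3.TagType(raw)
			if tagType != "" {
				if err := tagType.Validate(); err != nil {
					tagType = "" // invalid values degrade to "match all attributes"
				}
			}
			fmt.Printf("%q -> %q\n", raw, tagType)
		}
		// "resource" -> "resource", "scope" -> "scope", "tag" -> "tag",
		// "undefined" -> "", "" -> ""
	}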
@@ -861,7 +877,7 @@ func chTransformQuery(query string, variables map[string]interface{}) {
 	transformer := chVariables.NewQueryTransformer(query, varsForTransform)
 	transformedQuery, err := transformer.Transform()
 	if err != nil {
-		zap.L().Warn("failed to transform clickhouse query", zap.Error(err))
+		zap.L().Warn("failed to transform clickhouse query", zap.String("query", query), zap.Error(err))
 	}
 	zap.L().Info("transformed clickhouse query", zap.String("transformedQuery", transformedQuery), zap.String("originalQuery", query))
 }
@@ -112,6 +112,7 @@ func TestParseFilterAttributeKeyRequest(t *testing.T) {
 		expectedSearchText string
 		expectErr          bool
 		errMsg             string
+		expectedTagType    v3.TagType
 	}{
 		{
 			desc: "valid operator and data source",
@@ -168,6 +169,38 @@ func TestParseFilterAttributeKeyRequest(t *testing.T) {
 			expectedDataSource: v3.DataSourceTraces,
 			expectedLimit:      50,
 		},
+		{
+			desc:               "invalid tag type",
+			queryString:        "aggregateOperator=avg&dataSource=traces&tagType=invalid",
+			expectedOperator:   v3.AggregateOperatorAvg,
+			expectedDataSource: v3.DataSourceTraces,
+			expectedTagType:    "",
+			expectedLimit:      50,
+		},
+		{
+			desc:               "valid tag type",
+			queryString:        "aggregateOperator=avg&dataSource=traces&tagType=resource",
+			expectedOperator:   v3.AggregateOperatorAvg,
+			expectedDataSource: v3.DataSourceTraces,
+			expectedTagType:    v3.TagTypeResource,
+			expectedLimit:      50,
+		},
+		{
+			desc:               "valid tag type",
+			queryString:        "aggregateOperator=avg&dataSource=traces&tagType=scope",
+			expectedOperator:   v3.AggregateOperatorAvg,
+			expectedDataSource: v3.DataSourceTraces,
+			expectedTagType:    v3.TagTypeInstrumentationScope,
+			expectedLimit:      50,
+		},
+		{
+			desc:               "valid tag type",
+			queryString:        "aggregateOperator=avg&dataSource=traces&tagType=tag",
+			expectedOperator:   v3.AggregateOperatorAvg,
+			expectedDataSource: v3.DataSourceTraces,
+			expectedTagType:    v3.TagTypeTag,
+			expectedLimit:      50,
+		},
 	}

 	for _, reqCase := range reqCases {
@@ -439,7 +439,7 @@ func RegisterFirstUser(ctx context.Context, req *RegisterRequest) (*types.User,
 	}

 	user := &types.User{
-		ID:       uuid.NewString(),
+		ID:       uuid.New().String(),
 		Name:     req.Name,
 		Email:    req.Email,
 		Password: hash,
@@ -519,7 +519,7 @@ func RegisterInvitedUser(ctx context.Context, req *RegisterRequest, nopassword b
 	}

 	user := &types.User{
-		ID:       uuid.NewString(),
+		ID:       uuid.New().String(),
 		Name:     req.Name,
 		Email:    req.Email,
 		Password: hash,
@@ -3,6 +3,7 @@ package auth
 import (
 	"context"

+	errorsV2 "github.com/SigNoz/signoz/pkg/errors"
 	"github.com/SigNoz/signoz/pkg/query-service/constants"
 	"github.com/SigNoz/signoz/pkg/query-service/dao"
 	"github.com/SigNoz/signoz/pkg/types"
@@ -51,7 +52,7 @@ func InitAuthCache(ctx context.Context) error {
 func GetUserFromReqContext(ctx context.Context) (*types.GettableUser, error) {
 	claims, ok := authtypes.ClaimsFromContext(ctx)
 	if !ok {
-		return nil, errors.New("no claims found in context")
+		return nil, errorsV2.New(errorsV2.TypeInvalidInput, errorsV2.CodeInvalidInput, "no claims found in context")
 	}

 	user := &types.GettableUser{
@@ -248,6 +248,7 @@ func (q TagType) Validate() error {
 type FilterAttributeKeyRequest struct {
 	DataSource         DataSource        `json:"dataSource"`
 	AggregateOperator  AggregateOperator `json:"aggregateOperator"`
+	TagType            TagType           `json:"tagType"`
 	AggregateAttribute string            `json:"aggregateAttribute"`
 	SearchText         string            `json:"searchText"`
 	Limit              int               `json:"limit"`
@@ -35,7 +35,7 @@ func TestAWSIntegrationAccountLifecycle(t *testing.T) {
 	)

 	// Should be able to generate a connection url from UI - initializing an integration account
-	testAccountConfig := cloudintegrations.AccountConfig{
+	testAccountConfig := types.AccountConfig{
 		EnabledRegions: []string{"us-east-1", "us-east-2"},
 	}
 	connectionUrlResp := testbed.GenerateConnectionUrlFromQS(
@@ -65,8 +65,8 @@ func TestAWSIntegrationAccountLifecycle(t *testing.T) {
 	testAWSAccountId := "4563215233"
 	agentCheckInResp := testbed.CheckInAsAgentWithQS(
 		"aws", cloudintegrations.AgentCheckInRequest{
-			AccountId:      testAccountId,
-			CloudAccountId: testAWSAccountId,
+			ID:        testAccountId,
+			AccountID: testAWSAccountId,
 		},
 	)
 	require.Equal(testAccountId, agentCheckInResp.AccountId)
@@ -91,20 +91,20 @@ func TestAWSIntegrationAccountLifecycle(t *testing.T) {
 	require.Equal(testAWSAccountId, accountsListResp2.Accounts[0].CloudAccountId)

 	// Should be able to update account config from UI
-	testAccountConfig2 := cloudintegrations.AccountConfig{
+	testAccountConfig2 := types.AccountConfig{
 		EnabledRegions: []string{"us-east-2", "us-west-1"},
 	}
 	latestAccount := testbed.UpdateAccountConfigWithQS(
 		"aws", testAccountId, testAccountConfig2,
 	)
-	require.Equal(testAccountId, latestAccount.Id)
+	require.Equal(testAccountId, latestAccount.ID.StringValue())
 	require.Equal(testAccountConfig2, *latestAccount.Config)

 	// The agent should now receive latest account config.
 	agentCheckInResp1 := testbed.CheckInAsAgentWithQS(
 		"aws", cloudintegrations.AgentCheckInRequest{
-			AccountId:      testAccountId,
-			CloudAccountId: testAWSAccountId,
+			ID:        testAccountId,
+			AccountID: testAWSAccountId,
 		},
 	)
 	require.Equal(testAccountId, agentCheckInResp1.AccountId)
@@ -114,14 +114,14 @@ func TestAWSIntegrationAccountLifecycle(t *testing.T) {
 	// Should be able to disconnect/remove account from UI.
 	tsBeforeDisconnect := time.Now()
 	latestAccount = testbed.DisconnectAccountWithQS("aws", testAccountId)
-	require.Equal(testAccountId, latestAccount.Id)
+	require.Equal(testAccountId, latestAccount.ID.StringValue())
 	require.LessOrEqual(tsBeforeDisconnect, *latestAccount.RemovedAt)

 	// The agent should receive the disconnected status in account config post disconnection
 	agentCheckInResp2 := testbed.CheckInAsAgentWithQS(
 		"aws", cloudintegrations.AgentCheckInRequest{
-			AccountId:      testAccountId,
-			CloudAccountId: testAWSAccountId,
+			ID:        testAccountId,
+			AccountID: testAWSAccountId,
 		},
 	)
 	require.Equal(testAccountId, agentCheckInResp2.AccountId)
@@ -157,13 +157,13 @@ func TestAWSIntegrationServices(t *testing.T) {
 	testAWSAccountId := "389389489489"
 	testbed.CheckInAsAgentWithQS(
 		"aws", cloudintegrations.AgentCheckInRequest{
-			AccountId:      testAccountId,
-			CloudAccountId: testAWSAccountId,
+			ID:        testAccountId,
+			AccountID: testAWSAccountId,
 		},
 	)

-	testSvcConfig := cloudintegrations.CloudServiceConfig{
-		Metrics: &cloudintegrations.CloudServiceMetricsConfig{
+	testSvcConfig := types.CloudServiceConfig{
+		Metrics: &types.CloudServiceMetricsConfig{
 			Enabled: true,
 		},
 	}
@@ -199,7 +199,7 @@ func TestConfigReturnedWhenAgentChecksIn(t *testing.T) {
 	testbed := NewCloudIntegrationsTestBed(t, nil)

 	// configure a connected account
-	testAccountConfig := cloudintegrations.AccountConfig{
+	testAccountConfig := types.AccountConfig{
 		EnabledRegions: []string{"us-east-1", "us-east-2"},
 	}
 	connectionUrlResp := testbed.GenerateConnectionUrlFromQS(
@@ -218,8 +218,8 @@ func TestConfigReturnedWhenAgentChecksIn(t *testing.T) {
 	testAWSAccountId := "389389489489"
 	checkinResp := testbed.CheckInAsAgentWithQS(
 		"aws", cloudintegrations.AgentCheckInRequest{
-			AccountId:      testAccountId,
-			CloudAccountId: testAWSAccountId,
+			ID:        testAccountId,
+			AccountID: testAWSAccountId,
 		},
 	)

@@ -237,14 +237,14 @@ func TestConfigReturnedWhenAgentChecksIn(t *testing.T) {

 	// helper
 	setServiceConfig := func(svcId string, metricsEnabled bool, logsEnabled bool) {
-		testSvcConfig := cloudintegrations.CloudServiceConfig{}
+		testSvcConfig := types.CloudServiceConfig{}
 		if metricsEnabled {
-			testSvcConfig.Metrics = &cloudintegrations.CloudServiceMetricsConfig{
+			testSvcConfig.Metrics = &types.CloudServiceMetricsConfig{
 				Enabled: metricsEnabled,
 			}
 		}
 		if logsEnabled {
-			testSvcConfig.Logs = &cloudintegrations.CloudServiceLogsConfig{
+			testSvcConfig.Logs = &types.CloudServiceLogsConfig{
 				Enabled: logsEnabled,
 			}
 		}
@@ -262,8 +262,8 @@ func TestConfigReturnedWhenAgentChecksIn(t *testing.T) {

 	checkinResp = testbed.CheckInAsAgentWithQS(
 		"aws", cloudintegrations.AgentCheckInRequest{
-			AccountId:      testAccountId,
-			CloudAccountId: testAWSAccountId,
+			ID:        testAccountId,
+			AccountID: testAWSAccountId,
 		},
 	)

@@ -292,13 +292,13 @@ func TestConfigReturnedWhenAgentChecksIn(t *testing.T) {
 	require.True(strings.HasPrefix(logGroupPrefixes[0], "/aws/rds"))

 	// change regions and update service configs and validate config changes for agent
-	testAccountConfig2 := cloudintegrations.AccountConfig{
+	testAccountConfig2 := types.AccountConfig{
 		EnabledRegions: []string{"us-east-2", "us-west-1"},
 	}
 	latestAccount := testbed.UpdateAccountConfigWithQS(
 		"aws", testAccountId, testAccountConfig2,
 	)
-	require.Equal(testAccountId, latestAccount.Id)
+	require.Equal(testAccountId, latestAccount.ID.StringValue())
 	require.Equal(testAccountConfig2, *latestAccount.Config)

 	// disable metrics for one and logs for the other.
@@ -308,8 +308,8 @@ func TestConfigReturnedWhenAgentChecksIn(t *testing.T) {

 	checkinResp = testbed.CheckInAsAgentWithQS(
 		"aws", cloudintegrations.AgentCheckInRequest{
-			AccountId:      testAccountId,
-			CloudAccountId: testAWSAccountId,
+			ID:        testAccountId,
+			AccountID: testAWSAccountId,
 		},
 	)
 	require.Equal(testAccountId, checkinResp.AccountId)
@@ -453,8 +453,8 @@ func (tb *CloudIntegrationsTestBed) CheckInAsAgentWithQS(
 }

 func (tb *CloudIntegrationsTestBed) UpdateAccountConfigWithQS(
-	cloudProvider string, accountId string, newConfig cloudintegrations.AccountConfig,
-) *cloudintegrations.AccountRecord {
+	cloudProvider string, accountId string, newConfig types.AccountConfig,
+) *types.CloudIntegration {
 	respDataJson := tb.RequestQS(
 		fmt.Sprintf(
 			"/api/v1/cloud-integrations/%s/accounts/%s/config",
@@ -464,7 +464,7 @@ func (tb *CloudIntegrationsTestBed) UpdateAccountConfigWithQS(
 		},
 	)

-	var resp cloudintegrations.AccountRecord
+	var resp types.CloudIntegration
 	err := json.Unmarshal(respDataJson, &resp)
 	if err != nil {
 		tb.t.Fatalf("could not unmarshal apiResponse.Data json into Account")
@@ -475,7 +475,7 @@ func (tb *CloudIntegrationsTestBed) UpdateAccountConfigWithQS(

 func (tb *CloudIntegrationsTestBed) DisconnectAccountWithQS(
 	cloudProvider string, accountId string,
-) *cloudintegrations.AccountRecord {
+) *types.CloudIntegration {
 	respDataJson := tb.RequestQS(
 		fmt.Sprintf(
 			"/api/v1/cloud-integrations/%s/accounts/%s/disconnect",
@@ -483,7 +483,7 @@ func (tb *CloudIntegrationsTestBed) DisconnectAccountWithQS(
 		), map[string]any{},
 	)

-	var resp cloudintegrations.AccountRecord
+	var resp types.CloudIntegration
 	err := json.Unmarshal(respDataJson, &resp)
 	if err != nil {
 		tb.t.Fatalf("could not unmarshal apiResponse.Data json into Account")
@@ -166,6 +166,7 @@ func createTestUser() (*types.User, *model.ApiError) {
 	auth.InitAuthCache(ctx)

 	userId := uuid.NewString()
+
 	return dao.DB().CreateUser(
 		ctx,
 		&types.User{
@@ -48,9 +48,15 @@ func NewTestSqliteDB(t *testing.T) (sqlStore sqlstore.SQLStore, testDBFilePath s
 			sqlmigration.NewModifyDatetimeFactory(),
 			sqlmigration.NewModifyOrgDomainFactory(),
 			sqlmigration.NewUpdateOrganizationFactory(sqlStore),
 			sqlmigration.NewAddAlertmanagerFactory(sqlStore),
 			sqlmigration.NewUpdateDashboardAndSavedViewsFactory(sqlStore),
 			sqlmigration.NewUpdatePatAndOrgDomainsFactory(sqlStore),
 			sqlmigration.NewUpdatePipelines(sqlStore),
 			sqlmigration.NewDropLicensesSitesFactory(sqlStore),
 			sqlmigration.NewUpdateInvitesFactory(sqlStore),
 			sqlmigration.NewUpdatePatFactory(sqlStore),
+			sqlmigration.NewAddVirtualFieldsFactory(),
+			sqlmigration.NewUpdateIntegrationsFactory(sqlStore),
 		),
 	)
 	if err != nil {

@@ -69,6 +69,8 @@ func NewSQLMigrationProviderFactories(sqlstore sqlstore.SQLStore) factory.NamedM
 		sqlmigration.NewUpdatePreferencesFactory(sqlstore),
 		sqlmigration.NewUpdateApdexTtlFactory(sqlstore),
 		sqlmigration.NewUpdateResetPasswordFactory(sqlstore),
+		sqlmigration.NewAddVirtualFieldsFactory(),
+		sqlmigration.NewUpdateIntegrationsFactory(sqlstore),
 	)
 }
58	pkg/sqlmigration/025_add_virtual_fields.go	Normal file
@@ -0,0 +1,58 @@
package sqlmigration

import (
	"context"

	"github.com/SigNoz/signoz/pkg/factory"
	"github.com/SigNoz/signoz/pkg/types"
	"github.com/SigNoz/signoz/pkg/types/telemetrytypes"
	"github.com/uptrace/bun"
	"github.com/uptrace/bun/migrate"
)

type addVirtualFields struct{}

func NewAddVirtualFieldsFactory() factory.ProviderFactory[SQLMigration, Config] {
	return factory.NewProviderFactory(factory.MustNewName("add_virtual_fields"), newAddVirtualFields)
}

func newAddVirtualFields(_ context.Context, _ factory.ProviderSettings, _ Config) (SQLMigration, error) {
	return &addVirtualFields{}, nil
}

func (migration *addVirtualFields) Register(migrations *migrate.Migrations) error {
	if err := migrations.Register(migration.Up, migration.Down); err != nil {
		return err
	}

	return nil
}

func (migration *addVirtualFields) Up(ctx context.Context, db *bun.DB) error {
	// table:virtual_field op:create
	if _, err := db.NewCreateTable().
		Model(&struct {
			bun.BaseModel `bun:"table:virtual_field"`

			types.Identifiable
			types.TimeAuditable
			types.UserAuditable

			Name        string                `bun:"name,type:text,notnull"`
			Expression  string                `bun:"expression,type:text,notnull"`
			Description string                `bun:"description,type:text"`
			Signal      telemetrytypes.Signal `bun:"signal,type:text,notnull"`
			OrgID       string                `bun:"org_id,type:text,notnull"`
		}{}).
		ForeignKey(`("org_id") REFERENCES "organizations" ("id") ON DELETE CASCADE`).
		IfNotExists().
		Exec(ctx); err != nil {
		return err
	}

	return nil
}

func (migration *addVirtualFields) Down(ctx context.Context, db *bun.DB) error {
	return nil
}
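Since the migration creates the table through an anonymous struct, code that works with virtual_field rows elsewhere would need a named model. A hypothetical mirror, for illustration only (the model name and insert call are not part of this changeset):

	// hypothetical named model matching the virtual_field table created above
	type VirtualField struct {
		bun.BaseModel `bun:"table:virtual_field"`

		types.Identifiable
		types.TimeAuditable
		types.UserAuditable

		Name        string                `bun:"name,type:text,notnull"`
		Expression  string                `bun:"expression,type:text,notnull"`
		Description string                `bun:"description,type:text"`
		Signal      telemetrytypes.Signal `bun:"signal,type:text,notnull"`
		OrgID       string                `bun:"org_id,type:text,notnull"`
	}

	// inserting a row then reduces to the usual bun call:
	//   _, err := db.NewInsert().Model(&vf).Exec(ctx)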
441	pkg/sqlmigration/026_update_integrations.go	Normal file
@@ -0,0 +1,441 @@
package sqlmigration

import (
	"context"
	"database/sql"
	"time"

	"github.com/SigNoz/signoz/pkg/factory"
	"github.com/SigNoz/signoz/pkg/sqlstore"
	"github.com/SigNoz/signoz/pkg/types"
	"github.com/SigNoz/signoz/pkg/valuer"
	"github.com/google/uuid"
	"github.com/uptrace/bun"
	"github.com/uptrace/bun/migrate"
	"go.uber.org/zap"
)

type updateIntegrations struct {
	store sqlstore.SQLStore
}

func NewUpdateIntegrationsFactory(sqlstore sqlstore.SQLStore) factory.ProviderFactory[SQLMigration, Config] {
	return factory.NewProviderFactory(factory.MustNewName("update_integrations"), func(ctx context.Context, ps factory.ProviderSettings, c Config) (SQLMigration, error) {
		return newUpdateIntegrations(ctx, ps, c, sqlstore)
	})
}

func newUpdateIntegrations(_ context.Context, _ factory.ProviderSettings, _ Config, store sqlstore.SQLStore) (SQLMigration, error) {
	return &updateIntegrations{
		store: store,
	}, nil
}

func (migration *updateIntegrations) Register(migrations *migrate.Migrations) error {
	if err := migrations.Register(migration.Up, migration.Down); err != nil {
		return err
	}

	return nil
}

type existingInstalledIntegration struct {
	bun.BaseModel `bun:"table:integrations_installed"`

	IntegrationID string    `bun:"integration_id,pk,type:text"`
	ConfigJSON    string    `bun:"config_json,type:text"`
	InstalledAt   time.Time `bun:"installed_at,default:current_timestamp"`
}

type newInstalledIntegration struct {
	bun.BaseModel `bun:"table:installed_integration"`

	types.Identifiable
	Type        string    `json:"type" bun:"type,type:text,unique:org_id_type"`
	Config      string    `json:"config" bun:"config,type:text"`
	InstalledAt time.Time `json:"installed_at" bun:"installed_at,default:current_timestamp"`
	OrgID       string    `json:"org_id" bun:"org_id,type:text,unique:org_id_type"`
}

type existingCloudIntegration struct {
	bun.BaseModel `bun:"table:cloud_integrations_accounts"`

	CloudProvider       string     `bun:"cloud_provider,type:text,unique:cloud_provider_id"`
	ID                  string     `bun:"id,type:text,notnull,unique:cloud_provider_id"`
	ConfigJSON          string     `bun:"config_json,type:text"`
	CloudAccountID      string     `bun:"cloud_account_id,type:text"`
	LastAgentReportJSON string     `bun:"last_agent_report_json,type:text"`
	CreatedAt           time.Time  `bun:"created_at,notnull,default:current_timestamp"`
	RemovedAt           *time.Time `bun:"removed_at,type:timestamp"`
}

type newCloudIntegration struct {
	bun.BaseModel `bun:"table:cloud_integration"`

	types.Identifiable
	types.TimeAuditable
	Provider        string     `json:"provider" bun:"provider,type:text"`
	Config          string     `json:"config" bun:"config,type:text"`
	AccountID       string     `json:"account_id" bun:"account_id,type:text"`
	LastAgentReport string     `json:"last_agent_report" bun:"last_agent_report,type:text"`
	RemovedAt       *time.Time `json:"removed_at" bun:"removed_at,type:timestamp"`
	OrgID           string     `json:"org_id" bun:"org_id,type:text"`
}

type existingCloudIntegrationService struct {
	bun.BaseModel `bun:"table:cloud_integrations_service_configs,alias:c1"`

	CloudProvider  string    `bun:"cloud_provider,type:text,notnull,unique:service_cloud_provider_account"`
	CloudAccountID string    `bun:"cloud_account_id,type:text,notnull,unique:service_cloud_provider_account"`
	ServiceID      string    `bun:"service_id,type:text,notnull,unique:service_cloud_provider_account"`
	ConfigJSON     string    `bun:"config_json,type:text"`
	CreatedAt      time.Time `bun:"created_at,default:current_timestamp"`
}

type newCloudIntegrationService struct {
	bun.BaseModel `bun:"table:cloud_integration_service,alias:cis"`

	types.Identifiable
	types.TimeAuditable
	Type               string `bun:"type,type:text,notnull,unique:cloud_integration_id_type"`
	Config             string `bun:"config,type:text"`
	CloudIntegrationID string `bun:"cloud_integration_id,type:text,notnull,unique:cloud_integration_id_type"`
}

type StorablePersonalAccessToken struct {
	bun.BaseModel `bun:"table:personal_access_token"`

	types.Identifiable
	types.TimeAuditable
	OrgID           string `json:"orgId" bun:"org_id,type:text,notnull"`
	Role            string `json:"role" bun:"role,type:text,notnull,default:'ADMIN'"`
	UserID          string `json:"userId" bun:"user_id,type:text,notnull"`
	Token           string `json:"token" bun:"token,type:text,notnull,unique"`
	Name            string `json:"name" bun:"name,type:text,notnull"`
	ExpiresAt       int64  `json:"expiresAt" bun:"expires_at,notnull,default:0"`
	LastUsed        int64  `json:"lastUsed" bun:"last_used,notnull,default:0"`
	Revoked         bool   `json:"revoked" bun:"revoked,notnull,default:false"`
	UpdatedByUserID string `json:"updatedByUserId" bun:"updated_by_user_id,type:text,notnull,default:''"`
}

func (migration *updateIntegrations) Up(ctx context.Context, db *bun.DB) error {
	// begin transaction
	tx, err := db.BeginTx(ctx, nil)
	if err != nil {
		return err
	}
	defer tx.Rollback()

	// don't run the migration if there are multiple org ids
	orgIDs := make([]string, 0)
	err = migration.store.BunDB().NewSelect().Model((*types.Organization)(nil)).Column("id").Scan(ctx, &orgIDs)
	if err != nil {
		return err
	}
	if len(orgIDs) > 1 {
		return nil
	}

	// ---
	// installed integrations
	// ---
	err = migration.
		store.
		Dialect().
		RenameTableAndModifyModel(ctx, tx, new(existingInstalledIntegration), new(newInstalledIntegration), []string{OrgReference}, func(ctx context.Context) error {
			existingIntegrations := make([]*existingInstalledIntegration, 0)
			err = tx.
				NewSelect().
				Model(&existingIntegrations).
				Scan(ctx)
			if err != nil {
				if err != sql.ErrNoRows {
					return err
				}
			}

			if err == nil && len(existingIntegrations) > 0 {
				newIntegrations := migration.
					CopyOldIntegrationsToNewIntegrations(tx, orgIDs[0], existingIntegrations)
				_, err = tx.
					NewInsert().
					Model(&newIntegrations).
					Exec(ctx)
				if err != nil {
					return err
				}
			}
			return nil
		})
	if err != nil {
		return err
	}

	// ---
	// cloud integrations
	// ---
	err = migration.
		store.
		Dialect().
		RenameTableAndModifyModel(ctx, tx, new(existingCloudIntegration), new(newCloudIntegration), []string{OrgReference}, func(ctx context.Context) error {
			existingIntegrations := make([]*existingCloudIntegration, 0)
			err = tx.
				NewSelect().
				Model(&existingIntegrations).
				Where("removed_at IS NULL"). // we will only copy the accounts that are not removed
				Scan(ctx)
			if err != nil {
				if err != sql.ErrNoRows {
					return err
				}
			}

			if err == nil && len(existingIntegrations) > 0 {
				newIntegrations := migration.
					CopyOldCloudIntegrationsToNewCloudIntegrations(tx, orgIDs[0], existingIntegrations)
				_, err = tx.
					NewInsert().
					Model(&newIntegrations).
					Exec(ctx)
				if err != nil {
					return err
				}
			}
			return nil
		})
	if err != nil {
		return err
	}

	// add unique constraint to cloud_integration table
	_, err = tx.ExecContext(ctx, `CREATE UNIQUE INDEX IF NOT EXISTS unique_cloud_integration ON cloud_integration (id, provider, org_id)`)
	if err != nil {
		return err
	}

	// ---
	// cloud integration service
	// ---
	err = migration.
		store.
		Dialect().
		RenameTableAndModifyModel(ctx, tx, new(existingCloudIntegrationService), new(newCloudIntegrationService), []string{CloudIntegrationReference}, func(ctx context.Context) error {
			existingServices := make([]*existingCloudIntegrationService, 0)

			// there is only one service per provider, account id and type,
			// so there won't be any duplicates; these will simply be enabled
			// as soon as the integration for the account is enabled
			err = tx.
				NewSelect().
				Model(&existingServices).
				Scan(ctx)
			if err != nil {
				if err != sql.ErrNoRows {
					return err
				}
			}

			if err == nil && len(existingServices) > 0 {
				newServices := migration.
					CopyOldCloudIntegrationServicesToNewCloudIntegrationServices(tx, orgIDs[0], existingServices)
				if len(newServices) > 0 {
					_, err = tx.
						NewInsert().
						Model(&newServices).
						Exec(ctx)
					if err != nil {
						return err
					}
				}
			}
			return nil
		})
	if err != nil {
		return err
	}

	if len(orgIDs) == 0 {
		err = tx.Commit()
		if err != nil {
			return err
		}
		return nil
	}

	// copy the old aws integration user to the new user
	err = migration.copyOldAwsIntegrationUser(tx, orgIDs[0])
	if err != nil {
		return err
	}

	err = tx.Commit()
	if err != nil {
		return err
	}

	return nil
}

func (migration *updateIntegrations) Down(ctx context.Context, db *bun.DB) error {
	return nil
}

func (migration *updateIntegrations) CopyOldIntegrationsToNewIntegrations(tx bun.IDB, orgID string, existingIntegrations []*existingInstalledIntegration) []*newInstalledIntegration {
	newIntegrations := make([]*newInstalledIntegration, 0)

	for _, integration := range existingIntegrations {
		newIntegrations = append(newIntegrations, &newInstalledIntegration{
			Identifiable: types.Identifiable{
				ID: valuer.GenerateUUID(),
			},
			Type:        integration.IntegrationID,
			Config:      integration.ConfigJSON,
			InstalledAt: integration.InstalledAt,
			OrgID:       orgID,
		})
	}

	return newIntegrations
}

func (migration *updateIntegrations) CopyOldCloudIntegrationsToNewCloudIntegrations(tx bun.IDB, orgID string, existingIntegrations []*existingCloudIntegration) []*newCloudIntegration {
	newIntegrations := make([]*newCloudIntegration, 0)

	for _, integration := range existingIntegrations {
		newIntegrations = append(newIntegrations, &newCloudIntegration{
			Identifiable: types.Identifiable{
				ID: valuer.GenerateUUID(),
			},
			TimeAuditable: types.TimeAuditable{
				CreatedAt: integration.CreatedAt,
				UpdatedAt: integration.CreatedAt,
			},
			Provider:        integration.CloudProvider,
			AccountID:       integration.CloudAccountID,
			Config:          integration.ConfigJSON,
			LastAgentReport: integration.LastAgentReportJSON,
			RemovedAt:       integration.RemovedAt,
			OrgID:           orgID,
		})
	}

	return newIntegrations
}

func (migration *updateIntegrations) CopyOldCloudIntegrationServicesToNewCloudIntegrationServices(tx bun.IDB, orgID string, existingServices []*existingCloudIntegrationService) []*newCloudIntegrationService {
	newServices := make([]*newCloudIntegrationService, 0)

	for _, service := range existingServices {
		var cloudIntegrationID string
		err := tx.NewSelect().
			Model((*newCloudIntegration)(nil)).
			Column("id").
			Where("account_id = ?", service.CloudAccountID).
			Where("provider = ?", service.CloudProvider).
			Where("org_id = ?", orgID).
			Scan(context.Background(), &cloudIntegrationID)
		if err != nil {
			if err == sql.ErrNoRows {
				continue
			}
			zap.L().Error("failed to get cloud integration id", zap.Error(err))
			return nil
		}
		newServices = append(newServices, &newCloudIntegrationService{
			Identifiable: types.Identifiable{
				ID: valuer.GenerateUUID(),
			},
			TimeAuditable: types.TimeAuditable{
				CreatedAt: service.CreatedAt,
				UpdatedAt: service.CreatedAt,
			},
			Type:               service.ServiceID,
			Config:             service.ConfigJSON,
			CloudIntegrationID: cloudIntegrationID,
		})
	}

	return newServices
}

func (migration *updateIntegrations) copyOldAwsIntegrationUser(tx bun.IDB, orgID string) error {
	user := &types.User{}
	err := tx.NewSelect().Model(user).Where("email = ?", "aws-integration@signoz.io").Scan(context.Background())
	if err != nil {
		if err == sql.ErrNoRows {
			return nil
		}
		return err
	}

	// check if the id is already a UUID
	if _, err := uuid.Parse(user.ID); err == nil {
		return nil
	}

	// new user
	newUser := &types.User{
		ID: uuid.New().String(),
		TimeAuditable: types.TimeAuditable{
			CreatedAt: time.Now(),
			UpdatedAt: time.Now(),
		},
		OrgID:    orgID,
		Name:     user.Name,
		Email:    user.Email,
		GroupID:  user.GroupID,
		Password: user.Password,
	}

	// get the pat for the old user
	pat := &StorablePersonalAccessToken{}
	err = tx.NewSelect().Model(pat).Where("user_id = ? and revoked = false", "aws-integration").Scan(context.Background())
	if err != nil {
		if err == sql.ErrNoRows {
			// delete the old user
			_, err = tx.ExecContext(context.Background(), `DELETE FROM users WHERE id = ?`, user.ID)
			if err != nil {
				return err
			}
			return nil
		}
		return err
	}

	// new pat
	newPAT := &StorablePersonalAccessToken{
		Identifiable: types.Identifiable{ID: valuer.GenerateUUID()},
		TimeAuditable: types.TimeAuditable{
			CreatedAt: time.Now(),
			UpdatedAt: time.Now(),
		},
		OrgID:     orgID,
		UserID:    newUser.ID,
		Token:     pat.Token,
		Name:      pat.Name,
		ExpiresAt: pat.ExpiresAt,
		LastUsed:  pat.LastUsed,
		Revoked:   pat.Revoked,
		Role:      pat.Role,
	}

	// delete the old user
	_, err = tx.ExecContext(context.Background(), `DELETE FROM users WHERE id = ?`, user.ID)
	if err != nil {
		return err
	}

	// insert the new user
	_, err = tx.NewInsert().Model(newUser).Exec(context.Background())
	if err != nil {
		return err
	}

	// insert the new pat
	_, err = tx.NewInsert().Model(newPAT).Exec(context.Background())
	if err != nil {
		return err
	}

	return nil
}
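Note: the repo's factory/provider plumbing normally drives these Up/Down pairs, but they are standard bun migrations. A minimal sketch of running one directly with github.com/uptrace/bun/migrate, in package sqlmigration, with an illustrative function name and assuming db and store are already initialized:

	func runUpdateIntegrations(ctx context.Context, db *bun.DB, store sqlstore.SQLStore) error {
		migrations := migrate.NewMigrations()
		m := &updateIntegrations{store: store}
		if err := m.Register(migrations); err != nil {
			return err
		}
		migrator := migrate.NewMigrator(db, migrations)
		if err := migrator.Init(ctx); err != nil { // creates bun's migration bookkeeping tables
			return err
		}
		if _, err := migrator.Migrate(ctx); err != nil { // applies pending Up migrations
			return err
		}
		return nil
	}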
@@ -26,8 +26,9 @@ var (
 )

 var (
-	OrgReference  = "org"
-	UserReference = "user"
+	OrgReference              = "org"
+	UserReference             = "user"
+	CloudIntegrationReference = "cloud_integration"
 )

 func New(

@@ -17,13 +17,15 @@ var (
 )

 var (
-	Org  = "org"
-	User = "user"
+	Org              = "org"
+	User             = "user"
+	CloudIntegration = "cloud_integration"
 )

 var (
-	OrgReference  = `("org_id") REFERENCES "organizations" ("id")`
-	UserReference = `("user_id") REFERENCES "users" ("id") ON DELETE CASCADE ON UPDATE CASCADE`
+	OrgReference              = `("org_id") REFERENCES "organizations" ("id")`
+	UserReference             = `("user_id") REFERENCES "users" ("id") ON DELETE CASCADE ON UPDATE CASCADE`
+	CloudIntegrationReference = `("cloud_integration_id") REFERENCES "cloud_integration" ("id") ON DELETE CASCADE`
 )

 type dialect struct {
@@ -202,6 +204,8 @@ func (dialect *dialect) RenameTableAndModifyModel(ctx context.Context, bun bun.I
 			fkReferences = append(fkReferences, OrgReference)
 		} else if reference == User && !slices.Contains(fkReferences, UserReference) {
 			fkReferences = append(fkReferences, UserReference)
+		} else if reference == CloudIntegration && !slices.Contains(fkReferences, CloudIntegrationReference) {
+			fkReferences = append(fkReferences, CloudIntegrationReference)
 		}
 	}
363	pkg/telemetrylogs/condition_builder.go	Normal file
@@ -0,0 +1,363 @@
package telemetrylogs

import (
	"context"
	"fmt"
	"strconv"
	"strings"

	schema "github.com/SigNoz/signoz-otel-collector/cmd/signozschemamigrator/schema_migrator"
	qbtypes "github.com/SigNoz/signoz/pkg/types/querybuildertypes/querybuildertypesv5"
	"github.com/SigNoz/signoz/pkg/types/telemetrytypes"
	"github.com/SigNoz/signoz/pkg/valuer"

	"github.com/huandu/go-sqlbuilder"
)

var (
	logsV2Columns = map[string]*schema.Column{
		"ts_bucket_start":      {Name: "ts_bucket_start", Type: schema.ColumnTypeUInt64},
		"resource_fingerprint": {Name: "resource_fingerprint", Type: schema.ColumnTypeString},

		"timestamp":          {Name: "timestamp", Type: schema.ColumnTypeUInt64},
		"observed_timestamp": {Name: "observed_timestamp", Type: schema.ColumnTypeUInt64},
		"id":                 {Name: "id", Type: schema.ColumnTypeString},
		"trace_id":           {Name: "trace_id", Type: schema.ColumnTypeString},
		"span_id":            {Name: "span_id", Type: schema.ColumnTypeString},
		"trace_flags":        {Name: "trace_flags", Type: schema.ColumnTypeUInt32},
		"severity_text":      {Name: "severity_text", Type: schema.LowCardinalityColumnType{ElementType: schema.ColumnTypeString}},
		"severity_number":    {Name: "severity_number", Type: schema.ColumnTypeUInt8},
		"body":               {Name: "body", Type: schema.ColumnTypeString},
		"attributes_string": {Name: "attributes_string", Type: schema.MapColumnType{
			KeyType:   schema.LowCardinalityColumnType{ElementType: schema.ColumnTypeString},
			ValueType: schema.ColumnTypeString,
		}},
		"attributes_number": {Name: "attributes_number", Type: schema.MapColumnType{
			KeyType:   schema.LowCardinalityColumnType{ElementType: schema.ColumnTypeString},
			ValueType: schema.ColumnTypeFloat64,
		}},
		"attributes_bool": {Name: "attributes_bool", Type: schema.MapColumnType{
			KeyType:   schema.LowCardinalityColumnType{ElementType: schema.ColumnTypeString},
			ValueType: schema.ColumnTypeBool,
		}},
		"resources_string": {Name: "resources_string", Type: schema.MapColumnType{
			KeyType:   schema.LowCardinalityColumnType{ElementType: schema.ColumnTypeString},
			ValueType: schema.ColumnTypeString,
		}},
		"scope_name":    {Name: "scope_name", Type: schema.ColumnTypeString},
		"scope_version": {Name: "scope_version", Type: schema.ColumnTypeString},
		"scope_string": {Name: "scope_string", Type: schema.MapColumnType{
			KeyType:   schema.LowCardinalityColumnType{ElementType: schema.ColumnTypeString},
			ValueType: schema.ColumnTypeString,
		}},
	}
)

var _ qbtypes.ConditionBuilder = &conditionBuilder{}

type conditionBuilder struct {
}

func NewConditionBuilder() qbtypes.ConditionBuilder {
	return &conditionBuilder{}
}

func (c *conditionBuilder) GetColumn(ctx context.Context, key *telemetrytypes.TelemetryFieldKey) (*schema.Column, error) {

	switch key.FieldContext {
	case telemetrytypes.FieldContextResource:
		return logsV2Columns["resources_string"], nil
	case telemetrytypes.FieldContextScope:
		switch key.Name {
		case "name", "scope.name", "scope_name":
			return logsV2Columns["scope_name"], nil
		case "version", "scope.version", "scope_version":
			return logsV2Columns["scope_version"], nil
		}
		return logsV2Columns["scope_string"], nil
	case telemetrytypes.FieldContextAttribute:
		switch key.FieldDataType {
		case telemetrytypes.FieldDataTypeString:
			return logsV2Columns["attributes_string"], nil
		case telemetrytypes.FieldDataTypeInt64, telemetrytypes.FieldDataTypeFloat64, telemetrytypes.FieldDataTypeNumber:
			return logsV2Columns["attributes_number"], nil
		case telemetrytypes.FieldDataTypeBool:
			return logsV2Columns["attributes_bool"], nil
		}
	case telemetrytypes.FieldContextLog, telemetrytypes.FieldContextUnspecified:
		col, ok := logsV2Columns[key.Name]
		if !ok {
			// check if the key is a body JSON search
			if strings.HasPrefix(key.Name, BodyJSONStringSearchPrefix) {
				return logsV2Columns["body"], nil
			}
			return nil, qbtypes.ErrColumnNotFound
		}
		return col, nil
	}

	return nil, qbtypes.ErrColumnNotFound
}

func (c *conditionBuilder) GetTableFieldName(ctx context.Context, key *telemetrytypes.TelemetryFieldKey) (string, error) {
	column, err := c.GetColumn(ctx, key)
	if err != nil {
		return "", err
	}

	switch column.Type {
	case schema.ColumnTypeString,
		schema.LowCardinalityColumnType{ElementType: schema.ColumnTypeString},
		schema.ColumnTypeUInt64,
		schema.ColumnTypeUInt32,
		schema.ColumnTypeUInt8:
		return column.Name, nil
	case schema.MapColumnType{
		KeyType:   schema.LowCardinalityColumnType{ElementType: schema.ColumnTypeString},
		ValueType: schema.ColumnTypeString,
	}:
		// a key could have been materialized; if so, return the materialized column name
		if key.Materialized {
			return telemetrytypes.FieldKeyToMaterializedColumnName(key), nil
		}
		return fmt.Sprintf("%s['%s']", column.Name, key.Name), nil
	case schema.MapColumnType{
		KeyType:   schema.LowCardinalityColumnType{ElementType: schema.ColumnTypeString},
		ValueType: schema.ColumnTypeFloat64,
	}:
		// a key could have been materialized; if so, return the materialized column name
		if key.Materialized {
			return telemetrytypes.FieldKeyToMaterializedColumnName(key), nil
		}
		return fmt.Sprintf("%s['%s']", column.Name, key.Name), nil
	case schema.MapColumnType{
		KeyType:   schema.LowCardinalityColumnType{ElementType: schema.ColumnTypeString},
		ValueType: schema.ColumnTypeBool,
	}:
		// a key could have been materialized; if so, return the materialized column name
		if key.Materialized {
			return telemetrytypes.FieldKeyToMaterializedColumnName(key), nil
		}
		return fmt.Sprintf("%s['%s']", column.Name, key.Name), nil
	}
	// should not reach here
	return column.Name, nil
}

func parseStrValue(valueStr string, operator qbtypes.FilterOperator) (telemetrytypes.FieldDataType, any) {
	valueType := telemetrytypes.FieldDataTypeString

	// return the value as is for the following operators,
	// as they always operate on strings
	if operator == qbtypes.FilterOperatorContains || operator == qbtypes.FilterOperatorNotContains ||
		operator == qbtypes.FilterOperatorRegexp || operator == qbtypes.FilterOperatorNotRegexp ||
		operator == qbtypes.FilterOperatorLike || operator == qbtypes.FilterOperatorNotLike ||
		operator == qbtypes.FilterOperatorILike || operator == qbtypes.FilterOperatorNotILike {
		return valueType, valueStr
	}

	var err error
	var parsedValue any
	if parsedValue, err = strconv.ParseBool(valueStr); err == nil {
		valueType = telemetrytypes.FieldDataTypeBool
	} else if parsedValue, err = strconv.ParseInt(valueStr, 10, 64); err == nil {
		valueType = telemetrytypes.FieldDataTypeInt64
	} else if parsedValue, err = strconv.ParseFloat(valueStr, 64); err == nil {
		valueType = telemetrytypes.FieldDataTypeFloat64
	} else {
		parsedValue = valueStr
		valueType = telemetrytypes.FieldDataTypeString
	}

	return valueType, parsedValue
}

func inferDataType(value any, operator qbtypes.FilterOperator, key *telemetrytypes.TelemetryFieldKey) (telemetrytypes.FieldDataType, any) {
	// check if the value is an int, float, string or bool
	valueType := telemetrytypes.FieldDataTypeUnspecified
	switch v := value.(type) {
	case []any:
		// take the first element and infer the type
		if len(v) > 0 {
			valueType, _ = inferDataType(v[0], operator, key)
		}
		return valueType, v
	case uint8, uint16, uint32, uint64, int, int8, int16, int32, int64:
		valueType = telemetrytypes.FieldDataTypeInt64
	case float32, float64:
		valueType = telemetrytypes.FieldDataTypeFloat64
	case string:
		valueType, value = parseStrValue(v, operator)
	case bool:
		valueType = telemetrytypes.FieldDataTypeBool
	}

	// check if it is an array
	if strings.HasSuffix(key.Name, "[*]") {
		valueType = telemetrytypes.FieldDataType{String: valuer.NewString(fmt.Sprintf("[]%s", valueType.StringValue()))}
	}

	return valueType, value
}

func GetBodyJSONKey(_ context.Context, key *telemetrytypes.TelemetryFieldKey, operator qbtypes.FilterOperator, value any) (string, any) {
	dataType, value := inferDataType(value, operator, key)

	// all body JSON keys are of the form body.<path>
	path := strings.Join(strings.Split(key.Name, ".")[1:], ".")

	// for array types, we need to extract the value with JSON_QUERY
	if dataType == telemetrytypes.FieldDataTypeArrayInt64 ||
		dataType == telemetrytypes.FieldDataTypeArrayFloat64 ||
		dataType == telemetrytypes.FieldDataTypeArrayString ||
		dataType == telemetrytypes.FieldDataTypeArrayBool ||
		dataType == telemetrytypes.FieldDataTypeArrayNumber {
		return fmt.Sprintf("JSONExtract(JSON_QUERY(body, '$.%s'), '%s')", path, dataType.CHDataType()), value
	}

	// for all other types, we extract the value with JSON_VALUE
	return fmt.Sprintf("JSONExtract(JSON_VALUE(body, '$.%s'), '%s')", path, dataType.CHDataType()), value
}

func (c *conditionBuilder) GetCondition(
	ctx context.Context,
	key *telemetrytypes.TelemetryFieldKey,
	operator qbtypes.FilterOperator,
	value any,
	sb *sqlbuilder.SelectBuilder,
) (string, error) {
	column, err := c.GetColumn(ctx, key)
	if err != nil {
		return "", err
	}

	tblFieldName, err := c.GetTableFieldName(ctx, key)
	if err != nil {
		return "", err
	}

	if strings.HasPrefix(key.Name, BodyJSONStringSearchPrefix) {
		tblFieldName, value = GetBodyJSONKey(ctx, key, operator, value)
	}

	tblFieldName, value = telemetrytypes.DataTypeCollisionHandledFieldName(key, value, tblFieldName)

	switch operator {
	// regular operators
	case qbtypes.FilterOperatorEqual:
		return sb.E(tblFieldName, value), nil
	case qbtypes.FilterOperatorNotEqual:
		return sb.NE(tblFieldName, value), nil
	case qbtypes.FilterOperatorGreaterThan:
		return sb.G(tblFieldName, value), nil
	case qbtypes.FilterOperatorGreaterThanOrEq:
		return sb.GE(tblFieldName, value), nil
	case qbtypes.FilterOperatorLessThan:
		return sb.LT(tblFieldName, value), nil
	case qbtypes.FilterOperatorLessThanOrEq:
		return sb.LE(tblFieldName, value), nil

	// like and not like
	case qbtypes.FilterOperatorLike:
		return sb.Like(tblFieldName, value), nil
	case qbtypes.FilterOperatorNotLike:
		return sb.NotLike(tblFieldName, value), nil
	case qbtypes.FilterOperatorILike:
		return sb.ILike(tblFieldName, value), nil
	case qbtypes.FilterOperatorNotILike:
		return sb.NotILike(tblFieldName, value), nil

	case qbtypes.FilterOperatorContains:
		return sb.ILike(tblFieldName, fmt.Sprintf("%%%s%%", value)), nil
	case qbtypes.FilterOperatorNotContains:
		return sb.NotILike(tblFieldName, fmt.Sprintf("%%%s%%", value)), nil

	case qbtypes.FilterOperatorRegexp:
		exp := fmt.Sprintf(`match(%s, %s)`, tblFieldName, sb.Var(value))
		return sb.And(exp), nil
	case qbtypes.FilterOperatorNotRegexp:
		exp := fmt.Sprintf(`not match(%s, %s)`, tblFieldName, sb.Var(value))
		return sb.And(exp), nil

	// between and not between
	case qbtypes.FilterOperatorBetween:
		values, ok := value.([]any)
		if !ok {
			return "", qbtypes.ErrBetweenValues
		}
		if len(values) != 2 {
			return "", qbtypes.ErrBetweenValues
		}
		return sb.Between(tblFieldName, values[0], values[1]), nil
	case qbtypes.FilterOperatorNotBetween:
		values, ok := value.([]any)
		if !ok {
			return "", qbtypes.ErrBetweenValues
		}
		if len(values) != 2 {
			return "", qbtypes.ErrBetweenValues
		}
		return sb.NotBetween(tblFieldName, values[0], values[1]), nil

	// in and not in
	case qbtypes.FilterOperatorIn:
		values, ok := value.([]any)
		if !ok {
			return "", qbtypes.ErrInValues
		}
		return sb.In(tblFieldName, values...), nil
	case qbtypes.FilterOperatorNotIn:
		values, ok := value.([]any)
		if !ok {
			return "", qbtypes.ErrInValues
		}
		return sb.NotIn(tblFieldName, values...), nil

	// exists and not exists
	// but how could you live and have no story to tell
	// in the UI-based query builder, `exists` and `not exists` are used for
	// key membership checks, so the condition depends on the column type
	case qbtypes.FilterOperatorExists, qbtypes.FilterOperatorNotExists:
		var value any
		switch column.Type {
		case schema.ColumnTypeString, schema.LowCardinalityColumnType{ElementType: schema.ColumnTypeString}:
			value = ""
			if operator == qbtypes.FilterOperatorExists {
				return sb.NE(tblFieldName, value), nil
			} else {
				return sb.E(tblFieldName, value), nil
			}
		case schema.ColumnTypeUInt64, schema.ColumnTypeUInt32, schema.ColumnTypeUInt8:
			value = 0
			if operator == qbtypes.FilterOperatorExists {
				return sb.NE(tblFieldName, value), nil
			} else {
				return sb.E(tblFieldName, value), nil
			}
		case schema.MapColumnType{
			KeyType:   schema.LowCardinalityColumnType{ElementType: schema.ColumnTypeString},
			ValueType: schema.ColumnTypeString,
		}, schema.MapColumnType{
			KeyType:   schema.LowCardinalityColumnType{ElementType: schema.ColumnTypeString},
			ValueType: schema.ColumnTypeBool,
		}, schema.MapColumnType{
			KeyType:   schema.LowCardinalityColumnType{ElementType: schema.ColumnTypeString},
			ValueType: schema.ColumnTypeFloat64,
		}:
			leftOperand := fmt.Sprintf("mapContains(%s, '%s')", column.Name, key.Name)
			if key.Materialized {
				leftOperand = telemetrytypes.FieldKeyToMaterializedColumnNameForExists(key)
			}
			if operator == qbtypes.FilterOperatorExists {
				return sb.E(leftOperand, true), nil
			} else {
				return sb.NE(leftOperand, true), nil
			}
		default:
			return "", fmt.Errorf("exists operator is not supported for column type %s", column.Type)
		}
	}
	return "", fmt.Errorf("unsupported operator: %v", operator)
}
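A usage sketch for the new condition builder, in the same package; the table name is an assumption for illustration, and the final field name may be further adjusted by DataTypeCollisionHandledFieldName:

	func exampleLogsWhereClause() (string, []interface{}, error) {
		sb := sqlbuilder.NewSelectBuilder()
		sb.Select("body").From("signoz_logs.distributed_logs_v2") // table name assumed

		cond, err := NewConditionBuilder().GetCondition(
			context.Background(),
			&telemetrytypes.TelemetryFieldKey{
				Name:          "service.name",
				FieldContext:  telemetrytypes.FieldContextResource,
				FieldDataType: telemetrytypes.FieldDataTypeString,
			},
			qbtypes.FilterOperatorEqual,
			"api",
			sb,
		)
		if err != nil {
			return "", nil, err
		}
		sb.Where(cond)

		sql, args := sb.Build()
		// sql ends in roughly: WHERE resources_string['service.name'] = ?  (args: ["api"])
		return sql, args, nil
	}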
811
pkg/telemetrylogs/condition_builder_test.go
Normal file
811
pkg/telemetrylogs/condition_builder_test.go
Normal file
@@ -0,0 +1,811 @@
package telemetrylogs

import (
	"context"
	"testing"

	schema "github.com/SigNoz/signoz-otel-collector/cmd/signozschemamigrator/schema_migrator"
	qbtypes "github.com/SigNoz/signoz/pkg/types/querybuildertypes/querybuildertypesv5"
	"github.com/SigNoz/signoz/pkg/types/telemetrytypes"
	"github.com/huandu/go-sqlbuilder"
	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"
)

func TestGetColumn(t *testing.T) {
	ctx := context.Background()
	conditionBuilder := NewConditionBuilder()

	testCases := []struct {
		name          string
		key           telemetrytypes.TelemetryFieldKey
		expectedCol   *schema.Column
		expectedError error
	}{
		{
			name: "Resource field",
			key: telemetrytypes.TelemetryFieldKey{
				Name:         "service.name",
				FieldContext: telemetrytypes.FieldContextResource,
			},
			expectedCol:   logsV2Columns["resources_string"],
			expectedError: nil,
		},
		{
			name: "Scope field - scope name",
			key: telemetrytypes.TelemetryFieldKey{
				Name:         "name",
				FieldContext: telemetrytypes.FieldContextScope,
			},
			expectedCol:   logsV2Columns["scope_name"],
			expectedError: nil,
		},
		{
			name: "Scope field - scope.name",
			key: telemetrytypes.TelemetryFieldKey{
				Name:         "scope.name",
				FieldContext: telemetrytypes.FieldContextScope,
			},
			expectedCol:   logsV2Columns["scope_name"],
			expectedError: nil,
		},
		{
			name: "Scope field - scope_name",
			key: telemetrytypes.TelemetryFieldKey{
				Name:         "scope_name",
				FieldContext: telemetrytypes.FieldContextScope,
			},
			expectedCol:   logsV2Columns["scope_name"],
			expectedError: nil,
		},
		{
			name: "Scope field - version",
			key: telemetrytypes.TelemetryFieldKey{
				Name:         "version",
				FieldContext: telemetrytypes.FieldContextScope,
			},
			expectedCol:   logsV2Columns["scope_version"],
			expectedError: nil,
		},
		{
			name: "Scope field - other scope field",
			key: telemetrytypes.TelemetryFieldKey{
				Name:         "custom.scope.field",
				FieldContext: telemetrytypes.FieldContextScope,
			},
			expectedCol:   logsV2Columns["scope_string"],
			expectedError: nil,
		},
		{
			name: "Attribute field - string type",
			key: telemetrytypes.TelemetryFieldKey{
				Name:          "user.id",
				FieldContext:  telemetrytypes.FieldContextAttribute,
				FieldDataType: telemetrytypes.FieldDataTypeString,
			},
			expectedCol:   logsV2Columns["attributes_string"],
			expectedError: nil,
		},
		{
			name: "Attribute field - number type",
			key: telemetrytypes.TelemetryFieldKey{
				Name:          "request.size",
				FieldContext:  telemetrytypes.FieldContextAttribute,
				FieldDataType: telemetrytypes.FieldDataTypeNumber,
			},
			expectedCol:   logsV2Columns["attributes_number"],
			expectedError: nil,
		},
		{
			name: "Attribute field - int64 type",
			key: telemetrytypes.TelemetryFieldKey{
				Name:          "request.duration",
				FieldContext:  telemetrytypes.FieldContextAttribute,
				FieldDataType: telemetrytypes.FieldDataTypeInt64,
			},
			expectedCol:   logsV2Columns["attributes_number"],
			expectedError: nil,
		},
		{
			name: "Attribute field - float64 type",
			key: telemetrytypes.TelemetryFieldKey{
				Name:          "cpu.utilization",
				FieldContext:  telemetrytypes.FieldContextAttribute,
				FieldDataType: telemetrytypes.FieldDataTypeFloat64,
			},
			expectedCol:   logsV2Columns["attributes_number"],
			expectedError: nil,
		},
		{
			name: "Attribute field - bool type",
			key: telemetrytypes.TelemetryFieldKey{
				Name:          "request.success",
				FieldContext:  telemetrytypes.FieldContextAttribute,
				FieldDataType: telemetrytypes.FieldDataTypeBool,
			},
			expectedCol:   logsV2Columns["attributes_bool"],
			expectedError: nil,
		},
		{
			name: "Log field - timestamp",
			key: telemetrytypes.TelemetryFieldKey{
				Name:         "timestamp",
				FieldContext: telemetrytypes.FieldContextLog,
			},
			expectedCol:   logsV2Columns["timestamp"],
			expectedError: nil,
		},
		{
			name: "Log field - body",
			key: telemetrytypes.TelemetryFieldKey{
				Name:         "body",
				FieldContext: telemetrytypes.FieldContextLog,
			},
			expectedCol:   logsV2Columns["body"],
			expectedError: nil,
		},
		{
			name: "Log field - nonexistent",
			key: telemetrytypes.TelemetryFieldKey{
				Name:         "nonexistent_field",
				FieldContext: telemetrytypes.FieldContextLog,
			},
			expectedCol:   nil,
			expectedError: qbtypes.ErrColumnNotFound,
		},
		{
			name: "did_user_login",
			key: telemetrytypes.TelemetryFieldKey{
				Name:          "did_user_login",
				Signal:        telemetrytypes.SignalLogs,
				FieldContext:  telemetrytypes.FieldContextAttribute,
				FieldDataType: telemetrytypes.FieldDataTypeBool,
			},
			expectedCol:   logsV2Columns["attributes_bool"],
			expectedError: nil,
		},
	}

	for _, tc := range testCases {
		t.Run(tc.name, func(t *testing.T) {
			col, err := conditionBuilder.GetColumn(ctx, &tc.key)

			if tc.expectedError != nil {
				assert.Equal(t, tc.expectedError, err)
			} else {
				require.NoError(t, err)
				assert.Equal(t, tc.expectedCol, col)
			}
		})
	}
}

func TestGetFieldKeyName(t *testing.T) {
	ctx := context.Background()
	conditionBuilder := &conditionBuilder{}

	testCases := []struct {
		name           string
		key            telemetrytypes.TelemetryFieldKey
		expectedResult string
		expectedError  error
	}{
		{
			name: "Simple column type - timestamp",
			key: telemetrytypes.TelemetryFieldKey{
				Name:         "timestamp",
				FieldContext: telemetrytypes.FieldContextLog,
			},
			expectedResult: "timestamp",
			expectedError:  nil,
		},
		{
			name: "Map column type - string attribute",
			key: telemetrytypes.TelemetryFieldKey{
				Name:          "user.id",
				FieldContext:  telemetrytypes.FieldContextAttribute,
				FieldDataType: telemetrytypes.FieldDataTypeString,
			},
			expectedResult: "attributes_string['user.id']",
			expectedError:  nil,
		},
		{
			name: "Map column type - number attribute",
			key: telemetrytypes.TelemetryFieldKey{
				Name:          "request.size",
				FieldContext:  telemetrytypes.FieldContextAttribute,
				FieldDataType: telemetrytypes.FieldDataTypeNumber,
			},
			expectedResult: "attributes_number['request.size']",
			expectedError:  nil,
		},
		{
			name: "Map column type - bool attribute",
			key: telemetrytypes.TelemetryFieldKey{
				Name:          "request.success",
				FieldContext:  telemetrytypes.FieldContextAttribute,
				FieldDataType: telemetrytypes.FieldDataTypeBool,
			},
			expectedResult: "attributes_bool['request.success']",
			expectedError:  nil,
		},
		{
			name: "Map column type - resource attribute",
			key: telemetrytypes.TelemetryFieldKey{
				Name:         "service.name",
				FieldContext: telemetrytypes.FieldContextResource,
			},
			expectedResult: "resources_string['service.name']",
			expectedError:  nil,
		},
		{
			name: "Non-existent column",
			key: telemetrytypes.TelemetryFieldKey{
				Name:         "nonexistent_field",
				FieldContext: telemetrytypes.FieldContextLog,
			},
			expectedResult: "",
			expectedError:  qbtypes.ErrColumnNotFound,
		},
	}

	for _, tc := range testCases {
		t.Run(tc.name, func(t *testing.T) {
			result, err := conditionBuilder.GetTableFieldName(ctx, &tc.key)

			if tc.expectedError != nil {
				assert.Equal(t, tc.expectedError, err)
			} else {
				require.NoError(t, err)
				assert.Equal(t, tc.expectedResult, result)
			}
		})
	}
}

func TestGetCondition(t *testing.T) {
	ctx := context.Background()
	conditionBuilder := NewConditionBuilder()

	testCases := []struct {
		name          string
		key           telemetrytypes.TelemetryFieldKey
		operator      qbtypes.FilterOperator
		value         any
		expectedSQL   string
		expectedError error
	}{
		{
			name: "Equal operator - string",
			key: telemetrytypes.TelemetryFieldKey{
				Name:         "body",
				FieldContext: telemetrytypes.FieldContextLog,
			},
			operator:      qbtypes.FilterOperatorEqual,
			value:         "error message",
			expectedSQL:   "body = ?",
			expectedError: nil,
		},
		{
			name: "Not Equal operator - timestamp",
			key: telemetrytypes.TelemetryFieldKey{
				Name:         "timestamp",
				FieldContext: telemetrytypes.FieldContextLog,
			},
			operator:      qbtypes.FilterOperatorNotEqual,
			value:         uint64(1617979338000000000),
			expectedSQL:   "timestamp <> ?",
			expectedError: nil,
		},
		{
			name: "Greater Than operator - number attribute",
			key: telemetrytypes.TelemetryFieldKey{
				Name:          "request.duration",
				FieldContext:  telemetrytypes.FieldContextAttribute,
				FieldDataType: telemetrytypes.FieldDataTypeNumber,
			},
			operator:      qbtypes.FilterOperatorGreaterThan,
			value:         float64(100),
			expectedSQL:   "attributes_number['request.duration'] > ?",
			expectedError: nil,
		},
		{
			name: "Less Than operator - number attribute",
			key: telemetrytypes.TelemetryFieldKey{
				Name:          "request.size",
				FieldContext:  telemetrytypes.FieldContextAttribute,
				FieldDataType: telemetrytypes.FieldDataTypeNumber,
			},
			operator:      qbtypes.FilterOperatorLessThan,
			value:         float64(1024),
			expectedSQL:   "attributes_number['request.size'] < ?",
			expectedError: nil,
		},
		{
			name: "Greater Than Or Equal operator - timestamp",
			key: telemetrytypes.TelemetryFieldKey{
				Name:         "timestamp",
				FieldContext: telemetrytypes.FieldContextLog,
			},
			operator:      qbtypes.FilterOperatorGreaterThanOrEq,
			value:         uint64(1617979338000000000),
			expectedSQL:   "timestamp >= ?",
			expectedError: nil,
		},
		{
			name: "Less Than Or Equal operator - timestamp",
			key: telemetrytypes.TelemetryFieldKey{
				Name:         "timestamp",
				FieldContext: telemetrytypes.FieldContextLog,
			},
			operator:      qbtypes.FilterOperatorLessThanOrEq,
			value:         uint64(1617979338000000000),
			expectedSQL:   "timestamp <= ?",
			expectedError: nil,
		},
		{
			name: "Like operator - body",
			key: telemetrytypes.TelemetryFieldKey{
				Name:         "body",
				FieldContext: telemetrytypes.FieldContextLog,
			},
			operator:      qbtypes.FilterOperatorLike,
			value:         "%error%",
			expectedSQL:   "body LIKE ?",
			expectedError: nil,
		},
		{
			name: "Not Like operator - body",
			key: telemetrytypes.TelemetryFieldKey{
				Name:         "body",
				FieldContext: telemetrytypes.FieldContextLog,
			},
			operator:      qbtypes.FilterOperatorNotLike,
			value:         "%error%",
			expectedSQL:   "body NOT LIKE ?",
			expectedError: nil,
		},
		{
			name: "ILike operator - string attribute",
			key: telemetrytypes.TelemetryFieldKey{
				Name:          "user.id",
				FieldContext:  telemetrytypes.FieldContextAttribute,
				FieldDataType: telemetrytypes.FieldDataTypeString,
			},
			operator:      qbtypes.FilterOperatorILike,
			value:         "%admin%",
			expectedSQL:   "WHERE LOWER(attributes_string['user.id']) LIKE LOWER(?)",
			expectedError: nil,
		},
		{
			name: "Not ILike operator - string attribute",
			key: telemetrytypes.TelemetryFieldKey{
				Name:          "user.id",
				FieldContext:  telemetrytypes.FieldContextAttribute,
				FieldDataType: telemetrytypes.FieldDataTypeString,
			},
			operator:      qbtypes.FilterOperatorNotILike,
			value:         "%admin%",
			expectedSQL:   "WHERE LOWER(attributes_string['user.id']) NOT LIKE LOWER(?)",
			expectedError: nil,
		},
		{
			name: "Contains operator - string attribute",
			key: telemetrytypes.TelemetryFieldKey{
				Name:          "user.id",
				FieldContext:  telemetrytypes.FieldContextAttribute,
				FieldDataType: telemetrytypes.FieldDataTypeString,
			},
			operator:      qbtypes.FilterOperatorContains,
			value:         "admin",
			expectedSQL:   "WHERE LOWER(attributes_string['user.id']) LIKE LOWER(?)",
			expectedError: nil,
		},
		{
			name: "Between operator - timestamp",
			key: telemetrytypes.TelemetryFieldKey{
				Name:         "timestamp",
				FieldContext: telemetrytypes.FieldContextLog,
			},
			operator:      qbtypes.FilterOperatorBetween,
			value:         []any{uint64(1617979338000000000), uint64(1617979348000000000)},
			expectedSQL:   "timestamp BETWEEN ? AND ?",
			expectedError: nil,
		},
		{
			name: "Between operator - invalid value",
			key: telemetrytypes.TelemetryFieldKey{
				Name:         "timestamp",
				FieldContext: telemetrytypes.FieldContextLog,
			},
			operator:      qbtypes.FilterOperatorBetween,
			value:         "invalid",
			expectedSQL:   "",
			expectedError: qbtypes.ErrBetweenValues,
		},
		{
			name: "Between operator - insufficient values",
			key: telemetrytypes.TelemetryFieldKey{
				Name:         "timestamp",
				FieldContext: telemetrytypes.FieldContextLog,
			},
			operator:      qbtypes.FilterOperatorBetween,
			value:         []any{uint64(1617979338000000000)},
			expectedSQL:   "",
			expectedError: qbtypes.ErrBetweenValues,
		},
		{
			name: "Not Between operator - timestamp",
			key: telemetrytypes.TelemetryFieldKey{
				Name:         "timestamp",
				FieldContext: telemetrytypes.FieldContextLog,
			},
			operator:      qbtypes.FilterOperatorNotBetween,
			value:         []any{uint64(1617979338000000000), uint64(1617979348000000000)},
			expectedSQL:   "timestamp NOT BETWEEN ? AND ?",
			expectedError: nil,
		},
		{
			name: "In operator - severity_text",
			key: telemetrytypes.TelemetryFieldKey{
				Name:         "severity_text",
				FieldContext: telemetrytypes.FieldContextLog,
			},
			operator:      qbtypes.FilterOperatorIn,
			value:         []any{"error", "fatal", "critical"},
			expectedSQL:   "severity_text IN (?, ?, ?)",
			expectedError: nil,
		},
		{
			name: "In operator - invalid value",
			key: telemetrytypes.TelemetryFieldKey{
				Name:         "severity_text",
				FieldContext: telemetrytypes.FieldContextLog,
			},
			operator:      qbtypes.FilterOperatorIn,
			value:         "error",
			expectedSQL:   "",
			expectedError: qbtypes.ErrInValues,
		},
		{
			name: "Not In operator - severity_text",
			key: telemetrytypes.TelemetryFieldKey{
				Name:         "severity_text",
				FieldContext: telemetrytypes.FieldContextLog,
			},
			operator:      qbtypes.FilterOperatorNotIn,
			value:         []any{"debug", "info", "trace"},
			expectedSQL:   "severity_text NOT IN (?, ?, ?)",
			expectedError: nil,
		},
		{
			name: "Exists operator - string field",
			key: telemetrytypes.TelemetryFieldKey{
				Name:         "body",
				FieldContext: telemetrytypes.FieldContextLog,
			},
			operator:      qbtypes.FilterOperatorExists,
			value:         nil,
			expectedSQL:   "body <> ?",
			expectedError: nil,
		},
		{
			name: "Not Exists operator - string field",
			key: telemetrytypes.TelemetryFieldKey{
				Name:         "body",
				FieldContext: telemetrytypes.FieldContextLog,
			},
			operator:      qbtypes.FilterOperatorNotExists,
			value:         nil,
			expectedSQL:   "body = ?",
			expectedError: nil,
		},
		{
			name: "Exists operator - number field",
			key: telemetrytypes.TelemetryFieldKey{
				Name:         "timestamp",
				FieldContext: telemetrytypes.FieldContextLog,
			},
			operator:      qbtypes.FilterOperatorExists,
			value:         nil,
			expectedSQL:   "timestamp <> ?",
			expectedError: nil,
		},
		{
			name: "Exists operator - map field",
			key: telemetrytypes.TelemetryFieldKey{
				Name:          "user.id",
				FieldContext:  telemetrytypes.FieldContextAttribute,
				FieldDataType: telemetrytypes.FieldDataTypeString,
			},
			operator:      qbtypes.FilterOperatorExists,
			value:         nil,
			expectedSQL:   "mapContains(attributes_string, 'user.id') = ?",
			expectedError: nil,
		},
		{
			name: "Not Exists operator - map field",
			key: telemetrytypes.TelemetryFieldKey{
				Name:          "user.id",
				FieldContext:  telemetrytypes.FieldContextAttribute,
				FieldDataType: telemetrytypes.FieldDataTypeString,
			},
			operator:      qbtypes.FilterOperatorNotExists,
			value:         nil,
			expectedSQL:   "mapContains(attributes_string, 'user.id') <> ?",
			expectedError: nil,
		},
		{
			name: "Non-existent column",
			key: telemetrytypes.TelemetryFieldKey{
				Name:         "nonexistent_field",
				FieldContext: telemetrytypes.FieldContextLog,
			},
			operator:      qbtypes.FilterOperatorEqual,
			value:         "value",
			expectedSQL:   "",
			expectedError: qbtypes.ErrColumnNotFound,
		},
	}

	for _, tc := range testCases {
		sb := sqlbuilder.NewSelectBuilder()
		t.Run(tc.name, func(t *testing.T) {
			cond, err := conditionBuilder.GetCondition(ctx, &tc.key, tc.operator, tc.value, sb)
			sb.Where(cond)

			if tc.expectedError != nil {
				assert.Equal(t, tc.expectedError, err)
			} else {
				require.NoError(t, err)
				sql, _ := sb.BuildWithFlavor(sqlbuilder.ClickHouse)
				assert.Contains(t, sql, tc.expectedSQL)
			}
		})
	}
}

func TestGetConditionMultiple(t *testing.T) {
	ctx := context.Background()
	conditionBuilder := NewConditionBuilder()

	testCases := []struct {
		name          string
		keys          []*telemetrytypes.TelemetryFieldKey
		operator      qbtypes.FilterOperator
		value         any
		expectedSQL   string
		expectedError error
	}{
		{
			name: "Equal operator - string",
			keys: []*telemetrytypes.TelemetryFieldKey{
				{
					Name:         "body",
					FieldContext: telemetrytypes.FieldContextLog,
				},
				{
					Name:         "severity_text",
					FieldContext: telemetrytypes.FieldContextLog,
				},
			},
			operator:      qbtypes.FilterOperatorEqual,
			value:         "error message",
			expectedSQL:   "body = ? AND severity_text = ?",
			expectedError: nil,
		},
	}

	for _, tc := range testCases {
		sb := sqlbuilder.NewSelectBuilder()
		t.Run(tc.name, func(t *testing.T) {
			var err error
			for _, key := range tc.keys {
				// assign with = (not :=) so the error stays visible to the
				// assertions after the loop instead of being shadowed
				var cond string
				cond, err = conditionBuilder.GetCondition(ctx, key, tc.operator, tc.value, sb)
				sb.Where(cond)
				if err != nil {
					t.Fatalf("Error getting condition for key %s: %v", key.Name, err)
				}
			}

			if tc.expectedError != nil {
				assert.Equal(t, tc.expectedError, err)
			} else {
				require.NoError(t, err)
				sql, _ := sb.BuildWithFlavor(sqlbuilder.ClickHouse)
				assert.Contains(t, sql, tc.expectedSQL)
			}
		})
	}
}
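The multi-key test relies on go-sqlbuilder AND-ing successive Where calls; a minimal standalone sketch of that behavior, using only the builder API already exercised above:

	sb := sqlbuilder.NewSelectBuilder()
	sb.Where(sb.E("body", "error message"))
	sb.Where(sb.E("severity_text", "error message"))
	// builds: ... WHERE body = ? AND severity_text = ?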

func TestGetConditionJSONBodySearch(t *testing.T) {
	ctx := context.Background()
	conditionBuilder := NewConditionBuilder()

	testCases := []struct {
		name          string
		key           telemetrytypes.TelemetryFieldKey
		operator      qbtypes.FilterOperator
		value         any
		expectedSQL   string
		expectedError error
	}{
		{
			name: "Equal operator - int64",
			key: telemetrytypes.TelemetryFieldKey{
				Name: "body.http.status_code",
			},
			operator:      qbtypes.FilterOperatorEqual,
			value:         200,
			expectedSQL:   "JSONExtract(JSON_VALUE(body, '$.http.status_code'), 'Int64') = ?",
			expectedError: nil,
		},
		{
			name: "Equal operator - float64",
			key: telemetrytypes.TelemetryFieldKey{
				Name: "body.duration_ms",
			},
			operator:      qbtypes.FilterOperatorEqual,
			value:         405.5,
			expectedSQL:   "JSONExtract(JSON_VALUE(body, '$.duration_ms'), 'Float64') = ?",
			expectedError: nil,
		},
		{
			name: "Equal operator - string",
			key: telemetrytypes.TelemetryFieldKey{
				Name: "body.http.method",
			},
			operator:      qbtypes.FilterOperatorEqual,
			value:         "GET",
			expectedSQL:   "JSONExtract(JSON_VALUE(body, '$.http.method'), 'String') = ?",
			expectedError: nil,
		},
		{
			name: "Equal operator - bool",
			key: telemetrytypes.TelemetryFieldKey{
				Name: "body.http.success",
			},
			operator:      qbtypes.FilterOperatorEqual,
			value:         true,
			expectedSQL:   "JSONExtract(JSON_VALUE(body, '$.http.success'), 'Bool') = ?",
			expectedError: nil,
		},
		{
			name: "Exists operator",
			key: telemetrytypes.TelemetryFieldKey{
				Name: "body.http.status_code",
			},
			operator:      qbtypes.FilterOperatorExists,
			value:         nil,
			expectedSQL:   "JSONExtract(JSON_VALUE(body, '$.http.status_code'), 'String') <> ?",
			expectedError: nil,
		},
		{
			name: "Not Exists operator",
			key: telemetrytypes.TelemetryFieldKey{
				Name: "body.http.status_code",
			},
			operator:      qbtypes.FilterOperatorNotExists,
			value:         nil,
			expectedSQL:   "JSONExtract(JSON_VALUE(body, '$.http.status_code'), 'String') = ?",
			expectedError: nil,
		},
		{
			name: "Greater than operator - string",
			key: telemetrytypes.TelemetryFieldKey{
				Name: "body.http.status_code",
			},
			operator:      qbtypes.FilterOperatorGreaterThan,
			value:         "200",
			expectedSQL:   "JSONExtract(JSON_VALUE(body, '$.http.status_code'), 'Int64') > ?",
			expectedError: nil,
		},
		{
			name: "Greater than operator - int64",
			key: telemetrytypes.TelemetryFieldKey{
				Name: "body.http.status_code",
			},
			operator:      qbtypes.FilterOperatorGreaterThan,
			value:         200,
			expectedSQL:   "JSONExtract(JSON_VALUE(body, '$.http.status_code'), 'Int64') > ?",
			expectedError: nil,
		},
		{
			name: "Less than operator - string",
			key: telemetrytypes.TelemetryFieldKey{
				Name: "body.http.status_code",
			},
			operator:      qbtypes.FilterOperatorLessThan,
			value:         "300",
			expectedSQL:   "JSONExtract(JSON_VALUE(body, '$.http.status_code'), 'Int64') < ?",
			expectedError: nil,
		},
		{
			name: "Less than operator - int64",
			key: telemetrytypes.TelemetryFieldKey{
				Name: "body.http.status_code",
			},
			operator:      qbtypes.FilterOperatorLessThan,
			value:         300,
			expectedSQL:   "JSONExtract(JSON_VALUE(body, '$.http.status_code'), 'Int64') < ?",
			expectedError: nil,
		},
		{
			name: "Contains operator - string",
			key: telemetrytypes.TelemetryFieldKey{
				Name: "body.http.status_code",
			},
			operator:      qbtypes.FilterOperatorContains,
			value:         "200",
			expectedSQL:   "LOWER(JSONExtract(JSON_VALUE(body, '$.http.status_code'), 'String')) LIKE LOWER(?)",
			expectedError: nil,
		},
		{
			name: "Not Contains operator - string",
			key: telemetrytypes.TelemetryFieldKey{
				Name: "body.http.status_code",
			},
			operator:      qbtypes.FilterOperatorNotContains,
			value:         "200",
			expectedSQL:   "LOWER(JSONExtract(JSON_VALUE(body, '$.http.status_code'), 'String')) NOT LIKE LOWER(?)",
			expectedError: nil,
		},
		{
			name: "Between operator - string",
			key: telemetrytypes.TelemetryFieldKey{
				Name: "body.http.status_code",
			},
			operator:      qbtypes.FilterOperatorBetween,
			value:         []any{"200", "300"},
			expectedSQL:   "JSONExtract(JSON_VALUE(body, '$.http.status_code'), 'Int64') BETWEEN ? AND ?",
			expectedError: nil,
		},
		{
			name: "Between operator - int64",
			key: telemetrytypes.TelemetryFieldKey{
				Name: "body.http.status_code",
			},
			operator:      qbtypes.FilterOperatorBetween,
			value:         []any{400, 500},
			expectedSQL:   "JSONExtract(JSON_VALUE(body, '$.http.status_code'), 'Int64') BETWEEN ? AND ?",
			expectedError: nil,
		},
		{
			name: "In operator - string",
			key: telemetrytypes.TelemetryFieldKey{
				Name: "body.http.status_code",
			},
			operator:      qbtypes.FilterOperatorIn,
			value:         []any{"200", "300"},
			expectedSQL:   "JSONExtract(JSON_VALUE(body, '$.http.status_code'), 'Int64') IN (?, ?)",
			expectedError: nil,
		},
		{
			name: "In operator - int64",
			key: telemetrytypes.TelemetryFieldKey{
				Name: "body.http.status_code",
			},
			operator:      qbtypes.FilterOperatorIn,
			value:         []any{401, 404, 500},
			expectedSQL:   "JSONExtract(JSON_VALUE(body, '$.http.status_code'), 'Int64') IN (?, ?, ?)",
			expectedError: nil,
		},
	}

	for _, tc := range testCases {
		sb := sqlbuilder.NewSelectBuilder()
		t.Run(tc.name, func(t *testing.T) {
			cond, err := conditionBuilder.GetCondition(ctx, &tc.key, tc.operator, tc.value, sb)
			sb.Where(cond)

			if tc.expectedError != nil {
				assert.Equal(t, tc.expectedError, err)
			} else {
				require.NoError(t, err)
				sql, _ := sb.BuildWithFlavor(sqlbuilder.ClickHouse)
				assert.Contains(t, sql, tc.expectedSQL)
			}
		})
	}
}
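Taken together, the JSON-body cases encode a small inference rule: the ClickHouse extraction type follows the Go type of the value for equality (Int64/Float64/String/Bool), numeric-looking strings are promoted to Int64 for ordering, range, and set operators, bare exists checks extract as String, and Contains/Not Contains always compare case-insensitively as strings. Illustratively:

	// value 200        -> JSONExtract(JSON_VALUE(body, '$.http.status_code'), 'Int64')
	// value "GET"      -> JSONExtract(JSON_VALUE(body, '$.http.method'), 'String')
	// value true       -> JSONExtract(JSON_VALUE(body, '$.http.success'), 'Bool')
	// "200" with > / < -> promoted to 'Int64' before comparison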
5
pkg/telemetrylogs/const.go
Normal file
@@ -0,0 +1,5 @@
package telemetrylogs

var (
	BodyJSONStringSearchPrefix = `body.`
)
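A minimal sketch of how this prefix is presumably consumed; the helper below is hypothetical (the real call site is not in this section) but mirrors the `$.`-path expectations in the tests above, assuming the standard library strings package:

	// bodyJSONPath is a hypothetical helper, for illustration only.
	func bodyJSONPath(fieldName string) (string, bool) {
		if !strings.HasPrefix(fieldName, BodyJSONStringSearchPrefix) {
			return "", false
		}
		// "body.http.status_code" -> "$.http.status_code"
		return "$." + strings.TrimPrefix(fieldName, BodyJSONStringSearchPrefix), true
	}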
9
pkg/telemetrylogs/tables.go
Normal file
@@ -0,0 +1,9 @@
package telemetrylogs

const (
	DBName                        = "signoz_logs"
	LogsV2TableName               = "distributed_logs_v2"
	LogsV2LocalTableName          = "logs_v2"
	TagAttributesV2TableName      = "distributed_tag_attributes_v2"
	TagAttributesV2LocalTableName = "tag_attributes_v2"
)
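These constants pair each distributed ClickHouse table with its local counterpart; callers would typically qualify the distributed name with the database, e.g. (illustrative):

	table := fmt.Sprintf("%s.%s", DBName, LogsV2TableName) // "signoz_logs.distributed_logs_v2"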
149
pkg/telemetrymetadata/condition_builder.go
Normal file
@@ -0,0 +1,149 @@
package telemetrymetadata

import (
	"context"
	"fmt"

	schema "github.com/SigNoz/signoz-otel-collector/cmd/signozschemamigrator/schema_migrator"
	qbtypes "github.com/SigNoz/signoz/pkg/types/querybuildertypes/querybuildertypesv5"
	"github.com/SigNoz/signoz/pkg/types/telemetrytypes"
	"github.com/huandu/go-sqlbuilder"
)

var (
	attributeMetadataColumns = map[string]*schema.Column{
		"resource_attributes": {Name: "resource_attributes", Type: schema.MapColumnType{
			KeyType:   schema.LowCardinalityColumnType{ElementType: schema.ColumnTypeString},
			ValueType: schema.ColumnTypeString,
		}},
		"attributes": {Name: "attributes", Type: schema.MapColumnType{
			KeyType:   schema.LowCardinalityColumnType{ElementType: schema.ColumnTypeString},
			ValueType: schema.ColumnTypeString,
		}},
	}
)

type conditionBuilder struct {
}

func NewConditionBuilder() qbtypes.ConditionBuilder {
	return &conditionBuilder{}
}

func (c *conditionBuilder) GetColumn(ctx context.Context, key *telemetrytypes.TelemetryFieldKey) (*schema.Column, error) {
	switch key.FieldContext {
	case telemetrytypes.FieldContextResource:
		return attributeMetadataColumns["resource_attributes"], nil
	case telemetrytypes.FieldContextAttribute:
		return attributeMetadataColumns["attributes"], nil
	}
	return nil, qbtypes.ErrColumnNotFound
}

func (c *conditionBuilder) GetTableFieldName(ctx context.Context, key *telemetrytypes.TelemetryFieldKey) (string, error) {
	column, err := c.GetColumn(ctx, key)
	if err != nil {
		return "", err
	}

	switch column.Type {
	case schema.MapColumnType{
		KeyType:   schema.LowCardinalityColumnType{ElementType: schema.ColumnTypeString},
		ValueType: schema.ColumnTypeString,
	}:
		return fmt.Sprintf("%s['%s']", column.Name, key.Name), nil
	}
	return column.Name, nil
}

func (c *conditionBuilder) GetCondition(
	ctx context.Context,
	key *telemetrytypes.TelemetryFieldKey,
	operator qbtypes.FilterOperator,
	value any,
	sb *sqlbuilder.SelectBuilder,
) (string, error) {
	column, err := c.GetColumn(ctx, key)
	if err != nil {
		// if we don't have a column, we can't build a condition for related values
		return "", nil
	}

	tblFieldName, err := c.GetTableFieldName(ctx, key)
	if err != nil {
		// if we don't have a table field name, we can't build a condition for related values
		return "", nil
	}

	if key.FieldDataType != telemetrytypes.FieldDataTypeString {
		// if the field data type is not string, we can't build a condition for related values
		return "", nil
	}

	// the key must exist for the main filter to apply
	containsExp := fmt.Sprintf("mapContains(%s, %s)", column.Name, sb.Var(key.Name))

	switch operator {
	// regular operators
	case qbtypes.FilterOperatorEqual:
		return sb.And(containsExp, sb.E(tblFieldName, value)), nil
	case qbtypes.FilterOperatorNotEqual:
		return sb.And(containsExp, sb.NE(tblFieldName, value)), nil

	// like and not like
	case qbtypes.FilterOperatorLike:
		return sb.And(containsExp, sb.Like(tblFieldName, value)), nil
	case qbtypes.FilterOperatorNotLike:
		return sb.And(containsExp, sb.NotLike(tblFieldName, value)), nil
	case qbtypes.FilterOperatorILike:
		return sb.And(containsExp, sb.ILike(tblFieldName, value)), nil
	case qbtypes.FilterOperatorNotILike:
		return sb.And(containsExp, sb.NotILike(tblFieldName, value)), nil

	case qbtypes.FilterOperatorContains:
		return sb.And(containsExp, sb.ILike(tblFieldName, fmt.Sprintf("%%%s%%", value))), nil
	case qbtypes.FilterOperatorNotContains:
		return sb.And(containsExp, sb.NotILike(tblFieldName, fmt.Sprintf("%%%s%%", value))), nil

	case qbtypes.FilterOperatorRegexp:
		exp := fmt.Sprintf(`match(%s, %s)`, tblFieldName, sb.Var(value))
		return sb.And(containsExp, exp), nil
	case qbtypes.FilterOperatorNotRegexp:
		exp := fmt.Sprintf(`not match(%s, %s)`, tblFieldName, sb.Var(value))
		return sb.And(containsExp, exp), nil

	// in and not in
	case qbtypes.FilterOperatorIn:
		values, ok := value.([]any)
		if !ok {
			return "", qbtypes.ErrInValues
		}
		return sb.And(containsExp, sb.In(tblFieldName, values...)), nil
	case qbtypes.FilterOperatorNotIn:
		values, ok := value.([]any)
		if !ok {
			return "", qbtypes.ErrInValues
		}
		return sb.And(containsExp, sb.NotIn(tblFieldName, values...)), nil

	// exists and not exists
	// in the query builder, `exists` and `not exists` are used for
	// key membership checks, so depending on the column type, the condition changes
	case qbtypes.FilterOperatorExists, qbtypes.FilterOperatorNotExists:
		switch column.Type {
		case schema.MapColumnType{
			KeyType:   schema.LowCardinalityColumnType{ElementType: schema.ColumnTypeString},
			ValueType: schema.ColumnTypeString,
		}:
			leftOperand := fmt.Sprintf("mapContains(%s, '%s')", column.Name, key.Name)
			if operator == qbtypes.FilterOperatorExists {
				return sb.E(leftOperand, true), nil
			} else {
				return sb.NE(leftOperand, true), nil
			}
		}
	}

	return "", nil
}
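A minimal usage sketch, mirroring the tests that follow: every metadata condition is guarded by a mapContains key-membership check before the operator itself applies (ctx is assumed to be an existing context.Context):

	sb := sqlbuilder.NewSelectBuilder()
	key := &telemetrytypes.TelemetryFieldKey{
		Name:          "user.id",
		FieldContext:  telemetrytypes.FieldContextAttribute,
		FieldDataType: telemetrytypes.FieldDataTypeString,
	}
	cond, _ := NewConditionBuilder().GetCondition(ctx, key, qbtypes.FilterOperatorILike, "%admin%", sb)
	sb.Where(cond)
	// WHERE (mapContains(attributes, ?) AND LOWER(attributes['user.id']) LIKE LOWER(?))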
272
pkg/telemetrymetadata/condition_builder_test.go
Normal file
@@ -0,0 +1,272 @@
package telemetrymetadata

import (
	"context"
	"testing"

	schema "github.com/SigNoz/signoz-otel-collector/cmd/signozschemamigrator/schema_migrator"
	qbtypes "github.com/SigNoz/signoz/pkg/types/querybuildertypes/querybuildertypesv5"
	"github.com/SigNoz/signoz/pkg/types/telemetrytypes"
	"github.com/huandu/go-sqlbuilder"
	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"
)

func TestGetColumn(t *testing.T) {
	ctx := context.Background()
	conditionBuilder := NewConditionBuilder()

	testCases := []struct {
		name          string
		key           telemetrytypes.TelemetryFieldKey
		expectedCol   *schema.Column
		expectedError error
	}{
		{
			name: "Resource field",
			key: telemetrytypes.TelemetryFieldKey{
				Name:         "service.name",
				FieldContext: telemetrytypes.FieldContextResource,
			},
			expectedCol:   attributeMetadataColumns["resource_attributes"],
			expectedError: nil,
		},
		{
			name: "Scope field - scope name",
			key: telemetrytypes.TelemetryFieldKey{
				Name:         "name",
				FieldContext: telemetrytypes.FieldContextScope,
			},
			expectedCol:   nil,
			expectedError: qbtypes.ErrColumnNotFound,
		},
		{
			name: "Scope field - scope.name",
			key: telemetrytypes.TelemetryFieldKey{
				Name:         "scope.name",
				FieldContext: telemetrytypes.FieldContextScope,
			},
			expectedCol:   nil,
			expectedError: qbtypes.ErrColumnNotFound,
		},
		{
			name: "Scope field - scope_name",
			key: telemetrytypes.TelemetryFieldKey{
				Name:         "scope_name",
				FieldContext: telemetrytypes.FieldContextScope,
			},
			expectedCol:   nil,
			expectedError: qbtypes.ErrColumnNotFound,
		},
		{
			name: "Scope field - version",
			key: telemetrytypes.TelemetryFieldKey{
				Name:         "version",
				FieldContext: telemetrytypes.FieldContextScope,
			},
			expectedCol:   nil,
			expectedError: qbtypes.ErrColumnNotFound,
		},
		{
			name: "Scope field - other scope field",
			key: telemetrytypes.TelemetryFieldKey{
				Name:         "custom.scope.field",
				FieldContext: telemetrytypes.FieldContextScope,
			},
			expectedCol:   nil,
			expectedError: qbtypes.ErrColumnNotFound,
		},
		{
			name: "Attribute field - string type",
			key: telemetrytypes.TelemetryFieldKey{
				Name:          "user.id",
				FieldContext:  telemetrytypes.FieldContextAttribute,
				FieldDataType: telemetrytypes.FieldDataTypeString,
			},
			expectedCol:   attributeMetadataColumns["attributes"],
			expectedError: nil,
		},
		{
			name: "Attribute field - number type",
			key: telemetrytypes.TelemetryFieldKey{
				Name:          "request.size",
				FieldContext:  telemetrytypes.FieldContextAttribute,
				FieldDataType: telemetrytypes.FieldDataTypeNumber,
			},
			expectedCol:   attributeMetadataColumns["attributes"],
			expectedError: nil,
		},
		{
			name: "Attribute field - int64 type",
			key: telemetrytypes.TelemetryFieldKey{
				Name:          "request.duration",
				FieldContext:  telemetrytypes.FieldContextAttribute,
				FieldDataType: telemetrytypes.FieldDataTypeInt64,
			},
			expectedCol:   attributeMetadataColumns["attributes"],
			expectedError: nil,
		},
		{
			name: "Attribute field - float64 type",
			key: telemetrytypes.TelemetryFieldKey{
				Name:          "cpu.utilization",
				FieldContext:  telemetrytypes.FieldContextAttribute,
				FieldDataType: telemetrytypes.FieldDataTypeFloat64,
			},
			expectedCol:   attributeMetadataColumns["attributes"],
			expectedError: nil,
		},
		{
			name: "Log field - nonexistent",
			key: telemetrytypes.TelemetryFieldKey{
				Name:         "nonexistent_field",
				FieldContext: telemetrytypes.FieldContextLog,
			},
			expectedCol:   nil,
			expectedError: qbtypes.ErrColumnNotFound,
		},
	}

	for _, tc := range testCases {
		t.Run(tc.name, func(t *testing.T) {
			col, err := conditionBuilder.GetColumn(ctx, &tc.key)

			if tc.expectedError != nil {
				assert.Equal(t, tc.expectedError, err)
			} else {
				require.NoError(t, err)
				assert.Equal(t, tc.expectedCol, col)
			}
		})
	}
}

func TestGetFieldKeyName(t *testing.T) {
	ctx := context.Background()
	conditionBuilder := &conditionBuilder{}

	testCases := []struct {
		name           string
		key            telemetrytypes.TelemetryFieldKey
		expectedResult string
		expectedError  error
	}{
		{
			name: "Map column type - string attribute",
			key: telemetrytypes.TelemetryFieldKey{
				Name:          "user.id",
				FieldContext:  telemetrytypes.FieldContextAttribute,
				FieldDataType: telemetrytypes.FieldDataTypeString,
			},
			expectedResult: "attributes['user.id']",
			expectedError:  nil,
		},
		{
			name: "Map column type - number attribute",
			key: telemetrytypes.TelemetryFieldKey{
				Name:          "request.size",
				FieldContext:  telemetrytypes.FieldContextAttribute,
				FieldDataType: telemetrytypes.FieldDataTypeNumber,
			},
			expectedResult: "attributes['request.size']",
			expectedError:  nil,
		},
		{
			name: "Map column type - bool attribute",
			key: telemetrytypes.TelemetryFieldKey{
				Name:          "request.success",
				FieldContext:  telemetrytypes.FieldContextAttribute,
				FieldDataType: telemetrytypes.FieldDataTypeBool,
			},
			expectedResult: "attributes['request.success']",
			expectedError:  nil,
		},
		{
			name: "Map column type - resource attribute",
			key: telemetrytypes.TelemetryFieldKey{
				Name:         "service.name",
				FieldContext: telemetrytypes.FieldContextResource,
			},
			expectedResult: "resource_attributes['service.name']",
			expectedError:  nil,
		},
		{
			name: "Non-existent column",
			key: telemetrytypes.TelemetryFieldKey{
				Name:         "nonexistent_field",
				FieldContext: telemetrytypes.FieldContextLog,
			},
			expectedResult: "",
			expectedError:  qbtypes.ErrColumnNotFound,
		},
	}

	for _, tc := range testCases {
		t.Run(tc.name, func(t *testing.T) {
			result, err := conditionBuilder.GetTableFieldName(ctx, &tc.key)

			if tc.expectedError != nil {
				assert.Equal(t, tc.expectedError, err)
			} else {
				require.NoError(t, err)
				assert.Equal(t, tc.expectedResult, result)
			}
		})
	}
}

func TestGetCondition(t *testing.T) {
	ctx := context.Background()
	conditionBuilder := NewConditionBuilder()

	testCases := []struct {
		name          string
		key           telemetrytypes.TelemetryFieldKey
		operator      qbtypes.FilterOperator
		value         any
		expectedSQL   string
		expectedError error
	}{
		{
			name: "ILike operator - string attribute",
			key: telemetrytypes.TelemetryFieldKey{
				Name:          "user.id",
				FieldContext:  telemetrytypes.FieldContextAttribute,
				FieldDataType: telemetrytypes.FieldDataTypeString,
			},
			operator:      qbtypes.FilterOperatorILike,
			value:         "%admin%",
			expectedSQL:   "WHERE (mapContains(attributes, ?) AND LOWER(attributes['user.id']) LIKE LOWER(?))",
			expectedError: nil,
		},
		{
			name: "Not ILike operator - string attribute",
			key: telemetrytypes.TelemetryFieldKey{
				Name:          "user.id",
				FieldContext:  telemetrytypes.FieldContextAttribute,
				FieldDataType: telemetrytypes.FieldDataTypeString,
			},
			operator:      qbtypes.FilterOperatorNotILike,
			value:         "%admin%",
			expectedSQL:   "WHERE (mapContains(attributes, ?) AND LOWER(attributes['user.id']) NOT LIKE LOWER(?))",
			expectedError: nil,
		},
	}

	for _, tc := range testCases {
		sb := sqlbuilder.NewSelectBuilder()
		t.Run(tc.name, func(t *testing.T) {
			cond, err := conditionBuilder.GetCondition(ctx, &tc.key, tc.operator, tc.value, sb)
			sb.Where(cond)

			if tc.expectedError != nil {
				assert.Equal(t, tc.expectedError, err)
			} else {
				require.NoError(t, err)
				sql, _ := sb.BuildWithFlavor(sqlbuilder.ClickHouse)
				assert.Contains(t, sql, tc.expectedSQL)
			}
		})
	}
}
691
pkg/telemetrymetadata/metadata.go
Normal file
@@ -0,0 +1,691 @@
package telemetrymetadata

import (
	"context"
	"fmt"
	"strings"

	"github.com/SigNoz/signoz/pkg/errors"
	"github.com/SigNoz/signoz/pkg/telemetrystore"
	qbtypes "github.com/SigNoz/signoz/pkg/types/querybuildertypes/querybuildertypesv5"
	"github.com/SigNoz/signoz/pkg/types/telemetrytypes"
	"github.com/huandu/go-sqlbuilder"
	"go.uber.org/zap"
)

var (
	ErrFailedToGetTracesKeys    = errors.Newf(errors.TypeInternal, errors.CodeInternal, "failed to get traces keys")
	ErrFailedToGetLogsKeys      = errors.Newf(errors.TypeInternal, errors.CodeInternal, "failed to get logs keys")
	ErrFailedToGetTblStatement  = errors.Newf(errors.TypeInternal, errors.CodeInternal, "failed to get tbl statement")
	ErrFailedToGetMetricsKeys   = errors.Newf(errors.TypeInternal, errors.CodeInternal, "failed to get metrics keys")
	ErrFailedToGetRelatedValues = errors.Newf(errors.TypeInternal, errors.CodeInternal, "failed to get related values")
)

type telemetryMetaStore struct {
	telemetrystore         telemetrystore.TelemetryStore
	tracesDBName           string
	tracesFieldsTblName    string
	indexV3TblName         string
	metricsDBName          string
	metricsFieldsTblName   string
	timeseries1WTblName    string
	logsDBName             string
	logsFieldsTblName      string
	logsV2TblName          string
	relatedMetadataDBName  string
	relatedMetadataTblName string

	conditionBuilder qbtypes.ConditionBuilder
}

func NewTelemetryMetaStore(
	telemetrystore telemetrystore.TelemetryStore,
	tracesDBName string,
	tracesFieldsTblName string,
	indexV3TblName string,
	metricsDBName string,
	metricsFieldsTblName string,
	timeseries1WTblName string,
	logsDBName string,
	logsV2TblName string,
	logsFieldsTblName string,
	relatedMetadataDBName string,
	relatedMetadataTblName string,
) (telemetrytypes.MetadataStore, error) {
	return &telemetryMetaStore{
		telemetrystore:         telemetrystore,
		tracesDBName:           tracesDBName,
		tracesFieldsTblName:    tracesFieldsTblName,
		indexV3TblName:         indexV3TblName,
		metricsDBName:          metricsDBName,
		metricsFieldsTblName:   metricsFieldsTblName,
		timeseries1WTblName:    timeseries1WTblName,
		logsDBName:             logsDBName,
		logsV2TblName:          logsV2TblName,
		logsFieldsTblName:      logsFieldsTblName,
		relatedMetadataDBName:  relatedMetadataDBName,
		relatedMetadataTblName: relatedMetadataTblName,

		conditionBuilder: NewConditionBuilder(),
	}, nil
}

// tracesTblStatementToFieldKeys returns materialised attribute/resource/scope keys from the traces table
func (t *telemetryMetaStore) tracesTblStatementToFieldKeys(ctx context.Context) ([]*telemetrytypes.TelemetryFieldKey, error) {
	query := fmt.Sprintf("SHOW CREATE TABLE %s.%s", t.tracesDBName, t.indexV3TblName)
	statements := []telemetrytypes.ShowCreateTableStatement{}
	err := t.telemetrystore.ClickhouseDB().Select(ctx, &statements, query)
	if err != nil {
		return nil, errors.Wrapf(err, errors.TypeInternal, errors.CodeInternal, ErrFailedToGetTblStatement.Error())
	}

	return ExtractFieldKeysFromTblStatement(statements[0].Statement)
}

// getTracesKeys returns the keys from the spans that match the field selection criteria
func (t *telemetryMetaStore) getTracesKeys(ctx context.Context, fieldKeySelectors []*telemetrytypes.FieldKeySelector) ([]*telemetrytypes.TelemetryFieldKey, error) {

	if len(fieldKeySelectors) == 0 {
		return nil, nil
	}

	// pre-fetch the materialised keys from the traces table
	matKeys, err := t.tracesTblStatementToFieldKeys(ctx)
	if err != nil {
		return nil, err
	}
	mapOfKeys := make(map[string]*telemetrytypes.TelemetryFieldKey)
	for _, key := range matKeys {
		mapOfKeys[key.Name+";"+key.FieldContext.StringValue()+";"+key.FieldDataType.StringValue()] = key
	}

	sb := sqlbuilder.Select("tag_key", "tag_type", "tag_data_type", `
	CASE
		WHEN tag_type = 'spanfield' THEN 1
		WHEN tag_type = 'resource' THEN 2
		WHEN tag_type = 'scope' THEN 3
		WHEN tag_type = 'tag' THEN 4
		ELSE 5
	END as priority`).From(t.tracesDBName + "." + t.tracesFieldsTblName)
	var limit int

	conds := []string{}
	for _, fieldKeySelector := range fieldKeySelectors {

		if fieldKeySelector.StartUnixMilli != 0 {
			conds = append(conds, sb.GE("unix_milli", fieldKeySelector.StartUnixMilli))
		}
		if fieldKeySelector.EndUnixMilli != 0 {
			conds = append(conds, sb.LE("unix_milli", fieldKeySelector.EndUnixMilli))
		}

		// key part of the selector
		fieldKeyConds := []string{}
		if fieldKeySelector.SelectorMatchType == telemetrytypes.FieldSelectorMatchTypeExact {
			fieldKeyConds = append(fieldKeyConds, sb.E("tag_key", fieldKeySelector.Name))
		} else {
			fieldKeyConds = append(fieldKeyConds, sb.Like("tag_key", "%"+fieldKeySelector.Name+"%"))
		}

		// now look at the field context
		if fieldKeySelector.FieldContext != telemetrytypes.FieldContextUnspecified {
			fieldKeyConds = append(fieldKeyConds, sb.E("tag_type", fieldKeySelector.FieldContext.TagType()))
		}

		// now look at the field data type
		if fieldKeySelector.FieldDataType != telemetrytypes.FieldDataTypeUnspecified {
			fieldKeyConds = append(fieldKeyConds, sb.E("tag_data_type", fieldKeySelector.FieldDataType.TagDataType()))
		}

		conds = append(conds, sb.And(fieldKeyConds...))
		limit += fieldKeySelector.Limit
	}
	sb.Where(sb.Or(conds...))

	if limit == 0 {
		limit = 1000
	}

	mainSb := sqlbuilder.Select("tag_key", "tag_type", "tag_data_type", "max(priority) as priority")
	mainSb.From(mainSb.BuilderAs(sb, "sub_query"))
	mainSb.GroupBy("tag_key", "tag_type", "tag_data_type")
	mainSb.OrderBy("priority")
	mainSb.Limit(limit)

	query, args := mainSb.BuildWithFlavor(sqlbuilder.ClickHouse)

	rows, err := t.telemetrystore.ClickhouseDB().Query(ctx, query, args...)
	if err != nil {
		return nil, errors.Wrapf(err, errors.TypeInternal, errors.CodeInternal, ErrFailedToGetTracesKeys.Error())
	}
	defer rows.Close()
	keys := []*telemetrytypes.TelemetryFieldKey{}
	for rows.Next() {
		var name string
		var fieldContext telemetrytypes.FieldContext
		var fieldDataType telemetrytypes.FieldDataType
		var priority uint8
		err = rows.Scan(&name, &fieldContext, &fieldDataType, &priority)
		if err != nil {
			return nil, errors.Wrapf(err, errors.TypeInternal, errors.CodeInternal, ErrFailedToGetTracesKeys.Error())
		}
		key, ok := mapOfKeys[name+";"+fieldContext.StringValue()+";"+fieldDataType.StringValue()]

		// if there is no materialised column, create a key with the field context and data type
		if !ok {
			key = &telemetrytypes.TelemetryFieldKey{
				Name:          name,
				FieldContext:  fieldContext,
				FieldDataType: fieldDataType,
			}
		}

		keys = append(keys, key)
	}

	if rows.Err() != nil {
		return nil, errors.Wrapf(rows.Err(), errors.TypeInternal, errors.CodeInternal, ErrFailedToGetTracesKeys.Error())
	}

	return keys, nil
}
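For orientation, the two-level builder above yields a query of roughly this shape (an illustrative sketch; the exact rendering and placeholders come from go-sqlbuilder):

	SELECT tag_key, tag_type, tag_data_type, max(priority) AS priority
	FROM (
		SELECT tag_key, tag_type, tag_data_type, CASE ... END AS priority
		FROM <tracesDBName>.<tracesFieldsTblName>
		WHERE <time range and key/context/data-type conditions>
	) AS sub_query
	GROUP BY tag_key, tag_type, tag_data_type
	ORDER BY priority
	LIMIT <sum of selector limits, defaulting to 1000>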

// logsTblStatementToFieldKeys returns materialised attribute/resource/scope keys from the logs table
func (t *telemetryMetaStore) logsTblStatementToFieldKeys(ctx context.Context) ([]*telemetrytypes.TelemetryFieldKey, error) {
	query := fmt.Sprintf("SHOW CREATE TABLE %s.%s", t.logsDBName, t.logsV2TblName)
	statements := []telemetrytypes.ShowCreateTableStatement{}
	err := t.telemetrystore.ClickhouseDB().Select(ctx, &statements, query)
	if err != nil {
		return nil, errors.Wrapf(err, errors.TypeInternal, errors.CodeInternal, ErrFailedToGetTblStatement.Error())
	}

	return ExtractFieldKeysFromTblStatement(statements[0].Statement)
}

// getLogsKeys returns the keys from the logs that match the field selection criteria
func (t *telemetryMetaStore) getLogsKeys(ctx context.Context, fieldKeySelectors []*telemetrytypes.FieldKeySelector) ([]*telemetrytypes.TelemetryFieldKey, error) {
	if len(fieldKeySelectors) == 0 {
		return nil, nil
	}

	// pre-fetch the materialised keys from the logs table
	matKeys, err := t.logsTblStatementToFieldKeys(ctx)
	if err != nil {
		return nil, err
	}
	mapOfKeys := make(map[string]*telemetrytypes.TelemetryFieldKey)
	for _, key := range matKeys {
		mapOfKeys[key.Name+";"+key.FieldContext.StringValue()+";"+key.FieldDataType.StringValue()] = key
	}

	sb := sqlbuilder.Select("tag_key", "tag_type", "tag_data_type", `
	CASE
		WHEN tag_type = 'logfield' THEN 1
		WHEN tag_type = 'resource' THEN 2
		WHEN tag_type = 'scope' THEN 3
		WHEN tag_type = 'tag' THEN 4
		ELSE 5
	END as priority`).From(t.logsDBName + "." + t.logsFieldsTblName)
	var limit int

	conds := []string{}
	for _, fieldKeySelector := range fieldKeySelectors {

		if fieldKeySelector.StartUnixMilli != 0 {
			conds = append(conds, sb.GE("unix_milli", fieldKeySelector.StartUnixMilli))
		}
		if fieldKeySelector.EndUnixMilli != 0 {
			conds = append(conds, sb.LE("unix_milli", fieldKeySelector.EndUnixMilli))
		}

		// key part of the selector
		fieldKeyConds := []string{}
		if fieldKeySelector.SelectorMatchType == telemetrytypes.FieldSelectorMatchTypeExact {
			fieldKeyConds = append(fieldKeyConds, sb.E("tag_key", fieldKeySelector.Name))
		} else {
			fieldKeyConds = append(fieldKeyConds, sb.Like("tag_key", "%"+fieldKeySelector.Name+"%"))
		}

		// now look at the field context
		if fieldKeySelector.FieldContext != telemetrytypes.FieldContextUnspecified {
			fieldKeyConds = append(fieldKeyConds, sb.E("tag_type", fieldKeySelector.FieldContext.TagType()))
		}

		// now look at the field data type
		if fieldKeySelector.FieldDataType != telemetrytypes.FieldDataTypeUnspecified {
			fieldKeyConds = append(fieldKeyConds, sb.E("tag_data_type", fieldKeySelector.FieldDataType.TagDataType()))
		}

		conds = append(conds, sb.And(fieldKeyConds...))
		limit += fieldKeySelector.Limit
	}
	sb.Where(sb.Or(conds...))
	if limit == 0 {
		limit = 1000
	}

	mainSb := sqlbuilder.Select("tag_key", "tag_type", "tag_data_type", "max(priority) as priority")
	mainSb.From(mainSb.BuilderAs(sb, "sub_query"))
	mainSb.GroupBy("tag_key", "tag_type", "tag_data_type")
	mainSb.OrderBy("priority")
	mainSb.Limit(limit)

	query, args := mainSb.BuildWithFlavor(sqlbuilder.ClickHouse)

	rows, err := t.telemetrystore.ClickhouseDB().Query(ctx, query, args...)
	if err != nil {
		return nil, errors.Wrapf(err, errors.TypeInternal, errors.CodeInternal, ErrFailedToGetLogsKeys.Error())
	}
	defer rows.Close()
	keys := []*telemetrytypes.TelemetryFieldKey{}
	for rows.Next() {
		var name string
		var fieldContext telemetrytypes.FieldContext
		var fieldDataType telemetrytypes.FieldDataType
		var priority uint8
		err = rows.Scan(&name, &fieldContext, &fieldDataType, &priority)
		if err != nil {
			return nil, errors.Wrapf(err, errors.TypeInternal, errors.CodeInternal, ErrFailedToGetLogsKeys.Error())
		}
		key, ok := mapOfKeys[name+";"+fieldContext.StringValue()+";"+fieldDataType.StringValue()]

		// if there is no materialised column, create a key with the field context and data type
		if !ok {
			key = &telemetrytypes.TelemetryFieldKey{
				Name:          name,
				FieldContext:  fieldContext,
				FieldDataType: fieldDataType,
			}
		}

		keys = append(keys, key)
	}

	if rows.Err() != nil {
		return nil, errors.Wrapf(rows.Err(), errors.TypeInternal, errors.CodeInternal, ErrFailedToGetLogsKeys.Error())
	}

	return keys, nil
}

// getMetricsKeys returns the keys from the metrics that match the field selection criteria
// TODO(srikanthccv): update the implementation after the dot metrics migration is done
func (t *telemetryMetaStore) getMetricsKeys(ctx context.Context, fieldKeySelectors []*telemetrytypes.FieldKeySelector) ([]*telemetrytypes.TelemetryFieldKey, error) {
	if len(fieldKeySelectors) == 0 {
		return nil, nil
	}

	var whereClause, innerWhereClause string
	var limit int
	args := []any{}

	for _, fieldKeySelector := range fieldKeySelectors {
		if fieldKeySelector.MetricContext != nil {
			innerWhereClause += "metric_name IN ? AND"
			args = append(args, fieldKeySelector.MetricContext.MetricName)
		}
	}
	innerWhereClause += " __normalized = true"

	for idx, fieldKeySelector := range fieldKeySelectors {
		if fieldKeySelector.SelectorMatchType == telemetrytypes.FieldSelectorMatchTypeExact {
			whereClause += "(distinctTagKey = ? AND distinctTagKey NOT LIKE '\\_\\_%%')"
			args = append(args, fieldKeySelector.Name)
		} else {
			whereClause += "(distinctTagKey ILIKE ? AND distinctTagKey NOT LIKE '\\_\\_%%')"
			args = append(args, fmt.Sprintf("%%%s%%", fieldKeySelector.Name))
		}
		if idx != len(fieldKeySelectors)-1 {
			whereClause += " OR "
		}
		limit += fieldKeySelector.Limit
	}
	args = append(args, limit)

	query := fmt.Sprintf(`
		SELECT
			arrayJoin(tagKeys) AS distinctTagKey
		FROM (
			SELECT JSONExtractKeys(labels) AS tagKeys
			FROM %s.%s
			WHERE `+innerWhereClause+`
			GROUP BY tagKeys
		)
		WHERE `+whereClause+`
		GROUP BY distinctTagKey
		LIMIT ?
	`, t.metricsDBName, t.timeseries1WTblName)

	rows, err := t.telemetrystore.ClickhouseDB().Query(ctx, query, args...)
	if err != nil {
		return nil, errors.Wrapf(err, errors.TypeInternal, errors.CodeInternal, ErrFailedToGetMetricsKeys.Error())
	}
	defer rows.Close()

	keys := []*telemetrytypes.TelemetryFieldKey{}
	for rows.Next() {
		var name string
		err = rows.Scan(&name)
		if err != nil {
			return nil, errors.Wrapf(err, errors.TypeInternal, errors.CodeInternal, ErrFailedToGetMetricsKeys.Error())
		}
		key := &telemetrytypes.TelemetryFieldKey{
			Name:          name,
			FieldContext:  telemetrytypes.FieldContextAttribute,
			FieldDataType: telemetrytypes.FieldDataTypeString,
		}
		keys = append(keys, key)
	}

	if rows.Err() != nil {
		return nil, errors.Wrapf(rows.Err(), errors.TypeInternal, errors.CodeInternal, ErrFailedToGetMetricsKeys.Error())
	}

	return keys, nil
}
|
||||

func (t *telemetryMetaStore) GetKeys(ctx context.Context, fieldKeySelector *telemetrytypes.FieldKeySelector) (map[string][]*telemetrytypes.TelemetryFieldKey, error) {
	var keys []*telemetrytypes.TelemetryFieldKey
	var err error
	switch fieldKeySelector.Signal {
	case telemetrytypes.SignalTraces:
		keys, err = t.getTracesKeys(ctx, []*telemetrytypes.FieldKeySelector{fieldKeySelector})
	case telemetrytypes.SignalLogs:
		keys, err = t.getLogsKeys(ctx, []*telemetrytypes.FieldKeySelector{fieldKeySelector})
	case telemetrytypes.SignalMetrics:
		keys, err = t.getMetricsKeys(ctx, []*telemetrytypes.FieldKeySelector{fieldKeySelector})
	case telemetrytypes.SignalUnspecified:
		// get traces keys
		tracesKeys, err := t.getTracesKeys(ctx, []*telemetrytypes.FieldKeySelector{fieldKeySelector})
		if err != nil {
			return nil, err
		}
		keys = append(keys, tracesKeys...)

		// get logs keys
		logsKeys, err := t.getLogsKeys(ctx, []*telemetrytypes.FieldKeySelector{fieldKeySelector})
		if err != nil {
			return nil, err
		}
		keys = append(keys, logsKeys...)

		// get metrics keys
		metricsKeys, err := t.getMetricsKeys(ctx, []*telemetrytypes.FieldKeySelector{fieldKeySelector})
		if err != nil {
			return nil, err
		}
		keys = append(keys, metricsKeys...)
	}
	if err != nil {
		return nil, err
	}

	mapOfKeys := make(map[string][]*telemetrytypes.TelemetryFieldKey)
	for _, key := range keys {
		mapOfKeys[key.Name] = append(mapOfKeys[key.Name], key)
	}

	return mapOfKeys, nil
}

func (t *telemetryMetaStore) GetKeysMulti(ctx context.Context, fieldKeySelectors []*telemetrytypes.FieldKeySelector) (map[string][]*telemetrytypes.TelemetryFieldKey, error) {
	logsSelectors := []*telemetrytypes.FieldKeySelector{}
	tracesSelectors := []*telemetrytypes.FieldKeySelector{}
	metricsSelectors := []*telemetrytypes.FieldKeySelector{}

	for _, fieldKeySelector := range fieldKeySelectors {
		switch fieldKeySelector.Signal {
		case telemetrytypes.SignalLogs:
			logsSelectors = append(logsSelectors, fieldKeySelector)
		case telemetrytypes.SignalTraces:
			tracesSelectors = append(tracesSelectors, fieldKeySelector)
		case telemetrytypes.SignalMetrics:
			metricsSelectors = append(metricsSelectors, fieldKeySelector)
		case telemetrytypes.SignalUnspecified:
			// an unspecified signal fans out to all three signals
			logsSelectors = append(logsSelectors, fieldKeySelector)
			tracesSelectors = append(tracesSelectors, fieldKeySelector)
			metricsSelectors = append(metricsSelectors, fieldKeySelector)
		}
	}

	logsKeys, err := t.getLogsKeys(ctx, logsSelectors)
	if err != nil {
		return nil, err
	}
	tracesKeys, err := t.getTracesKeys(ctx, tracesSelectors)
	if err != nil {
		return nil, err
	}
	metricsKeys, err := t.getMetricsKeys(ctx, metricsSelectors)
	if err != nil {
		return nil, err
	}

	mapOfKeys := make(map[string][]*telemetrytypes.TelemetryFieldKey)
	for _, key := range logsKeys {
		mapOfKeys[key.Name] = append(mapOfKeys[key.Name], key)
	}
	for _, key := range tracesKeys {
		mapOfKeys[key.Name] = append(mapOfKeys[key.Name], key)
	}
	for _, key := range metricsKeys {
		mapOfKeys[key.Name] = append(mapOfKeys[key.Name], key)
	}

	return mapOfKeys, nil
}
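
// A minimal usage sketch (hypothetical selectors, shown for illustration only):
//
//	selectors := []*telemetrytypes.FieldKeySelector{
//		{Signal: telemetrytypes.SignalLogs, Name: "http", Limit: 10},
//		{Signal: telemetrytypes.SignalTraces, Name: "http", Limit: 10},
//	}
//	keysByName, err := metaStore.GetKeysMulti(ctx, selectors)
//	// keysByName["http.method"] may hold one entry per signal/context in
//	// which the key was seen.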

func (t *telemetryMetaStore) GetKey(ctx context.Context, fieldKeySelector *telemetrytypes.FieldKeySelector) ([]*telemetrytypes.TelemetryFieldKey, error) {
	keys, err := t.GetKeys(ctx, fieldKeySelector)
	if err != nil {
		return nil, err
	}
	return keys[fieldKeySelector.Name], nil
}

func (t *telemetryMetaStore) getRelatedValues(ctx context.Context, fieldValueSelector *telemetrytypes.FieldValueSelector) ([]string, error) {
	args := []any{}

	var andConditions []string

	andConditions = append(andConditions, `unix_milli >= ?`)
	args = append(args, fieldValueSelector.StartUnixMilli)

	andConditions = append(andConditions, `unix_milli <= ?`)
	args = append(args, fieldValueSelector.EndUnixMilli)

	if len(fieldValueSelector.ExistingQuery) != 0 {
		// TODO(srikanthccv): add the existing query to the where clause
	}
	whereClause := strings.Join(andConditions, " AND ")

	key := telemetrytypes.TelemetryFieldKey{
		Name:          fieldValueSelector.Name,
		Signal:        fieldValueSelector.Signal,
		FieldContext:  fieldValueSelector.FieldContext,
		FieldDataType: fieldValueSelector.FieldDataType,
	}

	// TODO(srikanthccv): add the select column
	selectColumn, _ := t.conditionBuilder.GetTableFieldName(ctx, &key)

	args = append(args, fieldValueSelector.Limit)
	filterSubQuery := fmt.Sprintf(
		"SELECT DISTINCT %s FROM %s.%s WHERE %s LIMIT ?",
		selectColumn,
		t.relatedMetadataDBName,
		t.relatedMetadataTblName,
		whereClause,
	)
	zap.L().Debug("filterSubQuery for related values", zap.String("query", filterSubQuery), zap.Any("args", args))

	rows, err := t.telemetrystore.ClickhouseDB().Query(ctx, filterSubQuery, args...)
	if err != nil {
		return nil, ErrFailedToGetRelatedValues
	}
	defer rows.Close()

	var attributeValues []string
	for rows.Next() {
		var value string
		if err := rows.Scan(&value); err != nil {
			return nil, ErrFailedToGetRelatedValues
		}
		if value != "" {
			attributeValues = append(attributeValues, value)
		}
	}

	return attributeValues, nil
}

func (t *telemetryMetaStore) GetRelatedValues(ctx context.Context, fieldValueSelector *telemetrytypes.FieldValueSelector) ([]string, error) {
	return t.getRelatedValues(ctx, fieldValueSelector)
}
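
// The assembled sub-query takes roughly this shape (a sketch; the resolved
// select column depends on the condition builder and is shown generically):
//
//	SELECT DISTINCT <resolved column>
//	FROM signoz_metadata.attributes_metadata
//	WHERE unix_milli >= ? AND unix_milli <= ?
//	LIMIT ?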

func (t *telemetryMetaStore) getSpanFieldValues(ctx context.Context, fieldValueSelector *telemetrytypes.FieldValueSelector) (*telemetrytypes.TelemetryFieldValues, error) {
	// build the query to get the values from the spans that match the field selection criteria
	// respect the selector's limit, falling back to 50 below
	limit := fieldValueSelector.Limit

	sb := sqlbuilder.Select("DISTINCT string_value, number_value").From(t.tracesDBName + "." + t.tracesFieldsTblName)

	if fieldValueSelector.Name != "" {
		sb.Where(sb.E("tag_key", fieldValueSelector.Name))
	}

	// now look at the field context
	if fieldValueSelector.FieldContext != telemetrytypes.FieldContextUnspecified {
		sb.Where(sb.E("tag_type", fieldValueSelector.FieldContext.TagType()))
	}

	// now look at the field data type
	if fieldValueSelector.FieldDataType != telemetrytypes.FieldDataTypeUnspecified {
		sb.Where(sb.E("tag_data_type", fieldValueSelector.FieldDataType.TagDataType()))
	}

	if fieldValueSelector.Value != "" {
		if fieldValueSelector.FieldDataType == telemetrytypes.FieldDataTypeString {
			sb.Where(sb.Like("string_value", "%"+fieldValueSelector.Value+"%"))
		} else if fieldValueSelector.FieldDataType == telemetrytypes.FieldDataTypeNumber {
			sb.Where(sb.IsNotNull("number_value"))
			sb.Where(sb.Like("toString(number_value)", "%"+fieldValueSelector.Value+"%"))
		}
	}

	if limit == 0 {
		limit = 50
	}
	sb.Limit(limit)

	query, args := sb.BuildWithFlavor(sqlbuilder.ClickHouse)

	rows, err := t.telemetrystore.ClickhouseDB().Query(ctx, query, args...)
	if err != nil {
		return nil, errors.Wrapf(err, errors.TypeInternal, errors.CodeInternal, ErrFailedToGetLogsKeys.Error())
	}
	defer rows.Close()

	values := &telemetrytypes.TelemetryFieldValues{}
	seen := make(map[string]bool)
	for rows.Next() {
		var stringValue string
		var numberValue float64
		if err := rows.Scan(&stringValue, &numberValue); err != nil {
			return nil, errors.Wrapf(err, errors.TypeInternal, errors.CodeInternal, ErrFailedToGetLogsKeys.Error())
		}
		if _, ok := seen[stringValue]; !ok {
			values.StringValues = append(values.StringValues, stringValue)
			seen[stringValue] = true
		}
		if _, ok := seen[fmt.Sprintf("%f", numberValue)]; !ok && numberValue != 0 {
			values.NumberValues = append(values.NumberValues, numberValue)
			seen[fmt.Sprintf("%f", numberValue)] = true
		}
	}

	return values, nil
}

func (t *telemetryMetaStore) getLogFieldValues(ctx context.Context, fieldValueSelector *telemetrytypes.FieldValueSelector) (*telemetrytypes.TelemetryFieldValues, error) {
	// build the query to get the values from the logs that match the field selection criteria
	// respect the selector's limit, falling back to 50 below
	limit := fieldValueSelector.Limit

	sb := sqlbuilder.Select("DISTINCT string_value, number_value").From(t.logsDBName + "." + t.logsFieldsTblName)

	if fieldValueSelector.Name != "" {
		sb.Where(sb.E("tag_key", fieldValueSelector.Name))
	}

	if fieldValueSelector.FieldContext != telemetrytypes.FieldContextUnspecified {
		sb.Where(sb.E("tag_type", fieldValueSelector.FieldContext.TagType()))
	}

	if fieldValueSelector.FieldDataType != telemetrytypes.FieldDataTypeUnspecified {
		sb.Where(sb.E("tag_data_type", fieldValueSelector.FieldDataType.TagDataType()))
	}

	if fieldValueSelector.Value != "" {
		if fieldValueSelector.FieldDataType == telemetrytypes.FieldDataTypeString {
			sb.Where(sb.Like("string_value", "%"+fieldValueSelector.Value+"%"))
		} else if fieldValueSelector.FieldDataType == telemetrytypes.FieldDataTypeNumber {
			sb.Where(sb.IsNotNull("number_value"))
			sb.Where(sb.Like("toString(number_value)", "%"+fieldValueSelector.Value+"%"))
		}
	}

	if limit == 0 {
		limit = 50
	}
	sb.Limit(limit)

	query, args := sb.BuildWithFlavor(sqlbuilder.ClickHouse)

	rows, err := t.telemetrystore.ClickhouseDB().Query(ctx, query, args...)
	if err != nil {
		return nil, errors.Wrapf(err, errors.TypeInternal, errors.CodeInternal, ErrFailedToGetLogsKeys.Error())
	}
	defer rows.Close()

	values := &telemetrytypes.TelemetryFieldValues{}
	seen := make(map[string]bool)
	for rows.Next() {
		var stringValue string
		var numberValue float64
		if err := rows.Scan(&stringValue, &numberValue); err != nil {
			return nil, errors.Wrapf(err, errors.TypeInternal, errors.CodeInternal, ErrFailedToGetLogsKeys.Error())
		}
		if _, ok := seen[stringValue]; !ok {
			values.StringValues = append(values.StringValues, stringValue)
			seen[stringValue] = true
		}
		if _, ok := seen[fmt.Sprintf("%f", numberValue)]; !ok && numberValue != 0 {
			values.NumberValues = append(values.NumberValues, numberValue)
			seen[fmt.Sprintf("%f", numberValue)] = true
		}
	}
	return values, nil
}

func (t *telemetryMetaStore) getMetricFieldValues(_ context.Context, fieldValueSelector *telemetrytypes.FieldValueSelector) (*telemetrytypes.TelemetryFieldValues, error) {
	// TODO(srikanthccv): implement this. use new tables?
	return nil, nil
}

func (t *telemetryMetaStore) GetAllValues(ctx context.Context, fieldValueSelector *telemetrytypes.FieldValueSelector) (*telemetrytypes.TelemetryFieldValues, error) {
	var values *telemetrytypes.TelemetryFieldValues
	var err error
	switch fieldValueSelector.Signal {
	case telemetrytypes.SignalTraces:
		values, err = t.getSpanFieldValues(ctx, fieldValueSelector)
	case telemetrytypes.SignalLogs:
		values, err = t.getLogFieldValues(ctx, fieldValueSelector)
	case telemetrytypes.SignalMetrics:
		values, err = t.getMetricFieldValues(ctx, fieldValueSelector)
	}
	if err != nil {
		return nil, err
	}
	return values, nil
}
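
A minimal usage sketch of the values API; the store variable and the selector values are assumptions for illustration:

	values, err := metaStore.GetAllValues(ctx, &telemetrytypes.FieldValueSelector{
		Signal:        telemetrytypes.SignalTraces,
		Name:          "http.method",
		FieldContext:  telemetrytypes.FieldContextAttribute,
		FieldDataType: telemetrytypes.FieldDataTypeString,
		Value:         "GE", // substring to narrow suggestions, e.g. matches "GET"
		Limit:         50,
	})
	if err != nil {
		// handle the error
	}
	// values.StringValues holds the matching distinct string values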

86 pkg/telemetrymetadata/metadata_test.go Normal file
@@ -0,0 +1,86 @@
package telemetrymetadata

import (
	"context"
	"fmt"
	"regexp"
	"testing"

	"github.com/SigNoz/signoz/pkg/telemetrylogs"
	"github.com/SigNoz/signoz/pkg/telemetrymetrics"
	"github.com/SigNoz/signoz/pkg/telemetrystore"
	"github.com/SigNoz/signoz/pkg/telemetrystore/telemetrystoretest"
	"github.com/SigNoz/signoz/pkg/telemetrytraces"
	"github.com/SigNoz/signoz/pkg/types/telemetrytypes"
	cmock "github.com/srikanthccv/ClickHouse-go-mock"
)

type regexMatcher struct {
}

func (m *regexMatcher) Match(expectedSQL, actualSQL string) error {
	re, err := regexp.Compile(expectedSQL)
	if err != nil {
		return err
	}
	if !re.MatchString(actualSQL) {
		return fmt.Errorf("expected query to contain %s, got %s", expectedSQL, actualSQL)
	}
	return nil
}

func TestGetKeys(t *testing.T) {
	mockTelemetryStore := telemetrystoretest.New(telemetrystore.Config{}, &regexMatcher{})
	mock := mockTelemetryStore.Mock()

	metadata, err := NewTelemetryMetaStore(
		mockTelemetryStore,
		telemetrytraces.DBName,
		telemetrytraces.TagAttributesV2TableName,
		telemetrytraces.SpanIndexV3TableName,
		telemetrymetrics.DBName,
		telemetrymetrics.TimeseriesV41weekTableName,
		telemetrymetrics.TimeseriesV41weekTableName,
		telemetrylogs.DBName,
		telemetrylogs.LogsV2TableName,
		telemetrylogs.TagAttributesV2TableName,
		DBName,
		AttributesMetadataLocalTableName,
	)
	if err != nil {
		t.Fatalf("Failed to create telemetry metadata store: %v", err)
	}

	rows := cmock.NewRows([]cmock.ColumnType{
		{Name: "statement", Type: "String"},
	}, [][]any{{"CREATE TABLE signoz_traces.signoz_index_v3"}})

	mock.
		ExpectSelect("SHOW CREATE TABLE signoz_traces.distributed_signoz_index_v3").
		WillReturnRows(rows)

	query := `SELECT.*`

	mock.ExpectQuery(query).
		WithArgs("%http.method%", telemetrytypes.FieldContextSpan.TagType(), telemetrytypes.FieldDataTypeString.TagDataType(), 10).
		WillReturnRows(cmock.NewRows([]cmock.ColumnType{
			{Name: "tag_key", Type: "String"},
			{Name: "tag_type", Type: "String"},
			{Name: "tag_data_type", Type: "String"},
			{Name: "priority", Type: "UInt8"},
		}, [][]any{{"http.method", "tag", "String", 1}, {"http.method", "tag", "String", 1}}))
	keys, err := metadata.GetKeys(context.Background(), &telemetrytypes.FieldKeySelector{
		Signal:        telemetrytypes.SignalTraces,
		FieldContext:  telemetrytypes.FieldContextSpan,
		FieldDataType: telemetrytypes.FieldDataTypeString,
		Name:          "http.method",
		Limit:         10,
	})
	if err != nil {
		t.Fatalf("Failed to get keys: %v", err)
	}

	t.Logf("Keys: %v", keys)
}

132 pkg/telemetrymetadata/stmt_parse.go Normal file
@@ -0,0 +1,132 @@
package telemetrymetadata

import (
	"strings"

	"github.com/AfterShip/clickhouse-sql-parser/parser"
	"github.com/SigNoz/signoz/pkg/types/telemetrytypes"
)

// TelemetryFieldVisitor is an AST visitor for extracting telemetry fields
type TelemetryFieldVisitor struct {
	parser.DefaultASTVisitor
	Fields []*telemetrytypes.TelemetryFieldKey
}

func NewTelemetryFieldVisitor() *TelemetryFieldVisitor {
	return &TelemetryFieldVisitor{
		Fields: make([]*telemetrytypes.TelemetryFieldKey, 0),
	}
}

// VisitColumnDef is called when visiting a column definition
func (v *TelemetryFieldVisitor) VisitColumnDef(expr *parser.ColumnDef) error {
	// Check if this is a materialized column with DEFAULT expression
	if expr.DefaultExpr == nil {
		return nil
	}

	// Parse column name to extract context and data type
	columnName := expr.Name.String()

	// Remove backticks if present
	columnName = strings.TrimPrefix(columnName, "`")
	columnName = strings.TrimSuffix(columnName, "`")

	// Parse the column name to extract components
	parts := strings.Split(columnName, "_")
	if len(parts) < 2 {
		return nil
	}

	context := parts[0]
	dataType := parts[1]

	// Check if this is a valid telemetry column
	var fieldContext telemetrytypes.FieldContext
	switch context {
	case "resource":
		fieldContext = telemetrytypes.FieldContextResource
	case "scope":
		fieldContext = telemetrytypes.FieldContextScope
	case "attribute":
		fieldContext = telemetrytypes.FieldContextAttribute
	default:
		return nil // Not a telemetry column
	}

	// Check and convert data type
	var fieldDataType telemetrytypes.FieldDataType
	switch dataType {
	case "string":
		fieldDataType = telemetrytypes.FieldDataTypeString
	case "bool":
		fieldDataType = telemetrytypes.FieldDataTypeBool
	case "int", "int64":
		fieldDataType = telemetrytypes.FieldDataTypeFloat64
	case "float", "float64":
		fieldDataType = telemetrytypes.FieldDataTypeFloat64
	case "number":
		fieldDataType = telemetrytypes.FieldDataTypeFloat64
	default:
		return nil // Unknown data type
	}

	// Extract field name from the DEFAULT expression
	// The DEFAULT expression should be something like: resources_string['k8s.cluster.name']
	// We need to extract the key inside the square brackets
	defaultExprStr := expr.DefaultExpr.String()

	// Look for the pattern: map['key']
	startIdx := strings.Index(defaultExprStr, "['")
	endIdx := strings.Index(defaultExprStr, "']")

	if startIdx == -1 || endIdx == -1 || startIdx+2 >= endIdx {
		return nil // Invalid DEFAULT expression format
	}

	fieldName := defaultExprStr[startIdx+2 : endIdx]

	// Create and store the TelemetryFieldKey
	field := telemetrytypes.TelemetryFieldKey{
		Name:          fieldName,
		FieldContext:  fieldContext,
		FieldDataType: fieldDataType,
		Materialized:  true,
	}

	v.Fields = append(v.Fields, &field)
	return nil
}

func ExtractFieldKeysFromTblStatement(statement string) ([]*telemetrytypes.TelemetryFieldKey, error) {
	// Parse the CREATE TABLE statement using the ClickHouse parser
	p := parser.NewParser(statement)
	stmts, err := p.ParseStmts()
	if err != nil {
		return nil, err
	}

	// Create a visitor to collect telemetry fields
	visitor := NewTelemetryFieldVisitor()

	// Visit each statement
	for _, stmt := range stmts {
		// We're looking for CreateTable statements
		createTable, ok := stmt.(*parser.CreateTable)
		if !ok {
			continue
		}

		// Visit the table schema to extract column definitions
		if createTable.TableSchema != nil {
			for _, column := range createTable.TableSchema.Columns {
				if err := column.Accept(visitor); err != nil {
					return nil, err
				}
			}
		}
	}

	return visitor.Fields, nil
}
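
A minimal usage sketch for the extractor; the one-column DDL below is an assumption for illustration (and assumes the parser accepts this minimal statement), not code from the repository:

	statement := "CREATE TABLE signoz_logs.logs_v2 (`resource_string_k8s$$pod$$name` String DEFAULT resources_string['k8s.pod.name']) ENGINE = MergeTree ORDER BY tuple()"
	keys, err := ExtractFieldKeysFromTblStatement(statement)
	if err != nil {
		// handle the parse error
	}
	// keys[0] would be {Name: "k8s.pod.name", FieldContext: resource,
	// FieldDataType: string, Materialized: true}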

148 pkg/telemetrymetadata/stmt_parse_test.go Normal file
@@ -0,0 +1,148 @@
package telemetrymetadata

import (
	"slices"
	"testing"

	"github.com/SigNoz/signoz/pkg/types/telemetrytypes"
)

func TestExtractFieldKeysFromTblStatement(t *testing.T) {
	var statement = `CREATE TABLE signoz_logs.logs_v2
(
	` + "`ts_bucket_start`" + ` UInt64 CODEC(DoubleDelta, LZ4),
	` + "`resource_fingerprint`" + ` String CODEC(ZSTD(1)),
	` + "`timestamp`" + ` UInt64 CODEC(DoubleDelta, LZ4),
	` + "`observed_timestamp`" + ` UInt64 CODEC(DoubleDelta, LZ4),
	` + "`id`" + ` String CODEC(ZSTD(1)),
	` + "`trace_id`" + ` String CODEC(ZSTD(1)),
	` + "`span_id`" + ` String CODEC(ZSTD(1)),
	` + "`trace_flags`" + ` UInt32,
	` + "`severity_text`" + ` LowCardinality(String) CODEC(ZSTD(1)),
	` + "`severity_number`" + ` UInt8,
	` + "`body`" + ` String CODEC(ZSTD(2)),
	` + "`attributes_string`" + ` Map(LowCardinality(String), String) CODEC(ZSTD(1)),
	` + "`attributes_number`" + ` Map(LowCardinality(String), Float64) CODEC(ZSTD(1)),
	` + "`attributes_bool`" + ` Map(LowCardinality(String), Bool) CODEC(ZSTD(1)),
	` + "`resources_string`" + ` Map(LowCardinality(String), String) CODEC(ZSTD(1)),
	` + "`scope_name`" + ` String CODEC(ZSTD(1)),
	` + "`scope_version`" + ` String CODEC(ZSTD(1)),
	` + "`scope_string`" + ` Map(LowCardinality(String), String) CODEC(ZSTD(1)),
	` + "`attribute_number_input_size`" + ` Int64 DEFAULT attributes_number['input_size'] CODEC(ZSTD(1)),
	` + "`attribute_number_input_size_exists`" + ` Bool DEFAULT if(mapContains(attributes_number, 'input_size') != 0, true, false) CODEC(ZSTD(1)),
	` + "`attribute_string_log$$iostream`" + ` String DEFAULT attributes_string['log.iostream'] CODEC(ZSTD(1)),
	` + "`attribute_string_log$$iostream_exists`" + ` Bool DEFAULT if(mapContains(attributes_string, 'log.iostream') != 0, true, false) CODEC(ZSTD(1)),
	` + "`attribute_string_log$$file$$path`" + ` String DEFAULT attributes_string['log.file.path'] CODEC(ZSTD(1)),
	` + "`attribute_string_log$$file$$path_exists`" + ` Bool DEFAULT if(mapContains(attributes_string, 'log.file.path') != 0, true, false) CODEC(ZSTD(1)),
	` + "`resource_string_k8s$$cluster$$name`" + ` String DEFAULT resources_string['k8s.cluster.name'] CODEC(ZSTD(1)),
	` + "`resource_string_k8s$$cluster$$name_exists`" + ` Bool DEFAULT if(mapContains(resources_string, 'k8s.cluster.name') != 0, true, false) CODEC(ZSTD(1)),
	` + "`resource_string_k8s$$namespace$$name`" + ` String DEFAULT resources_string['k8s.namespace.name'] CODEC(ZSTD(1)),
	` + "`resource_string_k8s$$namespace$$name_exists`" + ` Bool DEFAULT if(mapContains(resources_string, 'k8s.namespace.name') != 0, true, false) CODEC(ZSTD(1)),
	` + "`resource_string_k8s$$pod$$name`" + ` String DEFAULT resources_string['k8s.pod.name'] CODEC(ZSTD(1)),
	` + "`resource_string_k8s$$pod$$name_exists`" + ` Bool DEFAULT if(mapContains(resources_string, 'k8s.pod.name') != 0, true, false) CODEC(ZSTD(1)),
	` + "`resource_string_k8s$$node$$name`" + ` String DEFAULT resources_string['k8s.node.name'] CODEC(ZSTD(1)),
	` + "`resource_string_k8s$$node$$name_exists`" + ` Bool DEFAULT if(mapContains(resources_string, 'k8s.node.name') != 0, true, false) CODEC(ZSTD(1)),
	` + "`resource_string_k8s$$container$$name`" + ` String DEFAULT resources_string['k8s.container.name'] CODEC(ZSTD(1)),
	` + "`resource_string_k8s$$container$$name_exists`" + ` Bool DEFAULT if(mapContains(resources_string, 'k8s.container.name') != 0, true, false) CODEC(ZSTD(1)),
	` + "`resource_string_k8s$$deployment$$name`" + ` String DEFAULT resources_string['k8s.deployment.name'] CODEC(ZSTD(1)),
	` + "`resource_string_k8s$$deployment$$name_exists`" + ` Bool DEFAULT if(mapContains(resources_string, 'k8s.deployment.name') != 0, true, false) CODEC(ZSTD(1)),
	` + "`attribute_string_processor`" + ` String DEFAULT attributes_string['processor'] CODEC(ZSTD(1)),
	` + "`attribute_string_processor_exists`" + ` Bool DEFAULT if(mapContains(attributes_string, 'processor') != 0, true, false) CODEC(ZSTD(1)),
	INDEX body_idx lower(body) TYPE ngrambf_v1(4, 60000, 5, 0) GRANULARITY 1,
	INDEX id_minmax id TYPE minmax GRANULARITY 1,
	INDEX severity_number_idx severity_number TYPE set(25) GRANULARITY 4,
	INDEX severity_text_idx severity_text TYPE set(25) GRANULARITY 4,
	INDEX trace_flags_idx trace_flags TYPE bloom_filter GRANULARITY 4,
	INDEX scope_name_idx scope_name TYPE tokenbf_v1(10240, 3, 0) GRANULARITY 4,
	INDEX ` + "`resource_string_k8s$$cluster$$name_idx`" + ` ` + "`resource_string_k8s$$cluster$$name`" + ` TYPE bloom_filter(0.01) GRANULARITY 64,
	INDEX ` + "`resource_string_k8s$$namespace$$name_idx`" + ` ` + "`resource_string_k8s$$namespace$$name`" + ` TYPE bloom_filter(0.01) GRANULARITY 64,
	INDEX ` + "`resource_string_k8s$$pod$$name_idx`" + ` ` + "`resource_string_k8s$$pod$$name`" + ` TYPE bloom_filter(0.01) GRANULARITY 64,
	INDEX ` + "`resource_string_k8s$$node$$name_idx`" + ` ` + "`resource_string_k8s$$node$$name`" + ` TYPE bloom_filter(0.01) GRANULARITY 64,
	INDEX ` + "`resource_string_k8s$$container$$name_idx`" + ` ` + "`resource_string_k8s$$container$$name`" + ` TYPE bloom_filter(0.01) GRANULARITY 64,
	INDEX ` + "`resource_string_k8s$$deployment$$name_idx`" + ` ` + "`resource_string_k8s$$deployment$$name`" + ` TYPE bloom_filter(0.01) GRANULARITY 64,
	INDEX attribute_string_processor_idx attribute_string_processor TYPE bloom_filter(0.01) GRANULARITY 64
)
ENGINE = ReplicatedMergeTree('/clickhouse/tables/{uuid}/{shard}', '{replica}')
PARTITION BY toDate(timestamp / 1000000000)
ORDER BY (ts_bucket_start, resource_fingerprint, severity_text, timestamp, id)
TTL toDateTime(timestamp / 1000000000) + toIntervalSecond(2592000)
SETTINGS ttl_only_drop_parts = 1, index_granularity = 8192`

	keys, err := ExtractFieldKeysFromTblStatement(statement)
	if err != nil {
		t.Fatalf("failed to extract field keys from tbl statement: %v", err)
	}

	// some expected keys
	expectedKeys := []*telemetrytypes.TelemetryFieldKey{
		{
			Name:          "k8s.pod.name",
			FieldContext:  telemetrytypes.FieldContextResource,
			FieldDataType: telemetrytypes.FieldDataTypeString,
			Materialized:  true,
		},
		{
			Name:          "k8s.cluster.name",
			FieldContext:  telemetrytypes.FieldContextResource,
			FieldDataType: telemetrytypes.FieldDataTypeString,
			Materialized:  true,
		},
		{
			Name:          "k8s.namespace.name",
			FieldContext:  telemetrytypes.FieldContextResource,
			FieldDataType: telemetrytypes.FieldDataTypeString,
			Materialized:  true,
		},
		{
			Name:          "k8s.deployment.name",
			FieldContext:  telemetrytypes.FieldContextResource,
			FieldDataType: telemetrytypes.FieldDataTypeString,
			Materialized:  true,
		},
		{
			Name:          "k8s.node.name",
			FieldContext:  telemetrytypes.FieldContextResource,
			FieldDataType: telemetrytypes.FieldDataTypeString,
			Materialized:  true,
		},
		{
			Name:          "k8s.container.name",
			FieldContext:  telemetrytypes.FieldContextResource,
			FieldDataType: telemetrytypes.FieldDataTypeString,
			Materialized:  true,
		},
		{
			Name:          "processor",
			FieldContext:  telemetrytypes.FieldContextAttribute,
			FieldDataType: telemetrytypes.FieldDataTypeString,
			Materialized:  true,
		},
		{
			Name:          "input_size",
			FieldContext:  telemetrytypes.FieldContextAttribute,
			FieldDataType: telemetrytypes.FieldDataTypeFloat64,
			Materialized:  true,
		},
		{
			Name:          "log.iostream",
			FieldContext:  telemetrytypes.FieldContextAttribute,
			FieldDataType: telemetrytypes.FieldDataTypeString,
			Materialized:  true,
		},
		{
			Name:          "log.file.path",
			FieldContext:  telemetrytypes.FieldContextAttribute,
			FieldDataType: telemetrytypes.FieldDataTypeString,
			Materialized:  true,
		},
	}

	for _, key := range expectedKeys {
		if !slices.ContainsFunc(keys, func(k *telemetrytypes.TelemetryFieldKey) bool {
			return k.Name == key.Name && k.FieldContext == key.FieldContext && k.FieldDataType == key.FieldDataType && k.Materialized == key.Materialized
		}) {
			t.Errorf("expected key %v not found", key)
		}
	}
}

7 pkg/telemetrymetadata/tables.go Normal file
@@ -0,0 +1,7 @@
package telemetrymetadata

const (
	DBName                           = "signoz_metadata"
	AttributesMetadataTableName      = "distributed_attributes_metadata"
	AttributesMetadataLocalTableName = "attributes_metadata"
)

21 pkg/telemetrymetrics/tables.go Normal file
@@ -0,0 +1,21 @@
package telemetrymetrics

const (
	DBName                          = "signoz_metrics"
	SamplesV4TableName              = "distributed_samples_v4"
	SamplesV4LocalTableName         = "samples_v4"
	SamplesV4Agg5mTableName         = "distributed_samples_v4_agg_5m"
	SamplesV4Agg5mLocalTableName    = "samples_v4_agg_5m"
	SamplesV4Agg30mTableName        = "distributed_samples_v4_agg_30m"
	SamplesV4Agg30mLocalTableName   = "samples_v4_agg_30m"
	ExpHistogramTableName           = "distributed_exp_hist"
	ExpHistogramLocalTableName      = "exp_hist"
	TimeseriesV4TableName           = "distributed_time_series_v4"
	TimeseriesV4LocalTableName      = "time_series_v4"
	TimeseriesV46hrsTableName       = "distributed_time_series_v4_6hrs"
	TimeseriesV46hrsLocalTableName  = "time_series_v4_6hrs"
	TimeseriesV41dayTableName       = "distributed_time_series_v4_1day"
	TimeseriesV41dayLocalTableName  = "time_series_v4_1day"
	TimeseriesV41weekTableName      = "distributed_time_series_v4_1week"
	TimeseriesV41weekLocalTableName = "time_series_v4_1week"
)

352 pkg/telemetrytraces/condition_builder.go Normal file
@@ -0,0 +1,352 @@
package telemetrytraces

import (
	"context"
	"fmt"

	schema "github.com/SigNoz/signoz-otel-collector/cmd/signozschemamigrator/schema_migrator"
	qbtypes "github.com/SigNoz/signoz/pkg/types/querybuildertypes/querybuildertypesv5"
	"github.com/SigNoz/signoz/pkg/types/telemetrytypes"
	"github.com/huandu/go-sqlbuilder"
)

var (
	indexV3Columns = map[string]*schema.Column{
		"ts_bucket_start":      {Name: "ts_bucket_start", Type: schema.ColumnTypeUInt64},
		"resource_fingerprint": {Name: "resource_fingerprint", Type: schema.ColumnTypeString},

		// intrinsic columns
		"timestamp":          {Name: "timestamp", Type: schema.DateTime64ColumnType{Precision: 9, Timezone: "UTC"}},
		"trace_id":           {Name: "trace_id", Type: schema.FixedStringColumnType{Length: 32}},
		"span_id":            {Name: "span_id", Type: schema.ColumnTypeString},
		"trace_state":        {Name: "trace_state", Type: schema.ColumnTypeString},
		"parent_span_id":     {Name: "parent_span_id", Type: schema.ColumnTypeString},
		"flags":              {Name: "flags", Type: schema.ColumnTypeUInt32},
		"name":               {Name: "name", Type: schema.LowCardinalityColumnType{ElementType: schema.ColumnTypeString}},
		"kind":               {Name: "kind", Type: schema.ColumnTypeInt8},
		"kind_string":        {Name: "kind_string", Type: schema.ColumnTypeString},
		"duration_nano":      {Name: "duration_nano", Type: schema.ColumnTypeUInt64},
		"status_code":        {Name: "status_code", Type: schema.ColumnTypeInt16},
		"status_message":     {Name: "status_message", Type: schema.ColumnTypeString},
		"status_code_string": {Name: "status_code_string", Type: schema.ColumnTypeString},

		// attributes columns
		"attributes_string": {Name: "attributes_string", Type: schema.MapColumnType{
			KeyType:   schema.LowCardinalityColumnType{ElementType: schema.ColumnTypeString},
			ValueType: schema.ColumnTypeString,
		}},
		"attributes_number": {Name: "attributes_number", Type: schema.MapColumnType{
			KeyType:   schema.LowCardinalityColumnType{ElementType: schema.ColumnTypeString},
			ValueType: schema.ColumnTypeFloat64,
		}},
		"attributes_bool": {Name: "attributes_bool", Type: schema.MapColumnType{
			KeyType:   schema.LowCardinalityColumnType{ElementType: schema.ColumnTypeString},
			ValueType: schema.ColumnTypeBool,
		}},
		"resources_string": {Name: "resources_string", Type: schema.MapColumnType{
			KeyType:   schema.LowCardinalityColumnType{ElementType: schema.ColumnTypeString},
			ValueType: schema.ColumnTypeString,
		}},

		"events": {Name: "events", Type: schema.ArrayColumnType{
			ElementType: schema.ColumnTypeString,
		}},
		"links": {Name: "links", Type: schema.ColumnTypeString},

		// derived columns
		"response_status_code": {Name: "response_status_code", Type: schema.LowCardinalityColumnType{ElementType: schema.ColumnTypeString}},
		"external_http_url":    {Name: "external_http_url", Type: schema.LowCardinalityColumnType{ElementType: schema.ColumnTypeString}},
		"http_url":             {Name: "http_url", Type: schema.LowCardinalityColumnType{ElementType: schema.ColumnTypeString}},
		"external_http_method": {Name: "external_http_method", Type: schema.LowCardinalityColumnType{ElementType: schema.ColumnTypeString}},
		"http_method":          {Name: "http_method", Type: schema.LowCardinalityColumnType{ElementType: schema.ColumnTypeString}},
		"http_host":            {Name: "http_host", Type: schema.LowCardinalityColumnType{ElementType: schema.ColumnTypeString}},
		"db_name":              {Name: "db_name", Type: schema.LowCardinalityColumnType{ElementType: schema.ColumnTypeString}},
		"db_operation":         {Name: "db_operation", Type: schema.LowCardinalityColumnType{ElementType: schema.ColumnTypeString}},
		"has_error":            {Name: "has_error", Type: schema.ColumnTypeBool},
		"is_remote":            {Name: "is_remote", Type: schema.LowCardinalityColumnType{ElementType: schema.ColumnTypeString}},

		// materialized columns
		"resource_string_service$$name":         {Name: "resource_string_service$$name", Type: schema.ColumnTypeString},
		"attribute_string_http$$route":          {Name: "attribute_string_http$$route", Type: schema.ColumnTypeString},
		"attribute_string_messaging$$system":    {Name: "attribute_string_messaging$$system", Type: schema.ColumnTypeString},
		"attribute_string_messaging$$operation": {Name: "attribute_string_messaging$$operation", Type: schema.ColumnTypeString},
		"attribute_string_db$$system":           {Name: "attribute_string_db$$system", Type: schema.ColumnTypeString},
		"attribute_string_rpc$$system":          {Name: "attribute_string_rpc$$system", Type: schema.ColumnTypeString},
		"attribute_string_rpc$$service":         {Name: "attribute_string_rpc$$service", Type: schema.ColumnTypeString},
		"attribute_string_rpc$$method":          {Name: "attribute_string_rpc$$method", Type: schema.ColumnTypeString},
		"attribute_string_peer$$service":        {Name: "attribute_string_peer$$service", Type: schema.ColumnTypeString},

		// deprecated intrinsic columns
		"traceID":          {Name: "traceID", Type: schema.FixedStringColumnType{Length: 32}},
		"spanID":           {Name: "spanID", Type: schema.ColumnTypeString},
		"parentSpanID":     {Name: "parentSpanID", Type: schema.ColumnTypeString},
		"spanKind":         {Name: "spanKind", Type: schema.ColumnTypeString},
		"durationNano":     {Name: "durationNano", Type: schema.ColumnTypeUInt64},
		"statusCode":       {Name: "statusCode", Type: schema.ColumnTypeInt16},
		"statusMessage":    {Name: "statusMessage", Type: schema.ColumnTypeString},
		"statusCodeString": {Name: "statusCodeString", Type: schema.ColumnTypeString},

		// deprecated derived columns
		"references":         {Name: "references", Type: schema.ColumnTypeString},
		"responseStatusCode": {Name: "responseStatusCode", Type: schema.ColumnTypeString},
		"externalHttpUrl":    {Name: "externalHttpUrl", Type: schema.ColumnTypeString},
		"httpUrl":            {Name: "httpUrl", Type: schema.ColumnTypeString},
		"externalHttpMethod": {Name: "externalHttpMethod", Type: schema.ColumnTypeString},
		"httpMethod":         {Name: "httpMethod", Type: schema.LowCardinalityColumnType{ElementType: schema.ColumnTypeString}},
		"httpHost":           {Name: "httpHost", Type: schema.LowCardinalityColumnType{ElementType: schema.ColumnTypeString}},
		"dbName":             {Name: "dbName", Type: schema.LowCardinalityColumnType{ElementType: schema.ColumnTypeString}},
		"dbOperation":        {Name: "dbOperation", Type: schema.LowCardinalityColumnType{ElementType: schema.ColumnTypeString}},
		"hasError":           {Name: "hasError", Type: schema.ColumnTypeBool},
		"isRemote":           {Name: "isRemote", Type: schema.LowCardinalityColumnType{ElementType: schema.ColumnTypeString}},
		"serviceName":        {Name: "serviceName", Type: schema.LowCardinalityColumnType{ElementType: schema.ColumnTypeString}},
		"httpRoute":          {Name: "httpRoute", Type: schema.LowCardinalityColumnType{ElementType: schema.ColumnTypeString}},
		"msgSystem":          {Name: "msgSystem", Type: schema.LowCardinalityColumnType{ElementType: schema.ColumnTypeString}},
		"msgOperation":       {Name: "msgOperation", Type: schema.LowCardinalityColumnType{ElementType: schema.ColumnTypeString}},
		"dbSystem":           {Name: "dbSystem", Type: schema.LowCardinalityColumnType{ElementType: schema.ColumnTypeString}},
		"rpcSystem":          {Name: "rpcSystem", Type: schema.LowCardinalityColumnType{ElementType: schema.ColumnTypeString}},
		"rpcService":         {Name: "rpcService", Type: schema.LowCardinalityColumnType{ElementType: schema.ColumnTypeString}},
		"rpcMethod":          {Name: "rpcMethod", Type: schema.LowCardinalityColumnType{ElementType: schema.ColumnTypeString}},
		"peerService":        {Name: "peerService", Type: schema.LowCardinalityColumnType{ElementType: schema.ColumnTypeString}},

		// materialized exists columns
		"resource_string_service$$name_exists":         {Name: "resource_string_service$$name_exists", Type: schema.ColumnTypeBool},
		"attribute_string_http$$route_exists":          {Name: "attribute_string_http$$route_exists", Type: schema.ColumnTypeBool},
		"attribute_string_messaging$$system_exists":    {Name: "attribute_string_messaging$$system_exists", Type: schema.ColumnTypeBool},
		"attribute_string_messaging$$operation_exists": {Name: "attribute_string_messaging$$operation_exists", Type: schema.ColumnTypeBool},
		"attribute_string_db$$system_exists":           {Name: "attribute_string_db$$system_exists", Type: schema.ColumnTypeBool},
		"attribute_string_rpc$$system_exists":          {Name: "attribute_string_rpc$$system_exists", Type: schema.ColumnTypeBool},
		"attribute_string_rpc$$service_exists":         {Name: "attribute_string_rpc$$service_exists", Type: schema.ColumnTypeBool},
		"attribute_string_rpc$$method_exists":          {Name: "attribute_string_rpc$$method_exists", Type: schema.ColumnTypeBool},
		"attribute_string_peer$$service_exists":        {Name: "attribute_string_peer$$service_exists", Type: schema.ColumnTypeBool},
	}
)

// interface check
var _ qbtypes.ConditionBuilder = &conditionBuilder{}

type conditionBuilder struct {
}

func NewConditionBuilder() qbtypes.ConditionBuilder {
	return &conditionBuilder{}
}

func (c *conditionBuilder) GetColumn(ctx context.Context, key *telemetrytypes.TelemetryFieldKey) (*schema.Column, error) {
	switch key.FieldContext {
	case telemetrytypes.FieldContextResource:
		return indexV3Columns["resources_string"], nil
	case telemetrytypes.FieldContextScope:
		// we don't have scope data stored in the spans yet
		return nil, qbtypes.ErrColumnNotFound
	case telemetrytypes.FieldContextAttribute:
		switch key.FieldDataType {
		case telemetrytypes.FieldDataTypeString:
			return indexV3Columns["attributes_string"], nil
		case telemetrytypes.FieldDataTypeInt64, telemetrytypes.FieldDataTypeFloat64, telemetrytypes.FieldDataTypeNumber:
			return indexV3Columns["attributes_number"], nil
		case telemetrytypes.FieldDataTypeBool:
			return indexV3Columns["attributes_bool"], nil
		}
	case telemetrytypes.FieldContextSpan:
		col, ok := indexV3Columns[key.Name]
		if !ok {
			return nil, qbtypes.ErrColumnNotFound
		}
		return col, nil
	}

	return nil, qbtypes.ErrColumnNotFound
}

func (c *conditionBuilder) GetTableFieldName(ctx context.Context, key *telemetrytypes.TelemetryFieldKey) (string, error) {
	column, err := c.GetColumn(ctx, key)
	if err != nil {
		return "", err
	}

	switch column.Type {
	case schema.ColumnTypeString,
		schema.LowCardinalityColumnType{ElementType: schema.ColumnTypeString},
		schema.ColumnTypeUInt64,
		schema.ColumnTypeUInt32,
		schema.ColumnTypeInt8,
		schema.ColumnTypeInt16,
		schema.ColumnTypeBool,
		schema.DateTime64ColumnType{Precision: 9, Timezone: "UTC"},
		schema.FixedStringColumnType{Length: 32}:
		return column.Name, nil
	case schema.MapColumnType{
		KeyType:   schema.LowCardinalityColumnType{ElementType: schema.ColumnTypeString},
		ValueType: schema.ColumnTypeString,
	}, schema.MapColumnType{
		KeyType:   schema.LowCardinalityColumnType{ElementType: schema.ColumnTypeString},
		ValueType: schema.ColumnTypeFloat64,
	}, schema.MapColumnType{
		KeyType:   schema.LowCardinalityColumnType{ElementType: schema.ColumnTypeString},
		ValueType: schema.ColumnTypeBool,
	}:
		// a key could have been materialized; if so, return the materialized column name
		if key.Materialized {
			return telemetrytypes.FieldKeyToMaterializedColumnName(key), nil
		}
		return fmt.Sprintf("%s['%s']", column.Name, key.Name), nil
	}
	// should not reach here
	return column.Name, nil
}
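
// Resolution examples (illustrative, not exhaustive):
//
//	{Name: "timestamp", FieldContext: span}                      -> timestamp
//	{Name: "user.id", attribute, string}                         -> attributes_string['user.id']
//	{Name: "http.route", attribute, string, Materialized: true}  -> attribute_string_http$$route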

func (c *conditionBuilder) GetCondition(
	ctx context.Context,
	key *telemetrytypes.TelemetryFieldKey,
	operator qbtypes.FilterOperator,
	value any,
	sb *sqlbuilder.SelectBuilder,
) (string, error) {
	column, err := c.GetColumn(ctx, key)
	if err != nil {
		return "", err
	}

	tblFieldName, err := c.GetTableFieldName(ctx, key)
	if err != nil {
		return "", err
	}

	tblFieldName, value = telemetrytypes.DataTypeCollisionHandledFieldName(key, value, tblFieldName)

	switch operator {
	// regular operators
	case qbtypes.FilterOperatorEqual:
		return sb.E(tblFieldName, value), nil
	case qbtypes.FilterOperatorNotEqual:
		return sb.NE(tblFieldName, value), nil
	case qbtypes.FilterOperatorGreaterThan:
		return sb.G(tblFieldName, value), nil
	case qbtypes.FilterOperatorGreaterThanOrEq:
		return sb.GE(tblFieldName, value), nil
	case qbtypes.FilterOperatorLessThan:
		return sb.LT(tblFieldName, value), nil
	case qbtypes.FilterOperatorLessThanOrEq:
		return sb.LE(tblFieldName, value), nil

	// like and not like
	case qbtypes.FilterOperatorLike:
		return sb.Like(tblFieldName, value), nil
	case qbtypes.FilterOperatorNotLike:
		return sb.NotLike(tblFieldName, value), nil
	case qbtypes.FilterOperatorILike:
		return sb.ILike(tblFieldName, value), nil
	case qbtypes.FilterOperatorNotILike:
		return sb.NotILike(tblFieldName, value), nil

	case qbtypes.FilterOperatorContains:
		return sb.ILike(tblFieldName, fmt.Sprintf("%%%s%%", value)), nil
	case qbtypes.FilterOperatorNotContains:
		return sb.NotILike(tblFieldName, fmt.Sprintf("%%%s%%", value)), nil

	case qbtypes.FilterOperatorRegexp:
		exp := fmt.Sprintf(`match(%s, %s)`, tblFieldName, sb.Var(value))
		return sb.And(exp), nil
	case qbtypes.FilterOperatorNotRegexp:
		exp := fmt.Sprintf(`not match(%s, %s)`, tblFieldName, sb.Var(value))
		return sb.And(exp), nil

	// between and not between
	case qbtypes.FilterOperatorBetween:
		values, ok := value.([]any)
		if !ok {
			return "", qbtypes.ErrBetweenValues
		}
		if len(values) != 2 {
			return "", qbtypes.ErrBetweenValues
		}
		return sb.Between(tblFieldName, values[0], values[1]), nil
	case qbtypes.FilterOperatorNotBetween:
		values, ok := value.([]any)
		if !ok {
			return "", qbtypes.ErrBetweenValues
		}
		if len(values) != 2 {
			return "", qbtypes.ErrBetweenValues
		}
		return sb.NotBetween(tblFieldName, values[0], values[1]), nil

	// in and not in
	case qbtypes.FilterOperatorIn:
		values, ok := value.([]any)
		if !ok {
			return "", qbtypes.ErrInValues
		}
		return sb.In(tblFieldName, values...), nil
	case qbtypes.FilterOperatorNotIn:
		values, ok := value.([]any)
		if !ok {
			return "", qbtypes.ErrInValues
		}
		return sb.NotIn(tblFieldName, values...), nil

	// exists and not exists
	// in the query builder, `exists` and `not exists` are used for
	// key membership checks, so depending on the column type, the condition changes
	case qbtypes.FilterOperatorExists, qbtypes.FilterOperatorNotExists:
		var value any
		switch column.Type {
		case schema.ColumnTypeString,
			schema.LowCardinalityColumnType{ElementType: schema.ColumnTypeString},
			schema.FixedStringColumnType{Length: 32},
			schema.DateTime64ColumnType{Precision: 9, Timezone: "UTC"}:
			value = ""
			if operator == qbtypes.FilterOperatorExists {
				return sb.NE(tblFieldName, value), nil
			} else {
				return sb.E(tblFieldName, value), nil
			}
		case schema.ColumnTypeUInt64,
			schema.ColumnTypeUInt32,
			schema.ColumnTypeUInt8,
			schema.ColumnTypeInt8,
			schema.ColumnTypeInt16,
			schema.ColumnTypeBool:
			value = 0
			if operator == qbtypes.FilterOperatorExists {
				return sb.NE(tblFieldName, value), nil
			} else {
				return sb.E(tblFieldName, value), nil
			}
		case schema.MapColumnType{
			KeyType:   schema.LowCardinalityColumnType{ElementType: schema.ColumnTypeString},
			ValueType: schema.ColumnTypeString,
		}, schema.MapColumnType{
			KeyType:   schema.LowCardinalityColumnType{ElementType: schema.ColumnTypeString},
			ValueType: schema.ColumnTypeBool,
		}, schema.MapColumnType{
			KeyType:   schema.LowCardinalityColumnType{ElementType: schema.ColumnTypeString},
			ValueType: schema.ColumnTypeFloat64,
		}:
			leftOperand := fmt.Sprintf("mapContains(%s, '%s')", column.Name, key.Name)
			if key.Materialized {
				leftOperand = telemetrytypes.FieldKeyToMaterializedColumnNameForExists(key)
			}
			if operator == qbtypes.FilterOperatorExists {
				return sb.E(leftOperand, true), nil
			} else {
				return sb.NE(leftOperand, true), nil
			}
		default:
			return "", fmt.Errorf("exists operator is not supported for column type %s", column.Type)
		}
	}
	return "", nil
}
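
A compact sketch of how a condition built here plugs into a full query; the "user.id" filter and its value are assumptions for illustration:

	ctx := context.Background()
	sb := sqlbuilder.NewSelectBuilder()
	sb.Select("trace_id").From(DBName + "." + SpanIndexV3TableName)
	cond, err := NewConditionBuilder().GetCondition(ctx, &telemetrytypes.TelemetryFieldKey{
		Name:          "user.id",
		FieldContext:  telemetrytypes.FieldContextAttribute,
		FieldDataType: telemetrytypes.FieldDataTypeString,
	}, qbtypes.FilterOperatorEqual, "u-42", sb)
	if err == nil {
		sb.Where(cond)
	}
	query, args := sb.BuildWithFlavor(sqlbuilder.ClickHouse)
	// query ~ "SELECT trace_id FROM signoz_traces.distributed_signoz_index_v3
	//          WHERE attributes_string['user.id'] = ?", args ~ ["u-42"]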

298 pkg/telemetrytraces/condition_builder_test.go Normal file
@@ -0,0 +1,298 @@
|
||||
package telemetrytraces
|
||||
|
||||
import (
|
||||
"context"
|
||||
"testing"
|
||||
|
||||
qbtypes "github.com/SigNoz/signoz/pkg/types/querybuildertypes/querybuildertypesv5"
|
||||
"github.com/SigNoz/signoz/pkg/types/telemetrytypes"
|
||||
"github.com/huandu/go-sqlbuilder"
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
func TestGetFieldKeyName(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
conditionBuilder := &conditionBuilder{}
|
||||
|
||||
testCases := []struct {
|
||||
name string
|
||||
key telemetrytypes.TelemetryFieldKey
|
||||
expectedResult string
|
||||
expectedError error
|
||||
}{
|
||||
{
|
||||
name: "Simple column type - timestamp",
|
||||
key: telemetrytypes.TelemetryFieldKey{
|
||||
Name: "timestamp",
|
||||
FieldContext: telemetrytypes.FieldContextSpan,
|
||||
},
|
||||
expectedResult: "timestamp",
|
||||
expectedError: nil,
|
||||
},
|
||||
{
|
||||
name: "Map column type - string attribute",
|
||||
key: telemetrytypes.TelemetryFieldKey{
|
||||
Name: "user.id",
|
||||
FieldContext: telemetrytypes.FieldContextAttribute,
|
||||
FieldDataType: telemetrytypes.FieldDataTypeString,
|
||||
},
|
||||
expectedResult: "attributes_string['user.id']",
|
||||
expectedError: nil,
|
||||
},
|
||||
{
|
||||
name: "Map column type - number attribute",
|
||||
key: telemetrytypes.TelemetryFieldKey{
|
||||
Name: "request.size",
|
||||
FieldContext: telemetrytypes.FieldContextAttribute,
|
||||
FieldDataType: telemetrytypes.FieldDataTypeNumber,
|
||||
},
|
||||
expectedResult: "attributes_number['request.size']",
|
||||
expectedError: nil,
|
||||
},
|
||||
{
|
||||
name: "Map column type - bool attribute",
|
||||
key: telemetrytypes.TelemetryFieldKey{
|
||||
Name: "request.success",
|
||||
FieldContext: telemetrytypes.FieldContextAttribute,
|
||||
FieldDataType: telemetrytypes.FieldDataTypeBool,
|
||||
},
|
||||
expectedResult: "attributes_bool['request.success']",
|
||||
expectedError: nil,
|
||||
},
|
||||
{
|
||||
name: "Map column type - resource attribute",
|
||||
key: telemetrytypes.TelemetryFieldKey{
|
||||
Name: "service.name",
|
||||
FieldContext: telemetrytypes.FieldContextResource,
|
||||
},
|
||||
expectedResult: "resources_string['service.name']",
|
||||
expectedError: nil,
|
||||
},
|
||||
{
|
||||
name: "Non-existent column",
|
||||
key: telemetrytypes.TelemetryFieldKey{
|
||||
Name: "nonexistent_field",
|
||||
FieldContext: telemetrytypes.FieldContextSpan,
|
||||
},
|
||||
expectedResult: "",
|
||||
expectedError: qbtypes.ErrColumnNotFound,
|
||||
},
|
||||
}
|
||||
|
||||
for _, tc := range testCases {
|
||||
t.Run(tc.name, func(t *testing.T) {
|
||||
result, err := conditionBuilder.GetTableFieldName(ctx, &tc.key)
|
||||
|
||||
if tc.expectedError != nil {
|
||||
assert.Equal(t, tc.expectedError, err)
|
||||
} else {
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, tc.expectedResult, result)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestGetCondition(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
conditionBuilder := NewConditionBuilder()
|
||||
|
||||
testCases := []struct {
|
||||
name string
|
||||
key telemetrytypes.TelemetryFieldKey
|
||||
operator qbtypes.FilterOperator
|
||||
value any
|
||||
expectedSQL string
|
||||
expectedError error
|
||||
}{
|
||||
{
|
||||
name: "Not Equal operator - timestamp",
|
||||
key: telemetrytypes.TelemetryFieldKey{
|
||||
Name: "timestamp",
|
||||
FieldContext: telemetrytypes.FieldContextSpan,
|
||||
},
|
||||
operator: qbtypes.FilterOperatorNotEqual,
|
||||
value: uint64(1617979338000000000),
|
||||
expectedSQL: "timestamp <> ?",
|
||||
expectedError: nil,
|
||||
},
|
||||
{
|
||||
name: "Greater Than operator - number attribute",
|
||||
key: telemetrytypes.TelemetryFieldKey{
|
||||
Name: "request.duration",
|
||||
FieldContext: telemetrytypes.FieldContextAttribute,
|
||||
FieldDataType: telemetrytypes.FieldDataTypeNumber,
|
||||
},
|
||||
operator: qbtypes.FilterOperatorGreaterThan,
|
||||
value: float64(100),
|
||||
expectedSQL: "attributes_number['request.duration'] > ?",
|
||||
expectedError: nil,
|
||||
},
|
||||
{
|
||||
name: "Less Than operator - number attribute",
|
||||
key: telemetrytypes.TelemetryFieldKey{
|
||||
Name: "request.size",
|
||||
FieldContext: telemetrytypes.FieldContextAttribute,
|
||||
FieldDataType: telemetrytypes.FieldDataTypeNumber,
|
||||
},
|
||||
operator: qbtypes.FilterOperatorLessThan,
|
||||
value: float64(1024),
|
||||
expectedSQL: "attributes_number['request.size'] < ?",
|
||||
expectedError: nil,
|
||||
},
|
||||
{
|
||||
name: "Greater Than Or Equal operator - timestamp",
|
||||
key: telemetrytypes.TelemetryFieldKey{
|
||||
Name: "timestamp",
|
||||
FieldContext: telemetrytypes.FieldContextSpan,
|
||||
},
|
||||
operator: qbtypes.FilterOperatorGreaterThanOrEq,
|
||||
value: uint64(1617979338000000000),
|
||||
expectedSQL: "timestamp >= ?",
|
||||
expectedError: nil,
|
||||
},
|
||||
{
|
||||
name: "Less Than Or Equal operator - timestamp",
|
||||
key: telemetrytypes.TelemetryFieldKey{
|
||||
Name: "timestamp",
|
||||
FieldContext: telemetrytypes.FieldContextSpan,
|
||||
},
|
||||
operator: qbtypes.FilterOperatorLessThanOrEq,
|
||||
value: uint64(1617979338000000000),
|
||||
expectedSQL: "timestamp <= ?",
|
||||
expectedError: nil,
|
||||
},
|
||||
{
|
||||
name: "ILike operator - string attribute",
|
||||
key: telemetrytypes.TelemetryFieldKey{
|
||||
Name: "user.id",
|
||||
FieldContext: telemetrytypes.FieldContextAttribute,
|
||||
FieldDataType: telemetrytypes.FieldDataTypeString,
|
||||
},
|
||||
operator: qbtypes.FilterOperatorILike,
|
||||
value: "%admin%",
|
||||
expectedSQL: "WHERE LOWER(attributes_string['user.id']) LIKE LOWER(?)",
|
||||
expectedError: nil,
|
||||
},
|
		{
			name: "Not ILike operator - string attribute",
			key: telemetrytypes.TelemetryFieldKey{
				Name:          "user.id",
				FieldContext:  telemetrytypes.FieldContextAttribute,
				FieldDataType: telemetrytypes.FieldDataTypeString,
			},
			operator:      qbtypes.FilterOperatorNotILike,
			value:         "%admin%",
			expectedSQL:   "WHERE LOWER(attributes_string['user.id']) NOT LIKE LOWER(?)",
			expectedError: nil,
		},
		{
			name: "Between operator - timestamp",
			key: telemetrytypes.TelemetryFieldKey{
				Name:         "timestamp",
				FieldContext: telemetrytypes.FieldContextSpan,
			},
			operator:      qbtypes.FilterOperatorBetween,
			value:         []any{uint64(1617979338000000000), uint64(1617979348000000000)},
			expectedSQL:   "timestamp BETWEEN ? AND ?",
			expectedError: nil,
		},
		{
			name: "Between operator - invalid value",
			key: telemetrytypes.TelemetryFieldKey{
				Name:         "timestamp",
				FieldContext: telemetrytypes.FieldContextSpan,
			},
			operator:      qbtypes.FilterOperatorBetween,
			value:         "invalid",
			expectedSQL:   "",
			expectedError: qbtypes.ErrBetweenValues,
		},
		{
			name: "Between operator - insufficient values",
			key: telemetrytypes.TelemetryFieldKey{
				Name:         "timestamp",
				FieldContext: telemetrytypes.FieldContextSpan,
			},
			operator:      qbtypes.FilterOperatorBetween,
			value:         []any{uint64(1617979338000000000)},
			expectedSQL:   "",
			expectedError: qbtypes.ErrBetweenValues,
		},
		{
			name: "Not Between operator - timestamp",
			key: telemetrytypes.TelemetryFieldKey{
				Name:         "timestamp",
				FieldContext: telemetrytypes.FieldContextSpan,
			},
			operator:      qbtypes.FilterOperatorNotBetween,
			value:         []any{uint64(1617979338000000000), uint64(1617979348000000000)},
			expectedSQL:   "timestamp NOT BETWEEN ? AND ?",
			expectedError: nil,
		},
		{
			name: "Exists operator - map field",
			key: telemetrytypes.TelemetryFieldKey{
				Name:          "user.id",
				FieldContext:  telemetrytypes.FieldContextAttribute,
				FieldDataType: telemetrytypes.FieldDataTypeString,
			},
			operator:      qbtypes.FilterOperatorExists,
			value:         nil,
			expectedSQL:   "mapContains(attributes_string, 'user.id') = ?",
			expectedError: nil,
		},
		{
			name: "Not Exists operator - map field",
			key: telemetrytypes.TelemetryFieldKey{
				Name:          "user.id",
				FieldContext:  telemetrytypes.FieldContextAttribute,
				FieldDataType: telemetrytypes.FieldDataTypeString,
			},
			operator:      qbtypes.FilterOperatorNotExists,
			value:         nil,
			expectedSQL:   "mapContains(attributes_string, 'user.id') <> ?",
			expectedError: nil,
		},
		{
			name: "Contains operator - map field",
			key: telemetrytypes.TelemetryFieldKey{
				Name:          "user.id",
				FieldContext:  telemetrytypes.FieldContextAttribute,
				FieldDataType: telemetrytypes.FieldDataTypeString,
			},
			operator:      qbtypes.FilterOperatorContains,
			value:         "admin",
			expectedSQL:   "WHERE LOWER(attributes_string['user.id']) LIKE LOWER(?)",
			expectedError: nil,
		},
		{
			name: "Non-existent column",
			key: telemetrytypes.TelemetryFieldKey{
				Name:         "nonexistent_field",
				FieldContext: telemetrytypes.FieldContextSpan,
			},
			operator:      qbtypes.FilterOperatorEqual,
			value:         "value",
			expectedSQL:   "",
			expectedError: qbtypes.ErrColumnNotFound,
		},
	}

	for _, tc := range testCases {
		sb := sqlbuilder.NewSelectBuilder()
		t.Run(tc.name, func(t *testing.T) {
			cond, err := conditionBuilder.GetCondition(ctx, &tc.key, tc.operator, tc.value, sb)
			sb.Where(cond)

			if tc.expectedError != nil {
				assert.Equal(t, tc.expectedError, err)
			} else {
				require.NoError(t, err)
				sql, _ := sb.BuildWithFlavor(sqlbuilder.ClickHouse)
				assert.Contains(t, sql, tc.expectedSQL)
			}
		})
	}
}
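For orientation, these assertions lean on the ClickHouse flavor of huandu/go-sqlbuilder: GetCondition returns a condition fragment with `?` placeholders, which is attached via sb.Where and rendered by BuildWithFlavor. A minimal, self-contained sketch of that composition (the table name and condition below are hand-written for illustration, not output of the builder under test):

	package main

	import (
		"fmt"

		"github.com/huandu/go-sqlbuilder"
	)

	func main() {
		sb := sqlbuilder.NewSelectBuilder()
		sb.Select("*").From("signoz_traces.distributed_signoz_index_v3")
		// Cond helpers register args and return placeholder-bearing fragments,
		// the same contract GetCondition follows for its return value.
		sb.Where(sb.Like("attributes_string['user.id']", "%admin%"))

		sql, args := sb.BuildWithFlavor(sqlbuilder.ClickHouse)
		fmt.Println(sql)  // ... WHERE attributes_string['user.id'] LIKE ?
		fmt.Println(args) // [%admin%]
	}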
10 pkg/telemetrytraces/tables.go Normal file
@@ -0,0 +1,10 @@
package telemetrytraces

const (
	DBName                        = "signoz_traces"
	SpanIndexV3TableName          = "distributed_signoz_index_v3"
	SpanIndexV3LocalTableName     = "signoz_index_v3"
	TagAttributesV2TableName      = "distributed_tag_attributes_v2"
	TagAttributesV2LocalTableName = "tag_attributes_v2"
	TopLevelOperationsTableName   = "distributed_top_level_operations"
)
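As a quick illustration (not part of the diff), these constants are meant to be composed into fully qualified db.table references when queries are built elsewhere in the package; a one-line sketch, assuming it runs inside this package:

	// e.g. "signoz_traces.distributed_signoz_index_v3"
	qualifiedTable := fmt.Sprintf("%s.%s", DBName, SpanIndexV3TableName)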
@@ -1,37 +1,246 @@
package types

import (
	"database/sql/driver"
	"encoding/json"
	"fmt"
	"time"

	"github.com/pkg/errors"
	"github.com/uptrace/bun"
)

-type Integration struct {
-	bun.BaseModel `bun:"table:integrations_installed"`
-
-	IntegrationID string    `bun:"integration_id,pk,type:text"`
-	ConfigJSON    string    `bun:"config_json,type:text"`
-	InstalledAt   time.Time `bun:"installed_at,default:current_timestamp"`
-}
-
-type CloudIntegrationAccount struct {
-	bun.BaseModel `bun:"table:cloud_integrations_accounts"`
-
-	CloudProvider       string    `bun:"cloud_provider,type:text,unique:cloud_provider_id"`
-	ID                  string    `bun:"id,type:text,notnull,unique:cloud_provider_id"`
-	ConfigJSON          string    `bun:"config_json,type:text"`
-	CloudAccountID      string    `bun:"cloud_account_id,type:text"`
-	LastAgentReportJSON string    `bun:"last_agent_report_json,type:text"`
-	CreatedAt           time.Time `bun:"created_at,notnull,default:current_timestamp"`
-	RemovedAt           time.Time `bun:"removed_at,type:timestamp"`
-}
-
-type CloudIntegrationServiceConfig struct {
-	bun.BaseModel `bun:"table:cloud_integrations_service_configs"`
-
-	CloudProvider  string    `bun:"cloud_provider,type:text,notnull,unique:service_cloud_provider_account"`
-	CloudAccountID string    `bun:"cloud_account_id,type:text,notnull,unique:service_cloud_provider_account"`
-	ServiceID      string    `bun:"service_id,type:text,notnull,unique:service_cloud_provider_account"`
-	ConfigJSON     string    `bun:"config_json,type:text"`
-	CreatedAt      time.Time `bun:"created_at,default:current_timestamp"`
-}
+type IntegrationUserEmail string
+
+const (
+	AWSIntegrationUserEmail IntegrationUserEmail = "aws-integration@signoz.io"
+)
+
+var AllIntegrationUserEmails = []IntegrationUserEmail{
+	AWSIntegrationUserEmail,
+}
+
+// --------------------------------------------------------------------------
+// Normal integration uses just the installed_integration table
+// --------------------------------------------------------------------------
+
+type InstalledIntegration struct {
+	bun.BaseModel `bun:"table:installed_integration"`
+
+	Identifiable
+	Type        string                     `json:"type" bun:"type,type:text,unique:org_id_type"`
+	Config      InstalledIntegrationConfig `json:"config" bun:"config,type:text"`
+	InstalledAt time.Time                  `json:"installed_at" bun:"installed_at,default:current_timestamp"`
+	OrgID       string                     `json:"org_id" bun:"org_id,type:text,unique:org_id_type,references:organizations(id),on_delete:cascade"`
+}
+
+type InstalledIntegrationConfig map[string]interface{}
+
+// For serializing from db
+func (c *InstalledIntegrationConfig) Scan(src interface{}) error {
+	var data []byte
+	switch v := src.(type) {
+	case []byte:
+		data = v
+	case string:
+		data = []byte(v)
+	default:
+		return fmt.Errorf("tried to scan from %T instead of string or bytes", src)
+	}
+
+	return json.Unmarshal(data, c)
+}
+
+// For serializing to db
+func (c *InstalledIntegrationConfig) Value() (driver.Value, error) {
+	filterSetJson, err := json.Marshal(c)
+	if err != nil {
+		return nil, errors.Wrap(err, "could not serialize integration config to JSON")
+	}
+	return filterSetJson, nil
+}
+
+// --------------------------------------------------------------------------
+// Cloud integration uses the cloud_integration table
+// and cloud_integrations_service table
+// --------------------------------------------------------------------------
+
+type CloudIntegration struct {
+	bun.BaseModel `bun:"table:cloud_integration"`
+
+	Identifiable
+	TimeAuditable
+	Provider        string         `json:"provider" bun:"provider,type:text,unique:provider_id"`
+	Config          *AccountConfig `json:"config" bun:"config,type:text"`
+	AccountID       *string        `json:"account_id" bun:"account_id,type:text"`
+	LastAgentReport *AgentReport   `json:"last_agent_report" bun:"last_agent_report,type:text"`
+	RemovedAt       *time.Time     `json:"removed_at" bun:"removed_at,type:timestamp,nullzero"`
+	OrgID           string         `bun:"org_id,type:text,unique:provider_id"`
+}
+
+func (a *CloudIntegration) Status() AccountStatus {
+	status := AccountStatus{}
+	if a.LastAgentReport != nil {
+		lastHeartbeat := a.LastAgentReport.TimestampMillis
+		status.Integration.LastHeartbeatTsMillis = &lastHeartbeat
+	}
+	return status
+}
+
+func (a *CloudIntegration) Account() Account {
+	ca := Account{Id: a.ID.StringValue(), Status: a.Status()}
+
+	if a.AccountID != nil {
+		ca.CloudAccountId = *a.AccountID
+	}
+
+	if a.Config != nil {
+		ca.Config = *a.Config
+	} else {
+		ca.Config = DefaultAccountConfig()
+	}
+	return ca
+}
+
+type Account struct {
+	Id             string        `json:"id"`
+	CloudAccountId string        `json:"cloud_account_id"`
+	Config         AccountConfig `json:"config"`
+	Status         AccountStatus `json:"status"`
+}
+
+type AccountStatus struct {
+	Integration AccountIntegrationStatus `json:"integration"`
+}
+
+type AccountIntegrationStatus struct {
+	LastHeartbeatTsMillis *int64 `json:"last_heartbeat_ts_ms"`
+}
+
+func DefaultAccountConfig() AccountConfig {
+	return AccountConfig{
+		EnabledRegions: []string{},
+	}
+}
+
+type AccountConfig struct {
+	EnabledRegions []string `json:"regions"`
+}
+
+// For serializing from db
+func (c *AccountConfig) Scan(src any) error {
+	var data []byte
+	switch v := src.(type) {
+	case []byte:
+		data = v
+	case string:
+		data = []byte(v)
+	default:
+		return fmt.Errorf("tried to scan from %T instead of string or bytes", src)
+	}
+
+	return json.Unmarshal(data, c)
+}
+
+// For serializing to db
+func (c *AccountConfig) Value() (driver.Value, error) {
+	if c == nil {
+		return nil, nil
+	}
+
+	serialized, err := json.Marshal(c)
+	if err != nil {
+		return nil, fmt.Errorf(
+			"couldn't serialize cloud account config to JSON: %w", err,
+		)
+	}
+	return serialized, nil
+}
+
+type AgentReport struct {
+	TimestampMillis int64          `json:"timestamp_millis"`
+	Data            map[string]any `json:"data"`
+}
+
+// For serializing from db
+func (r *AgentReport) Scan(src any) error {
+	var data []byte
+	switch v := src.(type) {
+	case []byte:
+		data = v
+	case string:
+		data = []byte(v)
+	default:
+		return fmt.Errorf("tried to scan from %T instead of string or bytes", src)
+	}
+
+	return json.Unmarshal(data, r)
+}
+
+// For serializing to db
+func (r *AgentReport) Value() (driver.Value, error) {
+	if r == nil {
+		return nil, nil
+	}
+
+	serialized, err := json.Marshal(r)
+	if err != nil {
+		return nil, fmt.Errorf(
+			"couldn't serialize agent report to JSON: %w", err,
+		)
+	}
+	return serialized, nil
+}
+
+type CloudIntegrationService struct {
+	bun.BaseModel `bun:"table:cloud_integration_service,alias:cis"`
+
+	Identifiable
+	TimeAuditable
+	Type               string             `bun:"type,type:text,notnull,unique:cloud_integration_id_type"`
+	Config             CloudServiceConfig `bun:"config,type:text"`
+	CloudIntegrationID string             `bun:"cloud_integration_id,type:text,notnull,unique:cloud_integration_id_type,references:cloud_integrations(id),on_delete:cascade"`
+}
+
+type CloudServiceLogsConfig struct {
+	Enabled bool `json:"enabled"`
+}
+
+type CloudServiceMetricsConfig struct {
+	Enabled bool `json:"enabled"`
+}
+
+type CloudServiceConfig struct {
+	Logs    *CloudServiceLogsConfig    `json:"logs,omitempty"`
+	Metrics *CloudServiceMetricsConfig `json:"metrics,omitempty"`
+}
+
+// For serializing from db
+func (c *CloudServiceConfig) Scan(src any) error {
+	var data []byte
+	switch src := src.(type) {
+	case []byte:
+		data = src
+	case string:
+		data = []byte(src)
+	default:
+		return fmt.Errorf("tried to scan from %T instead of string or bytes", src)
+	}
+
+	return json.Unmarshal(data, c)
+}
+
+// For serializing to db
+func (c *CloudServiceConfig) Value() (driver.Value, error) {
+	if c == nil {
+		return nil, nil
+	}
+
+	serialized, err := json.Marshal(c)
+	if err != nil {
+		return nil, fmt.Errorf(
+			"couldn't serialize cloud service config to JSON: %w", err,
+		)
+	}
+	return serialized, nil
+}
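The Scan/Value pairs above implement database/sql's sql.Scanner and driver.Valuer contracts, so bun can persist these structs as JSON text columns. A round-trip sketch, assuming the types compile as shown in this diff:

	cfg := InstalledIntegrationConfig{"enabled": true}

	v, err := cfg.Value() // JSON bytes handed to the driver
	if err != nil {
		panic(err)
	}

	var decoded InstalledIntegrationConfig
	if err := decoded.Scan(v); err != nil { // Scan accepts []byte or string
		panic(err)
	}
	// decoded["enabled"] == true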
@@ -2,7 +2,9 @@ package preferencetypes

import (
	"context"
	"encoding/json"
	"fmt"
	v3 "github.com/SigNoz/signoz/pkg/query-service/model/v3"
	"strings"

	"github.com/SigNoz/signoz/pkg/errors"
@@ -129,6 +131,24 @@ func NewDefaultPreferenceMap() map[string]Preference {
			IsDiscreteValues: true,
			AllowedScopes:    []string{"user"},
		},
		"TRACES_QUICK_FILTERS_MAPPING": {
			Key:              "TRACES_QUICK_FILTERS_MAPPING",
			Name:             "Quick Filters Mapping for traces",
			Description:      "Structured keys for Quick filters",
			ValueType:        "json",
			DefaultValue:     "[]",
			IsDiscreteValues: false,
			AllowedScopes:    []string{"org"},
		},
		"LOGS_QUICK_FILTERS_MAPPING": {
			Key:              "LOGS_QUICK_FILTERS_MAPPING",
			Name:             "Quick Filters Mapping for logs",
			Description:      "Structured keys for Quick filters",
			ValueType:        "json",
			DefaultValue:     "[]",
			IsDiscreteValues: false,
			AllowedScopes:    []string{"org"},
		},
	}
}
@@ -222,6 +242,22 @@ func (p *Preference) IsValidValue(preferenceValue interface{}) error {
				return errors.Newf(errors.TypeInvalidInput, errors.CodeInvalidInput, fmt.Sprintf("the preference value is not in the range specified, min: %v , max:%v", p.Range.Min, p.Range.Max))
			}
		}
	case PreferenceValueTypeAttributeKeys:
		strVal, ok := preferenceValue.(string)
		if !ok {
			return p.ErrorValueTypeMismatch()
		}

		var parsed []v3.AttributeKey
		if err := json.Unmarshal([]byte(strVal), &parsed); err != nil {
			return errors.Newf(errors.TypeInvalidInput, errors.CodeInvalidInput, "invalid attribute_keys JSON format: %v", err)
		}

		for _, attr := range parsed {
			if err := attr.Validate(); err != nil {
				return errors.Newf(errors.TypeInvalidInput, errors.CodeInvalidInput, "invalid attribute key: %v", err)
			}
		}
	case PreferenceValueTypeString:
		_, ok := preferenceValue.(string)
		if !ok {
@@ -275,6 +311,17 @@ func (p *Preference) SanitizeValue(preferenceValue interface{}) interface{} {
		} else {
			return false
		}
	case PreferenceValueTypeAttributeKeys:
		switch val := preferenceValue.(type) {
		case string:
			var result interface{}
			if err := json.Unmarshal([]byte(val), &result); err == nil {
				return result
			}
			return []interface{}{}
		default:
			return []interface{}{}
		}
	default:
		return preferenceValue
	}
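To make the new json preferences concrete: a TRACES_QUICK_FILTERS_MAPPING value must unmarshal into []v3.AttributeKey and every key must pass Validate(), exactly as IsValidValue does above. A sketch of a value that would pass (the JSON field names key, dataType, and type are assumptions about v3.AttributeKey's tags, not confirmed by this diff):

	raw := `[
		{"key": "service.name", "dataType": "string", "type": "resource"},
		{"key": "http.status_code", "dataType": "float64", "type": "tag"}
	]`

	var parsed []v3.AttributeKey
	err := json.Unmarshal([]byte(raw), &parsed)
	// a non-nil err here is rejected as "invalid attribute_keys JSON format"
	for _, attr := range parsed {
		err = attr.Validate() // each key must also pass validation
	}
	_ = err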
@@ -1,10 +1,11 @@
package preferencetypes

const (
-	PreferenceValueTypeInteger string = "integer"
-	PreferenceValueTypeFloat   string = "float"
-	PreferenceValueTypeString  string = "string"
-	PreferenceValueTypeBoolean string = "boolean"
+	PreferenceValueTypeInteger       string = "integer"
+	PreferenceValueTypeFloat         string = "float"
+	PreferenceValueTypeString        string = "string"
+	PreferenceValueTypeBoolean       string = "boolean"
+	PreferenceValueTypeAttributeKeys string = "json"
)

const (
@@ -4,10 +4,17 @@ import (
	"context"

	schema "github.com/SigNoz/signoz-otel-collector/cmd/signozschemamigrator/schema_migrator"
	"github.com/SigNoz/signoz/pkg/errors"
	"github.com/SigNoz/signoz/pkg/types/telemetrytypes"
	"github.com/huandu/go-sqlbuilder"
)

var (
	ErrColumnNotFound = errors.Newf(errors.TypeNotFound, errors.CodeNotFound, "column not found")
	ErrBetweenValues  = errors.Newf(errors.TypeInvalidInput, errors.CodeInvalidInput, "(not) between operator requires two values")
	ErrInValues       = errors.Newf(errors.TypeInvalidInput, errors.CodeInvalidInput, "(not) in operator requires a list of values")
)

// FilterOperator is the operator for the filter.
type FilterOperator int
@@ -27,6 +27,18 @@ type TelemetryFieldKey struct {
	Materialized bool `json:"materialized,omitempty"`
}

func (f TelemetryFieldKey) String() string {
	var sb strings.Builder
	sb.WriteString(fmt.Sprintf("name=%s", f.Name))
	if f.FieldContext != FieldContextUnspecified {
		sb.WriteString(fmt.Sprintf(",context=%s", f.FieldContext.String))
	}
	if f.FieldDataType != FieldDataTypeUnspecified {
		sb.WriteString(fmt.Sprintf(",type=%s", f.FieldDataType.StringValue()))
	}
	return sb.String()
}

// GetFieldKeyFromKeyText returns a TelemetryFieldKey from a key text.
// The key text is expected to be in the format of `fieldContext.fieldName:fieldDataType` in the search query.
func GetFieldKeyFromKeyText(key string) TelemetryFieldKey {
@@ -86,12 +98,12 @@ func GetFieldKeyFromKeyText(key string) TelemetryFieldKey {
	return fieldKeySelector
}

-func FieldKeyToMaterializedColumnName(key TelemetryFieldKey) string {
-	return fmt.Sprintf("%s_%s_%s", key.FieldContext, key.FieldDataType.String, strings.ReplaceAll(key.Name, ".", "$$"))
+func FieldKeyToMaterializedColumnName(key *TelemetryFieldKey) string {
+	return fmt.Sprintf("%s_%s_%s", key.FieldContext.String, fieldDataTypes[key.FieldDataType.StringValue()].StringValue(), strings.ReplaceAll(key.Name, ".", "$$"))
}

-func FieldKeyToMaterializedColumnNameForExists(key TelemetryFieldKey) string {
-	return fmt.Sprintf("%s_%s_%s_exists", key.FieldContext, key.FieldDataType.String, strings.ReplaceAll(key.Name, ".", "$$"))
+func FieldKeyToMaterializedColumnNameForExists(key *TelemetryFieldKey) string {
+	return fmt.Sprintf("%s_%s_%s_exists", key.FieldContext.String, fieldDataTypes[key.FieldDataType.StringValue()].StringValue(), strings.ReplaceAll(key.Name, ".", "$$"))
}

type TelemetryFieldValues struct {
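Concretely, with the new pointer-receiver signatures, a string attribute named user.id maps to a materialized column like this (assuming FieldContextAttribute renders as "attribute" and FieldDataTypeString as "string"):

	key := TelemetryFieldKey{
		Name:          "user.id",
		FieldContext:  FieldContextAttribute,
		FieldDataType: FieldDataTypeString,
	}
	col := FieldKeyToMaterializedColumnName(&key)
	// "attribute_string_user$$id"; the companion exists column would be
	// "attribute_string_user$$id_exists"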
@@ -123,3 +135,52 @@ type FieldValueSelector struct {
	Value string `json:"value"`
	Limit int    `json:"limit"`
}

func DataTypeCollisionHandledFieldName(key *TelemetryFieldKey, value any, tblFieldName string) (string, any) {
	// This block of code exists to handle data type collisions.
	// We don't want to fail requests when a key has more than one data type.
	// Take `http.status_code` as an example, and consider that users sent both
	// string values and number values for it. When they search for
	// `http.status_code=200`, we search across both the number and string
	// columns and return results from both. While we expect users not to send
	// mixed data types, it inevitably happens, so we handle the collisions here.
	switch key.FieldDataType {
	case FieldDataTypeString:
		switch value.(type) {
		case float64:
			// try to read the string column as a number
			tblFieldName = fmt.Sprintf(`toFloat64OrNull(%s)`, tblFieldName)
		case []any:
			areFloats := true
			for _, v := range value.([]any) {
				if _, ok := v.(float64); !ok {
					areFloats = false
					break
				}
			}
			if areFloats {
				tblFieldName = fmt.Sprintf(`toFloat64OrNull(%s)`, tblFieldName)
			}
		case bool:
			// we don't have a toBoolOrNull in ClickHouse, so we need to convert the bool to a string
			value = fmt.Sprintf("%t", value)
		case string:
			// nothing to do
		}
	case FieldDataTypeFloat64, FieldDataTypeInt64, FieldDataTypeNumber:
		switch value.(type) {
		case string:
			// cast the number column to string so it can be compared with the string value
			tblFieldName = fmt.Sprintf(`toString(%s)`, tblFieldName)
		case float64:
			// nothing to do
		}
	case FieldDataTypeBool:
		switch value.(type) {
		case string:
			// cast the bool column to string so it can be compared with the string value
			tblFieldName = fmt.Sprintf(`toString(%s)`, tblFieldName)
		}
	}
	return tblFieldName, value
}
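A short sketch of the collision handling in action: a numeric search value against a string-typed key wraps the column expression so the comparison happens on a nullable float:

	key := &TelemetryFieldKey{
		Name:          "http.status_code",
		FieldContext:  FieldContextAttribute,
		FieldDataType: FieldDataTypeString,
	}
	field, value := DataTypeCollisionHandledFieldName(
		key, float64(200), "attributes_string['http.status_code']",
	)
	// field == "toFloat64OrNull(attributes_string['http.status_code'])"
	// value == 200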
@@ -23,6 +23,14 @@ var (
	FieldDataTypeNumber      = FieldDataType{valuer.NewString("number")}
	FieldDataTypeUnspecified = FieldDataType{valuer.NewString("")}

	FieldDataTypeArrayString  = FieldDataType{valuer.NewString("[]string")}
	FieldDataTypeArrayFloat64 = FieldDataType{valuer.NewString("[]float64")}
	FieldDataTypeArrayBool    = FieldDataType{valuer.NewString("[]bool")}

	// int64 and number are synonyms for float64
	FieldDataTypeArrayInt64  = FieldDataType{valuer.NewString("[]int64")}
	FieldDataTypeArrayNumber = FieldDataType{valuer.NewString("[]number")}

	// Map string representations to FieldDataType values
	// We want to handle all the possible string representations of the data types.
	// Even if the user uses some non-standard representation, we want to be able to
@@ -53,9 +61,43 @@ var (
		"double":  FieldDataTypeNumber,
		"decimal": FieldDataTypeNumber,
		"number":  FieldDataTypeNumber,

		// Array types
		"[]string":  FieldDataTypeArrayString,
		"[]int64":   FieldDataTypeArrayInt64,
		"[]float64": FieldDataTypeArrayFloat64,
		"[]number":  FieldDataTypeArrayNumber,
		"[]bool":    FieldDataTypeArrayBool,

		// c-style array types
		"string[]":  FieldDataTypeArrayString,
		"int64[]":   FieldDataTypeArrayInt64,
		"float64[]": FieldDataTypeArrayFloat64,
		"number[]":  FieldDataTypeArrayNumber,
		"bool[]":    FieldDataTypeArrayBool,
	}

	fieldDataTypeToCHDataType = map[FieldDataType]string{
		FieldDataTypeString:  "String",
		FieldDataTypeBool:    "Bool",
		FieldDataTypeNumber:  "Float64",
		FieldDataTypeInt64:   "Int64",
		FieldDataTypeFloat64: "Float64",

		FieldDataTypeArrayString:  "Array(String)",
		FieldDataTypeArrayInt64:   "Array(Int64)",
		FieldDataTypeArrayFloat64: "Array(Float64)",
		FieldDataTypeArrayBool:    "Array(Bool)",
	}
)

func (f FieldDataType) CHDataType() string {
	if chDataType, ok := fieldDataTypeToCHDataType[f]; ok {
		return chDataType
	}
	return "String"
}

// UnmarshalJSON implements the json.Unmarshaler interface
func (f *FieldDataType) UnmarshalJSON(data []byte) error {
	var str string
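In short, CHDataType resolves each field data type to a ClickHouse column type, with String as the fallback for anything unmapped; for instance:

	_ = FieldDataTypeArrayString.CHDataType() // "Array(String)"
	_ = FieldDataTypeNumber.CHDataType()      // "Float64"
	_ = FieldDataTypeUnspecified.CHDataType() // "String" (the fallback)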
21 pkg/types/telemetrytypes/virtualfield.go Normal file
@@ -0,0 +1,21 @@
package telemetrytypes

import (
	"github.com/SigNoz/signoz/pkg/types"
	"github.com/SigNoz/signoz/pkg/valuer"
	"github.com/uptrace/bun"
)

type VirtualField struct {
	bun.BaseModel `bun:"table:virtual_field"`

	types.Identifiable
	types.TimeAuditable
	types.UserAuditable

	Name        string      `bun:"name,type:text,notnull" json:"name"`
	Expression  string      `bun:"expression,type:text,notnull" json:"expression"`
	Description string      `bun:"description,type:text" json:"description"`
	Signal      Signal      `bun:"signal,type:text,notnull" json:"signal"`
	OrgID       valuer.UUID `bun:"org_id,type:text,notnull" json:"orgId"`
}
48 tests/integration/conftest.py Normal file
@@ -0,0 +1,48 @@
import pytest

pytest_plugins = [
    "fixtures.auth",
    "fixtures.clickhouse",
    "fixtures.fs",
    "fixtures.http",
    "fixtures.migrator",
    "fixtures.network",
    "fixtures.postgres",
    "fixtures.sql",
    "fixtures.sqlite",
    "fixtures.zookeeper",
    "fixtures.signoz",
]


def pytest_addoption(parser: pytest.Parser):
    parser.addoption(
        "--sqlstore-provider",
        action="store",
        default="postgres",
        help="sqlstore provider",
    )
    parser.addoption(
        "--postgres-version",
        action="store",
        default="15",
        help="postgres version",
    )
    parser.addoption(
        "--clickhouse-version",
        action="store",
        default="24.1.2-alpine",
        help="clickhouse version",
    )
    parser.addoption(
        "--zookeeper-version",
        action="store",
        default="3.7.1",
        help="zookeeper version",
    )
    parser.addoption(
        "--schema-migrator-version",
        action="store",
        default="v0.111.38",
        help="schema migrator version",
    )
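Taken together, a local run that exercises these options might look like `pytest tests/integration --sqlstore-provider=postgres --postgres-version=15 --clickhouse-version=24.1.2-alpine --zookeeper-version=3.7.1 --schema-migrator-version=v0.111.38`; the values shown are simply the defaults declared above.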
0 tests/integration/fixtures/__init__.py Normal file
44 tests/integration/fixtures/auth.py Normal file
@@ -0,0 +1,44 @@
from http import HTTPStatus
from typing import Callable

import pytest
import requests

from fixtures import types


@pytest.fixture(name="create_first_user", scope="function")
def create_first_user(signoz: types.SigNoz) -> Callable[[str, str, str], None]:
    def _create_user(name: str, email: str, password: str) -> None:
        response = requests.post(
            signoz.self.host_config.get("/api/v1/register"),
            json={
                "name": name,
                "orgId": "",
                "orgName": "",
                "email": email,
                "password": password,
            },
            timeout=5,
        )

        assert response.status_code == HTTPStatus.OK

    return _create_user


@pytest.fixture(name="get_jwt_token", scope="module")
def get_jwt_token(signoz: types.SigNoz) -> Callable[[str, str], str]:
    def _get_jwt_token(email: str, password: str) -> str:
        response = requests.post(
            signoz.self.host_config.get("/api/v1/login"),
            json={
                "email": email,
                "password": password,
            },
            timeout=5,
        )
        assert response.status_code == HTTPStatus.OK

        return response.json()["accessJwt"]

    return _get_jwt_token
111 tests/integration/fixtures/clickhouse.py Normal file
@@ -0,0 +1,111 @@
import os
from typing import Any, Generator

import clickhouse_driver
import pytest
from testcontainers.clickhouse import ClickHouseContainer
from testcontainers.core.container import Network

from fixtures import types


@pytest.fixture(name="clickhouse", scope="package")
def clickhouse(
    tmpfs: Generator[types.LegacyPath, Any, None],
    network: Network,
    zookeeper: types.TestContainerDocker,
    request: pytest.FixtureRequest,
) -> types.TestContainerClickhouse:
    """
    Package-scoped fixture for Clickhouse TestContainer.
    """
    version = request.config.getoption("--clickhouse-version")

    container = ClickHouseContainer(
        image=f"clickhouse/clickhouse-server:{version}",
        port=9000,
        username="signoz",
        password="password",
    )

    cluster_config = f"""
    <clickhouse>
        <logger>
            <level>information</level>
            <formatting>
                <type>json</type>
            </formatting>
            <log>/var/log/clickhouse-server/clickhouse-server.log</log>
            <errorlog>/var/log/clickhouse-server/clickhouse-server.err.log</errorlog>
            <size>1000M</size>
            <count>3</count>
            <console>1</console>
        </logger>

        <macros>
            <shard>01</shard>
            <replica>01</replica>
        </macros>

        <zookeeper>
            <node>
                <host>{zookeeper.container_config.address}</host>
                <port>{zookeeper.container_config.port}</port>
            </node>
        </zookeeper>

        <remote_servers>
            <cluster>
                <shard>
                    <replica>
                        <host>127.0.0.1</host>
                        <port>9000</port>
                    </replica>
                </shard>
            </cluster>
        </remote_servers>

        <distributed_ddl>
            <path>/clickhouse/task_queue/ddl</path>
            <profile>default</profile>
        </distributed_ddl>
    </clickhouse>
    """

    tmp_dir = tmpfs("clickhouse")
    cluster_config_file_path = os.path.join(tmp_dir, "cluster.xml")
    with open(cluster_config_file_path, "w", encoding="utf-8") as f:
        f.write(cluster_config)

    container.with_volume_mapping(
        cluster_config_file_path, "/etc/clickhouse-server/config.d/cluster.xml"
    )
    container.with_network(network)
    container.start()

    connection = clickhouse_driver.connect(
        user=container.username,
        password=container.password,
        host=container.get_container_host_ip(),
        port=container.get_exposed_port(9000),
    )

    def stop():
        connection.close()
        container.stop(delete_volume=True)

    request.addfinalizer(stop)

    return types.TestContainerClickhouse(
        container=container,
        host_config=types.TestContainerUrlConfig(
            "tcp", container.get_container_host_ip(), container.get_exposed_port(9000)
        ),
        container_config=types.TestContainerUrlConfig(
            "tcp", container.get_wrapped_container().name, 9000
        ),
        conn=connection,
        env={
            "SIGNOZ_TELEMETRYSTORE_CLICKHOUSE_DSN": f"tcp://{container.username}:{container.password}@{container.get_wrapped_container().name}:{9000}"  # pylint: disable=line-too-long
        },
    )
15 tests/integration/fixtures/fs.py Normal file
@@ -0,0 +1,15 @@
from typing import Any, Generator

import pytest

from fixtures import types


@pytest.fixture(scope="package")
def tmpfs(
    tmp_path_factory: pytest.TempPathFactory,
) -> Generator[types.LegacyPath, Any, None]:
    def _tmp(basename: str):
        return tmp_path_factory.mktemp(basename)

    yield _tmp
53 tests/integration/fixtures/http.py Normal file
@@ -0,0 +1,53 @@
from typing import List

import pytest
from testcontainers.core.container import Network
from wiremock.client import (
    Mapping,
    Mappings,
)
from wiremock.constants import Config
from wiremock.testing.testcontainer import WireMockContainer

from fixtures import types


@pytest.fixture(name="zeus", scope="package")
def zeus(
    network: Network, request: pytest.FixtureRequest
) -> types.TestContainerWiremock:
    """
    Package-scoped fixture for running zeus
    """
    container = WireMockContainer(image="wiremock/wiremock:2.35.1-1", secure=False)
    container.with_network(network)

    container.start()

    def stop():
        container.stop(delete_volume=True)

    request.addfinalizer(stop)

    return types.TestContainerWiremock(
        container=container,
        host_config=types.TestContainerUrlConfig(
            "http", container.get_container_host_ip(), container.get_exposed_port(8080)
        ),
        container_config=types.TestContainerUrlConfig(
            "http", container.get_wrapped_container().name, 8080
        ),
    )


@pytest.fixture(name="make_http_mocks", scope="function")
def make_http_mocks():
    def _make_http_mocks(container: WireMockContainer, mappings: List[Mapping]):
        Config.base_url = container.get_url("__admin")

        for mapping in mappings:
            Mappings.create_mapping(mapping=mapping)

    yield _make_http_mocks

    Mappings.delete_all_mappings()
55 tests/integration/fixtures/migrator.py Normal file
@@ -0,0 +1,55 @@
import docker
import pytest
from testcontainers.core.container import Network

from fixtures import types


@pytest.fixture(name="migrator", scope="package")
def migrator(
    network: Network,
    clickhouse: types.TestContainerClickhouse,
    request: pytest.FixtureRequest,
) -> None:
    """
    Package-scoped fixture for running schema migrations.
    """
    version = request.config.getoption("--schema-migrator-version")

    client = docker.from_env()

    container = client.containers.run(
        image=f"signoz/signoz-schema-migrator:{version}",
        command=f"sync --replication=true --cluster-name=cluster --up= --dsn={clickhouse.env['SIGNOZ_TELEMETRYSTORE_CLICKHOUSE_DSN']}",  # pylint: disable=line-too-long
        detach=True,
        auto_remove=False,
        network=network.id,
    )

    result = container.wait()

    if result["StatusCode"] != 0:
        logs = container.logs().decode(encoding="utf-8")
        container.remove()
        print(logs)
        raise RuntimeError("failed to run migrations on clickhouse")

    container.remove()

    container = client.containers.run(
        image=f"signoz/signoz-schema-migrator:{version}",
        command=f"async --replication=true --cluster-name=cluster --up= --dsn={clickhouse.env['SIGNOZ_TELEMETRYSTORE_CLICKHOUSE_DSN']}",  # pylint: disable=line-too-long
        detach=True,
        auto_remove=False,
        network=network.id,
    )

    result = container.wait()

    if result["StatusCode"] != 0:
        logs = container.logs().decode(encoding="utf-8")
        container.remove()
        print(logs)
        raise RuntimeError("failed to run migrations on clickhouse")

    container.remove()
18 tests/integration/fixtures/network.py Normal file
@@ -0,0 +1,18 @@
import pytest
from testcontainers.core.container import Network


@pytest.fixture(name="network", scope="package")
def network(request: pytest.FixtureRequest) -> Network:
    """
    Package-scoped fixture for creating a network.
    """
    nw = Network()
    nw.create()

    def stop():
        nw.remove()

    request.addfinalizer(stop)

    return nw
58 tests/integration/fixtures/postgres.py Normal file
@@ -0,0 +1,58 @@
import psycopg2
import pytest
from testcontainers.core.container import Network
from testcontainers.postgres import PostgresContainer

from fixtures import types


@pytest.fixture(name="postgres", scope="package")
def postgres(
    network: Network, request: pytest.FixtureRequest
) -> types.TestContainerSQL:
    """
    Package-scoped fixture for PostgreSQL TestContainer.
    """
    version = request.config.getoption("--postgres-version")

    container = PostgresContainer(
        image=f"postgres:{version}",
        port=5432,
        username="signoz",
        password="password",
        dbname="signoz",
        driver="psycopg2",
        network=network.id,
    )
    container.start()

    connection = psycopg2.connect(
        dbname=container.dbname,
        user=container.username,
        password=container.password,
        host=container.get_container_host_ip(),
        port=container.get_exposed_port(5432),
    )

    def stop():
        connection.close()
        container.stop(delete_volume=True)

    request.addfinalizer(stop)

    return types.TestContainerSQL(
        container=container,
        host_config=types.TestContainerUrlConfig(
            "postgresql",
            container.get_container_host_ip(),
            container.get_exposed_port(5432),
        ),
        container_config=types.TestContainerUrlConfig(
            "postgresql", container.get_wrapped_container().name, 5432
        ),
        conn=connection,
        env={
            "SIGNOZ_SQLSTORE_PROVIDER": "postgres",
            "SIGNOZ_SQLSTORE_POSTGRES_DSN": f"postgresql://{container.username}:{container.password}@{container.get_wrapped_container().name}:{5432}/{container.dbname}",  # pylint: disable=line-too-long
        },
    )