Compare commits
15 Commits: v0.78.1...chore/rest

| Author | SHA1 | Date |
|---|---|---|
|  | c9f5b16c37 |  |
|  | 55837dacb5 |  |
|  | 14248968cb |  |
|  | a436ae900c |  |
|  | 5dd02a5b8e |  |
|  | c0f01e4cb9 |  |
|  | fed84cb50a |  |
|  | 80545c4d07 |  |
|  | 0b1faec092 |  |
|  | ba6f31b1c3 |  |
|  | eed92978a4 |  |
|  | 41cbd316b5 |  |
|  | 8d7d33393d |  |
|  | 8d143b44b1 |  |
|  | 423aebd6eb |  |
.github/workflows/build-community.yaml (vendored, 18 changes)
```diff
@@ -2,10 +2,9 @@ name: build-community
 
 on:
   push:
-    branches:
-      - main
     tags:
-      - v*
+      - 'v[0-9]+.[0-9]+.[0-9]+'
+      - 'v[0-9]+.[0-9]+.[0-9]+-rc.[0-9]+'
 
 defaults:
   run:
@@ -19,7 +18,6 @@ jobs:
   prepare:
     runs-on: ubuntu-latest
     outputs:
-      docker_providers: ${{ steps.set-docker-providers.outputs.providers }}
       version: ${{ steps.build-info.outputs.version }}
       hash: ${{ steps.build-info.outputs.hash }}
       time: ${{ steps.build-info.outputs.time }}
@@ -38,7 +36,7 @@ jobs:
         uses: actions/checkout@v4
         with:
           repository: signoz/primus
-          ref: ${{ inputs.PRIMUS_REF }}
+          ref: main
           path: .primus
           token: ${{ steps.token.outputs.token }}
       - name: build-info
@@ -47,14 +45,6 @@ jobs:
           echo "hash=$($MAKE info-commit-short)" >> $GITHUB_OUTPUT
           echo "time=$($MAKE info-timestamp)" >> $GITHUB_OUTPUT
           echo "branch=$($MAKE info-branch)" >> $GITHUB_OUTPUT
-      - name: set-docker-providers
-        id: set-docker-providers
-        run: |
-          if [[ ${{ github.event.ref }} =~ ^refs/tags/v[0-9]+\.[0-9]+\.[0-9]+$ || ${{ github.event.ref }} =~ ^refs/tags/v[0-9]+\.[0-9]+\.[0-9]+-rc\.[0-9]+$ ]]; then
-            echo "providers=dockerhub gcp" >> $GITHUB_OUTPUT
-          else
-            echo "providers=gcp" >> $GITHUB_OUTPUT
-          fi
   js-build:
     uses: signoz/primus.workflows/.github/workflows/js-build.yaml@main
     needs: prepare
@@ -88,4 +78,4 @@ jobs:
       DOCKER_BASE_IMAGES: '{"alpine": "alpine:3.20.3"}'
       DOCKER_DOCKERFILE_PATH: ./pkg/query-service/Dockerfile.multi-arch
       DOCKER_MANIFEST: true
-      DOCKER_PROVIDERS: ${{ needs.prepare.outputs.docker_providers }}
+      DOCKER_PROVIDERS: dockerhub
```
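Note: the new tag filters encode the same semver rule that the deleted set-docker-providers step used to check at runtime, which is why the provider choice can now be a constant. A quick Go sketch (tag names are hypothetical) of how refs partition across the two patterns:

```go
package main

import (
	"fmt"
	"regexp"
)

// Mirrors the workflow's tag filters: stable releases and release candidates.
var (
	stableTag = regexp.MustCompile(`^refs/tags/v[0-9]+\.[0-9]+\.[0-9]+$`)
	rcTag     = regexp.MustCompile(`^refs/tags/v[0-9]+\.[0-9]+\.[0-9]+-rc\.[0-9]+$`)
)

func main() {
	// Hypothetical refs for illustration.
	for _, ref := range []string{
		"refs/tags/v0.78.1",      // stable: the old step selected "dockerhub gcp"
		"refs/tags/v0.79.0-rc.1", // release candidate: also "dockerhub gcp"
		"refs/tags/v0.79.0-beta", // matches neither pattern: "gcp" only
		"refs/heads/main",        // branch push: "gcp" only
	} {
		if stableTag.MatchString(ref) || rcTag.MatchString(ref) {
			fmt.Println(ref, "=> dockerhub gcp")
		} else {
			fmt.Println(ref, "=> gcp")
		}
	}
}
```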
.github/workflows/build-enterprise.yaml (vendored, 12 changes)
```diff
@@ -2,8 +2,6 @@ name: build-enterprise
 
 on:
   push:
-    branches:
-      - main
     tags:
       - v*
 
@@ -38,7 +36,7 @@ jobs:
         uses: actions/checkout@v4
         with:
          repository: signoz/primus
-          ref: ${{ inputs.PRIMUS_REF }}
+          ref: main
          path: .primus
          token: ${{ steps.token.outputs.token }}
       - name: build-info
@@ -75,7 +73,7 @@ jobs:
         uses: actions/cache@v4
         with:
           path: frontend/.env
-          key: enterprise-dotenv-${{ github.sha }}
+          key: dotenv-${{ github.sha }}
   js-build:
     uses: signoz/primus.workflows/.github/workflows/js-build.yaml@main
     needs: prepare
@@ -83,9 +81,9 @@ jobs:
     with:
       PRIMUS_REF: main
       JS_SRC: frontend
-      JS_INPUT_ARTIFACT_CACHE_KEY: enterprise-dotenv-${{ github.sha }}
+      JS_INPUT_ARTIFACT_CACHE_KEY: dotenv-${{ github.sha }}
       JS_INPUT_ARTIFACT_PATH: frontend/.env
-      JS_OUTPUT_ARTIFACT_CACHE_KEY: enterprise-jsbuild-${{ github.sha }}
+      JS_OUTPUT_ARTIFACT_CACHE_KEY: jsbuild-${{ github.sha }}
       JS_OUTPUT_ARTIFACT_PATH: frontend/build
       DOCKER_BUILD: false
       DOCKER_MANIFEST: false
@@ -95,7 +93,7 @@ jobs:
     secrets: inherit
     with:
       PRIMUS_REF: main
-      GO_INPUT_ARTIFACT_CACHE_KEY: enterprise-jsbuild-${{ github.sha }}
+      GO_INPUT_ARTIFACT_CACHE_KEY: jsbuild-${{ github.sha }}
       GO_INPUT_ARTIFACT_PATH: frontend/build
       GO_BUILD_CONTEXT: ./ee/query-service
       GO_BUILD_FLAGS: >-
```
.github/workflows/build-staging.yaml (vendored, new file, 131 lines)
```yaml
name: build-staging

on:
  push:
    branches:
      - main
  pull_request:
    types: [labeled]

defaults:
  run:
    shell: bash

env:
  PRIMUS_HOME: .primus
  MAKE: make --no-print-directory --makefile=.primus/src/make/main.mk

jobs:
  prepare:
    runs-on: ubuntu-latest
    if: ${{ contains(github.event.label.name, 'staging:') || github.event.ref == 'refs/heads/main' }}
    outputs:
      version: ${{ steps.build-info.outputs.version }}
      hash: ${{ steps.build-info.outputs.hash }}
      time: ${{ steps.build-info.outputs.time }}
      branch: ${{ steps.build-info.outputs.branch }}
      deployment: ${{ steps.build-info.outputs.deployment }}
    steps:
      - name: self-checkout
        uses: actions/checkout@v4
      - id: token
        name: github-token-gen
        uses: actions/create-github-app-token@v1
        with:
          app-id: ${{ secrets.PRIMUS_APP_ID }}
          private-key: ${{ secrets.PRIMUS_PRIVATE_KEY }}
          owner: ${{ github.repository_owner }}
      - name: primus-checkout
        uses: actions/checkout@v4
        with:
          repository: signoz/primus
          ref: main
          path: .primus
          token: ${{ steps.token.outputs.token }}
      - name: build-info
        id: build-info
        run: |
          echo "version=$($MAKE info-version)" >> $GITHUB_OUTPUT
          echo "hash=$($MAKE info-commit-short)" >> $GITHUB_OUTPUT
          echo "time=$($MAKE info-timestamp)" >> $GITHUB_OUTPUT
          echo "branch=$($MAKE info-branch)" >> $GITHUB_OUTPUT

          staging_label="${{ github.event.label.name }}"
          if [[ "${staging_label}" == "staging:"* ]]; then
            deployment=${staging_label#"staging:"}
          elif [[ "${{ github.event.ref }}" == "refs/heads/main" ]]; then
            deployment="staging"
          else
            echo "error: not able to determine deployment - please verify the PR label or the branch"
            exit 1
          fi
          echo "deployment=${deployment}" >> $GITHUB_OUTPUT
      - name: create-dotenv
        run: |
          mkdir -p frontend
          echo 'CI=1' > frontend/.env
          echo 'INTERCOM_APP_ID="${{ secrets.INTERCOM_APP_ID }}"' >> frontend/.env
          echo 'SEGMENT_ID="${{ secrets.SEGMENT_ID }}"' >> frontend/.env
          echo 'SENTRY_AUTH_TOKEN="${{ secrets.SENTRY_AUTH_TOKEN }}"' >> frontend/.env
          echo 'SENTRY_ORG="${{ secrets.SENTRY_ORG }}"' >> frontend/.env
          echo 'SENTRY_PROJECT_ID="${{ secrets.SENTRY_PROJECT_ID }}"' >> frontend/.env
          echo 'SENTRY_DSN="${{ secrets.SENTRY_DSN }}"' >> frontend/.env
          echo 'TUNNEL_URL="${{ secrets.TUNNEL_URL }}"' >> frontend/.env
          echo 'TUNNEL_DOMAIN="${{ secrets.TUNNEL_DOMAIN }}"' >> frontend/.env
          echo 'POSTHOG_KEY="${{ secrets.POSTHOG_KEY }}"' >> frontend/.env
          echo 'CUSTOMERIO_ID="${{ secrets.CUSTOMERIO_ID }}"' >> frontend/.env
          echo 'CUSTOMERIO_SITE_ID="${{ secrets.CUSTOMERIO_SITE_ID }}"' >> frontend/.env
      - name: cache-dotenv
        uses: actions/cache@v4
        with:
          path: frontend/.env
          key: dotenv-${{ github.sha }}
  js-build:
    uses: signoz/primus.workflows/.github/workflows/js-build.yaml@main
    needs: prepare
    secrets: inherit
    with:
      PRIMUS_REF: main
      JS_SRC: frontend
      JS_INPUT_ARTIFACT_CACHE_KEY: dotenv-${{ github.sha }}
      JS_INPUT_ARTIFACT_PATH: frontend/.env
      JS_OUTPUT_ARTIFACT_CACHE_KEY: jsbuild-${{ github.sha }}
      JS_OUTPUT_ARTIFACT_PATH: frontend/build
      DOCKER_BUILD: false
      DOCKER_MANIFEST: false
  go-build:
    uses: signoz/primus.workflows/.github/workflows/go-build.yaml@main
    needs: [prepare, js-build]
    secrets: inherit
    with:
      PRIMUS_REF: main
      GO_INPUT_ARTIFACT_CACHE_KEY: jsbuild-${{ github.sha }}
      GO_INPUT_ARTIFACT_PATH: frontend/build
      GO_BUILD_CONTEXT: ./ee/query-service
      GO_BUILD_FLAGS: >-
        -tags timetzdata
        -ldflags='-linkmode external -extldflags \"-static\" -s -w
        -X github.com/SigNoz/signoz/pkg/version.version=${{ needs.prepare.outputs.version }}
        -X github.com/SigNoz/signoz/pkg/version.variant=enterprise
        -X github.com/SigNoz/signoz/pkg/version.hash=${{ needs.prepare.outputs.hash }}
        -X github.com/SigNoz/signoz/pkg/version.time=${{ needs.prepare.outputs.time }}
        -X github.com/SigNoz/signoz/pkg/version.branch=${{ needs.prepare.outputs.branch }}
        -X github.com/SigNoz/signoz/ee/query-service/constants.ZeusURL=https://api.staging.signoz.cloud
        -X github.com/SigNoz/signoz/ee/query-service/constants.LicenseSignozIo=https://license.staging.signoz.cloud/api/v1'
      GO_CGO_ENABLED: 1
      DOCKER_BASE_IMAGES: '{"alpine": "alpine:3.20.3"}'
      DOCKER_DOCKERFILE_PATH: ./ee/query-service/Dockerfile.multi-arch
      DOCKER_MANIFEST: true
      DOCKER_PROVIDERS: gcp
  staging:
    if: ${{ contains(github.event.label.name, 'staging:') || github.event.ref == 'refs/heads/main' }}
    uses: signoz/primus.workflows/.github/workflows/github-trigger.yaml@main
    secrets: inherit
    needs: [prepare, go-build]
    with:
      PRIMUS_REF: main
      GITHUB_ENVIRONMENT: staging
      GITHUB_SILENT: true
      GITHUB_REPOSITORY_NAME: charts-saas-v3-staging
      GITHUB_EVENT_NAME: releaser
      GITHUB_EVENT_PAYLOAD: "{\"deployment\": \"${{ needs.prepare.outputs.deployment }}\", \"signoz_version\": \"${{ needs.prepare.outputs.version }}\"}"
```
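Note: the deployment resolution in the build-info step is the piece that routes `staging:<name>` PR labels to named staging environments. The same rule as a small Go sketch (inputs are hypothetical; the workflow itself does this in bash):

```go
package main

import (
	"errors"
	"fmt"
	"strings"
)

// resolveDeployment mirrors the build-info step: a `staging:<name>` PR label
// picks a named staging deployment, a push to main falls back to "staging",
// and anything else is an error.
func resolveDeployment(labelName, ref string) (string, error) {
	if strings.HasPrefix(labelName, "staging:") {
		return strings.TrimPrefix(labelName, "staging:"), nil
	}
	if ref == "refs/heads/main" {
		return "staging", nil
	}
	return "", errors.New("not able to determine deployment - please verify the PR label or the branch")
}

func main() {
	// Hypothetical events for illustration.
	fmt.Println(resolveDeployment("staging:eu-test", ""))    // "eu-test"
	fmt.Println(resolveDeployment("", "refs/heads/main"))    // "staging"
	fmt.Println(resolveDeployment("bugfix", "refs/heads/x")) // error
}
```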
.github/workflows/staging-deployment.yaml (vendored, 13 changes)
```diff
@@ -36,12 +36,17 @@ jobs:
           echo "GITHUB_BRANCH: ${GITHUB_BRANCH}"
           echo "GITHUB_SHA: ${GITHUB_SHA}"
           export VERSION="${GITHUB_SHA:0:7}" # needed for child process to access it
-          export OTELCOL_TAG="main"
           export PATH="/usr/local/go/bin/:$PATH" # needed for Golang to work
           export KAFKA_SPAN_EVAL="true"
-          docker system prune --force
-          docker pull signoz/signoz-otel-collector:main
-          docker pull signoz/signoz-schema-migrator:main
+          docker system prune --force --all
+          OTELCOL_TAG=$(curl -s https://api.github.com/repos/SigNoz/signoz-otel-collector/releases/latest | jq -r '.tag_name // "not-found"')
+          if [[ "${OTELCOL_TAG}" == "not-found" ]]; then
+            echo "warning: unable to determine latest SigNoz OtelCollector release tag, skipping latest otelcol deployment"
+          else
+            export OTELCOL_TAG=${OTELCOL_TAG}
+            docker pull signoz/signoz-otel-collector:${OTELCOL_TAG}
+            docker pull signoz/signoz-schema-migrator:${OTELCOL_TAG}
+          fi
           cd ~/signoz
           git status
           git add .
```
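Note: the curl-plus-jq lookup degrades gracefully when the GitHub API is unavailable, thanks to the `// "not-found"` fallback. The same pattern in Go (the endpoint is the one used above; error-handling choices are illustrative):

```go
package main

import (
	"encoding/json"
	"fmt"
	"net/http"
)

// latestOtelcolTag fetches the newest signoz-otel-collector release tag,
// returning "not-found" when the lookup fails, mirroring the workflow's
// `jq -r '.tag_name // "not-found"'` fallback.
func latestOtelcolTag() string {
	resp, err := http.Get("https://api.github.com/repos/SigNoz/signoz-otel-collector/releases/latest")
	if err != nil {
		return "not-found"
	}
	defer resp.Body.Close()

	var release struct {
		TagName string `json:"tag_name"`
	}
	if err := json.NewDecoder(resp.Body).Decode(&release); err != nil || release.TagName == "" {
		return "not-found"
	}
	return release.TagName
}

func main() {
	if tag := latestOtelcolTag(); tag == "not-found" {
		fmt.Println("warning: unable to determine latest release tag, skipping")
	} else {
		fmt.Printf("docker pull signoz/signoz-otel-collector:%s\n", tag)
	}
}
```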
.github/workflows/testing-deployment.yaml (vendored, 2 changes)
```diff
@@ -38,7 +38,7 @@ jobs:
           export VERSION="${GITHUB_SHA:0:7}" # needed for child process to access it
           export DEV_BUILD="1"
           export PATH="/usr/local/go/bin/:$PATH" # needed for Golang to work
-          docker system prune --force
+          docker system prune --force --all
           cd ~/signoz
           git status
           git add .
```
```diff
@@ -4,7 +4,6 @@ import (
 	"context"
 	"fmt"
 	"net/url"
-	"strings"
 	"time"
 
 	"github.com/SigNoz/signoz/ee/query-service/constants"
@@ -162,12 +161,7 @@ func (m *modelDao) PrecheckLogin(ctx context.Context, email, sourceUrl string) (
 	// find domain from email
 	orgDomain, apierr := m.GetDomainByEmail(ctx, email)
 	if apierr != nil {
-		var emailDomain string
-		emailComponents := strings.Split(email, "@")
-		if len(emailComponents) > 0 {
-			emailDomain = emailComponents[1]
-		}
-		zap.L().Error("failed to get org domain from email", zap.String("emailDomain", emailDomain), zap.Error(apierr.ToError()))
+		zap.L().Error("failed to get org domain from email", zap.String("email", email), zap.Error(apierr.ToError()))
 		return resp, apierr
 	}
```
```diff
@@ -11,9 +11,12 @@ const logEvent = async (
 	rateLimited?: boolean,
 ): Promise<SuccessResponse<EventSuccessPayloadProps> | ErrorResponse> => {
 	try {
+		// add tenant_url to attributes
+		const { hostname } = window.location;
+		const updatedAttributes = { ...attributes, tenant_url: hostname };
 		const response = await axios.post('/event', {
 			eventName,
-			attributes,
+			attributes: updatedAttributes,
 			eventType: eventType || 'track',
 			rateLimited: rateLimited || false, // TODO: Update this once we have a proper way to handle rate limiting
 		});
```
```diff
@@ -8,6 +8,5 @@ export enum FeatureKeys {
 	PREMIUM_SUPPORT = 'PREMIUM_SUPPORT',
 	ANOMALY_DETECTION = 'ANOMALY_DETECTION',
 	ONBOARDING_V3 = 'ONBOARDING_V3',
-	THIRD_PARTY_API = 'THIRD_PARTY_API',
 	TRACE_FUNNELS = 'TRACE_FUNNELS',
 }
```
```diff
@@ -284,16 +284,6 @@ function SideNav(): JSX.Element {
 		manageLicenseMenuItem,
 	];
 
-	const isApiMonitoringEnabled = featureFlags?.find(
-		(flag) => flag.name === FeatureKeys.THIRD_PARTY_API,
-	)?.active;
-
-	if (!isApiMonitoringEnabled) {
-		updatedMenuItems = updatedMenuItems.filter(
-			(item) => item.key !== ROUTES.API_MONITORING,
-		);
-	}
-
 	if (isCloudUser || isEnterpriseSelfHostedUser) {
 		const isOnboardingEnabled =
 			featureFlags?.find((feature) => feature.name === FeatureKeys.ONBOARDING)
```
```diff
@@ -8,6 +8,7 @@ import (
 	"strings"
 	"time"
 
+	"github.com/SigNoz/signoz/pkg/query-service/app/cloudintegrations/services"
 	"github.com/SigNoz/signoz/pkg/query-service/model"
 	"github.com/SigNoz/signoz/pkg/sqlstore"
 	"github.com/SigNoz/signoz/pkg/types"
@@ -27,7 +28,7 @@ func validateCloudProviderName(name string) *model.ApiError {
 
 type Controller struct {
 	accountsRepo cloudProviderAccountsRepository
-	serviceConfigRepo serviceConfigRepository
+	serviceConfigRepo ServiceConfigDatabase
 }
 
 func NewController(sqlStore sqlstore.SQLStore) (
@@ -200,7 +201,7 @@ type AgentCheckInResponse struct {
 type IntegrationConfigForAgent struct {
 	EnabledRegions []string `json:"enabled_regions"`
 
-	TelemetryCollectionStrategy *CloudTelemetryCollectionStrategy `json:"telemetry,omitempty"`
+	TelemetryCollectionStrategy *CompiledCollectionStrategy `json:"telemetry,omitempty"`
 }
 
 func (c *Controller) CheckInAsAgent(
@@ -239,7 +240,7 @@ func (c *Controller) CheckInAsAgent(
 	}
 
 	// prepare and return integration config to be consumed by agent
-	telemetryCollectionStrategy, err := NewCloudTelemetryCollectionStrategy(cloudProvider)
+	compliedStrategy, err := NewCompiledCollectionStrategy(cloudProvider)
 	if err != nil {
 		return nil, model.InternalError(fmt.Errorf(
 			"couldn't init telemetry collection strategy: %w", err,
@@ -248,21 +249,17 @@ func (c *Controller) CheckInAsAgent(
 
 	agentConfig := IntegrationConfigForAgent{
 		EnabledRegions: []string{},
-		TelemetryCollectionStrategy: telemetryCollectionStrategy,
+		TelemetryCollectionStrategy: compliedStrategy,
 	}
 
 	if account.Config != nil && account.Config.EnabledRegions != nil {
 		agentConfig.EnabledRegions = account.Config.EnabledRegions
 	}
 
-	services, apiErr := listCloudProviderServices(cloudProvider)
+	services, apiErr := services.Map(cloudProvider)
 	if apiErr != nil {
 		return nil, model.WrapApiError(apiErr, "couldn't list cloud services")
 	}
-	svcDetailsById := map[string]*CloudServiceDetails{}
-	for _, svcDetails := range services {
-		svcDetailsById[svcDetails.Id] = &svcDetails
-	}
 
 	svcConfigs, apiErr := c.serviceConfigRepo.getAllForAccount(
 		ctx, cloudProvider, *account.CloudAccountId,
@@ -278,22 +275,16 @@ func (c *Controller) CheckInAsAgent(
 	slices.Sort(configuredSvcIds)
 
 	for _, svcId := range configuredSvcIds {
-		svcDetails := svcDetailsById[svcId]
-		svcConfig := svcConfigs[svcId]
+		definition, ok := services[svcId]
+		if !ok {
+			continue
+		}
+		config := svcConfigs[svcId]
 
-		if svcDetails != nil {
-			metricsEnabled := svcConfig.Metrics != nil && svcConfig.Metrics.Enabled
-			logsEnabled := svcConfig.Logs != nil && svcConfig.Logs.Enabled
-			if logsEnabled || metricsEnabled {
-				err := agentConfig.TelemetryCollectionStrategy.AddServiceStrategy(
-					svcDetails.TelemetryCollectionStrategy, logsEnabled, metricsEnabled,
-				)
-				if err != nil {
-					return nil, model.InternalError(fmt.Errorf(
-						"couldn't add service telemetry collection strategy: %w", err,
-					))
-				}
-			}
-		}
+		err := AddServiceStrategy(compliedStrategy, definition.Strategy, config)
+		if err != nil {
+			return nil, model.InternalError(
+				fmt.Errorf("couldn't add service telemetry collection strategy: %s", err))
+		}
 	}
 
@@ -355,7 +346,7 @@ func (c *Controller) DisconnectAccount(
 }
 
 type ListServicesResponse struct {
-	Services []CloudServiceSummary `json:"services"`
+	Services []ServiceSummary `json:"services"`
 }
 
 func (c *Controller) ListServices(
@@ -363,17 +354,16 @@ func (c *Controller) ListServices(
 	cloudProvider string,
 	cloudAccountId *string,
 ) (*ListServicesResponse, *model.ApiError) {
-
 	if apiErr := validateCloudProviderName(cloudProvider); apiErr != nil {
 		return nil, apiErr
 	}
 
-	services, apiErr := listCloudProviderServices(cloudProvider)
+	definitions, apiErr := services.List(cloudProvider)
 	if apiErr != nil {
 		return nil, model.WrapApiError(apiErr, "couldn't list cloud services")
 	}
 
-	svcConfigs := map[string]*CloudServiceConfig{}
+	svcConfigs := map[string]*ServiceConfig{}
 	if cloudAccountId != nil {
 		svcConfigs, apiErr = c.serviceConfigRepo.getAllForAccount(
 			ctx, cloudProvider, *cloudAccountId,
@@ -385,9 +375,11 @@ func (c *Controller) ListServices(
 		}
 	}
 
-	summaries := []CloudServiceSummary{}
-	for _, s := range services {
-		summary := s.CloudServiceSummary
+	summaries := []ServiceSummary{}
+	for _, def := range definitions {
+		summary := ServiceSummary{
+			Metadata: def.Metadata,
+		}
 		summary.Config = svcConfigs[summary.Id]
 
 		summaries = append(summaries, summary)
@@ -403,17 +395,21 @@ func (c *Controller) GetServiceDetails(
 	cloudProvider string,
 	serviceId string,
 	cloudAccountId *string,
-) (*CloudServiceDetails, *model.ApiError) {
+) (*ServiceDetails, *model.ApiError) {
 
 	if apiErr := validateCloudProviderName(cloudProvider); apiErr != nil {
 		return nil, apiErr
 	}
 
-	service, apiErr := getCloudProviderService(cloudProvider, serviceId)
+	definition, apiErr := services.GetServiceDefinition(cloudProvider, serviceId)
 	if apiErr != nil {
 		return nil, apiErr
 	}
 
+	details := ServiceDetails{
+		Definition: *definition,
+	}
+
 	if cloudAccountId != nil {
 		config, apiErr := c.serviceConfigRepo.get(
 			ctx, cloudProvider, *cloudAccountId, serviceId,
@@ -423,15 +419,15 @@ func (c *Controller) GetServiceDetails(
 		}
 
 		if config != nil {
-			service.Config = config
+			details.Config = config
 
 			if config.Metrics != nil && config.Metrics.Enabled {
 				// add links to service dashboards, making them clickable.
-				for i, d := range service.Assets.Dashboards {
+				for i, d := range details.Assets.Dashboards {
 					dashboardUuid := c.dashboardUuid(
 						cloudProvider, serviceId, d.Id,
 					)
-					service.Assets.Dashboards[i].Url = fmt.Sprintf(
+					details.Assets.Dashboards[i].Url = fmt.Sprintf(
 						"/dashboard/%s", dashboardUuid,
 					)
 				}
@@ -439,17 +435,17 @@ func (c *Controller) GetServiceDetails(
 		}
 	}
 
-	return service, nil
+	return &details, nil
 }
 
 type UpdateServiceConfigRequest struct {
-	CloudAccountId string `json:"cloud_account_id"`
-	Config CloudServiceConfig `json:"config"`
+	CloudAccountId string `json:"cloud_account_id"`
+	Config ServiceConfig `json:"config"`
 }
 
 type UpdateServiceConfigResponse struct {
-	Id string `json:"id"`
-	Config CloudServiceConfig `json:"config"`
+	Id string `json:"id"`
+	Config ServiceConfig `json:"config"`
 }
 
 func (c *Controller) UpdateServiceConfig(
@@ -458,7 +454,6 @@ func (c *Controller) UpdateServiceConfig(
 	serviceId string,
 	req UpdateServiceConfigRequest,
 ) (*UpdateServiceConfigResponse, *model.ApiError) {
-
 	if apiErr := validateCloudProviderName(cloudProvider); apiErr != nil {
 		return nil, apiErr
 	}
@@ -472,7 +467,7 @@ func (c *Controller) UpdateServiceConfig(
 	}
 
 	// can only update config for a valid service.
-	_, apiErr = getCloudProviderService(cloudProvider, serviceId)
+	_, apiErr = services.GetServiceDefinition(cloudProvider, serviceId)
 	if apiErr != nil {
 		return nil, model.WrapApiError(apiErr, "unsupported service")
 	}
@@ -540,7 +535,7 @@ func (c *Controller) AvailableDashboardsForCloudProvider(
 		}
 	}
 
-	allServices, apiErr := listCloudProviderServices(cloudProvider)
+	allServices, apiErr := services.List(cloudProvider)
 	if apiErr != nil {
 		return nil, apiErr
 	}
```
```diff
@@ -182,8 +182,8 @@ func TestConfigureService(t *testing.T) {
 	require.NotNil(testConnectedAccount.CloudAccountId)
 	require.Equal(testCloudAccountId, *testConnectedAccount.CloudAccountId)
 
-	testSvcConfig := CloudServiceConfig{
-		Metrics: &CloudServiceMetricsConfig{
+	testSvcConfig := ServiceConfig{
+		Metrics: &MetricsConfig{
 			Enabled: true,
 		},
 	}
```
```diff
@@ -6,9 +6,22 @@ import (
 	"fmt"
 	"time"
 
-	"github.com/SigNoz/signoz/pkg/types"
+	"github.com/SigNoz/signoz/pkg/query-service/app/cloudintegrations/services"
 )
 
+type ServiceSummary struct {
+	services.Metadata
+
+	Config *ServiceConfig `json:"config"`
+}
+
+type ServiceDetails struct {
+	services.Definition
+
+	Config *ServiceConfig `json:"config"`
+	ConnectionStatus *ServiceConnectionStatus `json:"status,omitempty"`
+}
+
 // Represents a cloud provider account for cloud integrations
 type AccountRecord struct {
 	CloudProvider string `json:"cloud_provider" db:"cloud_provider"`
@@ -118,39 +131,13 @@ func (a *AccountRecord) account() Account {
 	return ca
 }
 
-type CloudServiceSummary struct {
-	Id string `json:"id"`
-	Title string `json:"title"`
-	Icon string `json:"icon"`
-
-	// Present only if the service has been configured in the
-	// context of a cloud provider account.
-	Config *CloudServiceConfig `json:"config,omitempty"`
-}
-
-type CloudServiceDetails struct {
-	CloudServiceSummary
-
-	Overview string `json:"overview"` // markdown
-
-	Assets CloudServiceAssets `json:"assets"`
-
-	SupportedSignals SupportedSignals `json:"supported_signals"`
-
-	DataCollected DataCollectedForService `json:"data_collected"`
-
-	ConnectionStatus *CloudServiceConnectionStatus `json:"status,omitempty"`
-
-	TelemetryCollectionStrategy *CloudTelemetryCollectionStrategy `json:"telemetry_collection_strategy"`
-}
-
-type CloudServiceConfig struct {
-	Logs *CloudServiceLogsConfig `json:"logs,omitempty"`
-	Metrics *CloudServiceMetricsConfig `json:"metrics,omitempty"`
+type ServiceConfig struct {
+	Logs *LogsConfig `json:"logs,omitempty"`
+	Metrics *MetricsConfig `json:"metrics,omitempty"`
 }
 
-// For serializing from db
-func (c *CloudServiceConfig) Scan(src any) error {
+// Serialization from db
+func (c *ServiceConfig) Scan(src any) error {
 	data, ok := src.([]byte)
 	if !ok {
 		return fmt.Errorf("tried to scan from %T instead of bytes", src)
@@ -159,8 +146,8 @@ func (c *CloudServiceConfig) Scan(src any) error {
 	return json.Unmarshal(data, &c)
 }
 
-// For serializing to db
-func (c *CloudServiceConfig) Value() (driver.Value, error) {
+// Serializing to db
+func (c *ServiceConfig) Value() (driver.Value, error) {
 	if c == nil {
 		return nil, nil
 	}
@@ -174,51 +161,16 @@ func (c *CloudServiceConfig) Value() (driver.Value, error) {
 	return serialized, nil
 }
 
-type CloudServiceLogsConfig struct {
+type LogsConfig struct {
 	Enabled bool `json:"enabled"`
 	S3Buckets map[string][]string `json:"s3_buckets,omitempty"`
 }
 
-type CloudServiceMetricsConfig struct {
-	Enabled bool `json:"enabled"`
-}
-
-type CloudServiceAssets struct {
-	Dashboards []CloudServiceDashboard `json:"dashboards"`
-}
-
-type CloudServiceDashboard struct {
-	Id string `json:"id"`
-	Url string `json:"url"`
-	Title string `json:"title"`
-	Description string `json:"description"`
-	Image string `json:"image"`
-	Definition *types.DashboardData `json:"definition,omitempty"`
-}
-
-type SupportedSignals struct {
-	Logs bool `json:"logs"`
-	Metrics bool `json:"metrics"`
-}
-
-type DataCollectedForService struct {
-	Logs []CollectedLogAttribute `json:"logs"`
-	Metrics []CollectedMetric `json:"metrics"`
-}
-
-type CollectedLogAttribute struct {
-	Name string `json:"name"`
-	Path string `json:"path"`
-	Type string `json:"type"`
-}
-
-type CollectedMetric struct {
-	Name string `json:"name"`
-	Type string `json:"type"`
-	Unit string `json:"unit"`
-	Description string `json:"description"`
-}
-
-type CloudServiceConnectionStatus struct {
+type MetricsConfig struct {
+	Enabled bool `json:"enabled"`
+}
+
+type ServiceConnectionStatus struct {
 	Logs *SignalConnectionStatus `json:"logs"`
 	Metrics *SignalConnectionStatus `json:"metrics"`
 }
@@ -228,23 +180,14 @@ type SignalConnectionStatus struct {
 	LastReceivedFrom string `json:"last_received_from"` // resource identifier
 }
 
-type CloudTelemetryCollectionStrategy struct {
-	Provider string `json:"provider"`
-
-	AWSMetrics *AWSMetricsCollectionStrategy `json:"aws_metrics,omitempty"`
-	AWSLogs *AWSLogsCollectionStrategy `json:"aws_logs,omitempty"`
-}
+type CompiledCollectionStrategy = services.CollectionStrategy
 
-func NewCloudTelemetryCollectionStrategy(provider string) (*CloudTelemetryCollectionStrategy, error) {
+func NewCompiledCollectionStrategy(provider string) (*CompiledCollectionStrategy, error) {
 	if provider == "aws" {
-		return &CloudTelemetryCollectionStrategy{
-			Provider: "aws",
-			AWSMetrics: &AWSMetricsCollectionStrategy{
-				CloudwatchMetricsStreamFilters: []CloudwatchMetricStreamFilter{},
-			},
-			AWSLogs: &AWSLogsCollectionStrategy{
-				CloudwatchLogsSubscriptions: []CloudwatchLogsSubscriptionConfig{},
-			},
+		return &CompiledCollectionStrategy{
+			Provider: "aws",
+			AWSMetrics: &services.AWSMetricsStrategy{},
+			AWSLogs: &services.AWSLogsStrategy{},
 		}, nil
 	}
 
@@ -252,24 +195,27 @@ func NewCloudTelemetryCollectionStrategy(provider string) (*CloudTelemetryCollec
 }
 
 // Helper for accumulating strategies for enabled services.
-func (cs *CloudTelemetryCollectionStrategy) AddServiceStrategy(
-	svcStrategy *CloudTelemetryCollectionStrategy,
-	logsEnabled bool,
-	metricsEnabled bool,
-) error {
-	if svcStrategy.Provider != cs.Provider {
+func AddServiceStrategy(cs *CompiledCollectionStrategy,
+	definitionStrat *services.CollectionStrategy, config *ServiceConfig) error {
+	if definitionStrat.Provider != cs.Provider {
 		return fmt.Errorf(
 			"can't add %s service strategy to strategy for %s",
-			svcStrategy.Provider, cs.Provider,
+			definitionStrat.Provider, cs.Provider,
 		)
 	}
 
 	if cs.Provider == "aws" {
-		if logsEnabled {
-			cs.AWSLogs.AddServiceStrategy(svcStrategy.AWSLogs)
+		if config.Logs != nil && config.Logs.Enabled && definitionStrat.AWSLogs != nil {
+			cs.AWSLogs.Subscriptions = append(
+				cs.AWSLogs.Subscriptions,
+				definitionStrat.AWSLogs.Subscriptions...,
+			)
 		}
-		if metricsEnabled {
-			cs.AWSMetrics.AddServiceStrategy(svcStrategy.AWSMetrics)
+		if config.Metrics != nil && config.Metrics.Enabled && definitionStrat.AWSMetrics != nil {
+			cs.AWSMetrics.StreamFilters = append(
+				cs.AWSMetrics.StreamFilters,
+				definitionStrat.AWSMetrics.StreamFilters...,
+			)
 		}
 		return nil
 	}
@@ -277,57 +223,3 @@ func (cs *CloudTelemetryCollectionStrategy) AddServiceStrategy(
 	return fmt.Errorf("unsupported cloud provider: %s", cs.Provider)
 
 }
-
-type AWSMetricsCollectionStrategy struct {
-	// to be used as https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-cloudwatch-metricstream.html#cfn-cloudwatch-metricstream-includefilters
-	CloudwatchMetricsStreamFilters []CloudwatchMetricStreamFilter `json:"cloudwatch_metric_stream_filters"`
-}
-
-type CloudwatchMetricStreamFilter struct {
-	// json tags here are in the shape expected by AWS API as detailed at
-	// https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-cloudwatch-metricstream-metricstreamfilter.html
-	Namespace string `json:"Namespace"`
-	MetricNames []string `json:"MetricNames,omitempty"`
-}
-
-func (amc *AWSMetricsCollectionStrategy) AddServiceStrategy(
-	svcStrategy *AWSMetricsCollectionStrategy,
-) error {
-	if svcStrategy == nil {
-		return nil
-	}
-
-	amc.CloudwatchMetricsStreamFilters = append(
-		amc.CloudwatchMetricsStreamFilters,
-		svcStrategy.CloudwatchMetricsStreamFilters...,
-	)
-	return nil
-}
-
-type AWSLogsCollectionStrategy struct {
-	CloudwatchLogsSubscriptions []CloudwatchLogsSubscriptionConfig `json:"cloudwatch_logs_subscriptions"`
-}
-
-type CloudwatchLogsSubscriptionConfig struct {
-	// subscribe to all logs groups with specified prefix.
-	// eg: `/aws/rds/`
-	LogGroupNamePrefix string `json:"log_group_name_prefix"`
-
-	// https://docs.aws.amazon.com/AmazonCloudWatch/latest/logs/FilterAndPatternSyntax.html
-	// "" implies no filtering is required.
-	FilterPattern string `json:"filter_pattern"`
-}
-
-func (alc *AWSLogsCollectionStrategy) AddServiceStrategy(
-	svcStrategy *AWSLogsCollectionStrategy,
-) error {
-	if svcStrategy == nil {
-		return nil
-	}
-
-	alc.CloudwatchLogsSubscriptions = append(
-		alc.CloudwatchLogsSubscriptions,
-		svcStrategy.CloudwatchLogsSubscriptions...,
-	)
-	return nil
-}
```
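Note: the refactor replaces the per-type AddServiceStrategy methods with a single free function driven by the per-account ServiceConfig. A hedged sketch of the intended call pattern using the types from this PR (the JSON literal is a hypothetical definition strategy; import paths assume the package layout shown here):

```go
package main

import (
	"encoding/json"
	"fmt"
	"log"

	"github.com/SigNoz/signoz/pkg/query-service/app/cloudintegrations"
	"github.com/SigNoz/signoz/pkg/query-service/app/cloudintegrations/services"
)

func main() {
	// Start from an empty compiled strategy for the provider.
	compiled, err := cloudintegrations.NewCompiledCollectionStrategy("aws")
	if err != nil {
		log.Fatal(err)
	}

	// Hypothetical definition strategy, in the shape a service's
	// telemetry_collection_strategy block would be parsed into.
	var defStrategy services.CollectionStrategy
	if err := json.Unmarshal([]byte(`{
		"provider": "aws",
		"aws_logs": {
			"cloudwatch_logs_subscriptions": [
				{"log_group_name_prefix": "/aws/rds/", "filter_pattern": ""}
			]
		}
	}`), &defStrategy); err != nil {
		log.Fatal(err)
	}

	// Only services enabled in the per-account config contribute strategies.
	cfg := &cloudintegrations.ServiceConfig{
		Logs: &cloudintegrations.LogsConfig{Enabled: true},
	}
	if err := cloudintegrations.AddServiceStrategy(compiled, &defStrategy, cfg); err != nil {
		log.Fatal(err)
	}
	fmt.Println(len(compiled.AWSLogs.Subscriptions)) // 1: the rds subscription was folded in
}
```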
```diff
@@ -9,28 +9,28 @@ import (
 	"github.com/jmoiron/sqlx"
 )
 
-type serviceConfigRepository interface {
+type ServiceConfigDatabase interface {
 	get(
 		ctx context.Context,
 		cloudProvider string,
 		cloudAccountId string,
 		serviceId string,
-	) (*CloudServiceConfig, *model.ApiError)
+	) (*ServiceConfig, *model.ApiError)
 
 	upsert(
 		ctx context.Context,
 		cloudProvider string,
 		cloudAccountId string,
 		serviceId string,
-		config CloudServiceConfig,
-	) (*CloudServiceConfig, *model.ApiError)
+		config ServiceConfig,
+	) (*ServiceConfig, *model.ApiError)
 
 	getAllForAccount(
 		ctx context.Context,
 		cloudProvider string,
 		cloudAccountId string,
 	) (
-		configsBySvcId map[string]*CloudServiceConfig,
+		configsBySvcId map[string]*ServiceConfig,
 		apiErr *model.ApiError,
 	)
 }
@@ -52,9 +52,9 @@ func (r *serviceConfigSQLRepository) get(
 	cloudProvider string,
 	cloudAccountId string,
 	serviceId string,
-) (*CloudServiceConfig, *model.ApiError) {
+) (*ServiceConfig, *model.ApiError) {
 
-	var result CloudServiceConfig
+	var result ServiceConfig
 
 	err := r.db.GetContext(
 		ctx, &result, `
@@ -68,17 +68,13 @@ func (r *serviceConfigSQLRepository) get(
 		`,
 		cloudProvider, cloudAccountId, serviceId,
 	)
-	if err != nil {
-		if err == sql.ErrNoRows {
-			return nil, model.NotFoundError(
-				fmt.Errorf("couldn't find %s %s config for %s", cloudProvider, serviceId, cloudAccountId))
-		}
-
-		return nil, model.InternalError(fmt.Errorf(
-			"couldn't query cloud service config: %w", err,
-		))
+	if err == sql.ErrNoRows {
+		return nil, model.NotFoundError(fmt.Errorf(
+			"couldn't find %s %s config for %s",
+			cloudProvider, serviceId, cloudAccountId,
+		))
+	} else if err != nil {
+		return nil, model.InternalError(fmt.Errorf("couldn't query cloud service config: %w", err))
 	}
 
 	return &result, nil
@@ -90,8 +86,8 @@ func (r *serviceConfigSQLRepository) upsert(
 	cloudProvider string,
 	cloudAccountId string,
 	serviceId string,
-	config CloudServiceConfig,
-) (*CloudServiceConfig, *model.ApiError) {
+	config ServiceConfig,
+) (*ServiceConfig, *model.ApiError) {
 
 	query := `
 		INSERT INTO cloud_integrations_service_configs (
@@ -128,11 +124,11 @@ func (r *serviceConfigSQLRepository) getAllForAccount(
 	ctx context.Context,
 	cloudProvider string,
 	cloudAccountId string,
-) (map[string]*CloudServiceConfig, *model.ApiError) {
+) (map[string]*ServiceConfig, *model.ApiError) {
 
 	type ScannedServiceConfigRecord struct {
-		ServiceId string `db:"service_id"`
-		Config CloudServiceConfig `db:"config_json"`
+		ServiceId string `db:"service_id"`
+		Config ServiceConfig `db:"config_json"`
 	}
 
 	records := []ScannedServiceConfigRecord{}
@@ -155,7 +151,7 @@ func (r *serviceConfigSQLRepository) getAllForAccount(
 		))
 	}
 
-	result := map[string]*CloudServiceConfig{}
+	result := map[string]*ServiceConfig{}
 
 	for _, r := range records {
 		result[r.ServiceId] = &r.Config
```
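Note: ServiceConfig round-trips through the database as a JSON blob via database/sql's Scanner and Valuer interfaces. A self-contained sketch of the same pattern (local stand-in type, not the PR's):

```go
package main

import (
	"database/sql/driver"
	"encoding/json"
	"fmt"
)

// Mirrors ServiceConfig's Scanner/Valuer pattern from this PR:
// the struct is persisted as a JSON blob in a text/blob column.
type serviceConfig struct {
	Logs *struct {
		Enabled bool `json:"enabled"`
	} `json:"logs,omitempty"`
}

// Scan deserializes the raw column bytes on read.
func (c *serviceConfig) Scan(src any) error {
	data, ok := src.([]byte)
	if !ok {
		return fmt.Errorf("tried to scan from %T instead of bytes", src)
	}
	return json.Unmarshal(data, c)
}

// Value serializes the struct to JSON on write.
func (c *serviceConfig) Value() (driver.Value, error) {
	if c == nil {
		return nil, nil
	}
	return json.Marshal(c)
}

func main() {
	in := &serviceConfig{}
	_ = json.Unmarshal([]byte(`{"logs":{"enabled":true}}`), in)

	v, _ := in.Value()
	out := &serviceConfig{}
	_ = out.Scan(v.([]byte))
	fmt.Println(out.Logs.Enabled) // true
}
```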
12 binary image assets listed with identical Before/After sizes (125 KiB, 1.1 KiB, 198 KiB, 2.4 KiB, 94 KiB, 805 B, 131 KiB, 965 B, 371 KiB, 6.0 KiB, 63 KiB, 2.7 KiB): content unchanged, presumably relocated with the serviceDefinitions to definitions rename.
New file (1 line, 2.3 KiB): AWS Simple Storage Service (S3) icon, referenced as file://icon.svg by the s3sync definition below.

```xml
<svg width="256" height="256" viewBox="0 0 256 256" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" preserveAspectRatio="xMidYMid"><title>AWS Simple Storage Service (S3)</title><defs><linearGradient x1="0%" y1="100%" x2="100%" y2="0%" id="a"><stop stop-color="#1B660F" offset="0%"/><stop stop-color="#6CAE3E" offset="100%"/></linearGradient></defs><g><rect fill="url(#a)" width="256" height="256"/><path d="M194.67488,137.25632 L195.90368,128.60352 C207.23488,135.39072 207.38208,138.19392 207.378964,138.27072 C207.35968,138.28672 205.42688,139.89952 194.67488,137.25632 L194.67488,137.25632 Z M188.45728,135.52832 C168.87328,129.60192 141.59968,117.08992 130.56288,111.87392 C130.56288,111.82912 130.57568,111.78752 130.57568,111.74272 C130.57568,107.50272 127.12608,104.05312 122.88288,104.05312 C118.64608,104.05312 115.19648,107.50272 115.19648,111.74272 C115.19648,115.98272 118.64608,119.43232 122.88288,119.43232 C124.74528,119.43232 126.43488,118.73792 127.76928,117.63392 C140.75488,123.78112 167.81728,136.11072 187.54528,141.93472 L179.74368,196.99392 C179.72128,197.14432 179.71168,197.29472 179.71168,197.44512 C179.71168,202.29312 158.24928,211.19872 123.18048,211.19872 C87.74048,211.19872 66.05088,202.29312 66.05088,197.44512 C66.05088,197.29792 66.04128,197.15392 66.02208,197.00992 L49.72128,77.94752 C63.83008,87.65952 94.17568,92.79872 123.19968,92.79872 C152.17888,92.79872 182.47328,87.67872 196.61088,77.99552 L188.45728,135.52832 Z M47.99968,65.52832 C48.23008,61.31712 72.42848,44.79872 123.19968,44.79872 C173.96448,44.79872 198.16608,61.31392 198.39968,65.52832 L198.39968,66.96512 C195.61568,76.40832 164.25568,86.39872 123.19968,86.39872 C82.07328,86.39872 50.69728,76.37632 47.99968,66.92032 L47.99968,65.52832 Z M204.79968,65.59872 C204.79968,54.51072 173.01088,38.39872 123.19968,38.39872 C73.38848,38.39872 41.59968,54.51072 41.59968,65.59872 L41.90048,68.01152 L59.65408,197.68832 C60.07968,212.19072 98.75488,217.59872 123.18048,217.59872 C153.49088,217.59872 185.69248,210.62912 186.10848,197.69792 L193.77568,143.62752 C198.04128,144.64832 201.55168,145.16992 204.37088,145.16992 C208.15648,145.16992 210.71648,144.24512 212.26848,142.39552 C213.54208,140.87872 214.02848,139.04192 213.66368,137.08672 C212.83488,132.65792 207.57728,127.88352 196.87008,121.77472 L204.47328,68.13632 L204.79968,65.59872 Z" fill="#FFFFFF"/></g></svg>
```
New file (49 lines): integration.json for the s3sync service definition.

```json
{
    "id": "s3sync",
    "title": "S3 Sync",
    "icon": "file://icon.svg",
    "overview": "file://overview.md",
    "supported_signals": {
        "metrics": false,
        "logs": true
    },
    "data_collected": {
        "logs": [
            {
                "name": "Account ID",
                "path": "resources.aws.account.id",
                "type": "string"
            },
            {
                "name": "Account Region",
                "path": "resources.aws.account.region",
                "type": "string"
            },
            {
                "name": "Bucket Name",
                "path": "resources.aws.bucket.name",
                "type": "string"
            },
            {
                "name": "Object Key",
                "path": "attributes.aws.object.key",
                "type": "string"
            },
            {
                "name": "Object S3 Event Time",
                "path": "attributes.aws.object.event_time",
                "type": "string"
            },
            {
                "name": "Log line number in source file",
                "path": "attributes.log.source.line",
                "type": "number"
            }
        ]
    },
    "telemetry_collection_strategy": {
    },
    "assets": {
        "dashboards": []
    }
}
```
New file (3 lines): overview.md for the s3sync service.

```markdown
### Sync logs stored in AWS S3 with SigNoz

Collect logs stored by AWS Services in S3 and explore them in SigNoz.
```
pkg/query-service/app/cloudintegrations/services/models.go (new file, 94 lines)
```go
package services

import (
	"github.com/SigNoz/signoz/pkg/types"
)

type Metadata struct {
	Id    string `json:"id"`
	Title string `json:"title"`
	Icon  string `json:"icon"`

	// Present only if the service has been configured in the
	// context of a cloud provider account.
	// Config *CloudServiceConfig `json:"config,omitempty"`
}

type Definition struct {
	Metadata

	Overview string `json:"overview"` // markdown

	Assets Assets `json:"assets"`

	SupportedSignals SupportedSignals `json:"supported_signals"`

	DataCollected DataCollected `json:"data_collected"`

	Strategy *CollectionStrategy `json:"telemetry_collection_strategy"`
}

type Assets struct {
	Dashboards []Dashboard `json:"dashboards"`
}

type SupportedSignals struct {
	Logs    bool `json:"logs"`
	Metrics bool `json:"metrics"`
}

type DataCollected struct {
	Logs    []CollectedLogAttribute `json:"logs"`
	Metrics []CollectedMetric       `json:"metrics"`
}

type CollectedLogAttribute struct {
	Name string `json:"name"`
	Path string `json:"path"`
	Type string `json:"type"`
}

type CollectedMetric struct {
	Name        string `json:"name"`
	Type        string `json:"type"`
	Unit        string `json:"unit"`
	Description string `json:"description"`
}

type CollectionStrategy struct {
	Provider string `json:"provider"`

	AWSMetrics *AWSMetricsStrategy `json:"aws_metrics,omitempty"`
	AWSLogs    *AWSLogsStrategy    `json:"aws_logs,omitempty"`
}

type AWSMetricsStrategy struct {
	// to be used as https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-cloudwatch-metricstream.html#cfn-cloudwatch-metricstream-includefilters
	StreamFilters []struct {
		// json tags here are in the shape expected by AWS API as detailed at
		// https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-cloudwatch-metricstream-metricstreamfilter.html
		Namespace   string   `json:"Namespace"`
		MetricNames []string `json:"MetricNames,omitempty"`
	} `json:"cloudwatch_metric_stream_filters"`
}

type AWSLogsStrategy struct {
	Subscriptions []struct {
		// subscribe to all logs groups with specified prefix.
		// eg: `/aws/rds/`
		LogGroupNamePrefix string `json:"log_group_name_prefix"`

		// https://docs.aws.amazon.com/AmazonCloudWatch/latest/logs/FilterAndPatternSyntax.html
		// "" implies no filtering is required.
		FilterPattern string `json:"filter_pattern"`
	} `json:"cloudwatch_logs_subscriptions"`
}

type Dashboard struct {
	Id          string               `json:"id"`
	Url         string               `json:"url"`
	Title       string               `json:"title"`
	Description string               `json:"description"`
	Image       string               `json:"image"`
	Definition  *types.DashboardData `json:"definition,omitempty"`
}
```
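Note: Definition's JSON tags are shaped to match integration.json files like the s3sync one above. A hedged sketch of parsing a trimmed definition (in the real loader, file:// URIs are hydrated into inline content before this step):

```go
package main

import (
	"encoding/json"
	"fmt"
	"log"

	"github.com/SigNoz/signoz/pkg/query-service/app/cloudintegrations/services"
)

func main() {
	// Trimmed version of the s3sync integration.json shown earlier.
	raw := []byte(`{
		"id": "s3sync",
		"title": "S3 Sync",
		"supported_signals": {"metrics": false, "logs": true},
		"telemetry_collection_strategy": {}
	}`)

	var def services.Definition
	if err := json.Unmarshal(raw, &def); err != nil {
		log.Fatal(err)
	}
	fmt.Println(def.Id, def.SupportedSignals.Logs) // s3sync true
}
```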
```diff
@@ -1,4 +1,4 @@
-package cloudintegrations
+package services
 
 import (
 	"bytes"
@@ -15,11 +15,11 @@ import (
 	"golang.org/x/exp/maps"
 )
 
-func listCloudProviderServices(
+func List(
 	cloudProvider string,
-) ([]CloudServiceDetails, *model.ApiError) {
-	cloudServices := availableServices[cloudProvider]
-	if cloudServices == nil {
+) ([]Definition, *model.ApiError) {
+	cloudServices, found := supportedServices[cloudProvider]
+	if !found || cloudServices == nil {
 		return nil, model.NotFoundError(fmt.Errorf(
 			"unsupported cloud provider: %s", cloudProvider,
 		))
@@ -33,10 +33,21 @@
 	return services, nil
 }
 
-func getCloudProviderService(
+func Map(cloudprovider string) (map[string]Definition, *model.ApiError) {
+	cloudServices, found := supportedServices[cloudprovider]
+	if !found || cloudServices == nil {
+		return nil, model.NotFoundError(fmt.Errorf(
+			"unsupported cloud provider: %s", cloudprovider,
+		))
+	}
+
+	return cloudServices, nil
+}
+
+func GetServiceDefinition(
 	cloudProvider string, serviceId string,
-) (*CloudServiceDetails, *model.ApiError) {
-	cloudServices := availableServices[cloudProvider]
+) (*Definition, *model.ApiError) {
+	cloudServices := supportedServices[cloudProvider]
 	if cloudServices == nil {
 		return nil, model.NotFoundError(fmt.Errorf(
 			"unsupported cloud provider: %s", cloudProvider,
@@ -57,7 +68,7 @@ func getCloudProviderService(
 
 // Service details read from ./serviceDefinitions
 // { "providerName": { "service_id": {...}} }
-var availableServices map[string]map[string]CloudServiceDetails
+var supportedServices map[string]map[string]Definition
 
 func init() {
 	err := readAllServiceDefinitions()
@@ -68,15 +79,15 @@ func init() {
 	}
 }
 
-//go:embed serviceDefinitions/*
-var serviceDefinitionFiles embed.FS
+//go:embed definitions/*
+var definitionFiles embed.FS
 
 func readAllServiceDefinitions() error {
-	availableServices = map[string]map[string]CloudServiceDetails{}
+	supportedServices = map[string]map[string]Definition{}
 
-	rootDirName := "serviceDefinitions"
+	rootDirName := "definitions"
 
-	cloudProviderDirs, err := fs.ReadDir(serviceDefinitionFiles, rootDirName)
+	cloudProviderDirs, err := fs.ReadDir(definitionFiles, rootDirName)
 	if err != nil {
 		return fmt.Errorf("couldn't read dirs in %s: %w", rootDirName, err)
 	}
@@ -98,21 +109,21 @@ func readAllServiceDefinitions() error {
 			return fmt.Errorf("no %s services could be read", cloudProvider)
 		}
 
-		availableServices[cloudProvider] = cloudServices
+		supportedServices[cloudProvider] = cloudServices
 	}
 
 	return nil
 }
 
 func readServiceDefinitionsFromDir(cloudProvider string, cloudProviderDirPath string) (
-	map[string]CloudServiceDetails, error,
+	map[string]Definition, error,
 ) {
-	svcDefDirs, err := fs.ReadDir(serviceDefinitionFiles, cloudProviderDirPath)
+	svcDefDirs, err := fs.ReadDir(definitionFiles, cloudProviderDirPath)
 	if err != nil {
 		return nil, fmt.Errorf("couldn't list integrations dirs: %w", err)
 	}
 
-	svcDefs := map[string]CloudServiceDetails{}
+	svcDefs := map[string]Definition{}
 
 	for _, d := range svcDefDirs {
 		if !d.IsDir() {
@@ -137,10 +148,10 @@ func readServiceDefinitionsFromDir(cloudProvider string, cloudProviderDirPath st
 	return svcDefs, nil
 }
 
-func readServiceDefinition(cloudProvider string, svcDirpath string) (*CloudServiceDetails, error) {
+func readServiceDefinition(cloudProvider string, svcDirpath string) (*Definition, error) {
 	integrationJsonPath := path.Join(svcDirpath, "integration.json")
 
-	serializedSpec, err := serviceDefinitionFiles.ReadFile(integrationJsonPath)
+	serializedSpec, err := definitionFiles.ReadFile(integrationJsonPath)
 	if err != nil {
 		return nil, fmt.Errorf(
 			"couldn't find integration.json in %s: %w",
@@ -157,7 +168,7 @@ func readServiceDefinition(cloudProvider string, svcDirpath string) (*CloudServi
 	}
 
 	hydrated, err := integrations.HydrateFileUris(
-		integrationSpec, serviceDefinitionFiles, svcDirpath,
+		integrationSpec, definitionFiles, svcDirpath,
 	)
 	if err != nil {
 		return nil, fmt.Errorf(
@@ -167,7 +178,7 @@ func readServiceDefinition(cloudProvider string, svcDirpath string) (*CloudServi
 	}
 	hydratedSpec := hydrated.(map[string]any)
 
-	serviceDef, err := ParseStructWithJsonTagsFromMap[CloudServiceDetails](hydratedSpec)
+	serviceDef, err := ParseStructWithJsonTagsFromMap[Definition](hydratedSpec)
 	if err != nil {
 		return nil, fmt.Errorf(
 			"couldn't parse hydrated JSON spec read from %s: %w",
@@ -180,13 +191,13 @@ func readServiceDefinition(cloudProvider string, svcDirpath string) (*CloudServi
 		return nil, fmt.Errorf("invalid service definition %s: %w", serviceDef.Id, err)
 	}
 
-	serviceDef.TelemetryCollectionStrategy.Provider = cloudProvider
+	serviceDef.Strategy.Provider = cloudProvider
 
 	return serviceDef, nil
 
 }
 
-func validateServiceDefinition(s *CloudServiceDetails) error {
+func validateServiceDefinition(s *Definition) error {
 	// Validate dashboard data
 	seenDashboardIds := map[string]interface{}{}
 	for _, dd := range s.Assets.Dashboards {
@@ -196,7 +207,7 @@ func validateServiceDefinition(s *CloudServiceDetails) error {
 		seenDashboardIds[dd.Id] = nil
 	}
 
-	if s.TelemetryCollectionStrategy == nil {
+	if s.Strategy == nil {
 		return fmt.Errorf("telemetry_collection_strategy is required")
 	}
```
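Note: the loader still embeds definitions at compile time and walks them at init; only the directory name and type names changed. A minimal standalone sketch of the pattern (hypothetical embedded tree):

```go
package main

import (
	"embed"
	"fmt"
	"io/fs"
	"log"
)

// Hypothetical embedded tree mirroring definitions/<provider>/<service>/integration.json.
//
//go:embed definitions/*
var definitionFiles embed.FS

func main() {
	// Walk provider directories the way readAllServiceDefinitions does.
	providers, err := fs.ReadDir(definitionFiles, "definitions")
	if err != nil {
		log.Fatalf("couldn't read dirs in definitions: %v", err)
	}
	for _, p := range providers {
		if !p.IsDir() {
			continue
		}
		svcs, _ := fs.ReadDir(definitionFiles, "definitions/"+p.Name())
		fmt.Println(p.Name(), "has", len(svcs), "service definitions")
	}
}
```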
```diff
@@ -1,4 +1,4 @@
-package cloudintegrations
+package services
 
 import (
 	"testing"
@@ -11,22 +11,22 @@ func TestAvailableServices(t *testing.T) {
 	require := require.New(t)
 
 	// should be able to list available services.
-	_, apiErr := listCloudProviderServices("bad-cloud-provider")
+	_, apiErr := List("bad-cloud-provider")
 	require.NotNil(apiErr)
 	require.Equal(model.ErrorNotFound, apiErr.Type())
 
-	awsSvcs, apiErr := listCloudProviderServices("aws")
+	awsSvcs, apiErr := List("aws")
 	require.Nil(apiErr)
 	require.Greater(len(awsSvcs), 0)
 
 	// should be able to get details of a service
-	_, apiErr = getCloudProviderService(
+	_, apiErr = GetServiceDefinition(
 		"aws", "bad-service-id",
 	)
 	require.NotNil(apiErr)
 	require.Equal(model.ErrorNotFound, apiErr.Type())
 
-	svc, apiErr := getCloudProviderService(
+	svc, apiErr := GetServiceDefinition(
 		"aws", awsSvcs[0].Id,
 	)
 	require.Nil(apiErr)
```
```diff
@@ -22,6 +22,7 @@ import (
 	errorsV2 "github.com/SigNoz/signoz/pkg/errors"
 	"github.com/SigNoz/signoz/pkg/http/render"
 	"github.com/SigNoz/signoz/pkg/modules/preference"
+	"github.com/SigNoz/signoz/pkg/query-service/app/cloudintegrations/services"
 	"github.com/SigNoz/signoz/pkg/query-service/app/metricsexplorer"
 	"github.com/SigNoz/signoz/pkg/signoz"
 	"github.com/SigNoz/signoz/pkg/valuer"
@@ -3998,8 +3999,8 @@ func (aH *APIHandler) calculateCloudIntegrationServiceConnectionStatus(
 	ctx context.Context,
 	cloudProvider string,
 	cloudAccountId string,
-	svcDetails *cloudintegrations.CloudServiceDetails,
-) (*cloudintegrations.CloudServiceConnectionStatus, *model.ApiError) {
+	svcDetails *cloudintegrations.ServiceDetails,
+) (*cloudintegrations.ServiceConnectionStatus, *model.ApiError) {
 	if cloudProvider != "aws" {
 		// TODO(Raj): Make connection check generic for all providers in a follow up change
 		return nil, model.BadRequest(
@@ -4007,14 +4008,14 @@ func (aH *APIHandler) calculateCloudIntegrationServiceConnectionStatus(
 		)
 	}
 
-	telemetryCollectionStrategy := svcDetails.TelemetryCollectionStrategy
+	telemetryCollectionStrategy := svcDetails.Strategy
 	if telemetryCollectionStrategy == nil {
 		return nil, model.InternalError(fmt.Errorf(
 			"service doesn't have telemetry collection strategy: %s", svcDetails.Id,
 		))
 	}
 
-	result := &cloudintegrations.CloudServiceConnectionStatus{}
+	result := &cloudintegrations.ServiceConnectionStatus{}
 	errors := []*model.ApiError{}
 	var resultLock sync.Mutex
 
@@ -4074,10 +4075,10 @@ func (aH *APIHandler) calculateCloudIntegrationServiceConnectionStatus(
 func (aH *APIHandler) calculateAWSIntegrationSvcMetricsConnectionStatus(
 	ctx context.Context,
 	cloudAccountId string,
-	strategy *cloudintegrations.AWSMetricsCollectionStrategy,
-	metricsCollectedBySvc []cloudintegrations.CollectedMetric,
+	strategy *services.AWSMetricsStrategy,
+	metricsCollectedBySvc []services.CollectedMetric,
 ) (*cloudintegrations.SignalConnectionStatus, *model.ApiError) {
-	if strategy == nil || len(strategy.CloudwatchMetricsStreamFilters) < 1 {
+	if strategy == nil || len(strategy.StreamFilters) < 1 {
 		return nil, nil
 	}
 
@@ -4086,7 +4087,7 @@ func (aH *APIHandler) calculateAWSIntegrationSvcMetricsConnectionStatus(
 		"cloud_account_id": cloudAccountId,
 	}
 
-	metricsNamespace := strategy.CloudwatchMetricsStreamFilters[0].Namespace
+	metricsNamespace := strategy.StreamFilters[0].Namespace
 	metricsNamespaceParts := strings.Split(metricsNamespace, "/")
 
 	if len(metricsNamespaceParts) >= 2 {
@@ -4123,13 +4124,13 @@ func (aH *APIHandler) calculateAWSIntegrationSvcMetricsConnectionStatus(
 func (aH *APIHandler) calculateAWSIntegrationSvcLogsConnectionStatus(
 	ctx context.Context,
 	cloudAccountId string,
-	strategy *cloudintegrations.AWSLogsCollectionStrategy,
+	strategy *services.AWSLogsStrategy,
 ) (*cloudintegrations.SignalConnectionStatus, *model.ApiError) {
-	if strategy == nil || len(strategy.CloudwatchLogsSubscriptions) < 1 {
+	if strategy == nil || len(strategy.Subscriptions) < 1 {
 		return nil, nil
 	}
 
-	logGroupNamePrefix := strategy.CloudwatchLogsSubscriptions[0].LogGroupNamePrefix
+	logGroupNamePrefix := strategy.Subscriptions[0].LogGroupNamePrefix
 	if len(logGroupNamePrefix) < 1 {
 		return nil, nil
 	}
```
```diff
@@ -51,6 +51,7 @@ func NewTestSqliteDB(t *testing.T) (sqlStore sqlstore.SQLStore, testDBFilePath s
 			sqlmigration.NewUpdateDashboardAndSavedViewsFactory(sqlStore),
 			sqlmigration.NewUpdatePatAndOrgDomainsFactory(sqlStore),
 			sqlmigration.NewUpdatePipelines(sqlStore),
+			sqlmigration.NewAddVirtualFieldsFactory(),
 		),
 	)
 	if err != nil {
```
```diff
@@ -69,6 +69,7 @@ func NewSQLMigrationProviderFactories(sqlstore sqlstore.SQLStore) factory.NamedM
 		sqlmigration.NewUpdatePreferencesFactory(sqlstore),
 		sqlmigration.NewUpdateApdexTtlFactory(sqlstore),
 		sqlmigration.NewUpdateResetPasswordFactory(sqlstore),
+		sqlmigration.NewAddVirtualFieldsFactory(),
 	)
 }
```
pkg/sqlmigration/025_add_virtual_fields.go (new file, 58 lines)
@@ -0,0 +1,58 @@
|
||||
package sqlmigration
|
||||
|
||||
import (
|
||||
"context"
|
||||
|
||||
"github.com/SigNoz/signoz/pkg/factory"
|
||||
"github.com/SigNoz/signoz/pkg/types"
|
||||
"github.com/SigNoz/signoz/pkg/types/telemetrytypes"
|
||||
"github.com/uptrace/bun"
|
||||
"github.com/uptrace/bun/migrate"
|
||||
)
|
||||
|
||||
type addVirtualFields struct{}
|
||||
|
||||
func NewAddVirtualFieldsFactory() factory.ProviderFactory[SQLMigration, Config] {
|
||||
return factory.NewProviderFactory(factory.MustNewName("add_virtual_fields"), newAddVirtualFields)
|
||||
}
|
||||
|
||||
func newAddVirtualFields(_ context.Context, _ factory.ProviderSettings, _ Config) (SQLMigration, error) {
|
||||
return &addVirtualFields{}, nil
|
||||
}
|
||||
|
||||
func (migration *addVirtualFields) Register(migrations *migrate.Migrations) error {
|
||||
if err := migrations.Register(migration.Up, migration.Down); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (migration *addVirtualFields) Up(ctx context.Context, db *bun.DB) error {
|
||||
// table:virtual_field op:create
|
||||
if _, err := db.NewCreateTable().
|
||||
Model(&struct {
|
||||
bun.BaseModel `bun:"table:virtual_field"`
|
||||
|
||||
types.Identifiable
|
||||
types.TimeAuditable
|
||||
types.UserAuditable
|
||||
|
||||
Name string `bun:"name,type:text,notnull"`
|
||||
Expression string `bun:"expression,type:text,notnull"`
|
||||
Description string `bun:"description,type:text"`
|
||||
Signal telemetrytypes.Signal `bun:"signal,type:text,notnull"`
|
||||
OrgID string `bun:"org_id,type:text,notnull"`
|
||||
}{}).
|
||||
ForeignKey(`("org_id") REFERENCES "organizations" ("id") ON DELETE CASCADE`).
|
||||
IfNotExists().
|
||||
Exec(ctx); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (migration *addVirtualFields) Down(ctx context.Context, db *bun.DB) error {
|
||||
return nil
|
||||
}
|
||||
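For orientation, the inline struct used by Up above also describes the row shape of the new table. Below is a minimal sketch of how a row could be written with bun; `virtualField` and `insertVirtualField` are hypothetical names for illustration, and the exact id/audit columns contributed by types.Identifiable, types.TimeAuditable, and types.UserAuditable are assumed, not shown in this diff.

// virtualField mirrors the inline struct used by the Up migration above
// (hypothetical helper, for illustration only).
type virtualField struct {
	bun.BaseModel `bun:"table:virtual_field"`

	types.Identifiable // assumed to contribute the id column
	types.TimeAuditable // assumed to contribute created_at / updated_at
	types.UserAuditable // assumed to contribute created_by / updated_by

	Name        string                `bun:"name,type:text,notnull"`
	Expression  string                `bun:"expression,type:text,notnull"`
	Description string                `bun:"description,type:text"`
	Signal      telemetrytypes.Signal `bun:"signal,type:text,notnull"`
	OrgID       string                `bun:"org_id,type:text,notnull"`
}

// insertVirtualField persists one virtual field definition.
func insertVirtualField(ctx context.Context, db *bun.DB, vf *virtualField) error {
	_, err := db.NewInsert().Model(vf).Exec(ctx)
	return err
}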
275
pkg/telemetrylogs/condition_builder.go
Normal file
@@ -0,0 +1,275 @@
package telemetrylogs

import (
	"context"
	"fmt"

	schema "github.com/SigNoz/signoz-otel-collector/cmd/signozschemamigrator/schema_migrator"
	qbtypes "github.com/SigNoz/signoz/pkg/types/querybuildertypes/querybuildertypesv5"
	"github.com/SigNoz/signoz/pkg/types/telemetrytypes"

	"github.com/huandu/go-sqlbuilder"
)

var (
	logsV2Columns = map[string]*schema.Column{
		"ts_bucket_start":      {Name: "ts_bucket_start", Type: schema.ColumnTypeUInt64},
		"resource_fingerprint": {Name: "resource_fingerprint", Type: schema.ColumnTypeString},

		"timestamp":          {Name: "timestamp", Type: schema.ColumnTypeUInt64},
		"observed_timestamp": {Name: "observed_timestamp", Type: schema.ColumnTypeUInt64},
		"id":                 {Name: "id", Type: schema.ColumnTypeString},
		"trace_id":           {Name: "trace_id", Type: schema.ColumnTypeString},
		"span_id":            {Name: "span_id", Type: schema.ColumnTypeString},
		"trace_flags":        {Name: "trace_flags", Type: schema.ColumnTypeUInt32},
		"severity_text":      {Name: "severity_text", Type: schema.LowCardinalityColumnType{ElementType: schema.ColumnTypeString}},
		"severity_number":    {Name: "severity_number", Type: schema.ColumnTypeUInt8},
		"body":               {Name: "body", Type: schema.ColumnTypeString},
		"attributes_string": {Name: "attributes_string", Type: schema.MapColumnType{
			KeyType:   schema.LowCardinalityColumnType{ElementType: schema.ColumnTypeString},
			ValueType: schema.ColumnTypeString,
		}},
		"attributes_number": {Name: "attributes_number", Type: schema.MapColumnType{
			KeyType:   schema.LowCardinalityColumnType{ElementType: schema.ColumnTypeString},
			ValueType: schema.ColumnTypeFloat64,
		}},
		"attributes_bool": {Name: "attributes_bool", Type: schema.MapColumnType{
			KeyType:   schema.LowCardinalityColumnType{ElementType: schema.ColumnTypeString},
			ValueType: schema.ColumnTypeBool,
		}},
		"resources_string": {Name: "resources_string", Type: schema.MapColumnType{
			KeyType:   schema.LowCardinalityColumnType{ElementType: schema.ColumnTypeString},
			ValueType: schema.ColumnTypeString,
		}},
		"scope_name":    {Name: "scope_name", Type: schema.ColumnTypeString},
		"scope_version": {Name: "scope_version", Type: schema.ColumnTypeString},
		"scope_string": {Name: "scope_string", Type: schema.MapColumnType{
			KeyType:   schema.LowCardinalityColumnType{ElementType: schema.ColumnTypeString},
			ValueType: schema.ColumnTypeString,
		}},
	}
)

var _ qbtypes.ConditionBuilder = &conditionBuilder{}

type conditionBuilder struct{}

func NewConditionBuilder() qbtypes.ConditionBuilder {
	return &conditionBuilder{}
}

func (c *conditionBuilder) GetColumn(ctx context.Context, key *telemetrytypes.TelemetryFieldKey) (*schema.Column, error) {
	switch key.FieldContext {
	case telemetrytypes.FieldContextResource:
		return logsV2Columns["resources_string"], nil
	case telemetrytypes.FieldContextScope:
		switch key.Name {
		case "name", "scope.name", "scope_name":
			return logsV2Columns["scope_name"], nil
		case "version", "scope.version", "scope_version":
			return logsV2Columns["scope_version"], nil
		}
		return logsV2Columns["scope_string"], nil
	case telemetrytypes.FieldContextAttribute:
		switch key.FieldDataType {
		case telemetrytypes.FieldDataTypeString:
			return logsV2Columns["attributes_string"], nil
		case telemetrytypes.FieldDataTypeInt64, telemetrytypes.FieldDataTypeFloat64, telemetrytypes.FieldDataTypeNumber:
			return logsV2Columns["attributes_number"], nil
		case telemetrytypes.FieldDataTypeBool:
			return logsV2Columns["attributes_bool"], nil
		}
	case telemetrytypes.FieldContextLog:
		col, ok := logsV2Columns[key.Name]
		if !ok {
			return nil, qbtypes.ErrColumnNotFound
		}
		return col, nil
	}

	return nil, qbtypes.ErrColumnNotFound
}

func (c *conditionBuilder) GetTableFieldName(ctx context.Context, key *telemetrytypes.TelemetryFieldKey) (string, error) {
	column, err := c.GetColumn(ctx, key)
	if err != nil {
		return "", err
	}

	switch column.Type {
	case schema.ColumnTypeString,
		schema.LowCardinalityColumnType{ElementType: schema.ColumnTypeString},
		schema.ColumnTypeUInt64,
		schema.ColumnTypeUInt32,
		schema.ColumnTypeUInt8:
		return column.Name, nil
	case schema.MapColumnType{
		KeyType:   schema.LowCardinalityColumnType{ElementType: schema.ColumnTypeString},
		ValueType: schema.ColumnTypeString,
	}:
		// a key could have been materialized; if so, return the materialized column name
		if key.Materialized {
			return telemetrytypes.FieldKeyToMaterializedColumnName(key), nil
		}
		return fmt.Sprintf("%s['%s']", column.Name, key.Name), nil
	case schema.MapColumnType{
		KeyType:   schema.LowCardinalityColumnType{ElementType: schema.ColumnTypeString},
		ValueType: schema.ColumnTypeFloat64,
	}:
		// a key could have been materialized; if so, return the materialized column name
		if key.Materialized {
			return telemetrytypes.FieldKeyToMaterializedColumnName(key), nil
		}
		return fmt.Sprintf("%s['%s']", column.Name, key.Name), nil
	case schema.MapColumnType{
		KeyType:   schema.LowCardinalityColumnType{ElementType: schema.ColumnTypeString},
		ValueType: schema.ColumnTypeBool,
	}:
		// a key could have been materialized; if so, return the materialized column name
		if key.Materialized {
			return telemetrytypes.FieldKeyToMaterializedColumnName(key), nil
		}
		return fmt.Sprintf("%s['%s']", column.Name, key.Name), nil
	}
	// should not reach here
	return column.Name, nil
}

func (c *conditionBuilder) GetCondition(
	ctx context.Context,
	key *telemetrytypes.TelemetryFieldKey,
	operator qbtypes.FilterOperator,
	value any,
	sb *sqlbuilder.SelectBuilder,
) (string, error) {
	column, err := c.GetColumn(ctx, key)
	if err != nil {
		return "", err
	}

	tblFieldName, err := c.GetTableFieldName(ctx, key)
	if err != nil {
		return "", err
	}

	tblFieldName, value = telemetrytypes.DataTypeCollisionHandledFieldName(key, value, tblFieldName)

	switch operator {
	// regular operators
	case qbtypes.FilterOperatorEqual:
		return sb.E(tblFieldName, value), nil
	case qbtypes.FilterOperatorNotEqual:
		return sb.NE(tblFieldName, value), nil
	case qbtypes.FilterOperatorGreaterThan:
		return sb.G(tblFieldName, value), nil
	case qbtypes.FilterOperatorGreaterThanOrEq:
		return sb.GE(tblFieldName, value), nil
	case qbtypes.FilterOperatorLessThan:
		return sb.LT(tblFieldName, value), nil
	case qbtypes.FilterOperatorLessThanOrEq:
		return sb.LE(tblFieldName, value), nil

	// like and not like
	case qbtypes.FilterOperatorLike:
		return sb.Like(tblFieldName, value), nil
	case qbtypes.FilterOperatorNotLike:
		return sb.NotLike(tblFieldName, value), nil
	case qbtypes.FilterOperatorILike:
		return sb.ILike(tblFieldName, value), nil
	case qbtypes.FilterOperatorNotILike:
		return sb.NotILike(tblFieldName, value), nil

	case qbtypes.FilterOperatorContains:
		return sb.ILike(tblFieldName, fmt.Sprintf("%%%s%%", value)), nil
	case qbtypes.FilterOperatorNotContains:
		return sb.NotILike(tblFieldName, fmt.Sprintf("%%%s%%", value)), nil

	case qbtypes.FilterOperatorRegexp:
		exp := fmt.Sprintf(`match(%s, %s)`, tblFieldName, sb.Var(value))
		return sb.And(exp), nil
	case qbtypes.FilterOperatorNotRegexp:
		exp := fmt.Sprintf(`not match(%s, %s)`, tblFieldName, sb.Var(value))
		return sb.And(exp), nil

	// between and not between
	case qbtypes.FilterOperatorBetween:
		values, ok := value.([]any)
		if !ok {
			return "", qbtypes.ErrBetweenValues
		}
		if len(values) != 2 {
			return "", qbtypes.ErrBetweenValues
		}
		return sb.Between(tblFieldName, values[0], values[1]), nil
	case qbtypes.FilterOperatorNotBetween:
		values, ok := value.([]any)
		if !ok {
			return "", qbtypes.ErrBetweenValues
		}
		if len(values) != 2 {
			return "", qbtypes.ErrBetweenValues
		}
		return sb.NotBetween(tblFieldName, values[0], values[1]), nil

	// in and not in
	case qbtypes.FilterOperatorIn:
		values, ok := value.([]any)
		if !ok {
			return "", qbtypes.ErrInValues
		}
		return sb.In(tblFieldName, values...), nil
	case qbtypes.FilterOperatorNotIn:
		values, ok := value.([]any)
		if !ok {
			return "", qbtypes.ErrInValues
		}
		return sb.NotIn(tblFieldName, values...), nil

	// exists and not exists
	// but how could you live and have no story to tell
	// in the UI-based query builder, `exists` and `not exists` are used for
	// key membership checks, so depending on the column type, the condition changes
	case qbtypes.FilterOperatorExists, qbtypes.FilterOperatorNotExists:
		var value any
		switch column.Type {
		case schema.ColumnTypeString, schema.LowCardinalityColumnType{ElementType: schema.ColumnTypeString}:
			value = ""
			if operator == qbtypes.FilterOperatorExists {
				return sb.NE(tblFieldName, value), nil
			} else {
				return sb.E(tblFieldName, value), nil
			}
		case schema.ColumnTypeUInt64, schema.ColumnTypeUInt32, schema.ColumnTypeUInt8:
			value = 0
			if operator == qbtypes.FilterOperatorExists {
				return sb.NE(tblFieldName, value), nil
			} else {
				return sb.E(tblFieldName, value), nil
			}
		case schema.MapColumnType{
			KeyType:   schema.LowCardinalityColumnType{ElementType: schema.ColumnTypeString},
			ValueType: schema.ColumnTypeString,
		}, schema.MapColumnType{
			KeyType:   schema.LowCardinalityColumnType{ElementType: schema.ColumnTypeString},
			ValueType: schema.ColumnTypeBool,
		}, schema.MapColumnType{
			KeyType:   schema.LowCardinalityColumnType{ElementType: schema.ColumnTypeString},
			ValueType: schema.ColumnTypeFloat64,
		}:
			leftOperand := fmt.Sprintf("mapContains(%s, '%s')", column.Name, key.Name)
			if key.Materialized {
				leftOperand = telemetrytypes.FieldKeyToMaterializedColumnNameForExists(key)
			}
			if operator == qbtypes.FilterOperatorExists {
				return sb.E(leftOperand, true), nil
			} else {
				return sb.NE(leftOperand, true), nil
			}
		default:
			return "", fmt.Errorf("exists operator is not supported for column type %s", column.Type)
		}
	}
	return "", fmt.Errorf("unsupported operator: %v", operator)
}
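Taken together, GetColumn, GetTableFieldName, and GetCondition turn a telemetry field key plus a filter operator into a ClickHouse predicate bound to a sqlbuilder statement. Below is a minimal usage sketch mirroring the test flow in the file that follows; the table name is composed from the constants added in pkg/telemetrylogs/tables.go later in this diff, and the exact rendered SQL may shift depending on what DataTypeCollisionHandledFieldName does with the value (not shown in this diff).

package main

import (
	"context"
	"fmt"

	"github.com/SigNoz/signoz/pkg/telemetrylogs"
	qbtypes "github.com/SigNoz/signoz/pkg/types/querybuildertypes/querybuildertypesv5"
	"github.com/SigNoz/signoz/pkg/types/telemetrytypes"
	"github.com/huandu/go-sqlbuilder"
)

func main() {
	cb := telemetrylogs.NewConditionBuilder()

	// A resource attribute resolves to the resources_string map column.
	key := &telemetrytypes.TelemetryFieldKey{
		Name:         "service.name",
		FieldContext: telemetrytypes.FieldContextResource,
	}

	sb := sqlbuilder.NewSelectBuilder()
	sb.Select("body").From(telemetrylogs.DBName + "." + telemetrylogs.LogsV2TableName)

	cond, err := cb.GetCondition(context.Background(), key, qbtypes.FilterOperatorEqual, "checkout", sb)
	if err != nil {
		panic(err)
	}
	sb.Where(cond)

	sql, args := sb.BuildWithFlavor(sqlbuilder.ClickHouse)
	fmt.Println(sql, args)
	// roughly: SELECT body FROM signoz_logs.distributed_logs_v2
	//          WHERE resources_string['service.name'] = ?   ["checkout"]
}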
620
pkg/telemetrylogs/condition_builder_test.go
Normal file
@@ -0,0 +1,620 @@
package telemetrylogs

import (
	"context"
	"testing"

	schema "github.com/SigNoz/signoz-otel-collector/cmd/signozschemamigrator/schema_migrator"
	qbtypes "github.com/SigNoz/signoz/pkg/types/querybuildertypes/querybuildertypesv5"
	"github.com/SigNoz/signoz/pkg/types/telemetrytypes"
	"github.com/huandu/go-sqlbuilder"
	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"
)

func TestGetColumn(t *testing.T) {
	ctx := context.Background()
	conditionBuilder := NewConditionBuilder()

	testCases := []struct {
		name          string
		key           telemetrytypes.TelemetryFieldKey
		expectedCol   *schema.Column
		expectedError error
	}{
		{
			name: "Resource field",
			key: telemetrytypes.TelemetryFieldKey{
				Name:         "service.name",
				FieldContext: telemetrytypes.FieldContextResource,
			},
			expectedCol:   logsV2Columns["resources_string"],
			expectedError: nil,
		},
		{
			name: "Scope field - scope name",
			key: telemetrytypes.TelemetryFieldKey{
				Name:         "name",
				FieldContext: telemetrytypes.FieldContextScope,
			},
			expectedCol:   logsV2Columns["scope_name"],
			expectedError: nil,
		},
		{
			name: "Scope field - scope.name",
			key: telemetrytypes.TelemetryFieldKey{
				Name:         "scope.name",
				FieldContext: telemetrytypes.FieldContextScope,
			},
			expectedCol:   logsV2Columns["scope_name"],
			expectedError: nil,
		},
		{
			name: "Scope field - scope_name",
			key: telemetrytypes.TelemetryFieldKey{
				Name:         "scope_name",
				FieldContext: telemetrytypes.FieldContextScope,
			},
			expectedCol:   logsV2Columns["scope_name"],
			expectedError: nil,
		},
		{
			name: "Scope field - version",
			key: telemetrytypes.TelemetryFieldKey{
				Name:         "version",
				FieldContext: telemetrytypes.FieldContextScope,
			},
			expectedCol:   logsV2Columns["scope_version"],
			expectedError: nil,
		},
		{
			name: "Scope field - other scope field",
			key: telemetrytypes.TelemetryFieldKey{
				Name:         "custom.scope.field",
				FieldContext: telemetrytypes.FieldContextScope,
			},
			expectedCol:   logsV2Columns["scope_string"],
			expectedError: nil,
		},
		{
			name: "Attribute field - string type",
			key: telemetrytypes.TelemetryFieldKey{
				Name:          "user.id",
				FieldContext:  telemetrytypes.FieldContextAttribute,
				FieldDataType: telemetrytypes.FieldDataTypeString,
			},
			expectedCol:   logsV2Columns["attributes_string"],
			expectedError: nil,
		},
		{
			name: "Attribute field - number type",
			key: telemetrytypes.TelemetryFieldKey{
				Name:          "request.size",
				FieldContext:  telemetrytypes.FieldContextAttribute,
				FieldDataType: telemetrytypes.FieldDataTypeNumber,
			},
			expectedCol:   logsV2Columns["attributes_number"],
			expectedError: nil,
		},
		{
			name: "Attribute field - int64 type",
			key: telemetrytypes.TelemetryFieldKey{
				Name:          "request.duration",
				FieldContext:  telemetrytypes.FieldContextAttribute,
				FieldDataType: telemetrytypes.FieldDataTypeInt64,
			},
			expectedCol:   logsV2Columns["attributes_number"],
			expectedError: nil,
		},
		{
			name: "Attribute field - float64 type",
			key: telemetrytypes.TelemetryFieldKey{
				Name:          "cpu.utilization",
				FieldContext:  telemetrytypes.FieldContextAttribute,
				FieldDataType: telemetrytypes.FieldDataTypeFloat64,
			},
			expectedCol:   logsV2Columns["attributes_number"],
			expectedError: nil,
		},
		{
			name: "Attribute field - bool type",
			key: telemetrytypes.TelemetryFieldKey{
				Name:          "request.success",
				FieldContext:  telemetrytypes.FieldContextAttribute,
				FieldDataType: telemetrytypes.FieldDataTypeBool,
			},
			expectedCol:   logsV2Columns["attributes_bool"],
			expectedError: nil,
		},
		{
			name: "Log field - timestamp",
			key: telemetrytypes.TelemetryFieldKey{
				Name:         "timestamp",
				FieldContext: telemetrytypes.FieldContextLog,
			},
			expectedCol:   logsV2Columns["timestamp"],
			expectedError: nil,
		},
		{
			name: "Log field - body",
			key: telemetrytypes.TelemetryFieldKey{
				Name:         "body",
				FieldContext: telemetrytypes.FieldContextLog,
			},
			expectedCol:   logsV2Columns["body"],
			expectedError: nil,
		},
		{
			name: "Log field - nonexistent",
			key: telemetrytypes.TelemetryFieldKey{
				Name:         "nonexistent_field",
				FieldContext: telemetrytypes.FieldContextLog,
			},
			expectedCol:   nil,
			expectedError: qbtypes.ErrColumnNotFound,
		},
		{
			name: "did_user_login",
			key: telemetrytypes.TelemetryFieldKey{
				Name:          "did_user_login",
				Signal:        telemetrytypes.SignalLogs,
				FieldContext:  telemetrytypes.FieldContextAttribute,
				FieldDataType: telemetrytypes.FieldDataTypeBool,
			},
			expectedCol:   logsV2Columns["attributes_bool"],
			expectedError: nil,
		},
	}

	for _, tc := range testCases {
		t.Run(tc.name, func(t *testing.T) {
			col, err := conditionBuilder.GetColumn(ctx, &tc.key)

			if tc.expectedError != nil {
				assert.Equal(t, tc.expectedError, err)
			} else {
				require.NoError(t, err)
				assert.Equal(t, tc.expectedCol, col)
			}
		})
	}
}

func TestGetFieldKeyName(t *testing.T) {
	ctx := context.Background()
	conditionBuilder := &conditionBuilder{}

	testCases := []struct {
		name           string
		key            telemetrytypes.TelemetryFieldKey
		expectedResult string
		expectedError  error
	}{
		{
			name: "Simple column type - timestamp",
			key: telemetrytypes.TelemetryFieldKey{
				Name:         "timestamp",
				FieldContext: telemetrytypes.FieldContextLog,
			},
			expectedResult: "timestamp",
			expectedError:  nil,
		},
		{
			name: "Map column type - string attribute",
			key: telemetrytypes.TelemetryFieldKey{
				Name:          "user.id",
				FieldContext:  telemetrytypes.FieldContextAttribute,
				FieldDataType: telemetrytypes.FieldDataTypeString,
			},
			expectedResult: "attributes_string['user.id']",
			expectedError:  nil,
		},
		{
			name: "Map column type - number attribute",
			key: telemetrytypes.TelemetryFieldKey{
				Name:          "request.size",
				FieldContext:  telemetrytypes.FieldContextAttribute,
				FieldDataType: telemetrytypes.FieldDataTypeNumber,
			},
			expectedResult: "attributes_number['request.size']",
			expectedError:  nil,
		},
		{
			name: "Map column type - bool attribute",
			key: telemetrytypes.TelemetryFieldKey{
				Name:          "request.success",
				FieldContext:  telemetrytypes.FieldContextAttribute,
				FieldDataType: telemetrytypes.FieldDataTypeBool,
			},
			expectedResult: "attributes_bool['request.success']",
			expectedError:  nil,
		},
		{
			name: "Map column type - resource attribute",
			key: telemetrytypes.TelemetryFieldKey{
				Name:         "service.name",
				FieldContext: telemetrytypes.FieldContextResource,
			},
			expectedResult: "resources_string['service.name']",
			expectedError:  nil,
		},
		{
			name: "Non-existent column",
			key: telemetrytypes.TelemetryFieldKey{
				Name:         "nonexistent_field",
				FieldContext: telemetrytypes.FieldContextLog,
			},
			expectedResult: "",
			expectedError:  qbtypes.ErrColumnNotFound,
		},
	}

	for _, tc := range testCases {
		t.Run(tc.name, func(t *testing.T) {
			result, err := conditionBuilder.GetTableFieldName(ctx, &tc.key)

			if tc.expectedError != nil {
				assert.Equal(t, tc.expectedError, err)
			} else {
				require.NoError(t, err)
				assert.Equal(t, tc.expectedResult, result)
			}
		})
	}
}

func TestGetCondition(t *testing.T) {
	ctx := context.Background()
	conditionBuilder := NewConditionBuilder()

	testCases := []struct {
		name          string
		key           telemetrytypes.TelemetryFieldKey
		operator      qbtypes.FilterOperator
		value         any
		expectedSQL   string
		expectedError error
	}{
		{
			name: "Equal operator - string",
			key: telemetrytypes.TelemetryFieldKey{
				Name:         "body",
				FieldContext: telemetrytypes.FieldContextLog,
			},
			operator:      qbtypes.FilterOperatorEqual,
			value:         "error message",
			expectedSQL:   "body = ?",
			expectedError: nil,
		},
		{
			name: "Not Equal operator - timestamp",
			key: telemetrytypes.TelemetryFieldKey{
				Name:         "timestamp",
				FieldContext: telemetrytypes.FieldContextLog,
			},
			operator:      qbtypes.FilterOperatorNotEqual,
			value:         uint64(1617979338000000000),
			expectedSQL:   "timestamp <> ?",
			expectedError: nil,
		},
		{
			name: "Greater Than operator - number attribute",
			key: telemetrytypes.TelemetryFieldKey{
				Name:          "request.duration",
				FieldContext:  telemetrytypes.FieldContextAttribute,
				FieldDataType: telemetrytypes.FieldDataTypeNumber,
			},
			operator:      qbtypes.FilterOperatorGreaterThan,
			value:         float64(100),
			expectedSQL:   "attributes_number['request.duration'] > ?",
			expectedError: nil,
		},
		{
			name: "Less Than operator - number attribute",
			key: telemetrytypes.TelemetryFieldKey{
				Name:          "request.size",
				FieldContext:  telemetrytypes.FieldContextAttribute,
				FieldDataType: telemetrytypes.FieldDataTypeNumber,
			},
			operator:      qbtypes.FilterOperatorLessThan,
			value:         float64(1024),
			expectedSQL:   "attributes_number['request.size'] < ?",
			expectedError: nil,
		},
		{
			name: "Greater Than Or Equal operator - timestamp",
			key: telemetrytypes.TelemetryFieldKey{
				Name:         "timestamp",
				FieldContext: telemetrytypes.FieldContextLog,
			},
			operator:      qbtypes.FilterOperatorGreaterThanOrEq,
			value:         uint64(1617979338000000000),
			expectedSQL:   "timestamp >= ?",
			expectedError: nil,
		},
		{
			name: "Less Than Or Equal operator - timestamp",
			key: telemetrytypes.TelemetryFieldKey{
				Name:         "timestamp",
				FieldContext: telemetrytypes.FieldContextLog,
			},
			operator:      qbtypes.FilterOperatorLessThanOrEq,
			value:         uint64(1617979338000000000),
			expectedSQL:   "timestamp <= ?",
			expectedError: nil,
		},
		{
			name: "Like operator - body",
			key: telemetrytypes.TelemetryFieldKey{
				Name:         "body",
				FieldContext: telemetrytypes.FieldContextLog,
			},
			operator:      qbtypes.FilterOperatorLike,
			value:         "%error%",
			expectedSQL:   "body LIKE ?",
			expectedError: nil,
		},
		{
			name: "Not Like operator - body",
			key: telemetrytypes.TelemetryFieldKey{
				Name:         "body",
				FieldContext: telemetrytypes.FieldContextLog,
			},
			operator:      qbtypes.FilterOperatorNotLike,
			value:         "%error%",
			expectedSQL:   "body NOT LIKE ?",
			expectedError: nil,
		},
		{
			name: "ILike operator - string attribute",
			key: telemetrytypes.TelemetryFieldKey{
				Name:          "user.id",
				FieldContext:  telemetrytypes.FieldContextAttribute,
				FieldDataType: telemetrytypes.FieldDataTypeString,
			},
			operator:      qbtypes.FilterOperatorILike,
			value:         "%admin%",
			expectedSQL:   "WHERE LOWER(attributes_string['user.id']) LIKE LOWER(?)",
			expectedError: nil,
		},
		{
			name: "Not ILike operator - string attribute",
			key: telemetrytypes.TelemetryFieldKey{
				Name:          "user.id",
				FieldContext:  telemetrytypes.FieldContextAttribute,
				FieldDataType: telemetrytypes.FieldDataTypeString,
			},
			operator:      qbtypes.FilterOperatorNotILike,
			value:         "%admin%",
			expectedSQL:   "WHERE LOWER(attributes_string['user.id']) NOT LIKE LOWER(?)",
			expectedError: nil,
		},
		{
			name: "Contains operator - string attribute",
			key: telemetrytypes.TelemetryFieldKey{
				Name:          "user.id",
				FieldContext:  telemetrytypes.FieldContextAttribute,
				FieldDataType: telemetrytypes.FieldDataTypeString,
			},
			operator:      qbtypes.FilterOperatorContains,
			value:         "admin",
			expectedSQL:   "WHERE LOWER(attributes_string['user.id']) LIKE LOWER(?)",
			expectedError: nil,
		},
		{
			name: "Between operator - timestamp",
			key: telemetrytypes.TelemetryFieldKey{
				Name:         "timestamp",
				FieldContext: telemetrytypes.FieldContextLog,
			},
			operator:      qbtypes.FilterOperatorBetween,
			value:         []any{uint64(1617979338000000000), uint64(1617979348000000000)},
			expectedSQL:   "timestamp BETWEEN ? AND ?",
			expectedError: nil,
		},
		{
			name: "Between operator - invalid value",
			key: telemetrytypes.TelemetryFieldKey{
				Name:         "timestamp",
				FieldContext: telemetrytypes.FieldContextLog,
			},
			operator:      qbtypes.FilterOperatorBetween,
			value:         "invalid",
			expectedSQL:   "",
			expectedError: qbtypes.ErrBetweenValues,
		},
		{
			name: "Between operator - insufficient values",
			key: telemetrytypes.TelemetryFieldKey{
				Name:         "timestamp",
				FieldContext: telemetrytypes.FieldContextLog,
			},
			operator:      qbtypes.FilterOperatorBetween,
			value:         []any{uint64(1617979338000000000)},
			expectedSQL:   "",
			expectedError: qbtypes.ErrBetweenValues,
		},
		{
			name: "Not Between operator - timestamp",
			key: telemetrytypes.TelemetryFieldKey{
				Name:         "timestamp",
				FieldContext: telemetrytypes.FieldContextLog,
			},
			operator:      qbtypes.FilterOperatorNotBetween,
			value:         []any{uint64(1617979338000000000), uint64(1617979348000000000)},
			expectedSQL:   "timestamp NOT BETWEEN ? AND ?",
			expectedError: nil,
		},
		{
			name: "In operator - severity_text",
			key: telemetrytypes.TelemetryFieldKey{
				Name:         "severity_text",
				FieldContext: telemetrytypes.FieldContextLog,
			},
			operator:      qbtypes.FilterOperatorIn,
			value:         []any{"error", "fatal", "critical"},
			expectedSQL:   "severity_text IN (?, ?, ?)",
			expectedError: nil,
		},
		{
			name: "In operator - invalid value",
			key: telemetrytypes.TelemetryFieldKey{
				Name:         "severity_text",
				FieldContext: telemetrytypes.FieldContextLog,
			},
			operator:      qbtypes.FilterOperatorIn,
			value:         "error",
			expectedSQL:   "",
			expectedError: qbtypes.ErrInValues,
		},
		{
			name: "Not In operator - severity_text",
			key: telemetrytypes.TelemetryFieldKey{
				Name:         "severity_text",
				FieldContext: telemetrytypes.FieldContextLog,
			},
			operator:      qbtypes.FilterOperatorNotIn,
			value:         []any{"debug", "info", "trace"},
			expectedSQL:   "severity_text NOT IN (?, ?, ?)",
			expectedError: nil,
		},
		{
			name: "Exists operator - string field",
			key: telemetrytypes.TelemetryFieldKey{
				Name:         "body",
				FieldContext: telemetrytypes.FieldContextLog,
			},
			operator:      qbtypes.FilterOperatorExists,
			value:         nil,
			expectedSQL:   "body <> ?",
			expectedError: nil,
		},
		{
			name: "Not Exists operator - string field",
			key: telemetrytypes.TelemetryFieldKey{
				Name:         "body",
				FieldContext: telemetrytypes.FieldContextLog,
			},
			operator:      qbtypes.FilterOperatorNotExists,
			value:         nil,
			expectedSQL:   "body = ?",
			expectedError: nil,
		},
		{
			name: "Exists operator - number field",
			key: telemetrytypes.TelemetryFieldKey{
				Name:         "timestamp",
				FieldContext: telemetrytypes.FieldContextLog,
			},
			operator:      qbtypes.FilterOperatorExists,
			value:         nil,
			expectedSQL:   "timestamp <> ?",
			expectedError: nil,
		},
		{
			name: "Exists operator - map field",
			key: telemetrytypes.TelemetryFieldKey{
				Name:          "user.id",
				FieldContext:  telemetrytypes.FieldContextAttribute,
				FieldDataType: telemetrytypes.FieldDataTypeString,
			},
			operator:      qbtypes.FilterOperatorExists,
			value:         nil,
			expectedSQL:   "mapContains(attributes_string, 'user.id') = ?",
			expectedError: nil,
		},
		{
			name: "Not Exists operator - map field",
			key: telemetrytypes.TelemetryFieldKey{
				Name:          "user.id",
				FieldContext:  telemetrytypes.FieldContextAttribute,
				FieldDataType: telemetrytypes.FieldDataTypeString,
			},
			operator:      qbtypes.FilterOperatorNotExists,
			value:         nil,
			expectedSQL:   "mapContains(attributes_string, 'user.id') <> ?",
			expectedError: nil,
		},
		{
			name: "Non-existent column",
			key: telemetrytypes.TelemetryFieldKey{
				Name:         "nonexistent_field",
				FieldContext: telemetrytypes.FieldContextLog,
			},
			operator:      qbtypes.FilterOperatorEqual,
			value:         "value",
			expectedSQL:   "",
			expectedError: qbtypes.ErrColumnNotFound,
		},
	}

	for _, tc := range testCases {
		sb := sqlbuilder.NewSelectBuilder()
		t.Run(tc.name, func(t *testing.T) {
			cond, err := conditionBuilder.GetCondition(ctx, &tc.key, tc.operator, tc.value, sb)
			sb.Where(cond)

			if tc.expectedError != nil {
				assert.Equal(t, tc.expectedError, err)
			} else {
				require.NoError(t, err)
				sql, _ := sb.BuildWithFlavor(sqlbuilder.ClickHouse)
				assert.Contains(t, sql, tc.expectedSQL)
			}
		})
	}
}

func TestGetConditionMultiple(t *testing.T) {
	ctx := context.Background()
	conditionBuilder := NewConditionBuilder()

	testCases := []struct {
		name          string
		keys          []*telemetrytypes.TelemetryFieldKey
		operator      qbtypes.FilterOperator
		value         any
		expectedSQL   string
		expectedError error
	}{
		{
			name: "Equal operator - string",
			keys: []*telemetrytypes.TelemetryFieldKey{
				{
					Name:         "body",
					FieldContext: telemetrytypes.FieldContextLog,
				},
				{
					Name:         "severity_text",
					FieldContext: telemetrytypes.FieldContextLog,
				},
			},
			operator:      qbtypes.FilterOperatorEqual,
			value:         "error message",
			expectedSQL:   "body = ? AND severity_text = ?",
			expectedError: nil,
		},
	}

	for _, tc := range testCases {
		sb := sqlbuilder.NewSelectBuilder()
		t.Run(tc.name, func(t *testing.T) {
			// note: assign with `=` rather than `:=` so err is not shadowed
			// inside the loop and the final assertion sees the real error
			var err error
			for _, key := range tc.keys {
				var cond string
				cond, err = conditionBuilder.GetCondition(ctx, key, tc.operator, tc.value, sb)
				if err != nil {
					t.Fatalf("error getting condition for key %s: %v", key.Name, err)
				}
				sb.Where(cond)
			}

			if tc.expectedError != nil {
				assert.Equal(t, tc.expectedError, err)
			} else {
				require.NoError(t, err)
				sql, _ := sb.BuildWithFlavor(sqlbuilder.ClickHouse)
				assert.Contains(t, sql, tc.expectedSQL)
			}
		})
	}
}
9
pkg/telemetrylogs/tables.go
Normal file
@@ -0,0 +1,9 @@
package telemetrylogs

const (
	DBName                        = "signoz_logs"
	LogsV2TableName               = "distributed_logs_v2"
	LogsV2LocalTableName          = "logs_v2"
	TagAttributesV2TableName      = "distributed_tag_attributes_v2"
	TagAttributesV2LocalTableName = "tag_attributes_v2"
)
149
pkg/telemetrymetadata/condition_builder.go
Normal file
@@ -0,0 +1,149 @@
package telemetrymetadata

import (
	"context"
	"fmt"

	schema "github.com/SigNoz/signoz-otel-collector/cmd/signozschemamigrator/schema_migrator"
	qbtypes "github.com/SigNoz/signoz/pkg/types/querybuildertypes/querybuildertypesv5"
	"github.com/SigNoz/signoz/pkg/types/telemetrytypes"
	"github.com/huandu/go-sqlbuilder"
)

var (
	attributeMetadataColumns = map[string]*schema.Column{
		"resource_attributes": {Name: "resource_attributes", Type: schema.MapColumnType{
			KeyType:   schema.LowCardinalityColumnType{ElementType: schema.ColumnTypeString},
			ValueType: schema.ColumnTypeString,
		}},
		"attributes": {Name: "attributes", Type: schema.MapColumnType{
			KeyType:   schema.LowCardinalityColumnType{ElementType: schema.ColumnTypeString},
			ValueType: schema.ColumnTypeString,
		}},
	}
)

type conditionBuilder struct{}

func NewConditionBuilder() qbtypes.ConditionBuilder {
	return &conditionBuilder{}
}

func (c *conditionBuilder) GetColumn(ctx context.Context, key *telemetrytypes.TelemetryFieldKey) (*schema.Column, error) {
	switch key.FieldContext {
	case telemetrytypes.FieldContextResource:
		return attributeMetadataColumns["resource_attributes"], nil
	case telemetrytypes.FieldContextAttribute:
		return attributeMetadataColumns["attributes"], nil
	}
	return nil, qbtypes.ErrColumnNotFound
}

func (c *conditionBuilder) GetTableFieldName(ctx context.Context, key *telemetrytypes.TelemetryFieldKey) (string, error) {
	column, err := c.GetColumn(ctx, key)
	if err != nil {
		return "", err
	}

	switch column.Type {
	case schema.MapColumnType{
		KeyType:   schema.LowCardinalityColumnType{ElementType: schema.ColumnTypeString},
		ValueType: schema.ColumnTypeString,
	}:
		return fmt.Sprintf("%s['%s']", column.Name, key.Name), nil
	}
	return column.Name, nil
}

func (c *conditionBuilder) GetCondition(
	ctx context.Context,
	key *telemetrytypes.TelemetryFieldKey,
	operator qbtypes.FilterOperator,
	value any,
	sb *sqlbuilder.SelectBuilder,
) (string, error) {
	column, err := c.GetColumn(ctx, key)
	if err != nil {
		// if we don't have a column, we can't build a condition for related values
		return "", nil
	}

	tblFieldName, err := c.GetTableFieldName(ctx, key)
	if err != nil {
		// if we don't have a table field name, we can't build a condition for related values
		return "", nil
	}

	if key.FieldDataType != telemetrytypes.FieldDataTypeString {
		// if the field data type is not string, we can't build a condition for related values
		return "", nil
	}

	// the key must exist for the main filter to apply
	containsExp := fmt.Sprintf("mapContains(%s, %s)", column.Name, sb.Var(key.Name))

	switch operator {
	// regular operators
	case qbtypes.FilterOperatorEqual:
		return sb.And(containsExp, sb.E(tblFieldName, value)), nil
	case qbtypes.FilterOperatorNotEqual:
		return sb.And(containsExp, sb.NE(tblFieldName, value)), nil

	// like and not like
	case qbtypes.FilterOperatorLike:
		return sb.And(containsExp, sb.Like(tblFieldName, value)), nil
	case qbtypes.FilterOperatorNotLike:
		return sb.And(containsExp, sb.NotLike(tblFieldName, value)), nil
	case qbtypes.FilterOperatorILike:
		return sb.And(containsExp, sb.ILike(tblFieldName, value)), nil
	case qbtypes.FilterOperatorNotILike:
		return sb.And(containsExp, sb.NotILike(tblFieldName, value)), nil

	case qbtypes.FilterOperatorContains:
		return sb.And(containsExp, sb.ILike(tblFieldName, fmt.Sprintf("%%%s%%", value))), nil
	case qbtypes.FilterOperatorNotContains:
		return sb.And(containsExp, sb.NotILike(tblFieldName, fmt.Sprintf("%%%s%%", value))), nil

	case qbtypes.FilterOperatorRegexp:
		exp := fmt.Sprintf(`match(%s, %s)`, tblFieldName, sb.Var(value))
		return sb.And(containsExp, exp), nil
	case qbtypes.FilterOperatorNotRegexp:
		exp := fmt.Sprintf(`not match(%s, %s)`, tblFieldName, sb.Var(value))
		return sb.And(containsExp, exp), nil

	// in and not in
	case qbtypes.FilterOperatorIn:
		values, ok := value.([]any)
		if !ok {
			return "", qbtypes.ErrInValues
		}
		return sb.And(containsExp, sb.In(tblFieldName, values...)), nil
	case qbtypes.FilterOperatorNotIn:
		values, ok := value.([]any)
		if !ok {
			return "", qbtypes.ErrInValues
		}
		return sb.And(containsExp, sb.NotIn(tblFieldName, values...)), nil

	// exists and not exists
	// in the query builder, `exists` and `not exists` are used for
	// key membership checks, so depending on the column type, the condition changes
	case qbtypes.FilterOperatorExists, qbtypes.FilterOperatorNotExists:
		switch column.Type {
		case schema.MapColumnType{
			KeyType:   schema.LowCardinalityColumnType{ElementType: schema.ColumnTypeString},
			ValueType: schema.ColumnTypeString,
		}:
			leftOperand := fmt.Sprintf("mapContains(%s, '%s')", column.Name, key.Name)
			if operator == qbtypes.FilterOperatorExists {
				return sb.E(leftOperand, true), nil
			} else {
				return sb.NE(leftOperand, true), nil
			}
		}
	}

	return "", nil
}
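Unlike the logs builder earlier in this diff, every value comparison here is wrapped in a mapContains guard, so a filter only matches rows where the key is actually present in the attributes map. A short sketch of the resulting shape, mirroring the test expectations in the file that follows; the table name used here is a placeholder, not taken from this diff.

package main

import (
	"context"
	"fmt"

	"github.com/SigNoz/signoz/pkg/telemetrymetadata"
	qbtypes "github.com/SigNoz/signoz/pkg/types/querybuildertypes/querybuildertypesv5"
	"github.com/SigNoz/signoz/pkg/types/telemetrytypes"
	"github.com/huandu/go-sqlbuilder"
)

func main() {
	cb := telemetrymetadata.NewConditionBuilder()

	key := &telemetrytypes.TelemetryFieldKey{
		Name:          "user.id",
		FieldContext:  telemetrytypes.FieldContextAttribute,
		FieldDataType: telemetrytypes.FieldDataTypeString,
	}

	sb := sqlbuilder.NewSelectBuilder()
	sb.Select("*").From("related_metadata_db.related_metadata_tbl") // placeholder table name

	cond, err := cb.GetCondition(context.Background(), key, qbtypes.FilterOperatorEqual, "u-42", sb)
	if err != nil {
		panic(err)
	}
	sb.Where(cond)

	sql, args := sb.BuildWithFlavor(sqlbuilder.ClickHouse)
	fmt.Println(sql, args)
	// roughly: ... WHERE (mapContains(attributes, ?) AND attributes['user.id'] = ?)
	//          args: ["user.id", "u-42"]
}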
272
pkg/telemetrymetadata/condition_builder_test.go
Normal file
@@ -0,0 +1,272 @@
package telemetrymetadata

import (
	"context"
	"testing"

	schema "github.com/SigNoz/signoz-otel-collector/cmd/signozschemamigrator/schema_migrator"
	qbtypes "github.com/SigNoz/signoz/pkg/types/querybuildertypes/querybuildertypesv5"
	"github.com/SigNoz/signoz/pkg/types/telemetrytypes"
	"github.com/huandu/go-sqlbuilder"
	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"
)

func TestGetColumn(t *testing.T) {
	ctx := context.Background()
	conditionBuilder := NewConditionBuilder()

	testCases := []struct {
		name          string
		key           telemetrytypes.TelemetryFieldKey
		expectedCol   *schema.Column
		expectedError error
	}{
		{
			name: "Resource field",
			key: telemetrytypes.TelemetryFieldKey{
				Name:         "service.name",
				FieldContext: telemetrytypes.FieldContextResource,
			},
			expectedCol:   attributeMetadataColumns["resource_attributes"],
			expectedError: nil,
		},
		{
			name: "Scope field - scope name",
			key: telemetrytypes.TelemetryFieldKey{
				Name:         "name",
				FieldContext: telemetrytypes.FieldContextScope,
			},
			expectedCol:   nil,
			expectedError: qbtypes.ErrColumnNotFound,
		},
		{
			name: "Scope field - scope.name",
			key: telemetrytypes.TelemetryFieldKey{
				Name:         "scope.name",
				FieldContext: telemetrytypes.FieldContextScope,
			},
			expectedCol:   nil,
			expectedError: qbtypes.ErrColumnNotFound,
		},
		{
			name: "Scope field - scope_name",
			key: telemetrytypes.TelemetryFieldKey{
				Name:         "scope_name",
				FieldContext: telemetrytypes.FieldContextScope,
			},
			expectedCol:   nil,
			expectedError: qbtypes.ErrColumnNotFound,
		},
		{
			name: "Scope field - version",
			key: telemetrytypes.TelemetryFieldKey{
				Name:         "version",
				FieldContext: telemetrytypes.FieldContextScope,
			},
			expectedCol:   nil,
			expectedError: qbtypes.ErrColumnNotFound,
		},
		{
			name: "Scope field - other scope field",
			key: telemetrytypes.TelemetryFieldKey{
				Name:         "custom.scope.field",
				FieldContext: telemetrytypes.FieldContextScope,
			},
			expectedCol:   nil,
			expectedError: qbtypes.ErrColumnNotFound,
		},
		{
			name: "Attribute field - string type",
			key: telemetrytypes.TelemetryFieldKey{
				Name:          "user.id",
				FieldContext:  telemetrytypes.FieldContextAttribute,
				FieldDataType: telemetrytypes.FieldDataTypeString,
			},
			expectedCol:   attributeMetadataColumns["attributes"],
			expectedError: nil,
		},
		{
			name: "Attribute field - number type",
			key: telemetrytypes.TelemetryFieldKey{
				Name:          "request.size",
				FieldContext:  telemetrytypes.FieldContextAttribute,
				FieldDataType: telemetrytypes.FieldDataTypeNumber,
			},
			expectedCol:   attributeMetadataColumns["attributes"],
			expectedError: nil,
		},
		{
			name: "Attribute field - int64 type",
			key: telemetrytypes.TelemetryFieldKey{
				Name:          "request.duration",
				FieldContext:  telemetrytypes.FieldContextAttribute,
				FieldDataType: telemetrytypes.FieldDataTypeInt64,
			},
			expectedCol:   attributeMetadataColumns["attributes"],
			expectedError: nil,
		},
		{
			name: "Attribute field - float64 type",
			key: telemetrytypes.TelemetryFieldKey{
				Name:          "cpu.utilization",
				FieldContext:  telemetrytypes.FieldContextAttribute,
				FieldDataType: telemetrytypes.FieldDataTypeFloat64,
			},
			expectedCol:   attributeMetadataColumns["attributes"],
			expectedError: nil,
		},
		{
			name: "Log field - nonexistent",
			key: telemetrytypes.TelemetryFieldKey{
				Name:         "nonexistent_field",
				FieldContext: telemetrytypes.FieldContextLog,
			},
			expectedCol:   nil,
			expectedError: qbtypes.ErrColumnNotFound,
		},
	}

	for _, tc := range testCases {
		t.Run(tc.name, func(t *testing.T) {
			col, err := conditionBuilder.GetColumn(ctx, &tc.key)

			if tc.expectedError != nil {
				assert.Equal(t, tc.expectedError, err)
			} else {
				require.NoError(t, err)
				assert.Equal(t, tc.expectedCol, col)
			}
		})
	}
}

func TestGetFieldKeyName(t *testing.T) {
	ctx := context.Background()
	conditionBuilder := &conditionBuilder{}

	testCases := []struct {
		name           string
		key            telemetrytypes.TelemetryFieldKey
		expectedResult string
		expectedError  error
	}{
		{
			name: "Map column type - string attribute",
			key: telemetrytypes.TelemetryFieldKey{
				Name:          "user.id",
				FieldContext:  telemetrytypes.FieldContextAttribute,
				FieldDataType: telemetrytypes.FieldDataTypeString,
			},
			expectedResult: "attributes['user.id']",
			expectedError:  nil,
		},
		{
			name: "Map column type - number attribute",
			key: telemetrytypes.TelemetryFieldKey{
				Name:          "request.size",
				FieldContext:  telemetrytypes.FieldContextAttribute,
				FieldDataType: telemetrytypes.FieldDataTypeNumber,
			},
			expectedResult: "attributes['request.size']",
			expectedError:  nil,
		},
		{
			name: "Map column type - bool attribute",
			key: telemetrytypes.TelemetryFieldKey{
				Name:          "request.success",
				FieldContext:  telemetrytypes.FieldContextAttribute,
				FieldDataType: telemetrytypes.FieldDataTypeBool,
			},
			expectedResult: "attributes['request.success']",
			expectedError:  nil,
		},
		{
			name: "Map column type - resource attribute",
			key: telemetrytypes.TelemetryFieldKey{
				Name:         "service.name",
				FieldContext: telemetrytypes.FieldContextResource,
			},
			expectedResult: "resource_attributes['service.name']",
			expectedError:  nil,
		},
		{
			name: "Non-existent column",
			key: telemetrytypes.TelemetryFieldKey{
				Name:         "nonexistent_field",
				FieldContext: telemetrytypes.FieldContextLog,
			},
			expectedResult: "",
			expectedError:  qbtypes.ErrColumnNotFound,
		},
	}

	for _, tc := range testCases {
		t.Run(tc.name, func(t *testing.T) {
			result, err := conditionBuilder.GetTableFieldName(ctx, &tc.key)

			if tc.expectedError != nil {
				assert.Equal(t, tc.expectedError, err)
			} else {
				require.NoError(t, err)
				assert.Equal(t, tc.expectedResult, result)
			}
		})
	}
}

func TestGetCondition(t *testing.T) {
	ctx := context.Background()
	conditionBuilder := NewConditionBuilder()

	testCases := []struct {
		name          string
		key           telemetrytypes.TelemetryFieldKey
		operator      qbtypes.FilterOperator
		value         any
		expectedSQL   string
		expectedError error
	}{
		{
			name: "ILike operator - string attribute",
			key: telemetrytypes.TelemetryFieldKey{
				Name:          "user.id",
				FieldContext:  telemetrytypes.FieldContextAttribute,
				FieldDataType: telemetrytypes.FieldDataTypeString,
			},
			operator:      qbtypes.FilterOperatorILike,
			value:         "%admin%",
			expectedSQL:   "WHERE (mapContains(attributes, ?) AND LOWER(attributes['user.id']) LIKE LOWER(?))",
			expectedError: nil,
		},
		{
			name: "Not ILike operator - string attribute",
			key: telemetrytypes.TelemetryFieldKey{
				Name:          "user.id",
				FieldContext:  telemetrytypes.FieldContextAttribute,
				FieldDataType: telemetrytypes.FieldDataTypeString,
			},
			operator:      qbtypes.FilterOperatorNotILike,
			value:         "%admin%",
			expectedSQL:   "WHERE (mapContains(attributes, ?) AND LOWER(attributes['user.id']) NOT LIKE LOWER(?))",
			expectedError: nil,
		},
	}

	for _, tc := range testCases {
		sb := sqlbuilder.NewSelectBuilder()
		t.Run(tc.name, func(t *testing.T) {
			cond, err := conditionBuilder.GetCondition(ctx, &tc.key, tc.operator, tc.value, sb)
			sb.Where(cond)

			if tc.expectedError != nil {
				assert.Equal(t, tc.expectedError, err)
			} else {
				require.NoError(t, err)
				sql, _ := sb.BuildWithFlavor(sqlbuilder.ClickHouse)
				assert.Contains(t, sql, tc.expectedSQL)
			}
		})
	}
}
691
pkg/telemetrymetadata/metadata.go
Normal file
@@ -0,0 +1,691 @@
|
||||
package telemetrymetadata
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"strings"
|
||||
|
||||
"github.com/SigNoz/signoz/pkg/errors"
|
||||
"github.com/SigNoz/signoz/pkg/telemetrystore"
|
||||
qbtypes "github.com/SigNoz/signoz/pkg/types/querybuildertypes/querybuildertypesv5"
|
||||
"github.com/SigNoz/signoz/pkg/types/telemetrytypes"
|
||||
"github.com/huandu/go-sqlbuilder"
|
||||
"go.uber.org/zap"
|
||||
)
|
||||
|
||||
var (
|
||||
ErrFailedToGetTracesKeys = errors.Newf(errors.TypeInternal, errors.CodeInternal, "failed to get traces keys")
|
||||
ErrFailedToGetLogsKeys = errors.Newf(errors.TypeInternal, errors.CodeInternal, "failed to get logs keys")
|
||||
ErrFailedToGetTblStatement = errors.Newf(errors.TypeInternal, errors.CodeInternal, "failed to get tbl statement")
|
||||
ErrFailedToGetMetricsKeys = errors.Newf(errors.TypeInternal, errors.CodeInternal, "failed to get metrics keys")
|
||||
ErrFailedToGetRelatedValues = errors.Newf(errors.TypeInternal, errors.CodeInternal, "failed to get related values")
|
||||
)
|
||||
|
||||
type telemetryMetaStore struct {
|
||||
telemetrystore telemetrystore.TelemetryStore
|
||||
tracesDBName string
|
||||
tracesFieldsTblName string
|
||||
indexV3TblName string
|
||||
metricsDBName string
|
||||
metricsFieldsTblName string
|
||||
timeseries1WTblName string
|
||||
logsDBName string
|
||||
logsFieldsTblName string
|
||||
logsV2TblName string
|
||||
relatedMetadataDBName string
|
||||
relatedMetadataTblName string
|
||||
|
||||
conditionBuilder qbtypes.ConditionBuilder
|
||||
}
|
||||
|
||||
func NewTelemetryMetaStore(
|
||||
telemetrystore telemetrystore.TelemetryStore,
|
||||
tracesDBName string,
|
||||
tracesFieldsTblName string,
|
||||
indexV3TblName string,
|
||||
metricsDBName string,
|
||||
metricsFieldsTblName string,
|
||||
timeseries1WTblName string,
|
||||
logsDBName string,
|
||||
logsV2TblName string,
|
||||
logsFieldsTblName string,
|
||||
relatedMetadataDBName string,
|
||||
relatedMetadataTblName string,
|
||||
) (telemetrytypes.MetadataStore, error) {
|
||||
return &telemetryMetaStore{
|
||||
telemetrystore: telemetrystore,
|
||||
tracesDBName: tracesDBName,
|
||||
tracesFieldsTblName: tracesFieldsTblName,
|
||||
indexV3TblName: indexV3TblName,
|
||||
metricsDBName: metricsDBName,
|
||||
metricsFieldsTblName: metricsFieldsTblName,
|
||||
timeseries1WTblName: timeseries1WTblName,
|
||||
logsDBName: logsDBName,
|
||||
logsV2TblName: logsV2TblName,
|
||||
logsFieldsTblName: logsFieldsTblName,
|
||||
relatedMetadataDBName: relatedMetadataDBName,
|
||||
relatedMetadataTblName: relatedMetadataTblName,
|
||||
|
||||
conditionBuilder: NewConditionBuilder(),
|
||||
}, nil
|
||||
}
|
||||
|
||||
// tracesTblStatementToFieldKeys returns materialised attribute/resource/scope keys from the traces table
|
||||
func (t *telemetryMetaStore) tracesTblStatementToFieldKeys(ctx context.Context) ([]*telemetrytypes.TelemetryFieldKey, error) {
|
||||
query := fmt.Sprintf("SHOW CREATE TABLE %s.%s", t.tracesDBName, t.indexV3TblName)
|
||||
statements := []telemetrytypes.ShowCreateTableStatement{}
|
||||
err := t.telemetrystore.ClickhouseDB().Select(ctx, &statements, query)
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, errors.TypeInternal, errors.CodeInternal, ErrFailedToGetTblStatement.Error())
|
||||
}
|
||||
|
||||
return ExtractFieldKeysFromTblStatement(statements[0].Statement)
|
||||
}
|
||||
|
||||
// getTracesKeys returns the keys from the spans that match the field selection criteria
|
||||
func (t *telemetryMetaStore) getTracesKeys(ctx context.Context, fieldKeySelectors []*telemetrytypes.FieldKeySelector) ([]*telemetrytypes.TelemetryFieldKey, error) {
|
||||
|
||||
if len(fieldKeySelectors) == 0 {
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
// pre-fetch the materialised keys from the traces table
|
||||
matKeys, err := t.tracesTblStatementToFieldKeys(ctx)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
mapOfKeys := make(map[string]*telemetrytypes.TelemetryFieldKey)
|
||||
for _, key := range matKeys {
|
||||
mapOfKeys[key.Name+";"+key.FieldContext.StringValue()+";"+key.FieldDataType.StringValue()] = key
|
||||
}
|
||||
|
||||
sb := sqlbuilder.Select("tag_key", "tag_type", "tag_data_type", `
|
||||
CASE
|
||||
WHEN tag_type = 'spanfield' THEN 1
|
||||
WHEN tag_type = 'resource' THEN 2
|
||||
WHEN tag_type = 'scope' THEN 3
|
||||
WHEN tag_type = 'tag' THEN 4
|
||||
ELSE 5
|
||||
END as priority`).From(t.tracesDBName + "." + t.tracesFieldsTblName)
|
||||
var limit int
|
||||
|
||||
conds := []string{}
|
||||
for _, fieldKeySelector := range fieldKeySelectors {
|
||||
|
||||
if fieldKeySelector.StartUnixMilli != 0 {
|
||||
conds = append(conds, sb.GE("unix_milli", fieldKeySelector.StartUnixMilli))
|
||||
}
|
||||
if fieldKeySelector.EndUnixMilli != 0 {
|
||||
conds = append(conds, sb.LE("unix_milli", fieldKeySelector.EndUnixMilli))
|
||||
}
|
||||
|
||||
// key part of the selector
|
||||
fieldKeyConds := []string{}
|
||||
if fieldKeySelector.SelectorMatchType == telemetrytypes.FieldSelectorMatchTypeExact {
|
||||
fieldKeyConds = append(fieldKeyConds, sb.E("tag_key", fieldKeySelector.Name))
|
||||
} else {
|
||||
fieldKeyConds = append(fieldKeyConds, sb.Like("tag_key", "%"+fieldKeySelector.Name+"%"))
|
||||
}
|
||||
|
||||
// now look at the field context
|
||||
if fieldKeySelector.FieldContext != telemetrytypes.FieldContextUnspecified {
|
||||
fieldKeyConds = append(fieldKeyConds, sb.E("tag_type", fieldKeySelector.FieldContext.TagType()))
|
||||
}
|
||||
|
||||
// now look at the field data type
|
||||
if fieldKeySelector.FieldDataType != telemetrytypes.FieldDataTypeUnspecified {
|
||||
fieldKeyConds = append(fieldKeyConds, sb.E("tag_data_type", fieldKeySelector.FieldDataType.TagDataType()))
|
||||
}
|
||||
|
||||
conds = append(conds, sb.And(fieldKeyConds...))
|
||||
limit += fieldKeySelector.Limit
|
||||
}
|
||||
sb.Where(sb.Or(conds...))
|
||||
|
||||
if limit == 0 {
|
||||
limit = 1000
|
||||
}
|
||||
|
||||
mainSb := sqlbuilder.Select("tag_key", "tag_type", "tag_data_type", "max(priority) as priority")
|
||||
mainSb.From(mainSb.BuilderAs(sb, "sub_query"))
|
||||
mainSb.GroupBy("tag_key", "tag_type", "tag_data_type")
|
||||
mainSb.OrderBy("priority")
|
||||
mainSb.Limit(limit)
|
||||
|
||||
query, args := mainSb.BuildWithFlavor(sqlbuilder.ClickHouse)
|
||||
|
||||
rows, err := t.telemetrystore.ClickhouseDB().Query(ctx, query, args...)
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, errors.TypeInternal, errors.CodeInternal, ErrFailedToGetTracesKeys.Error())
|
||||
}
|
||||
defer rows.Close()
|
||||
keys := []*telemetrytypes.TelemetryFieldKey{}
|
||||
for rows.Next() {
|
||||
var name string
|
||||
var fieldContext telemetrytypes.FieldContext
|
||||
var fieldDataType telemetrytypes.FieldDataType
|
||||
var priority uint8
|
||||
err = rows.Scan(&name, &fieldContext, &fieldDataType, &priority)
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, errors.TypeInternal, errors.CodeInternal, ErrFailedToGetTracesKeys.Error())
|
||||
}
|
||||
key, ok := mapOfKeys[name+";"+fieldContext.StringValue()+";"+fieldDataType.StringValue()]
|
||||
|
||||
// if there is no materialised column, create a key with the field context and data type
|
||||
if !ok {
|
||||
key = &telemetrytypes.TelemetryFieldKey{
|
||||
Name: name,
|
||||
FieldContext: fieldContext,
|
||||
FieldDataType: fieldDataType,
|
||||
}
|
||||
}
|
||||
|
||||
keys = append(keys, key)
|
||||
}
|
||||
|
||||
if rows.Err() != nil {
|
||||
return nil, errors.Wrapf(rows.Err(), errors.TypeInternal, errors.CodeInternal, ErrFailedToGetTracesKeys.Error())
|
||||
}
|
||||
|
||||
return keys, nil
|
||||
}
|
||||
|
||||
// logsTblStatementToFieldKeys returns materialised attribute/resource/scope keys from the logs table
|
||||
func (t *telemetryMetaStore) logsTblStatementToFieldKeys(ctx context.Context) ([]*telemetrytypes.TelemetryFieldKey, error) {
|
||||
query := fmt.Sprintf("SHOW CREATE TABLE %s.%s", t.logsDBName, t.logsV2TblName)
|
||||
statements := []telemetrytypes.ShowCreateTableStatement{}
|
||||
err := t.telemetrystore.ClickhouseDB().Select(ctx, &statements, query)
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, errors.TypeInternal, errors.CodeInternal, ErrFailedToGetTblStatement.Error())
|
||||
}
|
||||
|
||||
return ExtractFieldKeysFromTblStatement(statements[0].Statement)
|
||||
}
|
||||
|
||||
// getLogsKeys returns the keys from the logs that match the field selection criteria
func (t *telemetryMetaStore) getLogsKeys(ctx context.Context, fieldKeySelectors []*telemetrytypes.FieldKeySelector) ([]*telemetrytypes.TelemetryFieldKey, error) {
	if len(fieldKeySelectors) == 0 {
		return nil, nil
	}

	// pre-fetch the materialised keys from the logs table
	matKeys, err := t.logsTblStatementToFieldKeys(ctx)
	if err != nil {
		return nil, err
	}
	mapOfKeys := make(map[string]*telemetrytypes.TelemetryFieldKey)
	for _, key := range matKeys {
		mapOfKeys[key.Name+";"+key.FieldContext.StringValue()+";"+key.FieldDataType.StringValue()] = key
	}

	sb := sqlbuilder.Select("tag_key", "tag_type", "tag_data_type", `
	CASE
		WHEN tag_type = 'logfield' THEN 1
		WHEN tag_type = 'resource' THEN 2
		WHEN tag_type = 'scope' THEN 3
		WHEN tag_type = 'tag' THEN 4
		ELSE 5
	END as priority`).From(t.logsDBName + "." + t.logsFieldsTblName)
	var limit int

	conds := []string{}
	for _, fieldKeySelector := range fieldKeySelectors {

		if fieldKeySelector.StartUnixMilli != 0 {
			conds = append(conds, sb.GE("unix_milli", fieldKeySelector.StartUnixMilli))
		}
		if fieldKeySelector.EndUnixMilli != 0 {
			conds = append(conds, sb.LE("unix_milli", fieldKeySelector.EndUnixMilli))
		}

		// key part of the selector
		fieldKeyConds := []string{}
		if fieldKeySelector.SelectorMatchType == telemetrytypes.FieldSelectorMatchTypeExact {
			fieldKeyConds = append(fieldKeyConds, sb.E("tag_key", fieldKeySelector.Name))
		} else {
			fieldKeyConds = append(fieldKeyConds, sb.Like("tag_key", "%"+fieldKeySelector.Name+"%"))
		}

		// now look at the field context
		if fieldKeySelector.FieldContext != telemetrytypes.FieldContextUnspecified {
			fieldKeyConds = append(fieldKeyConds, sb.E("tag_type", fieldKeySelector.FieldContext.TagType()))
		}

		// now look at the field data type
		if fieldKeySelector.FieldDataType != telemetrytypes.FieldDataTypeUnspecified {
			fieldKeyConds = append(fieldKeyConds, sb.E("tag_data_type", fieldKeySelector.FieldDataType.TagDataType()))
		}

		conds = append(conds, sb.And(fieldKeyConds...))
		limit += fieldKeySelector.Limit
	}
	sb.Where(sb.Or(conds...))
	if limit == 0 {
		limit = 1000
	}

	mainSb := sqlbuilder.Select("tag_key", "tag_type", "tag_data_type", "max(priority) as priority")
	mainSb.From(mainSb.BuilderAs(sb, "sub_query"))
	mainSb.GroupBy("tag_key", "tag_type", "tag_data_type")
	mainSb.OrderBy("priority")
	mainSb.Limit(limit)

	query, args := mainSb.BuildWithFlavor(sqlbuilder.ClickHouse)

	rows, err := t.telemetrystore.ClickhouseDB().Query(ctx, query, args...)
	if err != nil {
		return nil, errors.Wrapf(err, errors.TypeInternal, errors.CodeInternal, ErrFailedToGetLogsKeys.Error())
	}
	defer rows.Close()
	keys := []*telemetrytypes.TelemetryFieldKey{}
	for rows.Next() {
		var name string
		var fieldContext telemetrytypes.FieldContext
		var fieldDataType telemetrytypes.FieldDataType
		var priority uint8
		err = rows.Scan(&name, &fieldContext, &fieldDataType, &priority)
		if err != nil {
			return nil, errors.Wrapf(err, errors.TypeInternal, errors.CodeInternal, ErrFailedToGetLogsKeys.Error())
		}
		key, ok := mapOfKeys[name+";"+fieldContext.StringValue()+";"+fieldDataType.StringValue()]

		// if there is no materialised column, create a key with the field context and data type
		if !ok {
			key = &telemetrytypes.TelemetryFieldKey{
				Name:          name,
				FieldContext:  fieldContext,
				FieldDataType: fieldDataType,
			}
		}

		keys = append(keys, key)
	}

	if rows.Err() != nil {
		return nil, errors.Wrapf(rows.Err(), errors.TypeInternal, errors.CodeInternal, ErrFailedToGetLogsKeys.Error())
	}

	return keys, nil
}

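For reference, a minimal sketch of the two-level query this builder assembles — the table name signoz_logs.distributed_tag_attributes_v2 and the literal values are illustrative assumptions, not part of this change:

package main

import (
	"fmt"

	"github.com/huandu/go-sqlbuilder"
)

func main() {
	// Inner query: per-selector conditions plus the tag_type -> priority CASE.
	sb := sqlbuilder.Select("tag_key", "tag_type", "tag_data_type", `
	CASE
		WHEN tag_type = 'logfield' THEN 1
		WHEN tag_type = 'resource' THEN 2
		WHEN tag_type = 'scope' THEN 3
		WHEN tag_type = 'tag' THEN 4
		ELSE 5
	END as priority`).From("signoz_logs.distributed_tag_attributes_v2")
	sb.Where(sb.Or(sb.And(
		sb.GE("unix_milli", 1740000000000),
		sb.E("tag_key", "k8s.pod.name"),
	)))

	// Outer query: collapse duplicate (key, type, data type) rows into one priority.
	mainSb := sqlbuilder.Select("tag_key", "tag_type", "tag_data_type", "max(priority) as priority")
	mainSb.From(mainSb.BuilderAs(sb, "sub_query"))
	mainSb.GroupBy("tag_key", "tag_type", "tag_data_type")
	mainSb.OrderBy("priority")
	mainSb.Limit(10)

	query, args := mainSb.BuildWithFlavor(sqlbuilder.ClickHouse)
	fmt.Println(query) // rendered SQL with ? placeholders
	fmt.Println(args)  // bound arguments, in the order the conditions were added
}
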
// getMetricsKeys returns the keys from the metrics that match the field selection criteria
// TODO(srikanthccv): update the implementation after the dot metrics migration is done
func (t *telemetryMetaStore) getMetricsKeys(ctx context.Context, fieldKeySelectors []*telemetrytypes.FieldKeySelector) ([]*telemetrytypes.TelemetryFieldKey, error) {
	if len(fieldKeySelectors) == 0 {
		return nil, nil
	}

	var whereClause, innerWhereClause string
	var limit int
	args := []any{}

	for _, fieldKeySelector := range fieldKeySelectors {
		if fieldKeySelector.MetricContext != nil {
			innerWhereClause += "metric_name IN ? AND "
			args = append(args, fieldKeySelector.MetricContext.MetricName)
		}
	}
	innerWhereClause += "__normalized = true"

	for idx, fieldKeySelector := range fieldKeySelectors {
		if fieldKeySelector.SelectorMatchType == telemetrytypes.FieldSelectorMatchTypeExact {
			whereClause += "(distinctTagKey = ? AND distinctTagKey NOT LIKE '\\_\\_%%')"
			args = append(args, fieldKeySelector.Name)
		} else {
			whereClause += "(distinctTagKey ILIKE ? AND distinctTagKey NOT LIKE '\\_\\_%%')"
			args = append(args, fmt.Sprintf("%%%s%%", fieldKeySelector.Name))
		}
		if idx != len(fieldKeySelectors)-1 {
			whereClause += " OR "
		}
		limit += fieldKeySelector.Limit
	}
	args = append(args, limit)

	query := fmt.Sprintf(`
		SELECT
			arrayJoin(tagKeys) AS distinctTagKey
		FROM (
			SELECT JSONExtractKeys(labels) AS tagKeys
			FROM %s.%s
			WHERE `+innerWhereClause+`
			GROUP BY tagKeys
		)
		WHERE `+whereClause+`
		GROUP BY distinctTagKey
		LIMIT ?
	`, t.metricsDBName, t.timeseries1WTblName)

	rows, err := t.telemetrystore.ClickhouseDB().Query(ctx, query, args...)
	if err != nil {
		return nil, errors.Wrapf(err, errors.TypeInternal, errors.CodeInternal, ErrFailedToGetMetricsKeys.Error())
	}
	defer rows.Close()

	keys := []*telemetrytypes.TelemetryFieldKey{}
	for rows.Next() {
		var name string
		err = rows.Scan(&name)
		if err != nil {
			return nil, errors.Wrapf(err, errors.TypeInternal, errors.CodeInternal, ErrFailedToGetMetricsKeys.Error())
		}
		key := &telemetrytypes.TelemetryFieldKey{
			Name:          name,
			FieldContext:  telemetrytypes.FieldContextAttribute,
			FieldDataType: telemetrytypes.FieldDataTypeString,
		}
		keys = append(keys, key)
	}

	if rows.Err() != nil {
		return nil, errors.Wrapf(rows.Err(), errors.TypeInternal, errors.CodeInternal, ErrFailedToGetMetricsKeys.Error())
	}

	return keys, nil
}

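Spelled out for a single exact-match selector and no MetricContext, the assembled query reads as below; signoz_metrics.distributed_time_series_v4_1week is the 1-week timeseries table named elsewhere in this change, and host.name is an illustrative key. Bound args would be ["host.name", limit].

// The metrics-keys query after fmt.Sprintf expansion ('%%' collapses to '%').
const metricsKeysQuery = `
	SELECT
		arrayJoin(tagKeys) AS distinctTagKey
	FROM (
		SELECT JSONExtractKeys(labels) AS tagKeys
		FROM signoz_metrics.distributed_time_series_v4_1week
		WHERE __normalized = true
		GROUP BY tagKeys
	)
	WHERE (distinctTagKey = ? AND distinctTagKey NOT LIKE '\_\_%')
	GROUP BY distinctTagKey
	LIMIT ?
`
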
func (t *telemetryMetaStore) GetKeys(ctx context.Context, fieldKeySelector *telemetrytypes.FieldKeySelector) (map[string][]*telemetrytypes.TelemetryFieldKey, error) {
	var keys []*telemetrytypes.TelemetryFieldKey
	var err error
	switch fieldKeySelector.Signal {
	case telemetrytypes.SignalTraces:
		keys, err = t.getTracesKeys(ctx, []*telemetrytypes.FieldKeySelector{fieldKeySelector})
	case telemetrytypes.SignalLogs:
		keys, err = t.getLogsKeys(ctx, []*telemetrytypes.FieldKeySelector{fieldKeySelector})
	case telemetrytypes.SignalMetrics:
		keys, err = t.getMetricsKeys(ctx, []*telemetrytypes.FieldKeySelector{fieldKeySelector})
	case telemetrytypes.SignalUnspecified:
		// get traces keys
		tracesKeys, err := t.getTracesKeys(ctx, []*telemetrytypes.FieldKeySelector{fieldKeySelector})
		if err != nil {
			return nil, err
		}
		keys = append(keys, tracesKeys...)

		// get logs keys
		logsKeys, err := t.getLogsKeys(ctx, []*telemetrytypes.FieldKeySelector{fieldKeySelector})
		if err != nil {
			return nil, err
		}
		keys = append(keys, logsKeys...)

		// get metrics keys
		metricsKeys, err := t.getMetricsKeys(ctx, []*telemetrytypes.FieldKeySelector{fieldKeySelector})
		if err != nil {
			return nil, err
		}
		keys = append(keys, metricsKeys...)
	}
	if err != nil {
		return nil, err
	}

	mapOfKeys := make(map[string][]*telemetrytypes.TelemetryFieldKey)
	for _, key := range keys {
		mapOfKeys[key.Name] = append(mapOfKeys[key.Name], key)
	}

	return mapOfKeys, nil
}

func (t *telemetryMetaStore) GetKeysMulti(ctx context.Context, fieldKeySelectors []*telemetrytypes.FieldKeySelector) (map[string][]*telemetrytypes.TelemetryFieldKey, error) {

	logsSelectors := []*telemetrytypes.FieldKeySelector{}
	tracesSelectors := []*telemetrytypes.FieldKeySelector{}
	metricsSelectors := []*telemetrytypes.FieldKeySelector{}

	for _, fieldKeySelector := range fieldKeySelectors {
		switch fieldKeySelector.Signal {
		case telemetrytypes.SignalLogs:
			logsSelectors = append(logsSelectors, fieldKeySelector)
		case telemetrytypes.SignalTraces:
			tracesSelectors = append(tracesSelectors, fieldKeySelector)
		case telemetrytypes.SignalMetrics:
			metricsSelectors = append(metricsSelectors, fieldKeySelector)
		case telemetrytypes.SignalUnspecified:
			logsSelectors = append(logsSelectors, fieldKeySelector)
			tracesSelectors = append(tracesSelectors, fieldKeySelector)
			metricsSelectors = append(metricsSelectors, fieldKeySelector)
		}
	}

	logsKeys, err := t.getLogsKeys(ctx, logsSelectors)
	if err != nil {
		return nil, err
	}
	tracesKeys, err := t.getTracesKeys(ctx, tracesSelectors)
	if err != nil {
		return nil, err
	}
	metricsKeys, err := t.getMetricsKeys(ctx, metricsSelectors)
	if err != nil {
		return nil, err
	}

	mapOfKeys := make(map[string][]*telemetrytypes.TelemetryFieldKey)
	for _, key := range logsKeys {
		mapOfKeys[key.Name] = append(mapOfKeys[key.Name], key)
	}
	for _, key := range tracesKeys {
		mapOfKeys[key.Name] = append(mapOfKeys[key.Name], key)
	}
	for _, key := range metricsKeys {
		mapOfKeys[key.Name] = append(mapOfKeys[key.Name], key)
	}

	return mapOfKeys, nil
}

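A usage sketch for the fan-out: the keyFetcher interface and helper below are hypothetical, written only against the methods and field names shown in this diff; the store returned by NewTelemetryMetaStore would satisfy the interface.

package telemetrymetadata

import (
	"context"

	"github.com/SigNoz/signoz/pkg/types/telemetrytypes"
)

// keyFetcher is a hypothetical local interface covering the single method used below.
type keyFetcher interface {
	GetKeysMulti(ctx context.Context, selectors []*telemetrytypes.FieldKeySelector) (map[string][]*telemetrytypes.TelemetryFieldKey, error)
}

// lookupKeyAcrossSignals fans one exact-match lookup out to logs and traces in
// a single GetKeysMulti call; results come back merged by key name.
func lookupKeyAcrossSignals(ctx context.Context, store keyFetcher, name string) (map[string][]*telemetrytypes.TelemetryFieldKey, error) {
	logsSel := &telemetrytypes.FieldKeySelector{
		Signal: telemetrytypes.SignalLogs,
		Name:   name,
		Limit:  10,
	}
	logsSel.SelectorMatchType = telemetrytypes.FieldSelectorMatchTypeExact

	tracesSel := &telemetrytypes.FieldKeySelector{
		Signal: telemetrytypes.SignalTraces,
		Name:   name,
		Limit:  10,
	}
	tracesSel.SelectorMatchType = telemetrytypes.FieldSelectorMatchTypeExact

	return store.GetKeysMulti(ctx, []*telemetrytypes.FieldKeySelector{logsSel, tracesSel})
}
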
func (t *telemetryMetaStore) GetKey(ctx context.Context, fieldKeySelector *telemetrytypes.FieldKeySelector) ([]*telemetrytypes.TelemetryFieldKey, error) {
	keys, err := t.GetKeys(ctx, fieldKeySelector)
	if err != nil {
		return nil, err
	}
	return keys[fieldKeySelector.Name], nil
}

func (t *telemetryMetaStore) getRelatedValues(ctx context.Context, fieldValueSelector *telemetrytypes.FieldValueSelector) ([]string, error) {

	args := []any{}

	var andConditions []string

	andConditions = append(andConditions, `unix_milli >= ?`)
	args = append(args, fieldValueSelector.StartUnixMilli)

	andConditions = append(andConditions, `unix_milli <= ?`)
	args = append(args, fieldValueSelector.EndUnixMilli)

	if len(fieldValueSelector.ExistingQuery) != 0 {
		// TODO(srikanthccv): add the existing query to the where clause
	}
	whereClause := strings.Join(andConditions, " AND ")

	key := telemetrytypes.TelemetryFieldKey{
		Name:          fieldValueSelector.Name,
		Signal:        fieldValueSelector.Signal,
		FieldContext:  fieldValueSelector.FieldContext,
		FieldDataType: fieldValueSelector.FieldDataType,
	}

	// TODO(srikanthccv): add the select column
	selectColumn, _ := t.conditionBuilder.GetTableFieldName(ctx, &key)

	args = append(args, fieldValueSelector.Limit)
	filterSubQuery := fmt.Sprintf(
		"SELECT DISTINCT %s FROM %s.%s WHERE %s LIMIT ?",
		selectColumn,
		t.relatedMetadataDBName,
		t.relatedMetadataTblName,
		whereClause,
	)
	zap.L().Debug("filterSubQuery for related values", zap.String("query", filterSubQuery), zap.Any("args", args))

	rows, err := t.telemetrystore.ClickhouseDB().Query(ctx, filterSubQuery, args...)
	if err != nil {
		return nil, ErrFailedToGetRelatedValues
	}
	defer rows.Close()

	var attributeValues []string
	for rows.Next() {
		var value string
		if err := rows.Scan(&value); err != nil {
			return nil, ErrFailedToGetRelatedValues
		}
		if value != "" {
			attributeValues = append(attributeValues, value)
		}
	}

	return attributeValues, nil
}

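Illustration only: for a string resource attribute such as k8s.namespace.name, and assuming the condition builder resolves the key to the map expression resources_string['k8s.namespace.name'] and the store is wired to signoz_metadata.attributes_metadata (the names passed in the test further down), the subquery would render roughly as:

// Hypothetical rendering of filterSubQuery; bound args are
// [StartUnixMilli, EndUnixMilli, Limit] in that order.
const relatedValuesQuery = `SELECT DISTINCT resources_string['k8s.namespace.name'] ` +
	`FROM signoz_metadata.attributes_metadata ` +
	`WHERE unix_milli >= ? AND unix_milli <= ? LIMIT ?`
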
func (t *telemetryMetaStore) GetRelatedValues(ctx context.Context, fieldValueSelector *telemetrytypes.FieldValueSelector) ([]string, error) {
	return t.getRelatedValues(ctx, fieldValueSelector)
}

func (t *telemetryMetaStore) getSpanFieldValues(ctx context.Context, fieldValueSelector *telemetrytypes.FieldValueSelector) (*telemetrytypes.TelemetryFieldValues, error) {
	// build the query to get the values from the spans that match the field selection criteria
	limit := fieldValueSelector.Limit

	sb := sqlbuilder.Select("DISTINCT string_value, number_value").From(t.tracesDBName + "." + t.tracesFieldsTblName)

	if fieldValueSelector.Name != "" {
		sb.Where(sb.E("tag_key", fieldValueSelector.Name))
	}

	// now look at the field context
	if fieldValueSelector.FieldContext != telemetrytypes.FieldContextUnspecified {
		sb.Where(sb.E("tag_type", fieldValueSelector.FieldContext.TagType()))
	}

	// now look at the field data type
	if fieldValueSelector.FieldDataType != telemetrytypes.FieldDataTypeUnspecified {
		sb.Where(sb.E("tag_data_type", fieldValueSelector.FieldDataType.TagDataType()))
	}

	if fieldValueSelector.Value != "" {
		if fieldValueSelector.FieldDataType == telemetrytypes.FieldDataTypeString {
			sb.Where(sb.Like("string_value", "%"+fieldValueSelector.Value+"%"))
		} else if fieldValueSelector.FieldDataType == telemetrytypes.FieldDataTypeNumber {
			sb.Where(sb.IsNotNull("number_value"))
			sb.Where(sb.Like("toString(number_value)", "%"+fieldValueSelector.Value+"%"))
		}
	}

	if limit == 0 {
		limit = 50
	}
	sb.Limit(limit)

	query, args := sb.BuildWithFlavor(sqlbuilder.ClickHouse)

	rows, err := t.telemetrystore.ClickhouseDB().Query(ctx, query, args...)
	if err != nil {
		return nil, errors.Wrapf(err, errors.TypeInternal, errors.CodeInternal, ErrFailedToGetTracesKeys.Error())
	}
	defer rows.Close()

	values := &telemetrytypes.TelemetryFieldValues{}
	seen := make(map[string]bool)
	for rows.Next() {
		var stringValue string
		var numberValue float64
		if err := rows.Scan(&stringValue, &numberValue); err != nil {
			return nil, errors.Wrapf(err, errors.TypeInternal, errors.CodeInternal, ErrFailedToGetTracesKeys.Error())
		}
		if _, ok := seen[stringValue]; !ok {
			values.StringValues = append(values.StringValues, stringValue)
			seen[stringValue] = true
		}
		if _, ok := seen[fmt.Sprintf("%f", numberValue)]; !ok && numberValue != 0 {
			values.NumberValues = append(values.NumberValues, numberValue)
			seen[fmt.Sprintf("%f", numberValue)] = true
		}
	}

	return values, nil
}

func (t *telemetryMetaStore) getLogFieldValues(ctx context.Context, fieldValueSelector *telemetrytypes.FieldValueSelector) (*telemetrytypes.TelemetryFieldValues, error) {
	// build the query to get the values from the logs that match the field selection criteria
	limit := fieldValueSelector.Limit

	sb := sqlbuilder.Select("DISTINCT string_value, number_value").From(t.logsDBName + "." + t.logsFieldsTblName)

	if fieldValueSelector.Name != "" {
		sb.Where(sb.E("tag_key", fieldValueSelector.Name))
	}

	if fieldValueSelector.FieldContext != telemetrytypes.FieldContextUnspecified {
		sb.Where(sb.E("tag_type", fieldValueSelector.FieldContext.TagType()))
	}

	if fieldValueSelector.FieldDataType != telemetrytypes.FieldDataTypeUnspecified {
		sb.Where(sb.E("tag_data_type", fieldValueSelector.FieldDataType.TagDataType()))
	}

	if fieldValueSelector.Value != "" {
		if fieldValueSelector.FieldDataType == telemetrytypes.FieldDataTypeString {
			sb.Where(sb.Like("string_value", "%"+fieldValueSelector.Value+"%"))
		} else if fieldValueSelector.FieldDataType == telemetrytypes.FieldDataTypeNumber {
			sb.Where(sb.IsNotNull("number_value"))
			sb.Where(sb.Like("toString(number_value)", "%"+fieldValueSelector.Value+"%"))
		}
	}

	if limit == 0 {
		limit = 50
	}
	sb.Limit(limit)

	query, args := sb.BuildWithFlavor(sqlbuilder.ClickHouse)

	rows, err := t.telemetrystore.ClickhouseDB().Query(ctx, query, args...)
	if err != nil {
		return nil, errors.Wrapf(err, errors.TypeInternal, errors.CodeInternal, ErrFailedToGetLogsKeys.Error())
	}
	defer rows.Close()

	values := &telemetrytypes.TelemetryFieldValues{}
	seen := make(map[string]bool)
	for rows.Next() {
		var stringValue string
		var numberValue float64
		if err := rows.Scan(&stringValue, &numberValue); err != nil {
			return nil, errors.Wrapf(err, errors.TypeInternal, errors.CodeInternal, ErrFailedToGetLogsKeys.Error())
		}
		if _, ok := seen[stringValue]; !ok {
			values.StringValues = append(values.StringValues, stringValue)
			seen[stringValue] = true
		}
		if _, ok := seen[fmt.Sprintf("%f", numberValue)]; !ok && numberValue != 0 {
			values.NumberValues = append(values.NumberValues, numberValue)
			seen[fmt.Sprintf("%f", numberValue)] = true
		}
	}
	return values, nil
}

func (t *telemetryMetaStore) getMetricFieldValues(_ context.Context, _ *telemetrytypes.FieldValueSelector) (*telemetrytypes.TelemetryFieldValues, error) {
	// TODO(srikanthccv): implement this. use new tables?
	return nil, nil
}

func (t *telemetryMetaStore) GetAllValues(ctx context.Context, fieldValueSelector *telemetrytypes.FieldValueSelector) (*telemetrytypes.TelemetryFieldValues, error) {
	var values *telemetrytypes.TelemetryFieldValues
	var err error
	switch fieldValueSelector.Signal {
	case telemetrytypes.SignalTraces:
		values, err = t.getSpanFieldValues(ctx, fieldValueSelector)
	case telemetrytypes.SignalLogs:
		values, err = t.getLogFieldValues(ctx, fieldValueSelector)
	case telemetrytypes.SignalMetrics:
		values, err = t.getMetricFieldValues(ctx, fieldValueSelector)
	}
	if err != nil {
		return nil, err
	}
	return values, nil
}

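A hedged usage sketch for value autocomplete; the valueFetcher interface and the helper are illustrative, and the field layout of FieldValueSelector is assumed from the accesses above (fields are set by assignment so the sketch holds whether they are direct or promoted):

package telemetrymetadata

import (
	"context"

	"github.com/SigNoz/signoz/pkg/types/telemetrytypes"
)

// valueFetcher is a hypothetical local interface covering the single method used below.
type valueFetcher interface {
	GetAllValues(ctx context.Context, selector *telemetrytypes.FieldValueSelector) (*telemetrytypes.TelemetryFieldValues, error)
}

// podNameSuggestions fetches distinct values of the k8s.pod.name resource
// attribute on logs whose value contains "frontend". With Limit left unset,
// the store falls back to its default of 50.
func podNameSuggestions(ctx context.Context, store valueFetcher) ([]string, error) {
	selector := &telemetrytypes.FieldValueSelector{}
	selector.Signal = telemetrytypes.SignalLogs
	selector.Name = "k8s.pod.name"
	selector.FieldContext = telemetrytypes.FieldContextResource
	selector.FieldDataType = telemetrytypes.FieldDataTypeString
	selector.Value = "frontend"

	values, err := store.GetAllValues(ctx, selector)
	if err != nil {
		return nil, err
	}
	return values.StringValues, nil
}
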
86
pkg/telemetrymetadata/metadata_test.go
Normal file
@@ -0,0 +1,86 @@
package telemetrymetadata

import (
	"context"
	"fmt"
	"regexp"
	"testing"

	"github.com/SigNoz/signoz/pkg/telemetrylogs"
	"github.com/SigNoz/signoz/pkg/telemetrymetrics"
	"github.com/SigNoz/signoz/pkg/telemetrystore"
	"github.com/SigNoz/signoz/pkg/telemetrystore/telemetrystoretest"
	"github.com/SigNoz/signoz/pkg/telemetrytraces"
	"github.com/SigNoz/signoz/pkg/types/telemetrytypes"
	cmock "github.com/srikanthccv/ClickHouse-go-mock"
)

type regexMatcher struct{}

func (m *regexMatcher) Match(expectedSQL, actualSQL string) error {
	re, err := regexp.Compile(expectedSQL)
	if err != nil {
		return err
	}
	if !re.MatchString(actualSQL) {
		return fmt.Errorf("expected query to contain %s, got %s", expectedSQL, actualSQL)
	}
	return nil
}

func TestGetKeys(t *testing.T) {
	mockTelemetryStore := telemetrystoretest.New(telemetrystore.Config{}, &regexMatcher{})
	mock := mockTelemetryStore.Mock()

	metadata, err := NewTelemetryMetaStore(
		mockTelemetryStore,
		telemetrytraces.DBName,
		telemetrytraces.TagAttributesV2TableName,
		telemetrytraces.SpanIndexV3TableName,
		telemetrymetrics.DBName,
		telemetrymetrics.TimeseriesV41weekTableName,
		telemetrymetrics.TimeseriesV41weekTableName,
		telemetrylogs.DBName,
		telemetrylogs.LogsV2TableName,
		telemetrylogs.TagAttributesV2TableName,
		DBName,
		AttributesMetadataLocalTableName,
	)

	if err != nil {
		t.Fatalf("Failed to create telemetry metadata store: %v", err)
	}

	rows := cmock.NewRows([]cmock.ColumnType{
		{Name: "statement", Type: "String"},
	}, [][]any{{"CREATE TABLE signoz_traces.signoz_index_v3"}})

	mock.
		ExpectSelect("SHOW CREATE TABLE signoz_traces.distributed_signoz_index_v3").
		WillReturnRows(rows)

	query := `SELECT.*`

	mock.ExpectQuery(query).
		WithArgs("%http.method%", telemetrytypes.FieldContextSpan.TagType(), telemetrytypes.FieldDataTypeString.TagDataType(), 10).
		WillReturnRows(cmock.NewRows([]cmock.ColumnType{
			{Name: "tag_key", Type: "String"},
			{Name: "tag_type", Type: "String"},
			{Name: "tag_data_type", Type: "String"},
			{Name: "priority", Type: "UInt8"},
		}, [][]any{{"http.method", "tag", "String", 1}, {"http.method", "tag", "String", 1}}))
	keys, err := metadata.GetKeys(context.Background(), &telemetrytypes.FieldKeySelector{
		Signal:        telemetrytypes.SignalTraces,
		FieldContext:  telemetrytypes.FieldContextSpan,
		FieldDataType: telemetrytypes.FieldDataTypeString,
		Name:          "http.method",
		Limit:         10,
	})

	if err != nil {
		t.Fatalf("Failed to get keys: %v", err)
	}

	t.Logf("Keys: %v", keys)
}
132
pkg/telemetrymetadata/stmt_parse.go
Normal file
@@ -0,0 +1,132 @@
package telemetrymetadata

import (
	"strings"

	"github.com/AfterShip/clickhouse-sql-parser/parser"
	"github.com/SigNoz/signoz/pkg/types/telemetrytypes"
)

// TelemetryFieldVisitor is an AST visitor for extracting telemetry fields
type TelemetryFieldVisitor struct {
	parser.DefaultASTVisitor
	Fields []*telemetrytypes.TelemetryFieldKey
}

func NewTelemetryFieldVisitor() *TelemetryFieldVisitor {
	return &TelemetryFieldVisitor{
		Fields: make([]*telemetrytypes.TelemetryFieldKey, 0),
	}
}

// VisitColumnDef is called when visiting a column definition
func (v *TelemetryFieldVisitor) VisitColumnDef(expr *parser.ColumnDef) error {
	// Check if this is a materialized column with a DEFAULT expression
	if expr.DefaultExpr == nil {
		return nil
	}

	// Parse the column name to extract context and data type
	columnName := expr.Name.String()

	// Remove backticks if present
	columnName = strings.TrimPrefix(columnName, "`")
	columnName = strings.TrimSuffix(columnName, "`")

	// Parse the column name to extract components
	parts := strings.Split(columnName, "_")
	if len(parts) < 2 {
		return nil
	}

	context := parts[0]
	dataType := parts[1]

	// Check if this is a valid telemetry column
	var fieldContext telemetrytypes.FieldContext
	switch context {
	case "resource":
		fieldContext = telemetrytypes.FieldContextResource
	case "scope":
		fieldContext = telemetrytypes.FieldContextScope
	case "attribute":
		fieldContext = telemetrytypes.FieldContextAttribute
	default:
		return nil // Not a telemetry column
	}

	// Check and convert the data type
	var fieldDataType telemetrytypes.FieldDataType
	switch dataType {
	case "string":
		fieldDataType = telemetrytypes.FieldDataTypeString
	case "bool":
		fieldDataType = telemetrytypes.FieldDataTypeBool
	case "int", "int64", "float", "float64", "number":
		fieldDataType = telemetrytypes.FieldDataTypeFloat64
	default:
		return nil // Unknown data type
	}

	// Extract the field name from the DEFAULT expression.
	// The DEFAULT expression should be something like: resources_string['k8s.cluster.name']
	// We need to extract the key inside the square brackets.
	defaultExprStr := expr.DefaultExpr.String()

	// Look for the pattern: map['key']
	startIdx := strings.Index(defaultExprStr, "['")
	endIdx := strings.Index(defaultExprStr, "']")

	if startIdx == -1 || endIdx == -1 || startIdx+2 >= endIdx {
		return nil // Invalid DEFAULT expression format
	}

	fieldName := defaultExprStr[startIdx+2 : endIdx]

	// Create and store the TelemetryFieldKey
	field := telemetrytypes.TelemetryFieldKey{
		Name:          fieldName,
		FieldContext:  fieldContext,
		FieldDataType: fieldDataType,
		Materialized:  true,
	}

	v.Fields = append(v.Fields, &field)
	return nil
}

func ExtractFieldKeysFromTblStatement(statement string) ([]*telemetrytypes.TelemetryFieldKey, error) {
	// Parse the CREATE TABLE statement using the ClickHouse parser
	p := parser.NewParser(statement)
	stmts, err := p.ParseStmts()
	if err != nil {
		return nil, err
	}

	// Create a visitor to collect telemetry fields
	visitor := NewTelemetryFieldVisitor()

	// Visit each statement
	for _, stmt := range stmts {
		// We're looking for CreateTable statements
		createTable, ok := stmt.(*parser.CreateTable)
		if !ok {
			continue
		}

		// Visit the table schema to extract column definitions
		if createTable.TableSchema != nil {
			for _, column := range createTable.TableSchema.Columns {
				if err := column.Accept(visitor); err != nil {
					return nil, err
				}
			}
		}
	}

	return visitor.Fields, nil
}
148
pkg/telemetrymetadata/stmt_parse_test.go
Normal file
@@ -0,0 +1,148 @@
package telemetrymetadata

import (
	"slices"
	"testing"

	"github.com/SigNoz/signoz/pkg/types/telemetrytypes"
)

func TestExtractFieldKeysFromTblStatement(t *testing.T) {

	var statement = `CREATE TABLE signoz_logs.logs_v2
(
    ` + "`ts_bucket_start`" + ` UInt64 CODEC(DoubleDelta, LZ4),
    ` + "`resource_fingerprint`" + ` String CODEC(ZSTD(1)),
    ` + "`timestamp`" + ` UInt64 CODEC(DoubleDelta, LZ4),
    ` + "`observed_timestamp`" + ` UInt64 CODEC(DoubleDelta, LZ4),
    ` + "`id`" + ` String CODEC(ZSTD(1)),
    ` + "`trace_id`" + ` String CODEC(ZSTD(1)),
    ` + "`span_id`" + ` String CODEC(ZSTD(1)),
    ` + "`trace_flags`" + ` UInt32,
    ` + "`severity_text`" + ` LowCardinality(String) CODEC(ZSTD(1)),
    ` + "`severity_number`" + ` UInt8,
    ` + "`body`" + ` String CODEC(ZSTD(2)),
    ` + "`attributes_string`" + ` Map(LowCardinality(String), String) CODEC(ZSTD(1)),
    ` + "`attributes_number`" + ` Map(LowCardinality(String), Float64) CODEC(ZSTD(1)),
    ` + "`attributes_bool`" + ` Map(LowCardinality(String), Bool) CODEC(ZSTD(1)),
    ` + "`resources_string`" + ` Map(LowCardinality(String), String) CODEC(ZSTD(1)),
    ` + "`scope_name`" + ` String CODEC(ZSTD(1)),
    ` + "`scope_version`" + ` String CODEC(ZSTD(1)),
    ` + "`scope_string`" + ` Map(LowCardinality(String), String) CODEC(ZSTD(1)),
    ` + "`attribute_number_input_size`" + ` Int64 DEFAULT attributes_number['input_size'] CODEC(ZSTD(1)),
    ` + "`attribute_number_input_size_exists`" + ` Bool DEFAULT if(mapContains(attributes_number, 'input_size') != 0, true, false) CODEC(ZSTD(1)),
    ` + "`attribute_string_log$$iostream`" + ` String DEFAULT attributes_string['log.iostream'] CODEC(ZSTD(1)),
    ` + "`attribute_string_log$$iostream_exists`" + ` Bool DEFAULT if(mapContains(attributes_string, 'log.iostream') != 0, true, false) CODEC(ZSTD(1)),
    ` + "`attribute_string_log$$file$$path`" + ` String DEFAULT attributes_string['log.file.path'] CODEC(ZSTD(1)),
    ` + "`attribute_string_log$$file$$path_exists`" + ` Bool DEFAULT if(mapContains(attributes_string, 'log.file.path') != 0, true, false) CODEC(ZSTD(1)),
    ` + "`resource_string_k8s$$cluster$$name`" + ` String DEFAULT resources_string['k8s.cluster.name'] CODEC(ZSTD(1)),
    ` + "`resource_string_k8s$$cluster$$name_exists`" + ` Bool DEFAULT if(mapContains(resources_string, 'k8s.cluster.name') != 0, true, false) CODEC(ZSTD(1)),
    ` + "`resource_string_k8s$$namespace$$name`" + ` String DEFAULT resources_string['k8s.namespace.name'] CODEC(ZSTD(1)),
    ` + "`resource_string_k8s$$namespace$$name_exists`" + ` Bool DEFAULT if(mapContains(resources_string, 'k8s.namespace.name') != 0, true, false) CODEC(ZSTD(1)),
    ` + "`resource_string_k8s$$pod$$name`" + ` String DEFAULT resources_string['k8s.pod.name'] CODEC(ZSTD(1)),
    ` + "`resource_string_k8s$$pod$$name_exists`" + ` Bool DEFAULT if(mapContains(resources_string, 'k8s.pod.name') != 0, true, false) CODEC(ZSTD(1)),
    ` + "`resource_string_k8s$$node$$name`" + ` String DEFAULT resources_string['k8s.node.name'] CODEC(ZSTD(1)),
    ` + "`resource_string_k8s$$node$$name_exists`" + ` Bool DEFAULT if(mapContains(resources_string, 'k8s.node.name') != 0, true, false) CODEC(ZSTD(1)),
    ` + "`resource_string_k8s$$container$$name`" + ` String DEFAULT resources_string['k8s.container.name'] CODEC(ZSTD(1)),
    ` + "`resource_string_k8s$$container$$name_exists`" + ` Bool DEFAULT if(mapContains(resources_string, 'k8s.container.name') != 0, true, false) CODEC(ZSTD(1)),
    ` + "`resource_string_k8s$$deployment$$name`" + ` String DEFAULT resources_string['k8s.deployment.name'] CODEC(ZSTD(1)),
    ` + "`resource_string_k8s$$deployment$$name_exists`" + ` Bool DEFAULT if(mapContains(resources_string, 'k8s.deployment.name') != 0, true, false) CODEC(ZSTD(1)),
    ` + "`attribute_string_processor`" + ` String DEFAULT attributes_string['processor'] CODEC(ZSTD(1)),
    ` + "`attribute_string_processor_exists`" + ` Bool DEFAULT if(mapContains(attributes_string, 'processor') != 0, true, false) CODEC(ZSTD(1)),
    INDEX body_idx lower(body) TYPE ngrambf_v1(4, 60000, 5, 0) GRANULARITY 1,
    INDEX id_minmax id TYPE minmax GRANULARITY 1,
    INDEX severity_number_idx severity_number TYPE set(25) GRANULARITY 4,
    INDEX severity_text_idx severity_text TYPE set(25) GRANULARITY 4,
    INDEX trace_flags_idx trace_flags TYPE bloom_filter GRANULARITY 4,
    INDEX scope_name_idx scope_name TYPE tokenbf_v1(10240, 3, 0) GRANULARITY 4,
    INDEX ` + "`resource_string_k8s$$cluster$$name_idx`" + ` ` + "`resource_string_k8s$$cluster$$name`" + ` TYPE bloom_filter(0.01) GRANULARITY 64,
    INDEX ` + "`resource_string_k8s$$namespace$$name_idx`" + ` ` + "`resource_string_k8s$$namespace$$name`" + ` TYPE bloom_filter(0.01) GRANULARITY 64,
    INDEX ` + "`resource_string_k8s$$pod$$name_idx`" + ` ` + "`resource_string_k8s$$pod$$name`" + ` TYPE bloom_filter(0.01) GRANULARITY 64,
    INDEX ` + "`resource_string_k8s$$node$$name_idx`" + ` ` + "`resource_string_k8s$$node$$name`" + ` TYPE bloom_filter(0.01) GRANULARITY 64,
    INDEX ` + "`resource_string_k8s$$container$$name_idx`" + ` ` + "`resource_string_k8s$$container$$name`" + ` TYPE bloom_filter(0.01) GRANULARITY 64,
    INDEX ` + "`resource_string_k8s$$deployment$$name_idx`" + ` ` + "`resource_string_k8s$$deployment$$name`" + ` TYPE bloom_filter(0.01) GRANULARITY 64,
    INDEX attribute_string_processor_idx attribute_string_processor TYPE bloom_filter(0.01) GRANULARITY 64
)
ENGINE = ReplicatedMergeTree('/clickhouse/tables/{uuid}/{shard}', '{replica}')
PARTITION BY toDate(timestamp / 1000000000)
ORDER BY (ts_bucket_start, resource_fingerprint, severity_text, timestamp, id)
TTL toDateTime(timestamp / 1000000000) + toIntervalSecond(2592000)
SETTINGS ttl_only_drop_parts = 1, index_granularity = 8192`

	keys, err := ExtractFieldKeysFromTblStatement(statement)
	if err != nil {
		t.Fatalf("failed to extract field keys from tbl statement: %v", err)
	}

	// some expected keys
	expectedKeys := []*telemetrytypes.TelemetryFieldKey{
		{
			Name:          "k8s.pod.name",
			FieldContext:  telemetrytypes.FieldContextResource,
			FieldDataType: telemetrytypes.FieldDataTypeString,
			Materialized:  true,
		},
		{
			Name:          "k8s.cluster.name",
			FieldContext:  telemetrytypes.FieldContextResource,
			FieldDataType: telemetrytypes.FieldDataTypeString,
			Materialized:  true,
		},
		{
			Name:          "k8s.namespace.name",
			FieldContext:  telemetrytypes.FieldContextResource,
			FieldDataType: telemetrytypes.FieldDataTypeString,
			Materialized:  true,
		},
		{
			Name:          "k8s.deployment.name",
			FieldContext:  telemetrytypes.FieldContextResource,
			FieldDataType: telemetrytypes.FieldDataTypeString,
			Materialized:  true,
		},
		{
			Name:          "k8s.node.name",
			FieldContext:  telemetrytypes.FieldContextResource,
			FieldDataType: telemetrytypes.FieldDataTypeString,
			Materialized:  true,
		},
		{
			Name:          "k8s.container.name",
			FieldContext:  telemetrytypes.FieldContextResource,
			FieldDataType: telemetrytypes.FieldDataTypeString,
			Materialized:  true,
		},
		{
			Name:          "processor",
			FieldContext:  telemetrytypes.FieldContextAttribute,
			FieldDataType: telemetrytypes.FieldDataTypeString,
			Materialized:  true,
		},
		{
			Name:          "input_size",
			FieldContext:  telemetrytypes.FieldContextAttribute,
			FieldDataType: telemetrytypes.FieldDataTypeFloat64,
			Materialized:  true,
		},
		{
			Name:          "log.iostream",
			FieldContext:  telemetrytypes.FieldContextAttribute,
			FieldDataType: telemetrytypes.FieldDataTypeString,
			Materialized:  true,
		},
		{
			Name:          "log.file.path",
			FieldContext:  telemetrytypes.FieldContextAttribute,
			FieldDataType: telemetrytypes.FieldDataTypeString,
			Materialized:  true,
		},
	}

	for _, key := range expectedKeys {
		if !slices.ContainsFunc(keys, func(k *telemetrytypes.TelemetryFieldKey) bool {
			return k.Name == key.Name && k.FieldContext == key.FieldContext && k.FieldDataType == key.FieldDataType && k.Materialized == key.Materialized
		}) {
			t.Errorf("expected key %v not found", key)
		}
	}
}
7
pkg/telemetrymetadata/tables.go
Normal file
@@ -0,0 +1,7 @@
package telemetrymetadata

const (
	DBName                           = "signoz_metadata"
	AttributesMetadataTableName      = "distributed_attributes_metadata"
	AttributesMetadataLocalTableName = "attributes_metadata"
)
21
pkg/telemetrymetrics/tables.go
Normal file
@@ -0,0 +1,21 @@
package telemetrymetrics

const (
	DBName                          = "signoz_metrics"
	SamplesV4TableName              = "distributed_samples_v4"
	SamplesV4LocalTableName         = "samples_v4"
	SamplesV4Agg5mTableName         = "distributed_samples_v4_agg_5m"
	SamplesV4Agg5mLocalTableName    = "samples_v4_agg_5m"
	SamplesV4Agg30mTableName        = "distributed_samples_v4_agg_30m"
	SamplesV4Agg30mLocalTableName   = "samples_v4_agg_30m"
	ExpHistogramTableName           = "distributed_exp_hist"
	ExpHistogramLocalTableName      = "exp_hist"
	TimeseriesV4TableName           = "distributed_time_series_v4"
	TimeseriesV4LocalTableName      = "time_series_v4"
	TimeseriesV46hrsTableName       = "distributed_time_series_v4_6hrs"
	TimeseriesV46hrsLocalTableName  = "time_series_v4_6hrs"
	TimeseriesV41dayTableName       = "distributed_time_series_v4_1day"
	TimeseriesV41dayLocalTableName  = "time_series_v4_1day"
	TimeseriesV41weekTableName      = "distributed_time_series_v4_1week"
	TimeseriesV41weekLocalTableName = "time_series_v4_1week"
)
352
pkg/telemetrytraces/condition_builder.go
Normal file
@@ -0,0 +1,352 @@
package telemetrytraces

import (
	"context"
	"fmt"

	schema "github.com/SigNoz/signoz-otel-collector/cmd/signozschemamigrator/schema_migrator"
	qbtypes "github.com/SigNoz/signoz/pkg/types/querybuildertypes/querybuildertypesv5"
	"github.com/SigNoz/signoz/pkg/types/telemetrytypes"
	"github.com/huandu/go-sqlbuilder"
)

var (
	indexV3Columns = map[string]*schema.Column{
		"ts_bucket_start":      {Name: "ts_bucket_start", Type: schema.ColumnTypeUInt64},
		"resource_fingerprint": {Name: "resource_fingerprint", Type: schema.ColumnTypeString},

		// intrinsic columns
		"timestamp":          {Name: "timestamp", Type: schema.DateTime64ColumnType{Precision: 9, Timezone: "UTC"}},
		"trace_id":           {Name: "trace_id", Type: schema.FixedStringColumnType{Length: 32}},
		"span_id":            {Name: "span_id", Type: schema.ColumnTypeString},
		"trace_state":        {Name: "trace_state", Type: schema.ColumnTypeString},
		"parent_span_id":     {Name: "parent_span_id", Type: schema.ColumnTypeString},
		"flags":              {Name: "flags", Type: schema.ColumnTypeUInt32},
		"name":               {Name: "name", Type: schema.LowCardinalityColumnType{ElementType: schema.ColumnTypeString}},
		"kind":               {Name: "kind", Type: schema.ColumnTypeInt8},
		"kind_string":        {Name: "kind_string", Type: schema.ColumnTypeString},
		"duration_nano":      {Name: "duration_nano", Type: schema.ColumnTypeUInt64},
		"status_code":        {Name: "status_code", Type: schema.ColumnTypeInt16},
		"status_message":     {Name: "status_message", Type: schema.ColumnTypeString},
		"status_code_string": {Name: "status_code_string", Type: schema.ColumnTypeString},

		// attributes columns
		"attributes_string": {Name: "attributes_string", Type: schema.MapColumnType{
			KeyType:   schema.LowCardinalityColumnType{ElementType: schema.ColumnTypeString},
			ValueType: schema.ColumnTypeString,
		}},
		"attributes_number": {Name: "attributes_number", Type: schema.MapColumnType{
			KeyType:   schema.LowCardinalityColumnType{ElementType: schema.ColumnTypeString},
			ValueType: schema.ColumnTypeFloat64,
		}},
		"attributes_bool": {Name: "attributes_bool", Type: schema.MapColumnType{
			KeyType:   schema.LowCardinalityColumnType{ElementType: schema.ColumnTypeString},
			ValueType: schema.ColumnTypeBool,
		}},
		"resources_string": {Name: "resources_string", Type: schema.MapColumnType{
			KeyType:   schema.LowCardinalityColumnType{ElementType: schema.ColumnTypeString},
			ValueType: schema.ColumnTypeString,
		}},

		"events": {Name: "events", Type: schema.ArrayColumnType{
			ElementType: schema.ColumnTypeString,
		}},
		"links": {Name: "links", Type: schema.ColumnTypeString},
		// derived columns
		"response_status_code": {Name: "response_status_code", Type: schema.LowCardinalityColumnType{ElementType: schema.ColumnTypeString}},
		"external_http_url":    {Name: "external_http_url", Type: schema.LowCardinalityColumnType{ElementType: schema.ColumnTypeString}},
		"http_url":             {Name: "http_url", Type: schema.LowCardinalityColumnType{ElementType: schema.ColumnTypeString}},
		"external_http_method": {Name: "external_http_method", Type: schema.LowCardinalityColumnType{ElementType: schema.ColumnTypeString}},
		"http_method":          {Name: "http_method", Type: schema.LowCardinalityColumnType{ElementType: schema.ColumnTypeString}},
		"http_host":            {Name: "http_host", Type: schema.LowCardinalityColumnType{ElementType: schema.ColumnTypeString}},
		"db_name":              {Name: "db_name", Type: schema.LowCardinalityColumnType{ElementType: schema.ColumnTypeString}},
		"db_operation":         {Name: "db_operation", Type: schema.LowCardinalityColumnType{ElementType: schema.ColumnTypeString}},
		"has_error":            {Name: "has_error", Type: schema.ColumnTypeBool},
		"is_remote":            {Name: "is_remote", Type: schema.LowCardinalityColumnType{ElementType: schema.ColumnTypeString}},
		// materialized columns
		"resource_string_service$$name":         {Name: "resource_string_service$$name", Type: schema.ColumnTypeString},
		"attribute_string_http$$route":          {Name: "attribute_string_http$$route", Type: schema.ColumnTypeString},
		"attribute_string_messaging$$system":    {Name: "attribute_string_messaging$$system", Type: schema.ColumnTypeString},
		"attribute_string_messaging$$operation": {Name: "attribute_string_messaging$$operation", Type: schema.ColumnTypeString},
		"attribute_string_db$$system":           {Name: "attribute_string_db$$system", Type: schema.ColumnTypeString},
		"attribute_string_rpc$$system":          {Name: "attribute_string_rpc$$system", Type: schema.ColumnTypeString},
		"attribute_string_rpc$$service":         {Name: "attribute_string_rpc$$service", Type: schema.ColumnTypeString},
		"attribute_string_rpc$$method":          {Name: "attribute_string_rpc$$method", Type: schema.ColumnTypeString},
		"attribute_string_peer$$service":        {Name: "attribute_string_peer$$service", Type: schema.ColumnTypeString},

		// deprecated intrinsic columns
		"traceID":          {Name: "traceID", Type: schema.FixedStringColumnType{Length: 32}},
		"spanID":           {Name: "spanID", Type: schema.ColumnTypeString},
		"parentSpanID":     {Name: "parentSpanID", Type: schema.ColumnTypeString},
		"spanKind":         {Name: "spanKind", Type: schema.ColumnTypeString},
		"durationNano":     {Name: "durationNano", Type: schema.ColumnTypeUInt64},
		"statusCode":       {Name: "statusCode", Type: schema.ColumnTypeInt16},
		"statusMessage":    {Name: "statusMessage", Type: schema.ColumnTypeString},
		"statusCodeString": {Name: "statusCodeString", Type: schema.ColumnTypeString},

		// deprecated derived columns
		"references":         {Name: "references", Type: schema.ColumnTypeString},
		"responseStatusCode": {Name: "responseStatusCode", Type: schema.ColumnTypeString},
		"externalHttpUrl":    {Name: "externalHttpUrl", Type: schema.ColumnTypeString},
		"httpUrl":            {Name: "httpUrl", Type: schema.ColumnTypeString},
		"externalHttpMethod": {Name: "externalHttpMethod", Type: schema.ColumnTypeString},
		"httpMethod":         {Name: "httpMethod", Type: schema.LowCardinalityColumnType{ElementType: schema.ColumnTypeString}},
		"httpHost":           {Name: "httpHost", Type: schema.LowCardinalityColumnType{ElementType: schema.ColumnTypeString}},
		"dbName":             {Name: "dbName", Type: schema.LowCardinalityColumnType{ElementType: schema.ColumnTypeString}},
		"dbOperation":        {Name: "dbOperation", Type: schema.LowCardinalityColumnType{ElementType: schema.ColumnTypeString}},
		"hasError":           {Name: "hasError", Type: schema.ColumnTypeBool},
		"isRemote":           {Name: "isRemote", Type: schema.LowCardinalityColumnType{ElementType: schema.ColumnTypeString}},
		"serviceName":        {Name: "serviceName", Type: schema.LowCardinalityColumnType{ElementType: schema.ColumnTypeString}},
		"httpRoute":          {Name: "httpRoute", Type: schema.LowCardinalityColumnType{ElementType: schema.ColumnTypeString}},
		"msgSystem":          {Name: "msgSystem", Type: schema.LowCardinalityColumnType{ElementType: schema.ColumnTypeString}},
		"msgOperation":       {Name: "msgOperation", Type: schema.LowCardinalityColumnType{ElementType: schema.ColumnTypeString}},
		"dbSystem":           {Name: "dbSystem", Type: schema.LowCardinalityColumnType{ElementType: schema.ColumnTypeString}},
		"rpcSystem":          {Name: "rpcSystem", Type: schema.LowCardinalityColumnType{ElementType: schema.ColumnTypeString}},
		"rpcService":         {Name: "rpcService", Type: schema.LowCardinalityColumnType{ElementType: schema.ColumnTypeString}},
		"rpcMethod":          {Name: "rpcMethod", Type: schema.LowCardinalityColumnType{ElementType: schema.ColumnTypeString}},
		"peerService":        {Name: "peerService", Type: schema.LowCardinalityColumnType{ElementType: schema.ColumnTypeString}},

		// materialized exists columns
		"resource_string_service$$name_exists":         {Name: "resource_string_service$$name_exists", Type: schema.ColumnTypeBool},
		"attribute_string_http$$route_exists":          {Name: "attribute_string_http$$route_exists", Type: schema.ColumnTypeBool},
		"attribute_string_messaging$$system_exists":    {Name: "attribute_string_messaging$$system_exists", Type: schema.ColumnTypeBool},
		"attribute_string_messaging$$operation_exists": {Name: "attribute_string_messaging$$operation_exists", Type: schema.ColumnTypeBool},
		"attribute_string_db$$system_exists":           {Name: "attribute_string_db$$system_exists", Type: schema.ColumnTypeBool},
		"attribute_string_rpc$$system_exists":          {Name: "attribute_string_rpc$$system_exists", Type: schema.ColumnTypeBool},
		"attribute_string_rpc$$service_exists":         {Name: "attribute_string_rpc$$service_exists", Type: schema.ColumnTypeBool},
		"attribute_string_rpc$$method_exists":          {Name: "attribute_string_rpc$$method_exists", Type: schema.ColumnTypeBool},
		"attribute_string_peer$$service_exists":        {Name: "attribute_string_peer$$service_exists", Type: schema.ColumnTypeBool},
	}
)

// interface check
var _ qbtypes.ConditionBuilder = &conditionBuilder{}

type conditionBuilder struct{}

func NewConditionBuilder() qbtypes.ConditionBuilder {
	return &conditionBuilder{}
}

func (c *conditionBuilder) GetColumn(ctx context.Context, key *telemetrytypes.TelemetryFieldKey) (*schema.Column, error) {

	switch key.FieldContext {
	case telemetrytypes.FieldContextResource:
		return indexV3Columns["resources_string"], nil
	case telemetrytypes.FieldContextScope:
		// we don't have scope data stored in the spans yet
		return nil, qbtypes.ErrColumnNotFound
	case telemetrytypes.FieldContextAttribute:
		switch key.FieldDataType {
		case telemetrytypes.FieldDataTypeString:
			return indexV3Columns["attributes_string"], nil
		case telemetrytypes.FieldDataTypeInt64, telemetrytypes.FieldDataTypeFloat64, telemetrytypes.FieldDataTypeNumber:
			return indexV3Columns["attributes_number"], nil
		case telemetrytypes.FieldDataTypeBool:
			return indexV3Columns["attributes_bool"], nil
		}
	case telemetrytypes.FieldContextSpan:
		col, ok := indexV3Columns[key.Name]
		if !ok {
			return nil, qbtypes.ErrColumnNotFound
		}
		return col, nil
	}

	return nil, qbtypes.ErrColumnNotFound
}

func (c *conditionBuilder) GetTableFieldName(ctx context.Context, key *telemetrytypes.TelemetryFieldKey) (string, error) {
	column, err := c.GetColumn(ctx, key)
	if err != nil {
		return "", err
	}

	switch column.Type {
	case schema.ColumnTypeString,
		schema.LowCardinalityColumnType{ElementType: schema.ColumnTypeString},
		schema.ColumnTypeUInt64,
		schema.ColumnTypeUInt32,
		schema.ColumnTypeInt8,
		schema.ColumnTypeInt16,
		schema.ColumnTypeBool,
		schema.DateTime64ColumnType{Precision: 9, Timezone: "UTC"},
		schema.FixedStringColumnType{Length: 32}:
		return column.Name, nil
	case schema.MapColumnType{
		KeyType:   schema.LowCardinalityColumnType{ElementType: schema.ColumnTypeString},
		ValueType: schema.ColumnTypeString,
	}, schema.MapColumnType{
		KeyType:   schema.LowCardinalityColumnType{ElementType: schema.ColumnTypeString},
		ValueType: schema.ColumnTypeFloat64,
	}, schema.MapColumnType{
		KeyType:   schema.LowCardinalityColumnType{ElementType: schema.ColumnTypeString},
		ValueType: schema.ColumnTypeBool,
	}:
		// a key could have been materialised; if so, return the materialised column name
		if key.Materialized {
			return telemetrytypes.FieldKeyToMaterializedColumnName(key), nil
		}
		return fmt.Sprintf("%s['%s']", column.Name, key.Name), nil
	}
	// should not reach here
	return column.Name, nil
}

func (c *conditionBuilder) GetCondition(
	ctx context.Context,
	key *telemetrytypes.TelemetryFieldKey,
	operator qbtypes.FilterOperator,
	value any,
	sb *sqlbuilder.SelectBuilder,
) (string, error) {
	column, err := c.GetColumn(ctx, key)
	if err != nil {
		return "", err
	}

	tblFieldName, err := c.GetTableFieldName(ctx, key)
	if err != nil {
		return "", err
	}

	tblFieldName, value = telemetrytypes.DataTypeCollisionHandledFieldName(key, value, tblFieldName)

	switch operator {
	// regular operators
	case qbtypes.FilterOperatorEqual:
		return sb.E(tblFieldName, value), nil
	case qbtypes.FilterOperatorNotEqual:
		return sb.NE(tblFieldName, value), nil
	case qbtypes.FilterOperatorGreaterThan:
		return sb.G(tblFieldName, value), nil
	case qbtypes.FilterOperatorGreaterThanOrEq:
		return sb.GE(tblFieldName, value), nil
	case qbtypes.FilterOperatorLessThan:
		return sb.LT(tblFieldName, value), nil
	case qbtypes.FilterOperatorLessThanOrEq:
		return sb.LE(tblFieldName, value), nil

	// like and not like
	case qbtypes.FilterOperatorLike:
		return sb.Like(tblFieldName, value), nil
	case qbtypes.FilterOperatorNotLike:
		return sb.NotLike(tblFieldName, value), nil
	case qbtypes.FilterOperatorILike:
		return sb.ILike(tblFieldName, value), nil
	case qbtypes.FilterOperatorNotILike:
		return sb.NotILike(tblFieldName, value), nil

	case qbtypes.FilterOperatorContains:
		return sb.ILike(tblFieldName, fmt.Sprintf("%%%s%%", value)), nil
	case qbtypes.FilterOperatorNotContains:
		return sb.NotILike(tblFieldName, fmt.Sprintf("%%%s%%", value)), nil

	case qbtypes.FilterOperatorRegexp:
		exp := fmt.Sprintf(`match(%s, %s)`, tblFieldName, sb.Var(value))
		return sb.And(exp), nil
	case qbtypes.FilterOperatorNotRegexp:
		exp := fmt.Sprintf(`not match(%s, %s)`, tblFieldName, sb.Var(value))
		return sb.And(exp), nil

	// between and not between
	case qbtypes.FilterOperatorBetween:
		values, ok := value.([]any)
		if !ok {
			return "", qbtypes.ErrBetweenValues
		}
		if len(values) != 2 {
			return "", qbtypes.ErrBetweenValues
		}
		return sb.Between(tblFieldName, values[0], values[1]), nil
	case qbtypes.FilterOperatorNotBetween:
		values, ok := value.([]any)
		if !ok {
			return "", qbtypes.ErrBetweenValues
		}
		if len(values) != 2 {
			return "", qbtypes.ErrBetweenValues
		}
		return sb.NotBetween(tblFieldName, values[0], values[1]), nil

	// in and not in
	case qbtypes.FilterOperatorIn:
		values, ok := value.([]any)
		if !ok {
			return "", qbtypes.ErrInValues
		}
		return sb.In(tblFieldName, values...), nil
	case qbtypes.FilterOperatorNotIn:
		values, ok := value.([]any)
		if !ok {
			return "", qbtypes.ErrInValues
		}
		return sb.NotIn(tblFieldName, values...), nil

	// exists and not exists
	// in the query builder, `exists` and `not exists` are used for
	// key membership checks, so depending on the column type, the condition changes
	case qbtypes.FilterOperatorExists, qbtypes.FilterOperatorNotExists:
		var emptyValue any
		switch column.Type {
		case schema.ColumnTypeString,
			schema.LowCardinalityColumnType{ElementType: schema.ColumnTypeString},
			schema.FixedStringColumnType{Length: 32},
			schema.DateTime64ColumnType{Precision: 9, Timezone: "UTC"}:
			emptyValue = ""
			if operator == qbtypes.FilterOperatorExists {
				return sb.NE(tblFieldName, emptyValue), nil
			}
			return sb.E(tblFieldName, emptyValue), nil
		case schema.ColumnTypeUInt64,
			schema.ColumnTypeUInt32,
			schema.ColumnTypeUInt8,
			schema.ColumnTypeInt8,
			schema.ColumnTypeInt16,
			schema.ColumnTypeBool:
			emptyValue = 0
			if operator == qbtypes.FilterOperatorExists {
				return sb.NE(tblFieldName, emptyValue), nil
			}
			return sb.E(tblFieldName, emptyValue), nil
		case schema.MapColumnType{
			KeyType:   schema.LowCardinalityColumnType{ElementType: schema.ColumnTypeString},
			ValueType: schema.ColumnTypeString,
		}, schema.MapColumnType{
			KeyType:   schema.LowCardinalityColumnType{ElementType: schema.ColumnTypeString},
			ValueType: schema.ColumnTypeBool,
		}, schema.MapColumnType{
			KeyType:   schema.LowCardinalityColumnType{ElementType: schema.ColumnTypeString},
			ValueType: schema.ColumnTypeFloat64,
		}:
			leftOperand := fmt.Sprintf("mapContains(%s, '%s')", column.Name, key.Name)
			if key.Materialized {
				leftOperand = telemetrytypes.FieldKeyToMaterializedColumnNameForExists(key)
			}
			if operator == qbtypes.FilterOperatorExists {
				return sb.E(leftOperand, true), nil
			}
			return sb.NE(leftOperand, true), nil
		default:
			return "", fmt.Errorf("exists operator is not supported for column type %s", column.Type)
		}
	}
	return "", nil
}
298
pkg/telemetrytraces/condition_builder_test.go
Normal file
@@ -0,0 +1,298 @@
|
||||
package telemetrytraces
|
||||
|
||||
import (
|
||||
"context"
|
||||
"testing"
|
||||
|
||||
qbtypes "github.com/SigNoz/signoz/pkg/types/querybuildertypes/querybuildertypesv5"
|
||||
"github.com/SigNoz/signoz/pkg/types/telemetrytypes"
|
||||
"github.com/huandu/go-sqlbuilder"
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
func TestGetFieldKeyName(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
conditionBuilder := &conditionBuilder{}
|
||||
|
||||
testCases := []struct {
|
||||
name string
|
||||
key telemetrytypes.TelemetryFieldKey
|
||||
expectedResult string
|
||||
expectedError error
|
||||
}{
|
||||
{
|
||||
name: "Simple column type - timestamp",
|
||||
key: telemetrytypes.TelemetryFieldKey{
|
||||
Name: "timestamp",
|
||||
FieldContext: telemetrytypes.FieldContextSpan,
|
||||
},
|
||||
expectedResult: "timestamp",
|
||||
expectedError: nil,
|
||||
},
|
||||
{
|
||||
name: "Map column type - string attribute",
|
||||
key: telemetrytypes.TelemetryFieldKey{
|
||||
Name: "user.id",
|
||||
FieldContext: telemetrytypes.FieldContextAttribute,
|
||||
FieldDataType: telemetrytypes.FieldDataTypeString,
|
||||
},
|
||||
expectedResult: "attributes_string['user.id']",
|
||||
expectedError: nil,
|
||||
},
|
||||
{
|
||||
name: "Map column type - number attribute",
|
||||
key: telemetrytypes.TelemetryFieldKey{
|
||||
Name: "request.size",
|
||||
FieldContext: telemetrytypes.FieldContextAttribute,
|
||||
FieldDataType: telemetrytypes.FieldDataTypeNumber,
|
||||
},
|
||||
expectedResult: "attributes_number['request.size']",
|
||||
expectedError: nil,
|
||||
},
|
||||
{
|
||||
name: "Map column type - bool attribute",
|
||||
key: telemetrytypes.TelemetryFieldKey{
|
||||
Name: "request.success",
|
||||
FieldContext: telemetrytypes.FieldContextAttribute,
|
||||
FieldDataType: telemetrytypes.FieldDataTypeBool,
|
||||
},
|
||||
expectedResult: "attributes_bool['request.success']",
|
||||
expectedError: nil,
|
||||
},
|
||||
{
|
||||
name: "Map column type - resource attribute",
|
||||
key: telemetrytypes.TelemetryFieldKey{
|
||||
Name: "service.name",
|
||||
FieldContext: telemetrytypes.FieldContextResource,
|
||||
},
|
||||
expectedResult: "resources_string['service.name']",
|
||||
expectedError: nil,
|
||||
},
|
||||
{
|
||||
name: "Non-existent column",
|
||||
key: telemetrytypes.TelemetryFieldKey{
|
||||
Name: "nonexistent_field",
|
||||
FieldContext: telemetrytypes.FieldContextSpan,
|
||||
},
|
||||
expectedResult: "",
|
||||
expectedError: qbtypes.ErrColumnNotFound,
|
||||
},
|
||||
}
|
||||
|
||||
for _, tc := range testCases {
|
||||
t.Run(tc.name, func(t *testing.T) {
|
||||
result, err := conditionBuilder.GetTableFieldName(ctx, &tc.key)
|
||||
|
||||
if tc.expectedError != nil {
|
||||
assert.Equal(t, tc.expectedError, err)
|
||||
} else {
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, tc.expectedResult, result)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestGetCondition(t *testing.T) {
    ctx := context.Background()
    conditionBuilder := NewConditionBuilder()

    testCases := []struct {
        name          string
        key           telemetrytypes.TelemetryFieldKey
        operator      qbtypes.FilterOperator
        value         any
        expectedSQL   string
        expectedError error
    }{
        {
            name: "Not Equal operator - timestamp",
            key: telemetrytypes.TelemetryFieldKey{
                Name:         "timestamp",
                FieldContext: telemetrytypes.FieldContextSpan,
            },
            operator:      qbtypes.FilterOperatorNotEqual,
            value:         uint64(1617979338000000000),
            expectedSQL:   "timestamp <> ?",
            expectedError: nil,
        },
        {
            name: "Greater Than operator - number attribute",
            key: telemetrytypes.TelemetryFieldKey{
                Name:          "request.duration",
                FieldContext:  telemetrytypes.FieldContextAttribute,
                FieldDataType: telemetrytypes.FieldDataTypeNumber,
            },
            operator:      qbtypes.FilterOperatorGreaterThan,
            value:         float64(100),
            expectedSQL:   "attributes_number['request.duration'] > ?",
            expectedError: nil,
        },
        {
            name: "Less Than operator - number attribute",
            key: telemetrytypes.TelemetryFieldKey{
                Name:          "request.size",
                FieldContext:  telemetrytypes.FieldContextAttribute,
                FieldDataType: telemetrytypes.FieldDataTypeNumber,
            },
            operator:      qbtypes.FilterOperatorLessThan,
            value:         float64(1024),
            expectedSQL:   "attributes_number['request.size'] < ?",
            expectedError: nil,
        },
        {
            name: "Greater Than Or Equal operator - timestamp",
            key: telemetrytypes.TelemetryFieldKey{
                Name:         "timestamp",
                FieldContext: telemetrytypes.FieldContextSpan,
            },
            operator:      qbtypes.FilterOperatorGreaterThanOrEq,
            value:         uint64(1617979338000000000),
            expectedSQL:   "timestamp >= ?",
            expectedError: nil,
        },
        {
            name: "Less Than Or Equal operator - timestamp",
            key: telemetrytypes.TelemetryFieldKey{
                Name:         "timestamp",
                FieldContext: telemetrytypes.FieldContextSpan,
            },
            operator:      qbtypes.FilterOperatorLessThanOrEq,
            value:         uint64(1617979338000000000),
            expectedSQL:   "timestamp <= ?",
            expectedError: nil,
        },
        {
            name: "ILike operator - string attribute",
            key: telemetrytypes.TelemetryFieldKey{
                Name:          "user.id",
                FieldContext:  telemetrytypes.FieldContextAttribute,
                FieldDataType: telemetrytypes.FieldDataTypeString,
            },
            operator:      qbtypes.FilterOperatorILike,
            value:         "%admin%",
            expectedSQL:   "WHERE LOWER(attributes_string['user.id']) LIKE LOWER(?)",
            expectedError: nil,
        },
        {
            name: "Not ILike operator - string attribute",
            key: telemetrytypes.TelemetryFieldKey{
                Name:          "user.id",
                FieldContext:  telemetrytypes.FieldContextAttribute,
                FieldDataType: telemetrytypes.FieldDataTypeString,
            },
            operator:      qbtypes.FilterOperatorNotILike,
            value:         "%admin%",
            expectedSQL:   "WHERE LOWER(attributes_string['user.id']) NOT LIKE LOWER(?)",
            expectedError: nil,
        },
        {
            name: "Between operator - timestamp",
            key: telemetrytypes.TelemetryFieldKey{
                Name:         "timestamp",
                FieldContext: telemetrytypes.FieldContextSpan,
            },
            operator:      qbtypes.FilterOperatorBetween,
            value:         []any{uint64(1617979338000000000), uint64(1617979348000000000)},
            expectedSQL:   "timestamp BETWEEN ? AND ?",
            expectedError: nil,
        },
        {
            name: "Between operator - invalid value",
            key: telemetrytypes.TelemetryFieldKey{
                Name:         "timestamp",
                FieldContext: telemetrytypes.FieldContextSpan,
            },
            operator:      qbtypes.FilterOperatorBetween,
            value:         "invalid",
            expectedSQL:   "",
            expectedError: qbtypes.ErrBetweenValues,
        },
        {
            name: "Between operator - insufficient values",
            key: telemetrytypes.TelemetryFieldKey{
                Name:         "timestamp",
                FieldContext: telemetrytypes.FieldContextSpan,
            },
            operator:      qbtypes.FilterOperatorBetween,
            value:         []any{uint64(1617979338000000000)},
            expectedSQL:   "",
            expectedError: qbtypes.ErrBetweenValues,
        },
        {
            name: "Not Between operator - timestamp",
            key: telemetrytypes.TelemetryFieldKey{
                Name:         "timestamp",
                FieldContext: telemetrytypes.FieldContextSpan,
            },
            operator:      qbtypes.FilterOperatorNotBetween,
            value:         []any{uint64(1617979338000000000), uint64(1617979348000000000)},
            expectedSQL:   "timestamp NOT BETWEEN ? AND ?",
            expectedError: nil,
        },
        {
            name: "Exists operator - map field",
            key: telemetrytypes.TelemetryFieldKey{
                Name:          "user.id",
                FieldContext:  telemetrytypes.FieldContextAttribute,
                FieldDataType: telemetrytypes.FieldDataTypeString,
            },
            operator:      qbtypes.FilterOperatorExists,
            value:         nil,
            expectedSQL:   "mapContains(attributes_string, 'user.id') = ?",
            expectedError: nil,
        },
        {
            name: "Not Exists operator - map field",
            key: telemetrytypes.TelemetryFieldKey{
                Name:          "user.id",
                FieldContext:  telemetrytypes.FieldContextAttribute,
                FieldDataType: telemetrytypes.FieldDataTypeString,
            },
            operator:      qbtypes.FilterOperatorNotExists,
            value:         nil,
            expectedSQL:   "mapContains(attributes_string, 'user.id') <> ?",
            expectedError: nil,
        },
        {
            name: "Contains operator - map field",
            key: telemetrytypes.TelemetryFieldKey{
                Name:          "user.id",
                FieldContext:  telemetrytypes.FieldContextAttribute,
                FieldDataType: telemetrytypes.FieldDataTypeString,
            },
            operator:      qbtypes.FilterOperatorContains,
            value:         "admin",
            expectedSQL:   "WHERE LOWER(attributes_string['user.id']) LIKE LOWER(?)",
            expectedError: nil,
        },
        {
            name: "Non-existent column",
            key: telemetrytypes.TelemetryFieldKey{
                Name:         "nonexistent_field",
                FieldContext: telemetrytypes.FieldContextSpan,
            },
            operator:      qbtypes.FilterOperatorEqual,
            value:         "value",
            expectedSQL:   "",
            expectedError: qbtypes.ErrColumnNotFound,
        },
    }

    for _, tc := range testCases {
        sb := sqlbuilder.NewSelectBuilder()
        t.Run(tc.name, func(t *testing.T) {
            cond, err := conditionBuilder.GetCondition(ctx, &tc.key, tc.operator, tc.value, sb)
            sb.Where(cond)

            if tc.expectedError != nil {
                assert.Equal(t, tc.expectedError, err)
            } else {
                require.NoError(t, err)
                sql, _ := sb.BuildWithFlavor(sqlbuilder.ClickHouse)
                assert.Contains(t, sql, tc.expectedSQL)
            }
        })
    }
}
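Taken together, the test loop documents the intended call pattern: build the condition against a select builder, attach it with Where, then render for ClickHouse. A minimal standalone sketch of that flow (the query shape and expected SQL are inferred from the cases above; this helper is not part of the diff):

func exampleUserFilter(ctx context.Context) (string, []any, error) {
    sb := sqlbuilder.NewSelectBuilder()
    sb.Select("trace_id").From(DBName + "." + SpanIndexV3TableName)

    key := telemetrytypes.TelemetryFieldKey{
        Name:          "user.id",
        FieldContext:  telemetrytypes.FieldContextAttribute,
        FieldDataType: telemetrytypes.FieldDataTypeString,
    }
    cond, err := NewConditionBuilder().GetCondition(ctx, &key, qbtypes.FilterOperatorEqual, "admin", sb)
    if err != nil {
        return "", nil, err // e.g. qbtypes.ErrColumnNotFound for unknown fields
    }
    sb.Where(cond)

    // sql ~ "SELECT trace_id FROM ... WHERE attributes_string['user.id'] = ?", args ~ ["admin"]
    sql, args := sb.BuildWithFlavor(sqlbuilder.ClickHouse)
    return sql, args, nil
}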
10
pkg/telemetrytraces/tables.go
Normal file
@@ -0,0 +1,10 @@
package telemetrytraces

const (
    DBName                        = "signoz_traces"
    SpanIndexV3TableName          = "distributed_signoz_index_v3"
    SpanIndexV3LocalTableName     = "signoz_index_v3"
    TagAttributesV2TableName      = "distributed_tag_attributes_v2"
    TagAttributesV2LocalTableName = "tag_attributes_v2"
    TopLevelOperationsTableName   = "distributed_top_level_operations"
)
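The constants pair each distributed table with its local counterpart. A small sketch of how a fully qualified table reference could be assembled from them (the helper itself is hypothetical, not part of this diff):

// tracesTable qualifies a table name with the traces database name,
// e.g. tracesTable(SpanIndexV3TableName) -> "signoz_traces.distributed_signoz_index_v3".
func tracesTable(table string) string {
    return DBName + "." + table
}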
@@ -4,10 +4,17 @@ import (
    "context"

    schema "github.com/SigNoz/signoz-otel-collector/cmd/signozschemamigrator/schema_migrator"
    "github.com/SigNoz/signoz/pkg/errors"
    "github.com/SigNoz/signoz/pkg/types/telemetrytypes"
    "github.com/huandu/go-sqlbuilder"
)

var (
    ErrColumnNotFound = errors.Newf(errors.TypeNotFound, errors.CodeNotFound, "column not found")
    ErrBetweenValues  = errors.Newf(errors.TypeInvalidInput, errors.CodeInvalidInput, "(not) between operator requires two values")
    ErrInValues       = errors.Newf(errors.TypeInvalidInput, errors.CodeInvalidInput, "(not) in operator requires a list of values")
)

// FilterOperator is the operator for the filter.
type FilterOperator int
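The hunk shows only the type declaration; the operator names exercised by the tests in this diff suggest an iota-style enum along these lines. This is a sketch only: the names are taken from the tests, while the ordering and zero value are assumptions, and the authoritative list lives in querybuildertypesv5 (ErrInValues also implies In/NotIn variants not shown here).

// Sketch only: names from the tests in this diff; values and order assumed.
const (
    FilterOperatorUnknown FilterOperator = iota
    FilterOperatorEqual
    FilterOperatorNotEqual
    FilterOperatorGreaterThan
    FilterOperatorGreaterThanOrEq
    FilterOperatorLessThan
    FilterOperatorLessThanOrEq
    FilterOperatorILike
    FilterOperatorNotILike
    FilterOperatorBetween
    FilterOperatorNotBetween
    FilterOperatorExists
    FilterOperatorNotExists
    FilterOperatorContains
)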
@@ -86,11 +86,11 @@ func GetFieldKeyFromKeyText(key string) TelemetryFieldKey {
    return fieldKeySelector
}

func FieldKeyToMaterializedColumnName(key TelemetryFieldKey) string {
func FieldKeyToMaterializedColumnName(key *TelemetryFieldKey) string {
    return fmt.Sprintf("%s_%s_%s", key.FieldContext, key.FieldDataType.String, strings.ReplaceAll(key.Name, ".", "$$"))
}

func FieldKeyToMaterializedColumnNameForExists(key TelemetryFieldKey) string {
func FieldKeyToMaterializedColumnNameForExists(key *TelemetryFieldKey) string {
    return fmt.Sprintf("%s_%s_%s_exists", key.FieldContext, key.FieldDataType.String, strings.ReplaceAll(key.Name, ".", "$$"))
}
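Both helpers now take a pointer, avoiding a struct copy per call. For illustration, the naming scheme flattens a key into a single column identifier; a self-contained sketch, where "attribute" and "string" stand in for the real FieldContext/FieldDataType renderings:

package main

import (
    "fmt"
    "strings"
)

// materializedName mirrors FieldKeyToMaterializedColumnName for illustration only.
func materializedName(fieldContext, fieldDataType, name string) string {
    return fmt.Sprintf("%s_%s_%s", fieldContext, fieldDataType, strings.ReplaceAll(name, ".", "$$"))
}

func main() {
    // prints: attribute_string_http$$status_code
    fmt.Println(materializedName("attribute", "string", "http.status_code"))
}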
@@ -123,3 +123,52 @@ type FieldValueSelector struct {
    Value string `json:"value"`
    Limit int    `json:"limit"`
}

func DataTypeCollisionHandledFieldName(key *TelemetryFieldKey, value any, tblFieldName string) (string, any) {
    // This block exists to handle data type collisions: we don't want to fail
    // a request when a key arrives with more than one data type. Take
    // `http.status_code` as an example, where some spans carry it as a string
    // and others as a number. When a user searches for `http.status_code=200`,
    // we search across both the number and string columns and return results
    // from both. While we expect users not to send mixed data types, it
    // inevitably happens, so we reconcile the column expression and value here.
    switch key.FieldDataType {
    case FieldDataTypeString:
        switch value.(type) {
        case float64:
            // parse the string column as a number so it can match the numeric value
            tblFieldName = fmt.Sprintf(`toFloat64OrNull(%s)`, tblFieldName)
        case []any:
            areFloats := true
            for _, v := range value.([]any) {
                if _, ok := v.(float64); !ok {
                    areFloats = false
                    break
                }
            }
            if areFloats {
                tblFieldName = fmt.Sprintf(`toFloat64OrNull(%s)`, tblFieldName)
            }
        case bool:
            // ClickHouse has no toBoolOrNull, so compare against the bool's string form
            value = fmt.Sprintf("%t", value)
        case string:
            // nothing to do
        }
    case FieldDataTypeFloat64, FieldDataTypeInt64, FieldDataTypeNumber:
        switch value.(type) {
        case string:
            // cast the number column to a string so it can match the string value
            tblFieldName = fmt.Sprintf(`toString(%s)`, tblFieldName)
        case float64:
            // nothing to do
        }
    case FieldDataTypeBool:
        switch value.(type) {
        case string:
            // cast the bool column to a string so it can match the string value
            tblFieldName = fmt.Sprintf(`toString(%s)`, tblFieldName)
        }
    }
    return tblFieldName, value
}
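A quick illustration of the reconciliation above: a key stored as a string attribute but filtered with a numeric value gets its column expression wrapped so the comparison happens numerically. The field name and map column below are assumed for the example:

key := TelemetryFieldKey{
    Name:          "http.status_code",
    FieldContext:  FieldContextAttribute,
    FieldDataType: FieldDataTypeString,
}
col, val := DataTypeCollisionHandledFieldName(&key, float64(200), "attributes_string['http.status_code']")
// col: "toFloat64OrNull(attributes_string['http.status_code'])"
// val: 200 (the float64 value is passed through unchanged)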
21
pkg/types/telemetrytypes/virtualfield.go
Normal file
@@ -0,0 +1,21 @@
package telemetrytypes

import (
    "github.com/SigNoz/signoz/pkg/types"
    "github.com/SigNoz/signoz/pkg/valuer"
    "github.com/uptrace/bun"
)

type VirtualField struct {
    bun.BaseModel `bun:"table:virtual_field"`

    types.Identifiable
    types.TimeAuditable
    types.UserAuditable

    Name        string      `bun:"name,type:text,notnull" json:"name"`
    Expression  string      `bun:"expression,type:text,notnull" json:"expression"`
    Description string      `bun:"description,type:text" json:"description"`
    Signal      Signal      `bun:"signal,type:text,notnull" json:"signal"`
    OrgID       valuer.UUID `bun:"org_id,type:text,notnull" json:"orgId"`
}
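The struct maps one-to-one onto the virtual_field table via the bun tags. A construction sketch with illustrative values; Signal and OrgID are omitted because their concrete values come from the Signal constants in this package and from pkg/valuer, and the embedded Identifiable/auditable fields are populated elsewhere:

// Hypothetical example: a virtual field deriving milliseconds from nanoseconds.
vf := VirtualField{
    Name:        "duration_ms",
    Expression:  "duration_nano / 1e6",
    Description: "span duration in milliseconds",
}
_ = vf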