Compare commits: feat/issue...chore/prom (7 commits)
| Author | SHA1 | Date |
|---|---|---|
|  | 837d64c161 |  |
|  | 1542b9d6e9 |  |
|  | 8455349459 |  |
|  | c488a24d09 |  |
|  | 9091cf61fd |  |
|  | eeb2ab3212 |  |
|  | 3f128f0f1d |  |
.github/CODEOWNERS (vendored, 7 changes)
```diff
@@ -12,4 +12,9 @@
 /pkg/factory/ @grandwizard28
 /pkg/types/ @grandwizard28
 .golangci.yml @grandwizard28
-**/(zeus|licensing|sqlmigration)/ @vikrantgupta25
+/pkg/zeus/ @vikrantgupta25
+/pkg/licensing/ @vikrantgupta25
+/pkg/sqlmigration/ @vikrantgupta25
+/ee/zeus/ @vikrantgupta25
+/ee/licensing/ @vikrantgupta25
+/ee/sqlmigration/ @vikrantgupta25
```
```diff
@@ -100,12 +100,18 @@ services:
     #   - "9000:9000"
     #   - "8123:8123"
     #   - "9181:9181"
+    configs:
+      - source: clickhouse-config
+        target: /etc/clickhouse-server/config.xml
+      - source: clickhouse-users
+        target: /etc/clickhouse-server/users.xml
+      - source: clickhouse-custom-function
+        target: /etc/clickhouse-server/custom-function.xml
+      - source: clickhouse-cluster
+        target: /etc/clickhouse-server/config.d/cluster.xml
+
     volumes:
-      - ../common/clickhouse/config.xml:/etc/clickhouse-server/config.xml
-      - ../common/clickhouse/users.xml:/etc/clickhouse-server/users.xml
-      - ../common/clickhouse/custom-function.xml:/etc/clickhouse-server/custom-function.xml
       - ../common/clickhouse/user_scripts:/var/lib/clickhouse/user_scripts/
-      - ../common/clickhouse/cluster.xml:/etc/clickhouse-server/config.d/cluster.xml
       - clickhouse:/var/lib/clickhouse/
     # - ../common/clickhouse/storage.xml:/etc/clickhouse-server/config.d/storage.xml
     signoz:
@@ -117,9 +123,10 @@ services:
       - "8080:8080" # signoz port
       # - "6060:6060" # pprof port
     volumes:
-      - ../common/signoz/prometheus.yml:/root/config/prometheus.yml
-      - ../common/dashboards:/root/config/dashboards
       - sqlite:/var/lib/signoz/
+    configs:
+      - source: signoz-prometheus-config
+        target: /root/config/prometheus.yml
     environment:
       - SIGNOZ_ALERTMANAGER_PROVIDER=signoz
       - SIGNOZ_TELEMETRYSTORE_CLICKHOUSE_DSN=tcp://clickhouse:9000
@@ -147,9 +154,11 @@ services:
       - --manager-config=/etc/manager-config.yaml
       - --copy-path=/var/tmp/collector-config.yaml
       - --feature-gates=-pkg.translator.prometheus.NormalizeName
-    volumes:
-      - ./otel-collector-config.yaml:/etc/otel-collector-config.yaml
-      - ../common/signoz/otel-collector-opamp-config.yaml:/etc/manager-config.yaml
+    configs:
+      - source: otel-collector-config
+        target: /etc/otel-collector-config.yaml
+      - source: otel-manager-config
+        target: /etc/manager-config.yaml
     environment:
       - OTEL_RESOURCE_ATTRIBUTES=host.name={{.Node.Hostname}},os.type={{.Node.Platform.OS}}
       - LOW_CARDINAL_EXCEPTION_GROUPING=false
@@ -186,3 +195,26 @@ volumes:
     name: signoz-sqlite
   zookeeper-1:
     name: signoz-zookeeper-1
+
+configs:
+  clickhouse-config:
+    file: ../common/clickhouse/config.xml
+  clickhouse-users:
+    file: ../common/clickhouse/users.xml
+  clickhouse-custom-function:
+    file: ../common/clickhouse/custom-function.xml
+  clickhouse-cluster:
+    file: ../common/clickhouse/cluster.xml
+
+  signoz-prometheus-config:
+    file: ../common/signoz/prometheus.yml
+  # If you have multiple dashboard files, you can list them individually:
+  # dashboard-foo:
+  #   file: ../common/dashboards/foo.json
+  # dashboard-bar:
+  #   file: ../common/dashboards/bar.json
+
+  otel-collector-config:
+    file: ./otel-collector-config.yaml
+  otel-manager-config:
+    file: ../common/signoz/otel-collector-opamp-config.yaml
```
```diff
@@ -122,10 +122,7 @@ func NewServer(serverOptions *ServerOptions) (*Server, error) {
 	}
 
 	// initiate opamp
-	_, err = opAmpModel.InitDB(serverOptions.SigNoz.SQLStore.SQLxDB())
-	if err != nil {
-		return nil, err
-	}
+	opAmpModel.InitDB(serverOptions.SigNoz.SQLStore, serverOptions.SigNoz.Instrumentation.Logger(), serverOptions.SigNoz.Modules.OrgGetter)
 
 	integrationsController, err := integrations.NewController(serverOptions.SigNoz.SQLStore)
 	if err != nil {
@@ -143,7 +140,8 @@ func NewServer(serverOptions *ServerOptions) (*Server, error) {
 
 	// ingestion pipelines manager
 	logParsingPipelineController, err := logparsingpipeline.NewLogParsingPipelinesController(
-		serverOptions.SigNoz.SQLStore, integrationsController.GetPipelinesForInstalledIntegrations,
+		serverOptions.SigNoz.SQLStore,
+		integrationsController.GetPipelinesForInstalledIntegrations,
 	)
 	if err != nil {
 		return nil, err
@@ -151,7 +149,7 @@ func NewServer(serverOptions *ServerOptions) (*Server, error) {
 
 	// initiate agent config handler
 	agentConfMgr, err := agentConf.Initiate(&agentConf.ManagerOptions{
-		DB:            serverOptions.SigNoz.SQLStore.SQLxDB(),
+		Store:         serverOptions.SigNoz.SQLStore,
 		AgentFeatures: []agentConf.AgentFeature{logParsingPipelineController},
 	})
 	if err != nil {
```
```diff
@@ -17,19 +17,21 @@ var (
 )
 
 var (
-	Org              = "org"
-	User             = "user"
-	UserNoCascade    = "user_no_cascade"
-	FactorPassword   = "factor_password"
-	CloudIntegration = "cloud_integration"
+	Org                = "org"
+	User               = "user"
+	UserNoCascade      = "user_no_cascade"
+	FactorPassword     = "factor_password"
+	CloudIntegration   = "cloud_integration"
+	AgentConfigVersion = "agent_config_version"
 )
 
 var (
-	OrgReference              = `("org_id") REFERENCES "organizations" ("id")`
-	UserReference             = `("user_id") REFERENCES "users" ("id") ON DELETE CASCADE ON UPDATE CASCADE`
-	UserReferenceNoCascade    = `("user_id") REFERENCES "users" ("id")`
-	FactorPasswordReference   = `("password_id") REFERENCES "factor_password" ("id")`
-	CloudIntegrationReference = `("cloud_integration_id") REFERENCES "cloud_integration" ("id") ON DELETE CASCADE`
+	OrgReference                = `("org_id") REFERENCES "organizations" ("id")`
+	UserReference               = `("user_id") REFERENCES "users" ("id") ON DELETE CASCADE ON UPDATE CASCADE`
+	UserReferenceNoCascade      = `("user_id") REFERENCES "users" ("id")`
+	FactorPasswordReference     = `("password_id") REFERENCES "factor_password" ("id")`
+	CloudIntegrationReference   = `("cloud_integration_id") REFERENCES "cloud_integration" ("id") ON DELETE CASCADE`
+	AgentConfigVersionReference = `("version_id") REFERENCES "agent_config_version" ("id")`
 )
 
 type dialect struct{}
@@ -274,6 +276,8 @@ func (dialect *dialect) RenameTableAndModifyModel(ctx context.Context, bun bun.I
 			fkReferences = append(fkReferences, FactorPasswordReference)
 		} else if reference == CloudIntegration && !slices.Contains(fkReferences, CloudIntegrationReference) {
 			fkReferences = append(fkReferences, CloudIntegrationReference)
+		} else if reference == AgentConfigVersion && !slices.Contains(fkReferences, AgentConfigVersionReference) {
+			fkReferences = append(fkReferences, AgentConfigVersionReference)
 		}
 	}
 
```
```diff
@@ -9,7 +9,7 @@ export const pipelineData: Pipeline = {
 		active: false,
 		is_valid: false,
 		disabled: false,
-		deployStatus: 'DEPLOYED',
+		deployStatus: 'deployed',
 		deployResult: 'Deployment was successful',
 		lastHash: 'log_pipelines:24',
 		lastConf: 'oiwernveroi',
@@ -135,7 +135,7 @@ export const pipelineData: Pipeline = {
 		active: false,
 		isValid: false,
 		disabled: false,
-		deployStatus: 'DEPLOYED',
+		deployStatus: 'deployed',
 		deployResult: 'Deployment was successful',
 		lastHash: 'log_pipelines:24',
 		lastConf: 'eovineroiv',
@@ -150,7 +150,7 @@ export const pipelineData: Pipeline = {
 		active: false,
 		isValid: false,
 		disabled: false,
-		deployStatus: 'DEPLOYED',
+		deployStatus: 'deployed',
 		deployResult: 'Deployment was successful',
 		lastHash: 'log_pipelines:23',
 		lastConf: 'eivrounreovi',
@@ -169,7 +169,7 @@ export const pipelineDataHistory: Pipeline['history'] = [
 		active: false,
 		isValid: false,
 		disabled: false,
-		deployStatus: 'DEPLOYED',
+		deployStatus: 'deployed',
 		deployResult: 'Deployment was successful',
 		lastHash: 'log_pipelines:24',
 		lastConf: 'eovineroiv',
@@ -184,7 +184,7 @@ export const pipelineDataHistory: Pipeline['history'] = [
 		active: false,
 		isValid: false,
 		disabled: false,
-		deployStatus: 'IN_PROGRESS',
+		deployStatus: 'in_progress',
 		deployResult: 'Deployment is in progress',
 		lastHash: 'log_pipelines:23',
 		lastConf: 'eivrounreovi',
@@ -199,7 +199,7 @@ export const pipelineDataHistory: Pipeline['history'] = [
 		active: false,
 		isValid: false,
 		disabled: false,
-		deployStatus: 'DIRTY',
+		deployStatus: 'dirty',
 		deployResult: 'Deployment is dirty',
 		lastHash: 'log_pipelines:23',
 		lastConf: 'eivrounreovi',
@@ -214,7 +214,7 @@ export const pipelineDataHistory: Pipeline['history'] = [
 		active: false,
 		isValid: false,
 		disabled: false,
-		deployStatus: 'FAILED',
+		deployStatus: 'failed',
 		deployResult: 'Deployment failed',
 		lastHash: 'log_pipelines:23',
 		lastConf: 'eivrounreovi',
@@ -229,7 +229,7 @@ export const pipelineDataHistory: Pipeline['history'] = [
 		active: false,
 		isValid: false,
 		disabled: false,
-		deployStatus: 'UNKNOWN',
+		deployStatus: 'unknown',
 		deployResult: '',
 		lastHash: 'log_pipelines:23',
 		lastConf: 'eivrounreovi',
```
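Every fixture above moves its `deployStatus` literal from SCREAMING_CASE to lowercase snake_case. A minimal TypeScript sketch of the value set the updated fixtures now draw from; the `DeployStatus` name is hypothetical, not an identifier from the repo:

```ts
// Hypothetical union of the lowercase statuses used in the updated fixtures.
type DeployStatus = 'in_progress' | 'deployed' | 'dirty' | 'failed' | 'unknown';

// Hypothetical normalizer for older fixtures or payloads still carrying the
// uppercase form, e.g. normalizeDeployStatus('IN_PROGRESS') === 'in_progress'.
function normalizeDeployStatus(value: string): string {
	return value.toLowerCase();
}
```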
```diff
@@ -9,15 +9,15 @@ import { Spin } from 'antd';
 
 export function getDeploymentStage(value: string): string {
 	switch (value) {
-		case 'IN_PROGRESS':
+		case 'in_progress':
 			return 'In Progress';
-		case 'DEPLOYED':
+		case 'deployed':
 			return 'Deployed';
-		case 'DIRTY':
+		case 'dirty':
 			return 'Dirty';
-		case 'FAILED':
+		case 'failed':
 			return 'Failed';
-		case 'UNKNOWN':
+		case 'unknown':
 			return 'Unknown';
 		default:
 			return '';
@@ -26,17 +26,17 @@ export function getDeploymentStage(value: string): string {
 
 export function getDeploymentStageIcon(value: string): JSX.Element {
 	switch (value) {
-		case 'IN_PROGRESS':
+		case 'in_progress':
 			return (
 				<Spin indicator={<LoadingOutlined style={{ fontSize: 15 }} spin />} />
 			);
-		case 'DEPLOYED':
+		case 'deployed':
 			return <CheckCircleFilled />;
-		case 'DIRTY':
+		case 'dirty':
 			return <ExclamationCircleFilled />;
-		case 'FAILED':
+		case 'failed':
 			return <CloseCircleFilled />;
-		case 'UNKNOWN':
+		case 'unknown':
 			return <MinusCircleFilled />;
 		default:
 			return <span />;
```
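The two switches above stay in lockstep with the lowercase values. A hedged table-driven sketch of the same label mapping, useful if the pair of switches ever drifts; `DEPLOY_STAGE_LABELS` and `toDeploymentStage` are hypothetical names for illustration, not identifiers from the repo:

```ts
// Hypothetical lookup-table equivalent of getDeploymentStage.
type DeployStatus = 'in_progress' | 'deployed' | 'dirty' | 'failed' | 'unknown';

const DEPLOY_STAGE_LABELS: Record<DeployStatus, string> = {
	in_progress: 'In Progress',
	deployed: 'Deployed',
	dirty: 'Dirty',
	failed: 'Failed',
	unknown: 'Unknown',
};

function toDeploymentStage(value: string): string {
	// Unrecognized values fall back to '', mirroring the switch's default case.
	return (DEPLOY_STAGE_LABELS as Record<string, string>)[value] ?? '';
}
```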
go.mod (208 changes)
```diff
@@ -19,7 +19,7 @@ require (
 	github.com/go-openapi/strfmt v0.23.0
 	github.com/go-redis/redis/v8 v8.11.5
 	github.com/go-redis/redismock/v8 v8.11.5
-	github.com/go-viper/mapstructure/v2 v2.1.0
+	github.com/go-viper/mapstructure/v2 v2.2.1
 	github.com/gojek/heimdall/v7 v7.0.3
 	github.com/golang-jwt/jwt/v5 v5.2.2
 	github.com/google/uuid v1.6.0
@@ -31,7 +31,7 @@ require (
 	github.com/jmoiron/sqlx v1.3.4
 	github.com/json-iterator/go v1.1.12
 	github.com/knadh/koanf v1.5.0
-	github.com/knadh/koanf/v2 v2.1.1
+	github.com/knadh/koanf/v2 v2.1.2
 	github.com/mailru/easyjson v0.7.7
 	github.com/mattn/go-sqlite3 v1.14.24
 	github.com/open-telemetry/opamp-go v0.5.0
@@ -39,10 +39,10 @@ require (
 	github.com/opentracing/opentracing-go v1.2.0
 	github.com/patrickmn/go-cache v2.1.0+incompatible
 	github.com/pkg/errors v0.9.1
-	github.com/prometheus/alertmanager v0.28.0
-	github.com/prometheus/client_golang v1.20.5
-	github.com/prometheus/common v0.61.0
-	github.com/prometheus/prometheus v0.300.1
+	github.com/prometheus/alertmanager v0.28.1
+	github.com/prometheus/client_golang v1.22.0
+	github.com/prometheus/common v0.63.0
+	github.com/prometheus/prometheus v0.304.1
 	github.com/rs/cors v1.11.1
 	github.com/russellhaering/gosaml2 v0.9.0
 	github.com/russellhaering/goxmldsig v1.2.0
@@ -57,42 +57,42 @@ require (
 	github.com/uptrace/bun v1.2.9
 	github.com/uptrace/bun/dialect/pgdialect v1.2.9
 	github.com/uptrace/bun/dialect/sqlitedialect v1.2.9
-	go.opentelemetry.io/collector/confmap v1.17.0
-	go.opentelemetry.io/collector/pdata v1.17.0
-	go.opentelemetry.io/collector/processor v0.111.0
+	go.opentelemetry.io/collector/confmap v1.30.0
+	go.opentelemetry.io/collector/otelcol v0.124.0
+	go.opentelemetry.io/collector/pdata v1.34.0
-	go.opentelemetry.io/contrib/config v0.10.0
-	go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.58.0
-	go.opentelemetry.io/otel v1.34.0
-	go.opentelemetry.io/otel/metric v1.34.0
-	go.opentelemetry.io/otel/sdk v1.34.0
-	go.opentelemetry.io/otel/trace v1.34.0
+	go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.60.0
+	go.opentelemetry.io/otel v1.36.0
+	go.opentelemetry.io/otel/metric v1.36.0
+	go.opentelemetry.io/otel/sdk v1.36.0
+	go.opentelemetry.io/otel/trace v1.36.0
 	go.uber.org/multierr v1.11.0
 	go.uber.org/zap v1.27.0
 	golang.org/x/crypto v0.38.0
-	golang.org/x/exp v0.0.0-20240506185415-9bf2ced13842
-	golang.org/x/oauth2 v0.24.0
+	golang.org/x/exp v0.0.0-20250106191152-7588d65b2ba8
+	golang.org/x/oauth2 v0.29.0
 	golang.org/x/sync v0.14.0
 	golang.org/x/text v0.25.0
-	google.golang.org/protobuf v1.36.0
+	google.golang.org/protobuf v1.36.6
 	gopkg.in/yaml.v2 v2.4.0
 	gopkg.in/yaml.v3 v3.0.1
-	k8s.io/apimachinery v0.31.3
+	k8s.io/apimachinery v0.32.3
 )
 
 require (
-	cloud.google.com/go/auth v0.13.0 // indirect
-	cloud.google.com/go/auth/oauth2adapt v0.2.6 // indirect
+	cloud.google.com/go/auth v0.16.0 // indirect
+	cloud.google.com/go/auth/oauth2adapt v0.2.8 // indirect
 	cloud.google.com/go/compute/metadata v0.6.0 // indirect
-	github.com/Azure/azure-sdk-for-go/sdk/azcore v1.16.0 // indirect
-	github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.8.0 // indirect
-	github.com/Azure/azure-sdk-for-go/sdk/internal v1.10.0 // indirect
-	github.com/AzureAD/microsoft-authentication-library-for-go v1.2.2 // indirect
+	github.com/Azure/azure-sdk-for-go/sdk/azcore v1.18.0 // indirect
+	github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.9.0 // indirect
+	github.com/Azure/azure-sdk-for-go/sdk/internal v1.11.1 // indirect
+	github.com/AzureAD/microsoft-authentication-library-for-go v1.4.2 // indirect
 	github.com/ClickHouse/ch-go v0.63.1 // indirect
 	github.com/alecthomas/units v0.0.0-20240927000941-0f3dac36c52b // indirect
 	github.com/andybalholm/brotli v1.1.1 // indirect
 	github.com/armon/go-metrics v0.4.1 // indirect
 	github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2 // indirect
-	github.com/aws/aws-sdk-go v1.55.5 // indirect
+	github.com/aws/aws-sdk-go v1.55.7 // indirect
 	github.com/bboreham/go-loser v0.0.0-20230920113527-fcc2c21820a3 // indirect
 	github.com/beevik/etree v1.1.0 // indirect
 	github.com/beorn7/perks v1.0.1 // indirect
@@ -110,7 +110,7 @@ require (
 	github.com/expr-lang/expr v1.17.0 // indirect
 	github.com/facette/natsort v0.0.0-20181210072756-2cd4dd1e2dcb // indirect
 	github.com/felixge/httpsnoop v1.0.4 // indirect
-	github.com/fsnotify/fsnotify v1.8.0 // indirect
+	github.com/fsnotify/fsnotify v1.9.0 // indirect
 	github.com/go-faster/city v1.0.1 // indirect
 	github.com/go-faster/errors v0.7.1 // indirect
 	github.com/go-jose/go-jose/v4 v4.0.5 // indirect
@@ -130,15 +130,15 @@ require (
 	github.com/gogo/protobuf v1.3.2 // indirect
 	github.com/gojek/valkyrie v0.0.0-20180215180059-6aee720afcdf // indirect
 	github.com/golang/protobuf v1.5.4 // indirect
-	github.com/golang/snappy v0.0.5-0.20220116011046-fa5810519dcb // indirect
+	github.com/golang/snappy v1.0.0 // indirect
 	github.com/google/btree v1.0.1 // indirect
-	github.com/google/go-cmp v0.6.0 // indirect
-	github.com/google/s2a-go v0.1.8 // indirect
-	github.com/googleapis/enterprise-certificate-proxy v0.3.4 // indirect
-	github.com/googleapis/gax-go/v2 v2.14.0 // indirect
+	github.com/google/go-cmp v0.7.0 // indirect
+	github.com/google/s2a-go v0.1.9 // indirect
+	github.com/googleapis/enterprise-certificate-proxy v0.3.6 // indirect
+	github.com/googleapis/gax-go/v2 v2.14.1 // indirect
 	github.com/gopherjs/gopherjs v1.17.2 // indirect
 	github.com/grafana/regexp v0.0.0-20240518133315-a468a5bfb3bc // indirect
-	github.com/grpc-ecosystem/grpc-gateway/v2 v2.24.0 // indirect
+	github.com/grpc-ecosystem/grpc-gateway/v2 v2.26.3 // indirect
 	github.com/hashicorp/errwrap v1.1.0 // indirect
 	github.com/hashicorp/go-immutable-radix v1.3.1 // indirect
 	github.com/hashicorp/go-msgpack/v2 v2.1.1 // indirect
@@ -160,7 +160,7 @@ require (
 	github.com/josharian/intern v1.0.0 // indirect
 	github.com/jpillora/backoff v1.0.0 // indirect
 	github.com/jtolds/gls v4.20.0+incompatible // indirect
-	github.com/klauspost/compress v1.17.11 // indirect
+	github.com/klauspost/compress v1.18.0 // indirect
 	github.com/kylelemons/godebug v1.1.0 // indirect
 	github.com/leodido/go-syslog/v4 v4.2.0 // indirect
 	github.com/leodido/ragel-machinery v0.0.0-20190525184631-5f46317e436b // indirect
@@ -170,7 +170,7 @@ require (
 	github.com/matttproud/golang_protobuf_extensions v1.0.4 // indirect
 	github.com/mdlayher/socket v0.4.1 // indirect
 	github.com/mdlayher/vsock v1.2.1 // indirect
-	github.com/miekg/dns v1.1.62 // indirect
+	github.com/miekg/dns v1.1.65 // indirect
 	github.com/mitchellh/copystructure v1.2.0 // indirect
 	github.com/mitchellh/mapstructure v1.5.1-0.20231216201459-8508981c8b6c // indirect
 	github.com/mitchellh/reflectwalk v1.0.2 // indirect
@@ -180,28 +180,33 @@ require (
 	github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f // indirect
 	github.com/oklog/run v1.1.0 // indirect
 	github.com/oklog/ulid v1.3.1 // indirect
+	github.com/oklog/ulid/v2 v2.1.0 // indirect
-	github.com/open-telemetry/opentelemetry-collector-contrib/internal/coreinternal v0.111.0 // indirect
+	github.com/open-telemetry/opentelemetry-collector-contrib/internal/exp/metrics v0.124.1 // indirect
+	github.com/open-telemetry/opentelemetry-collector-contrib/pkg/pdatautil v0.124.1 // indirect
+	github.com/open-telemetry/opentelemetry-collector-contrib/processor/deltatocumulativeprocessor v0.124.1 // indirect
 	github.com/paulmach/orb v0.11.1 // indirect
-	github.com/pierrec/lz4/v4 v4.1.21 // indirect
+	github.com/pierrec/lz4/v4 v4.1.22 // indirect
 	github.com/pkg/browser v0.0.0-20240102092130-5ac0b6a4141c // indirect
 	github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect
 	github.com/power-devops/perfstat v0.0.0-20220216144756-c35f1ee13d7c // indirect
-	github.com/prometheus/client_model v0.6.1 // indirect
-	github.com/prometheus/common/sigv4 v0.1.0 // indirect
-	github.com/prometheus/exporter-toolkit v0.13.2 // indirect
+	github.com/prometheus/client_model v0.6.2 // indirect
+	github.com/prometheus/exporter-toolkit v0.14.0 // indirect
+	github.com/prometheus/otlptranslator v0.0.0-20250320144820-d800c8b0eb07 // indirect
 	github.com/prometheus/procfs v0.15.1 // indirect
-	github.com/puzpuzpuz/xsync/v3 v3.5.0 // indirect
+	github.com/prometheus/sigv4 v0.1.2 // indirect
+	github.com/puzpuzpuz/xsync/v3 v3.5.1 // indirect
 	github.com/robfig/cron/v3 v3.0.1 // indirect
 	github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529 // indirect
 	github.com/segmentio/asm v1.2.0 // indirect
 	github.com/segmentio/backo-go v1.0.1 // indirect
-	github.com/shirou/gopsutil/v4 v4.24.9 // indirect
+	github.com/shirou/gopsutil/v4 v4.25.3 // indirect
 	github.com/shopspring/decimal v1.4.0 // indirect
 	github.com/shurcooL/httpfs v0.0.0-20230704072500-f1e31cf0ba5c // indirect
 	github.com/shurcooL/vfsgen v0.0.0-20230704071429-0000e147ea92 // indirect
 	github.com/smarty/assertions v1.15.0 // indirect
-	github.com/spf13/cobra v1.8.1 // indirect
-	github.com/spf13/pflag v1.0.5 // indirect
+	github.com/spf13/cobra v1.9.1 // indirect
+	github.com/spf13/pflag v1.0.6 // indirect
 	github.com/stretchr/objx v0.5.2 // indirect
 	github.com/tidwall/match v1.1.1 // indirect
 	github.com/tidwall/pretty v1.2.0 // indirect
@@ -216,65 +221,78 @@ require (
 	github.com/yusufpapurcu/wmi v1.2.4 // indirect
 	go.mongodb.org/mongo-driver v1.17.1 // indirect
 	go.opentelemetry.io/auto/sdk v1.1.0 // indirect
-	go.opentelemetry.io/collector v0.111.0 // indirect
-	go.opentelemetry.io/collector/component v0.111.0 // indirect
-	go.opentelemetry.io/collector/component/componentprofiles v0.111.0 // indirect
-	go.opentelemetry.io/collector/component/componentstatus v0.111.0 // indirect
-	go.opentelemetry.io/collector/config/configtelemetry v0.111.0 // indirect
+	go.opentelemetry.io/collector/component v1.34.0 // indirect
+	go.opentelemetry.io/collector/component/componentstatus v0.128.0 // indirect
+	go.opentelemetry.io/collector/component/componenttest v0.128.0 // indirect
+	go.opentelemetry.io/collector/config/configtelemetry v0.124.0 // indirect
-	go.opentelemetry.io/collector/confmap/converter/expandconverter v0.111.0 // indirect
-	go.opentelemetry.io/collector/confmap/provider/fileprovider v1.17.0 // indirect
-	go.opentelemetry.io/collector/connector v0.111.0 // indirect
-	go.opentelemetry.io/collector/connector/connectorprofiles v0.111.0 // indirect
-	go.opentelemetry.io/collector/consumer v0.111.0 // indirect
-	go.opentelemetry.io/collector/consumer/consumerprofiles v0.111.0 // indirect
-	go.opentelemetry.io/collector/consumer/consumertest v0.111.0 // indirect
-	go.opentelemetry.io/collector/exporter v0.111.0 // indirect
-	go.opentelemetry.io/collector/exporter/exporterprofiles v0.111.0 // indirect
-	go.opentelemetry.io/collector/extension v0.111.0 // indirect
+	go.opentelemetry.io/collector/confmap/provider/fileprovider v1.30.0 // indirect
+	go.opentelemetry.io/collector/confmap/xconfmap v0.124.0 // indirect
+	go.opentelemetry.io/collector/connector v0.124.0 // indirect
+	go.opentelemetry.io/collector/connector/connectortest v0.124.0 // indirect
+	go.opentelemetry.io/collector/connector/xconnector v0.124.0 // indirect
+	go.opentelemetry.io/collector/consumer v1.34.0 // indirect
+	go.opentelemetry.io/collector/consumer/consumererror v0.128.0 // indirect
+	go.opentelemetry.io/collector/consumer/consumertest v0.128.0 // indirect
+	go.opentelemetry.io/collector/consumer/xconsumer v0.128.0 // indirect
+	go.opentelemetry.io/collector/exporter v0.124.0 // indirect
+	go.opentelemetry.io/collector/exporter/exportertest v0.124.0 // indirect
+	go.opentelemetry.io/collector/exporter/xexporter v0.124.0 // indirect
+	go.opentelemetry.io/collector/extension v1.30.0 // indirect
-	go.opentelemetry.io/collector/extension/experimental/storage v0.111.0 // indirect
-	go.opentelemetry.io/collector/extension/extensioncapabilities v0.111.0 // indirect
-	go.opentelemetry.io/collector/featuregate v1.17.0 // indirect
-	go.opentelemetry.io/collector/internal/globalgates v0.111.0 // indirect
-	go.opentelemetry.io/collector/internal/globalsignal v0.111.0 // indirect
-	go.opentelemetry.io/collector/otelcol v0.111.0 // indirect
-	go.opentelemetry.io/collector/pdata/pprofile v0.111.0 // indirect
-	go.opentelemetry.io/collector/pdata/testdata v0.111.0 // indirect
-	go.opentelemetry.io/collector/pipeline v0.111.0 // indirect
-	go.opentelemetry.io/collector/processor/processorprofiles v0.111.0 // indirect
-	go.opentelemetry.io/collector/receiver v0.111.0 // indirect
-	go.opentelemetry.io/collector/receiver/receiverprofiles v0.111.0 // indirect
-	go.opentelemetry.io/collector/semconv v0.116.0 // indirect
-	go.opentelemetry.io/collector/service v0.111.0 // indirect
-	go.opentelemetry.io/contrib/instrumentation/net/http/httptrace/otelhttptrace v0.56.0 // indirect
-	go.opentelemetry.io/contrib/propagators/b3 v1.30.0 // indirect
-	go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp v0.6.0 // indirect
-	go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc v1.30.0 // indirect
-	go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp v1.30.0 // indirect
-	go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.33.0 // indirect
-	go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.33.0 // indirect
-	go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.33.0 // indirect
-	go.opentelemetry.io/otel/exporters/prometheus v0.52.0 // indirect
-	go.opentelemetry.io/otel/exporters/stdout/stdoutlog v0.6.0 // indirect
-	go.opentelemetry.io/otel/exporters/stdout/stdoutmetric v1.30.0 // indirect
-	go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.30.0 // indirect
-	go.opentelemetry.io/otel/log v0.10.0 // indirect
-	go.opentelemetry.io/otel/sdk/log v0.10.0 // indirect
-	go.opentelemetry.io/otel/sdk/metric v1.31.0 // indirect
-	go.opentelemetry.io/proto/otlp v1.4.0 // indirect
+	go.opentelemetry.io/collector/extension/extensioncapabilities v0.124.0 // indirect
+	go.opentelemetry.io/collector/extension/extensiontest v0.124.0 // indirect
+	go.opentelemetry.io/collector/featuregate v1.34.0 // indirect
+	go.opentelemetry.io/collector/internal/fanoutconsumer v0.124.0 // indirect
+	go.opentelemetry.io/collector/internal/telemetry v0.128.0 // indirect
+	go.opentelemetry.io/collector/pdata/pprofile v0.128.0 // indirect
+	go.opentelemetry.io/collector/pdata/testdata v0.128.0 // indirect
+	go.opentelemetry.io/collector/pipeline v0.128.0 // indirect
+	go.opentelemetry.io/collector/pipeline/xpipeline v0.124.0 // indirect
+	go.opentelemetry.io/collector/processor v1.34.0 // indirect
+	go.opentelemetry.io/collector/processor/processorhelper v0.128.0 // indirect
+	go.opentelemetry.io/collector/processor/processortest v0.128.0 // indirect
+	go.opentelemetry.io/collector/processor/xprocessor v0.128.0 // indirect
+	go.opentelemetry.io/collector/receiver v1.34.0 // indirect
+	go.opentelemetry.io/collector/receiver/receiverhelper v0.128.0 // indirect
+	go.opentelemetry.io/collector/receiver/receivertest v0.124.0 // indirect
+	go.opentelemetry.io/collector/receiver/xreceiver v0.124.0 // indirect
+	go.opentelemetry.io/collector/semconv v0.124.0 // indirect
+	go.opentelemetry.io/collector/service v0.124.0 // indirect
+	go.opentelemetry.io/collector/service/hostcapabilities v0.124.0 // indirect
+	go.opentelemetry.io/contrib/bridges/otelzap v0.11.0 // indirect
+	go.opentelemetry.io/contrib/instrumentation/net/http/httptrace/otelhttptrace v0.60.0 // indirect
+	go.opentelemetry.io/contrib/otelconf v0.15.0 // indirect
+	go.opentelemetry.io/contrib/propagators/b3 v1.35.0 // indirect
+	go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc v0.11.0 // indirect
+	go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp v0.11.0 // indirect
+	go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc v1.35.0 // indirect
+	go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp v1.35.0 // indirect
+	go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.35.0 // indirect
+	go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.35.0 // indirect
+	go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.35.0 // indirect
+	go.opentelemetry.io/otel/exporters/prometheus v0.57.0 // indirect
+	go.opentelemetry.io/otel/exporters/stdout/stdoutlog v0.11.0 // indirect
+	go.opentelemetry.io/otel/exporters/stdout/stdoutmetric v1.35.0 // indirect
+	go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.35.0 // indirect
+	go.opentelemetry.io/otel/log v0.12.2 // indirect
+	go.opentelemetry.io/otel/sdk/log v0.11.0 // indirect
+	go.opentelemetry.io/otel/sdk/metric v1.36.0 // indirect
+	go.opentelemetry.io/proto/otlp v1.5.0 // indirect
 	go.uber.org/atomic v1.11.0 // indirect
 	go.uber.org/goleak v1.3.0 // indirect
-	golang.org/x/mod v0.22.0 // indirect
+	golang.org/x/mod v0.24.0 // indirect
 	golang.org/x/net v0.40.0 // indirect
 	golang.org/x/sys v0.33.0 // indirect
-	golang.org/x/time v0.8.0 // indirect
-	golang.org/x/tools v0.28.0 // indirect
-	gonum.org/v1/gonum v0.15.1 // indirect
-	google.golang.org/api v0.213.0 // indirect
-	google.golang.org/genproto/googleapis/api v0.0.0-20241216192217-9240e9c98484 // indirect
-	google.golang.org/genproto/googleapis/rpc v0.0.0-20241209162323-e6fa225c2576 // indirect
-	google.golang.org/grpc v1.69.0 // indirect
+	golang.org/x/time v0.11.0 // indirect
+	golang.org/x/tools v0.32.0 // indirect
+	gonum.org/v1/gonum v0.16.0 // indirect
+	google.golang.org/api v0.230.0 // indirect
+	google.golang.org/genproto/googleapis/api v0.0.0-20250303144028-a0af3efb3deb // indirect
+	google.golang.org/genproto/googleapis/rpc v0.0.0-20250414145226-207652e42e2e // indirect
+	google.golang.org/grpc v1.72.2 // indirect
 	gopkg.in/telebot.v3 v3.3.8 // indirect
-	k8s.io/client-go v0.31.3 // indirect
+	k8s.io/client-go v0.32.3 // indirect
 	k8s.io/klog/v2 v2.130.1 // indirect
-	k8s.io/utils v0.0.0-20240711033017-18e509b52bc8 // indirect
+	k8s.io/utils v0.0.0-20241104100929-3ea5e8cea738 // indirect
 )
```
go.sum (572 changes)
@@ -29,10 +29,10 @@ cloud.google.com/go v0.94.1/go.mod h1:qAlAugsXlC+JWO+Bke5vCtc9ONxjQT3drlTTnAplMW
|
||||
cloud.google.com/go v0.97.0/go.mod h1:GF7l59pYBVlXQIBLx3a761cZ41F9bBH3JUlihCt2Udc=
|
||||
cloud.google.com/go v0.99.0/go.mod h1:w0Xx2nLzqWJPuozYQX+hFfCSI8WioryfRDzkoI/Y2ZA=
|
||||
cloud.google.com/go v0.100.2/go.mod h1:4Xra9TjzAeYHrl5+oeLlzbM2k3mjVhZh4UqTZ//w99A=
|
||||
cloud.google.com/go/auth v0.13.0 h1:8Fu8TZy167JkW8Tj3q7dIkr2v4cndv41ouecJx0PAHs=
|
||||
cloud.google.com/go/auth v0.13.0/go.mod h1:COOjD9gwfKNKz+IIduatIhYJQIc0mG3H102r/EMxX6Q=
|
||||
cloud.google.com/go/auth/oauth2adapt v0.2.6 h1:V6a6XDu2lTwPZWOawrAa9HUK+DB2zfJyTuciBG5hFkU=
|
||||
cloud.google.com/go/auth/oauth2adapt v0.2.6/go.mod h1:AlmsELtlEBnaNTL7jCj8VQFLy6mbZv0s4Q7NGBeQ5E8=
|
||||
cloud.google.com/go/auth v0.16.0 h1:Pd8P1s9WkcrBE2n/PhAwKsdrR35V3Sg2II9B+ndM3CU=
|
||||
cloud.google.com/go/auth v0.16.0/go.mod h1:1howDHJ5IETh/LwYs3ZxvlkXF48aSqqJUM+5o02dNOI=
|
||||
cloud.google.com/go/auth/oauth2adapt v0.2.8 h1:keo8NaayQZ6wimpNSmW5OPc283g65QNIiLpZnkHRbnc=
|
||||
cloud.google.com/go/auth/oauth2adapt v0.2.8/go.mod h1:XQ9y31RkqZCcwJWNSx2Xvric3RrU88hAYYbjDWYDL+c=
|
||||
cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o=
|
||||
cloud.google.com/go/bigquery v1.3.0/go.mod h1:PjpwJnslEMmckchkHFfq+HTD2DmtT67aNFKH1/VBDHE=
|
||||
cloud.google.com/go/bigquery v1.4.0/go.mod h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvftPBK2Dvzc=
|
||||
@@ -67,22 +67,22 @@ filippo.io/edwards25519 v1.1.0/go.mod h1:BxyFTGdWcka3PhytdK4V28tE5sGfRvvvRV7EaN4
|
||||
github.com/AfterShip/clickhouse-sql-parser v0.4.7 h1:zbdC0UooWLwBvOi5CeyCA42AWm6lMYuBVy6XnMzmF+c=
|
||||
github.com/AfterShip/clickhouse-sql-parser v0.4.7/go.mod h1:W0Z82wJWkJxz2RVun/RMwxue3g7ut47Xxl+SFqdJGus=
|
||||
github.com/Azure/azure-sdk-for-go v68.0.0+incompatible h1:fcYLmCpyNYRnvJbPerq7U0hS+6+I79yEDJBqVNcqUzU=
|
||||
github.com/Azure/azure-sdk-for-go/sdk/azcore v1.16.0 h1:JZg6HRh6W6U4OLl6lk7BZ7BLisIzM9dG1R50zUk9C/M=
|
||||
github.com/Azure/azure-sdk-for-go/sdk/azcore v1.16.0/go.mod h1:YL1xnZ6QejvQHWJrX/AvhFl4WW4rqHVoKspWNVwFk0M=
|
||||
github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.8.0 h1:B/dfvscEQtew9dVuoxqxrUKKv8Ih2f55PydknDamU+g=
|
||||
github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.8.0/go.mod h1:fiPSssYvltE08HJchL04dOy+RD4hgrjph0cwGGMntdI=
|
||||
github.com/Azure/azure-sdk-for-go/sdk/azidentity/cache v0.3.0 h1:+m0M/LFxN43KvULkDNfdXOgrjtg6UYJPFBJyuEcRCAw=
|
||||
github.com/Azure/azure-sdk-for-go/sdk/azidentity/cache v0.3.0/go.mod h1:PwOyop78lveYMRs6oCxjiVyBdyCgIYH6XHIVZO9/SFQ=
|
||||
github.com/Azure/azure-sdk-for-go/sdk/internal v1.10.0 h1:ywEEhmNahHBihViHepv3xPBn1663uRv2t2q/ESv9seY=
|
||||
github.com/Azure/azure-sdk-for-go/sdk/internal v1.10.0/go.mod h1:iZDifYGJTIgIIkYRNWPENUnqx6bJ2xnSDFI2tjwZNuY=
|
||||
github.com/Azure/azure-sdk-for-go/sdk/azcore v1.18.0 h1:Gt0j3wceWMwPmiazCa8MzMA0MfhmPIz0Qp0FJ6qcM0U=
|
||||
github.com/Azure/azure-sdk-for-go/sdk/azcore v1.18.0/go.mod h1:Ot/6aikWnKWi4l9QB7qVSwa8iMphQNqkWALMoNT3rzM=
|
||||
github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.9.0 h1:OVoM452qUFBrX+URdH3VpR299ma4kfom0yB0URYky9g=
|
||||
github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.9.0/go.mod h1:kUjrAo8bgEwLeZ/CmHqNl3Z/kPm7y6FKfxxK0izYUg4=
|
||||
github.com/Azure/azure-sdk-for-go/sdk/azidentity/cache v0.3.2 h1:yz1bePFlP5Vws5+8ez6T3HWXPmwOK7Yvq8QxDBD3SKY=
|
||||
github.com/Azure/azure-sdk-for-go/sdk/azidentity/cache v0.3.2/go.mod h1:Pa9ZNPuoNu/GztvBSKk9J1cDJW6vk/n0zLtV4mgd8N8=
|
||||
github.com/Azure/azure-sdk-for-go/sdk/internal v1.11.1 h1:FPKJS1T+clwv+OLGt13a8UjqeRuh0O4SJ3lUriThc+4=
|
||||
github.com/Azure/azure-sdk-for-go/sdk/internal v1.11.1/go.mod h1:j2chePtV91HrC22tGoRX3sGY42uF13WzmmV80/OdVAA=
|
||||
github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5 v5.7.0 h1:LkHbJbgF3YyvC53aqYGR+wWQDn2Rdp9AQdGndf9QvY4=
|
||||
github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5 v5.7.0/go.mod h1:QyiQdW4f4/BIfB8ZutZ2s+28RAgfa/pT+zS++ZHyM1I=
|
||||
github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4 v4.3.0 h1:bXwSugBiSbgtz7rOtbfGf+woewp4f06orW9OP5BjHLA=
|
||||
github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4 v4.3.0/go.mod h1:Y/HgrePTmGy9HjdSGTqZNa+apUpTVIEVKXJyARP2lrk=
|
||||
github.com/AzureAD/microsoft-authentication-extensions-for-go/cache v0.1.1 h1:WJTmL004Abzc5wDB5VtZG2PJk5ndYDgVacGqfirKxjM=
|
||||
github.com/AzureAD/microsoft-authentication-extensions-for-go/cache v0.1.1/go.mod h1:tCcJZ0uHAmvjsVYzEFivsRTN00oz5BEsRgQHu5JZ9WE=
|
||||
github.com/AzureAD/microsoft-authentication-library-for-go v1.2.2 h1:XHOnouVk1mxXfQidrMEnLlPk9UMeRtyBTnEFtxkV0kU=
|
||||
github.com/AzureAD/microsoft-authentication-library-for-go v1.2.2/go.mod h1:wP83P5OoQ5p6ip3ScPr0BAq0BvuPAvacpEuSzyouqAI=
|
||||
github.com/AzureAD/microsoft-authentication-library-for-go v1.4.2 h1:oygO0locgZJe7PpYPXT5A29ZkwJaPqcva7BVeemZOZs=
|
||||
github.com/AzureAD/microsoft-authentication-library-for-go v1.4.2/go.mod h1:wP83P5OoQ5p6ip3ScPr0BAq0BvuPAvacpEuSzyouqAI=
|
||||
github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
|
||||
github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo=
|
||||
github.com/ClickHouse/ch-go v0.63.1 h1:s2JyZvWLTCSAGdtjMBBmAgQQHMco6pawLJMOXi0FODM=
|
||||
@@ -126,9 +126,8 @@ github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310/go.mod h1:ufUuZ+zHj
|
||||
github.com/armon/go-radix v1.0.0/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8=
|
||||
github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2 h1:DklsrG3dyBCFEj5IhUbnKptjxatkF07cF2ak3yi77so=
|
||||
github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2/go.mod h1:WaHUgvxTVq04UNunO+XhnAqY/wQc+bxr74GqbsZ/Jqw=
|
||||
github.com/aws/aws-sdk-go v1.38.35/go.mod h1:hcU610XS61/+aQV88ixoOzUoG7v3b31pl2zKMmprdro=
|
||||
github.com/aws/aws-sdk-go v1.55.5 h1:KKUZBfBoyqy5d3swXyiC7Q76ic40rYcbqH7qjh59kzU=
|
||||
github.com/aws/aws-sdk-go v1.55.5/go.mod h1:eRwEWoyTWFMVYVQzKMNHWP5/RV4xIUGMQfXQHfHkpNU=
|
||||
github.com/aws/aws-sdk-go v1.55.7 h1:UJrkFq7es5CShfBwlWAC8DA077vp8PyVbQd3lqLiztE=
|
||||
github.com/aws/aws-sdk-go v1.55.7/go.mod h1:eRwEWoyTWFMVYVQzKMNHWP5/RV4xIUGMQfXQHfHkpNU=
|
||||
github.com/aws/aws-sdk-go-v2 v1.9.2/go.mod h1:cK/D0BBs0b/oWPIcX/Z/obahJK1TT7IPVjy53i/mX/4=
|
||||
github.com/aws/aws-sdk-go-v2/config v1.8.3/go.mod h1:4AEiLtAb8kLs7vgw2ZV3p2VZ1+hBavOc84hqxVNpCyw=
|
||||
github.com/aws/aws-sdk-go-v2/credentials v1.4.3/go.mod h1:FNNC6nQZQUuyhq5aE5c7ata8o9e4ECGmS4lAXC7o1mQ=
|
||||
@@ -153,6 +152,8 @@ github.com/bmizerany/assert v0.0.0-20160611221934-b7ed37b82869/go.mod h1:Ekp36dR
|
||||
github.com/cactus/go-statsd-client/statsd v0.0.0-20200423205355-cb0885a1018c/go.mod h1:l/bIBLeOl9eX+wxJAzxS4TveKRtAqlyDpHjhkfO0MEI=
|
||||
github.com/cenkalti/backoff/v4 v4.3.0 h1:MyRJ/UdXutAwSAT+s3wNd7MfTIcy71VQueUuFK343L8=
|
||||
github.com/cenkalti/backoff/v4 v4.3.0/go.mod h1:Y3VNntkOUPxTVeUxJ/G5vcM//AlwfmyYozVcomhLiZE=
|
||||
github.com/cenkalti/backoff/v5 v5.0.2 h1:rIfFVxEf1QsI7E1ZHfp/B4DF/6QBAUhmgkxc0H7Zss8=
|
||||
github.com/cenkalti/backoff/v5 v5.0.2/go.mod h1:rkhZdG3JZukswDf7f0cwqPNk4K0sa+F97BxZthm/crw=
|
||||
github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU=
|
||||
github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc=
|
||||
github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
|
||||
@@ -174,8 +175,8 @@ github.com/cncf/xds/go v0.0.0-20210805033703-aa0b78936158/go.mod h1:eXthEFrGJvWH
|
||||
github.com/cncf/xds/go v0.0.0-20210922020428-25de7278fc84/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
|
||||
github.com/cncf/xds/go v0.0.0-20211001041855-01bcc9b48dfe/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
|
||||
github.com/cncf/xds/go v0.0.0-20211011173535-cb28da3451f1/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
|
||||
github.com/cncf/xds/go v0.0.0-20240905190251-b4127c9b8d78 h1:QVw89YDxXxEe+l8gU8ETbOasdwEV+avkR75ZzsVV9WI=
|
||||
github.com/cncf/xds/go v0.0.0-20240905190251-b4127c9b8d78/go.mod h1:W+zGtBO5Y1IgJhy4+A9GOqVhqLpfZi+vwmdNXUehLA8=
|
||||
github.com/cncf/xds/go v0.0.0-20250121191232-2f005788dc42 h1:Om6kYQYDUk5wWbT0t0q6pvyM49i9XZAv9dDrkDA7gjk=
|
||||
github.com/cncf/xds/go v0.0.0-20250121191232-2f005788dc42/go.mod h1:W+zGtBO5Y1IgJhy4+A9GOqVhqLpfZi+vwmdNXUehLA8=
|
||||
github.com/coder/quartz v0.1.2 h1:PVhc9sJimTdKd3VbygXtS4826EOCpB1fXoRlLnCrE+s=
|
||||
github.com/coder/quartz v0.1.2/go.mod h1:vsiCc+AHViMKH2CQpGIpFgdHIEQsxwm8yCscqKmzbRA=
|
||||
github.com/coreos/go-oidc/v3 v3.11.0 h1:Ia3MxdwpSw702YW0xgfmP1GVCMA9aEFWu12XUZ3/OtI=
|
||||
@@ -184,7 +185,7 @@ github.com/coreos/go-semver v0.3.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3Ee
|
||||
github.com/coreos/go-systemd/v22 v22.3.2/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc=
|
||||
github.com/coreos/go-systemd/v22 v22.5.0 h1:RrqgGjYQKalulkV8NGVIfkXQf6YYmOyiJKk8iXXhfZs=
|
||||
github.com/coreos/go-systemd/v22 v22.5.0/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc=
|
||||
github.com/cpuguy83/go-md2man/v2 v2.0.4/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o=
|
||||
github.com/cpuguy83/go-md2man/v2 v2.0.6/go.mod h1:oOW0eioCTA6cOiMLiUPZOpcVxMig6NIQQ7OS05n1F4g=
|
||||
github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E=
|
||||
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
||||
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
||||
@@ -194,12 +195,12 @@ github.com/dennwc/varint v1.0.0 h1:kGNFFSSw8ToIy3obO/kKr8U9GZYUAxQEVuix4zfDWzE=
|
||||
github.com/dennwc/varint v1.0.0/go.mod h1:hnItb35rvZvJrbTALZtY/iQfDs48JKRG1RPpgziApxA=
|
||||
github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f h1:lO4WD4F/rVNCu3HqELle0jiPLLBs70cWOduZpkS1E78=
|
||||
github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f/go.mod h1:cuUVRXasLTGF7a8hSLbxyZXjz+1KgoB3wDUb6vlszIc=
|
||||
github.com/digitalocean/godo v1.126.0 h1:+Znh7VMQj/E8ArbjWnc7OKGjWfzC+I8OCSRp7r1MdD8=
|
||||
github.com/digitalocean/godo v1.126.0/go.mod h1:PU8JB6I1XYkQIdHFop8lLAY9ojp6M0XcU0TWaQSxbrc=
|
||||
github.com/digitalocean/godo v1.144.0 h1:rDCsmpwcDe5egFQ3Ae45HTde685/GzX037mWRMPufW0=
|
||||
github.com/digitalocean/godo v1.144.0/go.mod h1:tYeiWY5ZXVpU48YaFv0M5irUFHXGorZpDNm7zzdWMzM=
|
||||
github.com/distribution/reference v0.6.0 h1:0IXCQ5g4/QMHHkarYzh5l+u8T3t73zM5QvfrDyIgxBk=
|
||||
github.com/distribution/reference v0.6.0/go.mod h1:BbU0aIcezP1/5jX/8MP0YiH4SdvB5Y4f/wlDRiLyi3E=
|
||||
github.com/docker/docker v27.3.1+incompatible h1:KttF0XoteNTicmUtBO0L2tP+J7FGRFTjaEF4k6WdhfI=
|
||||
github.com/docker/docker v27.3.1+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
|
||||
github.com/docker/docker v28.1.1+incompatible h1:49M11BFLsVO1gxY9UX9p/zwkE/rswggs8AdFmXQw51I=
|
||||
github.com/docker/docker v28.1.1+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
|
||||
github.com/docker/go-connections v0.5.0 h1:USnMq7hx7gwdVZq1L49hLXaFtUdTADjXGp+uj1Br63c=
|
||||
github.com/docker/go-connections v0.5.0/go.mod h1:ov60Kzw0kKElRwhNs9UlUHAE/F9Fe6GLaXnqyDdmEXc=
|
||||
github.com/docker/go-units v0.5.0 h1:69rxXcBk27SvSaaxTtLh/8llcHD8vYHT7WSdRZ/jvr4=
|
||||
@@ -228,11 +229,12 @@ github.com/envoyproxy/go-control-plane v0.9.9-0.20210217033140-668b12f5399d/go.m
|
||||
github.com/envoyproxy/go-control-plane v0.9.9-0.20210512163311-63b5d3c536b0/go.mod h1:hliV/p42l8fGbc6Y9bQ70uLwIvmJyVE5k4iMKlh8wCQ=
|
||||
github.com/envoyproxy/go-control-plane v0.9.10-0.20210907150352-cf90f659a021/go.mod h1:AFq3mo9L8Lqqiid3OhADV3RfLJnjiw63cSpi+fDTRC0=
|
||||
github.com/envoyproxy/go-control-plane v0.10.2-0.20220325020618-49ff273808a1/go.mod h1:KJwIaB5Mv44NWtYuAOFCVOjcI94vtpEz2JU/D2v6IjE=
|
||||
github.com/envoyproxy/go-control-plane v0.13.1 h1:vPfJZCkob6yTMEgS+0TwfTUfbHjfy/6vOJ8hUWX/uXE=
|
||||
github.com/envoyproxy/go-control-plane v0.13.1/go.mod h1:X45hY0mufo6Fd0KW3rqsGvQMw58jvjymeCzBU3mWyHw=
|
||||
github.com/envoyproxy/go-control-plane v0.13.4 h1:zEqyPVyku6IvWCFwux4x9RxkLOMUL+1vC9xUFv5l2/M=
|
||||
github.com/envoyproxy/go-control-plane/envoy v1.32.4 h1:jb83lalDRZSpPWW2Z7Mck/8kXZ5CQAFYVjQcdVIr83A=
|
||||
github.com/envoyproxy/go-control-plane/envoy v1.32.4/go.mod h1:Gzjc5k8JcJswLjAx1Zm+wSYE20UrLtt7JZMWiWQXQEw=
|
||||
github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c=
|
||||
github.com/envoyproxy/protoc-gen-validate v1.1.0 h1:tntQDh69XqOCOZsDz0lVJQez/2L6Uu2PdjCQwWCJ3bM=
|
||||
github.com/envoyproxy/protoc-gen-validate v1.1.0/go.mod h1:sXRDRVmzEbkM7CVcM06s9shE/m23dg3wzjl0UWqJ2q4=
|
||||
github.com/envoyproxy/protoc-gen-validate v1.2.1 h1:DEo3O99U8j4hBFwbJfrz9VtgcDfUKS7KJ7spH3d86P8=
|
||||
github.com/envoyproxy/protoc-gen-validate v1.2.1/go.mod h1:d/C80l/jxXLdfEIhX1W2TmLfsJ31lvEjwamM4DxlWXU=
|
||||
github.com/expr-lang/expr v1.17.0 h1:+vpszOyzKLQXC9VF+wA8cVA0tlA984/Wabc/1hF9Whg=
|
||||
github.com/expr-lang/expr v1.17.0/go.mod h1:8/vRC7+7HBzESEqt5kKpYXxrxkr31SaO8r40VO/1IT4=
|
||||
github.com/facette/natsort v0.0.0-20181210072756-2cd4dd1e2dcb h1:IT4JYU7k4ikYg1SCxNI1/Tieq/NFvh6dzLdgi7eu0tM=
|
||||
@@ -251,8 +253,8 @@ github.com/frankban/quicktest v1.14.3/go.mod h1:mgiwOwqx65TmIk1wJ6Q7wvnVMocbUork
|
||||
github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo=
|
||||
github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ=
|
||||
github.com/fsnotify/fsnotify v1.5.4/go.mod h1:OVB6XrOHzAwXMpEM7uPOzcehqUV2UqJxmVXmkdnm1bU=
|
||||
github.com/fsnotify/fsnotify v1.8.0 h1:dAwr6QBTBZIkG8roQaJjGof0pp0EeF+tNV7YBP3F/8M=
|
||||
github.com/fsnotify/fsnotify v1.8.0/go.mod h1:8jBTzvmWwFyi3Pb8djgCCO5IBqzKJ/Jwo8TRcHyHii0=
|
||||
github.com/fsnotify/fsnotify v1.9.0 h1:2Ml+OJNzbYCTzsxtv8vKSFD9PbJjmhYF14k/jKC7S9k=
|
||||
github.com/fsnotify/fsnotify v1.9.0/go.mod h1:8jBTzvmWwFyi3Pb8djgCCO5IBqzKJ/Jwo8TRcHyHii0=
|
||||
github.com/fxamacker/cbor/v2 v2.7.0 h1:iM5WgngdRBanHcxugY4JySA0nk1wZorNOpTgCMedv5E=
|
||||
github.com/fxamacker/cbor/v2 v2.7.0/go.mod h1:pxXPTn3joSm21Gbwsv0w9OSA2y1HFR9qXEeXQVeNoDQ=
|
||||
github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04=
|
||||
@@ -310,16 +312,16 @@ github.com/go-redis/redis/v8 v8.11.5 h1:AcZZR7igkdvfVmQTPnu9WE37LRrO/YrBH5zWyjDC
|
||||
github.com/go-redis/redis/v8 v8.11.5/go.mod h1:gREzHqY1hg6oD9ngVRbLStwAWKhA0FEgq8Jd4h5lpwo=
|
||||
github.com/go-redis/redismock/v8 v8.11.5 h1:RJFIiua58hrBrSpXhnGX3on79AU3S271H4ZhRI1wyVo=
|
||||
github.com/go-redis/redismock/v8 v8.11.5/go.mod h1:UaAU9dEe1C+eGr+FHV5prCWIt0hafyPWbGMEWE0UWdA=
|
||||
github.com/go-resty/resty/v2 v2.13.1 h1:x+LHXBI2nMB1vqndymf26quycC4aggYJ7DECYbiz03g=
|
||||
github.com/go-resty/resty/v2 v2.13.1/go.mod h1:GznXlLxkq6Nh4sU59rPmUw3VtgpO3aS96ORAI6Q7d+0=
|
||||
github.com/go-resty/resty/v2 v2.16.5 h1:hBKqmWrr7uRc3euHVqmh1HTHcKn99Smr7o5spptdhTM=
|
||||
github.com/go-resty/resty/v2 v2.16.5/go.mod h1:hkJtXbA2iKHzJheXYvQ8snQES5ZLGKMwQ07xAwp/fiA=
|
||||
github.com/go-sql-driver/mysql v1.5.0/go.mod h1:DCzpHaOWr8IXmIStZouvnhqoel9Qv2LBy8hT2VhHyBg=
|
||||
github.com/go-sql-driver/mysql v1.8.1 h1:LedoTUt/eveggdHS9qUFC1EFSa8bU2+1pZjSRpvNJ1Y=
|
||||
github.com/go-sql-driver/mysql v1.8.1/go.mod h1:wEBSXgmK//2ZFJyE+qWnIsVGmvmEKlqwuVSjsCm7DZg=
|
||||
github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY=
|
||||
github.com/go-task/slim-sprig v0.0.0-20210107165309-348f09dbbbc0/go.mod h1:fyg7847qk6SyHyPtNmDHnmrv/HOrqktSC+C9fM+CJOE=
|
||||
github.com/go-test/deep v1.0.2-0.20181118220953-042da051cf31/go.mod h1:wGDj63lr65AM2AQyKZd/NYHGb0R+1RLqB8NKt3aSFNA=
|
||||
github.com/go-viper/mapstructure/v2 v2.1.0 h1:gHnMa2Y/pIxElCH2GlZZ1lZSsn6XMtufpGyP1XxdC/w=
|
||||
github.com/go-viper/mapstructure/v2 v2.1.0/go.mod h1:oJDH3BJKyqBA2TXFhDsKDGDTlndYOZ6rGS0BRZIxGhM=
|
||||
github.com/go-viper/mapstructure/v2 v2.2.1 h1:ZAaOCxANMuZx5RCeg0mBdEZk7DZasvvZIxtHqx8aGss=
|
||||
github.com/go-viper/mapstructure/v2 v2.2.1/go.mod h1:oJDH3BJKyqBA2TXFhDsKDGDTlndYOZ6rGS0BRZIxGhM=
|
||||
github.com/go-zookeeper/zk v1.0.4 h1:DPzxraQx7OrPyXq2phlGlNSIyWEsAox0RJmjTseMV6I=
|
||||
github.com/go-zookeeper/zk v1.0.4/go.mod h1:nOB03cncLtlp4t+UAkGSV+9beXP/akpekBwL+UX1Qcw=
|
||||
github.com/goccy/go-json v0.10.3 h1:KZ5WoDbxAIgm2HNbYckL0se1fHD6rz5j4ywS6ebzDqA=
|
||||
@@ -372,8 +374,8 @@ github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek
|
||||
github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps=
|
||||
github.com/golang/snappy v0.0.1/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
|
||||
github.com/golang/snappy v0.0.3/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
|
||||
github.com/golang/snappy v0.0.5-0.20220116011046-fa5810519dcb h1:PBC98N2aIaM3XXiurYmW7fx4GZkL8feAMVq7nEjURHk=
|
||||
github.com/golang/snappy v0.0.5-0.20220116011046-fa5810519dcb/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
|
||||
github.com/golang/snappy v1.0.0 h1:Oy607GVXHs7RtbggtPBnr2RmDArIsAefDwvrdWvRhGs=
|
||||
github.com/golang/snappy v1.0.0/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
|
||||
github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
|
||||
github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
|
||||
github.com/google/btree v1.0.1 h1:gK4Kx5IaGY9CD5sPJ36FHiBJ6ZXl0kilRiiCj+jdYp4=
|
||||
@@ -394,8 +396,9 @@ github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/
|
||||
github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
|
||||
github.com/google/go-cmp v0.5.7/go.mod h1:n+brtR0CgQNWTVd5ZUFpTBC8YFBDLK/h/bpaJ8/DtOE=
|
||||
github.com/google/go-cmp v0.5.8/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
|
||||
github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI=
|
||||
github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
|
||||
github.com/google/go-cmp v0.7.0 h1:wk8382ETsv4JYUZwIsn6YpYiWiBsYLSJiTsyBybVuN8=
|
||||
github.com/google/go-cmp v0.7.0/go.mod h1:pXiqmnSA92OHEEa9HXL2W4E7lf9JzCmGVUdgjX3N/iU=
|
||||
github.com/google/go-querystring v1.1.0 h1:AnCroh3fv4ZBgVIf1Iwtovgjaw/GiKJo8M8yD/fhyJ8=
|
||||
github.com/google/go-querystring v1.1.0/go.mod h1:Kcdr2DB4koayq7X8pmAG4sNG59So17icRSOU623lUBU=
|
||||
github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
|
||||
@@ -422,13 +425,13 @@ github.com/google/pprof v0.0.0-20210601050228-01bbb1931b22/go.mod h1:kpwsk12EmLe
|
||||
github.com/google/pprof v0.0.0-20210609004039-a478d1d731e9/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
|
||||
github.com/google/pprof v0.0.0-20210720184732-4bb14d4b1be1/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
|
||||
github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI=
|
||||
github.com/google/s2a-go v0.1.8 h1:zZDs9gcbt9ZPLV0ndSyQk6Kacx2g/X+SKYovpnz3SMM=
|
||||
github.com/google/s2a-go v0.1.8/go.mod h1:6iNWHTpQ+nfNRN5E00MSdfDwVesa8hhS32PhPO8deJA=
|
||||
github.com/google/s2a-go v0.1.9 h1:LGD7gtMgezd8a/Xak7mEWL0PjoTQFvpRudN895yqKW0=
|
||||
github.com/google/s2a-go v0.1.9/go.mod h1:YA0Ei2ZQL3acow2O62kdp9UlnvMmU7kA6Eutn0dXayM=
|
||||
github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
|
||||
github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0=
|
||||
github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
|
||||
github.com/googleapis/enterprise-certificate-proxy v0.3.4 h1:XYIDZApgAnrN1c855gTgghdIA6Stxb52D5RnLI1SLyw=
|
||||
github.com/googleapis/enterprise-certificate-proxy v0.3.4/go.mod h1:YKe7cfqYXjKGpGvmSg28/fFvhNzinZQm8DGnaburhGA=
|
||||
github.com/googleapis/enterprise-certificate-proxy v0.3.6 h1:GW/XbdyBFQ8Qe+YAmFU9uHLo7OnF5tL52HFAgMmyrf4=
|
||||
github.com/googleapis/enterprise-certificate-proxy v0.3.6/go.mod h1:MkHOF77EYAE7qfSuSS9PU6g4Nt4e11cnsDUowfwewLA=
|
||||
github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg=
|
||||
github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk=
|
||||
github.com/googleapis/gax-go/v2 v2.1.0/go.mod h1:Q3nei7sK6ybPYH7twZdmQpAd1MKb7pfu6SK+H1/DsU0=
|
||||
@@ -436,11 +439,12 @@ github.com/googleapis/gax-go/v2 v2.1.1/go.mod h1:hddJymUZASv3XPyGkUpKj8pPO47Rmb0
|
||||
github.com/googleapis/gax-go/v2 v2.2.0/go.mod h1:as02EH8zWkzwUoLbBaFeQ+arQaj/OthfcblKl4IGNaM=
|
||||
github.com/googleapis/gax-go/v2 v2.3.0/go.mod h1:b8LNqSzNabLiUpXKkY7HAR5jr6bIT99EXz9pXxye9YM=
|
||||
github.com/googleapis/gax-go/v2 v2.4.0/go.mod h1:XOTVJ59hdnfJLIP/dh8n5CGryZR2LxK9wbMD5+iXC6c=
|
||||
github.com/googleapis/gax-go/v2 v2.14.0 h1:f+jMrjBPl+DL9nI4IQzLUxMq7XrAqFYB7hBPqMNIe8o=
|
||||
github.com/googleapis/gax-go/v2 v2.14.0/go.mod h1:lhBCnjdLrWRaPvLWhmc8IS24m9mr07qSYnHncrgo+zk=
|
||||
github.com/googleapis/gax-go/v2 v2.14.1 h1:hb0FFeiPaQskmvakKu5EbCbpntQn48jyHuvrkurSS/Q=
|
||||
github.com/googleapis/gax-go/v2 v2.14.1/go.mod h1:Hb/NubMaVM88SrNkvl8X/o8XWwDJEPqouaLeN2IUxoA=
|
||||
github.com/googleapis/google-cloud-go-testing v0.0.0-20200911160855-bcd43fbb19e8/go.mod h1:dvDLG8qkwmyD9a/MJJN3XJcT3xFxOKAvTZGvuZmac9g=
|
||||
github.com/gophercloud/gophercloud v1.14.1 h1:DTCNaTVGl8/cFu58O1JwWgis9gtISAFONqpMKNg/Vpw=
|
||||
github.com/gophercloud/gophercloud v1.14.1/go.mod h1:aAVqcocTSXh2vYFZ1JTvx4EQmfgzxRcNupUfxZbBNDM=
|
||||
github.com/gophercloud/gophercloud v1.13.0 h1:8iY9d1DAbzMW6Vok1AxbbK5ZaUjzMp0tdyt4fX9IeJ0=
|
||||
github.com/gophercloud/gophercloud/v2 v2.7.0 h1:o0m4kgVcPgHlcXiWAjoVxGd8QCmvM5VU+YM71pFbn0E=
|
||||
github.com/gophercloud/gophercloud/v2 v2.7.0/go.mod h1:Ki/ILhYZr/5EPebrPL9Ej+tUg4lqx71/YH2JWVeU+Qk=
|
||||
github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY=
|
||||
github.com/gopherjs/gopherjs v1.17.2 h1:fQnZVsXk8uxXIStYb0N4bGk7jeyTalG/wsZjQ25dO0g=
|
||||
github.com/gopherjs/gopherjs v1.17.2/go.mod h1:pRRIvn/QzFLrKfvEz3qUuEhtE/zLCWfreZ6J5gM2i+k=
|
||||
@@ -454,12 +458,12 @@ github.com/grafana/regexp v0.0.0-20240518133315-a468a5bfb3bc h1:GN2Lv3MGO7AS6PrR
|
||||
github.com/grafana/regexp v0.0.0-20240518133315-a468a5bfb3bc/go.mod h1:+JKpmjMGhpgPL+rXZ5nsZieVzvarn86asRlBg4uNGnk=
|
||||
github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk=
|
||||
github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw=
|
||||
github.com/grpc-ecosystem/grpc-gateway/v2 v2.24.0 h1:TmHmbvxPmaegwhDubVz0lICL0J5Ka2vwTzhoePEXsGE=
|
||||
github.com/grpc-ecosystem/grpc-gateway/v2 v2.24.0/go.mod h1:qztMSjm835F2bXf+5HKAPIS5qsmQDqZna/PgVt4rWtI=
|
||||
github.com/grpc-ecosystem/grpc-gateway/v2 v2.26.3 h1:5ZPtiqj0JL5oKWmcsq4VMaAW5ukBEgSGXEN89zeH1Jo=
|
||||
github.com/grpc-ecosystem/grpc-gateway/v2 v2.26.3/go.mod h1:ndYquD05frm2vACXE1nsccT4oJzjhw2arTS2cpUD1PI=
|
||||
github.com/hashicorp/consul/api v1.12.0/go.mod h1:6pVBMo0ebnYdt2S3H87XhekM/HHrUoTD2XXb/VrZVy0=
|
||||
github.com/hashicorp/consul/api v1.13.0/go.mod h1:ZlVrynguJKcYr54zGaDbaL3fOvKC9m72FhPvA8T35KQ=
|
||||
github.com/hashicorp/consul/api v1.29.4 h1:P6slzxDLBOxUSj3fWo2o65VuKtbtOXFi7TSSgtXutuE=
|
||||
github.com/hashicorp/consul/api v1.29.4/go.mod h1:HUlfw+l2Zy68ceJavv2zAyArl2fqhGWnMycyt56sBgg=
|
||||
github.com/hashicorp/consul/api v1.32.0 h1:5wp5u780Gri7c4OedGEPzmlUEzi0g2KyiPphSr6zjVg=
|
||||
github.com/hashicorp/consul/api v1.32.0/go.mod h1:Z8YgY0eVPukT/17ejW+l+C7zJmKwgPHtjU1q16v/Y40=
|
||||
github.com/hashicorp/consul/sdk v0.8.0/go.mod h1:GBvyrGALthsZObzUGsfgHZQDXjg4lOjagTIwIR1vPms=
|
||||
github.com/hashicorp/cronexpr v1.1.2 h1:wG/ZYIKT+RT3QkOdgYc+xsKWVRgnxJ1OJtjjy84fJ9A=
|
||||
github.com/hashicorp/cronexpr v1.1.2/go.mod h1:P4wA0KBl9C5q2hABiMO7cp6jcIg96CDh1Efb3g1PWA4=
|
||||
@@ -520,8 +524,8 @@ github.com/hashicorp/mdns v1.0.4/go.mod h1:mtBihi+LeNXGtG8L9dX59gAEa12BDtBQSp4v/
|
||||
github.com/hashicorp/memberlist v0.3.0/go.mod h1:MS2lj3INKhZjWNqd3N0m3J+Jxf3DAOnAH9VT3Sh9MUE=
|
||||
github.com/hashicorp/memberlist v0.5.1 h1:mk5dRuzeDNis2bi6LLoQIXfMH7JQvAzt3mQD0vNZZUo=
|
||||
github.com/hashicorp/memberlist v0.5.1/go.mod h1:zGDXV6AqbDTKTM6yxW0I4+JtFzZAJVoIPvss4hV8F24=
|
||||
github.com/hashicorp/nomad/api v0.0.0-20240717122358-3d93bd3778f3 h1:fgVfQ4AC1avVOnu2cfms8VAiD8lUq3vWI8mTocOXN/w=
|
||||
github.com/hashicorp/nomad/api v0.0.0-20240717122358-3d93bd3778f3/go.mod h1:svtxn6QnrQ69P23VvIWMR34tg3vmwLz4UdUzm1dSCgE=
|
||||
github.com/hashicorp/nomad/api v0.0.0-20241218080744-e3ac00f30eec h1:+YBzb977VrmffaCX/OBm17dEVJUcWn5dW+eqs3aIJ/A=
|
||||
github.com/hashicorp/nomad/api v0.0.0-20241218080744-e3ac00f30eec/go.mod h1:svtxn6QnrQ69P23VvIWMR34tg3vmwLz4UdUzm1dSCgE=
|
||||
github.com/hashicorp/serf v0.9.6/go.mod h1:TXZNMjZQijwlDvp+r0b63xZ45H7JmCmgg4gpTwn9UV4=
|
||||
github.com/hashicorp/serf v0.9.7/go.mod h1:TXZNMjZQijwlDvp+r0b63xZ45H7JmCmgg4gpTwn9UV4=
|
||||
github.com/hashicorp/serf v0.10.1 h1:Z1H2J60yRKvfDYAOZLd2MU0ND4AH/WDz7xYHDWQsIPY=
|
||||
@@ -530,8 +534,8 @@ github.com/hashicorp/vault/api v1.0.4/go.mod h1:gDcqh3WGcR1cpF5AJz/B1UFheUEneMoI
|
||||
github.com/hashicorp/vault/sdk v0.1.13/go.mod h1:B+hVj7TpuQY1Y/GPbCpffmgd+tSEwvhkWnjtSYCaS2M=
|
||||
github.com/hashicorp/yamux v0.0.0-20180604194846-3520598351bb/go.mod h1:+NfK9FKeTrX5uv1uIXGdwYDTeHna2qgaIlx54MXqjAM=
|
||||
github.com/hashicorp/yamux v0.0.0-20181012175058-2f1d1f20f75d/go.mod h1:+NfK9FKeTrX5uv1uIXGdwYDTeHna2qgaIlx54MXqjAM=
|
||||
github.com/hetznercloud/hcloud-go/v2 v2.13.1 h1:jq0GP4QaYE5d8xR/Zw17s9qoaESRJMXfGmtD1a/qckQ=
|
||||
github.com/hetznercloud/hcloud-go/v2 v2.13.1/go.mod h1:dhix40Br3fDiBhwaSG/zgaYOFFddpfBm/6R1Zz0IiF0=
github.com/hetznercloud/hcloud-go/v2 v2.21.0 h1:wUpQT+fgAxIcdMtFvuCJ78ziqc/VARubpOQPQyj4Q84=
github.com/hetznercloud/hcloud-go/v2 v2.21.0/go.mod h1:WSM7w+9tT86sJTNcF8a/oHljC3HUmQfcLxYsgx6PpSc=
github.com/hjson/hjson-go/v4 v4.0.0 h1:wlm6IYYqHjOdXH1gHev4VoXCaW20HdQAGCxdOEEg2cs=
github.com/hjson/hjson-go/v4 v4.0.0/go.mod h1:KaYt3bTw3zhBjYqnXkYywcYctk0A2nxeEFTse3rH13E=
github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU=
@@ -543,12 +547,10 @@ github.com/huandu/xstrings v1.4.0 h1:D17IlohoQq4UcpqD7fDk80P7l+lwAmlFaBHgOipl2FU
github.com/huandu/xstrings v1.4.0/go.mod h1:y5/lhBue+AyNmUVz9RLU9xbLR0o4KIIExikq4ovT0aE=
github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc=
github.com/ianlancetaylor/demangle v0.0.0-20200824232613-28f6c0f3b639/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc=
github.com/imdario/mergo v0.3.16 h1:wwQJbIsHYGMUyLSPrEq1CT16AhnhNJQ51+4fdHUnCl4=
github.com/imdario/mergo v0.3.16/go.mod h1:WBLT9ZmE3lPoWsEzCh9LPo3TiwVN+ZKEjmz+hD27ysY=
github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8=
github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw=
github.com/ionos-cloud/sdk-go/v6 v6.2.1 h1:mxxN+frNVmbFrmmFfXnBC3g2USYJrl6mc1LW2iNYbFY=
github.com/ionos-cloud/sdk-go/v6 v6.2.1/go.mod h1:SXrO9OGyWjd2rZhAhEpdYN6VUAODzzqRdqA9BCviQtI=
github.com/ionos-cloud/sdk-go/v6 v6.3.3 h1:q33Sw1ZqsvqDkFaKG53dGk7BCOvPCPbGZpYqsF6tdjw=
github.com/ionos-cloud/sdk-go/v6 v6.3.3/go.mod h1:wCVwNJ/21W29FWFUv+fNawOTMlFoP1dS3L+ZuztFW48=
github.com/jackc/pgpassfile v1.0.0 h1:/6Hmqy13Ss2zCq62VdNG8tM1wchn8zjSGOBJ6icpsIM=
github.com/jackc/pgpassfile v1.0.0/go.mod h1:CEx0iS5ambNFdcRtxPj5JhEz+xB6uRky5eyVu/W2HEg=
github.com/jackc/pgservicefile v0.0.0-20240606120523-5a60cdf6a761 h1:iCEnooe7UlwOQYpKFhBabPMi4aNAfoODPEFNiAnClxo=
@@ -588,18 +590,18 @@ github.com/jtolds/gls v4.20.0+incompatible h1:xdiiI2gbIgH/gLH7ADydsJ1uDOEzR8yvV7
github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU=
github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w=
github.com/julienschmidt/httprouter v1.3.0/go.mod h1:JR6WtHb+2LUe8TCKY3cZOxFyyO8IZAc4RVcycCCAKdM=
github.com/keybase/go-keychain v0.0.0-20231219164618-57a3676c3af6 h1:IsMZxCuZqKuao2vNdfD82fjjgPLfyHLpR41Z88viRWs=
github.com/keybase/go-keychain v0.0.0-20231219164618-57a3676c3af6/go.mod h1:3VeWNIJaW+O5xpRQbPp0Ybqu1vJd/pm7s2F473HRrkw=
github.com/keybase/go-keychain v0.0.1 h1:way+bWYa6lDppZoZcgMbYsvC7GxljxrskdNInRtuthU=
github.com/keybase/go-keychain v0.0.1/go.mod h1:PdEILRW3i9D8JcdM+FmY6RwkHGnhHxXwkPPMeUgOK1k=
github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8=
github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
github.com/kisielk/sqlstruct v0.0.0-20201105191214-5f3e10d3ab46/go.mod h1:yyMNCyc/Ib3bDTKd379tNMpB/7/H5TjM2Y9QJ5THLbE=
github.com/klauspost/compress v1.13.6/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47etQ6QUkpK6sk=
github.com/klauspost/compress v1.17.11 h1:In6xLpyWOi1+C7tXUUWv2ot1QvBjxevKAaI6IXrJmUc=
github.com/klauspost/compress v1.17.11/go.mod h1:pMDklpSncoRMuLFrf1W9Ss9KT+0rH90U12bZKk7uwG0=
github.com/klauspost/compress v1.18.0 h1:c/Cqfb0r+Yi+JtIEq73FWXVkRonBlf0CRNYc8Zttxdo=
github.com/klauspost/compress v1.18.0/go.mod h1:2Pp+KzxcywXVXMr50+X0Q/Lsb43OQHYWRCY2AiWywWQ=
github.com/knadh/koanf v1.5.0 h1:q2TSd/3Pyc/5yP9ldIrSdIz26MCcyNQzW0pEAugLPNs=
github.com/knadh/koanf v1.5.0/go.mod h1:Hgyjp4y8v44hpZtPzs7JZfRAW5AhN7KfZcwv1RYggDs=
github.com/knadh/koanf/v2 v2.1.1 h1:/R8eXqasSTsmDCsAyYj+81Wteg8AqrV9CP6gvsTsOmM=
github.com/knadh/koanf/v2 v2.1.1/go.mod h1:4mnTRbZCK+ALuBXHZMjDfG9y714L7TykVnZkXbMU3Es=
github.com/knadh/koanf/v2 v2.1.2 h1:I2rtLRqXRy1p01m/utEtpZSSA6dcJbgGVuE27kW2PzQ=
github.com/knadh/koanf/v2 v2.1.2/go.mod h1:Gphfaen0q1Fc1HTgJgSTC4oRX9R2R5ErYMZJy8fLJBo=
github.com/kolo/xmlrpc v0.0.0-20220921171641-a4b6fa1dd06b h1:udzkj9S/zlT5X367kqJis0QP7YMxobob6zhzq6Yre00=
github.com/kolo/xmlrpc v0.0.0-20220921171641-a4b6fa1dd06b/go.mod h1:pcaDhQK0/NJZEvtCO0qQPPropqV0sJOJ6YW7X+9kRwM=
github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
@@ -626,8 +628,8 @@ github.com/leodido/ragel-machinery v0.0.0-20190525184631-5f46317e436b/go.mod h1:
github.com/lib/pq v1.2.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo=
github.com/lib/pq v1.10.9 h1:YXG7RB+JIjhP29X+OtkiDnYaXQwpS4JEWq7dtCCRUEw=
github.com/lib/pq v1.10.9/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o=
github.com/linode/linodego v1.41.0 h1:GcP7JIBr9iLRJ9FwAtb9/WCT1DuPJS/xUApapfdjtiY=
github.com/linode/linodego v1.41.0/go.mod h1:Ow4/XZ0yvWBzt3iAHwchvhSx30AyLintsSMvvQ2/SJY=
github.com/linode/linodego v1.49.0 h1:MNd3qwvQzbXB5mCpvdCqlUIu1RPA9oC+50LyB9kK+GQ=
github.com/linode/linodego v1.49.0/go.mod h1:B+HAM3//4w1wOS0BwdaQBKwBxlfe6kYJ7bSC6jJ/xtc=
github.com/lufia/plan9stats v0.0.0-20240408141607-282e7b5d6b74 h1:1KuuSOy4ZNgW0KA2oYIngXVFhQcXxhLqCVK7cBcldkk=
github.com/lufia/plan9stats v0.0.0-20240408141607-282e7b5d6b74/go.mod h1:ilwx/Dta8jXAgpFYFvSWEMwxmbWXyiUHkd5FwyKhb5k=
github.com/magefile/mage v1.15.0 h1:BvGheCMAsG3bWUDbZ8AyXXpCNwU9u5CB6sM+HNb9HYg=
@@ -665,8 +667,8 @@ github.com/mdlayher/vsock v1.2.1 h1:pC1mTJTvjo1r9n9fbm7S1j04rCgCzhCOS5DY0zqHlnQ=
github.com/mdlayher/vsock v1.2.1/go.mod h1:NRfCibel++DgeMD8z/hP+PPTjlNJsdPOmxcnENvE+SE=
github.com/miekg/dns v1.1.26/go.mod h1:bPDLeHnStXmXAq1m/Ch/hvfNHr14JKNPMBo3VZKjuso=
github.com/miekg/dns v1.1.41/go.mod h1:p6aan82bvRIyn+zDIv9xYNUpwa73JcSh9BKwknJysuI=
github.com/miekg/dns v1.1.62 h1:cN8OuEF1/x5Rq6Np+h1epln8OiyPWV+lROx9LxcGgIQ=
github.com/miekg/dns v1.1.62/go.mod h1:mvDlcItzm+br7MToIKqkglaGhlFMHJ9DTNNWONWXbNQ=
github.com/miekg/dns v1.1.65 h1:0+tIPHzUW0GCge7IiK3guGP57VAw7hoPDfApjkMD1Fc=
github.com/miekg/dns v1.1.65/go.mod h1:Dzw9769uoKVaLuODMDZz9M6ynFU6Em65csPuoi8G0ck=
github.com/mitchellh/cli v1.0.0/go.mod h1:hNIlj7HEI86fIcpObd7a0FcrxTWetlwJDGcceTlRvqc=
github.com/mitchellh/cli v1.1.0/go.mod h1:xcISNoH86gajksDmfB23e/pu+B+GeFRMYmoHXxx3xhI=
github.com/mitchellh/copystructure v1.0.0/go.mod h1:SNtv71yrdKgLRyLFxmLdkAbkKEFWgYaq1OVrnRcwhnw=
@@ -710,6 +712,8 @@ github.com/oklog/run v1.1.0 h1:GEenZ1cK0+q0+wsJew9qUg/DyD8k3JzYsZAi5gYi2mA=
github.com/oklog/run v1.1.0/go.mod h1:sVPdnTZT1zYwAJeCMu2Th4T21pA3FPOQRfWjQlk7DVU=
github.com/oklog/ulid v1.3.1 h1:EGfNDEx6MqHz8B3uNV6QAib1UR2Lm97sHi3ocA6ESJ4=
github.com/oklog/ulid v1.3.1/go.mod h1:CirwcVhetQ6Lv90oh/F+FBtV6XMibvdAFo93nm5qn4U=
github.com/oklog/ulid/v2 v2.1.0 h1:+9lhoxAP56we25tyYETBBY1YLA2SaoLvUFgrP2miPJU=
github.com/oklog/ulid/v2 v2.1.0/go.mod h1:rcEKHmBBKfef9DhnvX7y1HZBYxjXb0cP5ExxNsTT1QQ=
github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
github.com/onsi/ginkgo v1.12.1/go.mod h1:zj2OWP4+oCPe1qIXoGWkgMRwljMUYCdkwsT2108oapk=
github.com/onsi/ginkgo v1.16.4/go.mod h1:dX+/inL/fNMqNlz0e9LfyB9TswhZpCVdJM/Z6Vvnwo0=
@@ -719,8 +723,9 @@ github.com/onsi/ginkgo/v2 v2.0.0/go.mod h1:vw5CSIxN1JObi/U8gcbwft7ZxR2dgaR70JSE3
github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY=
github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo=
github.com/onsi/gomega v1.17.0/go.mod h1:HnhC7FXeEQY45zxNK3PPoIUhzk/80Xly9PcubAlGdZY=
github.com/onsi/gomega v1.18.1 h1:M1GfJqGRrBrrGGsbxzV5dqM2U2ApXefZCQpkukxYRLE=
github.com/onsi/gomega v1.18.1/go.mod h1:0q+aL8jAiMXy9hbwj2mr5GziHiwhAIQpFmmtT5hitRs=
github.com/onsi/gomega v1.35.1 h1:Cwbd75ZBPxFSuZ6T+rN/WCb/gOc6YgFBXLlZLhC7Ds4=
github.com/onsi/gomega v1.35.1/go.mod h1:PvZbdDc8J6XJEpDK4HCuRBm8a6Fzp9/DmhC9C7yFlog=
github.com/open-telemetry/opamp-go v0.5.0 h1:2YFbb6G4qBkq3yTRdVb5Nfz9hKHW/ldUyex352e1J7g=
github.com/open-telemetry/opamp-go v0.5.0/go.mod h1:IMdeuHGVc5CjKSu5/oNV0o+UmiXuahoHvoZ4GOmAI9M=
github.com/open-telemetry/opentelemetry-collector-contrib/extension/storage v0.111.0 h1:n1p2DedLvPEN1XEx26s1PR1PCuXTgCY4Eo+kDTq7q0s=
@@ -729,12 +734,16 @@ github.com/open-telemetry/opentelemetry-collector-contrib/internal/common v0.111
github.com/open-telemetry/opentelemetry-collector-contrib/internal/common v0.111.0/go.mod h1:I7nEkR7TDPFw162jYtPJZVevkniQfQ0FLIFuu2RGK3A=
github.com/open-telemetry/opentelemetry-collector-contrib/internal/coreinternal v0.111.0 h1:Hh3Lt6GIw/jMfCSJ5XjBoZRmjZ1pbJJu6Xi7WrDTUi0=
github.com/open-telemetry/opentelemetry-collector-contrib/internal/coreinternal v0.111.0/go.mod h1:rQ9lQhijXIJIT5UGuwiKoEcWW6bdWJ4fnO+PndfuYEw=
github.com/open-telemetry/opentelemetry-collector-contrib/pkg/pdatatest v0.111.0 h1:Ld/1EUAQ6z3CirSyf4A8waHzUAZbMPrDOno+7tb0vKM=
github.com/open-telemetry/opentelemetry-collector-contrib/pkg/pdatatest v0.111.0/go.mod h1:wAOT1iGOOTPTw2ysr0DW2Wrfi0/TECVgiGByRQfFiV4=
github.com/open-telemetry/opentelemetry-collector-contrib/pkg/pdatautil v0.111.0 h1:kUUO8VNv/d9Tpx0NvOsRnUsz/JvZ8SWRnK+vT0cNjuU=
github.com/open-telemetry/opentelemetry-collector-contrib/pkg/pdatautil v0.111.0/go.mod h1:SstR8PglIFBVGCZHS69bwJGl6TaCQQ5aLSEoas/8SRA=
github.com/open-telemetry/opentelemetry-collector-contrib/internal/exp/metrics v0.124.1 h1:jOG1ceAx+IATloKXHsE2Cy88XTgqPB/hiXicOrxENx8=
github.com/open-telemetry/opentelemetry-collector-contrib/internal/exp/metrics v0.124.1/go.mod h1:mtNCoy09iO1f2zy5bEqkyRfRPaNKea57yK63cfHixts=
github.com/open-telemetry/opentelemetry-collector-contrib/pkg/pdatatest v0.124.1 h1:G2daAIXiQhAwQSz9RK71QsBH9rmH/m/vdkFuGIEPfS4=
github.com/open-telemetry/opentelemetry-collector-contrib/pkg/pdatatest v0.124.1/go.mod h1:/WAA1PKvHNz7E5SrtGg2KfAWl/PrmS0FVYOanoGxk0I=
github.com/open-telemetry/opentelemetry-collector-contrib/pkg/pdatautil v0.124.1 h1:mMVzpkpy6rKL1Q/xXNogZVtWebIlxTRzhsgp3b9ioCM=
github.com/open-telemetry/opentelemetry-collector-contrib/pkg/pdatautil v0.124.1/go.mod h1:jM8Gsd0fIiwRzWrzd7Gm6PZYi5AgHPRkz0625Rtqyxo=
github.com/open-telemetry/opentelemetry-collector-contrib/pkg/stanza v0.111.0 h1:TnAhTFTwmJzFq6vVcf57lnRzAp+rNx5tEyrMudtDGsc=
github.com/open-telemetry/opentelemetry-collector-contrib/pkg/stanza v0.111.0/go.mod h1:l0CUp7vTH+Wv0tF5PYaHpPn1dLiVuMRAMqbBgXFpz54=
github.com/open-telemetry/opentelemetry-collector-contrib/processor/deltatocumulativeprocessor v0.124.1 h1:gmmzhgewk2fU0Md0vmaDEFgfRycfCfjgPvMA4SEdKiU=
github.com/open-telemetry/opentelemetry-collector-contrib/processor/deltatocumulativeprocessor v0.124.1/go.mod h1:AsQJBuUUY1/yqK2c87hv4deeteaKwktwLIfQCN2OGk4=
github.com/open-telemetry/opentelemetry-collector-contrib/processor/logstransformprocessor v0.111.0 h1:60NMfD7WMOHKCkV+GVM8HRqWMB4EAbqEY5sF9gYUG1Y=
github.com/open-telemetry/opentelemetry-collector-contrib/processor/logstransformprocessor v0.111.0/go.mod h1:/qECmbWAqic6qoYp3oBmAFRpnKbJdGuk9iDdMhwHYfw=
github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U=
@@ -743,8 +752,8 @@ github.com/opencontainers/image-spec v1.1.0 h1:8SG7/vwALn54lVB/0yZ/MMwhFrPYtpEHQ
github.com/opencontainers/image-spec v1.1.0/go.mod h1:W4s4sFTMaBeK1BQLXbG4AdM2szdn85PY75RI83NrTrM=
github.com/opentracing/opentracing-go v1.2.0 h1:uEJPy/1a5RIPAJ0Ov+OIO8OxWu77jEv+1B0VhjKrZUs=
github.com/opentracing/opentracing-go v1.2.0/go.mod h1:GxEUsuufX4nBwe+T+Wl9TAgYrxe9dPLANfrWvHYVTgc=
github.com/ovh/go-ovh v1.6.0 h1:ixLOwxQdzYDx296sXcgS35TOPEahJkpjMGtzPadCjQI=
github.com/ovh/go-ovh v1.6.0/go.mod h1:cTVDnl94z4tl8pP1uZ/8jlVxntjSIf09bNcQ5TJSC7c=
github.com/ovh/go-ovh v1.7.0 h1:V14nF7FwDjQrZt9g7jzcvAAQ3HN6DNShRFRMC3jLoPw=
github.com/ovh/go-ovh v1.7.0/go.mod h1:cTVDnl94z4tl8pP1uZ/8jlVxntjSIf09bNcQ5TJSC7c=
github.com/pascaldekloe/goe v0.0.0-20180627143212-57f6aae5913c/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc=
github.com/pascaldekloe/goe v0.1.0 h1:cBOtyMzM9HTpWjXfbbunk26uA6nG3a8n06Wieeh0MwY=
github.com/pascaldekloe/goe v0.1.0/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc=
@@ -753,13 +762,14 @@ github.com/patrickmn/go-cache v2.1.0+incompatible/go.mod h1:3Qf8kWWT7OJRJbdiICTK
github.com/paulmach/orb v0.11.1 h1:3koVegMC4X/WeiXYz9iswopaTwMem53NzTJuTF20JzU=
github.com/paulmach/orb v0.11.1/go.mod h1:5mULz1xQfs3bmQm63QEJA6lNGujuRafwA5S/EnuLaLU=
github.com/paulmach/protoscan v0.2.1/go.mod h1:SpcSwydNLrxUGSDvXvO0P7g7AuhJ7lcKfDlhJCDw2gY=
github.com/pborman/getopt v0.0.0-20170112200414-7148bc3a4c30/go.mod h1:85jBQOZwpVEaDAr341tbn15RS4fCAsIst0qp7i8ex1o=
github.com/pelletier/go-toml v1.7.0/go.mod h1:vwGMzjaWMwyfHwgIBhI2YUM4fB6nL6lVAvS1LBMMhTE=
github.com/pelletier/go-toml v1.9.5 h1:4yBQzkHv+7BHq2PQUZF3Mx0IYxG7LsP222s7Agd3ve8=
github.com/pelletier/go-toml v1.9.5/go.mod h1:u1nR/EPcESfeI/szUZKdtJ0xRNbUoANCkoOuaOx1Y+c=
github.com/pelletier/go-toml/v2 v2.0.5/go.mod h1:OMHamSCAODeSsVrwwvcJOaoN0LIUIaFVNZzmWyNfXas=
github.com/pierrec/lz4 v2.0.5+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi+IEE17M5jbnwPHcY=
github.com/pierrec/lz4/v4 v4.1.21 h1:yOVMLb6qSIDP67pl/5F7RepeKYu/VmTyEXvuMI5d9mQ=
github.com/pierrec/lz4/v4 v4.1.21/go.mod h1:gZWDp/Ze/IJXGXf23ltt2EXimqmTUXEy0GFuRQyBid4=
github.com/pierrec/lz4/v4 v4.1.22 h1:cKFw6uJDK+/gfw5BcDL0JL5aBsAFdsIT18eRtLj7VIU=
github.com/pierrec/lz4/v4 v4.1.22/go.mod h1:gZWDp/Ze/IJXGXf23ltt2EXimqmTUXEy0GFuRQyBid4=
github.com/pkg/browser v0.0.0-20240102092130-5ac0b6a4141c h1:+mdjkGKdHQG3305AYmdv1U2eRNDiU2ErMBj1gwrq8eQ=
github.com/pkg/browser v0.0.0-20240102092130-5ac0b6a4141c/go.mod h1:7rwL4CYBLnjLxUqIJNnCWiEdr3bn6IUYi15bNlnbCCU=
github.com/pkg/diff v0.0.0-20210226163009-20ebb0f2a09e/go.mod h1:pJLUxLENpZxwdsKMEsNbx1VGcRFpLqf3715MtcvvzbA=
@@ -777,33 +787,31 @@ github.com/posener/complete v1.1.1/go.mod h1:em0nMJCgc9GFtwrmVmEMR/ZL6WyhyjMBndr
github.com/posener/complete v1.2.3/go.mod h1:WZIdtGGp+qx0sLrYKtIRAruyNpv6hFCicSgv7Sy7s/s=
github.com/power-devops/perfstat v0.0.0-20220216144756-c35f1ee13d7c h1:NRoLoZvkBTKvR5gQLgA3e0hqjkY9u1wm+iOL45VN/qI=
github.com/power-devops/perfstat v0.0.0-20220216144756-c35f1ee13d7c/go.mod h1:OmDBASR4679mdNQnz2pUhc2G8CO2JrUAVFDRBDP/hJE=
github.com/prometheus/alertmanager v0.28.0 h1:sLN+6HhZet8hrbmGHLAHWsTXgZSVCvq9Ix3U3wvivqc=
github.com/prometheus/alertmanager v0.28.0/go.mod h1:/okSnb2LlodbMlRoOWQEKtqI/coOo2NKZDm2Hu9QHLQ=
github.com/prometheus/alertmanager v0.28.1 h1:BK5pCoAtaKg01BYRUJhEDV1tqJMEtYBGzPw8QdvnnvA=
github.com/prometheus/alertmanager v0.28.1/go.mod h1:0StpPUDDHi1VXeM7p2yYfeZgLVi/PPlt39vo9LQUHxM=
github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw=
github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo=
github.com/prometheus/client_golang v1.4.0/go.mod h1:e9GMxYsXl05ICDXkRhurwBS4Q3OK1iX/F2sw+iXX5zU=
github.com/prometheus/client_golang v1.7.1/go.mod h1:PY5Wy2awLA44sXw4AOSfFBetzPP4j5+D6mVACh+pe2M=
github.com/prometheus/client_golang v1.11.0/go.mod h1:Z6t4BnS23TR94PD6BsDNk8yVqroYurpAkEiz0P2BEV0=
github.com/prometheus/client_golang v1.11.1/go.mod h1:Z6t4BnS23TR94PD6BsDNk8yVqroYurpAkEiz0P2BEV0=
github.com/prometheus/client_golang v1.20.5 h1:cxppBPuYhUnsO6yo/aoRol4L7q7UFfdm+bR9r+8l63Y=
github.com/prometheus/client_golang v1.20.5/go.mod h1:PIEt8X02hGcP8JWbeHyeZ53Y/jReSnHgO035n//V5WE=
github.com/prometheus/client_golang v1.22.0 h1:rb93p9lokFEsctTys46VnV1kLCDpVZ0a/Y92Vm0Zc6Q=
github.com/prometheus/client_golang v1.22.0/go.mod h1:R7ljNsLXhuQXYZYtw6GAE9AZg8Y7vEW5scdCXrWRXC0=
github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
github.com/prometheus/client_model v0.6.1 h1:ZKSh/rekM+n3CeS952MLRAdFwIKqeY8b62p8ais2e9E=
github.com/prometheus/client_model v0.6.1/go.mod h1:OrxVMOVHjw3lKMa8+x6HeMGkHMQyHDk9E3jmP2AmGiY=
github.com/prometheus/client_model v0.6.2 h1:oBsgwpGs7iVziMvrGhE53c/GrLUsZdHnqNwqPLxwZyk=
github.com/prometheus/client_model v0.6.2/go.mod h1:y3m2F6Gdpfy6Ut/GBsUqTWZqCUvMVzSfMLjcu6wAwpE=
github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4=
github.com/prometheus/common v0.9.1/go.mod h1:yhUN8i9wzaXS3w1O07YhxHEBxD+W35wd8bs7vj7HSQ4=
github.com/prometheus/common v0.10.0/go.mod h1:Tlit/dnDKsSWFlCLTWaA1cyBgKHSMdTB80sz/V91rCo=
github.com/prometheus/common v0.26.0/go.mod h1:M7rCNAaPfAosfx8veZJCuw84e35h3Cfd9VFqTh1DIvc=
github.com/prometheus/common v0.29.0/go.mod h1:vu+V0TpY+O6vW9J44gczi3Ap/oXXR10b+M/gUGO4Hls=
github.com/prometheus/common v0.61.0 h1:3gv/GThfX0cV2lpO7gkTUwZru38mxevy90Bj8YFSRQQ=
github.com/prometheus/common v0.61.0/go.mod h1:zr29OCN/2BsJRaFwG8QOBr41D6kkchKbpeNH7pAjb/s=
github.com/prometheus/common/sigv4 v0.1.0 h1:qoVebwtwwEhS85Czm2dSROY5fTo2PAPEVdDeppTwGX4=
github.com/prometheus/common/sigv4 v0.1.0/go.mod h1:2Jkxxk9yYvCkE5G1sQT7GuEXm57JrvHu9k5YwTjsNtI=
github.com/prometheus/exporter-toolkit v0.13.2 h1:Z02fYtbqTMy2i/f+xZ+UK5jy/bl1Ex3ndzh06T/Q9DQ=
github.com/prometheus/exporter-toolkit v0.13.2/go.mod h1:tCqnfx21q6qN1KA4U3Bfb8uWzXfijIrJz3/kTIqMV7g=
github.com/prometheus/common v0.63.0 h1:YR/EIY1o3mEFP/kZCD7iDMnLPlGyuU2Gb3HIcXnA98k=
github.com/prometheus/common v0.63.0/go.mod h1:VVFF/fBIoToEnWRVkYoXEkq3R3paCoxG9PXP74SnV18=
github.com/prometheus/exporter-toolkit v0.14.0 h1:NMlswfibpcZZ+H0sZBiTjrA3/aBFHkNZqE+iCj5EmRg=
github.com/prometheus/exporter-toolkit v0.14.0/go.mod h1:Gu5LnVvt7Nr/oqTBUC23WILZepW0nffNo10XdhQcwWA=
github.com/prometheus/otlptranslator v0.0.0-20250320144820-d800c8b0eb07 h1:YaJ1JqyKGIUFIMUpMeT22yewZMXiTt5sLgWG1D/m4Yc=
github.com/prometheus/otlptranslator v0.0.0-20250320144820-d800c8b0eb07/go.mod h1:ZO/4EUanXL7wbvfMHcS+rq9sCBxICdaU8RBFkVg5wv0=
github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA=
github.com/prometheus/procfs v0.0.8/go.mod h1:7Qr8sr6344vo1JqZ6HhLceV9o3AJ1Ff+GxbHq6oeK9A=
@@ -811,13 +819,15 @@ github.com/prometheus/procfs v0.1.3/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4O
github.com/prometheus/procfs v0.6.0/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA=
github.com/prometheus/procfs v0.15.1 h1:YagwOFzUgYfKKHX6Dr+sHT7km/hxC76UB0learggepc=
github.com/prometheus/procfs v0.15.1/go.mod h1:fB45yRUv8NstnjriLhBQLuOUt+WW4BsoGhij/e3PBqk=
github.com/prometheus/prometheus v0.300.1 h1:9KKcTTq80gkzmXW0Et/QCFSrBPgmwiS3Hlcxc6o8KlM=
github.com/prometheus/prometheus v0.300.1/go.mod h1:gtTPY/XVyCdqqnjA3NzDMb0/nc5H9hOu1RMame+gHyM=
github.com/puzpuzpuz/xsync/v3 v3.5.0 h1:i+cMcpEDY1BkNm7lPDkCtE4oElsYLn+EKF8kAu2vXT4=
github.com/puzpuzpuz/xsync/v3 v3.5.0/go.mod h1:VjzYrABPabuM4KyBh1Ftq6u8nhwY5tBPKP9jpmh0nnA=
github.com/prometheus/prometheus v0.304.1 h1:e4kpJMb2Vh/PcR6LInake+ofcvFYHT+bCfmBvOkaZbY=
github.com/prometheus/prometheus v0.304.1/go.mod h1:ioGx2SGKTY+fLnJSQCdTHqARVldGNS8OlIe3kvp98so=
github.com/prometheus/sigv4 v0.1.2 h1:R7570f8AoM5YnTUPFm3mjZH5q2k4D+I/phCWvZ4PXG8=
github.com/prometheus/sigv4 v0.1.2/go.mod h1:GF9fwrvLgkQwDdQ5BXeV9XUSCH/IPNqzvAoaohfjqMU=
github.com/puzpuzpuz/xsync/v3 v3.5.1 h1:GJYJZwO6IdxN/IKbneznS6yPkVC+c3zyY/j19c++5Fg=
github.com/puzpuzpuz/xsync/v3 v3.5.1/go.mod h1:VjzYrABPabuM4KyBh1Ftq6u8nhwY5tBPKP9jpmh0nnA=
github.com/rcrowley/go-metrics v0.0.0-20201227073835-cf1acfcdf475/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4=
github.com/redis/go-redis/v9 v9.6.3 h1:8Dr5ygF1QFXRxIH/m3Xg9MMG1rS8YCtAgosrsewT6i0=
github.com/redis/go-redis/v9 v9.6.3/go.mod h1:0C0c6ycQsdpVNQpxb1njEQIqkx5UcsM8FJCQLgE9+RA=
github.com/redis/go-redis/v9 v9.7.3 h1:YpPyAayJV+XErNsatSElgRZZVCwXX9QzkKYNvO7x0wM=
github.com/redis/go-redis/v9 v9.7.3/go.mod h1:bGUrSggJ9X9GUmZpZNEOQKaANxSGgOEBRltRTZHSvrA=
github.com/rhnvrm/simples3 v0.6.1/go.mod h1:Y+3vYm2V7Y4VijFoJHHTrja6OgPrJ2cBti8dPGkC3sA=
github.com/robfig/cron/v3 v3.0.1 h1:WdRxkvbJztn8LMz/QEvLN5sBU+xKpSqwwUO1Pjr4qDs=
github.com/robfig/cron/v3 v3.0.1/go.mod h1:eQICP3HwyT7UooqI/z+Ov+PtYAWygg1TEWWzGIFLtro=
@@ -841,8 +851,8 @@ github.com/ryanuber/go-glob v1.0.0/go.mod h1:807d1WSdnB0XRJzKNil9Om6lcp/3a0v4qIH
github.com/sagikazarmark/crypt v0.6.0/go.mod h1:U8+INwJo3nBv1m6A/8OBXAq7Jnpspk5AxSgDyEQcea8=
github.com/samber/lo v1.47.0 h1:z7RynLwP5nbyRscyvcD043DWYoOcYRv3mV8lBeqOCLc=
github.com/samber/lo v1.47.0/go.mod h1:RmDH9Ct32Qy3gduHQuKJ3gW1fMHAnE/fAzQuf6He5cU=
github.com/scaleway/scaleway-sdk-go v1.0.0-beta.30 h1:yoKAVkEVwAqbGbR8n87rHQ1dulL25rKloGadb3vm770=
github.com/scaleway/scaleway-sdk-go v1.0.0-beta.30/go.mod h1:sH0u6fq6x4R5M7WxkoQFY/o7UaiItec0o1LinLCJNq8=
github.com/scaleway/scaleway-sdk-go v1.0.0-beta.33 h1:KhF0WejiUTDbL5X55nXowP7zNopwpowa6qaMAWyIE+0=
github.com/scaleway/scaleway-sdk-go v1.0.0-beta.33/go.mod h1:792k1RTU+5JeMXm35/e2Wgp71qPH/DmDoZrRc+EFZDk=
github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529 h1:nn5Wsu0esKSJiIVhscUtVbo7ada43DJhG55ua/hjS5I=
github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529/go.mod h1:DxrIzT+xaE7yg65j358z/aeFdxmN0P9QXhEzd20vsDc=
github.com/sebdah/goldie/v2 v2.5.3 h1:9ES/mNN+HNUbNWpVAlrzuZ7jE+Nrczbj8uFRjM7624Y=
@@ -857,8 +867,8 @@ github.com/sergi/go-diff v1.1.0 h1:we8PVUC3FE2uYfodKH/nBHMSetSfHDR6scGdBi+erh0=
github.com/sergi/go-diff v1.1.0/go.mod h1:STckp+ISIX8hZLjrqAeVduY0gWCT9IjLuqbuNXdaHfM=
github.com/sethvargo/go-password v0.2.0 h1:BTDl4CC/gjf/axHMaDQtw507ogrXLci6XRiLc7i/UHI=
github.com/sethvargo/go-password v0.2.0/go.mod h1:Ym4Mr9JXLBycr02MFuVQ/0JHidNetSgbzutTr3zsYXE=
github.com/shirou/gopsutil/v4 v4.24.9 h1:KIV+/HaHD5ka5f570RZq+2SaeFsb/pq+fp2DGNWYoOI=
github.com/shirou/gopsutil/v4 v4.24.9/go.mod h1:3fkaHNeYsUFCGZ8+9vZVWtbyM1k2eRnlL+bWO8Bxa/Q=
github.com/shirou/gopsutil/v4 v4.25.3 h1:SeA68lsu8gLggyMbmCn8cmp97V1TI9ld9sVzAUcKcKE=
github.com/shirou/gopsutil/v4 v4.25.3/go.mod h1:xbuxyoZj+UsgnZrENu3lQivsngRR5BdjbJwf2fv4szA=
github.com/shopspring/decimal v1.4.0 h1:bxl37RwXBklmTi0C79JfXCEBD1cqqHt0bbgBAGFp81k=
github.com/shopspring/decimal v1.4.0/go.mod h1:gawqmDU56v4yIKSwfBSFip1HdCCXN8/+DMd9qYNcwME=
github.com/shurcooL/httpfs v0.0.0-20230704072500-f1e31cf0ba5c h1:aqg5Vm5dwtvL+YgDpBcK1ITf3o96N/K7/wsRXQnUTEs=
@@ -879,11 +889,12 @@ github.com/soheilhy/cmux v0.1.5/go.mod h1:T7TcVDs9LWfQgPlPsdngu6I6QIoyIFZDDC6sNE
github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA=
github.com/spf13/afero v1.8.2/go.mod h1:CtAatgMJh6bJEIs48Ay/FOnkljP3WeGUG0MC1RfAqwo=
github.com/spf13/cast v1.5.0/go.mod h1:SpXXQ5YoyJw6s3/6cMTQuxvgRl3PCJiyaX9p6b155UU=
github.com/spf13/cobra v1.8.1 h1:e5/vxKd/rZsfSJMUX1agtjeTDf+qv1/JdBF8gg5k9ZM=
github.com/spf13/cobra v1.8.1/go.mod h1:wHxEcudfqmLYa8iTfL+OuZPbBZkmvliBWKIezN3kD9Y=
github.com/spf13/cobra v1.9.1 h1:CXSaggrXdbHK9CF+8ywj8Amf7PBRmPCOJugH954Nnlo=
github.com/spf13/cobra v1.9.1/go.mod h1:nDyEzZ8ogv936Cinf6g1RU9MRY64Ir93oCnqb9wxYW0=
github.com/spf13/jwalterweatherman v1.1.0/go.mod h1:aNWZUN0dPAAO/Ljvb5BEdw96iTZ0EXowPYD95IqWIGo=
github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA=
github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
github.com/spf13/pflag v1.0.6 h1:jFzHGLGAlb3ruxLB8MhbI6A8+AQX/2eW4qeyNZXNp2o=
github.com/spf13/pflag v1.0.6/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
github.com/spf13/viper v1.13.0/go.mod h1:Icm2xNL3/8uyh/wFuB1jI7TiTNKp8632Nwegu+zgdYw=
github.com/srikanthccv/ClickHouse-go-mock v0.11.0 h1:hKY9l7SbhI4IPPs7hjKAL1iDgKc7rpfu8kx7BvehqlQ=
github.com/srikanthccv/ClickHouse-go-mock v0.11.0/go.mod h1:CzFC21J4tLn7cEYdU5k6hg7yyf052xtZXUY2e3UF6+I=
@@ -972,137 +983,169 @@ go.opencensus.io v0.22.5/go.mod h1:5pWMHQbX5EPX2/62yrJeAkowc+lfs/XD7Uxpq3pI6kk=
go.opencensus.io v0.23.0/go.mod h1:XItmlyltB5F7CS4xOC1DcqMoFqwtC6OG2xF7mCv7P7E=
go.opentelemetry.io/auto/sdk v1.1.0 h1:cH53jehLUN6UFLY71z+NDOiNJqDdPRaXzTel0sJySYA=
go.opentelemetry.io/auto/sdk v1.1.0/go.mod h1:3wSPjt5PWp2RhlCcmmOial7AvC4DQqZb7a7wCow3W8A=
go.opentelemetry.io/collector v0.111.0 h1:D3LJTYrrK2ac94E2PXPSbVkArqxbklbCLsE4MAJQdRo=
go.opentelemetry.io/collector v0.111.0/go.mod h1:eZi4Z1DmHy+sVqbUI8dZNvhrH7HZIlX+0AKorOtv6nE=
go.opentelemetry.io/collector/client v1.17.0 h1:eJB4r4nPY0WrQ6IQEEbOPCOfQU7N15yzZud9y5fKfms=
go.opentelemetry.io/collector/client v1.17.0/go.mod h1:egG3tOG68zvC04hgl6cW2H/oWCUCCdDWtL4WpbcSUys=
go.opentelemetry.io/collector/component v0.111.0 h1:AiDIrhkq6sbHnU9Rhq6t4DC4Gal43bryd1+NTJNojAQ=
go.opentelemetry.io/collector/component v0.111.0/go.mod h1:wYwbRuhzK5bm5x1bX+ukm1tT50QXYLs4MKwzyfiVGoE=
go.opentelemetry.io/collector/component/componentprofiles v0.111.0 h1:yT3Sa833G9GMiXkAOuYi30afd/5vTmDQpZo6+X/XjXM=
go.opentelemetry.io/collector/component/componentprofiles v0.111.0/go.mod h1:v9cm6ndumcbCSqZDBs0vRReRW7KSYax1RZVhs/CiZCo=
go.opentelemetry.io/collector/component/componentstatus v0.111.0 h1:DojO8TbkysTtEoxzN6fJqhgCsu0QhxgJ9R+1bitnowM=
go.opentelemetry.io/collector/component/componentstatus v0.111.0/go.mod h1:wKozN6s9dykUB9aLSBXSPT9SJ2fckNvGSFZx4fRZbSY=
go.opentelemetry.io/collector/config/configauth v0.111.0 h1:0CcgX4TzK5iu2YtryIu3al8lNI+9fqjbGoyvAFk9ZCw=
go.opentelemetry.io/collector/config/configauth v0.111.0/go.mod h1:5oyYNL3gnYMYNdNsEjFvA2Tdc1yjG8L+HQFIjPo6kK8=
go.opentelemetry.io/collector/config/configcompression v1.17.0 h1:5CzLHTPOgHaKod1ZQLYs0o7GZDBhdsLQRm8Lcbo79vU=
go.opentelemetry.io/collector/config/configcompression v1.17.0/go.mod h1:pnxkFCLUZLKWzYJvfSwZnPrnm0twX14CYj2ADth5xiU=
go.opentelemetry.io/collector/config/confighttp v0.111.0 h1:nZJFHKYYeCasyhhFC71iZf6GAs6pfFcNOga6b8+lFvc=
go.opentelemetry.io/collector/config/confighttp v0.111.0/go.mod h1:heE5JjcLDiH8fMULf55QL2oI9+8Ct58Vq/QfP7TV684=
go.opentelemetry.io/collector/config/configopaque v1.17.0 h1:wHhUgJhmDgNd6M7GW8IU5HjWi/pNmBEe9jBhavoR45g=
go.opentelemetry.io/collector/config/configopaque v1.17.0/go.mod h1:6zlLIyOoRpJJ+0bEKrlZOZon3rOp5Jrz9fMdR4twOS4=
go.opentelemetry.io/collector/config/configretry v1.17.0 h1:9GaiNKgUDx5by+A0aHKojw1BilHSK+8wq2LOmnynN00=
go.opentelemetry.io/collector/config/configretry v1.17.0/go.mod h1:KvQF5cfphq1rQm1dKR4eLDNQYw6iI2fY72NMZVa+0N0=
go.opentelemetry.io/collector/config/configtelemetry v0.111.0 h1:Q3TJRM2A3FIDjIvzWa3uFArsdFN0I/0GzcWynHjC+oY=
go.opentelemetry.io/collector/config/configtelemetry v0.111.0/go.mod h1:R0MBUxjSMVMIhljuDHWIygzzJWQyZHXXWIgQNxcFwhc=
go.opentelemetry.io/collector/config/configtls v1.17.0 h1:5DPgmBgpKEopLGmkjaihZHVA/8yH0LGoOrUZlb86T0Q=
go.opentelemetry.io/collector/config/configtls v1.17.0/go.mod h1:xUV5/xAHJbwrCuT2rGurBGSUqyFFAVVBcQ5DJAENeCc=
go.opentelemetry.io/collector/config/internal v0.111.0 h1:HTrN9xCpX42xlyDskWbhA/2NkSjMasxNEuGkmjjq7Q8=
go.opentelemetry.io/collector/config/internal v0.111.0/go.mod h1:yC7E4h1Uj0SubxcFImh6OvBHFTjMh99+A5PuyIgDWqc=
go.opentelemetry.io/collector/confmap v1.17.0 h1:5UKHtPGtzNGaOGBsJ6aFpvsKElNUXOVuErBfC0eTWLM=
go.opentelemetry.io/collector/confmap v1.17.0/go.mod h1:GrIZ12P/9DPOuTpe2PIS51a0P/ZM6iKtByVee1Uf3+k=
go.opentelemetry.io/collector v0.124.0 h1:g/dfdGFhBcQI0ggGxTmGlJnJ6Yl6T2gVxQoIj4UfXCc=
go.opentelemetry.io/collector v0.124.0/go.mod h1:QzERYfmHUedawjr8Ph/CBEEkVqWS8IlxRLAZt+KHlCg=
go.opentelemetry.io/collector/client v1.30.0 h1:QbvOrvwUGcnVjnIBn2zyLLubisOjgh7kMgkzDAiYpHg=
go.opentelemetry.io/collector/client v1.30.0/go.mod h1:msXhZlNdAra2fZiyeT0o/xj43Kl1yvF9zYW0r+FhGUI=
go.opentelemetry.io/collector/component v1.34.0 h1:YONg7FaZ5zZbj5cLdARvwtMNuZHunuyxw2fWe5fcWqc=
go.opentelemetry.io/collector/component v1.34.0/go.mod h1:GvolsSVZskXuyfQdwYacqeBSZe/1tg4RJ0YK55KSvDA=
go.opentelemetry.io/collector/component/componentstatus v0.128.0 h1:0lEYHgUQEMMkl5FLtMgDH8lue4B3auElQINzGIWUya4=
go.opentelemetry.io/collector/component/componentstatus v0.128.0/go.mod h1:8vVO6JSV+edmiezJsQzW7aKQ7sFLIN6S3JawKBI646o=
go.opentelemetry.io/collector/component/componenttest v0.128.0 h1:MGNh5lQQ0Qmz2SmNwOqLJYaWMDkMLYj/51wjMzTBR34=
go.opentelemetry.io/collector/component/componenttest v0.128.0/go.mod h1:hALNxcacqOaX/Gm/dE7sNOxAEFj41SbRqtvF57Yd6gs=
go.opentelemetry.io/collector/config/configauth v0.124.0 h1:Qcu800axWnpX0xRfW+9Jyos9+GTR6m7gTIF1udEihEo=
go.opentelemetry.io/collector/config/configauth v0.124.0/go.mod h1:Hz5PQnTvNk2yFp50rzf85H3k0MkdwEBdYUxhpRZn75E=
go.opentelemetry.io/collector/config/configcompression v1.30.0 h1:NKbywIEfL2PBiKnm9F2X2tbPNO0WzOQY08yWmndI3uM=
go.opentelemetry.io/collector/config/configcompression v1.30.0/go.mod h1:QwbNpaOl6Me+wd0EdFuEJg0Cc+WR42HNjJtdq4TwE6w=
go.opentelemetry.io/collector/config/confighttp v0.124.0 h1:W75DaPeLUuGbJtX3cTXOK0b53S5zrUsh6g5UfB6Wzsw=
go.opentelemetry.io/collector/config/confighttp v0.124.0/go.mod h1:hiTu8HFgnzSitrogLz1urQn/+FzNzarqYk4BICy/ABs=
go.opentelemetry.io/collector/config/configopaque v1.30.0 h1:vR2UxmzLwmkmQwyh16w8MyLODKdpNVKh0L3JFOZKzQ8=
go.opentelemetry.io/collector/config/configopaque v1.30.0/go.mod h1:GYQiC8IejBcwE8z0O4DwbBR/Hf6U7d8DTf+cszyqwFs=
go.opentelemetry.io/collector/config/configretry v1.30.0 h1:sapni1tymwNiuI0PjqlRR5CvYxIQYT8tyjQGVJDkVPM=
go.opentelemetry.io/collector/config/configretry v1.30.0/go.mod h1:QNnb+MCk7aS1k2EuGJMtlNCltzD7b8uC7Xel0Dxm1wQ=
go.opentelemetry.io/collector/config/configtelemetry v0.124.0 h1:KIg5wlHKp8nI5g/hAWZug9fE5MlPZwkRP2ZHOi4I6FU=
go.opentelemetry.io/collector/config/configtelemetry v0.124.0/go.mod h1:WXmlNatI0vwjv7whh/qF1Xy+UufCZDk7VLtYqML7QmA=
go.opentelemetry.io/collector/config/configtls v1.30.0 h1:wLTRV5hn/FWKWNjZ/9/ckkeD2mqWzAtwzP1kQv1YZZE=
go.opentelemetry.io/collector/config/configtls v1.30.0/go.mod h1:yCM4ZYkLvc1VjpT/1DQIVoGmzEBHOhZltYQ7A30BMyM=
go.opentelemetry.io/collector/confmap v1.30.0 h1:Y0MXhjQCdMyJN9xZMWWdNPWs6ncMVf7YVnyAEN2dAcM=
go.opentelemetry.io/collector/confmap v1.30.0/go.mod h1:9DdThVDIC3VsdtTb7DgT+HwusWOocoqDkd/TErEtQgA=
go.opentelemetry.io/collector/confmap/converter/expandconverter v0.111.0 h1:FlrfejpK6J+OytGuYEElrVZGjP4D3mTQUcqe/tkIMZQ=
go.opentelemetry.io/collector/confmap/converter/expandconverter v0.111.0/go.mod h1:7wnSpMS3KE6wBUG8OhQELPBJod5gV6SgSbJEEpBwlR0=
go.opentelemetry.io/collector/confmap/provider/fileprovider v1.17.0 h1:UyMO2ddtO7GKuFjrkR51IxmeBuRJrb1KKatu60oosxI=
go.opentelemetry.io/collector/confmap/provider/fileprovider v1.17.0/go.mod h1:SCJ8zvuuaOwQJk+zI87XSuc+HbquP2tsYb9aPlfeeRg=
go.opentelemetry.io/collector/connector v0.111.0 h1:dOaJRO27LyX4ZnkZA51namo2V5idRWvWoMVf4b7obro=
go.opentelemetry.io/collector/connector v0.111.0/go.mod h1:gPwxA1SK+uraSTpX20MG/cNc+axhkBm8+B6z6hh6hYg=
go.opentelemetry.io/collector/connector/connectorprofiles v0.111.0 h1:tJ4+hcWRhknw+cRw6d6dI4CyX3/puqnd1Rg9+mWdwHU=
go.opentelemetry.io/collector/connector/connectorprofiles v0.111.0/go.mod h1:LdfE8hNYcEb+fI5kZp4w3ZGlTLFAmvHAPtTZxS6TZ38=
go.opentelemetry.io/collector/consumer v0.111.0 h1:d2kRTDnu+p0q4D5fTU+Pk59KRm5F2JRYrk30Ep5j0xI=
go.opentelemetry.io/collector/consumer v0.111.0/go.mod h1:FjY9bPbVkFZLKKxnNbGsIqaz3lcFDKGf+7wxA1uCugs=
go.opentelemetry.io/collector/consumer/consumerprofiles v0.111.0 h1:w9kGdTaXdwD/ZtbxVOvuYQEFKBX3THQgEz/enQnMt9s=
go.opentelemetry.io/collector/consumer/consumerprofiles v0.111.0/go.mod h1:Ebt1jDdrQb3G2sNHrWHNr5wS3UJ9k3h8LHCqUPTbxLY=
go.opentelemetry.io/collector/consumer/consumertest v0.111.0 h1:ZEikGRPdrhVAq7xhJVc8WapRBVN/CdPnMEnXgpRGu1U=
go.opentelemetry.io/collector/consumer/consumertest v0.111.0/go.mod h1:EHPrn8ovcTGdTDlCEi1grOXSP3jUUYU0zvl92uA5L+4=
go.opentelemetry.io/collector/exporter v0.111.0 h1:NpiP6xXGOmSi59RlB5gGTB+PtCLldVeK3vCQBJPW0sU=
go.opentelemetry.io/collector/exporter v0.111.0/go.mod h1:FjO80zGWZjqXil8vM1MS8gyxxzZ29WmChTNV2y9xjHo=
go.opentelemetry.io/collector/exporter/exporterprofiles v0.111.0 h1:fpIRPzqsaEtbVip/wsU6h/GMGISo7UjiiYV61MOMEpQ=
go.opentelemetry.io/collector/exporter/exporterprofiles v0.111.0/go.mod h1:NGUTQd1fminFnw289fVQFN4dxdyedK4GTTrJUc9gCtw=
go.opentelemetry.io/collector/extension v0.111.0 h1:oagGQS3k6Etnm5N5OEkfIWrX4/77t/ZP+B0xfTPUVm8=
go.opentelemetry.io/collector/extension v0.111.0/go.mod h1:ELCpDNpS2qb/31Z8pCMmqTkzfnUV3CanQZMwLW+GCMI=
go.opentelemetry.io/collector/extension/auth v0.111.0 h1:V9DfnMsKdVfsQMeGR5H/nAYHlZnr1Td75kkJOKbCevk=
go.opentelemetry.io/collector/extension/auth v0.111.0/go.mod h1:4O5JQqEdAWuq4giicIy6DKlgkKTC0qgVEJm44RhviZY=
go.opentelemetry.io/collector/confmap/provider/fileprovider v1.30.0 h1:9wgc3fVrdQWrE/gcGnMF/SjCsMvEwY7AXZPV0OBEO/4=
go.opentelemetry.io/collector/confmap/provider/fileprovider v1.30.0/go.mod h1:Mmkoh8935XSM/QtZ7k1Wis2isGToCFgjGv+O8LrLWos=
go.opentelemetry.io/collector/confmap/provider/yamlprovider v1.30.0 h1:aKu87mHavpsGfkr3E/I4L8C4knvfhXgZ04uQ7xf9Pko=
go.opentelemetry.io/collector/confmap/provider/yamlprovider v1.30.0/go.mod h1:ip1d328KzF40TpDMuHg/8UWnLsII1m6Z91UaM9UWiXs=
go.opentelemetry.io/collector/confmap/xconfmap v0.124.0 h1:PK+CaSgjLvzHaafBieJ3AjiUTAPuf40C+/Fn38LvmW8=
go.opentelemetry.io/collector/confmap/xconfmap v0.124.0/go.mod h1:DZmFSgWiqXQrzld9uU+73YAVI5JRIgd8RkK5HcaXGU0=
go.opentelemetry.io/collector/connector v0.124.0 h1:/Wk8A4gOqjhE+WvKCMqCFhzUIvSi3sdN3RGvopjD6SY=
go.opentelemetry.io/collector/connector v0.124.0/go.mod h1:dnYcXgUZp8ZmT7nbBPf38+mP2DD3T47m9jyGbdaCEXc=
go.opentelemetry.io/collector/connector/connectortest v0.124.0 h1:gAD2jt7Th6DD8tDTU72Sv2xXvqJEGSjfncr9nTSVCg8=
go.opentelemetry.io/collector/connector/connectortest v0.124.0/go.mod h1:0017vT2aCY1NmYXEepxvEfMA9YufKUoBM3/qtD6k9UM=
go.opentelemetry.io/collector/connector/xconnector v0.124.0 h1:rdjwSfajHjJVRznw/NKGGzY0PKBTKBypZngGxOaJuEg=
go.opentelemetry.io/collector/connector/xconnector v0.124.0/go.mod h1:rOhdUXPzTZbJ2L8VV43r7Rz/ZBfgWxQ+RI9mcqlzz5g=
go.opentelemetry.io/collector/consumer v1.34.0 h1:oBhHH6mgViOGhVDPozE+sUdt7jFBo2Hh32lsSr2L3Tc=
go.opentelemetry.io/collector/consumer v1.34.0/go.mod h1:DVMCb56ZBlPNcmo0lSJKn3rp18oyZQCedRE4GKIMI+Q=
go.opentelemetry.io/collector/consumer/consumererror v0.128.0 h1:3htkWoHwXZ801ORmGeORdcMGqJHEbwdjaWhIj4LNbxw=
go.opentelemetry.io/collector/consumer/consumererror v0.128.0/go.mod h1:v3eUnvuIBSV2yBWiWoZELV1jki7HFMttWeBF311XIU0=
go.opentelemetry.io/collector/consumer/consumertest v0.128.0 h1:x50GB0I/QvU3sQuNCap5z/P2cnq2yHoRJ/8awkiT87w=
go.opentelemetry.io/collector/consumer/consumertest v0.128.0/go.mod h1:Wb3IAbMY/DOIwJPy81PuBiW2GnKoNIz4THE7wfJwovE=
go.opentelemetry.io/collector/consumer/xconsumer v0.128.0 h1:4E+KTdCjkRS3SIw0bsv5kpv9XFXHf8x9YiPEuxBVEHY=
go.opentelemetry.io/collector/consumer/xconsumer v0.128.0/go.mod h1:OmzilL/qbjCzPMHay+WEA7/cPe5xuX7Jbj5WPIpqaMo=
go.opentelemetry.io/collector/exporter v0.124.0 h1:ii+9tU/iSrPl4+YDvqFVflksA9hUYEzwMIpmvP4JZ8w=
go.opentelemetry.io/collector/exporter v0.124.0/go.mod h1:Q8tOEwFu3CN8VGjE4H2yZcCRG9Q60foQIyZGKPD/jig=
go.opentelemetry.io/collector/exporter/exportertest v0.124.0 h1:IOxA/4CiVWGPlmA0JofK6W4DzvwW1YJes09r6osluIE=
go.opentelemetry.io/collector/exporter/exportertest v0.124.0/go.mod h1:2EmU8IwVJV79MmFBFFW1LCN0Ob2UZsEkX/mSUB06lbI=
go.opentelemetry.io/collector/exporter/xexporter v0.124.0 h1:Itfn2+F4ki8hObOtPCecWBwGpuxakUYSsTwwkB5iUns=
go.opentelemetry.io/collector/exporter/xexporter v0.124.0/go.mod h1:dNK/PPY02gA9BawIKHyVk8kIFdYvqVZ2A+LlMZucIPY=
go.opentelemetry.io/collector/extension v1.30.0 h1:AJqntAp1p40Q1az2Vze3OHiMURq56KWnUxaLzs1ghaA=
go.opentelemetry.io/collector/extension v1.30.0/go.mod h1:a21WpypFQp9x0Go7yMOknYmIKvdIoWGzjz+h1WMjzLk=
go.opentelemetry.io/collector/extension/experimental/storage v0.111.0 h1:kUJSFjm6IQ6nmcJlfSFPvcEO/XeOP9gJY0Qz9O98DKg=
go.opentelemetry.io/collector/extension/experimental/storage v0.111.0/go.mod h1:qQGvl8Kz2W8b7QywtE8GNqWJMDBo47cjoiIXYuE+/zM=
go.opentelemetry.io/collector/extension/extensioncapabilities v0.111.0 h1:Ps2/2TUbAkxgZu1YxSxDweZDLJx5x7CyNKCINZkLFtY=
go.opentelemetry.io/collector/extension/extensioncapabilities v0.111.0/go.mod h1:q4kBSWsOX62hAp7si+Y0Y0ZXWyCpXjiRuWWz7IL/MDI=
go.opentelemetry.io/collector/extension/zpagesextension v0.111.0 h1:X+YXkJ3kX8c3xN/Mfiqc/gKB7NaQnG4Cge9R60lKOyw=
go.opentelemetry.io/collector/extension/zpagesextension v0.111.0/go.mod h1:v5u5Ots6HgbhKsvRXB+SF9cmVTgkUATNiejHbpsa0rY=
go.opentelemetry.io/collector/featuregate v1.17.0 h1:vpfXyWe7DFqCsDArsR9rAKKtVpt72PKjzjeqPegViws=
go.opentelemetry.io/collector/featuregate v1.17.0/go.mod h1:47xrISO71vJ83LSMm8+yIDsUbKktUp48Ovt7RR6VbRs=
go.opentelemetry.io/collector/internal/globalgates v0.111.0 h1:pPf/U401i/bEJ8ucbYMyqOdkujyZ92Gbm6RFkJrDvBc=
go.opentelemetry.io/collector/internal/globalgates v0.111.0/go.mod h1:HqIBKc8J5Vccn93gkN1uaVK42VbVsuVyjmo5b1MORZo=
go.opentelemetry.io/collector/internal/globalsignal v0.111.0 h1:oq0nSD+7K2Q1Fx5d3s6lPRdKZeTL0FEg4sIaR7ZJzIc=
go.opentelemetry.io/collector/internal/globalsignal v0.111.0/go.mod h1:GqMXodPWOxK5uqpX8MaMXC2389y2XJTa5nPwf8FYDK8=
go.opentelemetry.io/collector/otelcol v0.111.0 h1:RcS1/BDsEBGdI4YjosdElxYwsA2tTtiYEuWjEF0p8vk=
go.opentelemetry.io/collector/otelcol v0.111.0/go.mod h1:B/ri/CwsW7zeLXkCcB3XtarxjJ80eIC+z8guGhFFpis=
go.opentelemetry.io/collector/pdata v1.17.0 h1:z8cjjT2FThAehWu5fbF48OnZyK5q8xd1UhC4XszDo0w=
go.opentelemetry.io/collector/pdata v1.17.0/go.mod h1:yZaQ9KZAm/qie96LTygRKxOXMq0/54h8OW7330ycuvQ=
go.opentelemetry.io/collector/pdata/pprofile v0.111.0 h1:4if6rItcX8a6X4bIh6lwQnlE+ncKXQaIim7F5O7ZA58=
go.opentelemetry.io/collector/pdata/pprofile v0.111.0/go.mod h1:iBwrNFB6za1qspy46ZE41H3MmcxUogn2AuYbrWdoMd8=
go.opentelemetry.io/collector/pdata/testdata v0.111.0 h1:Fqyf1NJ0az+HbsvKSCNw8pfa1Y6c4FhZwlMK4ZulG0s=
go.opentelemetry.io/collector/pdata/testdata v0.111.0/go.mod h1:7SypOzbVtRsCkns6Yxa4GztnkVGkk7b9fW24Ow75q5s=
go.opentelemetry.io/collector/pipeline v0.111.0 h1:qENDGvWWnDXguEfmj8eO+5kr8Y6XFKytU5SuMinz3Ls=
go.opentelemetry.io/collector/pipeline v0.111.0/go.mod h1:ZZMU3019geEU283rTW5M/LkcqLqHp/YI2Nl6/Vp68PQ=
go.opentelemetry.io/collector/processor v0.111.0 h1:85Llb9ekzzvzAXgFaw/n7LHFJ5QAjeOulGJlDLEAR3g=
go.opentelemetry.io/collector/processor v0.111.0/go.mod h1:78Z4f96j9trPFZIRCiQk6nVRo6vua4cW9VYNfHTBsvo=
go.opentelemetry.io/collector/processor/processorprofiles v0.111.0 h1:QxnwbqClJvS7zDWgsIaqqDs5YsmHgFvmZKQsmoLTqJM=
go.opentelemetry.io/collector/processor/processorprofiles v0.111.0/go.mod h1:8qPd8Af0XX7Wlupe8JHmdhkKMiiJ5AO7OEFYW3fN0CQ=
go.opentelemetry.io/collector/receiver v0.111.0 h1:6cRHZ9cUxYfRPkArUCkIhoo7Byf6tq/2qvbMIKlhG3s=
go.opentelemetry.io/collector/receiver v0.111.0/go.mod h1:QSl/n9ikDP+6n39QcRY/VLjwQI0qbT1RQp512uBQl3g=
go.opentelemetry.io/collector/receiver/receiverprofiles v0.111.0 h1:oYLAdGMQQR7gB6wVkbV0G4EMsrmiOs3O0qf3hh/3avw=
go.opentelemetry.io/collector/receiver/receiverprofiles v0.111.0/go.mod h1:M/OfdEGnvyB+fSTSW4RPKj5N06FXL8oKSIf60FlrKmM=
go.opentelemetry.io/collector/semconv v0.116.0 h1:63xCZomsKJAWmKGWD3lnORiE3WKW6AO4LjnzcHzGx3Y=
go.opentelemetry.io/collector/semconv v0.116.0/go.mod h1:N6XE8Q0JKgBN2fAhkUQtqK9LT7rEGR6+Wu/Rtbal1iI=
go.opentelemetry.io/collector/service v0.111.0 h1:6yGjjbZvlYbir+vzi/9ACF965m8i96ScPTjpVvki3ms=
go.opentelemetry.io/collector/service v0.111.0/go.mod h1:tti8TAosPuRj51/bbrSvf6OIJoSyTkywEvTdY/fAuwY=
go.opentelemetry.io/collector/extension/extensionauth v1.30.0 h1:HfNT4F1LDEyuItoHq01LrPiUmMpfc5LnOfE4OYVSghA=
go.opentelemetry.io/collector/extension/extensionauth v1.30.0/go.mod h1:bVWkWyyd0aCYu+x6q4HdezfzL0QAqlq5PO7NwckXe4s=
go.opentelemetry.io/collector/extension/extensioncapabilities v0.124.0 h1:6emRXUQriceBcrwRDf2MPQQMRu7jmP0Z0XaJ4zdjt+I=
go.opentelemetry.io/collector/extension/extensioncapabilities v0.124.0/go.mod h1:ovL3wgZuQ8/U7UxUBTqqGSYQTPxHPHmj4P+kp/zb0hA=
go.opentelemetry.io/collector/extension/extensiontest v0.124.0 h1:pWfKxEqvq5vVdQy+UIerw5j7ZrXezxsfQfuUe8g/xVo=
go.opentelemetry.io/collector/extension/extensiontest v0.124.0/go.mod h1:DLVRyW7tJt8TtYq0Wr5BUsM494YqDiIjN8YCmbVKqjs=
go.opentelemetry.io/collector/extension/xextension v0.124.0 h1:Yzf11HXaiMHfS50Zy/CYKfJjoi+/w/tgRZdDQ2VIdW0=
go.opentelemetry.io/collector/extension/xextension v0.124.0/go.mod h1:GeM0aSgwVSba3Bvvspuy1E+1aa/Q1CDxoK+e/xcJFVg=
go.opentelemetry.io/collector/extension/zpagesextension v0.124.0 h1:Vr4y4hCaUr3jKLmCzldVkTgtkvcGwDp4PpIkCDDN9c8=
go.opentelemetry.io/collector/extension/zpagesextension v0.124.0/go.mod h1:822rM4VYcfywso0CH7wUQV8T/4ZfSrtOA65z4FaY8Mk=
go.opentelemetry.io/collector/featuregate v1.34.0 h1:zqDHpEYy1UeudrfUCvlcJL2t13dXywrC6lwpNZ5DrCU=
go.opentelemetry.io/collector/featuregate v1.34.0/go.mod h1:Y/KsHbvREENKvvN9RlpiWk/IGBK+CATBYzIIpU7nccc=
go.opentelemetry.io/collector/internal/fanoutconsumer v0.124.0 h1:8+xc3OxriK1nZNBApFCzF7lszXyBQxyJ/Nnzy5Q4hCM=
go.opentelemetry.io/collector/internal/fanoutconsumer v0.124.0/go.mod h1:CoT5fVYpTT4RWUE9DihSMlxXqGP/VnILnBBGld8Bu6o=
go.opentelemetry.io/collector/internal/telemetry v0.128.0 h1:ySEYWoY7J8DAYdlw2xlF0w+ODQi3AhYj7TRNflsCbx8=
go.opentelemetry.io/collector/internal/telemetry v0.128.0/go.mod h1:572B/iJqjauv3aT+zcwnlNWBPqM7+KqrYGSUuOAStrM=
go.opentelemetry.io/collector/otelcol v0.124.0 h1:q/+ebTZgEZX+yFbvO7FeqpEtvtRPJ+YzZzHsVzqA71s=
go.opentelemetry.io/collector/otelcol v0.124.0/go.mod h1:mFGJZn5YuffdMVO/lPBavbW+R64Dgd3jOMgw2WAmJEM=
go.opentelemetry.io/collector/pdata v1.34.0 h1:2vwYftckXe7pWxI9mfSo+tw3wqdGNrYpMbDx/5q6rw8=
go.opentelemetry.io/collector/pdata v1.34.0/go.mod h1:StPHMFkhLBellRWrULq0DNjv4znCDJZP6La4UuC+JHI=
go.opentelemetry.io/collector/pdata/pprofile v0.128.0 h1:6DEtzs/liqv/ukz2EHbC5OMaj2V6K2pzuj/LaRg2YmY=
go.opentelemetry.io/collector/pdata/pprofile v0.128.0/go.mod h1:bVVRpz+zKFf1UCCRUFqy8LvnO3tHlXKkdqW2d+Wi/iA=
go.opentelemetry.io/collector/pdata/testdata v0.128.0 h1:5xcsMtyzvb18AnS2skVtWreQP1nl6G3PiXaylKCZ6pA=
go.opentelemetry.io/collector/pdata/testdata v0.128.0/go.mod h1:9/VYVgzv3JMuIyo19KsT3FwkVyxbh3Eg5QlabQEUczA=
go.opentelemetry.io/collector/pipeline v0.128.0 h1:WgNXdFbyf/QRLy5XbO/jtPQosWrSWX/TEnSYpJq8bgI=
go.opentelemetry.io/collector/pipeline v0.128.0/go.mod h1:TO02zju/K6E+oFIOdi372Wk0MXd+Szy72zcTsFQwXl4=
go.opentelemetry.io/collector/pipeline/xpipeline v0.124.0 h1:ADHUrozlIgSDjXMsAC5t8l4p9TVo+QH33XArFfcL9ns=
go.opentelemetry.io/collector/pipeline/xpipeline v0.124.0/go.mod h1:ep7XJFdCEq04/5yUyiWWzgKvBYMwRJR5XNWmGpIGbVQ=
go.opentelemetry.io/collector/processor v1.34.0 h1:5pwXIG12XXxdkJ8F68e2cBEjEnFlCIAZhqEYM7vjkqE=
go.opentelemetry.io/collector/processor v1.34.0/go.mod h1:VCl4vYj2tdO4APUcr0q6Eh796mqCCsH9Z/gqaPuzlUs=
go.opentelemetry.io/collector/processor/processorhelper v0.128.0 h1:e4/BDrPtoEkqEbV6Vmg7qqnHnEjgrwlE2DLVuftDBDY=
go.opentelemetry.io/collector/processor/processorhelper v0.128.0/go.mod h1:MKGXgWMuy4xQ6AL094RVXVHb3HZ4NFmW0azNsOzQB44=
go.opentelemetry.io/collector/processor/processortest v0.128.0 h1:xPhOSmGFDGqhC3/nu1BqPSE6EpDPAf1/F+BfaYjDn/8=
go.opentelemetry.io/collector/processor/processortest v0.128.0/go.mod h1:XXXom+mbAQtrkcvq4Ecd6n8RQoVgcfLe1vrUlr6U2gI=
go.opentelemetry.io/collector/processor/xprocessor v0.128.0 h1:ObbtdXab0is6bdt4XabsRJZ+SUTuwQjPVlHTbmScfNg=
go.opentelemetry.io/collector/processor/xprocessor v0.128.0/go.mod h1:/nHXW15nzwSRQ+25Cb+r17he/uMtCEvSOBGqpDbn3Uk=
go.opentelemetry.io/collector/receiver v1.34.0 h1:un6iRBXZBz1zacEmyfDCVDDaBY4GP3TQLwor894fywg=
go.opentelemetry.io/collector/receiver v1.34.0/go.mod h1:4J9xhbXJiI/rYlvlMTskXRGbwFeczJiCkW5R2YfTe88=
go.opentelemetry.io/collector/receiver/receiverhelper v0.128.0 h1:/aCWmmj9tNGY6U16fYXscJEF9BOMrzrWkLmgQbBXYfs=
go.opentelemetry.io/collector/receiver/receiverhelper v0.128.0/go.mod h1:wwSFr/7jjv7yNBnH03wpiurnJiWjaJX9Y7Oj3XfhRYw=
go.opentelemetry.io/collector/receiver/receivertest v0.124.0 h1:mx0290aXAo+wfjm4NgbKUodjT5SbS306zmk+AeqeVxE=
go.opentelemetry.io/collector/receiver/receivertest v0.124.0/go.mod h1:3RpopRmIzx5T4zTStHJC0HHfd8YFWm8e9bia1HiuDtY=
go.opentelemetry.io/collector/receiver/xreceiver v0.124.0 h1:YigTUKk8p/aIfqaT0ST7teT9KbLThWD5n2km83byftw=
go.opentelemetry.io/collector/receiver/xreceiver v0.124.0/go.mod h1:NkTpmpAEDT17Dko4gpHUnRztrSkdSd6B0+Y4gfuCWIA=
go.opentelemetry.io/collector/semconv v0.124.0 h1:YTdo3UFwNyDQCh9DiSm2rbzAgBuwn/9dNZ0rv454goA=
go.opentelemetry.io/collector/semconv v0.124.0/go.mod h1:te6VQ4zZJO5Lp8dM2XIhDxDiL45mwX0YAQQWRQ0Qr9U=
go.opentelemetry.io/collector/service v0.124.0 h1:lUpizko/Y2P+XXbZ9wiKM8acLSt6ZIvC3/6/j6rcq4w=
go.opentelemetry.io/collector/service v0.124.0/go.mod h1:w2eL3KKOMW4CvqCWyZ3P/Qh1ZBEPGG/uRz/0LpHbpv0=
go.opentelemetry.io/collector/service/hostcapabilities v0.124.0 h1:ArxbARF7+bnzK8xLnN2G41KInbcN1aGhSBR76VeUQi8=
go.opentelemetry.io/collector/service/hostcapabilities v0.124.0/go.mod h1:vifQsB+lkeCsjBCRPVHca9lJ3pLpLPZKCGrG77nkxFQ=
go.opentelemetry.io/contrib/bridges/otelzap v0.11.0 h1:u2E32P7j1a/gRgZDWhIXC+Shd4rLg70mnE7QLI/Ssnw=
go.opentelemetry.io/contrib/bridges/otelzap v0.11.0/go.mod h1:pJPCLM8gzX4ASqLlyAXjHBEYxgbOQJ/9bidWxD6PEPQ=
go.opentelemetry.io/contrib/config v0.10.0 h1:2JknAzMaYjxrHkTnZh3eOme/Y2P5eHE2SWfhfV6Xd6c=
go.opentelemetry.io/contrib/config v0.10.0/go.mod h1:aND2M6/KfNkntI5cyvHriR/zvZgPf8j9yETdSmvpfmc=
go.opentelemetry.io/contrib/instrumentation/net/http/httptrace/otelhttptrace v0.56.0 h1:4BZHA+B1wXEQoGNHxW8mURaLhcdGwvRnmhGbm+odRbc=
go.opentelemetry.io/contrib/instrumentation/net/http/httptrace/otelhttptrace v0.56.0/go.mod h1:3qi2EEwMgB4xnKgPLqsDP3j9qxnHDZeHsnAxfjQqTko=
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.58.0 h1:yd02MEjBdJkG3uabWP9apV+OuWRIXGDuJEUJbOHmCFU=
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.58.0/go.mod h1:umTcuxiv1n/s/S6/c2AT/g2CQ7u5C59sHDNmfSwgz7Q=
go.opentelemetry.io/contrib/propagators/b3 v1.30.0 h1:vumy4r1KMyaoQRltX7cJ37p3nluzALX9nugCjNNefuY=
go.opentelemetry.io/contrib/propagators/b3 v1.30.0/go.mod h1:fRbvRsaeVZ82LIl3u0rIvusIel2UUf+JcaaIpy5taho=
go.opentelemetry.io/contrib/zpages v0.55.0 h1:F+xj261Ulwl79QC+2O+IO1b3NbwppUDwN+7LbDSdQcY=
go.opentelemetry.io/contrib/zpages v0.55.0/go.mod h1:dDqDGDfbXSjt/k9orZk4Huulvz1letX1YWTKts5GQpo=
go.opentelemetry.io/otel v1.34.0 h1:zRLXxLCgL1WyKsPVrgbSdMN4c0FMkDAskSTQP+0hdUY=
go.opentelemetry.io/otel v1.34.0/go.mod h1:OWFPOQ+h4G8xpyjgqo4SxJYdDQ/qmRH+wivy7zzx9oI=
go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp v0.6.0 h1:QSKmLBzbFULSyHzOdO9JsN9lpE4zkrz1byYGmJecdVE=
go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp v0.6.0/go.mod h1:sTQ/NH8Yrirf0sJ5rWqVu+oT82i4zL9FaF6rWcqnptM=
go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc v1.30.0 h1:WypxHH02KX2poqqbaadmkMYalGyy/vil4HE4PM4nRJc=
go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc v1.30.0/go.mod h1:U79SV99vtvGSEBeeHnpgGJfTsnsdkWLpPN/CcHAzBSI=
go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp v1.30.0 h1:VrMAbeJz4gnVDg2zEzjHG4dEH86j4jO6VYB+NgtGD8s=
go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp v1.30.0/go.mod h1:qqN/uFdpeitTvm+JDqqnjm517pmQRYxTORbETHq5tOc=
go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.33.0 h1:Vh5HayB/0HHfOQA7Ctx69E/Y/DcQSMPpKANYVMQ7fBA=
go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.33.0/go.mod h1:cpgtDBaqD/6ok/UG0jT15/uKjAY8mRA53diogHBg3UI=
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.33.0 h1:5pojmb1U1AogINhN3SurB+zm/nIcusopeBNp42f45QM=
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.33.0/go.mod h1:57gTHJSE5S1tqg+EKsLPlTWhpHMsWlVmer+LA926XiA=
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.33.0 h1:wpMfgF8E1rkrT1Z6meFh1NDtownE9Ii3n3X2GJYjsaU=
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.33.0/go.mod h1:wAy0T/dUbs468uOlkT31xjvqQgEVXv58BRFWEgn5v/0=
go.opentelemetry.io/otel/exporters/prometheus v0.52.0 h1:kmU3H0b9ufFSi8IQCcxack+sWUblKkFbqWYs6YiACGQ=
go.opentelemetry.io/otel/exporters/prometheus v0.52.0/go.mod h1:+wsAp2+JhuGXX7YRkjlkx6hyWY3ogFPfNA4x3nyiAh0=
go.opentelemetry.io/otel/exporters/stdout/stdoutlog v0.6.0 h1:bZHOb8k/CwwSt0DgvgaoOhBXWNdWqFWaIsGTtg1H3KE=
go.opentelemetry.io/otel/exporters/stdout/stdoutlog v0.6.0/go.mod h1:XlV163j81kDdIt5b5BXCjdqVfqJFy/LJrHA697SorvQ=
go.opentelemetry.io/otel/exporters/stdout/stdoutmetric v1.30.0 h1:IyFlqNsi8VT/nwYlLJfdM0y1gavxGpEvnf6FtVfZ6X4=
go.opentelemetry.io/otel/exporters/stdout/stdoutmetric v1.30.0/go.mod h1:bxiX8eUeKoAEQmbq/ecUT8UqZwCjZW52yJrXJUSozsk=
go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.30.0 h1:kn1BudCgwtE7PxLqcZkErpD8GKqLZ6BSzeW9QihQJeM=
go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.30.0/go.mod h1:ljkUDtAMdleoi9tIG1R6dJUpVwDcYjw3J2Q6Q/SuiC0=
go.opentelemetry.io/otel/log v0.10.0 h1:1CXmspaRITvFcjA4kyVszuG4HjA61fPDxMb7q3BuyF0=
go.opentelemetry.io/otel/log v0.10.0/go.mod h1:PbVdm9bXKku/gL0oFfUF4wwsQsOPlpo4VEqjvxih+FM=
go.opentelemetry.io/otel/metric v1.34.0 h1:+eTR3U0MyfWjRDhmFMxe2SsW64QrZ84AOhvqS7Y+PoQ=
go.opentelemetry.io/otel/metric v1.34.0/go.mod h1:CEDrp0fy2D0MvkXE+dPV7cMi8tWZwX3dmaIhwPOaqHE=
go.opentelemetry.io/otel/sdk v1.34.0 h1:95zS4k/2GOy069d321O8jWgYsW3MzVV+KuSPKp7Wr1A=
go.opentelemetry.io/otel/sdk v1.34.0/go.mod h1:0e/pNiaMAqaykJGKbi+tSjWfNNHMTxoC9qANsCzbyxU=
go.opentelemetry.io/otel/sdk/log v0.10.0 h1:lR4teQGWfeDVGoute6l0Ou+RpFqQ9vaPdrNJlST0bvw=
go.opentelemetry.io/otel/sdk/log v0.10.0/go.mod h1:A+V1UTWREhWAittaQEG4bYm4gAZa6xnvVu+xKrIRkzo=
go.opentelemetry.io/otel/sdk/metric v1.31.0 h1:i9hxxLJF/9kkvfHppyLL55aW7iIJz4JjxTeYusH7zMc=
go.opentelemetry.io/otel/sdk/metric v1.31.0/go.mod h1:CRInTMVvNhUKgSAMbKyTMxqOBC0zgyxzW55lZzX43Y8=
go.opentelemetry.io/otel/trace v1.34.0 h1:+ouXS2V8Rd4hp4580a8q23bg0azF2nI8cqLYnC8mh/k=
go.opentelemetry.io/otel/trace v1.34.0/go.mod h1:Svm7lSjQD7kG7KJ/MUHPVXSDGz2OX4h0M2jHBhmSfRE=
go.opentelemetry.io/contrib/instrumentation/net/http/httptrace/otelhttptrace v0.60.0 h1:0tY123n7CdWMem7MOVdKOt0YfshufLCwfE5Bob+hQuM=
go.opentelemetry.io/contrib/instrumentation/net/http/httptrace/otelhttptrace v0.60.0/go.mod h1:CosX/aS4eHnG9D7nESYpV753l4j9q5j3SL/PUYd2lR8=
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.60.0 h1:sbiXRNDSWJOTobXh5HyQKjq6wUC5tNybqjIqDpAY4CU=
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.60.0/go.mod h1:69uWxva0WgAA/4bu2Yy70SLDBwZXuQ6PbBpbsa5iZrQ=
go.opentelemetry.io/contrib/otelconf v0.15.0 h1:BLNiIUsrNcqhSKpsa6CnhE6LdrpY1A8X0szMVsu99eo=
go.opentelemetry.io/contrib/otelconf v0.15.0/go.mod h1:OPH1seO5z9dp1P26gnLtoM9ht7JDvh3Ws6XRHuXqImY=
go.opentelemetry.io/contrib/propagators/b3 v1.35.0 h1:DpwKW04LkdFRFCIgM3sqwTJA/QREHMeMHYPWP1WeaPQ=
go.opentelemetry.io/contrib/propagators/b3 v1.35.0/go.mod h1:9+SNxwqvCWo1qQwUpACBY5YKNVxFJn5mlbXg/4+uKBg=
go.opentelemetry.io/contrib/zpages v0.60.0 h1:wOM9ie1Hz4H88L9KE6GrGbKJhfm+8F1NfW/Y3q9Xt+8=
go.opentelemetry.io/contrib/zpages v0.60.0/go.mod h1:xqfToSRGh2MYUsfyErNz8jnNDPlnpZqWM/y6Z2Cx7xw=
go.opentelemetry.io/otel v1.36.0 h1:UumtzIklRBY6cI/lllNZlALOF5nNIzJVb16APdvgTXg=
go.opentelemetry.io/otel v1.36.0/go.mod h1:/TcFMXYjyRNh8khOAO9ybYkqaDBb/70aVwkNML4pP8E=
go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc v0.11.0 h1:HMUytBT3uGhPKYY/u/G5MR9itrlSO2SMOsSD3Tk3k7A=
go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc v0.11.0/go.mod h1:hdDXsiNLmdW/9BF2jQpnHHlhFajpWCEYfM6e5m2OAZg=
go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp v0.11.0 h1:C/Wi2F8wEmbxJ9Kuzw/nhP+Z9XaHYMkyDmXy6yR2cjw=
go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp v0.11.0/go.mod h1:0Lr9vmGKzadCTgsiBydxr6GEZ8SsZ7Ks53LzjWG5Ar4=
go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc v1.35.0 h1:QcFwRrZLc82r8wODjvyCbP7Ifp3UANaBSmhDSFjnqSc=
go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc v1.35.0/go.mod h1:CXIWhUomyWBG/oY2/r/kLp6K/cmx9e/7DLpBuuGdLCA=
go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp v1.35.0 h1:0NIXxOCFx+SKbhCVxwl3ETG8ClLPAa0KuKV6p3yhxP8=
go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp v1.35.0/go.mod h1:ChZSJbbfbl/DcRZNc9Gqh6DYGlfjw4PvO1pEOZH1ZsE=
go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.35.0 h1:1fTNlAIJZGWLP5FVu0fikVry1IsiUnXjf7QFvoNN3Xw=
go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.35.0/go.mod h1:zjPK58DtkqQFn+YUMbx0M2XV3QgKU0gS9LeGohREyK4=
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.35.0 h1:m639+BofXTvcY1q8CGs4ItwQarYtJPOWmVobfM1HpVI=
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.35.0/go.mod h1:LjReUci/F4BUyv+y4dwnq3h/26iNOeC3wAIqgvTIZVo=
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.35.0 h1:xJ2qHD0C1BeYVTLLR9sX12+Qb95kfeD/byKj6Ky1pXg=
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.35.0/go.mod h1:u5BF1xyjstDowA1R5QAO9JHzqK+ublenEW/dyqTjBVk=
go.opentelemetry.io/otel/exporters/prometheus v0.57.0 h1:AHh/lAP1BHrY5gBwk8ncc25FXWm/gmmY3BX258z5nuk=
go.opentelemetry.io/otel/exporters/prometheus v0.57.0/go.mod h1:QpFWz1QxqevfjwzYdbMb4Y1NnlJvqSGwyuU0B4iuc9c=
go.opentelemetry.io/otel/exporters/stdout/stdoutlog v0.11.0 h1:k6KdfZk72tVW/QVZf60xlDziDvYAePj5QHwoQvrB2m8=
go.opentelemetry.io/otel/exporters/stdout/stdoutlog v0.11.0/go.mod h1:5Y3ZJLqzi/x/kYtrSrPSx7TFI/SGsL7q2kME027tH6I=
go.opentelemetry.io/otel/exporters/stdout/stdoutmetric v1.35.0 h1:PB3Zrjs1sG1GBX51SXyTSoOTqcDglmsk7nT6tkKPb/k=
go.opentelemetry.io/otel/exporters/stdout/stdoutmetric v1.35.0/go.mod h1:U2R3XyVPzn0WX7wOIypPuptulsMcPDPs/oiSVOMVnHY=
go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.35.0 h1:T0Ec2E+3YZf5bgTNQVet8iTDW7oIk03tXHq+wkwIDnE=
go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.35.0/go.mod h1:30v2gqH+vYGJsesLWFov8u47EpYTcIQcBjKpI6pJThg=
go.opentelemetry.io/otel/log v0.12.2 h1:yob9JVHn2ZY24byZeaXpTVoPS6l+UrrxmxmPKohXTwc=
go.opentelemetry.io/otel/log v0.12.2/go.mod h1:ShIItIxSYxufUMt+1H5a2wbckGli3/iCfuEbVZi/98E=
go.opentelemetry.io/otel/log/logtest v0.0.0-20250526142609-aa5bd0e64989 h1:4JF7oY9CcHrPGfBLijDcXZyCzGckVEyOjuat5ktmQRg=
go.opentelemetry.io/otel/log/logtest v0.0.0-20250526142609-aa5bd0e64989/go.mod h1:NToOxLDCS1tXDSB2dIj44H9xGPOpKr0csIN+gnuihv4=
|
||||
go.opentelemetry.io/otel/metric v1.36.0 h1:MoWPKVhQvJ+eeXWHFBOPoBOi20jh6Iq2CcCREuTYufE=
|
||||
go.opentelemetry.io/otel/metric v1.36.0/go.mod h1:zC7Ks+yeyJt4xig9DEw9kuUFe5C3zLbVjV2PzT6qzbs=
|
||||
go.opentelemetry.io/otel/sdk v1.36.0 h1:b6SYIuLRs88ztox4EyrvRti80uXIFy+Sqzoh9kFULbs=
|
||||
go.opentelemetry.io/otel/sdk v1.36.0/go.mod h1:+lC+mTgD+MUWfjJubi2vvXWcVxyr9rmlshZni72pXeY=
|
||||
go.opentelemetry.io/otel/sdk/log v0.11.0 h1:7bAOpjpGglWhdEzP8z0VXc4jObOiDEwr3IYbhBnjk2c=
|
||||
go.opentelemetry.io/otel/sdk/log v0.11.0/go.mod h1:dndLTxZbwBstZoqsJB3kGsRPkpAgaJrWfQg3lhlHFFY=
|
||||
go.opentelemetry.io/otel/sdk/metric v1.36.0 h1:r0ntwwGosWGaa0CrSt8cuNuTcccMXERFwHX4dThiPis=
|
||||
go.opentelemetry.io/otel/sdk/metric v1.36.0/go.mod h1:qTNOhFDfKRwX0yXOqJYegL5WRaW376QbB7P4Pb0qva4=
|
||||
go.opentelemetry.io/otel/trace v1.36.0 h1:ahxWNuqZjpdiFAyrIoQ4GIiAIhxAunQR6MUoKrsNd4w=
|
||||
go.opentelemetry.io/otel/trace v1.36.0/go.mod h1:gQ+OnDZzrybY4k4seLzPAWNwVBBVlF2szhehOBB/tGA=
|
||||
go.opentelemetry.io/proto/otlp v0.7.0/go.mod h1:PqfVotwruBrMGOCsRd/89rSnXhoiJIqeYNgFYFoEGnI=
|
||||
go.opentelemetry.io/proto/otlp v1.4.0 h1:TA9WRvW6zMwP+Ssb6fLoUIuirti1gGbP28GcKG1jgeg=
|
||||
go.opentelemetry.io/proto/otlp v1.4.0/go.mod h1:PPBWZIP98o2ElSqI35IHfu7hIhSwvc5N38Jw8pXuGFY=
|
||||
go.opentelemetry.io/proto/otlp v1.5.0 h1:xJvq7gMzB31/d406fB8U5CBdyQGw4P399D1aQWU/3i4=
|
||||
go.opentelemetry.io/proto/otlp v1.5.0/go.mod h1:keN8WnHxOy8PG0rQZjJJ5A2ebUoafqWp0eVQ4yIXvJ4=
|
||||
go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc=
|
||||
go.uber.org/atomic v1.9.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc=
|
||||
go.uber.org/atomic v1.11.0 h1:ZvwS0R+56ePWxUNi+Atn9dWONBPp/AUETXlHW0DxSjE=
|
||||
@@ -1138,8 +1181,8 @@ golang.org/x/exp v0.0.0-20191227195350-da58074b4299/go.mod h1:2RIsYlXP63K8oxa1u0
|
||||
golang.org/x/exp v0.0.0-20200119233911-0405dc783f0a/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4=
|
||||
golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EHIKF9dgMWnmCNThgcyBT1FY9mM=
|
||||
golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6/go.mod h1:3jZMyOhIsHpP37uCMkUooju7aAi5cS1Q23tOzKc+0MU=
|
||||
golang.org/x/exp v0.0.0-20240506185415-9bf2ced13842 h1:vr/HnozRka3pE4EsMEg1lgkXJkTFJCVUX+S/ZT6wYzM=
|
||||
golang.org/x/exp v0.0.0-20240506185415-9bf2ced13842/go.mod h1:XtvwrStGgqGPLc4cjQfWqZHG1YFdYs6swckp8vpsjnc=
|
||||
golang.org/x/exp v0.0.0-20250106191152-7588d65b2ba8 h1:yqrTHse8TCMW1M1ZCP+VAR/l0kKxwaAIqN/il7x4voA=
|
||||
golang.org/x/exp v0.0.0-20250106191152-7588d65b2ba8/go.mod h1:tujkw807nyEEAamNbDrEGzRav+ilXA7PCRAd6xsmwiU=
|
||||
golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js=
|
||||
golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0=
|
||||
golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
|
||||
@@ -1165,8 +1208,8 @@ golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
|
||||
golang.org/x/mod v0.4.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
|
||||
golang.org/x/mod v0.4.1/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
|
||||
golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
|
||||
golang.org/x/mod v0.22.0 h1:D4nJWe9zXqHOmWqj4VMOJhvzj7bEZg4wEYa759z1pH4=
|
||||
golang.org/x/mod v0.22.0/go.mod h1:6SkKJ3Xj0I0BrPOZoBy3bdMptDDU9oJrpohJ3eWZ1fY=
|
||||
golang.org/x/mod v0.24.0 h1:ZfthKaKaT4NrhGVZHO1/WDTwGES4De8KtWO0SIbNJMU=
|
||||
golang.org/x/mod v0.24.0/go.mod h1:IXM97Txy2VM4PJ3gI61r1YEk/gAj6zAHN3AdZt6S9Ww=
|
||||
golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||
golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||
golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||
@@ -1211,7 +1254,6 @@ golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96b
|
||||
golang.org/x/net v0.0.0-20210410081132-afb366fc7cd1/go.mod h1:9tjilg8BloeKEkVJvy7fQ90B1CfIiPueXVOjqfkSzI8=
|
||||
golang.org/x/net v0.0.0-20210428140749-89ef3d95e781/go.mod h1:OJAsFXCWl8Ukc7SiCT/9KSuxbyM7479/AVlXFRxuMCk=
|
||||
golang.org/x/net v0.0.0-20210503060351-7fd8e65b6420/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
|
||||
golang.org/x/net v0.0.0-20210525063256-abc453219eb5/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
|
||||
golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
|
||||
golang.org/x/net v0.0.0-20220127200216-cd36cc0744dd/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk=
|
||||
golang.org/x/net v0.0.0-20220225172249-27dd8689420f/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk=
|
||||
@@ -1241,8 +1283,8 @@ golang.org/x/oauth2 v0.0.0-20211104180415-d3ed0bb246c8/go.mod h1:KelEdhl1UZF7XfJ
|
||||
golang.org/x/oauth2 v0.0.0-20220223155221-ee480838109b/go.mod h1:DAh4E804XQdzx2j+YRIaUnCqCV2RuMz24cGBJ5QYIrc=
|
||||
golang.org/x/oauth2 v0.0.0-20220309155454-6242fa91716a/go.mod h1:DAh4E804XQdzx2j+YRIaUnCqCV2RuMz24cGBJ5QYIrc=
|
||||
golang.org/x/oauth2 v0.0.0-20220411215720-9780585627b5/go.mod h1:DAh4E804XQdzx2j+YRIaUnCqCV2RuMz24cGBJ5QYIrc=
|
||||
golang.org/x/oauth2 v0.24.0 h1:KTBBxWqUa0ykRPLtV69rRto9TLXcqYkeswu48x/gvNE=
|
||||
golang.org/x/oauth2 v0.24.0/go.mod h1:XYTD2NtWslqkgxebSiOHnXEap4TF09sJSc7H1sXbhtI=
|
||||
golang.org/x/oauth2 v0.29.0 h1:WdYw2tdTK1S8olAzWHdgeqfy+Mtm9XNhv/xJsY65d98=
|
||||
golang.org/x/oauth2 v0.29.0/go.mod h1:onh5ek6nERTohokkhCD/y2cV4Do3fxFHFuAejCkRWT8=
|
||||
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
@@ -1366,8 +1408,8 @@ golang.org/x/text v0.25.0/go.mod h1:WEdwpYrmk1qmdHvhkSTNPm3app7v4rsT8F2UD6+VHIA=
|
||||
golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
|
||||
golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
|
||||
golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
|
||||
golang.org/x/time v0.8.0 h1:9i3RxcPv3PZnitoVGMPDKZSq1xW1gK1Xy3ArNOGZfEg=
|
||||
golang.org/x/time v0.8.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM=
|
||||
golang.org/x/time v0.11.0 h1:/bpjEDfN9tkoN/ryeYHnv5hcMlc8ncjMcM4XBk5NWV0=
|
||||
golang.org/x/time v0.11.0/go.mod h1:CDIdPxbZBQxdj6cxyCIdrNogrJKMJ7pr37NYpMcMDSg=
|
||||
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
|
||||
golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
|
||||
golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY=
|
||||
@@ -1425,16 +1467,16 @@ golang.org/x/tools v0.1.2/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=
|
||||
golang.org/x/tools v0.1.3/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=
|
||||
golang.org/x/tools v0.1.4/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=
|
||||
golang.org/x/tools v0.1.5/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=
|
||||
golang.org/x/tools v0.28.0 h1:WuB6qZ4RPCQo5aP3WdKZS7i595EdWqWR8vqJTlwTVK8=
|
||||
golang.org/x/tools v0.28.0/go.mod h1:dcIOrVd3mfQKTgrDVQHqCPMWy6lnhfhtX3hLXYVLfRw=
|
||||
golang.org/x/tools v0.32.0 h1:Q7N1vhpkQv7ybVzLFtTjvQya2ewbwNDZzUgfXGqtMWU=
|
||||
golang.org/x/tools v0.32.0/go.mod h1:ZxrU41P/wAbZD8EDa6dDCa6XfpkhJ7HFMjHJXfBDu8s=
|
||||
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
golang.org/x/xerrors v0.0.0-20220411194840-2f41105eb62f/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
golang.org/x/xerrors v0.0.0-20220517211312-f3a8303e98df/go.mod h1:K8+ghG5WaK9qNqU5K3HdILfMLy1f3aNYFI/wnl100a8=
|
||||
gonum.org/v1/gonum v0.15.1 h1:FNy7N6OUZVUaWG9pTiD+jlhdQ3lMP+/LcTpJ6+a8sQ0=
|
||||
gonum.org/v1/gonum v0.15.1/go.mod h1:eZTZuRFrzu5pcyjN5wJhcIhnUdNijYxX1T2IcrOGY0o=
|
||||
gonum.org/v1/gonum v0.16.0 h1:5+ul4Swaf3ESvrOnidPp4GZbzf0mxVQpDCYUQE7OJfk=
|
||||
gonum.org/v1/gonum v0.16.0/go.mod h1:fef3am4MQ93R2HHpKnLk4/Tbh/s0+wqD5nfa6Pnwy4E=
|
||||
google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE=
|
||||
google.golang.org/api v0.7.0/go.mod h1:WtwebWUNSVBH/HAw79HIFXZNqEvBhG+Ra+ax0hx3E3M=
|
||||
google.golang.org/api v0.8.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg=
|
||||
@@ -1474,8 +1516,8 @@ google.golang.org/api v0.74.0/go.mod h1:ZpfMZOVRMywNyvJFeqL9HRWBgAuRfSjJFpe9QtRR
|
||||
google.golang.org/api v0.75.0/go.mod h1:pU9QmyHLnzlpar1Mjt4IbapUCy8J+6HD6GeELN69ljA=
|
||||
google.golang.org/api v0.78.0/go.mod h1:1Sg78yoMLOhlQTeF+ARBoytAcH1NNyyl390YMy6rKmw=
|
||||
google.golang.org/api v0.81.0/go.mod h1:FA6Mb/bZxj706H2j+j2d6mHEEaHBmbbWnkfvmorOCko=
|
||||
google.golang.org/api v0.213.0 h1:KmF6KaDyFqB417T68tMPbVmmwtIXs2VB60OJKIHB0xQ=
|
||||
google.golang.org/api v0.213.0/go.mod h1:V0T5ZhNUUNpYAlL306gFZPFt5F5D/IeyLoktduYYnvQ=
|
||||
google.golang.org/api v0.230.0 h1:2u1hni3E+UXAXrONrrkfWpi/V6cyKVAbfGVeGtC3OxM=
|
||||
google.golang.org/api v0.230.0/go.mod h1:aqvtoMk7YkiXx+6U12arQFExiRV9D/ekvMCwCd/TksQ=
|
||||
google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM=
|
||||
google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
|
||||
google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
|
||||
@@ -1562,10 +1604,10 @@ google.golang.org/genproto v0.0.0-20220421151946-72621c1f0bd3/go.mod h1:8w6bsBMX
|
||||
google.golang.org/genproto v0.0.0-20220429170224-98d788798c3e/go.mod h1:8w6bsBMX6yCPbAVTeqQHvzxW0EIFigd5lZyahWgyfDo=
|
||||
google.golang.org/genproto v0.0.0-20220505152158-f39f71e6c8f3/go.mod h1:RAyBrSAP7Fh3Nc84ghnVLDPuV51xc9agzmm4Ph6i0Q4=
|
||||
google.golang.org/genproto v0.0.0-20220519153652-3a47de7e79bd/go.mod h1:RAyBrSAP7Fh3Nc84ghnVLDPuV51xc9agzmm4Ph6i0Q4=
|
||||
google.golang.org/genproto/googleapis/api v0.0.0-20241216192217-9240e9c98484 h1:ChAdCYNQFDk5fYvFZMywKLIijG7TC2m1C2CMEu11G3o=
|
||||
google.golang.org/genproto/googleapis/api v0.0.0-20241216192217-9240e9c98484/go.mod h1:KRUmxRI4JmbpAm8gcZM4Jsffi859fo5LQjILwuqj9z8=
|
||||
google.golang.org/genproto/googleapis/rpc v0.0.0-20241209162323-e6fa225c2576 h1:8ZmaLZE4XWrtU3MyClkYqqtl6Oegr3235h7jxsDyqCY=
|
||||
google.golang.org/genproto/googleapis/rpc v0.0.0-20241209162323-e6fa225c2576/go.mod h1:5uTbfoYQed2U9p3KIj2/Zzm02PYhndfdmML0qC3q3FU=
|
||||
google.golang.org/genproto/googleapis/api v0.0.0-20250303144028-a0af3efb3deb h1:p31xT4yrYrSM/G4Sn2+TNUkVhFCbG9y8itM2S6Th950=
|
||||
google.golang.org/genproto/googleapis/api v0.0.0-20250303144028-a0af3efb3deb/go.mod h1:jbe3Bkdp+Dh2IrslsFCklNhweNTBgSYanP1UXhJDhKg=
|
||||
google.golang.org/genproto/googleapis/rpc v0.0.0-20250414145226-207652e42e2e h1:ztQaXfzEXTmCBvbtWYRhJxW+0iJcz2qXfd38/e9l7bA=
|
||||
google.golang.org/genproto/googleapis/rpc v0.0.0-20250414145226-207652e42e2e/go.mod h1:qQ0YXyHHx3XkvlzUtpXDkS29lDSafHMZBAZDc03LQ3A=
|
||||
google.golang.org/grpc v1.14.0/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw=
|
||||
google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
|
||||
google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38=
|
||||
@@ -1598,8 +1640,8 @@ google.golang.org/grpc v1.44.0/go.mod h1:k+4IHHFw41K8+bbowsex27ge2rCb65oeWqe4jJ5
|
||||
google.golang.org/grpc v1.45.0/go.mod h1:lN7owxKUQEqMfSyQikvvk5tf/6zMPsrK+ONuO11+0rQ=
|
||||
google.golang.org/grpc v1.46.0/go.mod h1:vN9eftEi1UMyUsIF80+uQXhHjbXYbm0uXoFCACuMGWk=
|
||||
google.golang.org/grpc v1.46.2/go.mod h1:vN9eftEi1UMyUsIF80+uQXhHjbXYbm0uXoFCACuMGWk=
|
||||
google.golang.org/grpc v1.69.0 h1:quSiOM1GJPmPH5XtU+BCoVXcDVJJAzNcoyfC2cCjGkI=
|
||||
google.golang.org/grpc v1.69.0/go.mod h1:vyjdE6jLBI76dgpDojsFGNaHlxdjXN9ghpnd2o7JGZ4=
|
||||
google.golang.org/grpc v1.72.2 h1:TdbGzwb82ty4OusHWepvFWGLgIbNo1/SUynEN0ssqv8=
|
||||
google.golang.org/grpc v1.72.2/go.mod h1:wH5Aktxcg25y1I3w7H69nHfXdOG3UiadoBtjh3izSDM=
|
||||
google.golang.org/grpc/cmd/protoc-gen-go-grpc v1.1.0/go.mod h1:6Kw0yEErY5E/yWrBtf03jp27GLLJujG4z/JK95pnjjw=
|
||||
google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8=
|
||||
google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0=
|
||||
@@ -1615,8 +1657,8 @@ google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp0
|
||||
google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc=
|
||||
google.golang.org/protobuf v1.27.1/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc=
|
||||
google.golang.org/protobuf v1.28.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I=
|
||||
google.golang.org/protobuf v1.36.0 h1:mjIs9gYtt56AzC4ZaffQuh88TZurBGhIJMBZGSxNerQ=
|
||||
google.golang.org/protobuf v1.36.0/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE=
|
||||
google.golang.org/protobuf v1.36.6 h1:z1NpPI8ku2WgiWnf+t9wTPsn6eP1L7ksHUlkfLvd9xY=
|
||||
google.golang.org/protobuf v1.36.6/go.mod h1:jduwjTPXsFjZGTmRluh+L6NjiWu7pchiJ2/5YcXBHnY=
|
||||
gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw=
|
||||
gopkg.in/asn1-ber.v1 v1.0.0-20181015200546-f715ec2f112d/go.mod h1:cuepJuh7vyXfUyUwEgHQXw849cJrilpS5NeIjOWESAw=
|
||||
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
|
||||
@@ -1625,6 +1667,8 @@ gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8
|
||||
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk=
|
||||
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q=
|
||||
gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI=
|
||||
gopkg.in/evanphx/json-patch.v4 v4.12.0 h1:n6jtcsulIzXPJaxegRbvFNNrZDjbij7ny3gmSPG+6V4=
|
||||
gopkg.in/evanphx/json-patch.v4 v4.12.0/go.mod h1:p8EYWUEYMpynmqDbY58zCKCFZw8pRWMG4EsWvDvM72M=
|
||||
gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys=
|
||||
gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc=
|
||||
gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw=
|
||||
@@ -1655,25 +1699,25 @@ honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWh
|
||||
honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg=
|
||||
honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k=
|
||||
honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k=
|
||||
k8s.io/api v0.31.3 h1:umzm5o8lFbdN/hIXbrK9oRpOproJO62CV1zqxXrLgk8=
|
||||
k8s.io/api v0.31.3/go.mod h1:UJrkIp9pnMOI9K2nlL6vwpxRzzEX5sWgn8kGQe92kCE=
|
||||
k8s.io/apimachinery v0.31.3 h1:6l0WhcYgasZ/wk9ktLq5vLaoXJJr5ts6lkaQzgeYPq4=
|
||||
k8s.io/apimachinery v0.31.3/go.mod h1:rsPdaZJfTfLsNJSQzNHQvYoTmxhoOEofxtOsF3rtsMo=
|
||||
k8s.io/client-go v0.31.3 h1:CAlZuM+PH2cm+86LOBemaJI/lQ5linJ6UFxKX/SoG+4=
|
||||
k8s.io/client-go v0.31.3/go.mod h1:2CgjPUTpv3fE5dNygAr2NcM8nhHzXvxB8KL5gYc3kJs=
|
||||
k8s.io/api v0.32.3 h1:Hw7KqxRusq+6QSplE3NYG4MBxZw1BZnq4aP4cJVINls=
|
||||
k8s.io/api v0.32.3/go.mod h1:2wEDTXADtm/HA7CCMD8D8bK4yuBUptzaRhYcYEEYA3k=
|
||||
k8s.io/apimachinery v0.32.3 h1:JmDuDarhDmA/Li7j3aPrwhpNBA94Nvk5zLeOge9HH1U=
|
||||
k8s.io/apimachinery v0.32.3/go.mod h1:GpHVgxoKlTxClKcteaeuF1Ul/lDVb74KpZcxcmLDElE=
|
||||
k8s.io/client-go v0.32.3 h1:RKPVltzopkSgHS7aS98QdscAgtgah/+zmpAogooIqVU=
|
||||
k8s.io/client-go v0.32.3/go.mod h1:3v0+3k4IcT9bXTc4V2rt+d2ZPPG700Xy6Oi0Gdl2PaY=
|
||||
k8s.io/klog/v2 v2.130.1 h1:n9Xl7H1Xvksem4KFG4PYbdQCQxqc/tTUyrgXaOhHSzk=
|
||||
k8s.io/klog/v2 v2.130.1/go.mod h1:3Jpz1GvMt720eyJH1ckRHK1EDfpxISzJ7I9OYgaDtPE=
|
||||
k8s.io/kube-openapi v0.0.0-20240228011516-70dd3763d340 h1:BZqlfIlq5YbRMFko6/PM7FjZpUb45WallggurYhKGag=
|
||||
k8s.io/kube-openapi v0.0.0-20240228011516-70dd3763d340/go.mod h1:yD4MZYeKMBwQKVht279WycxKyM84kkAx2DPrTXaeb98=
|
||||
k8s.io/utils v0.0.0-20240711033017-18e509b52bc8 h1:pUdcCO1Lk/tbT5ztQWOBi5HBgbBP1J8+AsQnQCKsi8A=
|
||||
k8s.io/utils v0.0.0-20240711033017-18e509b52bc8/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0=
|
||||
k8s.io/kube-openapi v0.0.0-20241105132330-32ad38e42d3f h1:GA7//TjRY9yWGy1poLzYYJJ4JRdzg3+O6e8I+e+8T5Y=
|
||||
k8s.io/kube-openapi v0.0.0-20241105132330-32ad38e42d3f/go.mod h1:R/HEjbvWI0qdfb8viZUeVZm0X6IZnxAydC7YU42CMw4=
|
||||
k8s.io/utils v0.0.0-20241104100929-3ea5e8cea738 h1:M3sRQVHv7vB20Xc2ybTt7ODCeFj6JSWYFzOFnYeS6Ro=
|
||||
k8s.io/utils v0.0.0-20241104100929-3ea5e8cea738/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0=
|
||||
rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8=
|
||||
rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0=
|
||||
rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA=
|
||||
sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd h1:EDPBXCAspyGV4jQlpZSudPeMmr1bNJefnuqLsRAsHZo=
|
||||
sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd/go.mod h1:B8JuhiUyNFVKdsE8h686QcCxMaH6HrOAZj4vswFpcB0=
|
||||
sigs.k8s.io/structured-merge-diff/v4 v4.4.1 h1:150L+0vs/8DA78h1u02ooW1/fFq/Lwr+sGiqlzvrtq4=
|
||||
sigs.k8s.io/structured-merge-diff/v4 v4.4.1/go.mod h1:N8hJocpFajUSSeSJ9bOZ77VzejKZaXsTtZo4/u7Io08=
|
||||
sigs.k8s.io/json v0.0.0-20241010143419-9aa6b5e7a4b3 h1:/Rv+M11QRah1itp8VhT6HoVx1Ray9eB4DBr+K+/sCJ8=
|
||||
sigs.k8s.io/json v0.0.0-20241010143419-9aa6b5e7a4b3/go.mod h1:18nIHnGi6636UCz6m8i4DhaJ65T6EruyzmoQqI2BVDo=
|
||||
sigs.k8s.io/structured-merge-diff/v4 v4.4.2 h1:MdmvkGuXi/8io6ixD5wud3vOLwc1rj0aNqRlpuvjmwA=
|
||||
sigs.k8s.io/structured-merge-diff/v4 v4.4.2/go.mod h1:N8f93tFZh9U6vpxwRArLiikrE5/2tiu1w1AGfACIGE4=
|
||||
sigs.k8s.io/yaml v1.2.0/go.mod h1:yfXDCHCao9+ENCvLSE62v9VSji2MKu5jeNfTrofGhJc=
|
||||
sigs.k8s.io/yaml v1.4.0 h1:Mk1wCc2gy/F0THH0TAp1QYyJNzRm2KCLy3o5ASXVI5E=
|
||||
sigs.k8s.io/yaml v1.4.0/go.mod h1:Ejl7/uTz7PSA4eKMyQCUTnhZYNmLIl+5c2lQPGR2BPY=
|
||||
|
||||
@@ -2,7 +2,7 @@ package clickhouseprometheus

import (
    "encoding/json"

    "github.com/prometheus/common/model"
    "github.com/prometheus/prometheus/prompb"
)

@@ -17,7 +17,12 @@ func unmarshalLabels(s string) ([]prompb.Label, string, error) {
    for n, v := range m {
        if n == "__name__" {
            metricName = v
        } else {
            if !model.IsValidLegacyMetricName(n) {
                n = `"` + n + `"`
            }
        }

        res = append(res, prompb.Label{
            Name:  n,
            Value: v,
@@ -33,6 +33,12 @@ func (a *API) QueryRange(rw http.ResponseWriter, req *http.Request) {
        return
    }

    // Validate the query request
    if err := queryRangeRequest.Validate(); err != nil {
        render.Error(rw, err)
        return
    }

    orgID, err := valuer.NewUUID(claims.OrgID)
    if err != nil {
        render.Error(rw, err)
@@ -117,7 +117,7 @@ func (bc *bucketCache) GetMissRanges(
}

// Put stores fresh query results in the cache
func (bc *bucketCache) Put(ctx context.Context, orgID valuer.UUID, q qbtypes.Query, fresh *qbtypes.Result) {
func (bc *bucketCache) Put(ctx context.Context, orgID valuer.UUID, q qbtypes.Query, step qbtypes.Step, fresh *qbtypes.Result) {
    // Get query window
    startMs, endMs := q.Window()

@@ -159,8 +159,36 @@ func (bc *bucketCache) Put(ctx context.Context, orgID valuer.UUID, q qbtypes.Que
        return
    }

    // Convert trimmed result to buckets
    freshBuckets := bc.resultToBuckets(ctx, trimmedResult, startMs, cachableEndMs)
    // Adjust start and end times to only cache complete intervals
    cachableStartMs := startMs
    stepMs := uint64(step.Duration.Milliseconds())

    // If we have a step interval, adjust boundaries to only cache complete intervals
    if stepMs > 0 {
        // If start is not aligned, round up to next step boundary (first complete interval)
        if startMs%stepMs != 0 {
            cachableStartMs = ((startMs / stepMs) + 1) * stepMs
        }

        // If end is not aligned, round down to previous step boundary (last complete interval)
        if cachableEndMs%stepMs != 0 {
            cachableEndMs = (cachableEndMs / stepMs) * stepMs
        }

        // If after adjustment we have no complete intervals, don't cache
        if cachableStartMs >= cachableEndMs {
            bc.logger.DebugContext(ctx, "no complete intervals to cache",
                "original_start", startMs,
                "original_end", endMs,
                "adjusted_start", cachableStartMs,
                "adjusted_end", cachableEndMs,
                "step", stepMs)
            return
        }
    }

    // Convert trimmed result to buckets with adjusted boundaries
    freshBuckets := bc.resultToBuckets(ctx, trimmedResult, cachableStartMs, cachableEndMs)

    // If no fresh buckets and no existing data, don't cache
    if len(freshBuckets) == 0 && len(existingData.Buckets) == 0 {
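For reference, the rounding above is plain integer division: round the start up to the next step boundary and the end down to the previous one, so only complete intervals are cached. A minimal standalone sketch of that arithmetic, using the same hypothetical timestamps as the step-alignment test below (12:02 to 12:58 with a 5-minute step):

package main

import "fmt"

// alignToStep mirrors the rounding in bucketCache.Put: round start up and
// end down to the nearest step boundary so only complete intervals remain.
func alignToStep(startMs, endMs, stepMs uint64) (uint64, uint64) {
    if stepMs == 0 {
        return startMs, endMs // no step: the whole window is cachable
    }
    if startMs%stepMs != 0 {
        startMs = ((startMs / stepMs) + 1) * stepMs // round up
    }
    if endMs%stepMs != 0 {
        endMs = (endMs / stepMs) * stepMs // round down
    }
    return startMs, endMs
}

func main() {
    // 12:02-12:58 with a 5-minute (300000 ms) step aligns to 12:05-12:55.
    s, e := alignToStep(1672563720000, 1672567080000, 300000)
    fmt.Println(s, e) // 1672563900000 1672566900000
}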
@@ -485,6 +513,12 @@ func (bc *bucketCache) mergeTimeSeriesValues(ctx context.Context, buckets []*cac
        }

        if existingSeries, ok := seriesMap[key]; ok {
            // Merge values, avoiding duplicate timestamps
            timestampMap := make(map[int64]bool)
            for _, v := range existingSeries.Values {
                timestampMap[v.Timestamp] = true
            }

            // Pre-allocate capacity for merged values
            newCap := len(existingSeries.Values) + len(series.Values)
            if cap(existingSeries.Values) < newCap {
@@ -492,7 +526,13 @@
                copy(newValues, existingSeries.Values)
                existingSeries.Values = newValues
            }
            existingSeries.Values = append(existingSeries.Values, series.Values...)

            // Only add values with new timestamps
            for _, v := range series.Values {
                if !timestampMap[v.Timestamp] {
                    existingSeries.Values = append(existingSeries.Values, v)
                }
            }
        } else {
            // New series
            seriesMap[key] = series
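The hunk above fixes double-counted points when cached buckets overlap: values were previously appended unconditionally, and now a timestamp set filters out duplicates first. A small standalone sketch of the same pattern, with a hypothetical point type standing in for qbtypes.TimeSeriesValue:

package main

import "fmt"

type point struct {
    Timestamp int64
    Value     float64
}

// mergeValues appends only points whose timestamps are not already present,
// following the dedup approach used in mergeTimeSeriesValues.
func mergeValues(existing, fresh []point) []point {
    seen := make(map[int64]bool, len(existing))
    for _, p := range existing {
        seen[p.Timestamp] = true
    }
    for _, p := range fresh {
        if !seen[p.Timestamp] {
            existing = append(existing, p)
        }
    }
    return existing
}

func main() {
    a := []point{{1000, 1}, {2000, 2}}
    b := []point{{2000, 2}, {3000, 3}} // timestamp 2000 overlaps
    fmt.Println(mergeValues(a, b))     // [{1000 1} {2000 2} {3000 3}]
}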
@@ -697,7 +737,7 @@ func (bc *bucketCache) trimResultToFluxBoundary(result *qbtypes.Result, fluxBoun
    switch result.Type {
    case qbtypes.RequestTypeTimeSeries:
        // Trim time series data
        if tsData, ok := result.Value.(*qbtypes.TimeSeriesData); ok {
        if tsData, ok := result.Value.(*qbtypes.TimeSeriesData); ok && tsData != nil {
            trimmedData := &qbtypes.TimeSeriesData{
                QueryName: tsData.QueryName,
            }
@@ -30,7 +30,7 @@ func BenchmarkBucketCache_GetMissRanges(b *testing.B) {
            endMs: uint64((i + 1) * 10000),
        }
        result := createBenchmarkResult(query.startMs, query.endMs, 1000)
        bc.Put(ctx, orgID, query, result)
        bc.Put(ctx, orgID, query, qbtypes.Step{Duration: 1000 * time.Millisecond}, result)
    }

    // Create test queries with varying cache hit patterns
@@ -121,7 +121,7 @@ func BenchmarkBucketCache_Put(b *testing.B) {

        for i := 0; i < b.N; i++ {
            for j := 0; j < tc.numQueries; j++ {
                bc.Put(ctx, orgID, queries[j], results[j])
                bc.Put(ctx, orgID, queries[j], qbtypes.Step{Duration: 1000 * time.Millisecond}, results[j])
            }
        }
    })
@@ -259,7 +259,7 @@ func BenchmarkBucketCache_ConcurrentOperations(b *testing.B) {
            endMs: uint64((i + 1) * 10000),
        }
        result := createBenchmarkResult(query.startMs, query.endMs, 1000)
        bc.Put(ctx, orgID, query, result)
        bc.Put(ctx, orgID, query, qbtypes.Step{Duration: 1000 * time.Millisecond}, result)
    }

    b.ResetTimer()
@@ -284,7 +284,7 @@ func BenchmarkBucketCache_ConcurrentOperations(b *testing.B) {
                endMs: uint64((i + 1) * 10000),
            }
            result := createBenchmarkResult(query.startMs, query.endMs, 1000)
            bc.Put(ctx, orgID, query, result)
            bc.Put(ctx, orgID, query, qbtypes.Step{Duration: 1000 * time.Millisecond}, result)
        case 2: // Partial read
            query := &mockQuery{
                fingerprint: fmt.Sprintf("concurrent-query-%d", i%100),
117
pkg/querier/bucket_cache_step_test.go
Normal file
@@ -0,0 +1,117 @@
package querier

import (
    "context"
    "testing"
    "time"

    "github.com/SigNoz/signoz/pkg/instrumentation/instrumentationtest"
    qbtypes "github.com/SigNoz/signoz/pkg/types/querybuildertypes/querybuildertypesv5"
    "github.com/SigNoz/signoz/pkg/types/telemetrytypes"
    "github.com/SigNoz/signoz/pkg/valuer"
    "github.com/stretchr/testify/assert"
    "github.com/stretchr/testify/require"
)

func TestBucketCacheStepAlignment(t *testing.T) {
    ctx := context.Background()
    orgID := valuer.UUID{}
    cache := createTestCache(t)
    bc := NewBucketCache(instrumentationtest.New().ToProviderSettings(), cache, time.Hour, 5*time.Minute)

    // Test with 5-minute step
    step := qbtypes.Step{Duration: 5 * time.Minute}

    // Query from 12:02 to 12:58 (both unaligned)
    // Complete intervals: 12:05 to 12:55
    query := &mockQuery{
        fingerprint: "test-step-alignment",
        startMs:     1672563720000, // 12:02
        endMs:       1672567080000, // 12:58
    }

    result := &qbtypes.Result{
        Type: qbtypes.RequestTypeTimeSeries,
        Value: &qbtypes.TimeSeriesData{
            QueryName: "test",
            Aggregations: []*qbtypes.AggregationBucket{
                {
                    Index: 0,
                    Series: []*qbtypes.TimeSeries{
                        {
                            Labels: []*qbtypes.Label{
                                {Key: telemetrytypes.TelemetryFieldKey{Name: "service"}, Value: "test"},
                            },
                            Values: []*qbtypes.TimeSeriesValue{
                                {Timestamp: 1672563720000, Value: 1, Partial: true}, // 12:02
                                {Timestamp: 1672563900000, Value: 2},                // 12:05
                                {Timestamp: 1672564200000, Value: 2.5},              // 12:10
                                {Timestamp: 1672564500000, Value: 2.6},              // 12:15
                                {Timestamp: 1672566600000, Value: 2.9},              // 12:50
                                {Timestamp: 1672566900000, Value: 3},                // 12:55
                                {Timestamp: 1672567080000, Value: 4, Partial: true}, // 12:58
                            },
                        },
                    },
                },
            },
        },
    }

    // Put result in cache
    bc.Put(ctx, orgID, query, step, result)

    // Get cached data
    cached, missing := bc.GetMissRanges(ctx, orgID, query, step)

    // Should have cached data
    require.NotNil(t, cached)

    // Log the missing ranges to debug
    t.Logf("Missing ranges: %v", missing)
    for i, r := range missing {
        t.Logf("Missing range %d: From=%d, To=%d", i, r.From, r.To)
    }

    // Should have 2 missing ranges for partial intervals
    require.Len(t, missing, 2)

    // First partial: 12:02 to 12:05
    assert.Equal(t, uint64(1672563720000), missing[0].From)
    assert.Equal(t, uint64(1672563900000), missing[0].To)

    // Second partial: 12:55 to 12:58
    assert.Equal(t, uint64(1672566900000), missing[1].From, "Second missing range From")
    assert.Equal(t, uint64(1672567080000), missing[1].To, "Second missing range To")
}

func TestBucketCacheNoStepInterval(t *testing.T) {
    ctx := context.Background()
    orgID := valuer.UUID{}
    cache := createTestCache(t)
    bc := NewBucketCache(instrumentationtest.New().ToProviderSettings(), cache, time.Hour, 5*time.Minute)

    // Test with no step (stepMs = 0)
    step := qbtypes.Step{Duration: 0}

    query := &mockQuery{
        fingerprint: "test-no-step",
        startMs:     1672563720000,
        endMs:       1672567080000,
    }

    result := &qbtypes.Result{
        Type: qbtypes.RequestTypeTimeSeries,
        Value: &qbtypes.TimeSeriesData{
            QueryName:    "test",
            Aggregations: []*qbtypes.AggregationBucket{{Index: 0, Series: []*qbtypes.TimeSeries{}}},
        },
    }

    // Should cache the entire range when step is 0
    bc.Put(ctx, orgID, query, step, result)

    cached, missing := bc.GetMissRanges(ctx, orgID, query, step)
    assert.NotNil(t, cached)
    assert.Len(t, missing, 0)
}
@@ -128,7 +128,7 @@ func TestBucketCache_GetMissRanges_EmptyCache(t *testing.T) {
        endMs: 5000,
    }

    cached, missing := bc.GetMissRanges(context.Background(), valuer.UUID{}, query, qbtypes.Step{Duration: 1000})
    cached, missing := bc.GetMissRanges(context.Background(), valuer.UUID{}, query, qbtypes.Step{Duration: 1000 * time.Millisecond})

    assert.Nil(t, cached)
    assert.Len(t, missing, 1)
@@ -159,13 +159,13 @@ func TestBucketCache_Put_And_Get(t *testing.T) {
    }

    // Store in cache
    bc.Put(context.Background(), valuer.UUID{}, query, result)
    bc.Put(context.Background(), valuer.UUID{}, query, qbtypes.Step{Duration: 1000 * time.Millisecond}, result)

    // Wait a bit for cache to be written
    time.Sleep(10 * time.Millisecond)

    // Retrieve from cache
    cached, missing := bc.GetMissRanges(context.Background(), valuer.UUID{}, query, qbtypes.Step{Duration: 1000})
    cached, missing := bc.GetMissRanges(context.Background(), valuer.UUID{}, query, qbtypes.Step{Duration: 1000 * time.Millisecond})

    assert.NotNil(t, cached.Value)
    assert.Len(t, missing, 0)
@@ -193,7 +193,7 @@ func TestBucketCache_PartialHit(t *testing.T) {
        Type:  qbtypes.RequestTypeTimeSeries,
        Value: createTestTimeSeries("A", 1000, 3000, 1000),
    }
    bc.Put(context.Background(), valuer.UUID{}, query1, result1)
    bc.Put(context.Background(), valuer.UUID{}, query1, qbtypes.Step{Duration: 1000 * time.Millisecond}, result1)

    // Wait for cache write
    time.Sleep(10 * time.Millisecond)
@@ -205,7 +205,7 @@
        endMs: 5000,
    }

    cached, missing := bc.GetMissRanges(context.Background(), valuer.UUID{}, query2, qbtypes.Step{Duration: 1000})
    cached, missing := bc.GetMissRanges(context.Background(), valuer.UUID{}, query2, qbtypes.Step{Duration: 1000 * time.Millisecond})

    // Should have cached data
    assert.NotNil(t, cached.Value)
@@ -226,7 +226,7 @@ func TestBucketCache_MultipleBuckets(t *testing.T) {
        startMs: 1000,
        endMs:   2000,
    }
    bc.Put(context.Background(), valuer.UUID{}, query1, &qbtypes.Result{
    bc.Put(context.Background(), valuer.UUID{}, query1, qbtypes.Step{Duration: 100 * time.Millisecond}, &qbtypes.Result{
        Type:  qbtypes.RequestTypeTimeSeries,
        Value: createTestTimeSeries("A", 1000, 2000, 100),
    })
@@ -236,7 +236,7 @@
        startMs: 3000,
        endMs:   4000,
    }
    bc.Put(context.Background(), valuer.UUID{}, query2, &qbtypes.Result{
    bc.Put(context.Background(), valuer.UUID{}, query2, qbtypes.Step{Duration: 100 * time.Millisecond}, &qbtypes.Result{
        Type:  qbtypes.RequestTypeTimeSeries,
        Value: createTestTimeSeries("A", 3000, 4000, 100),
    })
@@ -251,7 +251,7 @@
        endMs: 4500,
    }

    cached, missing := bc.GetMissRanges(context.Background(), valuer.UUID{}, query3, qbtypes.Step{Duration: 1000})
    cached, missing := bc.GetMissRanges(context.Background(), valuer.UUID{}, query3, qbtypes.Step{Duration: 1000 * time.Millisecond})

    // Should have cached data
    assert.NotNil(t, cached.Value)
@@ -284,13 +284,13 @@ func TestBucketCache_FluxInterval(t *testing.T) {
    }

    // This should not be cached due to flux interval
    bc.Put(context.Background(), valuer.UUID{}, query, result)
    bc.Put(context.Background(), valuer.UUID{}, query, qbtypes.Step{Duration: 1000 * time.Millisecond}, result)

    // Wait a bit
    time.Sleep(10 * time.Millisecond)

    // Try to get the data
    cached, missing := bc.GetMissRanges(context.Background(), valuer.UUID{}, query, qbtypes.Step{Duration: 1000})
    cached, missing := bc.GetMissRanges(context.Background(), valuer.UUID{}, query, qbtypes.Step{Duration: 1000 * time.Millisecond})

    // Should have no cached data
    assert.Nil(t, cached)
@@ -354,7 +354,7 @@ func TestBucketCache_MergeTimeSeriesResults(t *testing.T) {
        startMs: 1000,
        endMs:   3000,
    }
    bc.Put(context.Background(), valuer.UUID{}, query1, &qbtypes.Result{
    bc.Put(context.Background(), valuer.UUID{}, query1, qbtypes.Step{Duration: 1000 * time.Millisecond}, &qbtypes.Result{
        Type: qbtypes.RequestTypeTimeSeries,
        Value: &qbtypes.TimeSeriesData{
            QueryName: "A",
@@ -370,7 +370,7 @@
        startMs: 3000,
        endMs:   5000,
    }
    bc.Put(context.Background(), valuer.UUID{}, query2, &qbtypes.Result{
    bc.Put(context.Background(), valuer.UUID{}, query2, qbtypes.Step{Duration: 1000 * time.Millisecond}, &qbtypes.Result{
        Type: qbtypes.RequestTypeTimeSeries,
        Value: &qbtypes.TimeSeriesData{
            QueryName: "A",
@@ -390,7 +390,7 @@
        endMs: 5000,
    }

    cached, missing := bc.GetMissRanges(context.Background(), valuer.UUID{}, query3, qbtypes.Step{Duration: 1000})
    cached, missing := bc.GetMissRanges(context.Background(), valuer.UUID{}, query3, qbtypes.Step{Duration: 1000 * time.Millisecond})

    // Should have no missing ranges
    assert.Len(t, missing, 0)
@@ -445,10 +445,10 @@ func TestBucketCache_RawData(t *testing.T) {
        Value: rawData,
    }

    bc.Put(context.Background(), valuer.UUID{}, query, result)
    bc.Put(context.Background(), valuer.UUID{}, query, qbtypes.Step{Duration: 1000 * time.Millisecond}, result)
    time.Sleep(10 * time.Millisecond)

    cached, missing := bc.GetMissRanges(context.Background(), valuer.UUID{}, query, qbtypes.Step{Duration: 1000})
    cached, missing := bc.GetMissRanges(context.Background(), valuer.UUID{}, query, qbtypes.Step{Duration: 1000 * time.Millisecond})

    // Raw data should not be cached
    assert.Nil(t, cached)
@@ -485,10 +485,10 @@ func TestBucketCache_ScalarData(t *testing.T) {
        Value: scalarData,
    }

    bc.Put(context.Background(), valuer.UUID{}, query, result)
    bc.Put(context.Background(), valuer.UUID{}, query, qbtypes.Step{Duration: 1000 * time.Millisecond}, result)
    time.Sleep(10 * time.Millisecond)

    cached, missing := bc.GetMissRanges(context.Background(), valuer.UUID{}, query, qbtypes.Step{Duration: 1000})
    cached, missing := bc.GetMissRanges(context.Background(), valuer.UUID{}, query, qbtypes.Step{Duration: 1000 * time.Millisecond})

    // Scalar data should not be cached
    assert.Nil(t, cached)
@@ -513,11 +513,11 @@ func TestBucketCache_EmptyFingerprint(t *testing.T) {
        Value: createTestTimeSeries("A", 1000, 5000, 1000),
    }

    bc.Put(context.Background(), valuer.UUID{}, query, result)
    bc.Put(context.Background(), valuer.UUID{}, query, qbtypes.Step{Duration: 1000 * time.Millisecond}, result)
    time.Sleep(10 * time.Millisecond)

    // Should still be able to retrieve
    cached, missing := bc.GetMissRanges(context.Background(), valuer.UUID{}, query, qbtypes.Step{Duration: 1000})
    cached, missing := bc.GetMissRanges(context.Background(), valuer.UUID{}, query, qbtypes.Step{Duration: 1000 * time.Millisecond})
    assert.NotNil(t, cached.Value)
    assert.Len(t, missing, 0)
}
@@ -568,7 +568,7 @@ func TestBucketCache_ConcurrentAccess(t *testing.T) {
                Type:  qbtypes.RequestTypeTimeSeries,
                Value: createTestTimeSeries(fmt.Sprintf("Q%d", id), query.startMs, query.endMs, 100),
            }
            bc.Put(context.Background(), valuer.UUID{}, query, result)
            bc.Put(context.Background(), valuer.UUID{}, query, qbtypes.Step{Duration: 100 * time.Microsecond}, result)
            done <- true
        }(i)
    }
@@ -581,7 +581,7 @@
                startMs: uint64(id * 1000),
                endMs:   uint64((id + 1) * 1000),
            }
            bc.GetMissRanges(context.Background(), valuer.UUID{}, query, qbtypes.Step{Duration: 1000})
            bc.GetMissRanges(context.Background(), valuer.UUID{}, query, qbtypes.Step{Duration: 1000 * time.Millisecond})
            done <- true
        }(i)
    }
@@ -628,10 +628,10 @@ func TestBucketCache_GetMissRanges_FluxInterval(t *testing.T) {
        },
    }

    bc.Put(ctx, orgID, query, cachedResult)
    bc.Put(ctx, orgID, query, qbtypes.Step{Duration: 1000 * time.Millisecond}, cachedResult)

    // Get miss ranges
    cached, missing := bc.GetMissRanges(ctx, orgID, query, qbtypes.Step{Duration: 1000})
    cached, missing := bc.GetMissRanges(ctx, orgID, query, qbtypes.Step{Duration: 1000 * time.Millisecond})
    assert.NotNil(t, cached)
    t.Logf("Missing ranges: %+v, query range: %d-%d", missing, query.startMs, query.endMs)

@@ -690,10 +690,10 @@ func TestBucketCache_Put_FluxIntervalTrimming(t *testing.T) {
    }

    // Put the result
    bc.Put(ctx, orgID, query, result)
    bc.Put(ctx, orgID, query, qbtypes.Step{Duration: 1000 * time.Millisecond}, result)

    // Retrieve cached data
    cached, missing := bc.GetMissRanges(ctx, orgID, query, qbtypes.Step{Duration: 1000})
    cached, missing := bc.GetMissRanges(ctx, orgID, query, qbtypes.Step{Duration: 1000 * time.Millisecond})

    // Should have cached data
    assert.NotNil(t, cached)
@@ -760,10 +760,10 @@ func TestBucketCache_Put_EntireRangeInFluxInterval(t *testing.T) {
    }

    // Put the result - should not cache anything
    bc.Put(ctx, orgID, query, result)
    bc.Put(ctx, orgID, query, qbtypes.Step{Duration: 1000 * time.Millisecond}, result)

    // Try to get cached data - should have no cached data
    cached, missing := bc.GetMissRanges(ctx, orgID, query, qbtypes.Step{Duration: 1000})
    cached, missing := bc.GetMissRanges(ctx, orgID, query, qbtypes.Step{Duration: 1000 * time.Millisecond})

    // Should have no cached value
    assert.Nil(t, cached)
@@ -785,18 +785,6 @@ func TestBucketCache_EmptyDataHandling(t *testing.T) {
        shouldCache bool
        description string
    }{
        {
            name: "truly_empty_time_series",
            result: &qbtypes.Result{
                Type: qbtypes.RequestTypeTimeSeries,
                Value: &qbtypes.TimeSeriesData{
                    QueryName:    "A",
                    Aggregations: []*qbtypes.AggregationBucket{},
                },
            },
            shouldCache: false,
            description: "No aggregations means truly empty - should not cache",
        },
        {
            name: "filtered_empty_time_series",
            result: &qbtypes.Result{
@@ -878,17 +866,16 @@
            }

            // Put the result
            bc.Put(ctx, orgID, query, tt.result)
            bc.Put(ctx, orgID, query, qbtypes.Step{Duration: 1000 * time.Millisecond}, tt.result)

            // Wait a bit for cache to be written
            time.Sleep(10 * time.Millisecond)

            // Try to get cached data
            cached, missing := bc.GetMissRanges(ctx, orgID, query, qbtypes.Step{Duration: 1000})
            cached, missing := bc.GetMissRanges(ctx, orgID, query, qbtypes.Step{Duration: 1000 * time.Millisecond})

            if tt.shouldCache {
                assert.NotNil(t, cached, tt.description)
                assert.Len(t, missing, 0, "Should have no missing ranges when data is cached")
            } else {
                assert.Nil(t, cached, tt.description)
                assert.Len(t, missing, 1, "Should have entire range as missing when data is not cached")
@@ -944,13 +931,13 @@ func TestBucketCache_PartialValues(t *testing.T) {
    }

    // Put the result
    bc.Put(ctx, orgID, query, result)
    bc.Put(ctx, orgID, query, qbtypes.Step{Duration: 1000 * time.Millisecond}, result)

    // Wait for cache to be written
    time.Sleep(10 * time.Millisecond)

    // Get cached data
    cached, missing := bc.GetMissRanges(ctx, orgID, query, qbtypes.Step{Duration: 1000})
    cached, missing := bc.GetMissRanges(ctx, orgID, query, qbtypes.Step{Duration: 1000 * time.Millisecond})

    // Should have cached data
    assert.NotNil(t, cached)
@@ -1014,13 +1001,13 @@ func TestBucketCache_AllPartialValues(t *testing.T) {
    }

    // Put the result
    bc.Put(ctx, orgID, query, result)
    bc.Put(ctx, orgID, query, qbtypes.Step{Duration: 1000 * time.Millisecond}, result)

    // Wait for cache to be written
    time.Sleep(10 * time.Millisecond)

    // Get cached data
    cached, missing := bc.GetMissRanges(ctx, orgID, query, qbtypes.Step{Duration: 1000})
    cached, missing := bc.GetMissRanges(ctx, orgID, query, qbtypes.Step{Duration: 1000 * time.Millisecond})

    // When all values are partial and filtered out, the result is cached as empty
    // This prevents re-querying for the same misaligned time range
@@ -1075,7 +1062,7 @@ func TestBucketCache_FilteredCachedResults(t *testing.T) {
    }

    // Cache the wide range
    bc.Put(ctx, orgID, query1, result1)
    bc.Put(ctx, orgID, query1, qbtypes.Step{Duration: 1000 * time.Millisecond}, result1)
    time.Sleep(10 * time.Millisecond)

    // Now query for a smaller range (2000-3500ms)
@@ -1086,7 +1073,7 @@
    }

    // Get cached data - should be filtered to requested range
    cached, missing := bc.GetMissRanges(ctx, orgID, query2, qbtypes.Step{Duration: 1000})
    cached, missing := bc.GetMissRanges(ctx, orgID, query2, qbtypes.Step{Duration: 1000 * time.Millisecond})

    // Should have no missing ranges
    assert.Len(t, missing, 0)
@@ -1246,7 +1233,7 @@ func TestBucketCache_PartialValueDetection(t *testing.T) {
    }

    // Put the result
    bc.Put(ctx, orgID, query, result)
    bc.Put(ctx, orgID, query, qbtypes.Step{Duration: 1000 * time.Millisecond}, result)
    time.Sleep(10 * time.Millisecond)

    // Get cached data
@@ -1300,7 +1287,7 @@
    }

    // Put the result
    bc.Put(ctx, orgID, query, result)
    bc.Put(ctx, orgID, query, qbtypes.Step{Duration: 1000 * time.Millisecond}, result)
    time.Sleep(10 * time.Millisecond)

    // Get cached data
@@ -1352,7 +1339,7 @@
    }

    // Put the result
    bc.Put(ctx, orgID, query, result)
    bc.Put(ctx, orgID, query, qbtypes.Step{Duration: 1000 * time.Millisecond}, result)
    time.Sleep(10 * time.Millisecond)

    // Get cached data
@@ -1409,11 +1396,11 @@ func TestBucketCache_NoCache(t *testing.T) {
    }

    // Put the result in cache
    bc.Put(ctx, orgID, query, result)
    bc.Put(ctx, orgID, query, qbtypes.Step{Duration: 1000 * time.Millisecond}, result)
    time.Sleep(10 * time.Millisecond)

    // Verify data is cached
    cached, missing := bc.GetMissRanges(ctx, orgID, query, qbtypes.Step{Duration: 1000})
    cached, missing := bc.GetMissRanges(ctx, orgID, query, qbtypes.Step{Duration: 1000 * time.Millisecond})
    assert.NotNil(t, cached)
    assert.Len(t, missing, 0)
@@ -118,6 +118,10 @@ func (q *builderQuery[T]) Fingerprint() string {
        parts = append(parts, fmt.Sprintf("having=%s", q.spec.Having.Expression))
    }

    if q.spec.ShiftBy != 0 {
        parts = append(parts, fmt.Sprintf("shiftby=%d", q.spec.ShiftBy))
    }

    return strings.Join(parts, "&")
}
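With this addition, two otherwise identical metric queries that differ only in their time shift hash to different cache keys, so a shifted query can no longer collide with its unshifted twin. A rough sketch of the resulting key shape (the field values below are illustrative, not the exact output of Fingerprint):

package main

import (
    "fmt"
    "strings"
)

func main() {
    // Illustrative only: a fingerprint is a "&"-joined list of parts, so a
    // non-zero shift contributes a "shiftby=<seconds>" segment to the key.
    parts := []string{"signal=metrics", "aggregations=count()", "shiftby=3600"}
    fmt.Println(strings.Join(parts, "&")) // signal=metrics&aggregations=count()&shiftby=3600
}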
@@ -204,7 +208,14 @@ func (q *builderQuery[T]) executeWithContext(ctx context.Context, query string,

    // Pass query window and step for partial value detection
    queryWindow := &qbtypes.TimeRange{From: q.fromMS, To: q.toMS}
    payload, err := consume(rows, q.kind, queryWindow, q.spec.StepInterval, q.spec.Name)

    kind := q.kind
    // all metric queries are time series, then reduced if required
    if q.spec.Signal == telemetrytypes.SignalMetrics {
        kind = qbtypes.RequestTypeTimeSeries
    }

    payload, err := consume(rows, kind, queryWindow, q.spec.StepInterval, q.spec.Name)
    if err != nil {
        return nil, err
    }
@@ -224,16 +235,18 @@ func (q *builderQuery[T]) executeWindowList(ctx context.Context) (*qbtypes.Resul
    isAsc := len(q.spec.Order) > 0 &&
        strings.ToLower(string(q.spec.Order[0].Direction.StringValue())) == "asc"

    fromMS, toMS := q.fromMS, q.toMS

    // Adjust [fromMS,toMS] window if a cursor was supplied
    if cur := strings.TrimSpace(q.spec.Cursor); cur != "" {
        if ts, err := decodeCursor(cur); err == nil {
            if isAsc {
                if uint64(ts) >= q.fromMS {
                    q.fromMS = uint64(ts + 1)
                if uint64(ts) >= fromMS {
                    fromMS = uint64(ts + 1)
                }
            } else { // DESC
                if uint64(ts) <= q.toMS {
                    q.toMS = uint64(ts - 1)
                if uint64(ts) <= toMS {
                    toMS = uint64(ts - 1)
                }
            }
        }
@@ -252,7 +265,16 @@
    totalBytes := uint64(0)
    start := time.Now()

    for _, r := range makeBuckets(q.fromMS, q.toMS) {
    // Get buckets and reverse them for ascending order
    buckets := makeBuckets(fromMS, toMS)
    if isAsc {
        // Reverse the buckets for ascending order
        for i, j := 0, len(buckets)-1; i < j; i, j = i+1, j-1 {
            buckets[i], buckets[j] = buckets[j], buckets[i]
        }
    }

    for _, r := range buckets {
        q.spec.Offset = 0
        q.spec.Limit = need
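The switch to local fromMS/toMS copies matters for pagination: the previous code mutated the query's own window on every cursor, so repeated page fetches compounded the adjustment. A standalone sketch of the cursor arithmetic on local copies, mirroring the logic above:

package main

import "fmt"

// adjustWindow mirrors the cursor handling in executeWindowList: operate on
// local copies so repeated pagination calls never mutate the query's window.
func adjustWindow(fromMS, toMS uint64, cursorTS int64, asc bool) (uint64, uint64) {
    if asc {
        if uint64(cursorTS) >= fromMS {
            fromMS = uint64(cursorTS + 1) // resume just after the last seen row
        }
    } else {
        if uint64(cursorTS) <= toMS {
            toMS = uint64(cursorTS - 1) // resume just before the last seen row
        }
    }
    return fromMS, toMS
}

func main() {
    from, to := adjustWindow(1000, 9000, 4000, true)
    fmt.Println(from, to) // 4001 9000
}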
131
pkg/querier/builder_query_test.go
Normal file
@@ -0,0 +1,131 @@
package querier

import (
    "strings"
    "testing"

    "github.com/SigNoz/signoz/pkg/querybuilder"
    qbtypes "github.com/SigNoz/signoz/pkg/types/querybuildertypes/querybuildertypesv5"
    "github.com/SigNoz/signoz/pkg/types/telemetrytypes"
    "github.com/stretchr/testify/assert"
)

func TestBuilderQueryFingerprint(t *testing.T) {
    tests := []struct {
        name           string
        query          *builderQuery[qbtypes.MetricAggregation]
        expectInKey    []string
        notExpectInKey []string
    }{
        {
            name: "fingerprint includes shiftby when ShiftBy field is set",
            query: &builderQuery[qbtypes.MetricAggregation]{
                kind: qbtypes.RequestTypeTimeSeries,
                spec: qbtypes.QueryBuilderQuery[qbtypes.MetricAggregation]{
                    Signal:  telemetrytypes.SignalMetrics,
                    ShiftBy: 3600,
                    Functions: []qbtypes.Function{
                        {
                            Name: qbtypes.FunctionNameTimeShift,
                            Args: []qbtypes.FunctionArg{
                                {Value: "3600"},
                            },
                        },
                    },
                },
            },
            expectInKey:    []string{"shiftby=3600"},
            notExpectInKey: []string{"functions=", "timeshift", "absolute"},
        },
        {
            name: "fingerprint includes shiftby but not other functions",
            query: &builderQuery[qbtypes.MetricAggregation]{
                kind: qbtypes.RequestTypeTimeSeries,
                spec: qbtypes.QueryBuilderQuery[qbtypes.MetricAggregation]{
                    Signal:  telemetrytypes.SignalMetrics,
                    ShiftBy: 3600,
                    Functions: []qbtypes.Function{
                        {
                            Name: qbtypes.FunctionNameTimeShift,
                            Args: []qbtypes.FunctionArg{
                                {Value: "3600"},
                            },
                        },
                        {
                            Name: qbtypes.FunctionNameAbsolute,
                        },
                    },
                },
            },
            expectInKey:    []string{"shiftby=3600"},
            notExpectInKey: []string{"functions=", "absolute"},
        },
        {
            name: "no shiftby in fingerprint when ShiftBy is zero",
            query: &builderQuery[qbtypes.MetricAggregation]{
                kind: qbtypes.RequestTypeTimeSeries,
                spec: qbtypes.QueryBuilderQuery[qbtypes.MetricAggregation]{
                    Signal:  telemetrytypes.SignalMetrics,
                    ShiftBy: 0,
                    Functions: []qbtypes.Function{
                        {
                            Name: qbtypes.FunctionNameAbsolute,
                        },
                    },
                },
            },
            expectInKey:    []string{},
            notExpectInKey: []string{"shiftby=", "functions=", "absolute"},
        },
    }

    for _, tt := range tests {
        t.Run(tt.name, func(t *testing.T) {
            fingerprint := tt.query.Fingerprint()
            for _, expected := range tt.expectInKey {
                assert.True(t, strings.Contains(fingerprint, expected),
                    "Expected fingerprint to contain '%s', got: %s", expected, fingerprint)
            }
            for _, notExpected := range tt.notExpectInKey {
                assert.False(t, strings.Contains(fingerprint, notExpected),
                    "Expected fingerprint NOT to contain '%s', got: %s", notExpected, fingerprint)
            }
        })
    }
}

func TestMakeBucketsOrder(t *testing.T) {
    // Test that makeBuckets returns buckets in reverse chronological order by default
    // Using milliseconds as input - need > 1 hour range to get multiple buckets
    now := uint64(1700000000000) // Some timestamp in ms
    startMS := now
    endMS := now + uint64(10*60*60*1000) // 10 hours later

    buckets := makeBuckets(startMS, endMS)

    // Should have multiple buckets for a 10 hour range
    assert.True(t, len(buckets) > 1, "Should have multiple buckets for 10 hour range, got %d", len(buckets))

    // Log buckets for debugging
    t.Logf("Generated %d buckets:", len(buckets))
    for i, b := range buckets {
        durationMs := (b.toNS - b.fromNS) / 1e6
        t.Logf("Bucket %d: duration=%dms", i, durationMs)
    }

    // Verify buckets are in reverse chronological order (newest to oldest)
    for i := 0; i < len(buckets)-1; i++ {
        assert.True(t, buckets[i].toNS > buckets[i+1].toNS,
            "Bucket %d end should be after bucket %d end", i, i+1)
        assert.Equal(t, buckets[i].fromNS, buckets[i+1].toNS,
            "Bucket %d start should equal bucket %d end (continuous buckets)", i, i+1)
    }

    // First bucket should end at endNS (converted to nanoseconds)
    expectedEndNS := querybuilder.ToNanoSecs(endMS)
    assert.Equal(t, expectedEndNS, buckets[0].toNS)

    // Last bucket should start at startNS (converted to nanoseconds)
    expectedStartNS := querybuilder.ToNanoSecs(startMS)
    assert.Equal(t, expectedStartNS, buckets[len(buckets)-1].fromNS)
}
@@ -176,7 +176,7 @@ func readAsTimeSeries(rows driver.Rows, queryWindow *qbtypes.TimeRange, step qbt
            lblVals = append(lblVals, *val)
            lblObjs = append(lblObjs, &qbtypes.Label{
                Key:   telemetrytypes.TelemetryFieldKey{Name: name},
                Value: val,
                Value: *val,
            })

        default:
@@ -227,8 +227,9 @@
        }
    }
    if maxAgg < 0 {
        //nolint:nilnil
        return nil, nil // empty result-set
        return &qbtypes.TimeSeriesData{
            QueryName: queryName,
        }, nil
    }

    buckets := make([]*qbtypes.AggregationBucket, maxAgg+1)
@@ -319,8 +320,9 @@ func readAsScalar(rows driver.Rows, queryName string) (*qbtypes.ScalarData, erro
    }

    return &qbtypes.ScalarData{
        Columns: cd,
        Data:    data,
        QueryName: queryName,
        Columns:   cd,
        Data:      data,
    }, nil
}
@@ -17,5 +17,5 @@ type BucketCache interface {
    // cached portion + list of gaps to fetch
    GetMissRanges(ctx context.Context, orgID valuer.UUID, q qbtypes.Query, step qbtypes.Step) (cached *qbtypes.Result, missing []*qbtypes.TimeRange)
    // store fresh buckets for future hits
    Put(ctx context.Context, orgID valuer.UUID, q qbtypes.Query, fresh *qbtypes.Result)
}
    Put(ctx context.Context, orgID valuer.UUID, q qbtypes.Query, step qbtypes.Step, fresh *qbtypes.Result)
}
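The new step parameter now threads through the whole read/write path: callers pass the same qbtypes.Step to both methods so that Put can align bucket boundaries the same way GetMissRanges computed the gaps. A hedged sketch of the expected call pattern; execute and mergeResults below are hypothetical stand-ins for the querier's real helpers, not functions from this PR:

// Sketch of how a caller might drive the updated interface. execute() and
// mergeResults() are hypothetical placeholders for the querier's own logic.
func runWithCache(ctx context.Context, orgID valuer.UUID, bc BucketCache, q qbtypes.Query, step qbtypes.Step) (*qbtypes.Result, error) {
    cached, missing := bc.GetMissRanges(ctx, orgID, q, step)
    fresh := make([]*qbtypes.Result, 0, len(missing))
    for _, r := range missing {
        res, err := execute(ctx, q, r) // hypothetical: query only the gap [r.From, r.To)
        if err != nil {
            return nil, err
        }
        fresh = append(fresh, res)
    }
    merged := mergeResults(cached, fresh) // hypothetical merge of cached + fresh results
    bc.Put(ctx, orgID, q, step, merged)   // same step on the write path
    return merged, nil
}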
652
pkg/querier/postprocess.go
Normal file
@@ -0,0 +1,652 @@
|
||||
package querier

import (
	"context"
	"fmt"
	"sort"

	qbtypes "github.com/SigNoz/signoz/pkg/types/querybuildertypes/querybuildertypesv5"
	"github.com/SigNoz/signoz/pkg/types/telemetrytypes"
)

// queryInfo holds common query properties
type queryInfo struct {
	Name     string
	Disabled bool
	Step     qbtypes.Step
}

// getqueryInfo extracts common info from any query type
func getqueryInfo(spec any) queryInfo {
	switch s := spec.(type) {
	case qbtypes.QueryBuilderQuery[qbtypes.TraceAggregation]:
		return queryInfo{Name: s.Name, Disabled: s.Disabled, Step: s.StepInterval}
	case qbtypes.QueryBuilderQuery[qbtypes.LogAggregation]:
		return queryInfo{Name: s.Name, Disabled: s.Disabled, Step: s.StepInterval}
	case qbtypes.QueryBuilderQuery[qbtypes.MetricAggregation]:
		return queryInfo{Name: s.Name, Disabled: s.Disabled, Step: s.StepInterval}
	case qbtypes.QueryBuilderFormula:
		return queryInfo{Name: s.Name, Disabled: false}
	case qbtypes.PromQuery:
		return queryInfo{Name: s.Name, Disabled: s.Disabled, Step: s.Step}
	case qbtypes.ClickHouseQuery:
		return queryInfo{Name: s.Name, Disabled: s.Disabled}
	}
	return queryInfo{}
}

// getQueryName is a convenience function when only name is needed
func getQueryName(spec any) string {
	return getqueryInfo(spec).Name
}

func (q *querier) postProcessResults(ctx context.Context, results map[string]any, req *qbtypes.QueryRangeRequest) (map[string]any, error) {
	// Convert results to typed format for processing
	typedResults := make(map[string]*qbtypes.Result)
	for name, result := range results {
		typedResults[name] = &qbtypes.Result{
			Value: result,
		}
	}

	for _, query := range req.CompositeQuery.Queries {
		switch spec := query.Spec.(type) {
		case qbtypes.QueryBuilderQuery[qbtypes.TraceAggregation]:
			if result, ok := typedResults[spec.Name]; ok {
				result = postProcessBuilderQuery(q, result, spec, req)
				typedResults[spec.Name] = result
			}
		case qbtypes.QueryBuilderQuery[qbtypes.LogAggregation]:
			if result, ok := typedResults[spec.Name]; ok {
				result = postProcessBuilderQuery(q, result, spec, req)
				typedResults[spec.Name] = result
			}
		case qbtypes.QueryBuilderQuery[qbtypes.MetricAggregation]:
			if result, ok := typedResults[spec.Name]; ok {
				result = postProcessMetricQuery(q, result, spec, req)
				typedResults[spec.Name] = result
			}
		}
	}

	// Apply formula calculations
	typedResults = q.applyFormulas(ctx, typedResults, req)

	// Filter out disabled queries
	typedResults = q.filterDisabledQueries(typedResults, req)

	// Apply table formatting for UI if requested
	if req.FormatOptions != nil && req.FormatOptions.FormatTableResultForUI && req.RequestType == qbtypes.RequestTypeScalar {
		// Format results as a table - this merges all queries into a single table
		tableResult := q.formatScalarResultsAsTable(typedResults, req)

		// Return the table under the first query's name so it gets included in results
		if len(req.CompositeQuery.Queries) > 0 {
			firstQueryName := getQueryName(req.CompositeQuery.Queries[0].Spec)
			if firstQueryName != "" && tableResult["table"] != nil {
				// Return table under first query name
				return map[string]any{firstQueryName: tableResult["table"]}, nil
			}
		}

		return tableResult, nil
	}

	// Convert back to map[string]any
	finalResults := make(map[string]any)
	for name, result := range typedResults {
		finalResults[name] = result.Value
	}

	return finalResults, nil
}
// postProcessBuilderQuery applies postprocessing to a single builder query result
func postProcessBuilderQuery[T any](
	q *querier,
	result *qbtypes.Result,
	query qbtypes.QueryBuilderQuery[T],
	_ *qbtypes.QueryRangeRequest,
) *qbtypes.Result {

	// Apply functions
	if len(query.Functions) > 0 {
		result = q.applyFunctions(result, query.Functions)
	}

	return result
}

// postProcessMetricQuery applies postprocessing to a metric query result
func postProcessMetricQuery(
	q *querier,
	result *qbtypes.Result,
	query qbtypes.QueryBuilderQuery[qbtypes.MetricAggregation],
	req *qbtypes.QueryRangeRequest,
) *qbtypes.Result {

	if query.Limit > 0 {
		result = q.applySeriesLimit(result, query.Limit, query.Order)
	}

	if len(query.Functions) > 0 {
		result = q.applyFunctions(result, query.Functions)
	}

	// Apply reduce to for scalar request type
	if req.RequestType == qbtypes.RequestTypeScalar {
		if len(query.Aggregations) > 0 && query.Aggregations[0].ReduceTo != qbtypes.ReduceToUnknown {
			result = q.applyMetricReduceTo(result, query.Aggregations[0].ReduceTo)
		}
	}

	return result
}

// applyMetricReduceTo applies reduce to operation using the metric's ReduceTo field
func (q *querier) applyMetricReduceTo(result *qbtypes.Result, reduceOp qbtypes.ReduceTo) *qbtypes.Result {
	tsData, ok := result.Value.(*qbtypes.TimeSeriesData)
	if !ok {
		return result
	}

	if tsData != nil {
		for _, agg := range tsData.Aggregations {
			for i, series := range agg.Series {
				// Use the FunctionReduceTo helper
				reducedSeries := qbtypes.FunctionReduceTo(series, reduceOp)
				agg.Series[i] = reducedSeries
			}
		}
	}

	scalarData := convertTimeSeriesDataToScalar(tsData, tsData.QueryName)
	result.Value = scalarData

	return result
}
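The reduce-to step collapses each time series to a single representative value before the scalar/table formatting. A minimal sketch of one plausible operator, assuming "last" semantics (the real qbtypes.FunctionReduceTo presumably handles the full set of reduce operators and works on qbtypes.TimeSeries, not plain slices):

	package main

	import "fmt"

	// reduceLast mirrors what a "last" reduce-to operator would do:
	// collapse a series of values to its final point. Hypothetical
	// standalone helper, not the repository's implementation.
	func reduceLast(values []float64) (float64, bool) {
		if len(values) == 0 {
			return 0, false
		}
		return values[len(values)-1], true
	}

	func main() {
		v, ok := reduceLast([]float64{3, 7, 42})
		fmt.Println(v, ok) // 42 true
	}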

// applySeriesLimit limits the number of series in the result
func (q *querier) applySeriesLimit(result *qbtypes.Result, limit int, orderBy []qbtypes.OrderBy) *qbtypes.Result {
	tsData, ok := result.Value.(*qbtypes.TimeSeriesData)
	if !ok {
		return result
	}

	if tsData != nil {
		for _, agg := range tsData.Aggregations {
			// Use the ApplySeriesLimit function from querybuildertypes
			agg.Series = qbtypes.ApplySeriesLimit(agg.Series, orderBy, limit)
		}
	}

	return result
}

// applyFunctions applies functions to time series data
func (q *querier) applyFunctions(result *qbtypes.Result, functions []qbtypes.Function) *qbtypes.Result {
	tsData, ok := result.Value.(*qbtypes.TimeSeriesData)
	if !ok {
		return result
	}

	if tsData != nil {
		for _, agg := range tsData.Aggregations {
			for i, series := range agg.Series {
				agg.Series[i] = qbtypes.ApplyFunctions(functions, series)
			}
		}
	}

	return result
}

// applyFormulas processes formula queries in the composite query
func (q *querier) applyFormulas(ctx context.Context, results map[string]*qbtypes.Result, req *qbtypes.QueryRangeRequest) map[string]*qbtypes.Result {
	// Collect formula queries
	formulaQueries := make(map[string]qbtypes.QueryBuilderFormula)

	for _, query := range req.CompositeQuery.Queries {
		if query.Type == qbtypes.QueryTypeFormula {
			if formula, ok := query.Spec.(qbtypes.QueryBuilderFormula); ok {
				formulaQueries[formula.Name] = formula
			}
		}
	}

	// Process each formula
	for name, formula := range formulaQueries {
		// Check if we're dealing with time series or scalar data
		if req.RequestType == qbtypes.RequestTypeTimeSeries {
			result := q.processTimeSeriesFormula(ctx, results, formula, req)
			if result != nil {
				results[name] = result
			}
		}
	}

	return results
}

// processTimeSeriesFormula handles formula evaluation for time series data
func (q *querier) processTimeSeriesFormula(
	ctx context.Context,
	results map[string]*qbtypes.Result,
	formula qbtypes.QueryBuilderFormula,
	_ *qbtypes.QueryRangeRequest,
) *qbtypes.Result {
	// Prepare time series data for formula evaluation
	timeSeriesData := make(map[string]*qbtypes.TimeSeriesData)

	// Extract time series data from results
	for queryName, result := range results {
		if tsData, ok := result.Value.(*qbtypes.TimeSeriesData); ok {
			timeSeriesData[queryName] = tsData
		}
	}

	// Create formula evaluator
	// TODO(srikanthccv): add conditional default zero
	canDefaultZero := make(map[string]bool)
	evaluator, err := qbtypes.NewFormulaEvaluator(formula.Expression, canDefaultZero)
	if err != nil {
		q.logger.ErrorContext(ctx, "failed to create formula evaluator", "error", err, "formula", formula.Name)
		return nil
	}

	// Evaluate the formula
	formulaSeries, err := evaluator.EvaluateFormula(timeSeriesData)
	if err != nil {
		q.logger.ErrorContext(ctx, "failed to evaluate formula", "error", err, "formula", formula.Name)
		return nil
	}

	// Create result for formula
	formulaResult := &qbtypes.TimeSeriesData{
		QueryName: formula.Name,
		Aggregations: []*qbtypes.AggregationBucket{
			{
				Index:  0,
				Series: formulaSeries,
			},
		},
	}

	// Apply functions if any
	result := &qbtypes.Result{
		Value: formulaResult,
	}

	if len(formula.Functions) > 0 {
		result = q.applyFunctions(result, formula.Functions)
	}

	return result
}

// filterDisabledQueries removes results for disabled queries
func (q *querier) filterDisabledQueries(results map[string]*qbtypes.Result, req *qbtypes.QueryRangeRequest) map[string]*qbtypes.Result {
	filtered := make(map[string]*qbtypes.Result)

	for _, query := range req.CompositeQuery.Queries {
		info := getqueryInfo(query.Spec)
		if !info.Disabled {
			if result, ok := results[info.Name]; ok {
				filtered[info.Name] = result
			}
		}
	}

	return filtered
}

// formatScalarResultsAsTable formats scalar results as a unified table for UI display
func (q *querier) formatScalarResultsAsTable(results map[string]*qbtypes.Result, _ *qbtypes.QueryRangeRequest) map[string]any {
	if len(results) == 0 {
		return map[string]any{"table": &qbtypes.ScalarData{}}
	}

	// Convert all results to ScalarData first
	scalarResults := make(map[string]*qbtypes.ScalarData)
	for name, result := range results {
		if sd, ok := result.Value.(*qbtypes.ScalarData); ok {
			scalarResults[name] = sd
		} else if tsData, ok := result.Value.(*qbtypes.TimeSeriesData); ok {
			scalarResults[name] = convertTimeSeriesDataToScalar(tsData, name)
		}
	}

	// If single result already has multiple queries, just deduplicate
	if len(scalarResults) == 1 {
		for _, sd := range scalarResults {
			if hasMultipleQueries(sd) {
				return map[string]any{"table": deduplicateRows(sd)}
			}
		}
	}

	// Otherwise merge all results
	merged := mergeScalarData(scalarResults)
	return map[string]any{"table": merged}
}

// convertTimeSeriesDataToScalar converts time series to scalar format
func convertTimeSeriesDataToScalar(tsData *qbtypes.TimeSeriesData, queryName string) *qbtypes.ScalarData {
	if tsData == nil || len(tsData.Aggregations) == 0 {
		return &qbtypes.ScalarData{QueryName: queryName}
	}

	columns := []*qbtypes.ColumnDescriptor{}

	// Add group columns from first series
	if len(tsData.Aggregations[0].Series) > 0 {
		for _, label := range tsData.Aggregations[0].Series[0].Labels {
			columns = append(columns, &qbtypes.ColumnDescriptor{
				TelemetryFieldKey: label.Key,
				QueryName:         queryName,
				Type:              qbtypes.ColumnTypeGroup,
			})
		}
	}

	// Add aggregation columns
	for _, agg := range tsData.Aggregations {
		name := agg.Alias
		if name == "" {
			name = fmt.Sprintf("__result_%d", agg.Index)
		}
		columns = append(columns, &qbtypes.ColumnDescriptor{
			TelemetryFieldKey: telemetrytypes.TelemetryFieldKey{Name: name},
			QueryName:         queryName,
			AggregationIndex:  int64(agg.Index),
			Meta:              agg.Meta,
			Type:              qbtypes.ColumnTypeAggregation,
		})
	}

	// Build rows
	data := [][]any{}
	for seriesIdx, series := range tsData.Aggregations[0].Series {
		row := make([]any, len(columns))

		// Add group values
		for i, label := range series.Labels {
			row[i] = label.Value
		}

		// Add aggregation values (last value)
		groupColCount := len(series.Labels)
		for aggIdx, agg := range tsData.Aggregations {
			if seriesIdx < len(agg.Series) && len(agg.Series[seriesIdx].Values) > 0 {
				lastValue := agg.Series[seriesIdx].Values[len(agg.Series[seriesIdx].Values)-1].Value
				row[groupColCount+aggIdx] = lastValue
			} else {
				row[groupColCount+aggIdx] = "n/a"
			}
		}

		data = append(data, row)
	}

	return &qbtypes.ScalarData{
		QueryName: queryName,
		Columns:   columns,
		Data:      data,
	}
}
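To make the conversion concrete: a single aggregation bucket with two series labeled service.name=api and service.name=web, whose last points are 42 and 17, becomes a table with columns [service.name, __result_0] and rows ["api", 42] and ["web", 17] (assuming no alias is set on the bucket, so the __result_0 fallback name applies).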

// hasMultipleQueries checks if ScalarData contains columns from multiple queries
func hasMultipleQueries(sd *qbtypes.ScalarData) bool {
	queries := make(map[string]bool)
	for _, col := range sd.Columns {
		if col.Type == qbtypes.ColumnTypeAggregation && col.QueryName != "" {
			queries[col.QueryName] = true
		}
	}
	return len(queries) > 1
}

// deduplicateRows removes duplicate rows based on group columns
func deduplicateRows(sd *qbtypes.ScalarData) *qbtypes.ScalarData {
	// Find group column indices
	groupIndices := []int{}
	for i, col := range sd.Columns {
		if col.Type == qbtypes.ColumnTypeGroup {
			groupIndices = append(groupIndices, i)
		}
	}

	// Build unique rows map
	uniqueRows := make(map[string][]any)
	for _, row := range sd.Data {
		key := buildRowKey(row, groupIndices)
		if existing, found := uniqueRows[key]; found {
			// Merge non-n/a values
			for i, val := range row {
				if existing[i] == "n/a" && val != "n/a" {
					existing[i] = val
				}
			}
		} else {
			rowCopy := make([]any, len(row))
			copy(rowCopy, row)
			uniqueRows[key] = rowCopy
		}
	}

	// Convert back to slice
	data := make([][]any, 0, len(uniqueRows))
	for _, row := range uniqueRows {
		data = append(data, row)
	}

	// Sort by first aggregation column
	sortByFirstAggregation(data, sd.Columns)

	return &qbtypes.ScalarData{
		Columns: sd.Columns,
		Data:    data,
	}
}

// mergeScalarData merges multiple scalar data results
func mergeScalarData(results map[string]*qbtypes.ScalarData) *qbtypes.ScalarData {
	// Collect unique group columns
	groupCols := []string{}
	groupColMap := make(map[string]*qbtypes.ColumnDescriptor)

	for _, sd := range results {
		for _, col := range sd.Columns {
			if col.Type == qbtypes.ColumnTypeGroup {
				if _, exists := groupColMap[col.Name]; !exists {
					groupColMap[col.Name] = col
					groupCols = append(groupCols, col.Name)
				}
			}
		}
	}

	// Build final columns
	columns := []*qbtypes.ColumnDescriptor{}

	// Add group columns
	for _, name := range groupCols {
		columns = append(columns, groupColMap[name])
	}

	// Add aggregation columns from each query (sorted by query name)
	queryNames := make([]string, 0, len(results))
	for name := range results {
		queryNames = append(queryNames, name)
	}
	sort.Strings(queryNames)

	for _, queryName := range queryNames {
		sd := results[queryName]
		for _, col := range sd.Columns {
			if col.Type == qbtypes.ColumnTypeAggregation {
				columns = append(columns, col)
			}
		}
	}

	// Merge rows
	rowMap := make(map[string][]any)

	for queryName, sd := range results {
		// Create index mappings
		groupMap := make(map[string]int)
		for i, col := range sd.Columns {
			if col.Type == qbtypes.ColumnTypeGroup {
				groupMap[col.Name] = i
			}
		}

		// Process each row
		for _, row := range sd.Data {
			key := buildKeyFromGroupCols(row, groupMap, groupCols)

			if _, exists := rowMap[key]; !exists {
				// Initialize new row
				newRow := make([]any, len(columns))
				// Set group values
				for i, colName := range groupCols {
					if idx, ok := groupMap[colName]; ok && idx < len(row) {
						newRow[i] = row[idx]
					} else {
						newRow[i] = "n/a"
					}
				}
				// Initialize all aggregations to n/a
				for i := len(groupCols); i < len(columns); i++ {
					newRow[i] = "n/a"
				}
				rowMap[key] = newRow
			}

			// Set aggregation values for this query
			mergedRow := rowMap[key]
			colIdx := len(groupCols)
			for _, col := range columns[len(groupCols):] {
				if col.QueryName == queryName {
					// Find the value in the original row
					for i, origCol := range sd.Columns {
						if origCol.Type == qbtypes.ColumnTypeAggregation &&
							origCol.AggregationIndex == col.AggregationIndex {
							if i < len(row) {
								mergedRow[colIdx] = row[i]
							}
							break
						}
					}
				}
				colIdx++
			}
		}
	}

	// Convert to slice
	data := make([][]any, 0, len(rowMap))
	for _, row := range rowMap {
		data = append(data, row)
	}

	// Sort by first aggregation column
	sortByFirstAggregation(data, columns)

	return &qbtypes.ScalarData{
		Columns: columns,
		Data:    data,
	}
}

// buildRowKey builds a unique key from row values at specified indices
func buildRowKey(row []any, indices []int) string {
	parts := make([]string, len(indices))
	for i, idx := range indices {
		if idx < len(row) {
			parts[i] = fmt.Sprintf("%v", row[idx])
		} else {
			parts[i] = "n/a"
		}
	}
	return fmt.Sprintf("%v", parts)
}

// buildKeyFromGroupCols builds a key from group column values
func buildKeyFromGroupCols(row []any, groupMap map[string]int, groupCols []string) string {
	parts := make([]string, len(groupCols))
	for i, colName := range groupCols {
		if idx, ok := groupMap[colName]; ok && idx < len(row) {
			parts[i] = fmt.Sprintf("%v", row[idx])
		} else {
			parts[i] = "n/a"
		}
	}
	return fmt.Sprintf("%v", parts)
}

// sortByFirstAggregation sorts data by the first aggregation column (descending)
func sortByFirstAggregation(data [][]any, columns []*qbtypes.ColumnDescriptor) {
	// Find first aggregation column
	aggIdx := -1
	for i, col := range columns {
		if col.Type == qbtypes.ColumnTypeAggregation {
			aggIdx = i
			break
		}
	}

	if aggIdx < 0 {
		return
	}

	sort.SliceStable(data, func(i, j int) bool {
		return compareValues(data[i][aggIdx], data[j][aggIdx]) > 0
	})
}

// compareValues compares two values for sorting (handles n/a and numeric types)
func compareValues(a, b any) int {
	// Handle n/a values
	if a == "n/a" && b == "n/a" {
		return 0
	}
	if a == "n/a" {
		return -1
	}
	if b == "n/a" {
		return 1
	}

	// Compare numeric values
	aFloat, aOk := toFloat64(a)
	bFloat, bOk := toFloat64(b)

	if aOk && bOk {
		if aFloat > bFloat {
			return 1
		} else if aFloat < bFloat {
			return -1
		}
		return 0
	}

	// Fallback to string comparison
	return 0
}

// toFloat64 attempts to convert a value to float64
func toFloat64(v any) (float64, bool) {
	switch val := v.(type) {
	case float64:
		return val, true
	case int64:
		return float64(val), true
	case int:
		return float64(val), true
	case int32:
		return float64(val), true
	}
	return 0, false
}
@@ -5,12 +5,14 @@ import (
	"fmt"
	"log/slog"
	"slices"
	"strconv"
	"sync"

	"github.com/SigNoz/signoz/pkg/errors"
	"github.com/SigNoz/signoz/pkg/factory"
	"github.com/SigNoz/signoz/pkg/prometheus"
	"github.com/SigNoz/signoz/pkg/telemetrystore"
	"github.com/SigNoz/signoz/pkg/types/metrictypes"
	"github.com/SigNoz/signoz/pkg/types/telemetrytypes"
	"golang.org/x/exp/maps"

@@ -54,8 +56,82 @@ func New(
	}
}

// extractShiftFromBuilderQuery extracts the shift value from timeShift function if present
func extractShiftFromBuilderQuery[T any](spec qbtypes.QueryBuilderQuery[T]) int64 {
	for _, fn := range spec.Functions {
		if fn.Name == qbtypes.FunctionNameTimeShift && len(fn.Args) > 0 {
			switch v := fn.Args[0].Value.(type) {
			case float64:
				return int64(v)
			case int64:
				return v
			case int:
				return int64(v)
			case string:
				if shiftFloat, err := strconv.ParseFloat(v, 64); err == nil {
					return int64(shiftFloat)
				}
			}
		}
	}
	return 0
}

// adjustTimeRangeForShift adjusts the time range based on the shift value from timeShift function
func adjustTimeRangeForShift[T any](spec qbtypes.QueryBuilderQuery[T], tr qbtypes.TimeRange, kind qbtypes.RequestType) qbtypes.TimeRange {
	// Only apply time shift for time series and scalar queries
	// Raw/list queries don't support timeshift
	if kind != qbtypes.RequestTypeTimeSeries && kind != qbtypes.RequestTypeScalar {
		return tr
	}

	// Use the ShiftBy field if it's already populated, otherwise extract it
	shiftBy := spec.ShiftBy
	if shiftBy == 0 {
		shiftBy = extractShiftFromBuilderQuery(spec)
	}

	if shiftBy == 0 {
		return tr
	}

	// ShiftBy is in seconds, convert to milliseconds and shift backward in time
	shiftMS := shiftBy * 1000
	return qbtypes.TimeRange{
		From: tr.From - uint64(shiftMS),
		To:   tr.To - uint64(shiftMS),
	}
}
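To make the arithmetic concrete: a timeShift(3600) on a window of [10,000,000 ms, 20,000,000 ms] moves both ends back by 3,600,000 ms, which is exactly what the shift tests further down assert. A tiny self-contained check:

	package main

	import "fmt"

	func main() {
		// timeShift(3600) moves the queried window back by 3,600,000 ms.
		from, to := uint64(10_000_000), uint64(20_000_000)
		shiftMS := uint64(3600 * 1000)
		fmt.Println(from-shiftMS, to-shiftMS) // 6400000 16400000
	}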

func (q *querier) QueryRange(ctx context.Context, orgID valuer.UUID, req *qbtypes.QueryRangeRequest) (*qbtypes.QueryRangeResponse, error) {

	// First pass: collect all metric names that need temporality
	metricNames := make([]string, 0)
	for _, query := range req.CompositeQuery.Queries {
		if query.Type == qbtypes.QueryTypeBuilder {
			if spec, ok := query.Spec.(qbtypes.QueryBuilderQuery[qbtypes.MetricAggregation]); ok {
				for _, agg := range spec.Aggregations {
					if agg.MetricName != "" {
						metricNames = append(metricNames, agg.MetricName)
					}
				}
			}
		}
	}

	// Fetch temporality for all metrics at once
	var metricTemporality map[string]metrictypes.Temporality
	if len(metricNames) > 0 {
		var err error
		metricTemporality, err = q.metadataStore.FetchTemporalityMulti(ctx, metricNames...)
		if err != nil {
			q.logger.WarnContext(ctx, "failed to fetch metric temporality", "error", err, "metrics", metricNames)
			// Continue without temporality - statement builder will handle unspecified
			metricTemporality = make(map[string]metrictypes.Temporality)
		}
		q.logger.DebugContext(ctx, "fetched metric temporalities", "metric_temporality", metricTemporality)
	}

	queries := make(map[string]qbtypes.Query)
	steps := make(map[string]qbtypes.Step)

@@ -79,15 +155,28 @@ func (q *querier) QueryRange(ctx context.Context, orgID valuer.UUID, req *qbtype
		case qbtypes.QueryTypeBuilder:
			switch spec := query.Spec.(type) {
			case qbtypes.QueryBuilderQuery[qbtypes.TraceAggregation]:
				bq := newBuilderQuery(q.telemetryStore, q.traceStmtBuilder, spec, qbtypes.TimeRange{From: req.Start, To: req.End}, req.RequestType)
				spec.ShiftBy = extractShiftFromBuilderQuery(spec)
				timeRange := adjustTimeRangeForShift(spec, qbtypes.TimeRange{From: req.Start, To: req.End}, req.RequestType)
				bq := newBuilderQuery(q.telemetryStore, q.traceStmtBuilder, spec, timeRange, req.RequestType)
				queries[spec.Name] = bq
				steps[spec.Name] = spec.StepInterval
			case qbtypes.QueryBuilderQuery[qbtypes.LogAggregation]:
				bq := newBuilderQuery(q.telemetryStore, q.logStmtBuilder, spec, qbtypes.TimeRange{From: req.Start, To: req.End}, req.RequestType)
				spec.ShiftBy = extractShiftFromBuilderQuery(spec)
				timeRange := adjustTimeRangeForShift(spec, qbtypes.TimeRange{From: req.Start, To: req.End}, req.RequestType)
				bq := newBuilderQuery(q.telemetryStore, q.logStmtBuilder, spec, timeRange, req.RequestType)
				queries[spec.Name] = bq
				steps[spec.Name] = spec.StepInterval
			case qbtypes.QueryBuilderQuery[qbtypes.MetricAggregation]:
				bq := newBuilderQuery(q.telemetryStore, q.metricStmtBuilder, spec, qbtypes.TimeRange{From: req.Start, To: req.End}, req.RequestType)
				for i := range spec.Aggregations {
					if spec.Aggregations[i].MetricName != "" && spec.Aggregations[i].Temporality == metrictypes.Unknown {
						if temp, ok := metricTemporality[spec.Aggregations[i].MetricName]; ok && temp != metrictypes.Unknown {
							spec.Aggregations[i].Temporality = temp
						}
					}
				}
				spec.ShiftBy = extractShiftFromBuilderQuery(spec)
				timeRange := adjustTimeRangeForShift(spec, qbtypes.TimeRange{From: req.Start, To: req.End}, req.RequestType)
				bq := newBuilderQuery(q.telemetryStore, q.metricStmtBuilder, spec, timeRange, req.RequestType)
				queries[spec.Name] = bq
				steps[spec.Name] = spec.StepInterval
			default:

@@ -133,13 +222,18 @@ func (q *querier) run(ctx context.Context, orgID valuer.UUID, qs map[string]qbty
		}
	}

	processedResults, err := q.postProcessResults(ctx, results, req)
	if err != nil {
		return nil, err
	}

	return &qbtypes.QueryRangeResponse{
		Type: req.RequestType,
		Data: struct {
			Results  []any    `json:"results"`
			Warnings []string `json:"warnings"`
		}{
			Results:  maps.Values(results),
			Results:  maps.Values(processedResults),
			Warnings: warnings,
		},
		Meta: struct {

@@ -173,7 +267,7 @@ func (q *querier) executeWithCache(ctx context.Context, orgID valuer.UUID, query
			return nil, err
		}
		// Store in cache for future use
		q.bucketCache.Put(ctx, orgID, query, result)
		q.bucketCache.Put(ctx, orgID, query, step, result)
		return result, nil
	}
}

@@ -183,6 +277,10 @@ func (q *querier) executeWithCache(ctx context.Context, orgID valuer.UUID, query
	errors := make([]error, len(missingRanges))
	totalStats := qbtypes.ExecStats{}

	q.logger.DebugContext(ctx, "executing queries for missing ranges",
		"missing_ranges_count", len(missingRanges),
		"ranges", missingRanges)

	sem := make(chan struct{}, 4)
	var wg sync.WaitGroup

@@ -224,7 +322,7 @@ func (q *querier) executeWithCache(ctx context.Context, orgID valuer.UUID, query
		if err != nil {
			return nil, err
		}
		q.bucketCache.Put(ctx, orgID, query, result)
		q.bucketCache.Put(ctx, orgID, query, step, result)
		return result, nil
	}
}

@@ -248,7 +346,7 @@ func (q *querier) executeWithCache(ctx context.Context, orgID valuer.UUID, query
	mergedResult.Stats.DurationMS += totalStats.DurationMS

	// Store merged result in cache
	q.bucketCache.Put(ctx, orgID, query, mergedResult)
	q.bucketCache.Put(ctx, orgID, query, step, mergedResult)

	return mergedResult, nil
}

@@ -261,11 +359,17 @@ func (q *querier) createRangedQuery(originalQuery qbtypes.Query, timeRange qbtyp
	case *chSQLQuery:
		return newchSQLQuery(q.telemetryStore, qt.query, qt.args, timeRange, qt.kind)
	case *builderQuery[qbtypes.TraceAggregation]:
		return newBuilderQuery(q.telemetryStore, q.traceStmtBuilder, qt.spec, timeRange, qt.kind)
		qt.spec.ShiftBy = extractShiftFromBuilderQuery(qt.spec)
		adjustedTimeRange := adjustTimeRangeForShift(qt.spec, timeRange, qt.kind)
		return newBuilderQuery(q.telemetryStore, q.traceStmtBuilder, qt.spec, adjustedTimeRange, qt.kind)
	case *builderQuery[qbtypes.LogAggregation]:
		return newBuilderQuery(q.telemetryStore, q.logStmtBuilder, qt.spec, timeRange, qt.kind)
		qt.spec.ShiftBy = extractShiftFromBuilderQuery(qt.spec)
		adjustedTimeRange := adjustTimeRangeForShift(qt.spec, timeRange, qt.kind)
		return newBuilderQuery(q.telemetryStore, q.logStmtBuilder, qt.spec, adjustedTimeRange, qt.kind)
	case *builderQuery[qbtypes.MetricAggregation]:
		return newBuilderQuery(q.telemetryStore, q.metricStmtBuilder, qt.spec, timeRange, qt.kind)
		qt.spec.ShiftBy = extractShiftFromBuilderQuery(qt.spec)
		adjustedTimeRange := adjustTimeRangeForShift(qt.spec, timeRange, qt.kind)
		return newBuilderQuery(q.telemetryStore, q.metricStmtBuilder, qt.spec, adjustedTimeRange, qt.kind)
	default:
		return nil
	}
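The missing-range execution above bounds fan-out with a buffered channel used as a counting semaphore (sem := make(chan struct{}, 4)) plus a sync.WaitGroup. The general shape of that pattern, as a minimal standalone sketch:

	package main

	import (
		"fmt"
		"sync"
	)

	func main() {
		tasks := []int{1, 2, 3, 4, 5, 6, 7, 8}
		sem := make(chan struct{}, 4) // at most 4 tasks in flight
		var wg sync.WaitGroup

		for _, t := range tasks {
			wg.Add(1)
			sem <- struct{}{} // acquire a slot before launching
			go func(t int) {
				defer wg.Done()
				defer func() { <-sem }() // release the slot on completion
				fmt.Println("running task", t)
			}(t)
		}
		wg.Wait()
	}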
@@ -273,8 +377,29 @@ func (q *querier) createRangedQuery(originalQuery qbtypes.Query, timeRange qbtyp

// mergeResults merges cached result with fresh results
func (q *querier) mergeResults(cached *qbtypes.Result, fresh []*qbtypes.Result) *qbtypes.Result {
	if cached == nil && len(fresh) == 1 {
		return fresh[0]
	if cached == nil {
		if len(fresh) == 1 {
			return fresh[0]
		}
		if len(fresh) == 0 {
			return nil
		}
		// If cached is nil but we have multiple fresh results, we need to merge them
		// We need to merge all fresh results properly to avoid duplicates
		merged := &qbtypes.Result{
			Type:     fresh[0].Type,
			Stats:    fresh[0].Stats,
			Warnings: fresh[0].Warnings,
		}

		// Merge all fresh results including the first one
		switch merged.Type {
		case qbtypes.RequestTypeTimeSeries:
			// Pass nil as cached value to ensure proper merging of all fresh results
			merged.Value = q.mergeTimeSeriesResults(nil, fresh)
		}

		return merged
	}

	// Start with cached result

@@ -315,23 +440,52 @@ func (q *querier) mergeResults(cached *qbtypes.Result, fresh []*qbtypes.Result)
// mergeTimeSeriesResults merges time series data
func (q *querier) mergeTimeSeriesResults(cachedValue *qbtypes.TimeSeriesData, freshResults []*qbtypes.Result) *qbtypes.TimeSeriesData {

	// Map to store merged series by query name and series key
	// Map to store merged series by aggregation index and series key
	seriesMap := make(map[int]map[string]*qbtypes.TimeSeries)
	// Map to store aggregation bucket metadata
	bucketMetadata := make(map[int]*qbtypes.AggregationBucket)

	for _, aggBucket := range cachedValue.Aggregations {
		if seriesMap[aggBucket.Index] == nil {
			seriesMap[aggBucket.Index] = make(map[string]*qbtypes.TimeSeries)
		}
		for _, series := range aggBucket.Series {
			key := qbtypes.GetUniqueSeriesKey(series.Labels)
			seriesMap[aggBucket.Index][key] = series
	// Process cached data if available
	if cachedValue != nil && cachedValue.Aggregations != nil {
		for _, aggBucket := range cachedValue.Aggregations {
			if seriesMap[aggBucket.Index] == nil {
				seriesMap[aggBucket.Index] = make(map[string]*qbtypes.TimeSeries)
			}
			if bucketMetadata[aggBucket.Index] == nil {
				bucketMetadata[aggBucket.Index] = aggBucket
			}
			for _, series := range aggBucket.Series {
				key := qbtypes.GetUniqueSeriesKey(series.Labels)
				if existingSeries, ok := seriesMap[aggBucket.Index][key]; ok {
					// Merge values from duplicate series in cached data, avoiding duplicate timestamps
					timestampMap := make(map[int64]bool)
					for _, v := range existingSeries.Values {
						timestampMap[v.Timestamp] = true
					}

					// Only add values with new timestamps
					for _, v := range series.Values {
						if !timestampMap[v.Timestamp] {
							existingSeries.Values = append(existingSeries.Values, v)
						}
					}
				} else {
					// Create a copy to avoid modifying the cached data
					seriesCopy := &qbtypes.TimeSeries{
						Labels: series.Labels,
						Values: make([]*qbtypes.TimeSeriesValue, len(series.Values)),
					}
					copy(seriesCopy.Values, series.Values)
					seriesMap[aggBucket.Index][key] = seriesCopy
				}
			}
		}
	}

	// Add fresh series
	for _, result := range freshResults {
		freshTS, ok := result.Value.(*qbtypes.TimeSeriesData)
		if !ok {
		if !ok || freshTS == nil || freshTS.Aggregations == nil {
			continue
		}

@@ -339,6 +493,12 @@ func (q *querier) mergeTimeSeriesResults(cachedValue *qbtypes.TimeSeriesData, fr
		if seriesMap[aggBucket.Index] == nil {
			seriesMap[aggBucket.Index] = make(map[string]*qbtypes.TimeSeries)
		}
		// Prefer fresh metadata over cached metadata
		if aggBucket.Alias != "" || aggBucket.Meta.Unit != "" {
			bucketMetadata[aggBucket.Index] = aggBucket
		} else if bucketMetadata[aggBucket.Index] == nil {
			bucketMetadata[aggBucket.Index] = aggBucket
		}
	}

	for _, aggBucket := range freshTS.Aggregations {

@@ -346,8 +506,19 @@ func (q *querier) mergeTimeSeriesResults(cachedValue *qbtypes.TimeSeriesData, fr
		key := qbtypes.GetUniqueSeriesKey(series.Labels)

		if existingSeries, ok := seriesMap[aggBucket.Index][key]; ok {
			// Merge values
			existingSeries.Values = append(existingSeries.Values, series.Values...)
			// Merge values, avoiding duplicate timestamps
			// Create a map to track existing timestamps
			timestampMap := make(map[int64]bool)
			for _, v := range existingSeries.Values {
				timestampMap[v.Timestamp] = true
			}

			// Only add values with new timestamps
			for _, v := range series.Values {
				if !timestampMap[v.Timestamp] {
					existingSeries.Values = append(existingSeries.Values, v)
				}
			}
		} else {
			// New series
			seriesMap[aggBucket.Index][key] = series

@@ -357,10 +528,18 @@ func (q *querier) mergeTimeSeriesResults(cachedValue *qbtypes.TimeSeriesData, fr
	}

	result := &qbtypes.TimeSeriesData{
		QueryName:    cachedValue.QueryName,
		Aggregations: []*qbtypes.AggregationBucket{},
	}

	// Set QueryName from cached or first fresh result
	if cachedValue != nil {
		result.QueryName = cachedValue.QueryName
	} else if len(freshResults) > 0 {
		if freshTS, ok := freshResults[0].Value.(*qbtypes.TimeSeriesData); ok && freshTS != nil {
			result.QueryName = freshTS.QueryName
		}
	}

	for index, series := range seriesMap {
		var aggSeries []*qbtypes.TimeSeries
		for _, s := range series {

@@ -377,10 +556,17 @@ func (q *querier) mergeTimeSeriesResults(cachedValue *qbtypes.TimeSeriesData, fr
		aggSeries = append(aggSeries, s)
	}

	result.Aggregations = append(result.Aggregations, &qbtypes.AggregationBucket{
	// Preserve bucket metadata from either cached or fresh results
	bucket := &qbtypes.AggregationBucket{
		Index:  index,
		Series: aggSeries,
	})
}
	}
	if metadata, ok := bucketMetadata[index]; ok {
		bucket.Alias = metadata.Alias
		bucket.Meta = metadata.Meta
	}

	result.Aggregations = append(result.Aggregations, bucket)
	}

	return result
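The timestamp-deduplication logic appears twice above (once for duplicate cached series, once when folding fresh points into an existing series). Its core idea, extracted into a minimal standalone sketch over plain (timestamp, value) pairs — a hypothetical helper, not the repository's types:

	package main

	import "fmt"

	type point struct {
		ts  int64
		val float64
	}

	// mergeByTimestamp appends only points whose timestamps are not
	// already present, matching the dedup behavior in the cache merge.
	func mergeByTimestamp(existing, incoming []point) []point {
		seen := make(map[int64]bool, len(existing))
		for _, p := range existing {
			seen[p.ts] = true
		}
		for _, p := range incoming {
			if !seen[p.ts] {
				existing = append(existing, p)
			}
		}
		return existing
	}

	func main() {
		cached := []point{{1000, 1}, {2000, 2}}
		fresh := []point{{2000, 2}, {3000, 3}} // 2000 overlaps with cached
		fmt.Println(mergeByTimestamp(cached, fresh)) // [{1000 1} {2000 2} {3000 3}]
	}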
229
pkg/querier/shift_test.go
Normal file
@@ -0,0 +1,229 @@
package querier

import (
	"testing"

	qbtypes "github.com/SigNoz/signoz/pkg/types/querybuildertypes/querybuildertypesv5"
	"github.com/stretchr/testify/assert"
)

// TestAdjustTimeRangeForShift tests the time range adjustment logic
func TestAdjustTimeRangeForShift(t *testing.T) {
	tests := []struct {
		name           string
		spec           qbtypes.QueryBuilderQuery[qbtypes.MetricAggregation]
		timeRange      qbtypes.TimeRange
		requestType    qbtypes.RequestType
		expectedFromMS uint64
		expectedToMS   uint64
	}{
		{
			name: "no shift",
			spec: qbtypes.QueryBuilderQuery[qbtypes.MetricAggregation]{
				Functions: []qbtypes.Function{},
			},
			timeRange: qbtypes.TimeRange{
				From: 1000000,
				To:   2000000,
			},
			requestType:    qbtypes.RequestTypeTimeSeries,
			expectedFromMS: 1000000,
			expectedToMS:   2000000,
		},
		{
			name: "shift by 60 seconds using timeShift function",
			spec: qbtypes.QueryBuilderQuery[qbtypes.MetricAggregation]{
				Functions: []qbtypes.Function{
					{
						Name: qbtypes.FunctionNameTimeShift,
						Args: []qbtypes.FunctionArg{
							{Value: "60"},
						},
					},
				},
			},
			timeRange: qbtypes.TimeRange{
				From: 1000000,
				To:   2000000,
			},
			requestType:    qbtypes.RequestTypeTimeSeries,
			expectedFromMS: 940000,  // 1000000 - 60000
			expectedToMS:   1940000, // 2000000 - 60000
		},
		{
			name: "shift by negative 30 seconds (future shift)",
			spec: qbtypes.QueryBuilderQuery[qbtypes.MetricAggregation]{
				Functions: []qbtypes.Function{
					{
						Name: qbtypes.FunctionNameTimeShift,
						Args: []qbtypes.FunctionArg{
							{Value: "-30"},
						},
					},
				},
			},
			timeRange: qbtypes.TimeRange{
				From: 1000000,
				To:   2000000,
			},
			requestType:    qbtypes.RequestTypeTimeSeries,
			expectedFromMS: 1030000, // 1000000 - (-30000)
			expectedToMS:   2030000, // 2000000 - (-30000)
		},
		{
			name: "no shift for raw request type even with timeShift function",
			spec: qbtypes.QueryBuilderQuery[qbtypes.MetricAggregation]{
				Functions: []qbtypes.Function{
					{
						Name: qbtypes.FunctionNameTimeShift,
						Args: []qbtypes.FunctionArg{
							{Value: "3600"},
						},
					},
				},
			},
			timeRange: qbtypes.TimeRange{
				From: 1000000,
				To:   2000000,
			},
			requestType:    qbtypes.RequestTypeRaw,
			expectedFromMS: 1000000, // No shift for raw queries
			expectedToMS:   2000000,
		},
		{
			name: "shift applied for scalar request type with timeShift function",
			spec: qbtypes.QueryBuilderQuery[qbtypes.MetricAggregation]{
				Functions: []qbtypes.Function{
					{
						Name: qbtypes.FunctionNameTimeShift,
						Args: []qbtypes.FunctionArg{
							{Value: "3600"},
						},
					},
				},
			},
			timeRange: qbtypes.TimeRange{
				From: 10000000,
				To:   20000000,
			},
			requestType:    qbtypes.RequestTypeScalar,
			expectedFromMS: 6400000,  // 10000000 - 3600000
			expectedToMS:   16400000, // 20000000 - 3600000
		},
	}

	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			result := adjustTimeRangeForShift(tt.spec, tt.timeRange, tt.requestType)
			assert.Equal(t, tt.expectedFromMS, result.From, "fromMS mismatch")
			assert.Equal(t, tt.expectedToMS, result.To, "toMS mismatch")
		})
	}
}

// TestExtractShiftFromBuilderQuery tests the shift extraction logic
func TestExtractShiftFromBuilderQuery(t *testing.T) {
	tests := []struct {
		name            string
		spec            qbtypes.QueryBuilderQuery[qbtypes.MetricAggregation]
		expectedShiftBy int64
	}{
		{
			name: "extract from timeShift function with float64",
			spec: qbtypes.QueryBuilderQuery[qbtypes.MetricAggregation]{
				Functions: []qbtypes.Function{
					{
						Name: qbtypes.FunctionNameTimeShift,
						Args: []qbtypes.FunctionArg{
							{Value: float64(3600)},
						},
					},
				},
			},
			expectedShiftBy: 3600,
		},
		{
			name: "extract from timeShift function with int64",
			spec: qbtypes.QueryBuilderQuery[qbtypes.MetricAggregation]{
				Functions: []qbtypes.Function{
					{
						Name: qbtypes.FunctionNameTimeShift,
						Args: []qbtypes.FunctionArg{
							{Value: int64(3600)},
						},
					},
				},
			},
			expectedShiftBy: 3600,
		},
		{
			name: "extract from timeShift function with string",
			spec: qbtypes.QueryBuilderQuery[qbtypes.MetricAggregation]{
				Functions: []qbtypes.Function{
					{
						Name: qbtypes.FunctionNameTimeShift,
						Args: []qbtypes.FunctionArg{
							{Value: "3600"},
						},
					},
				},
			},
			expectedShiftBy: 3600,
		},
		{
			name: "no timeShift function",
			spec: qbtypes.QueryBuilderQuery[qbtypes.MetricAggregation]{
				Functions: []qbtypes.Function{
					{
						Name: qbtypes.FunctionNameAbsolute,
					},
				},
			},
			expectedShiftBy: 0,
		},
		{
			name: "invalid timeShift value",
			spec: qbtypes.QueryBuilderQuery[qbtypes.MetricAggregation]{
				Functions: []qbtypes.Function{
					{
						Name: qbtypes.FunctionNameTimeShift,
						Args: []qbtypes.FunctionArg{
							{Value: "invalid"},
						},
					},
				},
			},
			expectedShiftBy: 0,
		},
		{
			name: "multiple functions with timeShift",
			spec: qbtypes.QueryBuilderQuery[qbtypes.MetricAggregation]{
				Functions: []qbtypes.Function{
					{
						Name: qbtypes.FunctionNameAbsolute,
					},
					{
						Name: qbtypes.FunctionNameTimeShift,
						Args: []qbtypes.FunctionArg{
							{Value: "1800"},
						},
					},
					{
						Name: qbtypes.FunctionNameClampMax,
						Args: []qbtypes.FunctionArg{
							{Value: "100"},
						},
					},
				},
			},
			expectedShiftBy: 1800,
		},
	}

	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			shiftBy := extractShiftFromBuilderQuery(tt.spec)
			assert.Equal(t, tt.expectedShiftBy, shiftBy)
		})
	}
}
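To exercise just these cases locally, something like `go test ./pkg/querier -run 'TestAdjustTimeRangeForShift|TestExtractShiftFromBuilderQuery'` should do it, assuming the standard module layout of the repository.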
@@ -1,6 +1,10 @@
package agentConf

import "github.com/SigNoz/signoz/pkg/query-service/model"
import (
	"github.com/SigNoz/signoz/pkg/query-service/model"
	"github.com/SigNoz/signoz/pkg/types/opamptypes"
	"github.com/SigNoz/signoz/pkg/valuer"
)

// Interface for features implemented via agent config.
// Eg: ingestion side signal pre-processing features like log processing pipelines etc

@@ -11,12 +15,13 @@ type AgentFeature interface {
	// Recommend config for an agent based on its `currentConfYaml` and
	// `configVersion` for the feature's settings
	RecommendAgentConfig(
		orgId valuer.UUID,
		currentConfYaml []byte,
		configVersion *ConfigVersion,
		configVersion *opamptypes.AgentConfigVersion,
	) (
		recommendedConfYaml []byte,

		// stored as agent_config_versions.last_config in current agentConf model
		// stored as agent_config_version.config in current agentConf model
		// TODO(Raj): maybe refactor agentConf further and clean this up
		serializedSettingsUsed string,
@@ -4,10 +4,14 @@ import (
	"context"
	"database/sql"
	"fmt"
	"strings"
	"time"

	"github.com/SigNoz/signoz/pkg/query-service/model"
	"github.com/google/uuid"
	"github.com/jmoiron/sqlx"
	"github.com/SigNoz/signoz/pkg/sqlstore"
	"github.com/SigNoz/signoz/pkg/types"
	"github.com/SigNoz/signoz/pkg/types/opamptypes"
	"github.com/SigNoz/signoz/pkg/valuer"
	"github.com/pkg/errors"
	"go.uber.org/zap"
	"golang.org/x/exp/slices"

@@ -15,42 +19,33 @@ import (

// Repo handles DDL and DML ops on ingestion rules
type Repo struct {
	db *sqlx.DB
	store sqlstore.SQLStore
}

func (r *Repo) GetConfigHistory(
	ctx context.Context, typ ElementTypeDef, limit int,
) ([]ConfigVersion, *model.ApiError) {
	var c []ConfigVersion
	err := r.db.SelectContext(ctx, &c, fmt.Sprintf(`SELECT
		version,
		id,
		element_type,
		COALESCE(created_by, -1) as created_by,
		created_at,
		COALESCE((SELECT display_name FROM users
			WHERE id = v.created_by), "unknown") created_by_name,
		active,
		is_valid,
		disabled,
		deploy_status,
		deploy_result,
		coalesce(last_hash, '') as last_hash,
		coalesce(last_config, '{}') as last_config
		FROM agent_config_versions AS v
		WHERE element_type = $1
		ORDER BY created_at desc, version desc
		limit %v`, limit),
		typ)
	ctx context.Context, orgId valuer.UUID, typ opamptypes.ElementType, limit int,
) ([]opamptypes.AgentConfigVersion, *model.ApiError) {
	var c []opamptypes.AgentConfigVersion
	err := r.store.BunDB().NewSelect().
		Model(&c).
		ColumnExpr("id, version, element_type, deploy_status, deploy_result, created_at").
		ColumnExpr("COALESCE(created_by, '') as created_by").
		ColumnExpr(`COALESCE((SELECT display_name FROM users WHERE users.id = acv.created_by), 'unknown') as created_by_name`).
		ColumnExpr("COALESCE(hash, '') as hash, COALESCE(config, '{}') as config").
		Where("acv.element_type = ?", typ).
		Where("acv.org_id = ?", orgId).
		OrderExpr("acv.created_at DESC, acv.version DESC").
		Limit(limit).
		Scan(ctx)

	if err != nil {
		return nil, model.InternalError(err)
	}

	incompleteStatuses := []DeployStatus{DeployInitiated, Deploying}
	incompleteStatuses := []opamptypes.DeployStatus{opamptypes.DeployInitiated, opamptypes.Deploying}
	for idx := 1; idx < len(c); idx++ {
		if slices.Contains(incompleteStatuses, c[idx].DeployStatus) {
			c[idx].DeployStatus = DeployStatusUnknown
			c[idx].DeployStatus = opamptypes.DeployStatusUnknown
		}
	}

@@ -58,32 +53,24 @@ func (r *Repo) GetConfigHistory(
}

func (r *Repo) GetConfigVersion(
	ctx context.Context, typ ElementTypeDef, v int,
) (*ConfigVersion, *model.ApiError) {
	var c ConfigVersion
	err := r.db.GetContext(ctx, &c, `SELECT
		id,
		version,
		element_type,
		COALESCE(created_by, -1) as created_by,
		created_at,
		COALESCE((SELECT display_name FROM users
			WHERE id = v.created_by), "unknown") created_by_name,
		active,
		is_valid,
		disabled,
		deploy_status,
		deploy_result,
		coalesce(last_hash, '') as last_hash,
		coalesce(last_config, '{}') as last_config
		FROM agent_config_versions v
		WHERE element_type = $1
		AND version = $2`, typ, v)
	ctx context.Context, orgId valuer.UUID, typ opamptypes.ElementType, v int,
) (*opamptypes.AgentConfigVersion, *model.ApiError) {
	var c opamptypes.AgentConfigVersion
	err := r.store.BunDB().NewSelect().
		Model(&c).
		ColumnExpr("id, version, element_type, deploy_status, deploy_result, created_at").
		ColumnExpr("COALESCE(created_by, '') as created_by").
		ColumnExpr(`COALESCE((SELECT display_name FROM users WHERE users.id = acv.created_by), 'unknown') as created_by_name`).
		ColumnExpr("COALESCE(hash, '') as hash, COALESCE(config, '{}') as config").
		Where("acv.element_type = ?", typ).
		Where("acv.version = ?", v).
		Where("acv.org_id = ?", orgId).
		Scan(ctx)

	if err == sql.ErrNoRows {
		return nil, model.NotFoundError(err)
	}
	if err != nil {
		if errors.Is(err, sql.ErrNoRows) {
			return nil, model.NotFoundError(err)
		}
		return nil, model.InternalError(err)
	}
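For readers less used to Bun's fluent builder, the GetConfigVersion query above should expand to SQL roughly like the following (approximate; Bun derives the `acv` alias and the table name from the model's struct tags, which this diff does not show):

	SELECT id, version, element_type, deploy_status, deploy_result, created_at,
	       COALESCE(created_by, '') AS created_by,
	       COALESCE((SELECT display_name FROM users WHERE users.id = acv.created_by), 'unknown') AS created_by_name,
	       COALESCE(hash, '') AS hash, COALESCE(config, '{}') AS config
	FROM agent_config_version AS acv
	WHERE acv.element_type = ? AND acv.version = ? AND acv.org_id = ?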
@@ -91,33 +78,23 @@ func (r *Repo) GetConfigVersion(
}

func (r *Repo) GetLatestVersion(
	ctx context.Context, typ ElementTypeDef,
) (*ConfigVersion, *model.ApiError) {
	var c ConfigVersion
	err := r.db.GetContext(ctx, &c, `SELECT
		id,
		version,
		element_type,
		COALESCE(created_by, -1) as created_by,
		created_at,
		COALESCE((SELECT display_name FROM users
			WHERE id = v.created_by), "unknown") created_by_name,
		active,
		is_valid,
		disabled,
		deploy_status,
		deploy_result
		FROM agent_config_versions AS v
		WHERE element_type = $1
		AND version = (
			SELECT MAX(version)
			FROM agent_config_versions
			WHERE element_type=$2)`, typ, typ)
	ctx context.Context, orgId valuer.UUID, typ opamptypes.ElementType,
) (*opamptypes.AgentConfigVersion, *model.ApiError) {
	var c opamptypes.AgentConfigVersion
	err := r.store.BunDB().NewSelect().
		Model(&c).
		ColumnExpr("id, version, element_type, deploy_status, deploy_result, created_at").
		ColumnExpr("COALESCE(created_by, '') as created_by").
		ColumnExpr(`COALESCE((SELECT display_name FROM users WHERE users.id = acv.created_by), 'unknown') as created_by_name`).
		Where("acv.element_type = ?", typ).
		Where("acv.org_id = ?", orgId).
		Where("version = (SELECT MAX(version) FROM agent_config_version WHERE acv.element_type = ?)", typ).
		Scan(ctx)

	if err == sql.ErrNoRows {
		return nil, model.NotFoundError(err)
	}
	if err != nil {
		if errors.Is(err, sql.ErrNoRows) {
			return nil, model.NotFoundError(err)
		}
		return nil, model.InternalError(err)
	}

@@ -125,18 +102,18 @@ func (r *Repo) GetLatestVersion(
}

func (r *Repo) insertConfig(
	ctx context.Context, userId string, c *ConfigVersion, elements []string,
	ctx context.Context, orgId valuer.UUID, userId valuer.UUID, c *opamptypes.AgentConfigVersion, elements []string,
) (fnerr *model.ApiError) {

	if string(c.ElementType) == "" {
	if c.ElementType.StringValue() == "" {
		return model.BadRequest(fmt.Errorf(
			"element type is required for creating agent config version",
		))
	}

	// allowing empty elements for logs - use case is deleting all pipelines
	if len(elements) == 0 && c.ElementType != ElementTypeLogPipelines {
		zap.L().Error("insert config called with no elements ", zap.String("ElementType", string(c.ElementType)))
	if len(elements) == 0 && c.ElementType != opamptypes.ElementTypeLogPipelines {
		zap.L().Error("insert config called with no elements ", zap.String("ElementType", c.ElementType.StringValue()))
		return model.BadRequest(fmt.Errorf("config must have atleast one element"))
	}

@@ -144,20 +121,20 @@ func (r *Repo) insertConfig(
		// the version can not be set by the user, we want to auto-assign the versions
		// in a monotonically increasing order starting with 1. hence, we reject insert
		// requests with version anything other than 0. here, 0 indicates un-assigned
		zap.L().Error("invalid version assignment while inserting agent config", zap.Int("version", c.Version), zap.String("ElementType", string(c.ElementType)))
		zap.L().Error("invalid version assignment while inserting agent config", zap.Int("version", c.Version), zap.String("ElementType", c.ElementType.StringValue()))
		return model.BadRequest(fmt.Errorf(
			"user defined versions are not supported in the agent config",
		))
	}

	configVersion, err := r.GetLatestVersion(ctx, c.ElementType)
	configVersion, err := r.GetLatestVersion(ctx, orgId, c.ElementType)
	if err != nil && err.Type() != model.ErrorNotFound {
		zap.L().Error("failed to fetch latest config version", zap.Error(err))
		return model.InternalError(fmt.Errorf("failed to fetch latest config version"))
	}

	if configVersion != nil {
		c.Version = updateVersion(configVersion.Version)
		c.IncrementVersion(configVersion.Version)
	} else {
		// first version
		c.Version = 1

@@ -166,57 +143,34 @@ func (r *Repo) insertConfig(
	defer func() {
		if fnerr != nil {
			// remove all the damage (invalid rows from db)
			_, _ = r.db.Exec("DELETE FROM agent_config_versions WHERE id = $1", c.ID)
			_, _ = r.db.Exec("DELETE FROM agent_config_elements WHERE version_id=$1", c.ID)
			r.store.BunDB().NewDelete().Model(new(opamptypes.AgentConfigVersion)).Where("id = ?", c.ID).Where("org_id = ?", orgId).Exec(ctx)
			r.store.BunDB().NewDelete().Model(new(opamptypes.AgentConfigElement)).Where("version_id = ?", c.ID).Exec(ctx)
		}
	}()

	// insert config
	configQuery := `INSERT INTO agent_config_versions(
		id,
		version,
		created_by,
		element_type,
		active,
		is_valid,
		disabled,
		deploy_status,
		deploy_result)
		VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9)`

	_, dbErr := r.db.ExecContext(ctx,
		configQuery,
		c.ID,
		c.Version,
		userId,
		c.ElementType,
		false,
		false,
		false,
		c.DeployStatus,
		c.DeployResult)
	_, dbErr := r.store.
		BunDB().
		NewInsert().
		Model(c).
		Exec(ctx)

	if dbErr != nil {
		zap.L().Error("error in inserting config version: ", zap.Error(dbErr))
		return model.InternalError(errors.Wrap(dbErr, "failed to insert ingestion rule"))
	}

	elementsQuery := `INSERT INTO agent_config_elements(
		id,
		version_id,
		element_type,
		element_id)
		VALUES ($1, $2, $3, $4)`

	for _, e := range elements {
		_, dbErr = r.db.ExecContext(
			ctx,
			elementsQuery,
			uuid.NewString(),
			c.ID,
			c.ElementType,
			e,
		)
		agentConfigElement := &opamptypes.AgentConfigElement{
			Identifiable: types.Identifiable{ID: valuer.GenerateUUID()},
			TimeAuditable: types.TimeAuditable{
				CreatedAt: time.Now(),
				UpdatedAt: time.Now(),
			},
			VersionID:   c.ID,
			ElementType: c.ElementType.StringValue(),
			ElementID:   e,
		}
		_, dbErr = r.store.BunDB().NewInsert().Model(agentConfigElement).Exec(ctx)
		if dbErr != nil {
			return model.InternalError(dbErr)
		}
@@ -226,40 +180,49 @@ func (r *Repo) insertConfig(
}

func (r *Repo) updateDeployStatus(ctx context.Context,
	elementType ElementTypeDef,
	orgId valuer.UUID,
	elementType opamptypes.ElementType,
	version int,
	status string,
	result string,
	lastHash string,
	lastconf string) *model.ApiError {

	updateQuery := `UPDATE agent_config_versions
		set deploy_status = $1,
		deploy_result = $2,
		last_hash = COALESCE($3, last_hash),
		last_config = $4
		WHERE version=$5
		AND element_type = $6`
	// check if it has org orgID prefix
	// ensuring it here and also ensuring in coordinator.go
	if !strings.HasPrefix(lastHash, orgId.String()) {
		lastHash = orgId.String() + lastHash
	}

	_, err := r.db.ExecContext(ctx, updateQuery, status, result, lastHash, lastconf, version, string(elementType))
	_, err := r.store.BunDB().NewUpdate().
		Model(new(opamptypes.AgentConfigVersion)).
		Set("deploy_status = ?", status).
		Set("deploy_result = ?", result).
		Set("hash = COALESCE(?, hash)", lastHash).
		Set("config = ?", lastconf).
		Where("version = ?", version).
		Where("element_type = ?", elementType).
		Where("org_id = ?", orgId).
		Exec(ctx)
	if err != nil {
		zap.L().Error("failed to update deploy status", zap.Error(err))
		return model.BadRequest(fmt.Errorf("failed to update deploy status"))
	}

	return nil
}

func (r *Repo) updateDeployStatusByHash(
	ctx context.Context, confighash string, status string, result string,
	ctx context.Context, orgId valuer.UUID, confighash string, status string, result string,
) *model.ApiError {

	updateQuery := `UPDATE agent_config_versions
		set deploy_status = $1,
		deploy_result = $2
		WHERE last_hash=$4`

	_, err := r.db.ExecContext(ctx, updateQuery, status, result, confighash)
	_, err := r.store.BunDB().NewUpdate().
		Model(new(opamptypes.AgentConfigVersion)).
		Set("deploy_status = ?", status).
		Set("deploy_result = ?", result).
		Where("hash = ?", confighash).
		Where("org_id = ?", orgId).
		Exec(ctx)
	if err != nil {
		zap.L().Error("failed to update deploy status", zap.Error(err))
		return model.InternalError(errors.Wrap(err, "failed to update deploy status"))
@@ -12,8 +12,10 @@ import (
    filterprocessor "github.com/SigNoz/signoz/pkg/query-service/app/opamp/otelconfig/filterprocessor"
    tsp "github.com/SigNoz/signoz/pkg/query-service/app/opamp/otelconfig/tailsampler"
    "github.com/SigNoz/signoz/pkg/query-service/model"
    "github.com/SigNoz/signoz/pkg/sqlstore"
    "github.com/SigNoz/signoz/pkg/types/opamptypes"
    "github.com/SigNoz/signoz/pkg/valuer"
    "github.com/google/uuid"
    "github.com/jmoiron/sqlx"
    "github.com/pkg/errors"
    "go.uber.org/zap"
    yaml "gopkg.in/yaml.v3"
@@ -39,7 +41,7 @@ type Manager struct {
}

type ManagerOptions struct {
    DB    *sqlx.DB
    Store sqlstore.SQLStore

    // When acting as opamp.AgentConfigProvider, agent conf recommendations are
    // applied to the base conf in the order the features have been specified here.
@@ -60,7 +62,7 @@ func Initiate(options *ManagerOptions) (*Manager, error) {
    }

    m = &Manager{
        Repo:              Repo{options.DB},
        Repo:              Repo{options.Store},
        agentFeatures:     options.AgentFeatures,
        configSubscribers: map[string]func(){},
    }
@@ -90,7 +92,7 @@ func (m *Manager) notifyConfigUpdateSubscribers() {
}

// Implements opamp.AgentConfigProvider
func (m *Manager) RecommendAgentConfig(currentConfYaml []byte) (
func (m *Manager) RecommendAgentConfig(orgId valuer.UUID, currentConfYaml []byte) (
    recommendedConfYaml []byte,
    // Opaque id of the recommended config, used for reporting deployment status updates
    configId string,
@@ -100,13 +102,13 @@ func (m *Manager) RecommendAgentConfig(currentConfYaml []byte) (
    settingVersionsUsed := []string{}

    for _, feature := range m.agentFeatures {
        featureType := ElementTypeDef(feature.AgentFeatureType())
        latestConfig, apiErr := GetLatestVersion(context.Background(), featureType)
        featureType := opamptypes.NewElementType(string(feature.AgentFeatureType()))
        latestConfig, apiErr := GetLatestVersion(context.Background(), orgId, featureType)
        if apiErr != nil && apiErr.Type() != model.ErrorNotFound {
            return nil, "", errors.Wrap(apiErr.ToError(), "failed to get latest agent config version")
        }

        updatedConf, serializedSettingsUsed, apiErr := feature.RecommendAgentConfig(recommendation, latestConfig)
        updatedConf, serializedSettingsUsed, apiErr := feature.RecommendAgentConfig(orgId, recommendation, latestConfig)
        if apiErr != nil {
            return nil, "", errors.Wrap(apiErr.ToError(), fmt.Sprintf(
                "failed to generate agent config recommendation for %s", featureType,
@@ -129,9 +131,10 @@ func (m *Manager) RecommendAgentConfig(currentConfYaml []byte) (

        _ = m.updateDeployStatus(
            context.Background(),
            orgId,
            featureType,
            configVersion,
            string(DeployInitiated),
            opamptypes.DeployInitiated.StringValue(),
            "Deployment has started",
            configId,
            serializedSettingsUsed,
@@ -154,52 +157,53 @@ func (m *Manager) RecommendAgentConfig(currentConfYaml []byte) (

// Implements opamp.AgentConfigProvider
func (m *Manager) ReportConfigDeploymentStatus(
    orgId valuer.UUID,
    agentId string,
    configId string,
    err error,
) {
    featureConfigIds := strings.Split(configId, ",")
    for _, featureConfId := range featureConfigIds {
        newStatus := string(Deployed)
        newStatus := opamptypes.Deployed.StringValue()
        message := "Deployment was successful"
        if err != nil {
            newStatus = string(DeployFailed)
            newStatus = opamptypes.DeployFailed.StringValue()
            message = fmt.Sprintf("%s: %s", agentId, err.Error())
        }
        _ = m.updateDeployStatusByHash(
            context.Background(), featureConfId, newStatus, message,
            context.Background(), orgId, featureConfId, newStatus, message,
        )
    }
}

func GetLatestVersion(
    ctx context.Context, elementType ElementTypeDef,
) (*ConfigVersion, *model.ApiError) {
    return m.GetLatestVersion(ctx, elementType)
    ctx context.Context, orgId valuer.UUID, elementType opamptypes.ElementType,
) (*opamptypes.AgentConfigVersion, *model.ApiError) {
    return m.GetLatestVersion(ctx, orgId, elementType)
}

func GetConfigVersion(
    ctx context.Context, elementType ElementTypeDef, version int,
) (*ConfigVersion, *model.ApiError) {
    return m.GetConfigVersion(ctx, elementType, version)
    ctx context.Context, orgId valuer.UUID, elementType opamptypes.ElementType, version int,
) (*opamptypes.AgentConfigVersion, *model.ApiError) {
    return m.GetConfigVersion(ctx, orgId, elementType, version)
}

func GetConfigHistory(
    ctx context.Context, typ ElementTypeDef, limit int,
) ([]ConfigVersion, *model.ApiError) {
    return m.GetConfigHistory(ctx, typ, limit)
    ctx context.Context, orgId valuer.UUID, typ opamptypes.ElementType, limit int,
) ([]opamptypes.AgentConfigVersion, *model.ApiError) {
    return m.GetConfigHistory(ctx, orgId, typ, limit)
}

// StartNewVersion launches a new config version for given set of elements
func StartNewVersion(
    ctx context.Context, userId string, eleType ElementTypeDef, elementIds []string,
) (*ConfigVersion, *model.ApiError) {
    ctx context.Context, orgId valuer.UUID, userId valuer.UUID, eleType opamptypes.ElementType, elementIds []string,
) (*opamptypes.AgentConfigVersion, *model.ApiError) {

    // create a new version
    cfg := NewConfigVersion(eleType)
    cfg := opamptypes.NewAgentConfigVersion(orgId, userId, eleType)

    // insert new config and elements into database
    err := m.insertConfig(ctx, userId, cfg, elementIds)
    err := m.insertConfig(ctx, orgId, userId, cfg, elementIds)
    if err != nil {
        return nil, err
    }
@@ -213,22 +217,22 @@ func NotifyConfigUpdate(ctx context.Context) {
    m.notifyConfigUpdateSubscribers()
}

func Redeploy(ctx context.Context, typ ElementTypeDef, version int) *model.ApiError {
func Redeploy(ctx context.Context, orgId valuer.UUID, typ opamptypes.ElementType, version int) *model.ApiError {

    configVersion, err := GetConfigVersion(ctx, typ, version)
    configVersion, err := GetConfigVersion(ctx, orgId, typ, version)
    if err != nil {
        zap.L().Error("failed to fetch config version during redeploy", zap.Error(err))
        return model.WrapApiError(err, "failed to fetch details of the config version")
    }

    if configVersion == nil || (configVersion != nil && configVersion.LastConf == "") {
    if configVersion == nil || (configVersion != nil && configVersion.Config == "") {
        zap.L().Debug("config version has no conf yaml", zap.Any("configVersion", configVersion))
        return model.BadRequest(fmt.Errorf("the config version can not be redeployed"))
    }
    switch typ {
    case ElementTypeSamplingRules:
    case opamptypes.ElementTypeSamplingRules:
        var config *tsp.Config
        if err := yaml.Unmarshal([]byte(configVersion.LastConf), &config); err != nil {
        if err := yaml.Unmarshal([]byte(configVersion.Config), &config); err != nil {
            zap.L().Debug("failed to read last conf correctly", zap.Error(err))
            return model.BadRequest(fmt.Errorf("failed to read the stored config correctly"))
        }
@@ -245,10 +249,10 @@ func Redeploy(ctx context.Context, typ ElementTypeDef, version int) *model.ApiEr
            return model.InternalError(fmt.Errorf("failed to deploy the config"))
        }

        _ = m.updateDeployStatus(ctx, ElementTypeSamplingRules, version, string(DeployInitiated), "Deployment started", configHash, configVersion.LastConf)
    case ElementTypeDropRules:
        m.updateDeployStatus(ctx, orgId, opamptypes.ElementTypeSamplingRules, version, opamptypes.DeployInitiated.StringValue(), "Deployment started", configHash, configVersion.Config)
    case opamptypes.ElementTypeDropRules:
        var filterConfig *filterprocessor.Config
        if err := yaml.Unmarshal([]byte(configVersion.LastConf), &filterConfig); err != nil {
        if err := yaml.Unmarshal([]byte(configVersion.Config), &filterConfig); err != nil {
            zap.L().Error("failed to read last conf correctly", zap.Error(err))
            return model.InternalError(fmt.Errorf("failed to read the stored config correctly"))
        }
@@ -263,14 +267,14 @@ func Redeploy(ctx context.Context, typ ElementTypeDef, version int) *model.ApiEr
            return err
        }

        _ = m.updateDeployStatus(ctx, ElementTypeSamplingRules, version, string(DeployInitiated), "Deployment started", configHash, configVersion.LastConf)
        m.updateDeployStatus(ctx, orgId, opamptypes.ElementTypeSamplingRules, version, opamptypes.DeployInitiated.StringValue(), "Deployment started", configHash, configVersion.Config)
    }

    return nil
}

// UpsertFilterProcessor updates the agent config with new filter processor params
func UpsertFilterProcessor(ctx context.Context, version int, config *filterprocessor.Config) error {
func UpsertFilterProcessor(ctx context.Context, orgId valuer.UUID, version int, config *filterprocessor.Config) error {
    if !atomic.CompareAndSwapUint32(&m.lock, 0, 1) {
        return fmt.Errorf("agent updater is busy")
    }
@@ -294,7 +298,7 @@ func UpsertFilterProcessor(ctx context.Context, version int, config *filterproce
        zap.L().Warn("unexpected error while transforming processor config to yaml", zap.Error(yamlErr))
    }

    _ = m.updateDeployStatus(ctx, ElementTypeDropRules, version, string(DeployInitiated), "Deployment started", configHash, string(processorConfYaml))
    m.updateDeployStatus(ctx, orgId, opamptypes.ElementTypeDropRules, version, opamptypes.DeployInitiated.StringValue(), "Deployment started", configHash, string(processorConfYaml))
    return nil
}

@@ -303,9 +307,9 @@ func UpsertFilterProcessor(ctx context.Context, version int, config *filterproce
// successful deployment if no error is received.
// this method is currently expected to be called only once in the lifecycle
// but can be improved in future to accept continuous request status updates from opamp
func (m *Manager) OnConfigUpdate(agentId string, hash string, err error) {
func (m *Manager) OnConfigUpdate(orgId valuer.UUID, agentId string, hash string, err error) {

    status := string(Deployed)
    status := opamptypes.Deployed.StringValue()

    message := "Deployment was successful"

@@ -314,15 +318,15 @@ func (m *Manager) OnConfigUpdate(agentId string, hash string, err error) {
    }()

    if err != nil {
        status = string(DeployFailed)
        status = opamptypes.DeployFailed.StringValue()
        message = fmt.Sprintf("%s: %s", agentId, err.Error())
    }

    _ = m.updateDeployStatusByHash(context.Background(), hash, status, message)
    _ = m.updateDeployStatusByHash(context.Background(), orgId, hash, status, message)
}

// UpsertSamplingProcessor updates the agent config with new sampling processor params
func UpsertSamplingProcessor(ctx context.Context, version int, config *tsp.Config) error {
func UpsertSamplingProcessor(ctx context.Context, orgId valuer.UUID, version int, config *tsp.Config) error {
    if !atomic.CompareAndSwapUint32(&m.lock, 0, 1) {
        return fmt.Errorf("agent updater is busy")
    }
@@ -345,6 +349,6 @@ func UpsertSamplingProcessor(ctx context.Context, version int, config *tsp.Confi
        zap.L().Warn("unexpected error while transforming processor config to yaml", zap.Error(yamlErr))
    }

    _ = m.updateDeployStatus(ctx, ElementTypeSamplingRules, version, string(DeployInitiated), "Deployment started", configHash, string(processorConfYaml))
    m.updateDeployStatus(ctx, orgId, opamptypes.ElementTypeSamplingRules, version, opamptypes.DeployInitiated.StringValue(), "Deployment started", configHash, string(processorConfYaml))
    return nil
}

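With every package-level helper now taking the tenant explicitly, a caller rolls out and redeploys per org. A hedged usage sketch; the import path for agentConf is assumed from the repository layout, and only the signatures visible above are relied on:

package agentconfusage

import (
    "context"

    "github.com/SigNoz/signoz/pkg/query-service/agentConf" // path assumed
    "github.com/SigNoz/signoz/pkg/types/opamptypes"
    "github.com/SigNoz/signoz/pkg/valuer"
)

// rollOutPipelines starts a new log-pipelines config version for one org and
// immediately redeploys it; orgID/userID would come from request claims.
func rollOutPipelines(ctx context.Context, orgID, userID valuer.UUID, elementIDs []string) error {
    cfg, apiErr := agentConf.StartNewVersion(ctx, orgID, userID, opamptypes.ElementTypeLogPipelines, elementIDs)
    if apiErr != nil {
        return apiErr.ToError()
    }
    if apiErr := agentConf.Redeploy(ctx, orgID, opamptypes.ElementTypeLogPipelines, cfg.Version); apiErr != nil {
        return apiErr.ToError()
    }
    return nil
}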
@@ -1,72 +0,0 @@
package agentConf

import (
    "time"

    "github.com/google/uuid"
)

type ElementTypeDef string

const (
    ElementTypeSamplingRules ElementTypeDef = "sampling_rules"
    ElementTypeDropRules     ElementTypeDef = "drop_rules"
    ElementTypeLogPipelines  ElementTypeDef = "log_pipelines"
    ElementTypeLbExporter    ElementTypeDef = "lb_exporter"
)

type DeployStatus string

const (
    PendingDeploy       DeployStatus = "DIRTY"
    Deploying           DeployStatus = "DEPLOYING"
    Deployed            DeployStatus = "DEPLOYED"
    DeployInitiated     DeployStatus = "IN_PROGRESS"
    DeployFailed        DeployStatus = "FAILED"
    DeployStatusUnknown DeployStatus = "UNKNOWN"
)

type ConfigVersion struct {
    ID          string         `json:"id" db:"id"`
    Version     int            `json:"version" db:"version"`
    ElementType ElementTypeDef `json:"elementType" db:"element_type"`
    Active      bool           `json:"active" db:"active"`
    IsValid     bool           `json:"is_valid" db:"is_valid"`
    Disabled    bool           `json:"disabled" db:"disabled"`

    DeployStatus DeployStatus `json:"deployStatus" db:"deploy_status"`
    DeployResult string       `json:"deployResult" db:"deploy_result"`

    LastHash string `json:"lastHash" db:"last_hash"`
    LastConf string `json:"lastConf" db:"last_config"`

    CreatedBy     string    `json:"createdBy" db:"created_by"`
    CreatedByName string    `json:"createdByName" db:"created_by_name"`
    CreatedAt     time.Time `json:"createdAt" db:"created_at"`
}

func NewConfigVersion(typeDef ElementTypeDef) *ConfigVersion {
    return &ConfigVersion{
        ID:           uuid.NewString(),
        ElementType:  typeDef,
        Active:       false,
        IsValid:      false,
        Disabled:     false,
        DeployStatus: PendingDeploy,
        LastHash:     "",
        LastConf:     "{}",
        // todo: get user id from context?
        // CreatedBy
    }
}

func updateVersion(v int) int {
    return v + 1
}

type ConfigElements struct {
    VersionID   string
    Version     int
    ElementType ElementTypeDef
    ElementId   string
}

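The string enums from the deleted file move into pkg/types/opamptypes. Judging only from the call sites in this diff, the migration maps roughly as follows; this is a hedged sketch of equivalences, not the opamptypes source:

// agentConf (removed)                 opamptypes (as used in this diff)
// ElementTypeSamplingRules        ->  opamptypes.ElementTypeSamplingRules
// ElementTypeDef("log_pipelines") ->  opamptypes.NewElementType("log_pipelines")
// string(DeployInitiated)         ->  opamptypes.DeployInitiated.StringValue()
// string(Deployed), DeployFailed  ->  opamptypes.Deployed.StringValue(), opamptypes.DeployFailed.StringValue()
// *ConfigVersion                  ->  *opamptypes.AgentConfigVersion
// NewConfigVersion(typ)           ->  opamptypes.NewAgentConfigVersion(orgId, userId, typ)
// cv.LastHash / cv.LastConf       ->  hash / config columns, cv.Config field

Note the replacement constructor requires both orgId and userId, which is what forces tenant awareness through every caller.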
@@ -63,6 +63,7 @@ import (
    "github.com/SigNoz/signoz/pkg/types/authtypes"
    "github.com/SigNoz/signoz/pkg/types/dashboardtypes"
    "github.com/SigNoz/signoz/pkg/types/licensetypes"
    "github.com/SigNoz/signoz/pkg/types/opamptypes"
    "github.com/SigNoz/signoz/pkg/types/pipelinetypes"
    ruletypes "github.com/SigNoz/signoz/pkg/types/ruletypes"

@@ -3460,10 +3461,9 @@ func (aH *APIHandler) InstallIntegration(w http.ResponseWriter, r *http.Request)
        RespondError(w, model.BadRequest(err), nil)
        return
    }

    claims, errv2 := authtypes.ClaimsFromContext(r.Context())
    if errv2 != nil {
        render.Error(w, errv2)
    claims, err := authtypes.ClaimsFromContext(r.Context())
    if err != nil {
        RespondError(w, model.UnauthorizedError(errors.New("unauthorized")), nil)
        return
    }

@@ -4142,8 +4142,6 @@ func (aH *APIHandler) logAggregate(w http.ResponseWriter, r *http.Request) {
    aH.WriteJSON(w, r, res)
}

const logPipelines = "log_pipelines"

func parseAgentConfigVersion(r *http.Request) (int, *model.ApiError) {
    versionString := mux.Vars(r)["version"]

@@ -4191,6 +4189,12 @@ func (aH *APIHandler) ListLogsPipelinesHandler(w http.ResponseWriter, r *http.Re
        return
    }

    orgID, errv2 := valuer.NewUUID(claims.OrgID)
    if errv2 != nil {
        render.Error(w, errv2)
        return
    }

    version, err := parseAgentConfigVersion(r)
    if err != nil {
        RespondError(w, model.WrapApiError(err, "Failed to parse agent config version"), nil)
@@ -4201,9 +4205,9 @@ func (aH *APIHandler) ListLogsPipelinesHandler(w http.ResponseWriter, r *http.Re
    var apierr *model.ApiError

    if version != -1 {
        payload, apierr = aH.listLogsPipelinesByVersion(context.Background(), claims.OrgID, version)
        payload, apierr = aH.listLogsPipelinesByVersion(context.Background(), orgID, version)
    } else {
        payload, apierr = aH.listLogsPipelines(context.Background(), claims.OrgID)
        payload, apierr = aH.listLogsPipelines(context.Background(), orgID)
    }

    if apierr != nil {
@@ -4214,12 +4218,12 @@ func (aH *APIHandler) ListLogsPipelinesHandler(w http.ResponseWriter, r *http.Re
}

// listLogsPipelines lists logs pipelines for the latest version
func (aH *APIHandler) listLogsPipelines(ctx context.Context, orgID string) (
func (aH *APIHandler) listLogsPipelines(ctx context.Context, orgID valuer.UUID) (
    *logparsingpipeline.PipelinesResponse, *model.ApiError,
) {
    // get latest agent config
    latestVersion := -1
    lastestConfig, err := agentConf.GetLatestVersion(ctx, logPipelines)
    lastestConfig, err := agentConf.GetLatestVersion(ctx, orgID, opamptypes.ElementTypeLogPipelines)
    if err != nil && err.Type() != model.ErrorNotFound {
        return nil, model.WrapApiError(err, "failed to get latest agent config version")
    }
@@ -4228,14 +4232,14 @@ func (aH *APIHandler) listLogsPipelines(ctx context.Context, orgID string) (
        latestVersion = lastestConfig.Version
    }

    payload, err := aH.LogsParsingPipelineController.GetPipelinesByVersion(ctx, latestVersion)
    payload, err := aH.LogsParsingPipelineController.GetPipelinesByVersion(ctx, orgID, latestVersion)
    if err != nil {
        return nil, model.WrapApiError(err, "failed to get pipelines")
    }

    // todo(Nitya): make a new API for history pagination
    limit := 10
    history, err := agentConf.GetConfigHistory(ctx, logPipelines, limit)
    history, err := agentConf.GetConfigHistory(ctx, orgID, opamptypes.ElementTypeLogPipelines, limit)
    if err != nil {
        return nil, model.WrapApiError(err, "failed to get config history")
    }
@@ -4244,17 +4248,17 @@ func (aH *APIHandler) listLogsPipelines(ctx context.Context, orgID string) (
}

// listLogsPipelinesByVersion lists pipelines along with config version history
func (aH *APIHandler) listLogsPipelinesByVersion(ctx context.Context, orgID string, version int) (
func (aH *APIHandler) listLogsPipelinesByVersion(ctx context.Context, orgID valuer.UUID, version int) (
    *logparsingpipeline.PipelinesResponse, *model.ApiError,
) {
    payload, err := aH.LogsParsingPipelineController.GetPipelinesByVersion(ctx, version)
    payload, err := aH.LogsParsingPipelineController.GetPipelinesByVersion(ctx, orgID, version)
    if err != nil {
        return nil, model.WrapApiError(err, "failed to get pipelines by version")
    }

    // todo(Nitya): make a new API for history pagination
    limit := 10
    history, err := agentConf.GetConfigHistory(ctx, logPipelines, limit)
    history, err := agentConf.GetConfigHistory(ctx, orgID, opamptypes.ElementTypeLogPipelines, limit)
    if err != nil {
        return nil, model.WrapApiError(err, "failed to retrieve agent config history")
    }
@@ -4270,6 +4274,18 @@ func (aH *APIHandler) CreateLogsPipeline(w http.ResponseWriter, r *http.Request)
        return
    }

    // prepare config by calling gen func
    orgID, errv2 := valuer.NewUUID(claims.OrgID)
    if errv2 != nil {
        render.Error(w, errv2)
        return
    }
    userID, errv2 := valuer.NewUUID(claims.UserID)
    if errv2 != nil {
        render.Error(w, errv2)
        return
    }

    req := pipelinetypes.PostablePipelines{}

    if err := json.NewDecoder(r.Body).Decode(&req); err != nil {
@@ -4290,7 +4306,7 @@ func (aH *APIHandler) CreateLogsPipeline(w http.ResponseWriter, r *http.Request)
        return nil, validationErr
    }

        return aH.LogsParsingPipelineController.ApplyPipelines(ctx, claims.OrgID, postable)
        return aH.LogsParsingPipelineController.ApplyPipelines(ctx, orgID, userID, postable)
    }

    res, err := createPipeline(r.Context(), req.Pipelines)

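The handlers repeat the same claims-to-typed-UUID conversion; it can be captured in one helper. A sketch under the assumption that authtypes.Claims carries OrgID and UserID as strings, as the code above implies; the helper name is hypothetical:

// orgAndUserID converts the string IDs in JWT claims into valuer.UUIDs.
func orgAndUserID(ctx context.Context) (valuer.UUID, valuer.UUID, error) {
    claims, err := authtypes.ClaimsFromContext(ctx)
    if err != nil {
        return valuer.UUID{}, valuer.UUID{}, err
    }
    orgID, err := valuer.NewUUID(claims.OrgID)
    if err != nil {
        return valuer.UUID{}, valuer.UUID{}, err
    }
    userID, err := valuer.NewUUID(claims.UserID)
    if err != nil {
        return valuer.UUID{}, valuer.UUID{}, err
    }
    return orgID, userID, nil
}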
@@ -13,7 +13,7 @@ import (
    "github.com/SigNoz/signoz/pkg/query-service/utils"
    "github.com/SigNoz/signoz/pkg/sqlstore"
    "github.com/SigNoz/signoz/pkg/types"
    "github.com/SigNoz/signoz/pkg/types/authtypes"
    "github.com/SigNoz/signoz/pkg/types/opamptypes"
    "github.com/SigNoz/signoz/pkg/types/pipelinetypes"
    "github.com/SigNoz/signoz/pkg/valuer"
    "github.com/google/uuid"
@@ -41,24 +41,19 @@ func NewLogParsingPipelinesController(

// PipelinesResponse is used to prepare http response for pipelines config related requests
type PipelinesResponse struct {
    *agentConf.ConfigVersion
    *opamptypes.AgentConfigVersion

    Pipelines []pipelinetypes.GettablePipeline `json:"pipelines"`
    History   []agentConf.ConfigVersion `json:"history"`
    History   []opamptypes.AgentConfigVersion `json:"history"`
}

// ApplyPipelines stores new or changed pipelines and initiates a new config update
func (ic *LogParsingPipelineController) ApplyPipelines(
    ctx context.Context,
    orgID string,
    orgID valuer.UUID,
    userID valuer.UUID,
    postable []pipelinetypes.PostablePipeline,
) (*PipelinesResponse, *model.ApiError) {
    // get user id from context
    claims, errv2 := authtypes.ClaimsFromContext(ctx)
    if errv2 != nil {
        return nil, model.UnauthorizedError(fmt.Errorf("failed to get userId from context"))
    }

    var pipelines []pipelinetypes.GettablePipeline

    // scan through postable pipelines, to select the existing pipelines or insert missing ones
@@ -87,13 +82,12 @@ func (ic *LogParsingPipelineController) ApplyPipelines(
        elements[i] = p.ID.StringValue()
    }

    // prepare config by calling gen func
    cfg, err := agentConf.StartNewVersion(ctx, claims.UserID, agentConf.ElementTypeLogPipelines, elements)
    cfg, err := agentConf.StartNewVersion(ctx, orgID, userID, opamptypes.ElementTypeLogPipelines, elements)
    if err != nil || cfg == nil {
        return nil, err
        return nil, model.InternalError(fmt.Errorf("failed to start new version: %w", err))
    }

    return ic.GetPipelinesByVersion(ctx, cfg.Version)
    return ic.GetPipelinesByVersion(ctx, orgID, cfg.Version)
}

func (ic *LogParsingPipelineController) ValidatePipelines(
@@ -142,21 +136,12 @@ func (ic *LogParsingPipelineController) ValidatePipelines(
// Returns effective list of pipelines including user created
// pipelines and pipelines for installed integrations
func (ic *LogParsingPipelineController) getEffectivePipelinesByVersion(
    ctx context.Context, version int,
    ctx context.Context, orgID valuer.UUID, version int,
) ([]pipelinetypes.GettablePipeline, *model.ApiError) {

    result := []pipelinetypes.GettablePipeline{}

    // todo(nitya): remove this once we fix agents in multitenancy
    defaultOrgID, err := ic.GetDefaultOrgID(ctx)
    if err != nil {
        // we don't want to fail the request if we can't get the default org ID
        // we will just return an empty list of pipelines
        zap.L().Warn("failed to get default org ID", zap.Error(err))
        return result, nil
    }

    if version >= 0 {
        savedPipelines, errors := ic.getPipelinesByVersion(ctx, defaultOrgID, version)
        savedPipelines, errors := ic.getPipelinesByVersion(ctx, orgID.String(), version)
        if errors != nil {
            zap.L().Error("failed to get pipelines for version", zap.Int("version", version), zap.Errors("errors", errors))
            return nil, model.InternalError(fmt.Errorf("failed to get pipelines for given version %v", errors))
@@ -164,7 +149,7 @@ func (ic *LogParsingPipelineController) getEffectivePipelinesByVersion(
        result = savedPipelines
    }

    integrationPipelines, apiErr := ic.GetIntegrationPipelines(ctx, defaultOrgID)
    integrationPipelines, apiErr := ic.GetIntegrationPipelines(ctx, orgID.String())
    if apiErr != nil {
        return nil, model.WrapApiError(
            apiErr, "could not get pipelines for installed integrations",
@@ -208,18 +193,18 @@ func (ic *LogParsingPipelineController) getEffectivePipelinesByVersion(

// GetPipelinesByVersion responds with version info and associated pipelines
func (ic *LogParsingPipelineController) GetPipelinesByVersion(
    ctx context.Context, version int,
    ctx context.Context, orgId valuer.UUID, version int,
) (*PipelinesResponse, *model.ApiError) {

    pipelines, errors := ic.getEffectivePipelinesByVersion(ctx, version)
    pipelines, errors := ic.getEffectivePipelinesByVersion(ctx, orgId, version)
    if errors != nil {
        zap.L().Error("failed to get pipelines for version", zap.Int("version", version), zap.Error(errors))
        return nil, model.InternalError(fmt.Errorf("failed to get pipelines for given version %v", errors))
    }

    var configVersion *agentConf.ConfigVersion
    var configVersion *opamptypes.AgentConfigVersion
    if version >= 0 {
        cv, err := agentConf.GetConfigVersion(ctx, agentConf.ElementTypeLogPipelines, version)
        cv, err := agentConf.GetConfigVersion(ctx, orgId, opamptypes.ElementTypeLogPipelines, version)
        if err != nil {
            zap.L().Error("failed to get config for version", zap.Int("version", version), zap.Error(err))
            return nil, model.WrapApiError(err, "failed to get config for given version")
@@ -228,8 +213,8 @@ func (ic *LogParsingPipelineController) GetPipelinesByVersion(
    }

    return &PipelinesResponse{
        ConfigVersion: configVersion,
        Pipelines:     pipelines,
        AgentConfigVersion: configVersion,
        Pipelines:          pipelines,
    }, nil
}

@@ -268,8 +253,9 @@ func (pc *LogParsingPipelineController) AgentFeatureType() agentConf.AgentFeatur

// Implements agentConf.AgentFeature interface.
func (pc *LogParsingPipelineController) RecommendAgentConfig(
    orgId valuer.UUID,
    currentConfYaml []byte,
    configVersion *agentConf.ConfigVersion,
    configVersion *opamptypes.AgentConfigVersion,
) (
    recommendedConfYaml []byte,
    serializedSettingsUsed string,
@@ -281,7 +267,7 @@ func (pc *LogParsingPipelineController) RecommendAgentConfig(
    }

    pipelinesResp, apiErr := pc.GetPipelinesByVersion(
        context.Background(), pipelinesVersion,
        context.Background(), orgId, pipelinesVersion,
    )
    if apiErr != nil {
        return nil, "", apiErr

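One subtlety above: renaming the embedded type renames the promoted field, which is why the struct literal in GetPipelinesByVersion switches its key from ConfigVersion: to AgentConfigVersion:. A toy illustration of the Go rule:

package embeddemo

type ConfigVersion struct{ Version int }
type AgentConfigVersion struct{ Version int }

// The embedded field's name is the unqualified type name.
type respOld struct{ *ConfigVersion }
type respNew struct{ *AgentConfigVersion }

var _ = respOld{ConfigVersion: &ConfigVersion{Version: 1}}
var _ = respNew{AgentConfigVersion: &AgentConfigVersion{Version: 1}}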
@@ -32,7 +32,7 @@ func NewRepo(sqlStore sqlstore.SQLStore) Repo {

// insertPipeline stores a given postable pipeline to database
func (r *Repo) insertPipeline(
    ctx context.Context, orgID string, postable *pipelinetypes.PostablePipeline,
    ctx context.Context, orgID valuer.UUID, postable *pipelinetypes.PostablePipeline,
) (*pipelinetypes.GettablePipeline, *model.ApiError) {
    if err := postable.IsValid(); err != nil {
        return nil, model.BadRequest(errors.Wrap(err,
@@ -60,7 +60,7 @@ func (r *Repo) insertPipeline(

    insertRow := &pipelinetypes.GettablePipeline{
        StoreablePipeline: pipelinetypes.StoreablePipeline{
            OrgID: orgID,
            OrgID: orgID.String(),
            Identifiable: types.Identifiable{
                ID: valuer.GenerateUUID(),
            },
@@ -102,11 +102,11 @@ func (r *Repo) getPipelinesByVersion(
    storablePipelines := []pipelinetypes.StoreablePipeline{}
    err := r.sqlStore.BunDB().NewSelect().
        Model(&storablePipelines).
        Join("JOIN agent_config_elements e ON p.id = e.element_id").
        Join("JOIN agent_config_versions v ON v.id = e.version_id").
        Where("e.element_type = ?", logPipelines). // TODO: nitya - add org_id to this as well
        Where("v.version = ?", version). // TODO: nitya - add org_id to this as well
        Where("p.org_id = ?", orgID).
        Join("JOIN agent_config_element e ON p.id = e.element_id").
        Join("JOIN agent_config_version v ON v.id = e.version_id").
        Where("e.element_type = ?", logPipelines).
        Where("v.version = ?", version).
        Where("v.org_id = ?", orgID).
        Order("p.order_id ASC").
        Scan(ctx)
    if err != nil {
@@ -131,20 +131,6 @@ func (r *Repo) getPipelinesByVersion(
    return gettablePipelines, errors
}

func (r *Repo) GetDefaultOrgID(ctx context.Context) (string, *model.ApiError) {
    var orgs []types.Organization
    err := r.sqlStore.BunDB().NewSelect().
        Model(&orgs).
        Scan(ctx)
    if err != nil {
        return "", model.InternalError(errors.Wrap(err, "failed to get default org ID"))
    }
    if len(orgs) == 0 {
        return "", model.InternalError(errors.New("no orgs found"))
    }
    return orgs[0].ID.StringValue(), nil
}

// GetPipeline returns the pipeline and errors (if any)
func (r *Repo) GetPipeline(
    ctx context.Context, orgID string, id string,

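For orientation, the rewritten builder in getPipelinesByVersion should generate roughly the SQL below. The joined table names come straight from the diff; the pipelines table name, the placement of the two org filters, and the placeholder style are assumptions:

// Approximate SQL produced by the bun chain above.
const pipelinesByVersionSQL = `
SELECT p.*
FROM pipelines AS p
JOIN agent_config_element e ON p.id = e.element_id
JOIN agent_config_version v ON v.id = e.version_id
WHERE e.element_type = ?
  AND v.version = ?
  AND v.org_id = ?
  AND p.org_id = ?
ORDER BY p.order_id ASC`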
@@ -12,9 +12,9 @@ import (
    "github.com/SigNoz/signoz/pkg/query-service/model"
    "github.com/SigNoz/signoz/pkg/types/pipelinetypes"
    "github.com/pkg/errors"
    "go.opentelemetry.io/collector/otelcol"
    "go.opentelemetry.io/collector/pdata/pcommon"
    "go.opentelemetry.io/collector/pdata/plog"
    "go.opentelemetry.io/collector/processor"
)

func SimulatePipelinesProcessing(
@@ -42,7 +42,7 @@ func SimulatePipelinesProcessing(
    }
    simulatorInputPLogs := SignozLogsToPLogs(logs)

    processorFactories, err := processor.MakeFactoryMap(
    processorFactories, err := otelcol.MakeFactoryMap(
        signozlogspipelineprocessor.NewFactory(),
    )
    if err != nil {

@@ -0,0 +1 @@
package opamp
@@ -1,11 +1,20 @@
package opamp

import (
    "context"
    "fmt"
    "log/slog"
    "testing"

    "github.com/SigNoz/signoz/pkg/instrumentation/instrumentationtest"
    "github.com/SigNoz/signoz/pkg/modules/organization/implorganization"
    "github.com/SigNoz/signoz/pkg/query-service/app/opamp/model"
    "github.com/SigNoz/signoz/pkg/query-service/utils"
    "github.com/SigNoz/signoz/pkg/sharder"
    "github.com/SigNoz/signoz/pkg/sharder/noopsharder"
    "github.com/SigNoz/signoz/pkg/sqlstore"
    "github.com/SigNoz/signoz/pkg/types/opamptypes"
    "github.com/SigNoz/signoz/pkg/valuer"
    "github.com/knadh/koanf"
    "github.com/knadh/koanf/parsers/yaml"
    "github.com/knadh/koanf/providers/rawbytes"
@@ -21,6 +30,9 @@ func TestOpAMPServerToAgentCommunicationWithConfigProvider(t *testing.T) {

    tb := newTestbed(t)

    orgID, err := utils.GetTestOrgId(tb.sqlStore)
    require.Nil(err)

    require.Equal(
        0, len(tb.testConfigProvider.ConfigUpdateSubscribers),
        "there should be no agent config subscribers at the start",
@@ -35,7 +47,8 @@ func TestOpAMPServerToAgentCommunicationWithConfigProvider(t *testing.T) {
    // Even if there are no recommended changes to the agent's initial config
    require.False(tb.testConfigProvider.HasRecommendations())
    agent1Conn := &MockOpAmpConnection{}
    agent1Id := "testAgent1"
    agent1Id := valuer.GenerateUUID().String()
    // get orgId from the db
    tb.opampServer.OnMessage(
        agent1Conn,
        &protobufs.AgentToServer{
@@ -57,7 +70,7 @@ func TestOpAMPServerToAgentCommunicationWithConfigProvider(t *testing.T) {

    tb.testConfigProvider.ZPagesEndpoint = "localhost:55555"
    require.True(tb.testConfigProvider.HasRecommendations())
    agent2Id := "testAgent2"
    agent2Id := valuer.GenerateUUID().String()
    agent2Conn := &MockOpAmpConnection{}
    tb.opampServer.OnMessage(
        agent2Conn,
@@ -97,10 +110,10 @@ func TestOpAMPServerToAgentCommunicationWithConfigProvider(t *testing.T) {
        },
    })
    expectedConfId := tb.testConfigProvider.ZPagesEndpoint
    require.True(tb.testConfigProvider.HasReportedDeploymentStatus(expectedConfId, agent2Id),
    require.True(tb.testConfigProvider.HasReportedDeploymentStatus(orgID, expectedConfId, agent2Id),
        "Server should report deployment success to config provider on receiving update from agent.",
    )
    require.True(tb.testConfigProvider.ReportedDeploymentStatuses[expectedConfId][agent2Id])
    require.True(tb.testConfigProvider.ReportedDeploymentStatuses[orgID.String()+expectedConfId][agent2Id])
    require.Nil(
        agent2Conn.LatestMsgFromServer(),
        "Server should not recommend a RemoteConfig if agent is already running it.",
@@ -130,10 +143,10 @@ func TestOpAMPServerToAgentCommunicationWithConfigProvider(t *testing.T) {
        },
    })
    expectedConfId = tb.testConfigProvider.ZPagesEndpoint
    require.True(tb.testConfigProvider.HasReportedDeploymentStatus(expectedConfId, agent2Id),
    require.True(tb.testConfigProvider.HasReportedDeploymentStatus(orgID, expectedConfId, agent2Id),
        "Server should report deployment failure to config provider on receiving update from agent.",
    )
    require.False(tb.testConfigProvider.ReportedDeploymentStatuses[expectedConfId][agent2Id])
    require.False(tb.testConfigProvider.ReportedDeploymentStatuses[orgID.String()+expectedConfId][agent2Id])

    lastAgent1Msg = agent1Conn.LatestMsgFromServer()
    agent1Conn.ClearMsgsFromServer()
@@ -158,26 +171,88 @@ func TestOpAMPServerToAgentCommunicationWithConfigProvider(t *testing.T) {
    )
}

func TestOpAMPServerAgentLimit(t *testing.T) {
    require := require.New(t)

    tb := newTestbed(t)
    // Create 51 agents and check if the first one gets deleted
    var agentConnections []*MockOpAmpConnection
    var agentIds []string
    for i := 0; i < 51; i++ {
        agentConn := &MockOpAmpConnection{}
        agentId := valuer.GenerateUUID().String()
        agentIds = append(agentIds, agentId)
        tb.opampServer.OnMessage(
            agentConn,
            &protobufs.AgentToServer{
                InstanceUid: agentId,
                EffectiveConfig: &protobufs.EffectiveConfig{
                    ConfigMap: initialAgentConf(),
                },
            },
        )
        agentConnections = append(agentConnections, agentConn)
    }

    // Perform a DB level check to ensure the first agent is removed
    count, err := tb.sqlStore.BunDB().NewSelect().
        Model(new(opamptypes.StorableAgent)).
        Where("agent_id = ?", agentIds[0]).
        Count(context.Background())
    require.Nil(err, "Error querying the database for agent count")
    require.Equal(0, count, "First agent should be removed from the database after exceeding the limit of 50 agents")

    // verify there are 50 agents in the db
    count, err = tb.sqlStore.BunDB().NewSelect().
        Model(new(opamptypes.StorableAgent)).
        Count(context.Background())
    require.Nil(err, "Error querying the database for agent count")
    require.Equal(50, count, "There should be 50 agents in the database")

    // Check if the 51st agent received a config
    lastAgentConn := agentConnections[50]
    lastAgentMsg := lastAgentConn.LatestMsgFromServer()
    require.NotNil(
        lastAgentMsg,
        "51st agent should receive a remote config from the server",
    )

    tb.opampServer.Stop()
    require.Equal(
        0, len(tb.testConfigProvider.ConfigUpdateSubscribers),
        "Opamp server should have unsubscribed to config provider updates after shutdown",
    )
}

type testbed struct {
    testConfigProvider *MockAgentConfigProvider
    opampServer        *Server
    t                  *testing.T
    sqlStore           sqlstore.SQLStore
}

func newTestbed(t *testing.T) *testbed {
    testDB := utils.NewQueryServiceDBForTests(t)
    _, err := model.InitDB(testDB.SQLxDB())
    if err != nil {
        t.Fatalf("could not init opamp model: %v", err)
    }

    providerSettings := instrumentationtest.New().ToProviderSettings()
    sharder, err := noopsharder.New(context.TODO(), providerSettings, sharder.Config{})
    require.Nil(t, err)
    orgGetter := implorganization.NewGetter(implorganization.NewStore(testDB), sharder)
    model.InitDB(testDB, slog.Default(), orgGetter)
    testConfigProvider := NewMockAgentConfigProvider()
    opampServer := InitializeServer(nil, testConfigProvider)

    // create a test org
    err = utils.CreateTestOrg(t, testDB)
    if err != nil {
        t.Fatalf("could not create test org: %v", err)
    }

    return &testbed{
        testConfigProvider: testConfigProvider,
        opampServer:        opampServer,
        t:                  t,
        sqlStore:           testDB,
    }
}

|
||||
@@ -60,13 +60,13 @@ func UpsertControlProcessors(
|
||||
|
||||
agenthash, err := addIngestionControlToAgent(agent, signal, processors, false)
|
||||
if err != nil {
|
||||
zap.L().Error("failed to push ingestion rules config to agent", zap.String("agentID", agent.ID), zap.Error(err))
|
||||
zap.L().Error("failed to push ingestion rules config to agent", zap.String("agentID", agent.AgentID), zap.Error(err))
|
||||
continue
|
||||
}
|
||||
|
||||
if agenthash != "" {
|
||||
// subscribe callback
|
||||
model.ListenToConfigUpdate(agent.ID, agenthash, callback)
|
||||
model.ListenToConfigUpdate(agent.OrgID, agent.AgentID, agenthash, callback)
|
||||
}
|
||||
|
||||
hash = agenthash
|
||||
@@ -78,7 +78,7 @@ func UpsertControlProcessors(
|
||||
// addIngestionControlToAgent adds ingestion control rules to agent config
func addIngestionControlToAgent(agent *model.Agent, signal string, processors map[string]interface{}, withLB bool) (string, error) {
    confHash := ""
    config := agent.EffectiveConfig
    config := agent.Config
    c, err := yaml.Parser().Unmarshal([]byte(config))
    if err != nil {
        return confHash, err
@@ -89,7 +89,7 @@ func addIngestionControlToAgent(agent *model.Agent, signal string, processors ma
    // add ingestion control spec
    err = makeIngestionControlSpec(agentConf, Signal(signal), processors)
    if err != nil {
        zap.L().Error("failed to prepare ingestion control processors for agent", zap.String("agentID", agent.ID), zap.Error(err))
        zap.L().Error("failed to prepare ingestion control processors for agent", zap.String("agentID", agent.AgentID), zap.Error(err))
        return confHash, err
    }

@@ -106,7 +106,7 @@ func addIngestionControlToAgent(agent *model.Agent, signal string, processors ma
        return confHash, err
    }
    confHash = string(hash.Sum(nil))
    agent.EffectiveConfig = string(configR)
    agent.Config = string(configR)
    err = agent.Upsert()
    if err != nil {
        return confHash, err

@@ -5,6 +5,7 @@ import (
    "log"
    "net"

    "github.com/SigNoz/signoz/pkg/valuer"
    "github.com/google/uuid"
    "github.com/knadh/koanf"
    "github.com/knadh/koanf/parsers/yaml"
@@ -67,7 +68,7 @@ func (ta *MockAgentConfigProvider) HasRecommendations() bool {
}

// AgentConfigProvider interface
func (ta *MockAgentConfigProvider) RecommendAgentConfig(baseConfYaml []byte) (
func (ta *MockAgentConfigProvider) RecommendAgentConfig(orgId valuer.UUID, baseConfYaml []byte) (
    []byte, string, error,
) {
    if len(ta.ZPagesEndpoint) < 1 {
@@ -92,11 +93,14 @@ func (ta *MockAgentConfigProvider) RecommendAgentConfig(baseConfYaml []byte) (

// AgentConfigProvider interface
func (ta *MockAgentConfigProvider) ReportConfigDeploymentStatus(
    orgId valuer.UUID,
    agentId string,
    configId string,
    err error,
) {
    confIdReports := ta.ReportedDeploymentStatuses[configId]
    // using orgID + configId as key to avoid collisions with other orgs
    // check code in model/coordinator.go for more details
    confIdReports := ta.ReportedDeploymentStatuses[orgId.String()+configId]
    if confIdReports == nil {
        confIdReports = map[string]bool{}
        ta.ReportedDeploymentStatuses[orgId.String()+configId] = confIdReports
@@ -106,10 +110,12 @@ func (ta *MockAgentConfigProvider) ReportConfigDeploymentStatus(
}

// Test helper.
func (ta *MockAgentConfigProvider) HasReportedDeploymentStatus(
func (ta *MockAgentConfigProvider) HasReportedDeploymentStatus(orgID valuer.UUID,
    configId string, agentId string,
) bool {
    confIdReports := ta.ReportedDeploymentStatuses[configId]
    // using orgID + configId as key to avoid collisions with other orgs
    // check code in model/coordinator.go for more details
    confIdReports := ta.ReportedDeploymentStatuses[orgID.String()+configId]
    if confIdReports == nil {
        return false
    }

@@ -4,36 +4,24 @@ import (
    "bytes"
    "context"
    "crypto/sha256"
    "log/slog"
    "sync"
    "time"

    "github.com/SigNoz/signoz/pkg/sqlstore"
    "github.com/SigNoz/signoz/pkg/types/opamptypes"
    "github.com/SigNoz/signoz/pkg/valuer"
    "go.uber.org/zap"
    "google.golang.org/protobuf/proto"

    "github.com/open-telemetry/opamp-go/protobufs"
    "github.com/open-telemetry/opamp-go/server/types"
    opampTypes "github.com/open-telemetry/opamp-go/server/types"
)

type AgentStatus int

const (
    AgentStatusUnknown AgentStatus = iota
    AgentStatusConnected
    AgentStatusDisconnected
)

// set in agent description when agent is capable of supporting
// lb exporter configuration. values: 1 (true) or 0 (false)
const lbExporterFlag = "capabilities.lbexporter"

type Agent struct {
    ID              string      `json:"agentId" yaml:"agentId" db:"agent_id"`
    StartedAt       time.Time   `json:"startedAt" yaml:"startedAt" db:"started_at"`
    TerminatedAt    time.Time   `json:"terminatedAt" yaml:"terminatedAt" db:"terminated_at"`
    EffectiveConfig string      `json:"effectiveConfig" yaml:"effectiveConfig" db:"effective_config"`
    CurrentStatus   AgentStatus `json:"currentStatus" yaml:"currentStatus" db:"current_status"`
    remoteConfig *protobufs.AgentRemoteConfig
    Status       *protobufs.AgentToServer
    opamptypes.StorableAgent
    remoteConfig *protobufs.AgentRemoteConfig
    Status       *protobufs.AgentToServer

    // can this agent be load balancer
    CanLB bool
@@ -41,13 +29,24 @@ type Agent struct {
    // is this agent setup as load balancer
    IsLb bool

    conn      types.Connection
    conn      opampTypes.Connection
    connMutex sync.Mutex
    mux       sync.RWMutex
    store     sqlstore.SQLStore
    logger    *slog.Logger
}

func New(ID string, conn types.Connection) *Agent {
    return &Agent{ID: ID, StartedAt: time.Now(), CurrentStatus: AgentStatusConnected, conn: conn}
// set in agent description when agent is capable of supporting
// lb exporter configuration. values: 1 (true) or 0 (false)
const lbExporterFlag = "capabilities.lbexporter"

func New(store sqlstore.SQLStore, logger *slog.Logger, orgID valuer.UUID, agentID string, conn opampTypes.Connection) *Agent {
    return &Agent{
        StorableAgent: opamptypes.NewStorableAgent(store, orgID, agentID, opamptypes.AgentStatusConnected),
        conn:          conn,
        store:         store,
        logger:        logger,
    }
}

// Upsert inserts or updates the agent in the database.
@@ -55,17 +54,13 @@ func (agent *Agent) Upsert() error {
    agent.mux.Lock()
    defer agent.mux.Unlock()

    _, err := db.NamedExec(`INSERT OR REPLACE INTO agents (
        agent_id,
        started_at,
        effective_config,
        current_status
    ) VALUES (
        :agent_id,
        :started_at,
        :effective_config,
        :current_status
    )`, agent)
    _, err := agent.store.BunDB().NewInsert().
        Model(&agent.StorableAgent).
        On("CONFLICT (agent_id) DO UPDATE").
        Set("updated_at = EXCLUDED.updated_at").
        Set("config = EXCLUDED.config").
        Set("status = EXCLUDED.status").
        Exec(context.Background())
    if err != nil {
        return err
    }
@@ -73,6 +68,27 @@ func (agent *Agent) Upsert() error {
    return nil
}

// keep only the last 50 agents in the database
func (agent *Agent) KeepOnlyLast50Agents(ctx context.Context) {
    // Delete all agents except the last 50 in a single query
    _, err := agent.store.BunDB().
        NewDelete().
        Model(new(opamptypes.StorableAgent)).
        Where("org_id = ?", agent.OrgID).
        Where("agent_id NOT IN (?)",
            agent.store.BunDB().
                NewSelect().
                ColumnExpr("distinct(agent_id)").
                Model(new(opamptypes.StorableAgent)).
                Where("org_id = ?", agent.OrgID).
                OrderExpr("created_at DESC").
                Limit(50)).
        Exec(ctx)
    if err != nil {
        agent.logger.Error("failed to delete old agents", "error", err)
    }
}

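KeepOnlyLast50Agents prunes in a single statement: delete every agent of the org whose id is not among the 50 most recently created. Roughly the SQL bun should emit for the chain above, with the table name taken from the old raw query and the placeholder style assumed:

// Approximate statement generated by the delete-with-subquery above.
const keepLast50SQL = `
DELETE FROM agents
WHERE org_id = ?
  AND agent_id NOT IN (
    SELECT DISTINCT agent_id
    FROM agents
    WHERE org_id = ?
    ORDER BY created_at DESC
    LIMIT 50
  )`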
// extracts lb exporter support flag from agent description. the flag
// is used to decide if lb exporter can be enabled on the agent.
func ExtractLbFlag(agentDescr *protobufs.AgentDescription) bool {
@@ -135,11 +151,11 @@ func (agent *Agent) updateAgentDescription(newStatus *protobufs.AgentToServer) (
            // todo: need to address multiple agent scenario here
            // for now, the first response will be sent back to the UI
            if agent.Status.RemoteConfigStatus.Status == protobufs.RemoteConfigStatuses_RemoteConfigStatuses_APPLIED {
                onConfigSuccess(agent.ID, string(agent.Status.RemoteConfigStatus.LastRemoteConfigHash))
                onConfigSuccess(agent.OrgID, agent.AgentID, string(agent.Status.RemoteConfigStatus.LastRemoteConfigHash))
            }

            if agent.Status.RemoteConfigStatus.Status == protobufs.RemoteConfigStatuses_RemoteConfigStatuses_FAILED {
                onConfigFailure(agent.ID, string(agent.Status.RemoteConfigStatus.LastRemoteConfigHash), agent.Status.RemoteConfigStatus.ErrorMessage)
                onConfigFailure(agent.OrgID, agent.AgentID, string(agent.Status.RemoteConfigStatus.LastRemoteConfigHash), agent.Status.RemoteConfigStatus.ErrorMessage)
            }
        }
    }
@@ -159,7 +175,7 @@ func (agent *Agent) updateHealth(newStatus *protobufs.AgentToServer) {
    agent.Status.Health = newStatus.Health

    if agent.Status != nil && agent.Status.Health != nil && agent.Status.Health.Healthy {
        agent.StartedAt = time.Unix(0, int64(agent.Status.Health.StartTimeUnixNano)).UTC()
        agent.TimeAuditable.UpdatedAt = time.Unix(0, int64(agent.Status.Health.StartTimeUnixNano)).UTC()
    }
}

@@ -190,10 +206,10 @@ func (agent *Agent) updateEffectiveConfig(newStatus *protobufs.AgentToServer, re
        agent.Status.EffectiveConfig = newStatus.EffectiveConfig

        // Convert to string for displaying purposes.
        agent.EffectiveConfig = ""
        agent.Config = ""
        // There should be only one config in the map.
        for _, cfg := range newStatus.EffectiveConfig.ConfigMap.ConfigMap {
            agent.EffectiveConfig = string(cfg.Body)
            agent.Config = string(cfg.Body)
        }
    }
}
@@ -269,7 +285,8 @@ func (agent *Agent) processStatusUpdate(
        agent.SendToAgent(response)

        ListenToConfigUpdate(
            agent.ID,
            agent.OrgID,
            agent.AgentID,
            string(response.RemoteConfig.ConfigHash),
            configProvider.ReportConfigDeploymentStatus,
        )
@@ -277,9 +294,9 @@ func (agent *Agent) processStatusUpdate(
}

func (agent *Agent) updateRemoteConfig(configProvider AgentConfigProvider) bool {
    recommendedConfig, confId, err := configProvider.RecommendAgentConfig([]byte(agent.EffectiveConfig))
    recommendedConfig, confId, err := configProvider.RecommendAgentConfig(agent.OrgID, []byte(agent.Config))
    if err != nil {
        zap.L().Error("could not generate config recommendation for agent", zap.String("agentID", agent.ID), zap.Error(err))
        zap.L().Error("could not generate config recommendation for agent", zap.String("agentID", agent.AgentID), zap.Error(err))
        return false
    }

@@ -1,19 +1,22 @@
package model

import (
    "context"
    "fmt"
    "log/slog"
    "sync"
    "time"

    "github.com/jmoiron/sqlx"
    "github.com/SigNoz/signoz/pkg/modules/organization"
    "github.com/SigNoz/signoz/pkg/sqlstore"
    "github.com/SigNoz/signoz/pkg/types/opamptypes"
    "github.com/SigNoz/signoz/pkg/valuer"
    "github.com/open-telemetry/opamp-go/protobufs"
    "github.com/open-telemetry/opamp-go/server/types"
    "github.com/pkg/errors"
    "go.uber.org/zap"
)

var db *sqlx.DB

var AllAgents = Agents{
    agentsById:  map[string]*Agent{},
    connections: map[types.Connection]map[string]bool{},
@@ -23,6 +26,9 @@ type Agents struct {
    mux         sync.RWMutex
    agentsById  map[string]*Agent
    connections map[types.Connection]map[string]bool
    store       sqlstore.SQLStore
    OrgGetter   organization.Getter
    logger      *slog.Logger
}

func (a *Agents) Count() int {
@@ -30,15 +36,16 @@ func (a *Agents) Count() int {
}

// Initialize the database and create schema if needed
func InitDB(qsDB *sqlx.DB) (*sqlx.DB, error) {
    db = qsDB
func InitDB(sqlStore sqlstore.SQLStore, logger *slog.Logger, orgGetter organization.Getter) {

    AllAgents = Agents{
        agentsById:  make(map[string]*Agent),
        connections: make(map[types.Connection]map[string]bool),
        mux:         sync.RWMutex{},
        store:       sqlStore,
        OrgGetter:   orgGetter,
        logger:      logger,
    }
    return db, nil
}

// RemoveConnection removes the connection all Agent instances associated with the
@@ -49,8 +56,8 @@ func (agents *Agents) RemoveConnection(conn types.Connection) {

    for instanceId := range agents.connections[conn] {
        agent := agents.agentsById[instanceId]
        agent.CurrentStatus = AgentStatusDisconnected
        agent.TerminatedAt = time.Now()
        agent.StorableAgent.Status = opamptypes.AgentStatusDisconnected
        agent.StorableAgent.TerminatedAt = time.Now()
        _ = agent.Upsert()
        delete(agents.agentsById, instanceId)
    }
@@ -67,27 +74,32 @@ func (agents *Agents) FindAgent(agentID string) *Agent {
// FindOrCreateAgent returns the Agent instance associated with the given agentID.
// If the Agent instance does not exist, it is created and added to the list of
// Agent instances.
func (agents *Agents) FindOrCreateAgent(agentID string, conn types.Connection) (*Agent, bool, error) {
func (agents *Agents) FindOrCreateAgent(agentID string, conn types.Connection, orgID valuer.UUID) (*Agent, bool, error) {
    agents.mux.Lock()
    defer agents.mux.Unlock()
    var created bool
    agent, ok := agents.agentsById[agentID]
    var err error
    if !ok || agent == nil {
        agent = New(agentID, conn)
        err = agent.Upsert()
        if err != nil {
            return nil, created, err
        }
        agents.agentsById[agentID] = agent

        if agents.connections[conn] == nil {
            agents.connections[conn] = map[string]bool{}
        }
        agents.connections[conn][agentID] = true
        created = true
    if ok && agent != nil {
        return agent, false, nil
    }
    return agent, created, nil

    if !ok && orgID.IsZero() {
        return nil, false, errors.New("cannot create agent without orgId")
    }

    agent = New(agents.store, agents.logger, orgID, agentID, conn)
    err := agent.Upsert()
    if err != nil {
        return nil, false, err
    }
    agent.KeepOnlyLast50Agents(context.Background())
    agents.agentsById[agentID] = agent

    if agents.connections[conn] == nil {
        agents.connections[conn] = map[string]bool{}
    }
    agents.connections[conn][agentID] = true
    return agent, true, nil
}

func (agents *Agents) GetAllAgents() []*Agent {
@@ -108,18 +120,19 @@ func (agents *Agents) RecommendLatestConfigToAll(
) error {
    for _, agent := range agents.GetAllAgents() {
        newConfig, confId, err := provider.RecommendAgentConfig(
            []byte(agent.EffectiveConfig),
            agent.OrgID,
            []byte(agent.Config),
        )
        if err != nil {
            return errors.Wrap(err, fmt.Sprintf(
                "could not generate conf recommendation for %v", agent.ID,
                "could not generate conf recommendation for %v", agent.AgentID,
            ))
        }

        // Recommendation is same as current config
        if string(newConfig) == agent.EffectiveConfig {
        if string(newConfig) == agent.Config {
            zap.L().Info(
                "Recommended config same as current effective config for agent", zap.String("agentID", agent.ID),
                "Recommended config same as current effective config for agent", zap.String("agentID", agent.AgentID),
            )
            return nil
        }
@@ -144,7 +157,7 @@ func (agents *Agents) RecommendLatestConfigToAll(
            RemoteConfig: newRemoteConfig,
        })

        ListenToConfigUpdate(agent.ID, confId, provider.ReportConfigDeploymentStatus)
        ListenToConfigUpdate(agent.OrgID, agent.AgentID, confId, provider.ReportConfigDeploymentStatus)
    }
    return nil
}

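A hedged caller-side sketch of the new FindOrCreateAgent contract: known agents are returned regardless of orgID, while creation requires a non-zero org, which is how the server forces a retry until the agent's org can be resolved. The wiring below is hypothetical; only the signature and error come from the code above:

func onAgentMessage(agents *Agents, conn types.Connection, agentID string, orgID valuer.UUID) (*Agent, error) {
    agent, created, err := agents.FindOrCreateAgent(agentID, conn, orgID)
    if err != nil {
        // Typically "cannot create agent without orgId": reply minimally and
        // wait for a follow-up message that carries identifying attributes.
        return nil, err
    }
    if created {
        // Creation already ran Upsert and KeepOnlyLast50Agents, so the org
        // holds at most 50 persisted agents at this point.
    }
    return agent, nil
}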
@@ -1,10 +1,12 @@
package model

import "github.com/SigNoz/signoz/pkg/valuer"

// Interface for source of otel collector config recommendations.
type AgentConfigProvider interface {
    // Generate recommended config for an agent based on its `currentConfYaml`
    // and current state of user facing settings for agent based features.
    RecommendAgentConfig(currentConfYaml []byte) (
    RecommendAgentConfig(orgId valuer.UUID, currentConfYaml []byte) (
        recommendedConfYaml []byte,
        // Opaque id of the recommended config, used for reporting deployment status updates
        configId string,
@@ -13,6 +15,7 @@ type AgentConfigProvider interface {

    // Report deployment status for config recommendations generated by RecommendAgentConfig
    ReportConfigDeploymentStatus(
        orgId valuer.UUID,
        agentId string,
        configId string,
        err error,

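A minimal conforming sketch for the two methods shown. The interface also includes at least SubscribeToConfigUpdates, used elsewhere in this changeset, so this stub is illustrative rather than complete:

type noopConfigProvider struct{}

// Recommend the agent's current config unchanged, with an empty config id.
func (noopConfigProvider) RecommendAgentConfig(orgId valuer.UUID, currentConfYaml []byte) ([]byte, string, error) {
    return currentConfYaml, "", nil
}

// Discard deployment reports; a real provider would persist them per org.
func (noopConfigProvider) ReportConfigDeploymentStatus(orgId valuer.UUID, agentId string, configId string, err error) {
}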
@@ -3,6 +3,8 @@ package model
|
||||
import (
|
||||
"fmt"
|
||||
"sync"
|
||||
|
||||
"github.com/SigNoz/signoz/pkg/valuer"
|
||||
)
|
||||
|
||||
// communicates with calling apis when config is applied or fails
|
||||
@@ -15,7 +17,7 @@ func init() {
|
||||
}
|
||||
}
|
||||
|
||||
type OnChangeCallback func(agentId string, hash string, err error)
|
||||
type OnChangeCallback func(orgId valuer.UUID, agentId string, hash string, err error)
|
||||
|
||||
// responsible for managing subscribers on config change
|
||||
type Coordinator struct {
|
||||
@@ -25,42 +27,49 @@ type Coordinator struct {
|
||||
subscribers map[string][]OnChangeCallback
|
||||
}
|
||||
|
||||
func onConfigSuccess(agentId string, hash string) {
|
||||
notifySubscribers(agentId, hash, nil)
|
||||
func getSubscriberKey(orgId valuer.UUID, hash string) string {
|
||||
return orgId.String() + hash
|
||||
}
|
||||
|
||||
func onConfigFailure(agentId string, hash string, errorMessage string) {
|
||||
notifySubscribers(agentId, hash, fmt.Errorf(errorMessage))
|
||||
func onConfigSuccess(orgId valuer.UUID, agentId string, hash string) {
|
||||
key := getSubscriberKey(orgId, hash)
|
||||
notifySubscribers(orgId, agentId, key, nil)
|
||||
}
|
||||
|
||||
func onConfigFailure(orgId valuer.UUID, agentId string, hash string, errorMessage string) {
|
||||
key := getSubscriberKey(orgId, hash)
|
||||
notifySubscribers(orgId, agentId, key, fmt.Errorf(errorMessage))
|
||||
}
|
||||
|
||||
// OnSuccess listens to config changes and notifies subscribers
|
||||
func notifySubscribers(agentId string, hash string, err error) {
|
||||
func notifySubscribers(orgId valuer.UUID, agentId string, key string, err error) {
|
||||
// this method currently does not handle multi-agent scenario.
|
||||
// as soon as a message is delivered, we release all the subscribers
|
||||
// for a given hash
|
||||
subs, ok := coordinator.subscribers[hash]
|
||||
// for a given key
|
||||
subs, ok := coordinator.subscribers[key]
|
||||
if !ok {
|
||||
return
|
||||
}
|
||||
|
||||
for _, s := range subs {
|
||||
s(agentId, hash, err)
|
||||
s(orgId, agentId, key, err)
|
||||
}
|
||||
|
||||
// delete all subscribers for this hash, assume future
|
||||
// delete all subscribers for this key, assume future
|
||||
// notifies will be disabled. the first response is processed
|
||||
delete(coordinator.subscribers, hash)
|
||||
delete(coordinator.subscribers, key)
|
||||
}
|
||||
|
||||
// callers subscribe to this function to listen on config change requests
|
||||
func ListenToConfigUpdate(agentId string, hash string, ss OnChangeCallback) {
|
||||
func ListenToConfigUpdate(orgId valuer.UUID, agentId string, hash string, ss OnChangeCallback) {
|
||||
coordinator.mutex.Lock()
|
||||
defer coordinator.mutex.Unlock()
|
||||
|
||||
if subs, ok := coordinator.subscribers[hash]; ok {
|
||||
key := getSubscriberKey(orgId, hash)
|
||||
if subs, ok := coordinator.subscribers[key]; ok {
|
||||
subs = append(subs, ss)
|
||||
coordinator.subscribers[hash] = subs
|
||||
coordinator.subscribers[key] = subs
|
||||
} else {
|
||||
coordinator.subscribers[hash] = []OnChangeCallback{ss}
|
||||
coordinator.subscribers[key] = []OnChangeCallback{ss}
|
||||
}
|
||||
}
|
||||
|
||||
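The coordinator hunk above re-keys the subscriber map by an org-prefixed hash. A minimal usage sketch of the new round trip; the inline callback is illustrative only, real callers pass provider.ReportConfigDeploymentStatus:

	// orgId, agentId and confHash come from the recommendation flow above
	ListenToConfigUpdate(orgId, agentId, confHash, func(orgId valuer.UUID, agentId string, hash string, err error) {
		if err != nil {
			zap.L().Error("config deployment failed", zap.String("agentID", agentId), zap.Error(err))
			return
		}
		zap.L().Info("config deployed", zap.String("agentID", agentId))
	})

	// when the agent acks, the opamp layer calls onConfigSuccess(orgId, agentId, confHash);
	// that resolves the same getSubscriberKey(orgId, confHash) entry and releases every
	// subscriber registered under it, so two orgs sharing a hash no longer collide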
@@ -2,8 +2,10 @@ package opamp

import (
	"context"
	"time"

	model "github.com/SigNoz/signoz/pkg/query-service/app/opamp/model"
	"github.com/SigNoz/signoz/pkg/valuer"
	"github.com/open-telemetry/opamp-go/protobufs"
	"github.com/open-telemetry/opamp-go/server"
	"github.com/open-telemetry/opamp-go/server/types"
@@ -53,6 +55,7 @@ func (srv *Server) Start(listener string) error {
		ListenEndpoint: listener,
	}

	// This will have to send request to all the agents of all tenants
	unsubscribe := srv.agentConfigProvider.SubscribeToConfigUpdates(func() {
		err := srv.agents.RecommendLatestConfigToAll(srv.agentConfigProvider)
		if err != nil {
@@ -78,21 +81,47 @@ func (srv *Server) onDisconnect(conn types.Connection) {
	srv.agents.RemoveConnection(conn)
}

// When the agent sends the message for the first time, then we need to know the orgID
// For the subsequent requests, agents don't send the attributes unless something is changed
// but we keep them in context mapped which is mapped to the instanceID, so we would know the
// orgID from the context
// note :- there can only be 50 agents in the db for a given orgID, we don't have a check in-memory but we delete from the db after insert.
func (srv *Server) OnMessage(conn types.Connection, msg *protobufs.AgentToServer) *protobufs.ServerToAgent {
	agentID := msg.InstanceUid

	agent, created, err := srv.agents.FindOrCreateAgent(agentID, conn)
	// find the orgID, if nothing is found keep it empty.
	// the find or create agent will return an error if orgID is empty
	// thus retry will happen
	var orgID valuer.UUID
	orgIDs, err := srv.agents.OrgGetter.ListByOwnedKeyRange(context.Background())
	if err == nil && len(orgIDs) == 1 {
		orgID = orgIDs[0].ID
	}

	agent, created, err := srv.agents.FindOrCreateAgent(agentID, conn, orgID)
	if err != nil {
		zap.L().Error("Failed to find or create agent", zap.String("agentID", agentID), zap.Error(err))
		// TODO: handle error

		// Return error response according to OpAMP protocol
		return &protobufs.ServerToAgent{
			InstanceUid: agentID,
			ErrorResponse: &protobufs.ServerErrorResponse{
				Type: protobufs.ServerErrorResponseType_ServerErrorResponseType_Unavailable,
				Details: &protobufs.ServerErrorResponse_RetryInfo{
					RetryInfo: &protobufs.RetryInfo{
						RetryAfterNanoseconds: uint64(5 * time.Second), // minimum recommended retry interval
					},
				},
			},
		}
	}

	if created {
		agent.CanLB = model.ExtractLbFlag(msg.AgentDescription)
		zap.L().Debug(
			"New agent added", zap.Bool("canLb", agent.CanLB),
			zap.String("ID", agent.ID),
			zap.Any("status", agent.CurrentStatus),
			zap.String("agentID", agent.AgentID),
			zap.Any("status", agent.Status),
		)
	}

@@ -119,6 +148,6 @@ func Ready() bool {
	return true
}

func Subscribe(agentId string, hash string, f model.OnChangeCallback) {
	model.ListenToConfigUpdate(agentId, hash, f)
func Subscribe(orgId valuer.UUID, agentId string, hash string, f model.OnChangeCallback) {
	model.ListenToConfigUpdate(orgId, agentId, hash, f)
}
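The OnMessage hunk resolves the org for a connecting agent on the server side, since agents do not send a tenant identifier. A condensed sketch of that resolution step under the current single-org assumption; the helper name and the getter type are hypothetical, the PR inlines this logic in OnMessage:

	// hypothetical helper; assumes the getter matches the OrgGetter used above
	func resolveSingleOrgID(ctx context.Context, orgGetter organization.Getter) valuer.UUID {
		var orgID valuer.UUID
		orgs, err := orgGetter.ListByOwnedKeyRange(ctx)
		if err == nil && len(orgs) == 1 {
			orgID = orgs[0].ID // unambiguous: attribute the agent to the only org
		}
		// zero value otherwise: FindOrCreateAgent rejects it and the agent
		// retries after RetryAfterNanoseconds from the error response
		return orgID
	}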
@@ -179,13 +179,10 @@ func NewServer(serverOptions *ServerOptions) (*Server, error) {

	s.privateHTTP = privateServer

	_, err = opAmpModel.InitDB(serverOptions.SigNoz.SQLStore.SQLxDB())
	if err != nil {
		return nil, err
	}
	opAmpModel.InitDB(serverOptions.SigNoz.SQLStore, serverOptions.SigNoz.Instrumentation.Logger(), serverOptions.SigNoz.Modules.OrgGetter)

	agentConfMgr, err := agentConf.Initiate(&agentConf.ManagerOptions{
		DB: serverOptions.SigNoz.SQLStore.SQLxDB(),
		Store: serverOptions.SigNoz.SQLStore,
		AgentFeatures: []agentConf.AgentFeature{
			logParsingPipelineController,
		},

@@ -5,6 +5,7 @@ import (
	"encoding/json"
	"fmt"
	"io"
	"log/slog"
	"net/http/httptest"
	"runtime/debug"
	"strings"
@@ -24,7 +25,7 @@ import (
	"github.com/SigNoz/signoz/pkg/query-service/app/integrations"
	"github.com/SigNoz/signoz/pkg/query-service/app/logparsingpipeline"
	"github.com/SigNoz/signoz/pkg/query-service/app/opamp"
	opampModel "github.com/SigNoz/signoz/pkg/query-service/app/opamp/model"
	"github.com/SigNoz/signoz/pkg/query-service/app/opamp/model"
	"github.com/SigNoz/signoz/pkg/query-service/constants"
	v3 "github.com/SigNoz/signoz/pkg/query-service/model/v3"
	"github.com/SigNoz/signoz/pkg/query-service/queryBuilderToExpr"
@@ -35,7 +36,9 @@ import (
	"github.com/SigNoz/signoz/pkg/sqlstore"
	"github.com/SigNoz/signoz/pkg/types"
	"github.com/SigNoz/signoz/pkg/types/authtypes"
	"github.com/SigNoz/signoz/pkg/types/opamptypes"
	"github.com/SigNoz/signoz/pkg/types/pipelinetypes"
	"github.com/SigNoz/signoz/pkg/valuer"
	"github.com/google/uuid"
	"github.com/gorilla/mux"
	"github.com/knadh/koanf/parsers/yaml"
@@ -47,7 +50,8 @@ import (
)

func TestLogPipelinesLifecycle(t *testing.T) {
	testbed := NewLogPipelinesTestBed(t, nil)
	agentID := valuer.GenerateUUID().String()
	testbed := NewLogPipelinesTestBed(t, nil, agentID)
	require := require.New(t)

	getPipelinesResp := testbed.GetPipelinesFromQS()
@@ -134,19 +138,19 @@ func TestLogPipelinesLifecycle(t *testing.T) {
		"pipelines config history should not be empty after 1st configuration",
	)
	require.Equal(
		agentConf.DeployInitiated, getPipelinesResp.History[0].DeployStatus,
		opamptypes.DeployInitiated, getPipelinesResp.History[0].DeployStatus,
		"pipelines deployment should be in progress after 1st configuration",
	)

	// Deployment status should get updated after acknowledgement from opamp client
	testbed.simulateOpampClientAcknowledgementForLatestConfig()
	testbed.simulateOpampClientAcknowledgementForLatestConfig(agentID)

	getPipelinesResp = testbed.GetPipelinesFromQS()
	assertPipelinesResponseMatchesPostedPipelines(
		t, postablePipelines, getPipelinesResp,
	)
	require.Equal(
		agentConf.Deployed,
		opamptypes.Deployed,
		getPipelinesResp.History[0].DeployStatus,
		"pipeline deployment should be complete after acknowledgment from opamp client",
	)
@@ -166,19 +170,19 @@ func TestLogPipelinesLifecycle(t *testing.T) {
		"there should be 2 history entries after posting pipelines config for the 2nd time",
	)
	require.Equal(
		agentConf.DeployInitiated, getPipelinesResp.History[0].DeployStatus,
		opamptypes.DeployInitiated, getPipelinesResp.History[0].DeployStatus,
		"deployment should be in progress for latest pipeline config",
	)

	// Deployment status should get updated again on receiving msg from client.
	testbed.simulateOpampClientAcknowledgementForLatestConfig()
	testbed.simulateOpampClientAcknowledgementForLatestConfig(agentID)

	getPipelinesResp = testbed.GetPipelinesFromQS()
	assertPipelinesResponseMatchesPostedPipelines(
		t, postablePipelines, getPipelinesResp,
	)
	require.Equal(
		agentConf.Deployed,
		opamptypes.Deployed,
		getPipelinesResp.History[0].DeployStatus,
		"deployment for latest pipeline config should be complete after acknowledgment from opamp client",
	)
@@ -186,7 +190,8 @@ func TestLogPipelinesLifecycle(t *testing.T) {

func TestLogPipelinesHistory(t *testing.T) {
	require := require.New(t)
	testbed := NewLogPipelinesTestBed(t, nil)
	agentID := valuer.GenerateUUID().String()
	testbed := NewLogPipelinesTestBed(t, nil, agentID)

	// Only the latest config version can be "IN_PROGRESS",
	// other incomplete deployments should have status "UNKNOWN"
@@ -232,7 +237,7 @@ func TestLogPipelinesHistory(t *testing.T) {
	testbed.PostPipelinesToQS(postablePipelines)
	getPipelinesResp = testbed.GetPipelinesFromQS()
	require.Equal(1, len(getPipelinesResp.History))
	require.Equal(agentConf.DeployInitiated, getPipelinesResp.History[0].DeployStatus)
	require.Equal(opamptypes.DeployInitiated, getPipelinesResp.History[0].DeployStatus)

	postablePipelines.Pipelines[0].Config = append(
		postablePipelines.Pipelines[0].Config,
@@ -251,8 +256,8 @@ func TestLogPipelinesHistory(t *testing.T) {
	getPipelinesResp = testbed.GetPipelinesFromQS()

	require.Equal(2, len(getPipelinesResp.History))
	require.Equal(agentConf.DeployInitiated, getPipelinesResp.History[0].DeployStatus)
	require.Equal(agentConf.DeployStatusUnknown, getPipelinesResp.History[1].DeployStatus)
	require.Equal(opamptypes.DeployInitiated, getPipelinesResp.History[0].DeployStatus)
	require.Equal(opamptypes.DeployStatusUnknown, getPipelinesResp.History[1].DeployStatus)
}

func TestLogPipelinesValidation(t *testing.T) {
@@ -389,7 +394,8 @@ func TestLogPipelinesValidation(t *testing.T) {

	for _, tc := range testCases {
		t.Run(tc.Name, func(t *testing.T) {
			testbed := NewLogPipelinesTestBed(t, nil)
			agentID := valuer.GenerateUUID().String()
			testbed := NewLogPipelinesTestBed(t, nil, agentID)
			testbed.PostPipelinesToQSExpectingStatusCode(
				pipelinetypes.PostablePipelines{
					Pipelines: []pipelinetypes.PostablePipeline{tc.Pipeline},
@@ -460,6 +466,7 @@ type LogPipelinesTestBed struct {
	agentConfMgr *agentConf.Manager
	opampServer *opamp.Server
	opampClientConn *opamp.MockOpAmpConnection
	store sqlstore.SQLStore
	userModule user.Module
}

@@ -469,9 +476,6 @@ func NewTestbedWithoutOpamp(t *testing.T, sqlStore sqlstore.SQLStore) *LogPipeli
		sqlStore = utils.NewQueryServiceDBForTests(t)
	}

	// create test org
	// utils.CreateTestOrg(t, sqlStore)

	ic, err := integrations.NewController(sqlStore)
	if err != nil {
		t.Fatalf("could not create integrations controller: %v", err)
@@ -508,17 +512,17 @@ func NewTestbedWithoutOpamp(t *testing.T, sqlStore sqlstore.SQLStore) *LogPipeli
		t.Fatalf("could not create a new ApiHandler: %v", err)
	}

	// organizationModule := implorganization.NewModule(implorganization.NewStore(store))
	user, apiErr := createTestUser(modules.OrgSetter, modules.User)
	if apiErr != nil {
		t.Fatalf("could not create a test user: %v", apiErr)
	}

	// Mock an available opamp agent
	testDB, err := opampModel.InitDB(sqlStore.SQLxDB())
	require.Nil(t, err, "failed to init opamp model")

	agentConfMgr, err := agentConf.Initiate(&agentConf.ManagerOptions{
		DB: testDB,
		Store: sqlStore,
		AgentFeatures: []agentConf.AgentFeature{
			apiHandler.LogsParsingPipelineController,
		}})
@@ -529,15 +533,22 @@ func NewTestbedWithoutOpamp(t *testing.T, sqlStore sqlstore.SQLStore) *LogPipeli
		testUser: user,
		apiHandler: apiHandler,
		agentConfMgr: agentConfMgr,
		store: sqlStore,
		userModule: modules.User,
	}
}

func NewLogPipelinesTestBed(t *testing.T, testDB sqlstore.SQLStore) *LogPipelinesTestBed {
func NewLogPipelinesTestBed(t *testing.T, testDB sqlstore.SQLStore, agentID string) *LogPipelinesTestBed {
	testbed := NewTestbedWithoutOpamp(t, testDB)

	providerSettings := instrumentationtest.New().ToProviderSettings()
	sharder, err := noopsharder.New(context.TODO(), providerSettings, sharder.Config{})
	orgGetter := implorganization.NewGetter(implorganization.NewStore(testbed.store), sharder)

	model.InitDB(testbed.store, slog.Default(), orgGetter)

	opampServer := opamp.InitializeServer(nil, testbed.agentConfMgr)
	err := opampServer.Start(opamp.GetAvailableLocalAddress())
	err = opampServer.Start(opamp.GetAvailableLocalAddress())
	require.Nil(t, err, "failed to start opamp server")

	t.Cleanup(func() {
@@ -548,7 +559,7 @@ func NewLogPipelinesTestBed(t *testing.T, testDB sqlstore.SQLStore) *LogPipeline
	opampServer.OnMessage(
		opampClientConnection,
		&protobufs.AgentToServer{
			InstanceUid: "test",
			InstanceUid: agentID,
			EffectiveConfig: &protobufs.EffectiveConfig{
				ConfigMap: newInitialAgentConfigMap(),
			},
@@ -743,10 +754,10 @@ func assertPipelinesRecommendedInRemoteConfig(
	}
}

func (tb *LogPipelinesTestBed) simulateOpampClientAcknowledgementForLatestConfig() {
func (tb *LogPipelinesTestBed) simulateOpampClientAcknowledgementForLatestConfig(agentID string) {
	lastMsg := tb.opampClientConn.LatestMsgFromServer()
	tb.opampServer.OnMessage(tb.opampClientConn, &protobufs.AgentToServer{
		InstanceUid: "test",
		InstanceUid: agentID,
		EffectiveConfig: &protobufs.EffectiveConfig{
			ConfigMap: lastMsg.RemoteConfig.Config,
		},

@@ -32,6 +32,7 @@ import (
	"github.com/SigNoz/signoz/pkg/types/authtypes"
	"github.com/SigNoz/signoz/pkg/types/dashboardtypes"
	"github.com/SigNoz/signoz/pkg/types/pipelinetypes"
	"github.com/SigNoz/signoz/pkg/valuer"
	mockhouse "github.com/srikanthccv/ClickHouse-go-mock"
	"github.com/stretchr/testify/require"
)
@@ -42,6 +43,9 @@ func TestSignozIntegrationLifeCycle(t *testing.T) {
	require := require.New(t)
	testbed := NewIntegrationsTestBed(t, nil)

	merr := utils.CreateTestOrg(t, testbed.store)
	require.NoError(merr)

	installedResp := testbed.GetInstalledIntegrationsFromQS()
	require.Equal(
		len(installedResp.Integrations), 0,
@@ -125,8 +129,12 @@ func TestLogPipelinesForInstalledSignozIntegrations(t *testing.T) {
	require := require.New(t)

	testDB := utils.NewQueryServiceDBForTests(t)
	utils.CreateTestOrg(t, testDB)

	agentID := valuer.GenerateUUID().String()

	integrationsTB := NewIntegrationsTestBed(t, testDB)
	pipelinesTB := NewLogPipelinesTestBed(t, testDB)
	pipelinesTB := NewLogPipelinesTestBed(t, testDB, agentID)

	availableIntegrationsResp := integrationsTB.GetAvailableIntegrationsFromQS()
	availableIntegrations := availableIntegrationsResp.Integrations
@@ -380,6 +388,7 @@ type IntegrationsTestBed struct {
	testUser *types.User
	qsHttpHandler http.Handler
	mockClickhouse mockhouse.ClickConnMockCommon
	store sqlstore.SQLStore
	userModule user.Module
}

@@ -618,6 +627,7 @@ func NewIntegrationsTestBed(t *testing.T, testDB sqlstore.SQLStore) *Integration
		testUser: user,
		qsHttpHandler: router,
		mockClickhouse: mockClickhouse,
		store: testDB,
		userModule: modules.User,
	}
}

@@ -11,6 +11,8 @@ import (
	"github.com/SigNoz/signoz/pkg/sqlmigrator"
	"github.com/SigNoz/signoz/pkg/sqlstore"
	"github.com/SigNoz/signoz/pkg/sqlstore/sqlitesqlstore"
	"github.com/SigNoz/signoz/pkg/types"
	"github.com/SigNoz/signoz/pkg/valuer"
	_ "github.com/mattn/go-sqlite3"
)

@@ -69,6 +71,7 @@ func NewTestSqliteDB(t *testing.T) (sqlStore sqlstore.SQLStore, testDBFilePath s
			sqlmigration.NewUpdateApiMonitoringFiltersFactory(sqlStore),
			sqlmigration.NewAddKeyOrganizationFactory(sqlStore),
			sqlmigration.NewUpdateDashboardFactory(sqlStore),
			sqlmigration.NewUpdateAgentsFactory(sqlStore),
		),
	)
	if err != nil {
@@ -87,3 +90,30 @@ func NewQueryServiceDBForTests(t *testing.T) sqlstore.SQLStore {
	sqlStore, _ := NewTestSqliteDB(t)
	return sqlStore
}

func CreateTestOrg(t *testing.T, store sqlstore.SQLStore) error {
	org := &types.Organization{
		Identifiable: types.Identifiable{
			ID: valuer.GenerateUUID(),
		},
		Name: "testOrg",
	}
	_, err := store.BunDB().NewInsert().Model(org).Exec(context.Background())
	if err != nil {
		return err
	}
	return nil
}

func GetTestOrgId(store sqlstore.SQLStore) (valuer.UUID, error) {
	var orgID valuer.UUID
	err := store.BunDB().NewSelect().
		Model(&types.Organization{}).
		Column("id").
		Limit(1).
		Scan(context.Background(), &orgID)
	if err != nil {
		return orgID, err
	}
	return orgID, nil
}
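The new CreateTestOrg and GetTestOrgId helpers give tests an organization to scope agents and config versions to. A typical sequence, mirroring the integration tests earlier in this diff:

	testDB := utils.NewQueryServiceDBForTests(t)
	require.NoError(t, utils.CreateTestOrg(t, testDB))

	orgID, err := utils.GetTestOrgId(testDB)
	require.NoError(t, err)
	// orgID is the valuer.UUID that org-scoped APIs such as
	// RecommendAgentConfig and ListenToConfigUpdate now expect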
@@ -68,7 +68,7 @@ func CollisionHandledFinalExpr(
			return "", nil, errors.Wrapf(err, errors.TypeInvalidInput, errors.CodeInvalidInput, correction)
		} else {
			// not even a close match, return an error
			return "", nil, err
			return "", nil, errors.Wrapf(err, errors.TypeInvalidInput, errors.CodeInvalidInput, "field %s not found", field.Name)
		}
	} else {
		for _, key := range keysForField {

@@ -46,7 +46,7 @@ func (b *defaultConditionBuilder) ConditionFor(
) (string, error) {

	if key.FieldContext != telemetrytypes.FieldContextResource {
		return "", nil
		return "true", nil
	}

	column, err := b.fm.ColumnFor(ctx, key)

@@ -22,7 +22,7 @@ type filterExpressionVisitor struct {
	conditionBuilder qbtypes.ConditionBuilder
	warnings []string
	fieldKeys map[string][]*telemetrytypes.TelemetryFieldKey
	errors []error
	errors []string
	builder *sqlbuilder.SelectBuilder
	fullTextColumn *telemetrytypes.TelemetryFieldKey
	jsonBodyPrefix string
@@ -90,11 +90,14 @@ func PrepareWhereClause(query string, opts FilterExprVisitorOpts) (*sqlbuilder.W
		combinedErrors := errors.Newf(
			errors.TypeInvalidInput,
			errors.CodeInvalidInput,
			"found %d syntax errors while parsing the filter expression: %v",
			"found %d syntax errors while parsing the filter expression",
			len(parserErrorListener.SyntaxErrors),
			parserErrorListener.SyntaxErrors,
		)
		return nil, nil, combinedErrors
		additionals := make([]string, len(parserErrorListener.SyntaxErrors))
		for _, err := range parserErrorListener.SyntaxErrors {
			additionals = append(additionals, err.Error())
		}
		return nil, nil, combinedErrors.WithAdditional(additionals...)
	}

	// Visit the parse tree with our ClickHouse visitor
@@ -105,11 +108,10 @@ func PrepareWhereClause(query string, opts FilterExprVisitorOpts) (*sqlbuilder.W
		combinedErrors := errors.Newf(
			errors.TypeInvalidInput,
			errors.CodeInvalidInput,
			"found %d errors while parsing the search expression: %v",
			"found %d errors while parsing the search expression",
			len(visitor.errors),
			visitor.errors,
		)
		return nil, nil, combinedErrors
		return nil, nil, combinedErrors.WithAdditional(visitor.errors...)
	}

	whereClause := sqlbuilder.NewWhereClause().AddWhereExpr(visitor.builder.Args, cond)
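The pattern in both hunks above is to fold the individual parser messages into one typed error via WithAdditional instead of formatting them into the message string. A minimal sketch of both sides of that contract; errors.Unwrapb returning the additionals last matches its use in the telemetrylogs test later in this diff:

	// producer side: one typed error, detail strings attached as additionals
	msgs := []string{"line 1:5 unexpected token", "line 1:9 missing value"}
	err := errors.Newf(
		errors.TypeInvalidInput,
		errors.CodeInvalidInput,
		"found %d errors while parsing the search expression", len(msgs),
	).WithAdditional(msgs...)

	// consumer side: the additionals are the last value returned by Unwrapb
	_, _, _, _, _, additionals := errors.Unwrapb(err)
	for _, msg := range additionals {
		fmt.Println(msg) // each original parser message, intact
	}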
@@ -234,15 +236,11 @@ func (v *filterExpressionVisitor) VisitPrimary(ctx *grammar.PrimaryContext) any
	// Handle standalone key/value as a full text search term
	if ctx.GetChildCount() == 1 {
		if v.skipFullTextFilter {
			return ""
			return "true"
		}

		if v.fullTextColumn == nil {
			v.errors = append(v.errors, errors.Newf(
				errors.TypeInvalidInput,
				errors.CodeInvalidInput,
				"full text search is not supported",
			))
			v.errors = append(v.errors, "full text search is not supported")
			return ""
		}
		child := ctx.GetChild(0)
@@ -251,7 +249,7 @@ func (v *filterExpressionVisitor) VisitPrimary(ctx *grammar.PrimaryContext) any
			keyText := keyCtx.GetText()
			cond, err := v.conditionBuilder.ConditionFor(context.Background(), v.fullTextColumn, qbtypes.FilterOperatorRegexp, keyText, v.builder)
			if err != nil {
				v.errors = append(v.errors, errors.WrapInternalf(err, errors.CodeInternal, "failed to build full text search condition"))
				v.errors = append(v.errors, fmt.Sprintf("failed to build full text search condition: %s", err.Error()))
				return ""
			}
			return cond
@@ -266,12 +264,12 @@ func (v *filterExpressionVisitor) VisitPrimary(ctx *grammar.PrimaryContext) any
			} else if valCtx.KEY() != nil {
				text = valCtx.KEY().GetText()
			} else {
				v.errors = append(v.errors, errors.Newf(errors.TypeInvalidInput, errors.CodeInvalidInput, "unsupported value type: %s", valCtx.GetText()))
				v.errors = append(v.errors, fmt.Sprintf("unsupported value type: %s", valCtx.GetText()))
				return ""
			}
			cond, err := v.conditionBuilder.ConditionFor(context.Background(), v.fullTextColumn, qbtypes.FilterOperatorRegexp, text, v.builder)
			if err != nil {
				v.errors = append(v.errors, errors.WrapInternalf(err, errors.CodeInternal, "failed to build full text search condition"))
				v.errors = append(v.errors, fmt.Sprintf("failed to build full text search condition: %s", err.Error()))
				return ""
			}
			return cond
@@ -419,7 +417,7 @@ func (v *filterExpressionVisitor) VisitComparison(ctx *grammar.ComparisonContext
	for _, key := range keys {
		condition, err := v.conditionBuilder.ConditionFor(context.Background(), key, op, value, v.builder)
		if err != nil {
			v.errors = append(v.errors, errors.WrapInternalf(err, errors.CodeInternal, "failed to build condition"))
			v.errors = append(v.errors, fmt.Sprintf("failed to build condition: %s", err.Error()))
			return ""
		}
		conds = append(conds, condition)
@@ -459,7 +457,7 @@ func (v *filterExpressionVisitor) VisitValueList(ctx *grammar.ValueListContext)
func (v *filterExpressionVisitor) VisitFullText(ctx *grammar.FullTextContext) any {

	if v.skipFullTextFilter {
		return ""
		return "true"
	}

	var text string
@@ -471,16 +469,12 @@ func (v *filterExpressionVisitor) VisitFullText(ctx *grammar.FullTextContext) an
	}

	if v.fullTextColumn == nil {
		v.errors = append(v.errors, errors.Newf(
			errors.TypeInvalidInput,
			errors.CodeInvalidInput,
			"full text search is not supported",
		))
		v.errors = append(v.errors, "full text search is not supported")
		return ""
	}
	cond, err := v.conditionBuilder.ConditionFor(context.Background(), v.fullTextColumn, qbtypes.FilterOperatorRegexp, text, v.builder)
	if err != nil {
		v.errors = append(v.errors, errors.WrapInternalf(err, errors.CodeInternal, "failed to build full text search condition"))
		v.errors = append(v.errors, fmt.Sprintf("failed to build full text search condition: %s", err.Error()))
		return ""
	}
	return cond
@@ -498,34 +492,19 @@ func (v *filterExpressionVisitor) VisitFunctionCall(ctx *grammar.FunctionCallCon
		functionName = "hasAll"
	} else {
		// Default fallback
		v.errors = append(v.errors, errors.Newf(
			errors.TypeInvalidInput,
			errors.CodeInvalidInput,
			"unknown function `%s`",
			ctx.GetText(),
		))
		v.errors = append(v.errors, fmt.Sprintf("unknown function `%s`", ctx.GetText()))
		return ""
	}
	params := v.Visit(ctx.FunctionParamList()).([]any)

	if len(params) < 2 {
		v.errors = append(v.errors, errors.Newf(
			errors.TypeInvalidInput,
			errors.CodeInvalidInput,
			"function `%s` expects key and value parameters",
			functionName,
		))
		v.errors = append(v.errors, fmt.Sprintf("function `%s` expects key and value parameters", functionName))
		return ""
	}

	keys, ok := params[0].([]*telemetrytypes.TelemetryFieldKey)
	if !ok {
		v.errors = append(v.errors, errors.Newf(
			errors.TypeInvalidInput,
			errors.CodeInvalidInput,
			"function `%s` expects key parameter to be a field key",
			functionName,
		))
		v.errors = append(v.errors, fmt.Sprintf("function `%s` expects key parameter to be a field key", functionName))
		return ""
	}
	value := params[1:]
@@ -536,12 +515,7 @@ func (v *filterExpressionVisitor) VisitFunctionCall(ctx *grammar.FunctionCallCon
	if strings.HasPrefix(key.Name, v.jsonBodyPrefix) {
		fieldName, _ = v.jsonKeyToKey(context.Background(), key, qbtypes.FilterOperatorUnknown, value)
	} else {
		v.errors = append(v.errors, errors.Newf(
			errors.TypeInvalidInput,
			errors.CodeInvalidInput,
			"function `%s` supports only body JSON search",
			functionName,
		))
		v.errors = append(v.errors, fmt.Sprintf("function `%s` supports only body JSON search", functionName))
		return ""
	}

@@ -603,12 +577,7 @@ func (v *filterExpressionVisitor) VisitValue(ctx *grammar.ValueContext) any {
	} else if ctx.NUMBER() != nil {
		number, err := strconv.ParseFloat(ctx.NUMBER().GetText(), 64)
		if err != nil {
			v.errors = append(v.errors, errors.Newf(
				errors.TypeInvalidInput,
				errors.CodeInvalidInput,
				"failed to parse number %s",
				ctx.NUMBER().GetText(),
			))
			v.errors = append(v.errors, fmt.Sprintf("failed to parse number %s", ctx.NUMBER().GetText()))
			return ""
		}
		return number
@@ -648,19 +617,11 @@ func (v *filterExpressionVisitor) VisitKey(ctx *grammar.KeyContext) any {

	if len(fieldKeysForName) == 0 {
		if strings.HasPrefix(fieldKey.Name, v.jsonBodyPrefix) && v.jsonBodyPrefix != "" && keyName == "" {
			v.errors = append(v.errors, errors.NewInvalidInputf(
				errors.CodeInvalidInput,
				"missing key for body json search - expected key of the form `body.key` (ex: `body.status`)",
			))
			v.errors = append(v.errors, "missing key for body json search - expected key of the form `body.key` (ex: `body.status`)")
		} else {
			// TODO(srikanthccv): do we want to return an error here?
			// should we infer the type and auto-magically build a key for expression?
			v.errors = append(v.errors, errors.Newf(
				errors.TypeInvalidInput,
				errors.CodeInvalidInput,
				"key `%s` not found",
				fieldKey.Name,
			))
			v.errors = append(v.errors, fmt.Sprintf("key `%s` not found", fieldKey.Name))
		}
	}

@@ -110,6 +110,7 @@ func NewSQLMigrationProviderFactories(sqlstore sqlstore.SQLStore) factory.NamedM
		sqlmigration.NewUpdateDashboardFactory(sqlstore),
		sqlmigration.NewDropFeatureSetFactory(),
		sqlmigration.NewDropDeprecatedTablesFactory(),
		sqlmigration.NewUpdateAgentsFactory(sqlstore),
	)
}
323	pkg/sqlmigration/041_update_agents.go	Normal file
@@ -0,0 +1,323 @@
package sqlmigration

import (
	"context"
	"database/sql"
	"time"

	"github.com/SigNoz/signoz/pkg/errors"
	"github.com/SigNoz/signoz/pkg/factory"
	"github.com/SigNoz/signoz/pkg/sqlstore"
	"github.com/SigNoz/signoz/pkg/types"
	"github.com/SigNoz/signoz/pkg/types/opamptypes"
	"github.com/SigNoz/signoz/pkg/valuer"
	"github.com/uptrace/bun"
	"github.com/uptrace/bun/migrate"
)

type updateAgents struct {
	store sqlstore.SQLStore
}

type existingAgent41 struct {
	bun.BaseModel `bun:"table:agents"`
	AgentID string `bun:"agent_id,pk,type:text,unique"`
	StartedAt time.Time `bun:"started_at,notnull"`
	TerminatedAt time.Time `bun:"terminated_at"`
	CurrentStatus opamptypes.AgentStatus `bun:"current_status,type:text,notnull"`
	EffectiveConfig string `bun:"effective_config,type:text,notnull"`
}

type newAgent41 struct {
	bun.BaseModel `bun:"table:agent"`

	types.Identifiable
	types.TimeAuditable
	// AgentID is needed as the ID from opamp client is ULID and not UUID, so we are keeping it like this
	AgentID string `json:"agentId" yaml:"agentId" bun:"agent_id,type:text,notnull,unique"`
	OrgID string `json:"orgId" yaml:"orgId" bun:"org_id,type:text,notnull"`
	TerminatedAt time.Time `json:"terminatedAt" yaml:"terminatedAt" bun:"terminated_at"`
	Status opamptypes.AgentStatus `json:"currentStatus" yaml:"currentStatus" bun:"status,type:text,notnull"`
	Config string `bun:"config,type:text,notnull"`
}

type existingAgentConfigVersions41 struct {
	bun.BaseModel `bun:"table:agent_config_versions"`
	ID string `bun:"id,pk,type:text"`
	CreatedBy string `bun:"created_by,type:text"`
	CreatedAt time.Time `bun:"created_at,default:CURRENT_TIMESTAMP"`
	UpdatedBy string `bun:"updated_by,type:text"`
	UpdatedAt time.Time `bun:"updated_at,default:CURRENT_TIMESTAMP"`
	Version int `bun:"version,default:1,unique:element_version_idx"`
	Active int `bun:"active"`
	IsValid int `bun:"is_valid"`
	Disabled int `bun:"disabled"`
	ElementType opamptypes.ElementType `bun:"element_type,notnull,type:varchar(120),unique:element_version_idx"`
	DeployStatus opamptypes.DeployStatus `bun:"deploy_status,notnull,type:varchar(80),default:'DIRTY'"`
	DeploySequence int `bun:"deploy_sequence"`
	DeployResult string `bun:"deploy_result,type:text"`
	LastHash string `bun:"last_hash,type:text"`
	LastConfig string `bun:"last_config,type:text"`
}

type newAgentConfigVersion41 struct {
	bun.BaseModel `bun:"table:agent_config_version"`

	types.Identifiable
	types.TimeAuditable
	types.UserAuditable
	OrgID string `json:"orgId" bun:"org_id,type:text,notnull,unique:element_version_org_idx"`
	Version int `json:"version" bun:"version,unique:element_version_org_idx"`
	ElementType opamptypes.ElementType `json:"elementType" bun:"element_type,type:text,notnull,unique:element_version_org_idx"`
	DeployStatus opamptypes.DeployStatus `json:"deployStatus" bun:"deploy_status,type:text,notnull,default:'dirty'"`
	DeploySequence int `json:"deploySequence" bun:"deploy_sequence"`
	DeployResult string `json:"deployResult" bun:"deploy_result,type:text"`
	Hash string `json:"lastHash" bun:"hash,type:text"`
	Config string `json:"config" bun:"config,type:text"`
}

type existingAgentConfigElement41 struct {
	bun.BaseModel `bun:"table:agent_config_elements"`

	ID string `bun:"id,pk,type:text"`
	CreatedBy string `bun:"created_by,type:text"`
	CreatedAt time.Time `bun:"created_at,default:CURRENT_TIMESTAMP"`
	UpdatedBy string `bun:"updated_by,type:text"`
	UpdatedAt time.Time `bun:"updated_at,default:CURRENT_TIMESTAMP"`
	ElementID string `bun:"element_id,type:text,notnull,unique:agent_config_elements_u1"`
	ElementType string `bun:"element_type,type:varchar(120),notnull,unique:agent_config_elements_u1"`
	VersionID string `bun:"version_id,type:text,notnull,unique:agent_config_elements_u1"`
}

type newAgentConfigElement41 struct {
	bun.BaseModel `bun:"table:agent_config_element"`

	types.Identifiable
	types.TimeAuditable
	ElementID string `bun:"element_id,type:text,notnull,unique:element_type_version_idx"`
	ElementType string `bun:"element_type,type:text,notnull,unique:element_type_version_idx"`
	VersionID string `bun:"version_id,type:text,notnull,unique:element_type_version_idx"`
}

func NewUpdateAgentsFactory(sqlstore sqlstore.SQLStore) factory.ProviderFactory[SQLMigration, Config] {
	return factory.NewProviderFactory(factory.MustNewName("update_agents"), func(ctx context.Context, ps factory.ProviderSettings, c Config) (SQLMigration, error) {
		return newUpdateAgents(ctx, ps, c, sqlstore)
	})
}

func newUpdateAgents(_ context.Context, _ factory.ProviderSettings, _ Config, store sqlstore.SQLStore) (SQLMigration, error) {
	return &updateAgents{
		store: store,
	}, nil
}

func (migration *updateAgents) Register(migrations *migrate.Migrations) error {
	if err := migrations.Register(migration.Up, migration.Down); err != nil {
		return err
	}

	return nil
}

func (migration *updateAgents) Up(ctx context.Context, db *bun.DB) error {

	// begin transaction
	tx, err := db.BeginTx(ctx, nil)
	if err != nil {
		return err
	}

	defer func() {
		_ = tx.Rollback()
	}()

	// get all org ids
	var orgIDs []string
	if err := tx.NewSelect().Model(new(types.Organization)).Column("id").Scan(ctx, &orgIDs); err != nil {
		return err
	}

	// there are multiple orgs, so we don't need to update the agents table
	if len(orgIDs) > 1 {
		return errors.Newf(errors.TypeInternal, errors.CodeInternal, "multiple orgs found: %v", orgIDs)
	}

	err = migration.
		store.
		Dialect().
		RenameTableAndModifyModel(ctx, tx, new(existingAgent41), new(newAgent41), []string{OrgReference}, func(ctx context.Context) error {
			existingAgents := make([]*existingAgent41, 0)
			err = tx.
				NewSelect().
				Model(&existingAgents).
				Scan(ctx)
			if err != nil && err != sql.ErrNoRows {
				return err
			}

			if err == nil && len(existingAgents) > 0 {
				newAgents, err := migration.
					CopyOldAgentToNewAgent(ctx, tx, existingAgents, orgIDs[0])
				if err != nil {
					return err
				}
				_, err = tx.
					NewInsert().
					Model(&newAgents).
					Exec(ctx)
				if err != nil {
					return err
				}
			}
			return nil
		})
	if err != nil {
		return err
	}

	err = migration.
		store.
		Dialect().
		RenameTableAndModifyModel(ctx, tx, new(existingAgentConfigVersions41), new(newAgentConfigVersion41), []string{OrgReference}, func(ctx context.Context) error {
			existingAgentConfigVersions := make([]*existingAgentConfigVersions41, 0)
			err = tx.
				NewSelect().
				Model(&existingAgentConfigVersions).
				Scan(ctx)
			if err != nil && err != sql.ErrNoRows {
				return err
			}

			if err == nil && len(existingAgentConfigVersions) > 0 {
				newAgentConfigVersions, err := migration.
					CopyOldAgentConfigVersionToNewAgentConfigVersion(ctx, tx, existingAgentConfigVersions, orgIDs[0])
				if err != nil {
					return err
				}
				_, err = tx.
					NewInsert().
					Model(&newAgentConfigVersions).
					Exec(ctx)
				if err != nil {
					return err
				}
			}
			return nil
		})
	if err != nil {
		return err
	}

	err = migration.
		store.
		Dialect().
		RenameTableAndModifyModel(ctx, tx, new(existingAgentConfigElement41), new(newAgentConfigElement41), []string{AgentConfigVersionReference}, func(ctx context.Context) error {
			existingAgentConfigElements := make([]*existingAgentConfigElement41, 0)
			err = tx.
				NewSelect().
				Model(&existingAgentConfigElements).
				Scan(ctx)
			if err != nil && err != sql.ErrNoRows {
				return err
			}

			if err == nil && len(existingAgentConfigElements) > 0 {
				newAgentConfigElements, err := migration.
					CopyOldAgentConfigElementToNewAgentConfigElement(ctx, tx, existingAgentConfigElements, orgIDs[0])
				if err != nil {
					return err
				}
				_, err = tx.
					NewInsert().
					Model(&newAgentConfigElements).
					Exec(ctx)
				if err != nil {
					return err
				}
			}
			return nil
		})
	if err != nil {
		return err
	}

	if err := tx.Commit(); err != nil {
		return err
	}

	return nil
}

func (migration *updateAgents) Down(ctx context.Context, db *bun.DB) error {
	return nil
}

func (migration *updateAgents) CopyOldAgentToNewAgent(ctx context.Context, tx bun.IDB, existingAgents []*existingAgent41, orgID string) ([]*newAgent41, error) {
	newAgents := make([]*newAgent41, 0)
	for _, existingAgent := range existingAgents {
		newAgents = append(newAgents, &newAgent41{
			Identifiable: types.Identifiable{ID: valuer.GenerateUUID()},
			AgentID: existingAgent.AgentID,
			TimeAuditable: types.TimeAuditable{
				CreatedAt: time.Unix(existingAgent.StartedAt.Unix(), 0),
				UpdatedAt: time.Unix(existingAgent.StartedAt.Unix(), 0),
			},
			Status: existingAgent.CurrentStatus,
			Config: existingAgent.EffectiveConfig,
			TerminatedAt: existingAgent.TerminatedAt,
			OrgID: orgID,
		})
	}
	return newAgents, nil
}

func (migration *updateAgents) CopyOldAgentConfigVersionToNewAgentConfigVersion(ctx context.Context, tx bun.IDB, existingAgentConfigVersions []*existingAgentConfigVersions41, orgID string) ([]*newAgentConfigVersion41, error) {
	newAgentConfigVersions := make([]*newAgentConfigVersion41, 0)
	for _, existingAgentConfigVersion := range existingAgentConfigVersions {
		versionID, err := valuer.NewUUID(existingAgentConfigVersion.ID)
		if err != nil {
			return nil, err
		}
		newAgentConfigVersions = append(newAgentConfigVersions, &newAgentConfigVersion41{
			Identifiable: types.Identifiable{ID: versionID},
			TimeAuditable: types.TimeAuditable{
				CreatedAt: time.Unix(existingAgentConfigVersion.CreatedAt.Unix(), 0),
				UpdatedAt: time.Unix(existingAgentConfigVersion.UpdatedAt.Unix(), 0),
			},
			UserAuditable: types.UserAuditable{
				CreatedBy: existingAgentConfigVersion.CreatedBy,
				UpdatedBy: existingAgentConfigVersion.UpdatedBy,
			},
			OrgID: orgID,
			Version: existingAgentConfigVersion.Version,
			ElementType: existingAgentConfigVersion.ElementType,
			DeployStatus: existingAgentConfigVersion.DeployStatus,
			DeploySequence: existingAgentConfigVersion.DeploySequence,
			DeployResult: existingAgentConfigVersion.DeployResult,
			Hash: orgID + existingAgentConfigVersion.LastHash,
			Config: existingAgentConfigVersion.LastConfig,
		})
	}
	return newAgentConfigVersions, nil
}

func (migration *updateAgents) CopyOldAgentConfigElementToNewAgentConfigElement(ctx context.Context, tx bun.IDB, existingAgentConfigElements []*existingAgentConfigElement41, orgID string) ([]*newAgentConfigElement41, error) {
	newAgentConfigElements := make([]*newAgentConfigElement41, 0)
	for _, existingAgentConfigElement := range existingAgentConfigElements {
		elementID, err := valuer.NewUUID(existingAgentConfigElement.ElementID)
		if err != nil {
			return nil, err
		}
		newAgentConfigElements = append(newAgentConfigElements, &newAgentConfigElement41{
			Identifiable: types.Identifiable{ID: elementID},
			TimeAuditable: types.TimeAuditable{
				CreatedAt: time.Unix(existingAgentConfigElement.CreatedAt.Unix(), 0),
				UpdatedAt: time.Unix(existingAgentConfigElement.UpdatedAt.Unix(), 0),
			},
			VersionID: existingAgentConfigElement.VersionID,
			ElementID: existingAgentConfigElement.ElementID,
			ElementType: existingAgentConfigElement.ElementType,
		})
	}
	return newAgentConfigElements, nil
}
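One detail worth calling out in the migration above: config version hashes are stored as orgID + LastHash, which lines up with the coordinator's getSubscriberKey(orgId, hash) scheme introduced earlier in this diff. Sketched as an assertion over that invariant:

	// for any legacy config version v that belonged to org o:
	migratedHash := o.String() + v.LastHash          // what the migration writes to `hash`
	subscriberKey := getSubscriberKey(o, v.LastHash) // what the coordinator looks up
	// migratedHash == subscriberKey, so deployment-status lookups for
	// pre-migration config versions keep resolving after the move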
@@ -25,11 +25,12 @@ var (
)

var (
	OrgReference = "org"
	UserReference = "user"
	UserReferenceNoCascade = "user_no_cascade"
	FactorPasswordReference = "factor_password"
	CloudIntegrationReference = "cloud_integration"
	OrgReference = "org"
	UserReference = "user"
	UserReferenceNoCascade = "user_no_cascade"
	FactorPasswordReference = "factor_password"
	CloudIntegrationReference = "cloud_integration"
	AgentConfigVersionReference = "agent_config_version"
)

func New(

@@ -18,19 +18,21 @@ const (
)

const (
	Org string = "org"
	User string = "user"
	UserNoCascade string = "user_no_cascade"
	FactorPassword string = "factor_password"
	CloudIntegration string = "cloud_integration"
	Org string = "org"
	User string = "user"
	UserNoCascade string = "user_no_cascade"
	FactorPassword string = "factor_password"
	CloudIntegration string = "cloud_integration"
	AgentConfigVersion string = "agent_config_version"
)

const (
	OrgReference string = `("org_id") REFERENCES "organizations" ("id")`
	UserReference string = `("user_id") REFERENCES "users" ("id") ON DELETE CASCADE ON UPDATE CASCADE`
	UserNoCascadeReference string = `("user_id") REFERENCES "users" ("id")`
	FactorPasswordReference string = `("password_id") REFERENCES "factor_password" ("id")`
	CloudIntegrationReference string = `("cloud_integration_id") REFERENCES "cloud_integration" ("id") ON DELETE CASCADE`
	OrgReference string = `("org_id") REFERENCES "organizations" ("id")`
	UserReference string = `("user_id") REFERENCES "users" ("id") ON DELETE CASCADE ON UPDATE CASCADE`
	UserNoCascadeReference string = `("user_id") REFERENCES "users" ("id")`
	FactorPasswordReference string = `("password_id") REFERENCES "factor_password" ("id")`
	CloudIntegrationReference string = `("cloud_integration_id") REFERENCES "cloud_integration" ("id") ON DELETE CASCADE`
	AgentConfigVersionReference string = `("version_id") REFERENCES "agent_config_version" ("id")`
)

const (
@@ -269,6 +271,8 @@ func (dialect *dialect) RenameTableAndModifyModel(ctx context.Context, bun bun.I
			fkReferences = append(fkReferences, FactorPasswordReference)
		} else if reference == CloudIntegration && !slices.Contains(fkReferences, CloudIntegrationReference) {
			fkReferences = append(fkReferences, CloudIntegrationReference)
		} else if reference == AgentConfigVersion && !slices.Contains(fkReferences, AgentConfigVersionReference) {
			fkReferences = append(fkReferences, AgentConfigVersionReference)
		}
	}

@@ -53,10 +53,6 @@ func (dialect *dialect) AddPrimaryKey(ctx context.Context, bun bun.IDB, oldModel
	return nil
}

func (dialect *dialect) IndexExists(ctx context.Context, bun bun.IDB, table string, index string) (bool, error) {
	return false, nil
}

func (dialect *dialect) DropColumnWithForeignKeyConstraint(ctx context.Context, bun bun.IDB, model interface{}, column string) error {
	return nil
}

@@ -173,7 +173,7 @@ func (m *fieldMapper) ColumnExpressionFor(
			return "", errors.Wrapf(err, errors.TypeInvalidInput, errors.CodeInvalidInput, correction)
		} else {
			// not even a close match, return an error
			return "", err
			return "", errors.Wrapf(err, errors.TypeInvalidInput, errors.CodeInvalidInput, "field %s not found", field.Name)
		}
	}
} else if len(keysForField) == 1 {
@@ -186,7 +186,7 @@ func (m *fieldMapper) ColumnExpressionFor(
			colName, _ = m.FieldFor(ctx, key)
			args = append(args, fmt.Sprintf("toString(%s) != '', toString(%s)", colName, colName))
		}
		colName = fmt.Sprintf("multiIf(%s)", strings.Join(args, ", "))
		colName = fmt.Sprintf("multiIf(%s, NULL)", strings.Join(args, ", "))
	}
}

@@ -2,8 +2,10 @@ package telemetrylogs

import (
	"fmt"
	"strings"
	"testing"

	"github.com/SigNoz/signoz/pkg/errors"
	"github.com/SigNoz/signoz/pkg/querybuilder"
	"github.com/SigNoz/signoz/pkg/types/telemetrytypes"
	"github.com/huandu/go-sqlbuilder"
@@ -2315,7 +2317,15 @@ func TestFilterExprLogs(t *testing.T) {
			require.Equal(t, tc.expectedArgs, args)
		} else {
			require.Error(t, err, "Expected error for query: %s", tc.query)
			require.Contains(t, err.Error(), tc.expectedErrorContains)
			_, _, _, _, _, a := errors.Unwrapb(err)
			contains := false
			for _, warn := range a {
				if strings.Contains(warn, tc.expectedErrorContains) {
					contains = true
					break
				}
			}
			require.True(t, contains)
		}
	})
}
@@ -6,7 +6,6 @@ import (
|
||||
"log/slog"
|
||||
"strings"
|
||||
|
||||
"github.com/SigNoz/signoz/pkg/errors"
|
||||
"github.com/SigNoz/signoz/pkg/factory"
|
||||
"github.com/SigNoz/signoz/pkg/querybuilder"
|
||||
qbtypes "github.com/SigNoz/signoz/pkg/types/querybuildertypes/querybuildertypesv5"
|
||||
@@ -14,10 +13,6 @@ import (
|
||||
"github.com/huandu/go-sqlbuilder"
|
||||
)
|
||||
|
||||
var (
|
||||
ErrUnsupportedAggregation = errors.NewInvalidInputf(errors.CodeInvalidInput, "unsupported aggregation")
|
||||
)
|
||||
|
||||
type logQueryStatementBuilder struct {
|
||||
logger *slog.Logger
|
||||
metadataStore telemetrytypes.MetadataStore
|
||||
@@ -165,12 +160,18 @@ func (b *logQueryStatementBuilder) buildListQuery(
|
||||
|
||||
// Add order by
|
||||
for _, orderBy := range query.Order {
|
||||
sb.OrderBy(fmt.Sprintf("`%s` %s", orderBy.Key.Name, orderBy.Direction))
|
||||
colExpr, err := b.fm.ColumnExpressionFor(ctx, &orderBy.Key.TelemetryFieldKey, keys)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
sb.OrderBy(fmt.Sprintf("%s %s", colExpr, orderBy.Direction.StringValue()))
|
||||
}
|
||||
|
||||
// Add limit and offset
|
||||
if query.Limit > 0 {
|
||||
sb.Limit(query.Limit)
|
||||
} else {
|
||||
sb.Limit(100)
|
||||
}
|
||||
|
||||
if query.Offset > 0 {
|
||||
@@ -381,9 +382,9 @@ func (b *logQueryStatementBuilder) buildScalarQuery(
|
||||
for _, orderBy := range query.Order {
|
||||
idx, ok := aggOrderBy(orderBy, query)
|
||||
if ok {
|
||||
sb.OrderBy(fmt.Sprintf("__result_%d %s", idx, orderBy.Direction))
|
||||
sb.OrderBy(fmt.Sprintf("__result_%d %s", idx, orderBy.Direction.StringValue()))
|
||||
} else {
|
||||
sb.OrderBy(fmt.Sprintf("`%s` %s", orderBy.Key.Name, orderBy.Direction))
|
||||
sb.OrderBy(fmt.Sprintf("`%s` %s", orderBy.Key.Name, orderBy.Direction.StringValue()))
|
||||
}
|
||||
}
|
||||
|
||||
@@ -420,19 +421,25 @@ func (b *logQueryStatementBuilder) addFilterCondition(
|
||||
keys map[string][]*telemetrytypes.TelemetryFieldKey,
|
||||
) ([]string, error) {
|
||||
|
||||
// add filter expression
|
||||
filterWhereClause, warnings, err := querybuilder.PrepareWhereClause(query.Filter.Expression, querybuilder.FilterExprVisitorOpts{
|
||||
FieldMapper: b.fm,
|
||||
ConditionBuilder: b.cb,
|
||||
FieldKeys: keys,
|
||||
SkipResourceFilter: true,
|
||||
FullTextColumn: b.fullTextColumn,
|
||||
JsonBodyPrefix: b.jsonBodyPrefix,
|
||||
JsonKeyToKey: b.jsonKeyToKey,
|
||||
})
|
||||
var filterWhereClause *sqlbuilder.WhereClause
|
||||
var warnings []string
|
||||
var err error
|
||||
|
||||
if err != nil {
|
||||
return nil, err
|
||||
if query.Filter != nil && query.Filter.Expression != "" {
|
||||
// add filter expression
|
||||
filterWhereClause, warnings, err = querybuilder.PrepareWhereClause(query.Filter.Expression, querybuilder.FilterExprVisitorOpts{
|
||||
FieldMapper: b.fm,
|
||||
ConditionBuilder: b.cb,
|
||||
FieldKeys: keys,
|
||||
SkipResourceFilter: true,
|
||||
FullTextColumn: b.fullTextColumn,
|
||||
JsonBodyPrefix: b.jsonBodyPrefix,
|
||||
JsonKeyToKey: b.jsonKeyToKey,
|
||||
})
|
||||
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
|
||||
if filterWhereClause != nil {
|
||||
|
||||
@@ -70,6 +70,45 @@ func TestStatementBuilder(t *testing.T) {
|
||||
},
|
||||
expectedErr: nil,
|
||||
},
|
||||
{
|
||||
name: "test",
|
||||
requestType: qbtypes.RequestTypeTimeSeries,
|
||||
query: qbtypes.QueryBuilderQuery[qbtypes.LogAggregation]{
|
||||
Signal: telemetrytypes.SignalLogs,
|
||||
StepInterval: qbtypes.Step{Duration: 30 * time.Second},
|
||||
Aggregations: []qbtypes.LogAggregation{
|
||||
{
|
||||
Expression: "count()",
|
||||
},
|
||||
},
|
||||
Filter: &qbtypes.Filter{
|
||||
Expression: "service.name = 'cartservice'",
|
||||
},
|
||||
Limit: 10,
|
||||
GroupBy: []qbtypes.GroupByKey{
|
||||
{
|
||||
TelemetryFieldKey: telemetrytypes.TelemetryFieldKey{
|
||||
Name: "service.name",
|
||||
},
|
||||
},
|
||||
},
|
||||
Order: []qbtypes.OrderBy{
|
||||
{
|
||||
Key: qbtypes.OrderByKey{
|
||||
TelemetryFieldKey: telemetrytypes.TelemetryFieldKey{
|
||||
Name: "service.name",
|
||||
},
|
||||
},
|
||||
Direction: qbtypes.OrderDirectionDesc,
|
||||
},
|
||||
},
|
||||
},
|
||||
expected: qbtypes.Statement{
|
||||
Query: "WITH __resource_filter AS (SELECT fingerprint FROM signoz_logs.distributed_logs_v2_resource WHERE (simpleJSONExtractString(labels, 'service.name') = ? AND labels LIKE ? AND labels LIKE ?) AND seen_at_ts_bucket_start >= ? AND seen_at_ts_bucket_start <= ?), __limit_cte AS (SELECT toString(multiIf(mapContains(resources_string, 'service.name') = ?, resources_string['service.name'], NULL)) AS `service.name`, count() AS __result_0 FROM signoz_logs.distributed_logs_v2 WHERE resource_fingerprint IN (SELECT fingerprint FROM __resource_filter) AND timestamp >= ? AND timestamp < ? AND ts_bucket_start >= ? AND ts_bucket_start <= ? GROUP BY ALL ORDER BY `service.name` desc LIMIT ?) SELECT toStartOfInterval(fromUnixTimestamp64Nano(timestamp), INTERVAL 30 SECOND) AS ts, toString(multiIf(mapContains(resources_string, 'service.name') = ?, resources_string['service.name'], NULL)) AS `service.name`, count() AS __result_0 FROM signoz_logs.distributed_logs_v2 WHERE resource_fingerprint IN (SELECT fingerprint FROM __resource_filter) AND timestamp >= ? AND timestamp < ? AND ts_bucket_start >= ? AND ts_bucket_start <= ? AND (`service.name`) IN (SELECT `service.name` FROM __limit_cte) GROUP BY ALL",
|
||||
Args: []any{"cartservice", "%service.name%", "%service.name%cartservice%", uint64(1747945619), uint64(1747983448), true, "1747947419000000000", "1747983448000000000", uint64(1747945619), uint64(1747983448), 10, true, "1747947419000000000", "1747983448000000000", uint64(1747945619), uint64(1747983448)},
|
||||
},
|
||||
expectedErr: nil,
|
||||
},
|
||||
}
|
||||
|
||||
fm := NewFieldMapper()
|
||||
|
||||
@@ -95,7 +95,7 @@ func (m *fieldMapper) ColumnExpressionFor(
|
||||
return "", errors.Wrapf(err, errors.TypeInvalidInput, errors.CodeInvalidInput, correction)
|
||||
} else {
|
||||
// not even a close match, return an error
|
||||
return "", err
|
||||
return "", errors.Wrapf(err, errors.TypeInvalidInput, errors.CodeInvalidInput, "field %s not found", field.Name)
|
||||
}
|
||||
}
|
||||
} else if len(keysForField) == 1 {
|
||||
@@ -108,7 +108,7 @@ func (m *fieldMapper) ColumnExpressionFor(
|
||||
colName, _ = m.FieldFor(ctx, key)
|
||||
args = append(args, fmt.Sprintf("toString(%s) != '', toString(%s)", colName, colName))
|
||||
}
|
||||
colName = fmt.Sprintf("multiIf(%s)", strings.Join(args, ", "))
|
||||
colName = fmt.Sprintf("multiIf(%s, NULL)", strings.Join(args, ", "))
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
@@ -9,6 +9,7 @@ import (
|
||||
"github.com/SigNoz/signoz/pkg/factory"
|
||||
"github.com/SigNoz/signoz/pkg/querybuilder"
|
||||
"github.com/SigNoz/signoz/pkg/telemetrystore"
|
||||
"github.com/SigNoz/signoz/pkg/types/metrictypes"
|
||||
qbtypes "github.com/SigNoz/signoz/pkg/types/querybuildertypes/querybuildertypesv5"
|
||||
"github.com/SigNoz/signoz/pkg/types/telemetrytypes"
|
||||
"github.com/huandu/go-sqlbuilder"
|
||||
@@ -54,8 +55,10 @@ func NewTelemetryMetaStore(
|
||||
relatedMetadataDBName string,
|
||||
relatedMetadataTblName string,
|
||||
) telemetrytypes.MetadataStore {
|
||||
metadataSettings := factory.NewScopedProviderSettings(settings, "github.com/SigNoz/signoz/pkg/telemetrymetadata")
|
||||
|
||||
t := &telemetryMetaStore{
|
||||
logger: metadataSettings.Logger(),
|
||||
telemetrystore: telemetrystore,
|
||||
tracesDBName: tracesDBName,
|
||||
tracesFieldsTblName: tracesFieldsTblName,
|
||||
@@ -879,3 +882,90 @@ func (t *telemetryMetaStore) GetAllValues(ctx context.Context, fieldValueSelecto
	}
	return values, nil
}

func (t *telemetryMetaStore) FetchTemporality(ctx context.Context, metricName string) (metrictypes.Temporality, error) {
	if metricName == "" {
		return metrictypes.Unknown, errors.Newf(errors.TypeInternal, errors.CodeInternal, "metric name cannot be empty")
	}

	temporalityMap, err := t.FetchTemporalityMulti(ctx, metricName)
	if err != nil {
		return metrictypes.Unknown, err
	}

	temporality, ok := temporalityMap[metricName]
	if !ok {
		return metrictypes.Unknown, nil
	}

	return temporality, nil
}

func (t *telemetryMetaStore) FetchTemporalityMulti(ctx context.Context, metricNames ...string) (map[string]metrictypes.Temporality, error) {
	if len(metricNames) == 0 {
		return make(map[string]metrictypes.Temporality), nil
	}

	result := make(map[string]metrictypes.Temporality)

	// Build a query that fetches the temporality for all requested metrics.
	// We use attr_string_value where attr_name = '__temporality__'.
	// Note: the columns are swapped in the current data - the temporality column
	// holds the metric name and the metric_name column holds the temporality
	// value - so the SELECT below maps them back accordingly.
	sb := sqlbuilder.Select(
		"temporality as metric_name",
		"argMax(attr_string_value, last_reported_unix_milli) as temporality_value",
	).From(t.metricsDBName + "." + t.metricsFieldsTblName)

	// Filter by metric names (found in the temporality column due to the data mix-up)
	sb.Where(sb.In("temporality", metricNames))

	// Only fetch temporality metadata rows (where attr_name = '__temporality__')
	sb.Where(sb.E("attr_name", "__temporality__"))

	// Group by metric name to get one temporality per metric
	sb.GroupBy("temporality")

	query, args := sb.BuildWithFlavor(sqlbuilder.ClickHouse)

	t.logger.DebugContext(ctx, "fetching metric temporality", "query", query, "args", args)

	rows, err := t.telemetrystore.ClickhouseDB().Query(ctx, query, args...)
	if err != nil {
		return nil, errors.Wrapf(err, errors.TypeInternal, errors.CodeInternal, "failed to fetch metric temporality")
	}
	defer rows.Close()

	// Process results
	for rows.Next() {
		var metricName, temporalityStr string
		if err := rows.Scan(&metricName, &temporalityStr); err != nil {
			return nil, errors.Wrapf(err, errors.TypeInternal, errors.CodeInternal, "failed to scan temporality result")
		}

		// Convert the string to a Temporality value
		var temporality metrictypes.Temporality
		switch temporalityStr {
		case "Delta":
			temporality = metrictypes.Delta
		case "Cumulative":
			temporality = metrictypes.Cumulative
		case "Unspecified":
			temporality = metrictypes.Unspecified
		default:
			// Unknown or empty temporality
			temporality = metrictypes.Unknown
		}

		result[metricName] = temporality
	}

	// Metrics not found in the database default to Unknown
	for _, metricName := range metricNames {
		if _, exists := result[metricName]; !exists {
			result[metricName] = metrictypes.Unknown
		}
	}

	return result, nil
}

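For orientation, here is a minimal caller-side sketch of the new temporality lookup. It assumes the `MetadataStore` interface exposes `FetchTemporalityMulti` as implemented above; the function name, metric names, and the `store` parameter are illustrative, not part of this diff.

```go
// Sketch only, under the assumptions stated above.
func printTemporalities(ctx context.Context, store telemetrytypes.MetadataStore) error {
	temporalities, err := store.FetchTemporalityMulti(ctx, "http.server.duration", "system.memory.usage")
	if err != nil {
		return err
	}
	// Metrics absent from the metadata table come back as metrictypes.Unknown.
	for name, temporality := range temporalities {
		fmt.Printf("%s -> %q\n", name, temporality.StringValue())
	}
	return nil
}
```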
@@ -305,7 +305,7 @@ func (b *metricQueryStatementBuilder) buildTimeSeriesCTE(
	sb.LTE("unix_milli", end),
)

-	if query.Aggregations[0].Temporality != metrictypes.Unspecified {
+	if query.Aggregations[0].Temporality != metrictypes.Unknown {
	sb.Where(sb.ILike("temporality", query.Aggregations[0].Temporality.StringValue()))
}

@@ -147,8 +147,8 @@ func TestStatementBuilder(t *testing.T) {
		},
	},
	expected: qbtypes.Statement{
-		Query: "WITH __temporal_aggregation_cte AS (SELECT fingerprint, toStartOfInterval(toDateTime(intDiv(unix_milli, 1000)), toIntervalSecond(30)) AS ts, `host.name`, avg(value) AS per_series_value FROM signoz_metrics.distributed_samples_v4 AS points INNER JOIN (SELECT fingerprint, JSONExtractString(labels, 'host.name') AS `host.name` FROM signoz_metrics.time_series_v4_6hrs WHERE metric_name IN (?) AND unix_milli >= ? AND unix_milli <= ? AND __normalized = ? AND JSONExtractString(labels, 'host.name') = ? GROUP BY ALL) AS filtered_time_series ON points.fingerprint = filtered_time_series.fingerprint WHERE metric_name IN (?) AND unix_milli >= ? AND unix_milli < ? GROUP BY ALL ORDER BY fingerprint, ts), __spatial_aggregation_cte AS (SELECT ts, `host.name`, sum(per_series_value) AS value FROM __temporal_aggregation_cte WHERE isNaN(per_series_value) = ? GROUP BY ALL) SELECT * FROM __spatial_aggregation_cte",
-		Args: []any{"system.memory.usage", uint64(1747936800000), uint64(1747983448000), false, "big-data-node-1", "system.memory.usage", uint64(1747947419000), uint64(1747983448000), 0},
+		Query: "WITH __temporal_aggregation_cte AS (SELECT fingerprint, toStartOfInterval(toDateTime(intDiv(unix_milli, 1000)), toIntervalSecond(30)) AS ts, `host.name`, avg(value) AS per_series_value FROM signoz_metrics.distributed_samples_v4 AS points INNER JOIN (SELECT fingerprint, JSONExtractString(labels, 'host.name') AS `host.name` FROM signoz_metrics.time_series_v4_6hrs WHERE metric_name IN (?) AND unix_milli >= ? AND unix_milli <= ? AND LOWER(temporality) LIKE LOWER(?) AND __normalized = ? AND JSONExtractString(labels, 'host.name') = ? GROUP BY ALL) AS filtered_time_series ON points.fingerprint = filtered_time_series.fingerprint WHERE metric_name IN (?) AND unix_milli >= ? AND unix_milli < ? GROUP BY ALL ORDER BY fingerprint, ts), __spatial_aggregation_cte AS (SELECT ts, `host.name`, sum(per_series_value) AS value FROM __temporal_aggregation_cte WHERE isNaN(per_series_value) = ? GROUP BY ALL) SELECT * FROM __spatial_aggregation_cte",
+		Args: []any{"system.memory.usage", uint64(1747936800000), uint64(1747983448000), "unspecified", false, "big-data-node-1", "system.memory.usage", uint64(1747947419000), uint64(1747983448000), 0},
	},
	expectedErr: nil,
},

@@ -250,7 +250,7 @@ func (m *defaultFieldMapper) ColumnExpressionFor(
	return "", errors.Wrapf(err, errors.TypeInvalidInput, errors.CodeInvalidInput, correction)
} else {
	// not even a close match, return an error
-	return "", err
+	return "", errors.Wrapf(err, errors.TypeInvalidInput, errors.CodeInvalidInput, "field %s not found", field.Name)
}
}
} else if len(keysForField) == 1 {
@@ -263,7 +263,7 @@ func (m *defaultFieldMapper) ColumnExpressionFor(
	colName, _ = m.FieldFor(ctx, key)
	args = append(args, fmt.Sprintf("toString(%s) != '', toString(%s)", colName, colName))
}
-	colName = fmt.Sprintf("multiIf(%s)", strings.Join(args, ", "))
+	colName = fmt.Sprintf("multiIf(%s, NULL)", strings.Join(args, ", "))
}
}

@@ -179,12 +179,18 @@ func (b *traceQueryStatementBuilder) buildListQuery(

	// Add order by
	for _, orderBy := range query.Order {
-		sb.OrderBy(fmt.Sprintf("`%s` %s", orderBy.Key.Name, orderBy.Direction.StringValue()))
+		colExpr, err := b.fm.ColumnExpressionFor(ctx, &orderBy.Key.TelemetryFieldKey, keys)
+		if err != nil {
+			return nil, err
+		}
+		sb.OrderBy(fmt.Sprintf("%s %s", colExpr, orderBy.Direction.StringValue()))
	}

	// Add limit and offset
	if query.Limit > 0 {
		sb.Limit(query.Limit)
	} else {
		sb.Limit(100)
	}

	if query.Offset > 0 {

@@ -1,49 +0,0 @@
package types

import (
	"time"

	"github.com/uptrace/bun"
)

type Agent struct {
	bun.BaseModel `bun:"table:agents"`
	AgentID string `bun:"agent_id,pk,type:text"`
	StartedAt time.Time `bun:"started_at,type:datetime,notnull"`
	TerminatedAt time.Time `bun:"terminated_at,type:datetime"`
	CurrentStatus string `bun:"current_status,type:text,notnull"`
	EffectiveConfig string `bun:"effective_config,type:text,notnull"`
}

type AgentConfigVersion struct {
	bun.BaseModel `bun:"table:agent_config_versions"`

	ID string `bun:"id,pk,type:text"`
	CreatedBy string `bun:"created_by,type:text"`
	CreatedAt time.Time `bun:"created_at,default:CURRENT_TIMESTAMP"`
	UpdatedBy string `bun:"updated_by,type:text"`
	UpdatedAt time.Time `bun:"updated_at,default:CURRENT_TIMESTAMP"`
	Version int `bun:"version,default:1,unique:element_version_idx"`
	Active int `bun:"active"`
	IsValid int `bun:"is_valid"`
	Disabled int `bun:"disabled"`
	ElementType string `bun:"element_type,notnull,type:varchar(120),unique:element_version_idx"`
	DeployStatus string `bun:"deploy_status,notnull,type:varchar(80),default:'DIRTY'"`
	DeploySequence int `bun:"deploy_sequence"`
	DeployResult string `bun:"deploy_result,type:text"`
	LastHash string `bun:"last_hash,type:text"`
	LastConfig string `bun:"last_config,type:text"`
}

type AgentConfigElement struct {
	bun.BaseModel `bun:"table:agent_config_elements"`

	ID string `bun:"id,pk,type:text"`
	CreatedBy string `bun:"created_by,type:text"`
	CreatedAt time.Time `bun:"created_at,default:CURRENT_TIMESTAMP"`
	UpdatedBy string `bun:"updated_by,type:text"`
	UpdatedAt time.Time `bun:"updated_at,default:CURRENT_TIMESTAMP"`
	ElementID string `bun:"element_id,type:text,notnull,unique:agent_config_elements_u1"`
	ElementType string `bun:"element_type,type:varchar(120),notnull,unique:agent_config_elements_u1"`
	VersionID string `bun:"version_id,type:text,notnull,unique:agent_config_elements_u1"`
}
@@ -8,6 +8,6 @@ type TimeAuditable struct {
}

type UserAuditable struct {
-	CreatedBy string `bun:"created_by" json:"createdBy"`
-	UpdatedBy string `bun:"updated_by" json:"updatedBy"`
+	CreatedBy string `bun:"created_by,type:text" json:"createdBy"`
+	UpdatedBy string `bun:"updated_by,type:text" json:"updatedBy"`
}

@@ -13,7 +13,8 @@ type Temporality struct {
var (
	Delta = Temporality{valuer.NewString("delta")}
	Cumulative = Temporality{valuer.NewString("cumulative")}
-	Unspecified = Temporality{valuer.NewString("")}
+	Unspecified = Temporality{valuer.NewString("unspecified")}
+	Unknown = Temporality{valuer.NewString("")}
)
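The split matters downstream: `Unspecified` is now a concrete value that the time-series CTE matches via ILIKE, while `Unknown` (the empty string) is the only value that skips the temporality filter, as seen in the `buildTimeSeriesCTE` change above. A quick illustration of the string values involved (sketch, not part of the diff):

```go
// Assumes the valuer.String embedding exposes StringValue(),
// as used elsewhere in this changeset.
fmt.Println(metrictypes.Unspecified.StringValue()) // "unspecified"
fmt.Println(metrictypes.Unknown.StringValue())     // ""
```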

// Type is the type of the metric in OTLP data model

pkg/types/opamptypes/agent.go (new file, 129 lines)
@@ -0,0 +1,129 @@
package opamptypes

import (
	"time"

	"github.com/SigNoz/signoz/pkg/sqlstore"
	"github.com/SigNoz/signoz/pkg/types"
	"github.com/SigNoz/signoz/pkg/valuer"
	"github.com/uptrace/bun"
)

type AgentStatus int

const (
	AgentStatusUnknown AgentStatus = iota
	AgentStatusConnected
	AgentStatusDisconnected
)

type StorableAgent struct {
	bun.BaseModel `bun:"table:agent"`

	types.Identifiable
	types.TimeAuditable
	// AgentID is stored separately because the ID from the opamp client is a ULID, not a UUID
	AgentID string `json:"agentId" yaml:"agentId" bun:"agent_id,type:text,notnull,unique"`
	OrgID valuer.UUID `json:"orgId" yaml:"orgId" bun:"org_id,type:text,notnull"`
	TerminatedAt time.Time `json:"terminatedAt" yaml:"terminatedAt" bun:"terminated_at"`
	Status AgentStatus `json:"currentStatus" yaml:"currentStatus" bun:"status,type:text,notnull"`
	Config string `bun:"config,type:text,notnull"`
}

func NewStorableAgent(store sqlstore.SQLStore, orgID valuer.UUID, agentID string, status AgentStatus) StorableAgent {
	return StorableAgent{
		OrgID: orgID,
		Identifiable: types.Identifiable{ID: valuer.GenerateUUID()},
		AgentID: agentID,
		TimeAuditable: types.TimeAuditable{CreatedAt: time.Now(), UpdatedAt: time.Now()},
		Status: status,
	}
}

type ElementType struct{ valuer.String }

var (
	ElementTypeSamplingRules = ElementType{valuer.NewString("sampling_rules")}
	ElementTypeDropRules = ElementType{valuer.NewString("drop_rules")}
	ElementTypeLogPipelines = ElementType{valuer.NewString("log_pipelines")}
	ElementTypeLbExporter = ElementType{valuer.NewString("lb_exporter")}
)

// NewElementType creates a new ElementType from a string value.
// Returns the corresponding ElementType constant if the string matches,
// otherwise returns an empty ElementType.
func NewElementType(value string) ElementType {
	switch valuer.NewString(value) {
	case ElementTypeSamplingRules.String:
		return ElementTypeSamplingRules
	case ElementTypeDropRules.String:
		return ElementTypeDropRules
	case ElementTypeLogPipelines.String:
		return ElementTypeLogPipelines
	case ElementTypeLbExporter.String:
		return ElementTypeLbExporter
	default:
		return ElementType{valuer.NewString("")}
	}
}
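As a usage sketch (the input string is illustrative), `NewElementType` doubles as validation, because unrecognized strings map to the empty ElementType:

```go
// Sketch: reject inputs that do not match a known element type.
et := opamptypes.NewElementType("log_pipelines")
if et.StringValue() == "" {
	// unrecognized element type; reject the request
}
```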

type DeployStatus struct{ valuer.String }

var (
	PendingDeploy = DeployStatus{valuer.NewString("dirty")}
	Deploying = DeployStatus{valuer.NewString("deploying")}
	Deployed = DeployStatus{valuer.NewString("deployed")}
	DeployInitiated = DeployStatus{valuer.NewString("in_progress")}
	DeployFailed = DeployStatus{valuer.NewString("failed")}
	DeployStatusUnknown = DeployStatus{valuer.NewString("unknown")}
)

type AgentConfigVersion struct {
	bun.BaseModel `bun:"table:agent_config_version,alias:acv"`

	// CreatedByName is read-only; the actual data is queried from the users table
	CreatedByName string `json:"createdByName" bun:"created_by_name,scanonly"`

	types.Identifiable
	types.TimeAuditable
	types.UserAuditable
	OrgID valuer.UUID `json:"orgId" bun:"org_id,type:text,notnull,unique:element_version_org_idx"`
	Version int `json:"version" bun:"version,unique:element_version_org_idx"`
	ElementType ElementType `json:"elementType" bun:"element_type,type:text,notnull,unique:element_version_org_idx"`
	DeployStatus DeployStatus `json:"deployStatus" bun:"deploy_status,type:text,notnull,default:'dirty'"`
	DeploySequence int `json:"deploySequence" bun:"deploy_sequence"`
	DeployResult string `json:"deployResult" bun:"deploy_result,type:text"`
	Hash string `json:"lastHash" bun:"hash,type:text"`
	Config string `json:"config" bun:"config,type:text"`
}

func NewAgentConfigVersion(orgId valuer.UUID, userId valuer.UUID, elementType ElementType) *AgentConfigVersion {
	return &AgentConfigVersion{
		TimeAuditable: types.TimeAuditable{
			CreatedAt: time.Now(),
			UpdatedAt: time.Now(),
		},
		UserAuditable: types.UserAuditable{CreatedBy: userId.String(), UpdatedBy: userId.String()},
		OrgID: orgId,
		Identifiable: types.Identifiable{ID: valuer.GenerateUUID()},
		ElementType: elementType,
		DeployStatus: PendingDeploy,
		Hash: "",
		Config: "{}",
	}
}

func (a *AgentConfigVersion) IncrementVersion(lastVersion int) {
	a.Version = lastVersion + 1
}

type AgentConfigElement struct {
	bun.BaseModel `bun:"table:agent_config_element"`

	types.Identifiable
	types.TimeAuditable
	ElementID string `bun:"element_id,type:text,notnull,unique:element_type_version_idx"`
	ElementType string `bun:"element_type,type:text,notnull,unique:element_type_version_idx"`
	VersionID valuer.UUID `bun:"version_id,type:text,notnull,unique:element_type_version_idx"`
}
@@ -328,6 +328,8 @@ type MetricAggregation struct {
	TableHints *metrictypes.MetricTableHints `json:"-"`
	// value filter to apply to the query
	ValueFilter *metrictypes.MetricValueFilter `json:"-"`
+	// reduce to operator for metric scalar requests
+	ReduceTo ReduceTo `json:"reduceTo,omitempty"`
}

type Filter struct {
@@ -379,7 +381,7 @@ type FunctionArg struct {
	// name of the argument
	Name string `json:"name,omitempty"`
	// value of the argument
-	Value string `json:"value"`
+	Value any `json:"value"`
}

type Function struct {

@@ -55,4 +55,25 @@ type QueryBuilderQuery[T any] struct {

	// functions to apply to the query
	Functions []Function `json:"functions,omitempty"`

+	// ShiftBy is extracted from the timeShift function for internal use.
+	// This field is not serialized to JSON.
+	ShiftBy int64 `json:"-"`
}

// UnmarshalJSON implements custom JSON unmarshaling to disallow unknown fields
func (q *QueryBuilderQuery[T]) UnmarshalJSON(data []byte) error {
	// Define a type alias to avoid infinite recursion
	type Alias QueryBuilderQuery[T]

	var temp Alias
	// Use UnmarshalJSONWithContext for better error messages
	if err := UnmarshalJSONWithContext(data, &temp, "query spec"); err != nil {
		return err
	}

	// Copy the decoded values back to the original struct
	*q = QueryBuilderQuery[T](temp)

	return nil
}

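The alias trick used here is the standard Go idiom for avoiding infinite recursion in custom unmarshalers; a toy illustration on a hypothetical type (assumes `encoding/json` is imported):

```go
type Point struct{ X, Y int }

func (p *Point) UnmarshalJSON(data []byte) error {
	// alias has the same fields but none of Point's methods,
	// so json.Unmarshal does not re-enter this function.
	type alias Point
	var tmp alias
	if err := json.Unmarshal(data, &tmp); err != nil {
		return err
	}
	*p = Point(tmp)
	return nil
}
```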
@@ -20,10 +20,30 @@ type QueryBuilderFormula struct {
	// expression to apply to the query
	Expression string `json:"expression"`

	// order by keys and directions
	Order []OrderBy `json:"order,omitempty"`

	// limit the maximum number of rows to return
	Limit int `json:"limit,omitempty"`

+	// having clause to apply to the formula result
+	Having *Having `json:"having,omitempty"`

	// functions to apply to the formula result
	Functions []Function `json:"functions,omitempty"`
}

// UnmarshalJSON implements custom JSON unmarshaling to disallow unknown fields
func (f *QueryBuilderFormula) UnmarshalJSON(data []byte) error {
	type Alias QueryBuilderFormula
	var temp Alias
	if err := UnmarshalJSONWithContext(data, &temp, "formula spec"); err != nil {
		return err
	}
	*f = QueryBuilderFormula(temp)
	return nil
}

// small container to store the query name and index or alias reference
// for a variable in the formula expression
// read below for more details on aggregation references

@@ -93,9 +93,20 @@ func ApplyFunction(fn Function, result *TimeSeries) *TimeSeries {
	return result
}

-// parseFloat64Arg parses a string argument to float64
-func parseFloat64Arg(value string) (float64, error) {
-	return strconv.ParseFloat(value, 64)
+// parseFloat64Arg parses an argument to float64
+func parseFloat64Arg(value any) (float64, error) {
+	switch v := value.(type) {
+	case float64:
+		return v, nil
+	case int64:
+		return float64(v), nil
+	case int:
+		return float64(v), nil
+	case string:
+		return strconv.ParseFloat(v, 64)
+	default:
+		return 0, strconv.ErrSyntax
+	}
}
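With `FunctionArg.Value` widened to `any`, the same argument can now arrive as a JSON number or as a string. A brief in-package sketch (the values are illustrative):

```go
// Both forms decode to 0.3 after this change:
v1, _ := parseFloat64Arg("0.3") // string form, as older payloads send it
v2, _ := parseFloat64Arg(0.3)   // float64 form, as JSON numbers decode
_ = v1 == v2                    // true
```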

// getEWMAAlpha calculates the alpha value for EWMA functions

pkg/types/querybuildertypes/querybuildertypesv5/json_decoder.go (new file, 163 lines)
@@ -0,0 +1,163 @@
package querybuildertypesv5

import (
	"bytes"
	"encoding/json"
	"reflect"
	"strings"

	"github.com/SigNoz/signoz/pkg/errors"
	"github.com/SigNoz/signoz/pkg/types/telemetrytypes"
)

// UnmarshalJSONWithSuggestions unmarshals JSON data into the target struct
// and provides field name suggestions for unknown fields
func UnmarshalJSONWithSuggestions(data []byte, target any) error {
	return UnmarshalJSONWithContext(data, target, "")
}

// UnmarshalJSONWithContext unmarshals JSON with context information for better error messages
func UnmarshalJSONWithContext(data []byte, target any, context string) error {
	// First, try to unmarshal with DisallowUnknownFields to catch unknown fields
	dec := json.NewDecoder(bytes.NewReader(data))
	dec.DisallowUnknownFields()

	err := dec.Decode(target)
	if err == nil {
		// No error, successful unmarshal
		return nil
	}

	// Check if it's an unknown field error
	if strings.Contains(err.Error(), "unknown field") {
		// Extract the unknown field name
		unknownField := extractUnknownField(err.Error())
		if unknownField != "" {
			// Get valid field names from the target struct
			validFields := getJSONFieldNames(target)

			// Build the error message with context
			errorMsg := "unknown field %q"
			if context != "" {
				errorMsg = "unknown field %q in " + context
			}

			// Find the closest match with a max distance of 3 (reasonable for typos)
			if suggestion, found := telemetrytypes.SuggestCorrection(unknownField, validFields); found {
				return errors.NewInvalidInputf(
					errors.CodeInvalidInput,
					errorMsg,
					unknownField,
				).WithAdditional(
					suggestion,
				)
			}

			// No good suggestion found
			return errors.NewInvalidInputf(
				errors.CodeInvalidInput,
				errorMsg,
				unknownField,
			).WithAdditional(
				"Valid fields are: " + strings.Join(validFields, ", "),
			)
		}
	}

	// Return the original error if it's not an unknown field error
	return errors.NewInvalidInputf(errors.CodeInvalidInput, "invalid JSON: %v", err)
}

// extractUnknownField extracts the field name from an unknown field error message
func extractUnknownField(errMsg string) string {
	// The error message format is: json: unknown field "fieldname"
	parts := strings.Split(errMsg, `"`)
	if len(parts) >= 2 {
		return parts[1]
	}
	return ""
}

// getJSONFieldNames extracts all JSON field names from a struct
func getJSONFieldNames(v any) []string {
	var fields []string

	t := reflect.TypeOf(v)
	if t.Kind() == reflect.Ptr {
		t = t.Elem()
	}

	if t.Kind() != reflect.Struct {
		return fields
	}

	for i := 0; i < t.NumField(); i++ {
		field := t.Field(i)
		jsonTag := field.Tag.Get("json")

		if jsonTag == "" || jsonTag == "-" {
			continue
		}

		// Extract the field name from the JSON tag
		fieldName := strings.Split(jsonTag, ",")[0]
		if fieldName != "" {
			fields = append(fields, fieldName)
		}
	}

	return fields
}

// wrapUnmarshalError wraps UnmarshalJSONWithContext errors with appropriate context.
// It preserves errors that already carry additional context or unknown-field hints.
func wrapUnmarshalError(err error, errorFormat string, args ...interface{}) error {
	if err == nil {
		return nil
	}

	// If it's already one of our wrapped errors with additional context, return as-is
	_, _, _, _, _, additionals := errors.Unwrapb(err)
	if len(additionals) > 0 {
		return err
	}

	// Preserve helpful error messages about unknown fields
	if strings.Contains(err.Error(), "unknown field") {
		return err
	}

	// Wrap with the provided error format
	return errors.NewInvalidInputf(
		errors.CodeInvalidInput,
		errorFormat,
		args...,
	)
}

// wrapValidationError rewraps validation errors with context while preserving additional hints.
// It extracts the inner message from the error and creates a new error with the provided format;
// the inner message is automatically appended to the args for formatting.
func wrapValidationError(err error, contextIdentifier string, errorFormat string) error {
	if err == nil {
		return nil
	}

	// Extract the underlying error details
	_, _, innerMsg, _, _, additionals := errors.Unwrapb(err)

	// Create a new error with the provided format
	newErr := errors.NewInvalidInputf(
		errors.CodeInvalidInput,
		errorFormat,
		contextIdentifier,
		innerMsg,
	)

	// Add any additional context from the inner error
	if len(additionals) > 0 {
		newErr = newErr.WithAdditional(additionals...)
	}

	return newErr
}
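A self-contained sketch of the decoder in action; the `pagination` struct is hypothetical, and the exact hint wording comes from telemetrytypes.SuggestCorrection:

```go
type pagination struct {
	Limit  int `json:"limit"`
	Offset int `json:"offset"`
}

var p pagination
err := UnmarshalJSONWithSuggestions([]byte(`{"limt": 10}`), &p)
// err reports: unknown field "limt", with an additional hint
// along the lines of "did you mean: 'limit'?"
```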
@@ -12,6 +12,7 @@ var (
	QueryTypeFormula = QueryType{valuer.NewString("builder_formula")}
	QueryTypeSubQuery = QueryType{valuer.NewString("builder_sub_query")}
	QueryTypeJoin = QueryType{valuer.NewString("builder_join")}
+	QueryTypeTraceOperator = QueryType{valuer.NewString("builder_trace_operator")}
	QueryTypeClickHouseSQL = QueryType{valuer.NewString("clickhouse_sql")}
	QueryTypePromQL = QueryType{valuer.NewString("promql")}
)

@@ -2,6 +2,7 @@ package querybuildertypesv5

import (
	"encoding/json"
+	"strings"

	"github.com/SigNoz/signoz/pkg/errors"
	"github.com/SigNoz/signoz/pkg/types/telemetrytypes"
@@ -17,12 +18,11 @@ type QueryEnvelope struct {
// implement custom json unmarshaler for the QueryEnvelope
func (q *QueryEnvelope) UnmarshalJSON(data []byte) error {
	var shadow struct {
		Name string `json:"name"`
		Type QueryType `json:"type"`
		Spec json.RawMessage `json:"spec"`
	}
-	if err := json.Unmarshal(data, &shadow); err != nil {
-		return errors.WrapInvalidInputf(err, errors.CodeInvalidInput, "invalid query envelope")
+	if err := UnmarshalJSONWithSuggestions(data, &shadow); err != nil {
+		return err
	}

	q.Type = shadow.Type
@@ -34,62 +34,85 @@ func (q *QueryEnvelope) UnmarshalJSON(data []byte) error {
		Signal telemetrytypes.Signal `json:"signal"`
	}
	if err := json.Unmarshal(shadow.Spec, &header); err != nil {
-		return errors.WrapInvalidInputf(err, errors.CodeInvalidInput, "cannot detect builder signal")
+		return errors.NewInvalidInputf(
+			errors.CodeInvalidInput,
+			"cannot detect builder signal: %v",
+			err,
+		)
	}

	switch header.Signal {
	case telemetrytypes.SignalTraces:
		var spec QueryBuilderQuery[TraceAggregation]
-		if err := json.Unmarshal(shadow.Spec, &spec); err != nil {
-			return errors.WrapInvalidInputf(err, errors.CodeInvalidInput, "invalid trace builder query spec")
+		if err := UnmarshalJSONWithContext(shadow.Spec, &spec, "query spec"); err != nil {
+			return wrapUnmarshalError(err, "invalid trace builder query spec: %v", err)
		}
		q.Spec = spec
	case telemetrytypes.SignalLogs:
		var spec QueryBuilderQuery[LogAggregation]
-		if err := json.Unmarshal(shadow.Spec, &spec); err != nil {
-			return errors.WrapInvalidInputf(err, errors.CodeInvalidInput, "invalid log builder query spec")
+		if err := UnmarshalJSONWithContext(shadow.Spec, &spec, "query spec"); err != nil {
+			return wrapUnmarshalError(err, "invalid log builder query spec: %v", err)
		}
		q.Spec = spec
	case telemetrytypes.SignalMetrics:
		var spec QueryBuilderQuery[MetricAggregation]
-		if err := json.Unmarshal(shadow.Spec, &spec); err != nil {
-			return errors.WrapInvalidInputf(err, errors.CodeInvalidInput, "invalid metric builder query spec")
+		if err := UnmarshalJSONWithContext(shadow.Spec, &spec, "query spec"); err != nil {
+			return wrapUnmarshalError(err, "invalid metric builder query spec: %v", err)
		}
		q.Spec = spec
	default:
-		return errors.WrapInvalidInputf(nil, errors.CodeInvalidInput, "unknown builder signal %q", header.Signal)
+		return errors.NewInvalidInputf(
+			errors.CodeInvalidInput,
+			"unknown builder signal %q",
+			header.Signal,
+		).WithAdditional(
+			"Valid signals are: traces, logs, metrics",
+		)
	}

case QueryTypeFormula:
	var spec QueryBuilderFormula
-	if err := json.Unmarshal(shadow.Spec, &spec); err != nil {
-		return errors.WrapInvalidInputf(err, errors.CodeInvalidInput, "invalid formula spec")
+	if err := UnmarshalJSONWithContext(shadow.Spec, &spec, "formula spec"); err != nil {
+		return wrapUnmarshalError(err, "invalid formula spec: %v", err)
	}
	q.Spec = spec

case QueryTypeJoin:
	var spec QueryBuilderJoin
	if err := UnmarshalJSONWithContext(shadow.Spec, &spec, "join spec"); err != nil {
		return wrapUnmarshalError(err, "invalid join spec: %v", err)
	}
	q.Spec = spec

case QueryTypeTraceOperator:
	var spec QueryBuilderTraceOperator
	if err := json.Unmarshal(shadow.Spec, &spec); err != nil {
-		return errors.WrapInvalidInputf(err, errors.CodeInvalidInput, "invalid join spec")
+		return errors.WrapInvalidInputf(err, errors.CodeInvalidInput, "invalid trace operator spec")
	}
	q.Spec = spec

case QueryTypePromQL:
	var spec PromQuery
-	if err := json.Unmarshal(shadow.Spec, &spec); err != nil {
-		return errors.WrapInvalidInputf(err, errors.CodeInvalidInput, "invalid PromQL spec")
+	if err := UnmarshalJSONWithContext(shadow.Spec, &spec, "PromQL spec"); err != nil {
+		return wrapUnmarshalError(err, "invalid PromQL spec: %v", err)
	}
	q.Spec = spec

case QueryTypeClickHouseSQL:
	var spec ClickHouseQuery
-	if err := json.Unmarshal(shadow.Spec, &spec); err != nil {
-		return errors.WrapInvalidInputf(err, errors.CodeInvalidInput, "invalid ClickHouse SQL spec")
+	if err := UnmarshalJSONWithContext(shadow.Spec, &spec, "ClickHouse SQL spec"); err != nil {
+		return wrapUnmarshalError(err, "invalid ClickHouse SQL spec: %v", err)
	}
	q.Spec = spec

default:
-	return errors.WrapInvalidInputf(nil, errors.CodeInvalidInput, "unknown query type %q", shadow.Type)
+	return errors.NewInvalidInputf(
+		errors.CodeInvalidInput,
+		"unknown query type %q",
+		shadow.Type,
+	).WithAdditional(
+		"Valid query types are: builder_query, builder_sub_query, builder_formula, builder_join, promql, clickhouse_sql",
+	)
}

return nil
@@ -100,6 +123,59 @@ type CompositeQuery struct {
	Queries []QueryEnvelope `json:"queries"`
}

// UnmarshalJSON implements custom JSON unmarshaling to provide better error messages
func (c *CompositeQuery) UnmarshalJSON(data []byte) error {
	type Alias CompositeQuery

	// First do a normal unmarshal without DisallowUnknownFields
	var temp Alias
	if err := json.Unmarshal(data, &temp); err != nil {
		return err
	}

	// Then check for unknown fields at this level only
	var check map[string]json.RawMessage
	if err := json.Unmarshal(data, &check); err != nil {
		return err
	}

	// Check for unknown fields at this level
	validFields := map[string]bool{
		"queries": true,
	}

	for field := range check {
		if !validFields[field] {
			// Find the closest match
			var fieldNames []string
			for f := range validFields {
				fieldNames = append(fieldNames, f)
			}

			if suggestion, found := telemetrytypes.SuggestCorrection(field, fieldNames); found {
				return errors.NewInvalidInputf(
					errors.CodeInvalidInput,
					"unknown field %q in composite query",
					field,
				).WithAdditional(
					suggestion,
				)
			}

			return errors.NewInvalidInputf(
				errors.CodeInvalidInput,
				"unknown field %q in composite query",
				field,
			).WithAdditional(
				"Valid fields are: " + strings.Join(fieldNames, ", "),
			)
		}
	}

	*c = CompositeQuery(temp)
	return nil
}

type QueryRangeRequest struct {
	// SchemaVersion is the version of the schema to use for the request payload.
	SchemaVersion string `json:"schemaVersion"`
@@ -120,6 +196,69 @@ type QueryRangeRequest struct {
	FormatOptions *FormatOptions `json:"formatOptions,omitempty"`
}

// UnmarshalJSON implements custom JSON unmarshaling to disallow unknown fields
func (r *QueryRangeRequest) UnmarshalJSON(data []byte) error {
	// Define a type alias to avoid infinite recursion
	type Alias QueryRangeRequest

	// First do a normal unmarshal without DisallowUnknownFields to let nested structures handle their own validation
	var temp Alias
	if err := json.Unmarshal(data, &temp); err != nil {
		return err
	}

	// Then check for unknown fields at this level only
	var check map[string]json.RawMessage
	if err := json.Unmarshal(data, &check); err != nil {
		return err
	}

	// Check for unknown fields at the top level
	validFields := map[string]bool{
		"schemaVersion": true,
		"start": true,
		"end": true,
		"requestType": true,
		"compositeQuery": true,
		"variables": true,
		"noCache": true,
		"formatOptions": true,
	}

	for field := range check {
		if !validFields[field] {
			// Find the closest match
			var fieldNames []string
			for f := range validFields {
				fieldNames = append(fieldNames, f)
			}

			if suggestion, found := telemetrytypes.SuggestCorrection(field, fieldNames); found {
				return errors.NewInvalidInputf(
					errors.CodeInvalidInput,
					"unknown field %q",
					field,
				).WithAdditional(
					suggestion,
				)
			}

			return errors.NewInvalidInputf(
				errors.CodeInvalidInput,
				"unknown field %q",
				field,
			).WithAdditional(
				"Valid fields are: " + strings.Join(fieldNames, ", "),
			)
		}
	}

	// Copy the decoded values back to the original struct
	*r = QueryRangeRequest(temp)

	return nil
}

type FormatOptions struct {
	FillGaps bool `json:"fillGaps,omitempty"`
	FormatTableResultForUI bool `json:"formatTableResultForUI,omitempty"`

@@ -0,0 +1,150 @@
package querybuildertypesv5

import (
	"encoding/json"
	"strings"
	"testing"

	"github.com/SigNoz/signoz/pkg/errors"
	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"
)

func TestQueryRangeRequest_UnmarshalJSON_ErrorMessages(t *testing.T) {
	tests := []struct {
		name string
		jsonData string
		wantErrMsg string
		wantAdditionalHints []string
	}{
		{
			name: "unknown field 'function' in query spec",
			jsonData: `{
				"schemaVersion": "v1",
				"start": 1749290340000,
				"end": 1749293940000,
				"requestType": "scalar",
				"compositeQuery": {
					"queries": [{
						"type": "builder_query",
						"spec": {
							"name": "A",
							"signal": "logs",
							"aggregations": [{
								"expression": "count()",
								"alias": "spans_count"
							}],
							"function": [{
								"name": "absolute",
								"args": []
							}]
						}
					}]
				}
			}`,
			wantErrMsg: `unknown field "function" in query spec`,
			wantAdditionalHints: []string{
				"did you mean: 'functions'?",
			},
		},
		{
			name: "unknown field 'filters' in query spec",
			jsonData: `{
				"schemaVersion": "v1",
				"start": 1749290340000,
				"end": 1749293940000,
				"requestType": "scalar",
				"compositeQuery": {
					"queries": [{
						"type": "builder_query",
						"spec": {
							"name": "A",
							"signal": "metrics",
							"aggregations": [{
								"metricName": "test"
							}],
							"filters": {
								"expression": "test = 1"
							}
						}
					}]
				}
			}`,
			wantErrMsg: `unknown field "filters" in query spec`,
			wantAdditionalHints: []string{
				"did you mean: 'filter'?",
			},
		},
		{
			name: "unknown field at top level",
			jsonData: `{
				"schemaVersion": "v1",
				"start": 1749290340000,
				"end": 1749293940000,
				"requestType": "scalar",
				"compositeQueries": {
					"queries": []
				}
			}`,
			wantErrMsg: `unknown field "compositeQueries"`,
			wantAdditionalHints: []string{
				"did you mean: 'compositeQuery'?",
			},
		},
		{
			name: "unknown field with no good suggestion",
			jsonData: `{
				"schemaVersion": "v1",
				"start": 1749290340000,
				"end": 1749293940000,
				"requestType": "scalar",
				"compositeQuery": {
					"queries": [{
						"type": "builder_query",
						"spec": {
							"name": "A",
							"signal": "metrics",
							"aggregations": [{
								"metricName": "test"
							}],
							"randomField": "value"
						}
					}]
				}
			}`,
			wantErrMsg: `unknown field "randomField" in query spec`,
			wantAdditionalHints: []string{
				"Valid fields are:",
			},
		},
	}

	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			var req QueryRangeRequest
			err := json.Unmarshal([]byte(tt.jsonData), &req)

			require.Error(t, err)

			// Check the main error message
			assert.Contains(t, err.Error(), tt.wantErrMsg)

			// Check whether it's an error from our package using Unwrapb
			_, _, _, _, _, additionals := errors.Unwrapb(err)

			// Check additional hints if we have any
			if len(additionals) > 0 {
				for _, hint := range tt.wantAdditionalHints {
					found := false
					for _, additional := range additionals {
						if strings.Contains(additional, hint) {
							found = true
							break
						}
					}
					assert.True(t, found, "Expected to find hint '%s' in additionals: %v", hint, additionals)
				}
			}
		})
	}
}
@@ -102,6 +102,341 @@ func TestQueryRangeRequest_UnmarshalJSON(t *testing.T) {
		},
		wantErr: false,
	},
	{
		name: "valid trace operator query with simple expression",
		jsonData: `{
			"schemaVersion": "v1",
			"start": 1640995200000,
			"end": 1640998800000,
			"requestType": "time_series",
			"compositeQuery": {
				"queries": [
					{
						"type": "builder_query",
						"spec": {
							"name": "A",
							"signal": "traces",
							"filter": {
								"expression": "service.name = 'checkoutservice'"
							}
						}
					},
					{
						"type": "builder_trace_operator",
						"spec": {
							"name": "trace_flow_analysis",
							"expression": "A => B",
							"filter": {
								"expression": "trace_duration > 200ms AND span_count >= 5"
							},
							"orderBy": [{
								"key": {
									"name": "trace_duration"
								},
								"direction": "desc"
							}],
							"limit": 100,
							"cursor": "eyJsYXN0X3RyYWNlX2lkIjoiYWJjZGVmIn0="
						}
					}
				]
			},
			"variables": {
				"service": "frontend"
			}
		}`,
		expected: QueryRangeRequest{
			SchemaVersion: "v1",
			Start: 1640995200000,
			End: 1640998800000,
			RequestType: RequestTypeTimeSeries,
			CompositeQuery: CompositeQuery{
				Queries: []QueryEnvelope{
					{
						Type: QueryTypeBuilder,
						Spec: QueryBuilderQuery[TraceAggregation]{
							Name: "A",
							Signal: telemetrytypes.SignalTraces,
							Filter: &Filter{
								Expression: "service.name = 'checkoutservice'",
							},
						},
					},
					{
						Type: QueryTypeTraceOperator,
						Spec: QueryBuilderTraceOperator{
							Name: "trace_flow_analysis",
							Expression: "A => B",
							Filter: &Filter{
								Expression: "trace_duration > 200ms AND span_count >= 5",
							},
							Order: []OrderBy{{
								Key: OrderByKey{TelemetryFieldKey: telemetrytypes.TelemetryFieldKey{Name: "trace_duration"}},
								Direction: OrderDirectionDesc,
							}},
							Limit: 100,
							Cursor: "eyJsYXN0X3RyYWNlX2lkIjoiYWJjZGVmIn0=",
						},
					},
				},
			},
			Variables: map[string]any{
				"service": "frontend",
			},
		},
		wantErr: false,
	},

	{
		name: "valid trace operator with complex expression and span_count ordering",
		jsonData: `{
			"schemaVersion": "v1",
			"start": 1640995200000,
			"end": 1640998800000,
			"requestType": "time_series",
			"compositeQuery": {
				"queries": [
					{
						"type": "builder_query",
						"spec": {
							"name": "A",
							"signal": "traces",
							"filter": { "expression": "service.name = 'frontend'" }
						}
					},
					{
						"type": "builder_query",
						"spec": {
							"name": "B",
							"signal": "traces",
							"filter": { "expression": "hasError = true" }
						}
					},
					{
						"type": "builder_query",
						"spec": {
							"name": "C",
							"signal": "traces",
							"filter": { "expression": "response_status_code = '200'" }
						}
					},
					{
						"type": "builder_trace_operator",
						"spec": {
							"name": "complex_trace_analysis",
							"expression": "A => (B && NOT C)",
							"filter": { "expression": "trace_duration BETWEEN 100ms AND 5s AND span_count IN (5, 10, 15)" },
							"orderBy": [{
								"key": { "name": "span_count" },
								"direction": "asc"
							}],
							"limit": 50,
							"functions": [{ "name": "absolute", "args": [] }]
						}
					}
				]
			}
		}`,
		expected: QueryRangeRequest{
			SchemaVersion: "v1",
			Start: 1640995200000,
			End: 1640998800000,
			RequestType: RequestTypeTimeSeries,
			CompositeQuery: CompositeQuery{Queries: []QueryEnvelope{
				{
					Type: QueryTypeBuilder,
					Spec: QueryBuilderQuery[TraceAggregation]{
						Name: "A",
						Signal: telemetrytypes.SignalTraces,
						Filter: &Filter{Expression: "service.name = 'frontend'"},
					},
				},
				{
					Type: QueryTypeBuilder,
					Spec: QueryBuilderQuery[TraceAggregation]{
						Name: "B",
						Signal: telemetrytypes.SignalTraces,
						Filter: &Filter{Expression: "hasError = true"},
					},
				},
				{
					Type: QueryTypeBuilder,
					Spec: QueryBuilderQuery[TraceAggregation]{
						Name: "C",
						Signal: telemetrytypes.SignalTraces,
						Filter: &Filter{Expression: "response_status_code = '200'"},
					},
				},
				{
					Type: QueryTypeTraceOperator,
					Spec: QueryBuilderTraceOperator{
						Name: "complex_trace_analysis",
						Expression: "A => (B && NOT C)",
						Filter: &Filter{Expression: "trace_duration BETWEEN 100ms AND 5s AND span_count IN (5, 10, 15)"},
						Order: []OrderBy{{
							Key: OrderByKey{TelemetryFieldKey: telemetrytypes.TelemetryFieldKey{Name: OrderBySpanCount.StringValue()}},
							Direction: OrderDirectionAsc,
						}},
						Limit: 50,
						Functions: []Function{{Name: FunctionNameAbsolute, Args: []FunctionArg{}}},
					},
				},
			}},
		},
		wantErr: false,
	},
	{
		name: "valid trace operator with NOT expression",
		jsonData: `{
			"schemaVersion": "v1",
			"start": 1640995200000,
			"end": 1640998800000,
			"requestType": "time_series",
			"compositeQuery": {
				"queries": [
					{
						"type": "builder_query",
						"spec": {
							"name": "A",
							"signal": "traces",
							"filter": {
								"expression": "service.name = 'frontend'"
							}
						}
					},
					{
						"type": "builder_trace_operator",
						"spec": {
							"name": "not_trace_analysis",
							"expression": "NOT A",
							"filter": {
								"expression": "trace_duration < 1s"
							},
							"disabled": false
						}
					}
				]
			}
		}`,
		expected: QueryRangeRequest{
			SchemaVersion: "v1",
			Start: 1640995200000,
			End: 1640998800000,
			RequestType: RequestTypeTimeSeries,
			CompositeQuery: CompositeQuery{
				Queries: []QueryEnvelope{
					{
						Type: QueryTypeBuilder,
						Spec: QueryBuilderQuery[TraceAggregation]{
							Name: "A",
							Signal: telemetrytypes.SignalTraces,
							Filter: &Filter{
								Expression: "service.name = 'frontend'",
							},
						},
					},
					{
						Type: QueryTypeTraceOperator,
						Spec: QueryBuilderTraceOperator{
							Name: "not_trace_analysis",
							Expression: "NOT A",
							Filter: &Filter{
								Expression: "trace_duration < 1s",
							},
							Disabled: false,
						},
					},
				},
			},
		},
		wantErr: false,
	},
	{
		name: "trace operator with binary NOT (exclusion)",
		jsonData: `{
			"schemaVersion": "v1",
			"start": 1640995200000,
			"end": 1640998800000,
			"requestType": "time_series",
			"compositeQuery": {
				"queries": [
					{
						"type": "builder_query",
						"spec": {
							"name": "A",
							"signal": "traces",
							"filter": {
								"expression": "service.name = 'frontend'"
							}
						}
					},
					{
						"type": "builder_query",
						"spec": {
							"name": "B",
							"signal": "traces",
							"filter": {
								"expression": "hasError = true"
							}
						}
					},
					{
						"type": "builder_trace_operator",
						"spec": {
							"name": "exclusion_analysis",
							"expression": "A NOT B",
							"filter": {
								"expression": "span_count > 3"
							},
							"limit": 75
						}
					}
				]
			}
		}`,
		expected: QueryRangeRequest{
			SchemaVersion: "v1",
			Start: 1640995200000,
			End: 1640998800000,
			RequestType: RequestTypeTimeSeries,
			CompositeQuery: CompositeQuery{
				Queries: []QueryEnvelope{
					{
						Type: QueryTypeBuilder,
						Spec: QueryBuilderQuery[TraceAggregation]{
							Name: "A",
							Signal: telemetrytypes.SignalTraces,
							Filter: &Filter{
								Expression: "service.name = 'frontend'",
							},
						},
					},
					{
						Type: QueryTypeBuilder,
						Spec: QueryBuilderQuery[TraceAggregation]{
							Name: "B",
							Signal: telemetrytypes.SignalTraces,
							Filter: &Filter{
								Expression: "hasError = true",
							},
						},
					},
					{
						Type: QueryTypeTraceOperator,
						Spec: QueryBuilderTraceOperator{
							Name: "exclusion_analysis",
							Expression: "A NOT B",
							Filter: &Filter{
								Expression: "span_count > 3",
							},
							Limit: 75,
						},
					},
				},
			},
		},
		wantErr: false,
	},
	{
		name: "valid log builder query",
		jsonData: `{
@@ -120,8 +455,8 @@ func TestQueryRangeRequest_UnmarshalJSON(t *testing.T) {
				"expression": "severity_text = 'ERROR'"
			},
			"selectFields": [{
-				"key": "body",
-				"type": "log"
+				"name": "body",
+				"fieldContext": "log"
			}],
			"limit": 50,
			"offset": 10
@@ -177,8 +512,8 @@ func TestQueryRangeRequest_UnmarshalJSON(t *testing.T) {
			}],
			"stepInterval": 120,
			"groupBy": [{
-				"key": "method",
-				"type": "tag"
+				"name": "method",
+				"fieldContext": "attribute"
			}]
		}
	}]
@@ -270,7 +605,7 @@ func TestQueryRangeRequest_UnmarshalJSON(t *testing.T) {
			"name": "error_rate",
			"expression": "A / B * 100",
			"functions": [{
-				"name": "cut_off_min",
+				"name": "cutOffMin",
				"args": [{
					"value": "0.3"
				}]
@@ -436,10 +771,9 @@ func TestQueryRangeRequest_UnmarshalJSON(t *testing.T) {
		}
	},
	{
-		"name": "B",
		"type": "builder_formula",
		"spec": {
-			"name": "rate",
+			"name": "B",
			"expression": "A * 100"
		}
	}
@@ -465,7 +799,7 @@ func TestQueryRangeRequest_UnmarshalJSON(t *testing.T) {
	{
		Type: QueryTypeFormula,
		Spec: QueryBuilderFormula{
-			Name: "rate",
+			Name: "B",
			Expression: "A * 100",
		},
	},
@@ -526,7 +860,6 @@ func TestQueryRangeRequest_UnmarshalJSON(t *testing.T) {
	"requestType": "time_series",
	"compositeQuery": {
		"queries": [{
-			"name": "A",
			"type": "unknown_type",
			"spec": {}
		}]
@@ -543,9 +876,9 @@ func TestQueryRangeRequest_UnmarshalJSON(t *testing.T) {
	"requestType": "time_series",
	"compositeQuery": {
		"queries": [{
-			"name": "A",
			"type": "builder_query",
			"spec": {
+				"name": "A",
				"signal": "unknown_signal",
				"aggregations": []
			}
@@ -563,9 +896,9 @@ func TestQueryRangeRequest_UnmarshalJSON(t *testing.T) {
	"requestType": "time_series",
	"compositeQuery": {
		"queries": [{
-			"name": "A",
			"type": "builder_query",
			"spec": {
+				"name": "A",
				"signal": "traces",
				"aggregations": [],
				"stepInterval": "invalid_duration"
@@ -650,6 +983,21 @@ func TestQueryRangeRequest_UnmarshalJSON(t *testing.T) {
		assert.Equal(t, expectedSpec.Right.Name, actualSpec.Right.Name)
		assert.Equal(t, expectedSpec.Type, actualSpec.Type)
		assert.Equal(t, expectedSpec.On, actualSpec.On)
	case QueryTypeTraceOperator:
		expectedSpec := expectedQuery.Spec.(QueryBuilderTraceOperator)
		actualSpec, ok := actualQuery.Spec.(QueryBuilderTraceOperator)
		require.True(t, ok, "Expected QueryBuilderTraceOperator but got %T", actualQuery.Spec)
		assert.Equal(t, expectedSpec.Name, actualSpec.Name)
		assert.Equal(t, expectedSpec.Expression, actualSpec.Expression)
		assert.Equal(t, expectedSpec.Limit, actualSpec.Limit)
		assert.Equal(t, expectedSpec.Cursor, actualSpec.Cursor)
		assert.Equal(t, len(expectedSpec.Order), len(actualSpec.Order))
		for i, expectedOrder := range expectedSpec.Order {
			if i < len(actualSpec.Order) {
				assert.Equal(t, expectedOrder.Key.Name, actualSpec.Order[i].Key.Name)
				assert.Equal(t, expectedOrder.Direction, actualSpec.Order[i].Direction)
			}
		}
	case QueryTypePromQL:
		expectedSpec := expectedQuery.Spec.(PromQuery)
		actualSpec, ok := actualQuery.Spec.(PromQuery)

@@ -673,3 +1021,507 @@ func TestQueryRangeRequest_UnmarshalJSON(t *testing.T) {
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestParseTraceExpression(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
expression string
|
||||
expectError bool
|
||||
checkResult func(t *testing.T, result *TraceOperand)
|
||||
}{
|
||||
{
|
||||
name: "simple query reference",
|
||||
expression: "A",
|
||||
expectError: false,
|
||||
checkResult: func(t *testing.T, result *TraceOperand) {
|
||||
assert.NotNil(t, result.QueryRef)
|
||||
assert.Equal(t, "A", result.QueryRef.Name)
|
||||
assert.Nil(t, result.Operator)
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "simple implication",
|
||||
expression: "A => B",
|
||||
expectError: false,
|
||||
checkResult: func(t *testing.T, result *TraceOperand) {
|
||||
assert.NotNil(t, result.Operator)
|
||||
assert.Equal(t, TraceOperatorDirectDescendant, *result.Operator)
|
||||
assert.NotNil(t, result.Left)
|
||||
assert.NotNil(t, result.Right)
|
||||
assert.Equal(t, "A", result.Left.QueryRef.Name)
|
||||
assert.Equal(t, "B", result.Right.QueryRef.Name)
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "and operation",
|
||||
expression: "A && B",
|
||||
expectError: false,
|
||||
checkResult: func(t *testing.T, result *TraceOperand) {
|
||||
assert.NotNil(t, result.Operator)
|
||||
assert.Equal(t, TraceOperatorAnd, *result.Operator)
|
||||
assert.Equal(t, "A", result.Left.QueryRef.Name)
|
||||
assert.Equal(t, "B", result.Right.QueryRef.Name)
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "or operation",
|
||||
expression: "A || B",
|
||||
expectError: false,
|
||||
checkResult: func(t *testing.T, result *TraceOperand) {
|
||||
assert.NotNil(t, result.Operator)
|
||||
assert.Equal(t, TraceOperatorOr, *result.Operator)
|
||||
assert.Equal(t, "A", result.Left.QueryRef.Name)
|
||||
assert.Equal(t, "B", result.Right.QueryRef.Name)
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "unary NOT operation",
|
||||
expression: "NOT A",
|
||||
expectError: false,
|
||||
checkResult: func(t *testing.T, result *TraceOperand) {
|
||||
assert.NotNil(t, result.Operator)
|
||||
assert.Equal(t, TraceOperatorNot, *result.Operator)
|
||||
assert.NotNil(t, result.Left)
|
||||
assert.Nil(t, result.Right)
|
||||
assert.Equal(t, "A", result.Left.QueryRef.Name)
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "binary NOT operation",
|
||||
expression: "A NOT B",
|
||||
expectError: false,
|
||||
checkResult: func(t *testing.T, result *TraceOperand) {
|
||||
assert.NotNil(t, result.Operator)
|
||||
assert.Equal(t, TraceOperatorExclude, *result.Operator)
|
||||
assert.NotNil(t, result.Left)
|
||||
assert.NotNil(t, result.Right)
|
||||
assert.Equal(t, "A", result.Left.QueryRef.Name)
|
||||
assert.Equal(t, "B", result.Right.QueryRef.Name)
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "complex expression with precedence",
|
||||
expression: "A => B && C || D",
|
||||
expectError: false,
|
||||
checkResult: func(t *testing.T, result *TraceOperand) {
|
||||
// Should parse as: A => (B && (C || D)) due to precedence: NOT > || > && > =>
|
||||
// The parsing finds operators from lowest precedence first
|
||||
assert.NotNil(t, result.Operator)
|
||||
assert.Equal(t, TraceOperatorDirectDescendant, *result.Operator)
|
||||
assert.Equal(t, "A", result.Left.QueryRef.Name)
|
||||
|
||||
// Right side should be an AND operation (next lowest precedence after =>)
|
||||
assert.NotNil(t, result.Right.Operator)
|
||||
assert.Equal(t, TraceOperatorAnd, *result.Right.Operator)
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "simple parentheses",
|
||||
expression: "(A)",
|
||||
expectError: false,
|
||||
checkResult: func(t *testing.T, result *TraceOperand) {
|
||||
assert.NotNil(t, result.QueryRef)
|
||||
assert.Equal(t, "A", result.QueryRef.Name)
|
||||
assert.Nil(t, result.Operator)
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "parentheses expression",
|
||||
expression: "A => (B || C)",
|
||||
expectError: false,
|
||||
checkResult: func(t *testing.T, result *TraceOperand) {
|
||||
assert.NotNil(t, result.Operator)
|
||||
assert.Equal(t, TraceOperatorDirectDescendant, *result.Operator)
|
||||
assert.Equal(t, "A", result.Left.QueryRef.Name)
|
||||
|
||||
// Right side should be an OR operation
|
||||
assert.NotNil(t, result.Right.Operator)
|
||||
assert.Equal(t, TraceOperatorOr, *result.Right.Operator)
|
||||
assert.Equal(t, "B", result.Right.Left.QueryRef.Name)
|
||||
assert.Equal(t, "C", result.Right.Right.QueryRef.Name)
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "nested NOT with parentheses",
|
||||
expression: "NOT (A && B)",
|
||||
expectError: false,
|
||||
checkResult: func(t *testing.T, result *TraceOperand) {
|
||||
assert.NotNil(t, result.Operator)
|
||||
assert.Equal(t, TraceOperatorNot, *result.Operator)
|
||||
assert.Nil(t, result.Right) // Unary operator
|
||||
|
||||
// Left side should be an AND operation
|
||||
assert.NotNil(t, result.Left.Operator)
|
||||
assert.Equal(t, TraceOperatorAnd, *result.Left.Operator)
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "invalid query reference with numbers",
|
||||
expression: "123",
|
||||
expectError: true,
|
||||
},
|
||||
{
|
||||
name: "invalid query reference with special chars",
|
||||
expression: "A-B",
|
||||
expectError: true,
|
||||
},
|
||||
{
|
||||
name: "empty expression",
|
||||
expression: "",
|
||||
expectError: true,
|
||||
},
|
||||
|
||||
{
|
||||
name: "expression with extra whitespace",
|
||||
expression: " A => B ",
|
||||
expectError: false,
|
||||
checkResult: func(t *testing.T, result *TraceOperand) {
|
||||
assert.NotNil(t, result.Operator)
|
||||
assert.Equal(t, TraceOperatorDirectDescendant, *result.Operator)
|
||||
assert.Equal(t, "A", result.Left.QueryRef.Name)
|
||||
assert.Equal(t, "B", result.Right.QueryRef.Name)
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
result, err := parseTraceExpression(tt.expression)
|
||||
|
||||
if tt.expectError {
|
||||
assert.Error(t, err)
|
||||
assert.Nil(t, result)
|
||||
return
|
||||
}
|
||||
|
||||
require.NoError(t, err)
|
||||
require.NotNil(t, result)
|
||||
if tt.checkResult != nil {
|
||||
tt.checkResult(t, result)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestQueryBuilderTraceOperator_ValidateTraceOperator(t *testing.T) {
	tests := []struct {
		name          string
		traceOperator QueryBuilderTraceOperator
		queries       []QueryEnvelope
		expectError   bool
		errorContains string
	}{
		{
			name: "valid trace operator with trace queries",
			traceOperator: QueryBuilderTraceOperator{
				Name:       "test_operator",
				Expression: "A => B",
				Filter: &Filter{
					Expression: "trace_duration > 200ms",
				},
				Order: []OrderBy{{
					Key: OrderByKey{
						TelemetryFieldKey: telemetrytypes.TelemetryFieldKey{
							Name:         OrderByTraceDuration.StringValue(),
							FieldContext: telemetrytypes.FieldContextSpan,
						},
					},
					Direction: OrderDirectionDesc,
				}},
				Limit: 100,
			},
			queries: []QueryEnvelope{
				{
					Type: QueryTypeBuilder,
					Spec: QueryBuilderQuery[TraceAggregation]{
						Name:   "A",
						Signal: telemetrytypes.SignalTraces,
					},
				},
				{
					Type: QueryTypeBuilder,
					Spec: QueryBuilderQuery[TraceAggregation]{
						Name:   "B",
						Signal: telemetrytypes.SignalTraces,
					},
				},
			},
			expectError: false,
		},
		{
			name: "empty expression",
			traceOperator: QueryBuilderTraceOperator{
				Name:       "test_operator",
				Expression: "",
			},
			queries:       []QueryEnvelope{},
			expectError:   true,
			errorContains: "expression cannot be empty",
		},
		{
			name: "referenced query does not exist",
			traceOperator: QueryBuilderTraceOperator{
				Name:       "test_operator",
				Expression: "A => B",
			},
			queries: []QueryEnvelope{
				{
					Type: QueryTypeBuilder,
					Spec: QueryBuilderQuery[TraceAggregation]{
						Name:   "A",
						Signal: telemetrytypes.SignalTraces,
					},
				},
			},
			expectError:   true,
			errorContains: "query 'B' referenced in trace operator expression does not exist or is not a trace query",
		},
		{
			name: "referenced query is not trace signal",
			traceOperator: QueryBuilderTraceOperator{
				Name:       "test_operator",
				Expression: "A => B",
			},
			queries: []QueryEnvelope{
				{
					Type: QueryTypeBuilder,
					Spec: QueryBuilderQuery[TraceAggregation]{
						Name:   "A",
						Signal: telemetrytypes.SignalTraces,
					},
				},
				{
					Type: QueryTypeBuilder,
					Spec: QueryBuilderQuery[LogAggregation]{
						Name:   "B",
						Signal: telemetrytypes.SignalLogs,
					},
				},
			},
			expectError:   true,
			errorContains: "query 'B' referenced in trace operator expression does not exist or is not a trace query",
		},
		{
			name: "invalid orderBy field",
			traceOperator: QueryBuilderTraceOperator{
				Name:       "test_operator",
				Expression: "A",
				Order: []OrderBy{{
					Key:       OrderByKey{TelemetryFieldKey: telemetrytypes.TelemetryFieldKey{Name: "invalid_string"}},
					Direction: OrderDirectionDesc,
				}},
			},
			queries: []QueryEnvelope{{
				Type: QueryTypeBuilder,
				Spec: QueryBuilderQuery[TraceAggregation]{Name: "A", Signal: telemetrytypes.SignalTraces},
			}},
			expectError:   true,
			errorContains: "orderBy[0] field must be either 'span_count' or 'trace_duration'",
		},
		{
			name: "invalid pagination limit",
			traceOperator: QueryBuilderTraceOperator{
				Name:       "test_operator",
				Expression: "A",
				Limit:      -1,
			},
			queries: []QueryEnvelope{
				{
					Type: QueryTypeBuilder,
					Spec: QueryBuilderQuery[TraceAggregation]{
						Name:   "A",
						Signal: telemetrytypes.SignalTraces,
					},
				},
			},
			expectError:   true,
			errorContains: "limit must be non-negative",
		},
		{
			name: "limit exceeds maximum",
			traceOperator: QueryBuilderTraceOperator{
				Name:       "test_operator",
				Expression: "A",
				Limit:      15000,
			},
			queries: []QueryEnvelope{
				{
					Type: QueryTypeBuilder,
					Spec: QueryBuilderQuery[TraceAggregation]{
						Name:   "A",
						Signal: telemetrytypes.SignalTraces,
					},
				},
			},
			expectError:   true,
			errorContains: "limit cannot exceed 10000",
		},
		{
			name: "valid returnSpansFrom",
			traceOperator: QueryBuilderTraceOperator{
				Name:            "test_operator",
				Expression:      "A => B",
				ReturnSpansFrom: "A",
			},
			queries: []QueryEnvelope{
				{
					Type: QueryTypeBuilder,
					Spec: QueryBuilderQuery[TraceAggregation]{
						Name:   "A",
						Signal: telemetrytypes.SignalTraces,
					},
				},
				{
					Type: QueryTypeBuilder,
					Spec: QueryBuilderQuery[TraceAggregation]{
						Name:   "B",
						Signal: telemetrytypes.SignalTraces,
					},
				},
			},
			expectError: false,
		},
		{
			name: "returnSpansFrom references non-existent query",
			traceOperator: QueryBuilderTraceOperator{
				Name:            "test_operator",
				Expression:      "A => B",
				ReturnSpansFrom: "C",
			},
			queries: []QueryEnvelope{
				{
					Type: QueryTypeBuilder,
					Spec: QueryBuilderQuery[TraceAggregation]{
						Name:   "A",
						Signal: telemetrytypes.SignalTraces,
					},
				},
				{
					Type: QueryTypeBuilder,
					Spec: QueryBuilderQuery[TraceAggregation]{
						Name:   "B",
						Signal: telemetrytypes.SignalTraces,
					},
				},
			},
			expectError:   true,
			errorContains: "returnSpansFrom references query 'C' which does not exist or is not a trace query",
		},
		{
			name: "returnSpansFrom references query not in expression",
			traceOperator: QueryBuilderTraceOperator{
				Name:            "test_operator",
				Expression:      "A => B",
				ReturnSpansFrom: "C",
			},
			queries: []QueryEnvelope{
				{
					Type: QueryTypeBuilder,
					Spec: QueryBuilderQuery[TraceAggregation]{
						Name:   "A",
						Signal: telemetrytypes.SignalTraces,
					},
				},
				{
					Type: QueryTypeBuilder,
					Spec: QueryBuilderQuery[TraceAggregation]{
						Name:   "B",
						Signal: telemetrytypes.SignalTraces,
					},
				},
				{
					Type: QueryTypeBuilder,
					Spec: QueryBuilderQuery[TraceAggregation]{
						Name:   "C",
						Signal: telemetrytypes.SignalTraces,
					},
				},
			},
			expectError:   true,
			errorContains: "returnSpansFrom references query 'C' which is not used in the expression",
		},
	}

	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			err := tt.traceOperator.ValidateTraceOperator(tt.queries)

			if tt.expectError {
				assert.Error(t, err)
				if tt.errorContains != "" {
					assert.Contains(t, err.Error(), tt.errorContains)
				}
			} else {
				assert.NoError(t, err)
			}
		})
	}
}
func TestValidateUniqueTraceOperator(t *testing.T) {
	tests := []struct {
		name          string
		queries       []QueryEnvelope
		expectError   bool
		errorContains string
	}{
		{
			name: "no trace operators",
			queries: []QueryEnvelope{
				{Type: QueryTypeBuilder},
				{Type: QueryTypeFormula},
			},
			expectError: false,
		},
		{
			name: "single trace operator",
			queries: []QueryEnvelope{
				{Type: QueryTypeBuilder},
				{
					Type: QueryTypeTraceOperator,
					Spec: QueryBuilderTraceOperator{
						Name: "T1",
					},
				},
				{Type: QueryTypeFormula},
			},
			expectError: false,
		},
		{
			name: "multiple trace operators",
			queries: []QueryEnvelope{
				{Type: QueryTypeBuilder},
				{
					Type: QueryTypeTraceOperator,
					Spec: QueryBuilderTraceOperator{
						Name: "T1",
					},
				},
				{
					Type: QueryTypeTraceOperator,
					Spec: QueryBuilderTraceOperator{
						Name: "T2",
					},
				},
				{Type: QueryTypeFormula},
			},
			expectError:   true,
			errorContains: "only one trace operator is allowed per request, found 2 trace operators: [T1 T2]",
		},
	}

	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			err := ValidateUniqueTraceOperator(tt.queries)

			if tt.expectError {
				assert.Error(t, err)
				if tt.errorContains != "" {
					assert.Contains(t, err.Error(), tt.errorContains)
				}
			} else {
				assert.NoError(t, err)
			}
		})
	}
}
@@ -129,8 +129,9 @@ type ColumnDescriptor struct {
}

type ScalarData struct {
	Columns []*ColumnDescriptor `json:"columns"`
	Data    [][]any             `json:"data"`
	QueryName string              `json:"queryName"`
	Columns   []*ColumnDescriptor `json:"columns"`
	Data      [][]any             `json:"data"`
}

type RawData struct {
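For context on the hunk above: moving QueryName to the top of ScalarData also puts it first in the marshaled payload, since encoding/json emits struct fields in declaration order. A minimal standalone sketch (ColumnDescriptor is reduced here to a single hypothetical Name field for illustration only):

package main

import (
	"encoding/json"
	"fmt"
)

// Reduced stand-in for illustration; the real type carries more fields.
type ColumnDescriptor struct {
	Name string `json:"name"`
}

type ScalarData struct {
	QueryName string              `json:"queryName"`
	Columns   []*ColumnDescriptor `json:"columns"`
	Data      [][]any             `json:"data"`
}

func main() {
	b, _ := json.Marshal(ScalarData{
		QueryName: "A",
		Columns:   []*ColumnDescriptor{{Name: "count"}},
		Data:      [][]any{{42}},
	})
	fmt.Println(string(b))
	// {"queryName":"A","columns":[{"name":"count"}],"data":[[42]]}
}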
@@ -0,0 +1,438 @@
package querybuildertypesv5

import (
	"regexp"
	"strings"

	"github.com/SigNoz/signoz/pkg/errors"
	"github.com/SigNoz/signoz/pkg/types/telemetrytypes"
	"github.com/SigNoz/signoz/pkg/valuer"
)

type TraceOperatorType struct{ valuer.String }

var (
	TraceOperatorDirectDescendant   = TraceOperatorType{valuer.NewString("=>")}
	TraceOperatorIndirectDescendant = TraceOperatorType{valuer.NewString("->")}
	TraceOperatorAnd                = TraceOperatorType{valuer.NewString("&&")}
	TraceOperatorOr                 = TraceOperatorType{valuer.NewString("||")}
	TraceOperatorNot                = TraceOperatorType{valuer.NewString("NOT")}
	TraceOperatorExclude            = TraceOperatorType{valuer.NewString("NOT")}
)

type TraceOrderBy struct {
	valuer.String
}

var (
	OrderBySpanCount     = TraceOrderBy{valuer.NewString("span_count")}
	OrderByTraceDuration = TraceOrderBy{valuer.NewString("trace_duration")}
)

type QueryBuilderTraceOperator struct {
	Name     string `json:"name"`
	Disabled bool   `json:"disabled,omitempty"`

	Expression string `json:"expression"`

	Filter *Filter `json:"filter,omitempty"`

	// User-configurable span return strategy - which query's spans to return
	ReturnSpansFrom string `json:"returnSpansFrom,omitempty"`

	// Trace-specific ordering (only span_count and trace_duration allowed)
	Order []OrderBy `json:"orderBy,omitempty"`

	Aggregations []TraceAggregation `json:"aggregations,omitempty"`
	StepInterval Step               `json:"stepInterval,omitempty"`
	GroupBy      []GroupByKey       `json:"groupBy,omitempty"`

	Limit  int    `json:"limit,omitempty"`
	Cursor string `json:"cursor,omitempty"`

	// Other post-processing options
	SelectFields []telemetrytypes.TelemetryFieldKey `json:"selectFields,omitempty"`
	Functions    []Function                         `json:"functions,omitempty"`

	// Internal parsed representation (not exposed in JSON)
	ParsedExpression *TraceOperand `json:"-"`
}

// TraceOperand represents the internal parsed tree structure
type TraceOperand struct {
	// For leaf nodes - reference to a query
	QueryRef *TraceOperatorQueryRef `json:"-"`

	// For nested operations
	Operator *TraceOperatorType `json:"-"`
	Left     *TraceOperand      `json:"-"`
	Right    *TraceOperand      `json:"-"`
}
// TraceOperatorQueryRef represents a reference to another query
type TraceOperatorQueryRef struct {
	Name string `json:"name"`
}

// ParseExpression parses the expression string into a tree structure
func (q *QueryBuilderTraceOperator) ParseExpression() error {
	if q.Expression == "" {
		return errors.WrapInvalidInputf(
			nil,
			errors.CodeInvalidInput,
			"expression cannot be empty",
		)
	}

	parsed, err := parseTraceExpression(q.Expression)
	if err != nil {
		return errors.WrapInvalidInputf(
			err,
			errors.CodeInvalidInput,
			"failed to parse expression '%s'",
			q.Expression,
		)
	}

	q.ParsedExpression = parsed
	return nil
}

// ValidateTraceOperator validates that all referenced queries exist and are trace queries
func (q *QueryBuilderTraceOperator) ValidateTraceOperator(queries []QueryEnvelope) error {
	// Parse the expression
	if err := q.ParseExpression(); err != nil {
		return err
	}

	// Validate orderBy field if present
	if err := q.ValidateOrderBy(); err != nil {
		return err
	}

	// Validate pagination parameters
	if err := q.ValidatePagination(); err != nil {
		return err
	}

	// Create a map of query names to track if they exist and their signal type
	availableQueries := make(map[string]telemetrytypes.Signal)

	// Only collect trace queries
	for _, query := range queries {
		if query.Type == QueryTypeBuilder {
			switch spec := query.Spec.(type) {
			case QueryBuilderQuery[TraceAggregation]:
				if spec.Signal == telemetrytypes.SignalTraces {
					availableQueries[spec.Name] = spec.Signal
				}
			}
		}
	}

	// Get all query names referenced in the expression
	referencedQueries := q.collectReferencedQueries(q.ParsedExpression)

	// Validate that all referenced queries exist and are trace queries
	for _, queryName := range referencedQueries {
		signal, exists := availableQueries[queryName]
		if !exists {
			return errors.WrapInvalidInputf(
				nil,
				errors.CodeInvalidInput,
				"query '%s' referenced in trace operator expression does not exist or is not a trace query",
				queryName,
			)
		}

		// This check is redundant since we only add trace queries to availableQueries, but keeping for clarity
		if signal != telemetrytypes.SignalTraces {
			return errors.WrapInvalidInputf(
				nil,
				errors.CodeInvalidInput,
				"query '%s' must be a trace query, but found signal '%s'",
				queryName,
				signal,
			)
		}
	}

	// Validate ReturnSpansFrom if specified
	if q.ReturnSpansFrom != "" {
		if _, exists := availableQueries[q.ReturnSpansFrom]; !exists {
			return errors.WrapInvalidInputf(
				nil,
				errors.CodeInvalidInput,
				"returnSpansFrom references query '%s' which does not exist or is not a trace query",
				q.ReturnSpansFrom,
			)
		}

		// Ensure the query is referenced in the expression
		found := false
		for _, queryName := range referencedQueries {
			if queryName == q.ReturnSpansFrom {
				found = true
				break
			}
		}

		if !found {
			return errors.WrapInvalidInputf(
				nil,
				errors.CodeInvalidInput,
				"returnSpansFrom references query '%s' which is not used in the expression '%s'",
				q.ReturnSpansFrom,
				q.Expression,
			)
		}
	}

	return nil
}
// ValidateOrderBy validates the orderBy field
func (q *QueryBuilderTraceOperator) ValidateOrderBy() error {
	if len(q.Order) == 0 {
		return nil
	}

	for i, orderBy := range q.Order {
		// Validate field is one of the allowed values
		fieldName := orderBy.Key.Name
		if fieldName != OrderBySpanCount.StringValue() && fieldName != OrderByTraceDuration.StringValue() {
			return errors.WrapInvalidInputf(
				nil,
				errors.CodeInvalidInput,
				"orderBy[%d] field must be either '%s' or '%s', got '%s'",
				i, OrderBySpanCount.StringValue(), OrderByTraceDuration.StringValue(), fieldName,
			)
		}

		// Validate direction
		if orderBy.Direction != OrderDirectionAsc && orderBy.Direction != OrderDirectionDesc {
			return errors.WrapInvalidInputf(
				nil,
				errors.CodeInvalidInput,
				"orderBy[%d] direction must be either 'asc' or 'desc', got '%s'",
				i, orderBy.Direction,
			)
		}
	}

	return nil
}

// ValidatePagination validates pagination parameters (AIP-158 compliance)
func (q *QueryBuilderTraceOperator) ValidatePagination() error {
	if q.Limit < 0 {
		return errors.WrapInvalidInputf(
			nil,
			errors.CodeInvalidInput,
			"limit must be non-negative, got %d",
			q.Limit,
		)
	}

	// For production use, you might want to enforce maximum limits
	if q.Limit > 10000 {
		return errors.WrapInvalidInputf(
			nil,
			errors.CodeInvalidInput,
			"limit cannot exceed 10000, got %d",
			q.Limit,
		)
	}

	return nil
}
// collectReferencedQueries collects all query names referenced in the expression tree
func (q *QueryBuilderTraceOperator) collectReferencedQueries(operand *TraceOperand) []string {
	if operand == nil {
		return nil
	}

	var queries []string

	if operand.QueryRef != nil {
		queries = append(queries, operand.QueryRef.Name)
	}

	// Recursively collect from children
	queries = append(queries, q.collectReferencedQueries(operand.Left)...)
	queries = append(queries, q.collectReferencedQueries(operand.Right)...)

	// Remove duplicates
	seen := make(map[string]bool)
	unique := []string{}
	for _, q := range queries {
		if !seen[q] {
			seen[q] = true
			unique = append(unique, q)
		}
	}

	return unique
}

// ValidateUniqueTraceOperator ensures only one trace operator exists in queries
func ValidateUniqueTraceOperator(queries []QueryEnvelope) error {
	traceOperatorCount := 0
	var traceOperatorNames []string

	for _, query := range queries {
		if query.Type == QueryTypeTraceOperator {
			// Extract the name from the trace operator spec
			if spec, ok := query.Spec.(QueryBuilderTraceOperator); ok {
				traceOperatorCount++
				traceOperatorNames = append(traceOperatorNames, spec.Name)
			}
		}
	}

	if traceOperatorCount > 1 {
		return errors.WrapInvalidInputf(
			nil,
			errors.CodeInvalidInput,
			"only one trace operator is allowed per request, found %d trace operators: %v",
			traceOperatorCount,
			traceOperatorNames,
		)
	}

	return nil
}
// parseTraceExpression parses an expression string into a tree structure
// Handles precedence: NOT (highest) > || > && > => (lowest)
func parseTraceExpression(expr string) (*TraceOperand, error) {
	expr = strings.TrimSpace(expr)

	// Handle parentheses
	if strings.HasPrefix(expr, "(") && strings.HasSuffix(expr, ")") {
		// Check if parentheses are balanced
		if isBalancedParentheses(expr[1 : len(expr)-1]) {
			return parseTraceExpression(expr[1 : len(expr)-1])
		}
	}

	// Handle unary NOT operator (prefix)
	if strings.HasPrefix(expr, "NOT ") {
		operand, err := parseTraceExpression(expr[4:])
		if err != nil {
			return nil, err
		}
		notOp := TraceOperatorNot
		return &TraceOperand{
			Operator: &notOp,
			Left:     operand,
		}, nil
	}

	// Find binary operators with lowest precedence first (=> has lowest precedence)
	// Order: => (lowest) < && < || < NOT (highest)
	operators := []string{"=>", "&&", "||", " NOT "}

	for _, op := range operators {
		if pos := findOperatorPosition(expr, op); pos != -1 {
			leftExpr := strings.TrimSpace(expr[:pos])
			rightExpr := strings.TrimSpace(expr[pos+len(op):])

			left, err := parseTraceExpression(leftExpr)
			if err != nil {
				return nil, err
			}

			right, err := parseTraceExpression(rightExpr)
			if err != nil {
				return nil, err
			}

			var opType TraceOperatorType
			switch strings.TrimSpace(op) {
			case "=>":
				opType = TraceOperatorDirectDescendant
			case "&&":
				opType = TraceOperatorAnd
			case "||":
				opType = TraceOperatorOr
			case "NOT":
				opType = TraceOperatorExclude // Binary NOT (A NOT B)
			}

			return &TraceOperand{
				Operator: &opType,
				Left:     left,
				Right:    right,
			}, nil
		}
	}

	// If no operators found, this should be a query reference
	if matched, _ := regexp.MatchString(`^[A-Za-z][A-Za-z0-9_]*$`, expr); !matched {
		return nil, errors.WrapInvalidInputf(
			nil,
			errors.CodeInvalidInput,
			"invalid query reference '%s'",
			expr,
		)
	}

	return &TraceOperand{
		QueryRef: &TraceOperatorQueryRef{Name: expr},
	}, nil
}
// isBalancedParentheses checks if parentheses are balanced in the expression
func isBalancedParentheses(expr string) bool {
	depth := 0
	for _, char := range expr {
		if char == '(' {
			depth++
		} else if char == ')' {
			depth--
			if depth < 0 {
				return false
			}
		}
	}
	return depth == 0
}
// findOperatorPosition finds the position of an operator, respecting parentheses
func findOperatorPosition(expr, op string) int {
	depth := 0
	opLen := len(op)

	// Scan from right to left to find the rightmost operator at depth 0
	for i := len(expr) - 1; i >= 0; i-- {
		char := expr[i]

		// Update depth based on parentheses (scanning right to left)
		if char == ')' {
			depth++
		} else if char == '(' {
			depth--
		}

		// Only check for operators when we're at depth 0 (outside parentheses)
		// and make sure we have enough characters for the operator
		if depth == 0 && i+opLen <= len(expr) {
			// Check if the substring matches our operator
			if expr[i:i+opLen] == op {
				// For " NOT " (binary), ensure proper spacing
				if op == " NOT " {
					// Make sure it's properly space-padded
					if i > 0 && i+opLen < len(expr) {
						return i
					}
				} else {
					// For other operators (=>, &&, ||), return immediately
					return i
				}
			}
		}
	}
	return -1
}
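Because parseTraceExpression tries "=>" before "&&" and findOperatorPosition returns the rightmost match at parenthesis depth 0, the chain operator always ends up at the root of the tree. A minimal same-package sketch of that behavior, mirroring the tests earlier in this diff (a "fmt" import is assumed):

func demoParseTree() {
	op, _ := parseTraceExpression("A => B && C")
	fmt.Println(op.Operator.StringValue())       // "=>" is the root (lowest precedence)
	fmt.Println(op.Left.QueryRef.Name)           // "A"
	fmt.Println(op.Right.Operator.StringValue()) // "&&" parsed into the right subtree
	fmt.Println(op.Right.Left.QueryRef.Name)     // "B"
	fmt.Println(op.Right.Right.QueryRef.Name)    // "C"
}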
700 pkg/types/querybuildertypes/querybuildertypesv5/validation.go Normal file
@@ -0,0 +1,700 @@
package querybuildertypesv5

import (
	"fmt"
	"slices"
	"strings"

	"github.com/SigNoz/signoz/pkg/errors"
	"github.com/SigNoz/signoz/pkg/types/metrictypes"
	"github.com/SigNoz/signoz/pkg/types/telemetrytypes"
)

// getQueryIdentifier returns a friendly identifier for a query based on its type and name/content
func getQueryIdentifier(envelope QueryEnvelope, index int) string {
	switch envelope.Type {
	case QueryTypeBuilder, QueryTypeSubQuery:
		switch spec := envelope.Spec.(type) {
		case QueryBuilderQuery[TraceAggregation]:
			if spec.Name != "" {
				return fmt.Sprintf("query '%s'", spec.Name)
			}
			return fmt.Sprintf("trace query at position %d", index+1)
		case QueryBuilderQuery[LogAggregation]:
			if spec.Name != "" {
				return fmt.Sprintf("query '%s'", spec.Name)
			}
			return fmt.Sprintf("log query at position %d", index+1)
		case QueryBuilderQuery[MetricAggregation]:
			if spec.Name != "" {
				return fmt.Sprintf("query '%s'", spec.Name)
			}
			return fmt.Sprintf("metric query at position %d", index+1)
		}
	case QueryTypeFormula:
		if spec, ok := envelope.Spec.(QueryBuilderFormula); ok && spec.Name != "" {
			return fmt.Sprintf("formula '%s'", spec.Name)
		}
		return fmt.Sprintf("formula at position %d", index+1)
	case QueryTypeJoin:
		if spec, ok := envelope.Spec.(QueryBuilderJoin); ok && spec.Name != "" {
			return fmt.Sprintf("join '%s'", spec.Name)
		}
		return fmt.Sprintf("join at position %d", index+1)
	case QueryTypePromQL:
		if spec, ok := envelope.Spec.(PromQuery); ok && spec.Name != "" {
			return fmt.Sprintf("PromQL query '%s'", spec.Name)
		}
		return fmt.Sprintf("PromQL query at position %d", index+1)
	case QueryTypeClickHouseSQL:
		if spec, ok := envelope.Spec.(ClickHouseQuery); ok && spec.Name != "" {
			return fmt.Sprintf("ClickHouse query '%s'", spec.Name)
		}
		return fmt.Sprintf("ClickHouse query at position %d", index+1)
	}
	return fmt.Sprintf("query at position %d", index+1)
}
const (
	// Maximum limit for query results
	MaxQueryLimit = 10000
)

// ValidateFunctionName checks if the function name is valid
func ValidateFunctionName(name FunctionName) error {
	validFunctions := []FunctionName{
		FunctionNameCutOffMin,
		FunctionNameCutOffMax,
		FunctionNameClampMin,
		FunctionNameClampMax,
		FunctionNameAbsolute,
		FunctionNameRunningDiff,
		FunctionNameLog2,
		FunctionNameLog10,
		FunctionNameCumulativeSum,
		FunctionNameEWMA3,
		FunctionNameEWMA5,
		FunctionNameEWMA7,
		FunctionNameMedian3,
		FunctionNameMedian5,
		FunctionNameMedian7,
		FunctionNameTimeShift,
		FunctionNameAnomaly,
	}

	if slices.Contains(validFunctions, name) {
		return nil
	}

	// Format valid functions as comma-separated string
	var validFunctionNames []string
	for _, fn := range validFunctions {
		validFunctionNames = append(validFunctionNames, fn.StringValue())
	}

	return errors.NewInvalidInputf(
		errors.CodeInvalidInput,
		"invalid function name: %s",
		name.StringValue(),
	).WithAdditional(fmt.Sprintf("valid functions are: %s", strings.Join(validFunctionNames, ", ")))
}
// Validate performs preliminary validation on QueryBuilderQuery
func (q *QueryBuilderQuery[T]) Validate(requestType RequestType) error {
	// Validate signal
	if err := q.validateSignal(); err != nil {
		return err
	}

	// Validate aggregations only for non-raw request types
	if requestType != RequestTypeRaw {
		if err := q.validateAggregations(); err != nil {
			return err
		}
	}

	// Validate limit and pagination
	if err := q.validateLimitAndPagination(); err != nil {
		return err
	}

	// Validate functions
	if err := q.validateFunctions(); err != nil {
		return err
	}

	// Validate secondary aggregations
	if err := q.validateSecondaryAggregations(); err != nil {
		return err
	}

	// Validate order by
	if err := q.validateOrderBy(); err != nil {
		return err
	}

	return nil
}

func (q *QueryBuilderQuery[T]) validateSignal() error {
	// Signal validation is handled during unmarshaling in req.go
	// Valid signals are: metrics, traces, logs
	switch q.Signal {
	case telemetrytypes.SignalMetrics,
		telemetrytypes.SignalTraces,
		telemetrytypes.SignalLogs,
		telemetrytypes.SignalUnspecified: // Empty is allowed for backward compatibility
		return nil
	default:
		return errors.NewInvalidInputf(
			errors.CodeInvalidInput,
			"invalid signal type: %s",
			q.Signal,
		).WithAdditional(
			"Valid signals are: metrics, traces, logs",
		)
	}
}
func (q *QueryBuilderQuery[T]) validateAggregations() error {
	// At least one aggregation required for non-disabled queries
	if len(q.Aggregations) == 0 && !q.Disabled {
		return errors.NewInvalidInputf(
			errors.CodeInvalidInput,
			"at least one aggregation is required",
		)
		// TODO: add url with docs
	}

	// Check for duplicate aliases
	aliases := make(map[string]bool)
	for i, agg := range q.Aggregations {
		// Type-specific validation based on T
		switch v := any(agg).(type) {
		case MetricAggregation:
			if v.MetricName == "" {
				aggId := fmt.Sprintf("aggregation #%d", i+1)
				if q.Name != "" {
					aggId = fmt.Sprintf("aggregation #%d in query '%s'", i+1, q.Name)
				}
				return errors.NewInvalidInputf(
					errors.CodeInvalidInput,
					"metric name is required for %s",
					aggId,
				)
			}
			// Validate metric-specific aggregations
			if err := validateMetricAggregation(v); err != nil {
				aggId := fmt.Sprintf("aggregation #%d", i+1)
				if q.Name != "" {
					aggId = fmt.Sprintf("aggregation #%d in query '%s'", i+1, q.Name)
				}
				return wrapValidationError(err, aggId, "invalid metric %s: %s")
			}
		case TraceAggregation:
			if v.Expression == "" {
				aggId := fmt.Sprintf("aggregation #%d", i+1)
				if q.Name != "" {
					aggId = fmt.Sprintf("aggregation #%d in query '%s'", i+1, q.Name)
				}
				return errors.NewInvalidInputf(
					errors.CodeInvalidInput,
					"expression is required for trace %s",
					aggId,
				)
			}
			if v.Alias != "" {
				if aliases[v.Alias] {
					return errors.NewInvalidInputf(
						errors.CodeInvalidInput,
						"duplicate aggregation alias: %s",
						v.Alias,
					)
				}
				aliases[v.Alias] = true
			}
		case LogAggregation:
			if v.Expression == "" {
				aggId := fmt.Sprintf("aggregation #%d", i+1)
				if q.Name != "" {
					aggId = fmt.Sprintf("aggregation #%d in query '%s'", i+1, q.Name)
				}
				return errors.NewInvalidInputf(
					errors.CodeInvalidInput,
					"expression is required for log %s",
					aggId,
				)
			}
			if v.Alias != "" {
				if aliases[v.Alias] {
					return errors.NewInvalidInputf(
						errors.CodeInvalidInput,
						"duplicate aggregation alias: %s",
						v.Alias,
					)
				}
				aliases[v.Alias] = true
			}
		}
	}

	return nil
}
func (q *QueryBuilderQuery[T]) validateLimitAndPagination() error {
	// Validate limit
	if q.Limit < 0 {
		return errors.NewInvalidInputf(
			errors.CodeInvalidInput,
			"limit must be non-negative, got %d",
			q.Limit,
		)
	}

	if q.Limit > MaxQueryLimit {
		return errors.NewInvalidInputf(
			errors.CodeInvalidInput,
			"limit exceeds maximum allowed value of %d",
			MaxQueryLimit,
		).WithAdditional(
			fmt.Sprintf("Provided limit: %d", q.Limit),
		)
	}

	// Validate offset
	if q.Offset < 0 {
		return errors.NewInvalidInputf(
			errors.CodeInvalidInput,
			"offset must be non-negative, got %d",
			q.Offset,
		)
	}

	return nil
}
func (q *QueryBuilderQuery[T]) validateFunctions() error {
	for i, fn := range q.Functions {
		if err := ValidateFunctionName(fn.Name); err != nil {
			fnId := fmt.Sprintf("function #%d", i+1)
			if q.Name != "" {
				fnId = fmt.Sprintf("function #%d in query '%s'", i+1, q.Name)
			}
			return wrapValidationError(err, fnId, "invalid %s: %s")
		}
	}
	return nil
}

func (q *QueryBuilderQuery[T]) validateSecondaryAggregations() error {
	for i, secAgg := range q.SecondaryAggregations {
		// Secondary aggregation expression can be empty - we allow it per requirements
		// Just validate structure
		if secAgg.Limit < 0 {
			secAggId := fmt.Sprintf("secondary aggregation #%d", i+1)
			if q.Name != "" {
				secAggId = fmt.Sprintf("secondary aggregation #%d in query '%s'", i+1, q.Name)
			}
			return errors.NewInvalidInputf(
				errors.CodeInvalidInput,
				"%s: limit must be non-negative",
				secAggId,
			)
		}
	}
	return nil
}

func (q *QueryBuilderQuery[T]) validateOrderBy() error {
	for i, order := range q.Order {
		// Direction validation is handled by the OrderDirection type
		if order.Direction != OrderDirectionAsc && order.Direction != OrderDirectionDesc {
			orderId := fmt.Sprintf("order by clause #%d", i+1)
			if q.Name != "" {
				orderId = fmt.Sprintf("order by clause #%d in query '%s'", i+1, q.Name)
			}
			return errors.NewInvalidInputf(
				errors.CodeInvalidInput,
				"invalid direction for %s: %s",
				orderId,
				order.Direction.StringValue(),
			).WithAdditional(
				"Valid directions are: asc, desc",
			)
		}
	}
	return nil
}
// ValidateQueryRangeRequest validates the entire query range request
func (r *QueryRangeRequest) Validate() error {
	// Validate time range
	if r.Start >= r.End {
		return errors.NewInvalidInputf(
			errors.CodeInvalidInput,
			"start time must be before end time",
		)
	}

	// Validate request type
	switch r.RequestType {
	case RequestTypeRaw, RequestTypeTimeSeries, RequestTypeScalar:
		// Valid request types
	default:
		return errors.NewInvalidInputf(
			errors.CodeInvalidInput,
			"invalid request type: %s",
			r.RequestType,
		).WithAdditional(
			"Valid request types are: raw, timeseries, scalar",
		)
	}

	// Validate composite query
	if err := r.validateCompositeQuery(); err != nil {
		return err
	}

	return nil
}
func (r *QueryRangeRequest) validateCompositeQuery() error {
	// Validate queries in composite query
	if len(r.CompositeQuery.Queries) == 0 {
		return errors.NewInvalidInputf(
			errors.CodeInvalidInput,
			"at least one query is required",
		)
	}

	// Track query names for uniqueness (only for non-formula queries)
	queryNames := make(map[string]bool)

	// Validate each query based on its type
	for i, envelope := range r.CompositeQuery.Queries {
		switch envelope.Type {
		case QueryTypeBuilder, QueryTypeSubQuery:
			// Validate based on the concrete type
			switch spec := envelope.Spec.(type) {
			case QueryBuilderQuery[TraceAggregation]:
				if err := spec.Validate(r.RequestType); err != nil {
					queryId := getQueryIdentifier(envelope, i)
					return wrapValidationError(err, queryId, "invalid %s: %s")
				}
				// Check name uniqueness for non-formula context
				if spec.Name != "" {
					if queryNames[spec.Name] {
						return errors.NewInvalidInputf(
							errors.CodeInvalidInput,
							"duplicate query name '%s'",
							spec.Name,
						)
					}
					queryNames[spec.Name] = true
				}
			case QueryBuilderQuery[LogAggregation]:
				if err := spec.Validate(r.RequestType); err != nil {
					queryId := getQueryIdentifier(envelope, i)
					return wrapValidationError(err, queryId, "invalid %s: %s")
				}
				// Check name uniqueness for non-formula context
				if spec.Name != "" {
					if queryNames[spec.Name] {
						return errors.NewInvalidInputf(
							errors.CodeInvalidInput,
							"duplicate query name '%s'",
							spec.Name,
						)
					}
					queryNames[spec.Name] = true
				}
			case QueryBuilderQuery[MetricAggregation]:
				if err := spec.Validate(r.RequestType); err != nil {
					queryId := getQueryIdentifier(envelope, i)
					return wrapValidationError(err, queryId, "invalid %s: %s")
				}
				// Check name uniqueness for non-formula context
				if spec.Name != "" {
					if queryNames[spec.Name] {
						return errors.NewInvalidInputf(
							errors.CodeInvalidInput,
							"duplicate query name '%s'",
							spec.Name,
						)
					}
					queryNames[spec.Name] = true
				}
			default:
				queryId := getQueryIdentifier(envelope, i)
				return errors.NewInvalidInputf(
					errors.CodeInvalidInput,
					"unknown spec type for %s",
					queryId,
				)
			}
		case QueryTypeFormula:
			// Formula validation is handled separately
			spec, ok := envelope.Spec.(QueryBuilderFormula)
			if !ok {
				queryId := getQueryIdentifier(envelope, i)
				return errors.NewInvalidInputf(
					errors.CodeInvalidInput,
					"invalid spec for %s",
					queryId,
				)
			}
			if spec.Expression == "" {
				queryId := getQueryIdentifier(envelope, i)
				return errors.NewInvalidInputf(
					errors.CodeInvalidInput,
					"expression is required for %s",
					queryId,
				)
			}
		case QueryTypeJoin:
			// Join validation is handled separately
			_, ok := envelope.Spec.(QueryBuilderJoin)
			if !ok {
				queryId := getQueryIdentifier(envelope, i)
				return errors.NewInvalidInputf(
					errors.CodeInvalidInput,
					"invalid spec for %s",
					queryId,
				)
			}
		case QueryTypePromQL:
			// PromQL validation is handled separately
			spec, ok := envelope.Spec.(PromQuery)
			if !ok {
				queryId := getQueryIdentifier(envelope, i)
				return errors.NewInvalidInputf(
					errors.CodeInvalidInput,
					"invalid spec for %s",
					queryId,
				)
			}
			if spec.Query == "" {
				queryId := getQueryIdentifier(envelope, i)
				return errors.NewInvalidInputf(
					errors.CodeInvalidInput,
					"query expression is required for %s",
					queryId,
				)
			}
		case QueryTypeClickHouseSQL:
			// ClickHouse SQL validation is handled separately
			spec, ok := envelope.Spec.(ClickHouseQuery)
			if !ok {
				queryId := getQueryIdentifier(envelope, i)
				return errors.NewInvalidInputf(
					errors.CodeInvalidInput,
					"invalid spec for %s",
					queryId,
				)
			}
			if spec.Query == "" {
				queryId := getQueryIdentifier(envelope, i)
				return errors.NewInvalidInputf(
					errors.CodeInvalidInput,
					"query expression is required for %s",
					queryId,
				)
			}
		default:
			queryId := getQueryIdentifier(envelope, i)
			return errors.NewInvalidInputf(
				errors.CodeInvalidInput,
				"unknown query type '%s' for %s",
				envelope.Type,
				queryId,
			).WithAdditional(
				"Valid query types are: builder_query, builder_formula, builder_join, promql, clickhouse_sql",
			)
		}
	}

	return nil
}
// Validate performs validation on CompositeQuery
func (c *CompositeQuery) Validate(requestType RequestType) error {
	if len(c.Queries) == 0 {
		return errors.NewInvalidInputf(
			errors.CodeInvalidInput,
			"at least one query is required",
		)
	}

	// Validate each query
	for i, envelope := range c.Queries {
		if err := validateQueryEnvelope(envelope, requestType); err != nil {
			queryId := getQueryIdentifier(envelope, i)
			return wrapValidationError(err, queryId, "invalid %s: %s")
		}
	}

	return nil
}
func validateQueryEnvelope(envelope QueryEnvelope, requestType RequestType) error {
	switch envelope.Type {
	case QueryTypeBuilder, QueryTypeSubQuery:
		switch spec := envelope.Spec.(type) {
		case QueryBuilderQuery[TraceAggregation]:
			return spec.Validate(requestType)
		case QueryBuilderQuery[LogAggregation]:
			return spec.Validate(requestType)
		case QueryBuilderQuery[MetricAggregation]:
			return spec.Validate(requestType)
		default:
			return errors.NewInvalidInputf(
				errors.CodeInvalidInput,
				"unknown query spec type",
			)
		}
	case QueryTypeFormula:
		spec, ok := envelope.Spec.(QueryBuilderFormula)
		if !ok {
			return errors.NewInvalidInputf(
				errors.CodeInvalidInput,
				"invalid formula spec",
			)
		}
		if spec.Expression == "" {
			return errors.NewInvalidInputf(
				errors.CodeInvalidInput,
				"formula expression is required",
			)
		}
		return nil
	case QueryTypeJoin:
		_, ok := envelope.Spec.(QueryBuilderJoin)
		if !ok {
			return errors.NewInvalidInputf(
				errors.CodeInvalidInput,
				"invalid join spec",
			)
		}
		return nil
	case QueryTypePromQL:
		spec, ok := envelope.Spec.(PromQuery)
		if !ok {
			return errors.NewInvalidInputf(
				errors.CodeInvalidInput,
				"invalid PromQL spec",
			)
		}
		if spec.Query == "" {
			return errors.NewInvalidInputf(
				errors.CodeInvalidInput,
				"PromQL query is required",
			)
		}
		return nil
	case QueryTypeClickHouseSQL:
		spec, ok := envelope.Spec.(ClickHouseQuery)
		if !ok {
			return errors.NewInvalidInputf(
				errors.CodeInvalidInput,
				"invalid ClickHouse SQL spec",
			)
		}
		if spec.Query == "" {
			return errors.NewInvalidInputf(
				errors.CodeInvalidInput,
				"ClickHouse SQL query is required",
			)
		}
		return nil
	default:
		return errors.NewInvalidInputf(
			errors.CodeInvalidInput,
			"unknown query type: %s",
			envelope.Type,
		).WithAdditional(
			"Valid query types are: builder_query, builder_sub_query, builder_formula, builder_join, promql, clickhouse_sql",
		)
	}
}
// validateMetricAggregation validates metric-specific aggregation parameters
func validateMetricAggregation(agg MetricAggregation) error {
	// we can't decide anything here without known temporality
	if agg.Temporality == metrictypes.Unknown {
		return nil
	}

	// Validate that rate/increase are only used with appropriate temporalities
	if agg.TimeAggregation == metrictypes.TimeAggregationRate || agg.TimeAggregation == metrictypes.TimeAggregationIncrease {
		// For gauge metrics (Unspecified temporality), rate/increase doesn't make sense
		if agg.Temporality == metrictypes.Unspecified {
			return errors.NewInvalidInputf(
				errors.CodeInvalidInput,
				"rate/increase aggregation cannot be used with gauge metrics (unspecified temporality)",
			)
		}
	}

	// Validate percentile aggregations are only used with histogram types
	if agg.SpaceAggregation.IsPercentile() {
		if agg.Type != metrictypes.HistogramType && agg.Type != metrictypes.ExpHistogramType && agg.Type != metrictypes.SummaryType {
			return errors.NewInvalidInputf(
				errors.CodeInvalidInput,
				"percentile aggregation can only be used with histogram or summary metric types",
			)
		}
	}

	// Validate time aggregation values
	validTimeAggregations := []metrictypes.TimeAggregation{
		metrictypes.TimeAggregationUnspecified,
		metrictypes.TimeAggregationLatest,
		metrictypes.TimeAggregationSum,
		metrictypes.TimeAggregationAvg,
		metrictypes.TimeAggregationMin,
		metrictypes.TimeAggregationMax,
		metrictypes.TimeAggregationCount,
		metrictypes.TimeAggregationCountDistinct,
		metrictypes.TimeAggregationRate,
		metrictypes.TimeAggregationIncrease,
	}

	validTimeAgg := slices.Contains(validTimeAggregations, agg.TimeAggregation)
	if !validTimeAgg {
		return errors.NewInvalidInputf(
			errors.CodeInvalidInput,
			"invalid time aggregation: %s",
			agg.TimeAggregation.StringValue(),
		).WithAdditional(
			"Valid time aggregations: latest, sum, avg, min, max, count, count_distinct, rate, increase",
		)
	}

	// Validate space aggregation values
	validSpaceAggregations := []metrictypes.SpaceAggregation{
		metrictypes.SpaceAggregationUnspecified,
		metrictypes.SpaceAggregationSum,
		metrictypes.SpaceAggregationAvg,
		metrictypes.SpaceAggregationMin,
		metrictypes.SpaceAggregationMax,
		metrictypes.SpaceAggregationCount,
		metrictypes.SpaceAggregationPercentile50,
		metrictypes.SpaceAggregationPercentile75,
		metrictypes.SpaceAggregationPercentile90,
		metrictypes.SpaceAggregationPercentile95,
		metrictypes.SpaceAggregationPercentile99,
	}

	validSpaceAgg := slices.Contains(validSpaceAggregations, agg.SpaceAggregation)
	if !validSpaceAgg {
		return errors.NewInvalidInputf(
			errors.CodeInvalidInput,
			"invalid space aggregation: %s",
			agg.SpaceAggregation.StringValue(),
		).WithAdditional(
			"Valid space aggregations: sum, avg, min, max, count, p50, p75, p90, p95, p99",
		)
	}

	return nil
}
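A minimal same-package sketch of how the validators above chain on a request (field names taken from the code in this file; a "fmt" import is assumed). The time-range check fires before the composite-query check:

func demoValidateRequest() {
	req := QueryRangeRequest{
		Start:       1000,
		End:         1000, // start must be strictly before end
		RequestType: RequestTypeScalar,
	}
	fmt.Println(req.Validate()) // fails the time-range check first

	req.End = 2000
	fmt.Println(req.Validate()) // now fails with "at least one query is required"
}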
@@ -104,7 +104,7 @@ func SuggestCorrection(input string, knownFieldKeys []string) (string, bool) {
	}

	if bestSimilarity >= typoSuggestionThreshold {
		return fmt.Sprintf("did you mean: %s?", bestMatch), true
		return fmt.Sprintf("did you mean: '%s'?", bestMatch), true
	}

	return "", false
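The only change in this hunk is quoting the suggestion. A hedged sketch of the resulting behavior (same package as SuggestCorrection; whether ok is true depends on typoSuggestionThreshold):

func demoSuggestCorrection() {
	msg, ok := SuggestCorrection("servce.name", []string{"service.name", "host.name"})
	fmt.Println(ok)  // true, assuming the similarity clears typoSuggestionThreshold
	fmt.Println(msg) // did you mean: 'service.name'? (the match is now quoted)
}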
@@ -2,6 +2,8 @@ package telemetrytypes

import (
	"context"

	"github.com/SigNoz/signoz/pkg/types/metrictypes"
)

// MetadataStore is the interface for the telemetry metadata store.
@@ -22,4 +24,10 @@ type MetadataStore interface {

	// GetAllValues returns a list of all values.
	GetAllValues(ctx context.Context, fieldValueSelector *FieldValueSelector) (*TelemetryFieldValues, error)

	// FetchTemporality fetches the temporality for metric
	FetchTemporality(ctx context.Context, metricName string) (metrictypes.Temporality, error)

	// FetchTemporalityMulti fetches the temporality for multiple metrics
	FetchTemporalityMulti(ctx context.Context, metricNames ...string) (map[string]metrictypes.Temporality, error)
}
@@ -4,6 +4,7 @@ import (
	"context"
	"strings"

	"github.com/SigNoz/signoz/pkg/types/metrictypes"
	"github.com/SigNoz/signoz/pkg/types/telemetrytypes"
)

@@ -13,6 +14,7 @@ type MockMetadataStore struct {
	KeysMap          map[string][]*telemetrytypes.TelemetryFieldKey
	RelatedValuesMap map[string][]string
	AllValuesMap     map[string]*telemetrytypes.TelemetryFieldValues
	TemporalityMap   map[string]metrictypes.Temporality
}

// NewMockMetadataStore creates a new instance of MockMetadataStore with initialized maps
@@ -21,6 +23,7 @@ func NewMockMetadataStore() *MockMetadataStore {
		KeysMap:          make(map[string][]*telemetrytypes.TelemetryFieldKey),
		RelatedValuesMap: make(map[string][]string),
		AllValuesMap:     make(map[string]*telemetrytypes.TelemetryFieldValues),
		TemporalityMap:   make(map[string]metrictypes.Temporality),
	}
}

@@ -249,3 +252,31 @@ func (m *MockMetadataStore) SetRelatedValues(lookupKey string, values []string)
func (m *MockMetadataStore) SetAllValues(lookupKey string, values *telemetrytypes.TelemetryFieldValues) {
	m.AllValuesMap[lookupKey] = values
}

// FetchTemporality fetches the temporality for a metric
func (m *MockMetadataStore) FetchTemporality(ctx context.Context, metricName string) (metrictypes.Temporality, error) {
	if temporality, exists := m.TemporalityMap[metricName]; exists {
		return temporality, nil
	}
	return metrictypes.Unknown, nil
}

// FetchTemporalityMulti fetches the temporality for multiple metrics
func (m *MockMetadataStore) FetchTemporalityMulti(ctx context.Context, metricNames ...string) (map[string]metrictypes.Temporality, error) {
	result := make(map[string]metrictypes.Temporality)

	for _, metricName := range metricNames {
		if temporality, exists := m.TemporalityMap[metricName]; exists {
			result[metricName] = temporality
		} else {
			result[metricName] = metrictypes.Unknown
		}
	}

	return result, nil
}

// SetTemporality sets the temporality for a metric in the mock store
func (m *MockMetadataStore) SetTemporality(metricName string, temporality metrictypes.Temporality) {
	m.TemporalityMap[metricName] = temporality
}
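A short sketch of how a test might use the new temporality hooks on the mock ("fmt" import assumed; metrictypes.Delta is assumed to exist alongside the Unknown value used above):

func demoMockTemporality() {
	store := NewMockMetadataStore()
	store.SetTemporality("http_requests_total", metrictypes.Delta) // seed a known temporality

	got, _ := store.FetchTemporalityMulti(context.Background(), "http_requests_total", "missing_metric")
	fmt.Println(got["http_requests_total"]) // the seeded temporality
	fmt.Println(got["missing_metric"])      // metrictypes.Unknown fallback for unseeded metrics
}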