mirror of https://github.com/SigNoz/signoz.git
synced 2025-12-28 13:34:18 +00:00

Compare commits: v0.63.0-cl...chore/sync (20 commits)
| Author | SHA1 | Date |
|---|---|---|
|  | bb2a60565b |  |
|  | 823f84f857 |  |
|  | 8a4d45084d |  |
|  | 5bc6c33899 |  |
|  | 73fc5e45e5 |  |
|  | 799c33ff82 |  |
|  | 83f6dea2db |  |
|  | 7031c866e8 |  |
|  | 46bc7c7a21 |  |
|  | 6d9741c3a4 |  |
|  | 610a8ec704 |  |
|  | cd9f27ab08 |  |
|  | 14fbb1fcda |  |
|  | e1b8205a16 |  |
|  | e21b9a561d |  |
|  | c4858ec829 |  |
|  | 2b5a0ec496 |  |
|  | a9440c010c |  |
|  | f9e7eff357 |  |
|  | 47d8c9e3e7 |  |
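The same range can be walked locally; a minimal sketch, assuming both refs (`v0.63.0-cl` and `chore/sync`) exist on the remote you clone:

```bash
# Minimal sketch: list the commits in this compare range from a local clone.
git clone https://github.com/SigNoz/signoz.git
cd signoz
git log --oneline 'v0.63.0-cl...chore/sync'
```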
.github/workflows/build.yaml (vendored, 1 change)

@@ -3,7 +3,6 @@ name: build-pipeline
 on:
   pull_request:
     branches:
-      - develop
       - main
       - release/v*

.github/workflows/docs.yml (vendored, 2 changes)

@@ -3,7 +3,7 @@ name: "Update PR labels and Block PR until related docs are shipped for the feat
 on:
   pull_request:
     branches:
-      - develop
+      - main
     types: [opened, edited, labeled, unlabeled]

 permissions:
.github/workflows/e2e-k3s.yaml (vendored, 2 changes)

@@ -42,7 +42,7 @@ jobs:
         kubectl create ns sample-application

         # apply hotrod k8s manifest file
-        kubectl -n sample-application apply -f https://raw.githubusercontent.com/SigNoz/signoz/develop/sample-apps/hotrod/hotrod.yaml
+        kubectl -n sample-application apply -f https://raw.githubusercontent.com/SigNoz/signoz/main/sample-apps/hotrod/hotrod.yaml

         # wait for all deployments in sample-application namespace to be READY
         kubectl -n sample-application get deploy --output name | xargs -r -n1 -t kubectl -n sample-application rollout status --timeout=300s
.github/workflows/jest-coverage-changes.yml (vendored, 5 changes)

@@ -2,7 +2,8 @@ name: Jest Coverage - changed files

 on:
   pull_request:
-    branches: develop
+    branches:
+      - main

 jobs:
   build:
@@ -11,7 +12,7 @@ jobs:
       - name: Checkout
         uses: actions/checkout@v4
         with:
-          ref: "refs/heads/develop"
+          ref: "refs/heads/main"
           token: ${{ secrets.GITHUB_TOKEN }} # Provide the GitHub token for authentication

       - name: Fetch branch
.github/workflows/push.yaml (vendored, 1 change)

@@ -4,7 +4,6 @@ on:
   push:
     branches:
       - main
-      - develop
     tags:
       - v*

.github/workflows/sonar.yml (vendored, 1 change)

@@ -3,7 +3,6 @@ on:
   pull_request:
     branches:
       - main
-      - develop
     paths:
       - 'frontend/**'
 defaults:
.github/workflows/staging-deployment.yaml (vendored, 6 changes)

@@ -1,12 +1,12 @@
 name: staging-deployment
-# Trigger deployment only on push to develop branch
+# Trigger deployment only on push to main branch
 on:
   push:
     branches:
-      - develop
+      - main
 jobs:
   deploy:
-    name: Deploy latest develop branch to staging
+    name: Deploy latest main branch to staging
     runs-on: ubuntu-latest
     environment: staging
     permissions:
.github/workflows/testing-deployment.yaml (vendored, 2 changes)

@@ -44,7 +44,7 @@ jobs:
         git add .
         git stash push -m "stashed on $(date --iso-8601=seconds)"
         git fetch origin
-        git checkout develop
+        git checkout main
         git pull
         # This is added to include the scenerio when new commit in PR is force-pushed
         git branch -D ${GITHUB_BRANCH}
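Most of the workflow changes above retire the `develop` branch in favour of `main`; a quick check from a checkout root confirms nothing still points at the old branch (a minimal sketch, standard grep usage):

```bash
# Minimal sketch: surface any workflow still referencing the retired develop branch.
grep -rn "develop" .github/workflows/ || echo "no develop references left"
```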
@@ -339,7 +339,7 @@ to make SigNoz UI available at [localhost:3301](http://localhost:3301)

 **5.1.1 To install the HotROD sample app:**

 ```bash
-curl -sL https://github.com/SigNoz/signoz/raw/develop/sample-apps/hotrod/hotrod-install.sh \
+curl -sL https://github.com/SigNoz/signoz/raw/main/sample-apps/hotrod/hotrod-install.sh \
 | HELM_RELEASE=my-release SIGNOZ_NAMESPACE=platform bash
 ```

@@ -362,7 +362,7 @@ kubectl -n sample-application run strzal --image=djbingham/curl \

 **5.1.4 To delete the HotROD sample app:**

 ```bash
-curl -sL https://github.com/SigNoz/signoz/raw/develop/sample-apps/hotrod/hotrod-delete.sh \
+curl -sL https://github.com/SigNoz/signoz/raw/main/sample-apps/hotrod/hotrod-delete.sh \
 | HOTROD_NAMESPACE=sample-application bash
 ```

@@ -58,7 +58,7 @@ from the HotROD application, you should see the data generated from hotrod in Si

 ```sh
 kubectl create ns sample-application

-kubectl -n sample-application apply -f https://raw.githubusercontent.com/SigNoz/signoz/develop/sample-apps/hotrod/hotrod.yaml
+kubectl -n sample-application apply -f https://raw.githubusercontent.com/SigNoz/signoz/main/sample-apps/hotrod/hotrod.yaml
 ```

 To generate load:

@@ -146,11 +146,12 @@ services:
         condition: on-failure

   query-service:
-    image: signoz/query-service:0.61.0
+    image: signoz/query-service:0.64.0
     command:
       [
         "-config=/root/config/prometheus.yml",
-        "--use-logs-new-schema=true"
+        "--use-logs-new-schema=true",
+        "--use-trace-new-schema=true"
       ]
     # ports:
     #   - "6060:6060"     # pprof port
@@ -186,7 +187,7 @@ services:
     <<: *db-depend

   frontend:
-    image: signoz/frontend:0.61.0
+    image: signoz/frontend:0.64.0
     deploy:
       restart_policy:
         condition: on-failure
@@ -199,7 +200,7 @@ services:
       - ../common/nginx-config.conf:/etc/nginx/conf.d/default.conf

   otel-collector:
-    image: signoz/signoz-otel-collector:0.111.14
+    image: signoz/signoz-otel-collector:0.111.16
     command:
       [
         "--config=/etc/otel-collector-config.yaml",
@@ -237,7 +238,7 @@ services:
       - query-service

   otel-collector-migrator:
-    image: signoz/signoz-schema-migrator:0.111.14
+    image: signoz/signoz-schema-migrator:0.111.16
     deploy:
       restart_policy:
         condition: on-failure

@@ -110,6 +110,7 @@ exporters:
   clickhousetraces:
     datasource: tcp://clickhouse:9000/signoz_traces
     low_cardinal_exception_grouping: ${env:LOW_CARDINAL_EXCEPTION_GROUPING}
+    use_new_schema: true
   clickhousemetricswrite:
     endpoint: tcp://clickhouse:9000/signoz_metrics
     resource_to_telemetry_conversion:
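Together, the hunks above bump the SigNoz images (query-service and frontend 0.61.0 to 0.64.0, collector and schema migrator 0.111.14 to 0.111.16) and switch both the query service and the trace exporter to the new trace schema. A minimal rollout sketch, assuming the standard Docker Compose deployment; the compose file path is an assumption, not part of this diff:

```bash
# Minimal sketch: pull the bumped images and recreate the stack.
docker compose -f docker-compose.yaml pull
docker compose -f docker-compose.yaml up -d
```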
@@ -69,7 +69,7 @@ services:
       - --storage.path=/data

   otel-collector-migrator:
-    image: signoz/signoz-schema-migrator:${OTELCOL_TAG:-0.111.14}
+    image: signoz/signoz-schema-migrator:${OTELCOL_TAG:-0.111.16}
     container_name: otel-migrator
     command:
       - "sync"
@@ -86,7 +86,7 @@ services:
   # Notes for Maintainers/Contributors who will change Line Numbers of Frontend & Query-Section. Please Update Line Numbers in `./scripts/commentLinesForSetup.sh` & `./CONTRIBUTING.md`
   otel-collector:
     container_name: signoz-otel-collector
-    image: signoz/signoz-otel-collector:0.111.14
+    image: signoz/signoz-otel-collector:0.111.16
     command:
       [
         "--config=/etc/otel-collector-config.yaml",
@@ -25,7 +25,8 @@ services:
     command:
       [
         "-config=/root/config/prometheus.yml",
-        "--use-logs-new-schema=true"
+        "--use-logs-new-schema=true",
+        "--use-trace-new-schema=true"
       ]
     ports:
       - "6060:6060"
@@ -162,12 +162,13 @@ services:
   # Notes for Maintainers/Contributors who will change Line Numbers of Frontend & Query-Section. Please Update Line Numbers in `./scripts/commentLinesForSetup.sh` & `./CONTRIBUTING.md`

   query-service:
-    image: signoz/query-service:${DOCKER_TAG:-0.61.0}
+    image: signoz/query-service:${DOCKER_TAG:-0.64.0}
     container_name: signoz-query-service
     command:
       [
         "-config=/root/config/prometheus.yml",
-        "--use-logs-new-schema=true"
+        "--use-logs-new-schema=true",
+        "--use-trace-new-schema=true"
       ]
     # ports:
     #   - "6060:6060"     # pprof port
@@ -201,7 +202,7 @@ services:
     <<: *db-depend

   frontend:
-    image: signoz/frontend:${DOCKER_TAG:-0.61.0}
+    image: signoz/frontend:${DOCKER_TAG:-0.64.0}
     container_name: signoz-frontend
     restart: on-failure
     depends_on:
@@ -213,7 +214,7 @@ services:
       - ../common/nginx-config.conf:/etc/nginx/conf.d/default.conf

   otel-collector-migrator-sync:
-    image: signoz/signoz-schema-migrator:${OTELCOL_TAG:-0.111.14}
+    image: signoz/signoz-schema-migrator:${OTELCOL_TAG:-0.111.16}
     container_name: otel-migrator-sync
     command:
       - "sync"
@@ -228,7 +229,7 @@ services:
     #   condition: service_healthy

   otel-collector-migrator-async:
-    image: signoz/signoz-schema-migrator:${OTELCOL_TAG:-0.111.14}
+    image: signoz/signoz-schema-migrator:${OTELCOL_TAG:-0.111.16}
     container_name: otel-migrator-async
     command:
       - "async"
@@ -245,7 +246,7 @@ services:
     #   condition: service_healthy

   otel-collector:
-    image: signoz/signoz-otel-collector:${OTELCOL_TAG:-0.111.14}
+    image: signoz/signoz-otel-collector:${OTELCOL_TAG:-0.111.16}
     container_name: signoz-otel-collector
     command:
       [
@@ -167,13 +167,14 @@ services:
   # Notes for Maintainers/Contributors who will change Line Numbers of Frontend & Query-Section. Please Update Line Numbers in `./scripts/commentLinesForSetup.sh` & `./CONTRIBUTING.md`

   query-service:
-    image: signoz/query-service:${DOCKER_TAG:-0.61.0}
+    image: signoz/query-service:${DOCKER_TAG:-0.64.0}
     container_name: signoz-query-service
     command:
       [
         "-config=/root/config/prometheus.yml",
         "-gateway-url=https://api.staging.signoz.cloud",
-        "--use-logs-new-schema=true"
+        "--use-logs-new-schema=true",
+        "--use-trace-new-schema=true"
       ]
     # ports:
     #   - "6060:6060"     # pprof port
@@ -208,7 +209,7 @@ services:
     <<: *db-depend

   frontend:
-    image: signoz/frontend:${DOCKER_TAG:-0.61.0}
+    image: signoz/frontend:${DOCKER_TAG:-0.64.0}
     container_name: signoz-frontend
     restart: on-failure
     depends_on:
@@ -220,7 +221,7 @@ services:
       - ../common/nginx-config.conf:/etc/nginx/conf.d/default.conf

   otel-collector-migrator:
-    image: signoz/signoz-schema-migrator:${OTELCOL_TAG:-0.111.14}
+    image: signoz/signoz-schema-migrator:${OTELCOL_TAG:-0.111.16}
     container_name: otel-migrator
     command:
       - "--dsn=tcp://clickhouse:9000"
@@ -234,7 +235,7 @@ services:


   otel-collector:
-    image: signoz/signoz-otel-collector:${OTELCOL_TAG:-0.111.14}
+    image: signoz/signoz-otel-collector:${OTELCOL_TAG:-0.111.16}
     container_name: signoz-otel-collector
     command:
       [
@@ -119,6 +119,7 @@ exporters:
   clickhousetraces:
     datasource: tcp://clickhouse:9000/signoz_traces
     low_cardinal_exception_grouping: ${env:LOW_CARDINAL_EXCEPTION_GROUPING}
+    use_new_schema: true
  clickhousemetricswrite:
     endpoint: tcp://clickhouse:9000/signoz_metrics
     resource_to_telemetry_conversion:
@@ -103,6 +103,7 @@ func main() {
 	var maxOpenConns int
 	var dialTimeout time.Duration
+	var gatewayUrl string
 	var useLicensesV3 bool

 	flag.BoolVar(&useLogsNewSchema, "use-logs-new-schema", false, "use logs_v2 schema for logs")
 	flag.BoolVar(&useTraceNewSchema, "use-trace-new-schema", false, "use new schema for traces")
@@ -119,6 +120,7 @@ func main() {
 	flag.BoolVar(&enableQueryServiceLogOTLPExport, "enable.query.service.log.otlp.export", false, "(enable query service log otlp export)")
 	flag.StringVar(&cluster, "cluster", "cluster", "(cluster name - defaults to 'cluster')")
+	flag.StringVar(&gatewayUrl, "gateway-url", "", "(url to the gateway)")
 	flag.BoolVar(&useLicensesV3, "use-licenses-v3", false, "use licenses_v3 schema for licenses")

 	flag.Parse()
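With the gateway-url flag registered above, a local run can pass the same arguments the compose files in this range use. A minimal invocation sketch; the binary path and the gateway URL value are assumptions, only the flag names come from this diff:

```bash
# Minimal sketch: run query-service with the flags this change wires up.
# Binary path and gateway URL are assumptions, not part of the diff.
./query-service \
  -config=/root/config/prometheus.yml \
  --use-logs-new-schema=true \
  --use-trace-new-schema=true \
  -gateway-url=https://api.staging.signoz.cloud
```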
@@ -13,8 +13,3 @@ if [ "$branch" = "main" ]; then
   echo "${color_red}${bold}You can't commit directly to the main branch${reset}"
   exit 1
 fi
-
-if [ "$branch" = "develop" ]; then
-  echo "${color_red}${bold}You can't commit directly to the develop branch${reset}"
-  exit 1
-fi
go.mod (2 changes)

@@ -8,7 +8,7 @@ require (
 	github.com/ClickHouse/clickhouse-go/v2 v2.25.0
 	github.com/DATA-DOG/go-sqlmock v1.5.2
 	github.com/SigNoz/govaluate v0.0.0-20240203125216-988004ccc7fd
-	github.com/SigNoz/signoz-otel-collector v0.111.14
+	github.com/SigNoz/signoz-otel-collector v0.111.16
 	github.com/SigNoz/zap_otlp/zap_otlp_encoder v0.0.0-20230822164844-1b861a431974
 	github.com/SigNoz/zap_otlp/zap_otlp_sync v0.0.0-20230822164844-1b861a431974
 	github.com/antonmedv/expr v1.15.3
go.sum (4 changes)

@@ -70,8 +70,8 @@ github.com/SigNoz/govaluate v0.0.0-20240203125216-988004ccc7fd h1:Bk43AsDYe0fhkb
 github.com/SigNoz/govaluate v0.0.0-20240203125216-988004ccc7fd/go.mod h1:nxRcH/OEdM8QxzH37xkGzomr1O0JpYBRS6pwjsWW6Pc=
 github.com/SigNoz/prometheus v1.12.0 h1:+BXeIHyMOOWWa+xjhJ+x80JFva7r1WzWIfIhQ5PUmIE=
 github.com/SigNoz/prometheus v1.12.0/go.mod h1:EqNM27OwmPfqMUk+E+XG1L9rfDFcyXnzzDrg0EPOfxA=
-github.com/SigNoz/signoz-otel-collector v0.111.14 h1:nvRucNK/TTtZKM3Dsr/UNx+LwkjaGwx0yPlMvGw/4j0=
-github.com/SigNoz/signoz-otel-collector v0.111.14/go.mod h1:vRDT10om89DHybN7SRMlt8IN9+/pgh1D57pNHPr2LM4=
+github.com/SigNoz/signoz-otel-collector v0.111.16 h1:535uKH5Oux+35EsI+L3C6pnAP/Ye0PTCbVizXoL+VqE=
+github.com/SigNoz/signoz-otel-collector v0.111.16/go.mod h1:HJ4m0LY1MPsuZmuRF7Ixb+bY8rxgRzI0VXzOedESsjg=
 github.com/SigNoz/zap_otlp v0.1.0 h1:T7rRcFN87GavY8lDGZj0Z3Xv6OhJA6Pj3I9dNPmqvRc=
 github.com/SigNoz/zap_otlp v0.1.0/go.mod h1:lcHvbDbRgvDnPxo9lDlaL1JK2PyOyouP/C3ynnYIvyo=
 github.com/SigNoz/zap_otlp/zap_otlp_encoder v0.0.0-20230822164844-1b861a431974 h1:PKVgdf83Yw+lZJbFtNGBgqXiXNf3+kOXW2qZ7Ms7OaY=
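The go.mod and go.sum entries move in lockstep; a minimal sketch of reproducing this bump with standard Go tooling (conventional usage, not part of the diff):

```bash
# Minimal sketch: bump the collector module and refresh checksums.
go get github.com/SigNoz/signoz-otel-collector@v0.111.16
go mod tidy
```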
@@ -4075,10 +4075,9 @@ func (aH *APIHandler) CreateLogsPipeline(w http.ResponseWriter, r *http.Request)
 		zap.L().Warn("found no pipelines in the http request, this will delete all the pipelines")
 	}

-	for _, p := range postable {
-		if err := p.IsValid(); err != nil {
-			return nil, model.BadRequestStr(err.Error())
-		}
-	}
+	validationErr := aH.LogsParsingPipelineController.ValidatePipelines(ctx, postable)
+	if validationErr != nil {
+		return nil, validationErr
+	}

 	return aH.LogsParsingPipelineController.ApplyPipelines(ctx, postable)

@@ -9,25 +9,25 @@ func generateConsumerSQL(start, end int64, topic, partition, consumerGroup, queu
 	query := fmt.Sprintf(`
 WITH consumer_query AS (
     SELECT
-        serviceName,
+        resource_string_service$$name,
         quantile(0.99)(durationNano) / 1000000 AS p99,
         COUNT(*) AS total_requests,
-        sumIf(1, statusCode = 2) AS error_count,
-        avg(CASE WHEN has(numberTagMap, 'messaging.message.body.size') THEN numberTagMap['messaging.message.body.size'] ELSE NULL END) AS avg_msg_size
-    FROM signoz_traces.distributed_signoz_index_v2
+        sumIf(1, status_code = 2) AS error_count,
+        avg(CASE WHEN has(attributes_number, 'messaging.message.body.size') THEN attributes_number['messaging.message.body.size'] ELSE NULL END) AS avg_msg_size
+    FROM signoz_traces.distributed_signoz_index_v3
     WHERE
         timestamp >= '%d'
         AND timestamp <= '%d'
         AND kind = 5
-        AND msgSystem = '%s'
-        AND stringTagMap['messaging.destination.name'] = '%s'
-        AND stringTagMap['messaging.destination.partition.id'] = '%s'
-        AND stringTagMap['messaging.kafka.consumer.group'] = '%s'
-    GROUP BY serviceName
+        AND attribute_string_messaging$$system = '%s'
+        AND attributes_string['messaging.destination.name'] = '%s'
+        AND attributes_string['messaging.destination.partition.id'] = '%s'
+        AND attributes_string['messaging.kafka.consumer.group'] = '%s'
+    GROUP BY resource_string_service$$name
 )

 SELECT
-    serviceName AS service_name,
+    resource_string_service$$name AS service_name,
     p99,
     COALESCE((error_count * 100.0) / total_requests, 0) AS error_rate,
     COALESCE(total_requests / %d, 0) AS throughput,
@@ -35,7 +35,7 @@ SELECT
 FROM
     consumer_query
 ORDER BY
-    serviceName;
+    resource_string_service$$name;
 `, start, end, queueType, topic, partition, consumerGroup, timeRange)
 	return query
 }
@@ -48,14 +48,14 @@ WITH partition_query AS (
     SELECT
         quantile(0.99)(durationNano) / 1000000 AS p99,
         count(*) AS total_requests,
-        stringTagMap['messaging.destination.name'] AS topic,
-        stringTagMap['messaging.destination.partition.id'] AS partition
-    FROM signoz_traces.distributed_signoz_index_v2
+        attributes_string['messaging.destination.name'] AS topic,
+        attributes_string['messaging.destination.partition.id'] AS partition
+    FROM signoz_traces.distributed_signoz_index_v3
     WHERE
         timestamp >= '%d'
         AND timestamp <= '%d'
         AND kind = 4
-        AND msgSystem = '%s'
+        AND attribute_string_messaging$$system = '%s'
     GROUP BY topic, partition
 )
@@ -78,25 +78,25 @@ func generateConsumerPartitionLatencySQL(start, end int64, topic, partition, que
 	query := fmt.Sprintf(`
 WITH consumer_pl AS (
     SELECT
-        stringTagMap['messaging.kafka.consumer.group'] AS consumer_group,
-        serviceName,
+        attributes_string['messaging.kafka.consumer.group'] AS consumer_group,
+        resource_string_service$$name,
         quantile(0.99)(durationNano) / 1000000 AS p99,
         COUNT(*) AS total_requests,
-        sumIf(1, statusCode = 2) AS error_count
-    FROM signoz_traces.distributed_signoz_index_v2
+        sumIf(1, status_code = 2) AS error_count
+    FROM signoz_traces.distributed_signoz_index_v3
     WHERE
         timestamp >= '%d'
         AND timestamp <= '%d'
         AND kind = 5
-        AND msgSystem = '%s'
-        AND stringTagMap['messaging.destination.name'] = '%s'
-        AND stringTagMap['messaging.destination.partition.id'] = '%s'
-    GROUP BY consumer_group, serviceName
+        AND attribute_string_messaging$$system = '%s'
+        AND attributes_string['messaging.destination.name'] = '%s'
+        AND attributes_string['messaging.destination.partition.id'] = '%s'
+    GROUP BY consumer_group, resource_string_service$$name
 )

 SELECT
     consumer_group,
-    serviceName AS service_name,
+    resource_string_service$$name AS service_name,
     p99,
     COALESCE((error_count * 100.0) / total_requests, 0) AS error_rate,
     COALESCE(total_requests / %d, 0) AS throughput
@@ -115,23 +115,23 @@ func generateProducerPartitionThroughputSQL(start, end int64, queueType string)
 	query := fmt.Sprintf(`
 WITH producer_latency AS (
     SELECT
-        serviceName,
+        resource_string_service$$name,
         quantile(0.99)(durationNano) / 1000000 AS p99,
-        stringTagMap['messaging.destination.name'] AS topic,
+        attributes_string['messaging.destination.name'] AS topic,
         COUNT(*) AS total_requests,
-        sumIf(1, statusCode = 2) AS error_count
-    FROM signoz_traces.distributed_signoz_index_v2
+        sumIf(1, status_code = 2) AS error_count
+    FROM signoz_traces.distributed_signoz_index_v3
     WHERE
         timestamp >= '%d'
         AND timestamp <= '%d'
         AND kind = 4
-        AND msgSystem = '%s'
-    GROUP BY topic, serviceName
+        AND attribute_string_messaging$$system = '%s'
+    GROUP BY topic, resource_string_service$$name
 )

 SELECT
     topic,
-    serviceName AS service_name,
+    resource_string_service$$name AS service_name,
     p99,
     COALESCE((error_count * 100.0) / total_requests, 0) AS error_rate,
     COALESCE(total_requests / %d, 0) AS throughput
@@ -148,17 +148,17 @@ func generateProducerTopicLatencySQL(start, end int64, topic, service, queueType
 WITH consumer_latency AS (
     SELECT
         quantile(0.99)(durationNano) / 1000000 AS p99,
-        stringTagMap['messaging.destination.partition.id'] AS partition,
+        attributes_string['messaging.destination.partition.id'] AS partition,
         COUNT(*) AS total_requests,
-        sumIf(1, statusCode = 2) AS error_count
-    FROM signoz_traces.distributed_signoz_index_v2
+        sumIf(1, status_code = 2) AS error_count
+    FROM signoz_traces.distributed_signoz_index_v3
     WHERE
         timestamp >= '%d'
         AND timestamp <= '%d'
         AND kind = 4
-        AND serviceName = '%s'
-        AND msgSystem = '%s'
-        AND stringTagMap['messaging.destination.name'] = '%s'
+        AND resource_string_service$$name = '%s'
+        AND attribute_string_messaging$$system = '%s'
+        AND attributes_string['messaging.destination.name'] = '%s'
     GROUP BY partition
 )
@@ -179,24 +179,24 @@ func generateConsumerLatencySQL(start, end int64, queueType string) string {
 	query := fmt.Sprintf(`
 WITH consumer_latency AS (
     SELECT
-        serviceName,
-        stringTagMap['messaging.destination.name'] AS topic,
+        resource_string_service$$name,
+        attributes_string['messaging.destination.name'] AS topic,
         quantile(0.99)(durationNano) / 1000000 AS p99,
         COUNT(*) AS total_requests,
-        sumIf(1, statusCode = 2) AS error_count,
-        SUM(numberTagMap['messaging.message.body.size']) AS total_bytes
-    FROM signoz_traces.distributed_signoz_index_v2
+        sumIf(1, status_code = 2) AS error_count,
+        SUM(attributes_number['messaging.message.body.size']) AS total_bytes
+    FROM signoz_traces.distributed_signoz_index_v3
     WHERE
         timestamp >= '%d'
         AND timestamp <= '%d'
         AND kind = 5
-        AND msgSystem = '%s'
-    GROUP BY topic, serviceName
+        AND attribute_string_messaging$$system = '%s'
+    GROUP BY topic, resource_string_service$$name
 )

 SELECT
     topic,
-    serviceName AS service_name,
+    resource_string_service$$name AS service_name,
     p99,
     COALESCE((error_count * 100.0) / total_requests, 0) AS error_rate,
     COALESCE(total_requests / %d, 0) AS ingestion_rate,
@@ -216,17 +216,17 @@ func generateConsumerServiceLatencySQL(start, end int64, topic, service, queueTy
 WITH consumer_latency AS (
     SELECT
         quantile(0.99)(durationNano) / 1000000 AS p99,
-        stringTagMap['messaging.destination.partition.id'] AS partition,
+        attributes_string['messaging.destination.partition.id'] AS partition,
         COUNT(*) AS total_requests,
-        sumIf(1, statusCode = 2) AS error_count
-    FROM signoz_traces.distributed_signoz_index_v2
+        sumIf(1, status_code = 2) AS error_count
+    FROM signoz_traces.distributed_signoz_index_v3
     WHERE
         timestamp >= '%d'
         AND timestamp <= '%d'
         AND kind = 5
-        AND serviceName = '%s'
-        AND msgSystem = '%s'
-        AND stringTagMap['messaging.destination.name'] = '%s'
+        AND resource_string_service$$name = '%s'
+        AND attribute_string_messaging$$system = '%s'
+        AND attributes_string['messaging.destination.name'] = '%s'
     GROUP BY partition
 )
@@ -246,26 +246,26 @@ func generateProducerConsumerEvalSQL(start, end int64, queueType string, evalTim
 	query := fmt.Sprintf(`
 WITH trace_data AS (
     SELECT
-        p.serviceName AS producer_service,
-        c.serviceName AS consumer_service,
-        p.traceID,
+        p.resource_string_service$$name AS producer_service,
+        c.resource_string_service$$name AS consumer_service,
+        p.trace_id,
         p.timestamp AS producer_timestamp,
         c.timestamp AS consumer_timestamp,
         p.durationNano AS durationNano,
         (toUnixTimestamp64Nano(c.timestamp) - toUnixTimestamp64Nano(p.timestamp)) + p.durationNano AS time_difference
     FROM
-        signoz_traces.distributed_signoz_index_v2 p
+        signoz_traces.distributed_signoz_index_v3 p
     INNER JOIN
-        signoz_traces.distributed_signoz_index_v2 c
-    ON p.traceID = c.traceID
-       AND c.parentSpanID = p.spanID
+        signoz_traces.distributed_signoz_index_v3 c
+    ON p.trace_id = c.trace_id
+       AND c.parent_span_id = p.span_id
     WHERE
         p.kind = 4
         AND c.kind = 5
         AND toUnixTimestamp64Nano(p.timestamp) BETWEEN '%d' AND '%d'
         AND toUnixTimestamp64Nano(c.timestamp) BETWEEN '%d' AND '%d'
-        AND c.msgSystem = '%s'
-        AND p.msgSystem = '%s'
+        AND c.attribute_string_messaging$$system = '%s'
+        AND p.attribute_string_messaging$$system = '%s'
 )

 SELECT
@@ -278,7 +278,7 @@ SELECT
     arrayMap(x -> x.1,
         arraySort(
             x -> -x.2,
-            groupArrayIf((traceID, time_difference), time_difference > '%d')
+            groupArrayIf((trace_id, time_difference), time_difference > '%d')
         )
     ),
     1, 10
@@ -296,30 +296,30 @@ func generateProducerSQL(start, end int64, topic, partition, queueType string) s
 	query := fmt.Sprintf(`
 WITH producer_query AS (
     SELECT
-        serviceName,
+        resource_string_service$$name,
         quantile(0.99)(durationNano) / 1000000 AS p99,
         count(*) AS total_count,
-        sumIf(1, statusCode = 2) AS error_count
-    FROM signoz_traces.distributed_signoz_index_v2
+        sumIf(1, status_code = 2) AS error_count
+    FROM signoz_traces.distributed_signoz_index_v3
     WHERE
         timestamp >= '%d'
         AND timestamp <= '%d'
         AND kind = 4
-        AND msgSystem = '%s'
-        AND stringTagMap['messaging.destination.name'] = '%s'
-        AND stringTagMap['messaging.destination.partition.id'] = '%s'
-    GROUP BY serviceName
+        AND attribute_string_messaging$$system = '%s'
+        AND attributes_string['messaging.destination.name'] = '%s'
+        AND attributes_string['messaging.destination.partition.id'] = '%s'
+    GROUP BY resource_string_service$$name
 )

 SELECT
-    serviceName AS service_name,
+    resource_string_service$$name AS service_name,
     p99,
     COALESCE((error_count * 100.0) / total_count, 0) AS error_percentage,
     COALESCE(total_count / %d, 0) AS throughput
 FROM
     producer_query
 ORDER BY
-    serviceName;
+    resource_string_service$$name;
 `, start, end, queueType, topic, partition, timeRange)
 	return query
 }
@@ -328,18 +328,18 @@ func generateNetworkLatencyThroughputSQL(start, end int64, consumerGroup, partit
 	timeRange := (end - start) / 1000000000
 	query := fmt.Sprintf(`
 SELECT
-    stringTagMap['messaging.client_id'] AS client_id,
-    stringTagMap['service.instance.id'] AS service_instance_id,
-    serviceName AS service_name,
+    attributes_string['messaging.client_id'] AS client_id,
+    attributes_string['service.instance.id'] AS service_instance_id,
+    resource_string_service$$name AS service_name,
     count(*) / %d AS throughput
-FROM signoz_traces.distributed_signoz_index_v2
+FROM signoz_traces.distributed_signoz_index_v3
 WHERE
     timestamp >= '%d'
     AND timestamp <= '%d'
     AND kind = 5
-    AND msgSystem = '%s'
-    AND stringTagMap['messaging.kafka.consumer.group'] = '%s'
-    AND stringTagMap['messaging.destination.partition.id'] = '%s'
+    AND attribute_string_messaging$$system = '%s'
+    AND attributes_string['messaging.kafka.consumer.group'] = '%s'
+    AND attributes_string['messaging.destination.partition.id'] = '%s'
 GROUP BY service_name, client_id, service_instance_id
 ORDER BY throughput DESC
 `, timeRange, start, end, queueType, consumerGroup, partitionID)
@@ -350,12 +350,12 @@ func onboardProducersSQL(start, end int64, queueType string) string {
 	query := fmt.Sprintf(`
 SELECT
     COUNT(*) = 0 AS entries,
-    COUNT(IF(msgSystem = '%s', 1, NULL)) = 0 AS queue,
+    COUNT(IF(attribute_string_messaging$$system = '%s', 1, NULL)) = 0 AS queue,
     COUNT(IF(kind = 4, 1, NULL)) = 0 AS kind,
-    COUNT(IF(has(stringTagMap, 'messaging.destination.name'), 1, NULL)) = 0 AS destination,
-    COUNT(IF(has(stringTagMap, 'messaging.destination.partition.id'), 1, NULL)) = 0 AS partition
+    COUNT(IF(has(attributes_string, 'messaging.destination.name'), 1, NULL)) = 0 AS destination,
+    COUNT(IF(has(attributes_string, 'messaging.destination.partition.id'), 1, NULL)) = 0 AS partition
 FROM
-    signoz_traces.distributed_signoz_index_v2
+    signoz_traces.distributed_signoz_index_v3
 WHERE
     timestamp >= '%d'
     AND timestamp <= '%d';`, queueType, start, end)
@@ -366,16 +366,16 @@ func onboardConsumerSQL(start, end int64, queueType string) string {
 	query := fmt.Sprintf(`
 SELECT
     COUNT(*) = 0 AS entries,
-    COUNT(IF(msgSystem = '%s', 1, NULL)) = 0 AS queue,
+    COUNT(IF(attribute_string_messaging$$system = '%s', 1, NULL)) = 0 AS queue,
     COUNT(IF(kind = 5, 1, NULL)) = 0 AS kind,
-    COUNT(serviceName) = 0 AS svc,
-    COUNT(IF(has(stringTagMap, 'messaging.destination.name'), 1, NULL)) = 0 AS destination,
-    COUNT(IF(has(stringTagMap, 'messaging.destination.partition.id'), 1, NULL)) = 0 AS partition,
-    COUNT(IF(has(stringTagMap, 'messaging.kafka.consumer.group'), 1, NULL)) = 0 AS cgroup,
-    COUNT(IF(has(numberTagMap, 'messaging.message.body.size'), 1, NULL)) = 0 AS bodysize,
-    COUNT(IF(has(stringTagMap, 'messaging.client_id'), 1, NULL)) = 0 AS clientid,
-    COUNT(IF(has(stringTagMap, 'service.instance.id'), 1, NULL)) = 0 AS instanceid
-FROM signoz_traces.distributed_signoz_index_v2
+    COUNT(resource_string_service$$name) = 0 AS svc,
+    COUNT(IF(has(attributes_string, 'messaging.destination.name'), 1, NULL)) = 0 AS destination,
+    COUNT(IF(has(attributes_string, 'messaging.destination.partition.id'), 1, NULL)) = 0 AS partition,
+    COUNT(IF(has(attributes_string, 'messaging.kafka.consumer.group'), 1, NULL)) = 0 AS cgroup,
+    COUNT(IF(has(attributes_number, 'messaging.message.body.size'), 1, NULL)) = 0 AS bodysize,
+    COUNT(IF(has(attributes_string, 'messaging.client_id'), 1, NULL)) = 0 AS clientid,
+    COUNT(IF(has(attributes_string, 'service.instance.id'), 1, NULL)) = 0 AS instanceid
+FROM signoz_traces.distributed_signoz_index_v3
 WHERE
     timestamp >= '%d'
     AND timestamp <= '%d';`, queueType, start, end)
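The hunks above port every Kafka messaging query from the v2 trace schema to v3: `distributed_signoz_index_v2` becomes `distributed_signoz_index_v3`, `serviceName` becomes `resource_string_service$$name`, `statusCode` becomes `status_code`, `traceID`/`spanID`/`parentSpanID` become `trace_id`/`span_id`/`parent_span_id`, `msgSystem` becomes `attribute_string_messaging$$system`, and the `stringTagMap`/`numberTagMap` maps become `attributes_string`/`attributes_number`. A minimal spot-check sketch; the host and the 'kafka' queue value are assumptions, while the identifiers come from the diff:

```bash
# Minimal sketch: confirm the v3 table serves the renamed columns.
clickhouse-client --host localhost --query '
SELECT
    resource_string_service$$name AS service_name,
    count(*) AS spans
FROM signoz_traces.distributed_signoz_index_v3
WHERE attribute_string_messaging$$system = '\''kafka'\''
GROUP BY service_name
ORDER BY spans DESC
LIMIT 10'
```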
@@ -94,6 +94,45 @@ func (ic *LogParsingPipelineController) ApplyPipelines(
 	return ic.GetPipelinesByVersion(ctx, cfg.Version)
 }

+func (ic *LogParsingPipelineController) ValidatePipelines(
+	ctx context.Context,
+	postedPipelines []PostablePipeline,
+) *model.ApiError {
+	for _, p := range postedPipelines {
+		if err := p.IsValid(); err != nil {
+			return model.BadRequestStr(err.Error())
+		}
+	}
+
+	// Also run a collector simulation to ensure config is fit
+	// for e2e use with a collector
+	pipelines := []Pipeline{}
+	for _, pp := range postedPipelines {
+		pipelines = append(pipelines, Pipeline{
+			Id:          uuid.New().String(),
+			OrderId:     pp.OrderId,
+			Enabled:     pp.Enabled,
+			Name:        pp.Name,
+			Alias:       pp.Alias,
+			Description: &pp.Description,
+			Filter:      pp.Filter,
+			Config:      pp.Config,
+		})
+	}
+
+	sampleLogs := []model.SignozLog{{Body: ""}}
+	_, _, simulationErr := SimulatePipelinesProcessing(
+		ctx, pipelines, sampleLogs,
+	)
+	if simulationErr != nil {
+		return model.BadRequest(fmt.Errorf(
+			"invalid pipelines config: %w", simulationErr.ToError(),
+		))
+	}
+
+	return nil
+}
+
 // Returns effective list of pipelines including user created
 // pipelines and pipelines for installed integrations
 func (ic *LogParsingPipelineController) getEffectivePipelinesByVersion(
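ValidatePipelines folds the per-pipeline IsValid checks together with a collector simulation, and CreateLogsPipeline (see the handler hunk earlier) now calls it before applying anything, surfacing failures as 400s. A minimal sketch of exercising that path over HTTP; the endpoint path, port, token, and payload shape are assumptions, not part of this diff:

```bash
# Minimal sketch: an invalid pipeline should come back as HTTP 400.
# Endpoint, port, token, and payload shape are assumptions.
curl -s -o /dev/null -w '%{http_code}\n' \
  -X POST 'http://localhost:3301/api/v1/logs/pipelines' \
  -H 'Authorization: Bearer <token>' \
  -H 'Content-Type: application/json' \
  -d '{"pipelines":[{"orderId":1,"name":"pipeline 1","alias":"pipeline1","enabled":true}]}'
```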
@@ -90,7 +90,7 @@ func EnableHostsInfraMonitoring() bool {
 	return GetOrDefaultEnv("ENABLE_INFRA_METRICS", "true") == "true"
 }

-var KafkaSpanEval = GetOrDefaultEnv("KAFKA_SPAN_EVAL", "false")
+var KafkaSpanEval = GetOrDefaultEnv("KAFKA_SPAN_EVAL", "true")

 func IsDurationSortFeatureEnabled() bool {
 	isDurationSortFeatureEnabledStr := DurationSortFeature
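KAFKA_SPAN_EVAL now defaults to on, but the environment variable still takes precedence. A minimal override sketch (the binary path is an assumption):

```bash
# Minimal sketch: opt out of Kafka span evaluation despite the new default.
KAFKA_SPAN_EVAL=false ./query-service -config=/root/config/prometheus.yml
```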
@@ -350,6 +350,27 @@ func TestLogPipelinesValidation(t *testing.T) {
 			},
 		},
 		ExpectedResponseStatusCode: 400,
+	}, {
+		Name: "Invalid from field path",
+		Pipeline: logparsingpipeline.PostablePipeline{
+			OrderId: 1,
+			Name:    "pipeline 1",
+			Alias:   "pipeline1",
+			Enabled: true,
+			Filter:  validPipelineFilterSet,
+			Config: []logparsingpipeline.PipelineOperator{
+				{
+					OrderId: 1,
+					ID:      "move",
+					Type:    "move",
+					From:    `attributes.temp_parsed_body."@l"`,
+					To:      "attributes.test",
+					Enabled: true,
+					Name:    "test move",
+				},
+			},
+		},
+		ExpectedResponseStatusCode: 400,
 	},
 }

@@ -5,7 +5,7 @@ Follow the steps in this section to install a sample application named HotR.O.D,

 ```console
 kubectl create ns sample-application

-kubectl -n sample-application apply -f https://github.com/SigNoz/signoz/raw/develop/sample-apps/hotrod/hotrod.yaml
+kubectl -n sample-application apply -f https://github.com/SigNoz/signoz/raw/main/sample-apps/hotrod/hotrod.yaml
 ```

 In case, you have installed SigNoz in namespace other than `platform` or selected Helm release name other than `my-release`, follow the steps below:
@@ -15,7 +15,7 @@ export HELM_RELEASE=my-release-2
 export SIGNOZ_NAMESPACE=platform-2
 export HOTROD_NAMESPACE=sample-application-2

-curl -sL https://github.com/SigNoz/signoz/raw/develop/sample-apps/hotrod/hotrod-install.sh | bash
+curl -sL https://github.com/SigNoz/signoz/raw/main/sample-apps/hotrod/hotrod-install.sh | bash
 ```

 To delete sample application:
@@ -23,7 +23,7 @@ To delete sample application:

 ```console
 export HOTROD_NAMESPACE=sample-application-2

-curl -sL https://github.com/SigNoz/signoz/raw/develop/sample-apps/hotrod/hotrod-delete.sh | bash
+curl -sL https://github.com/SigNoz/signoz/raw/main/sample-apps/hotrod/hotrod-delete.sh | bash
 ```

 For testing with local scripts, you can use the following commands:

@@ -7,7 +7,7 @@ HOTROD_NAMESPACE=${HOTROD_NAMESPACE:-"sample-application"}
 if [[ "${HOTROD_NAMESPACE}" == "default" || "${HOTROD_NAMESPACE}" == "kube-system" || "${HOTROD_NAMESPACE}" == "platform" ]]; then
 	echo "Default k8s namespace and SigNoz namespace must not be deleted"
 	echo "Deleting components only"
-	kubectl delete --namespace="${HOTROD_NAMESPACE}" -f <(cat hotrod-template.yaml || curl -sL https://github.com/SigNoz/signoz/raw/develop/sample-apps/hotrod/hotrod-template.yaml)
+	kubectl delete --namespace="${HOTROD_NAMESPACE}" -f <(cat hotrod-template.yaml || curl -sL https://github.com/SigNoz/signoz/raw/main/sample-apps/hotrod/hotrod-template.yaml)
 else
 	echo "Delete HotROD sample app namespace ${HOTROD_NAMESPACE}"
 	kubectl delete namespace "${HOTROD_NAMESPACE}"
@@ -37,7 +37,7 @@ kubectl create namespace "$HOTROD_NAMESPACE" --save-config --dry-run -o yaml 2>/

 # Setup sample apps into specified namespace
 kubectl apply --namespace="${HOTROD_NAMESPACE}" -f <( \
-	(cat hotrod-template.yaml 2>/dev/null || curl -sL https://github.com/SigNoz/signoz/raw/develop/sample-apps/hotrod/hotrod-template.yaml) | \
+	(cat hotrod-template.yaml 2>/dev/null || curl -sL https://github.com/SigNoz/signoz/raw/main/sample-apps/hotrod/hotrod-template.yaml) | \
 	HOTROD_NAMESPACE="${HOTROD_NAMESPACE}" \
 	HOTROD_IMAGE="${HOTROD_IMAGE}" \
 	LOCUST_IMAGE="${LOCUST_IMAGE}" \