Compare commits: limiting-a...feat/7084
44 Commits
| Author | SHA1 | Date |
|---|---|---|
|  | 7130082cc6 |  |
|  | 45901951fb |  |
|  | 08b9e9b9fa |  |
|  | a41d413203 |  |
|  | d83daa6085 |  |
|  | 7394c06fdf |  |
|  | 03d9c620f2 |  |
|  | d4bdcb1f2d |  |
|  | 51794bae10 |  |
|  | 2a53b953a1 |  |
|  | 969d6b00e7 |  |
|  | ab0ae8e6ad |  |
|  | 9676b7e068 |  |
|  | 8215cabf71 |  |
|  | 0ca886e213 |  |
|  | 9385029f5c |  |
|  | f6ac729e70 |  |
|  | 59d3198b80 |  |
|  | fa90fad373 |  |
|  | 77420b9d3a |  |
|  | cecc57e72d |  |
|  | 512adc6471 |  |
|  | 42fefc65be |  |
|  | dcc659907a |  |
|  | b90ed375c2 |  |
|  | a8a3bd3f7d |  |
|  | 7405bfbbee |  |
|  | 67e822e23e |  |
|  | 60dc479a19 |  |
|  | 85cf4f4e2e |  |
|  | 83aa48c721 |  |
|  | 823f84f857 |  |
|  | 8a4d45084d |  |
|  | 5bc6c33899 |  |
|  | 83f6dea2db |  |
|  | 7031c866e8 |  |
|  | 46bc7c7a21 |  |
|  | 6d9741c3a4 |  |
|  | 610a8ec704 |  |
|  | cd9f27ab08 |  |
|  | 2b5a0ec496 |  |
|  | a9440c010c |  |
|  | f9e7eff357 |  |
|  | 47d8c9e3e7 |  |
.github/workflows/build.yaml (1 change, vendored)

@@ -3,7 +3,6 @@ name: build-pipeline
 on:
   pull_request:
     branches:
-      - develop
       - main
       - release/v*
.github/workflows/docs.yml (2 changes, vendored)

@@ -3,7 +3,7 @@ name: "Update PR labels and Block PR until related docs are shipped for the feat
 on:
   pull_request:
     branches:
-      - develop
+      - main
     types: [opened, edited, labeled, unlabeled]

 permissions:
.github/workflows/e2e-k3s.yaml (2 changes, vendored)

@@ -42,7 +42,7 @@ jobs:
         kubectl create ns sample-application

         # apply hotrod k8s manifest file
-        kubectl -n sample-application apply -f https://raw.githubusercontent.com/SigNoz/signoz/develop/sample-apps/hotrod/hotrod.yaml
+        kubectl -n sample-application apply -f https://raw.githubusercontent.com/SigNoz/signoz/main/sample-apps/hotrod/hotrod.yaml

         # wait for all deployments in sample-application namespace to be READY
         kubectl -n sample-application get deploy --output name | xargs -r -n1 -t kubectl -n sample-application rollout status --timeout=300s
.github/workflows/jest-coverage-changes.yml (5 changes, vendored)

@@ -2,7 +2,8 @@ name: Jest Coverage - changed files

 on:
   pull_request:
-    branches: develop
+    branches:
+      - main

 jobs:
   build:

@@ -11,7 +12,7 @@ jobs:
       - name: Checkout
         uses: actions/checkout@v4
         with:
-          ref: "refs/heads/develop"
+          ref: "refs/heads/main"
           token: ${{ secrets.GITHUB_TOKEN }} # Provide the GitHub token for authentication

       - name: Fetch branch
.github/workflows/push.yaml (1 change, vendored)

@@ -4,7 +4,6 @@ on:
   push:
     branches:
       - main
-      - develop
     tags:
       - v*
.github/workflows/sonar.yml (1 change, vendored)

@@ -3,7 +3,6 @@ on:
   pull_request:
     branches:
       - main
-      - develop
     paths:
       - 'frontend/**'

 defaults:
.github/workflows/staging-deployment.yaml (6 changes, vendored)

@@ -1,12 +1,12 @@
 name: staging-deployment
-# Trigger deployment only on push to develop branch
+# Trigger deployment only on push to main branch
 on:
   push:
     branches:
-      - develop
+      - main
 jobs:
   deploy:
-    name: Deploy latest develop branch to staging
+    name: Deploy latest main branch to staging
     runs-on: ubuntu-latest
     environment: staging
     permissions:
.github/workflows/testing-deployment.yaml (2 changes, vendored)

@@ -44,7 +44,7 @@ jobs:
           git add .
           git stash push -m "stashed on $(date --iso-8601=seconds)"
           git fetch origin
-          git checkout develop
+          git checkout main
           git pull
           # This is added to include the scenerio when new commit in PR is force-pushed
           git branch -D ${GITHUB_BRANCH}
@@ -339,7 +339,7 @@ to make SigNoz UI available at [localhost:3301](http://localhost:3301)
 **5.1.1 To install the HotROD sample app:**

 ```bash
-curl -sL https://github.com/SigNoz/signoz/raw/develop/sample-apps/hotrod/hotrod-install.sh \
+curl -sL https://github.com/SigNoz/signoz/raw/main/sample-apps/hotrod/hotrod-install.sh \
 | HELM_RELEASE=my-release SIGNOZ_NAMESPACE=platform bash
 ```

@@ -362,7 +362,7 @@ kubectl -n sample-application run strzal --image=djbingham/curl \
 **5.1.4 To delete the HotROD sample app:**

 ```bash
-curl -sL https://github.com/SigNoz/signoz/raw/develop/sample-apps/hotrod/hotrod-delete.sh \
+curl -sL https://github.com/SigNoz/signoz/raw/main/sample-apps/hotrod/hotrod-delete.sh \
 | HOTROD_NAMESPACE=sample-application bash
 ```
@@ -58,7 +58,7 @@ from the HotROD application, you should see the data generated from hotrod in Si
 ```sh
 kubectl create ns sample-application

-kubectl -n sample-application apply -f https://raw.githubusercontent.com/SigNoz/signoz/develop/sample-apps/hotrod/hotrod.yaml
+kubectl -n sample-application apply -f https://raw.githubusercontent.com/SigNoz/signoz/main/sample-apps/hotrod/hotrod.yaml
 ```

 To generate load:
@@ -146,11 +146,12 @@ services:
         condition: on-failure

   query-service:
-    image: signoz/query-service:0.61.0
+    image: signoz/query-service:0.64.0
     command:
       [
         "-config=/root/config/prometheus.yml",
-        "--use-logs-new-schema=true"
+        "--use-logs-new-schema=true",
+        "--use-trace-new-schema=true"
       ]
     # ports:
     #   - "6060:6060" # pprof port
@@ -186,7 +187,7 @@ services:
     <<: *db-depend

   frontend:
-    image: signoz/frontend:0.61.0
+    image: signoz/frontend:0.64.0
     deploy:
       restart_policy:
         condition: on-failure
@@ -199,7 +200,7 @@ services:
       - ../common/nginx-config.conf:/etc/nginx/conf.d/default.conf

   otel-collector:
-    image: signoz/signoz-otel-collector:0.111.14
+    image: signoz/signoz-otel-collector:0.111.16
     command:
       [
         "--config=/etc/otel-collector-config.yaml",
@@ -237,7 +238,7 @@ services:
       - query-service

   otel-collector-migrator:
-    image: signoz/signoz-schema-migrator:0.111.14
+    image: signoz/signoz-schema-migrator:0.111.16
     deploy:
       restart_policy:
         condition: on-failure
@@ -110,6 +110,7 @@ exporters:
   clickhousetraces:
     datasource: tcp://clickhouse:9000/signoz_traces
     low_cardinal_exception_grouping: ${env:LOW_CARDINAL_EXCEPTION_GROUPING}
+    use_new_schema: true
   clickhousemetricswrite:
     endpoint: tcp://clickhouse:9000/signoz_metrics
     resource_to_telemetry_conversion:
@@ -69,7 +69,7 @@ services:
       - --storage.path=/data

   otel-collector-migrator:
-    image: signoz/signoz-schema-migrator:${OTELCOL_TAG:-0.111.14}
+    image: signoz/signoz-schema-migrator:${OTELCOL_TAG:-0.111.16}
     container_name: otel-migrator
     command:
       - "sync"
@@ -86,7 +86,7 @@ services:
   # Notes for Maintainers/Contributors who will change Line Numbers of Frontend & Query-Section. Please Update Line Numbers in `./scripts/commentLinesForSetup.sh` & `./CONTRIBUTING.md`
   otel-collector:
     container_name: signoz-otel-collector
-    image: signoz/signoz-otel-collector:0.111.14
+    image: signoz/signoz-otel-collector:0.111.16
     command:
       [
         "--config=/etc/otel-collector-config.yaml",
@@ -25,7 +25,8 @@ services:
     command:
       [
         "-config=/root/config/prometheus.yml",
-        "--use-logs-new-schema=true"
+        "--use-logs-new-schema=true",
+        "--use-trace-new-schema=true"
       ]
     ports:
       - "6060:6060"
@@ -162,12 +162,13 @@ services:
   # Notes for Maintainers/Contributors who will change Line Numbers of Frontend & Query-Section. Please Update Line Numbers in `./scripts/commentLinesForSetup.sh` & `./CONTRIBUTING.md`

   query-service:
-    image: signoz/query-service:${DOCKER_TAG:-0.61.0}
+    image: signoz/query-service:${DOCKER_TAG:-0.64.0}
     container_name: signoz-query-service
     command:
       [
         "-config=/root/config/prometheus.yml",
-        "--use-logs-new-schema=true"
+        "--use-logs-new-schema=true",
+        "--use-trace-new-schema=true"
       ]
     # ports:
     #   - "6060:6060" # pprof port
@@ -201,7 +202,7 @@ services:
     <<: *db-depend

   frontend:
-    image: signoz/frontend:${DOCKER_TAG:-0.61.0}
+    image: signoz/frontend:${DOCKER_TAG:-0.64.0}
     container_name: signoz-frontend
     restart: on-failure
     depends_on:
@@ -213,7 +214,7 @@ services:
       - ../common/nginx-config.conf:/etc/nginx/conf.d/default.conf

   otel-collector-migrator-sync:
-    image: signoz/signoz-schema-migrator:${OTELCOL_TAG:-0.111.14}
+    image: signoz/signoz-schema-migrator:${OTELCOL_TAG:-0.111.16}
     container_name: otel-migrator-sync
     command:
       - "sync"
@@ -228,7 +229,7 @@ services:
     #   condition: service_healthy

   otel-collector-migrator-async:
-    image: signoz/signoz-schema-migrator:${OTELCOL_TAG:-0.111.14}
+    image: signoz/signoz-schema-migrator:${OTELCOL_TAG:-0.111.16}
     container_name: otel-migrator-async
     command:
       - "async"
@@ -245,7 +246,7 @@ services:
     #   condition: service_healthy

   otel-collector:
-    image: signoz/signoz-otel-collector:${OTELCOL_TAG:-0.111.14}
+    image: signoz/signoz-otel-collector:${OTELCOL_TAG:-0.111.16}
     container_name: signoz-otel-collector
     command:
       [
@@ -167,13 +167,14 @@ services:
   # Notes for Maintainers/Contributors who will change Line Numbers of Frontend & Query-Section. Please Update Line Numbers in `./scripts/commentLinesForSetup.sh` & `./CONTRIBUTING.md`

   query-service:
-    image: signoz/query-service:${DOCKER_TAG:-0.61.0}
+    image: signoz/query-service:${DOCKER_TAG:-0.64.0}
     container_name: signoz-query-service
     command:
       [
         "-config=/root/config/prometheus.yml",
         "-gateway-url=https://api.staging.signoz.cloud",
-        "--use-logs-new-schema=true"
+        "--use-logs-new-schema=true",
+        "--use-trace-new-schema=true"
       ]
     # ports:
     #   - "6060:6060" # pprof port
@@ -208,7 +209,7 @@ services:
     <<: *db-depend

   frontend:
-    image: signoz/frontend:${DOCKER_TAG:-0.61.0}
+    image: signoz/frontend:${DOCKER_TAG:-0.64.0}
     container_name: signoz-frontend
     restart: on-failure
     depends_on:
@@ -220,7 +221,7 @@ services:
       - ../common/nginx-config.conf:/etc/nginx/conf.d/default.conf

   otel-collector-migrator:
-    image: signoz/signoz-schema-migrator:${OTELCOL_TAG:-0.111.14}
+    image: signoz/signoz-schema-migrator:${OTELCOL_TAG:-0.111.16}
     container_name: otel-migrator
     command:
       - "--dsn=tcp://clickhouse:9000"
@@ -234,7 +235,7 @@ services:

   otel-collector:
-    image: signoz/signoz-otel-collector:${OTELCOL_TAG:-0.111.14}
+    image: signoz/signoz-otel-collector:${OTELCOL_TAG:-0.111.16}
     container_name: signoz-otel-collector
     command:
       [
@@ -119,6 +119,7 @@ exporters:
   clickhousetraces:
     datasource: tcp://clickhouse:9000/signoz_traces
     low_cardinal_exception_grouping: ${env:LOW_CARDINAL_EXCEPTION_GROUPING}
+    use_new_schema: true
   clickhousemetricswrite:
     endpoint: tcp://clickhouse:9000/signoz_metrics
     resource_to_telemetry_conversion:
@@ -371,6 +371,7 @@ func (s *Server) createPublicServer(apiHandler *api.APIHandler) (*http.Server, e
 	apiHandler.RegisterQueryRangeV4Routes(r, am)
 	apiHandler.RegisterWebSocketPaths(r, am)
 	apiHandler.RegisterMessagingQueuesRoutes(r, am)
+	apiHandler.MetricExplorerRoutes(r, am)

 	c := cors.New(cors.Options{
 		AllowedOrigins: []string{"*"},
@@ -13,8 +13,3 @@ if [ "$branch" = "main" ]; then
   echo "${color_red}${bold}You can't commit directly to the main branch${reset}"
   exit 1
 fi
-
-if [ "$branch" = "develop" ]; then
-  echo "${color_red}${bold}You can't commit directly to the develop branch${reset}"
-  exit 1
-fi
@@ -24,13 +24,13 @@ const MQServiceDetailTypePerView = (
 	producerLatencyOption: ProducerLatencyOptions,
 ): Record<string, MessagingQueueServiceDetailType[]> => ({
 	[MessagingQueuesViewType.consumerLag.value]: [
-		MessagingQueueServiceDetailType.ConsumerDetails,
 		MessagingQueueServiceDetailType.ProducerDetails,
+		MessagingQueueServiceDetailType.ConsumerDetails,
 		MessagingQueueServiceDetailType.NetworkLatency,
 	],
 	[MessagingQueuesViewType.partitionLatency.value]: [
-		MessagingQueueServiceDetailType.ConsumerDetails,
 		MessagingQueueServiceDetailType.ProducerDetails,
+		MessagingQueueServiceDetailType.ConsumerDetails,
 	],
 	[MessagingQueuesViewType.producerLatency.value]: [
 		producerLatencyOption === ProducerLatencyOptions.Consumers
@@ -122,7 +122,7 @@ function MessagingQueuesDetails({
 	producerLatencyOption: ProducerLatencyOptions;
 }): JSX.Element {
 	const [currentTab, setCurrentTab] = useState<MessagingQueueServiceDetailType>(
-		MessagingQueueServiceDetailType.ConsumerDetails,
+		MessagingQueueServiceDetailType.ProducerDetails,
 	);

 	useEffect(() => {
@@ -179,10 +179,13 @@ export const convertToNanoseconds = (timestamp: number): bigint =>
 export const getStartAndEndTimesInMilliseconds = (
 	timestamp: number,
 ): { start: number; end: number } => {
-	const FIVE_MINUTES_IN_MILLISECONDS = 5 * 60 * 1000; // 5 minutes in milliseconds - check with Shivanshu once
+	const FIVE_MINUTES_IN_MILLISECONDS = 5 * 60 * 1000; // 300,000 milliseconds

-	const start = Math.floor(timestamp);
-	const end = Math.floor(start + FIVE_MINUTES_IN_MILLISECONDS);
+	const pointInTime = Math.floor(timestamp * 1000);
+
+	// Convert timestamp to milliseconds and floor it
+	const start = Math.floor(pointInTime - FIVE_MINUTES_IN_MILLISECONDS);
+	const end = Math.floor(pointInTime + FIVE_MINUTES_IN_MILLISECONDS);

 	return { start, end };
 };
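For reference, a minimal Go sketch of the corrected window logic, under the assumption the hunk above encodes: the incoming timestamp is a Unix value in seconds, it is first converted to milliseconds, and a ±5-minute window is taken around that point (the old code treated the raw value as the window start and only added five minutes).

```go
package main

import "fmt"

const fiveMinutesMs = 5 * 60 * 1000 // 300,000 ms

// windowAround returns a ±5-minute window, in milliseconds, around a
// point in time given as a Unix timestamp in seconds.
func windowAround(tsSeconds float64) (start, end int64) {
	pointInTime := int64(tsSeconds * 1000) // seconds -> milliseconds
	return pointInTime - fiveMinutesMs, pointInTime + fiveMinutesMs
}

func main() {
	start, end := windowAround(1700000000) // hypothetical timestamp
	fmt.Println(start, end)                // 1699999700000 1700000300000
}
```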
@@ -311,8 +314,8 @@ export const getMetaDataAndAPIPerView = (
 	return {
 		[MessagingQueuesViewType.consumerLag.value]: {
 			tableApiPayload: {
-				start: (selectedTimelineQuery?.start || 0) * 1e9,
-				end: (selectedTimelineQuery?.end || 0) * 1e9,
+				start: (selectedTimelineQuery?.start || 0) * 1e6,
+				end: (selectedTimelineQuery?.end || 0) * 1e6,
 				variables: {
 					partition: selectedTimelineQuery?.partition,
 					topic: selectedTimelineQuery?.topic,
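This change pairs with the window fix above: the timeline query's start and end are now millisecond values, so converting them to the nanosecond epoch the table API expects takes a factor of 1e6 (ms to ns) rather than 1e9 (s to ns). A quick sanity check with a hypothetical value:

```go
package main

import "fmt"

func main() {
	const startMs int64 = 1699999700000 // hypothetical window start in milliseconds
	startNs := startMs * 1_000_000      // ms -> ns is 1e6; s -> ns would be 1e9
	fmt.Println(startNs)                // 1699999700000000000
}
```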
go.mod (3 changes)

@@ -8,7 +8,7 @@ require (
 	github.com/ClickHouse/clickhouse-go/v2 v2.25.0
 	github.com/DATA-DOG/go-sqlmock v1.5.2
 	github.com/SigNoz/govaluate v0.0.0-20240203125216-988004ccc7fd
-	github.com/SigNoz/signoz-otel-collector v0.111.14
+	github.com/SigNoz/signoz-otel-collector v0.111.16
 	github.com/SigNoz/zap_otlp/zap_otlp_encoder v0.0.0-20230822164844-1b861a431974
 	github.com/SigNoz/zap_otlp/zap_otlp_sync v0.0.0-20230822164844-1b861a431974
 	github.com/antonmedv/expr v1.15.3

@@ -64,6 +64,7 @@ require (
 	golang.org/x/exp v0.0.0-20240506185415-9bf2ced13842
 	golang.org/x/net v0.29.0
 	golang.org/x/oauth2 v0.23.0
+	golang.org/x/sync v0.10.0
 	golang.org/x/text v0.21.0
 	google.golang.org/grpc v1.67.1
 	google.golang.org/protobuf v1.34.2
go.sum (4 changes)

@@ -70,8 +70,8 @@ github.com/SigNoz/govaluate v0.0.0-20240203125216-988004ccc7fd h1:Bk43AsDYe0fhkb
 github.com/SigNoz/govaluate v0.0.0-20240203125216-988004ccc7fd/go.mod h1:nxRcH/OEdM8QxzH37xkGzomr1O0JpYBRS6pwjsWW6Pc=
 github.com/SigNoz/prometheus v1.12.0 h1:+BXeIHyMOOWWa+xjhJ+x80JFva7r1WzWIfIhQ5PUmIE=
 github.com/SigNoz/prometheus v1.12.0/go.mod h1:EqNM27OwmPfqMUk+E+XG1L9rfDFcyXnzzDrg0EPOfxA=
-github.com/SigNoz/signoz-otel-collector v0.111.14 h1:nvRucNK/TTtZKM3Dsr/UNx+LwkjaGwx0yPlMvGw/4j0=
-github.com/SigNoz/signoz-otel-collector v0.111.14/go.mod h1:vRDT10om89DHybN7SRMlt8IN9+/pgh1D57pNHPr2LM4=
+github.com/SigNoz/signoz-otel-collector v0.111.16 h1:535uKH5Oux+35EsI+L3C6pnAP/Ye0PTCbVizXoL+VqE=
+github.com/SigNoz/signoz-otel-collector v0.111.16/go.mod h1:HJ4m0LY1MPsuZmuRF7Ixb+bY8rxgRzI0VXzOedESsjg=
 github.com/SigNoz/zap_otlp v0.1.0 h1:T7rRcFN87GavY8lDGZj0Z3Xv6OhJA6Pj3I9dNPmqvRc=
 github.com/SigNoz/zap_otlp v0.1.0/go.mod h1:lcHvbDbRgvDnPxo9lDlaL1JK2PyOyouP/C3ynnYIvyo=
 github.com/SigNoz/zap_otlp/zap_otlp_encoder v0.0.0-20230822164844-1b861a431974 h1:PKVgdf83Yw+lZJbFtNGBgqXiXNf3+kOXW2qZ7Ms7OaY=
@@ -16,6 +16,8 @@ import (
 	"sync"
 	"time"

+	"go.signoz.io/signoz/pkg/query-service/model/metrics_explorer"
+
 	"github.com/go-kit/log"
 	"github.com/go-kit/log/level"
 	"github.com/google/uuid"
@@ -1182,7 +1184,7 @@ func (r *ClickHouseReader) GetUsage(ctx context.Context, queryParams *model.GetU

 func (r *ClickHouseReader) SearchTracesV2(ctx context.Context, params *model.SearchTracesParams,
 	smartTraceAlgorithm func(payload []model.SearchSpanResponseItem, targetSpanId string,
-	levelUp int, levelDown int, spanLimit int) ([]model.SearchSpansResult, error)) (*[]model.SearchSpansResult, error) {
+	levelUp int, levelDown int, spanLimit int) ([]model.SearchSpansResult, error)) (*[]model.SearchSpansResult, error) {
 	searchSpansResult := []model.SearchSpansResult{
 		{
 			Columns: []string{"__time", "SpanId", "TraceId", "ServiceName", "Name", "Kind", "DurationNano", "TagsKeys", "TagsValues", "References", "Events", "HasError", "StatusMessage", "StatusCodeString", "SpanKind"},
@@ -1330,7 +1332,7 @@ func (r *ClickHouseReader) SearchTracesV2(ctx context.Context, params *model.Sea

 func (r *ClickHouseReader) SearchTraces(ctx context.Context, params *model.SearchTracesParams,
 	smartTraceAlgorithm func(payload []model.SearchSpanResponseItem, targetSpanId string,
-	levelUp int, levelDown int, spanLimit int) ([]model.SearchSpansResult, error)) (*[]model.SearchSpansResult, error) {
+	levelUp int, levelDown int, spanLimit int) ([]model.SearchSpansResult, error)) (*[]model.SearchSpansResult, error) {

 	if r.useTraceNewSchema {
 		return r.SearchTracesV2(ctx, params, smartTraceAlgorithm)
@@ -2694,8 +2696,8 @@ func (r *ClickHouseReader) GetTagsInfoInLastHeartBeatInterval(ctx context.Contex
 }

 // remove this after sometime
-func removeUnderscoreDuplicateFields(fields []model.LogField) []model.LogField {
-	lookup := map[string]model.LogField{}
+func removeUnderscoreDuplicateFields(fields []model.Field) []model.Field {
+	lookup := map[string]model.Field{}
 	for _, v := range fields {
 		lookup[v.Name+v.DataType] = v
 	}
@@ -2706,7 +2708,7 @@ func removeUnderscoreDuplicateFields(fields []model.LogField) []model.LogField {
 		}
 	}

-	updatedFields := []model.LogField{}
+	updatedFields := []model.Field{}
 	for _, v := range lookup {
 		updatedFields = append(updatedFields, v)
 	}
@@ -2717,11 +2719,11 @@ func (r *ClickHouseReader) GetLogFields(ctx context.Context) (*model.GetFieldsRe
 	// response will contain top level fields from the otel log model
 	response := model.GetFieldsResponse{
 		Selected: constants.StaticSelectedLogFields,
-		Interesting: []model.LogField{},
+		Interesting: []model.Field{},
 	}

 	// get attribute keys
-	attributes := []model.LogField{}
+	attributes := []model.Field{}
 	query := fmt.Sprintf("SELECT DISTINCT name, datatype from %s.%s group by name, datatype", r.logsDB, r.logsAttributeKeys)
 	err := r.db.Select(ctx, &attributes, query)
 	if err != nil {
@@ -2729,7 +2731,7 @@ func (r *ClickHouseReader) GetLogFields(ctx context.Context) (*model.GetFieldsRe
 	}

 	// get resource keys
-	resources := []model.LogField{}
+	resources := []model.Field{}
 	query = fmt.Sprintf("SELECT DISTINCT name, datatype from %s.%s group by name, datatype", r.logsDB, r.logsResourceKeys)
 	err = r.db.Select(ctx, &resources, query)
 	if err != nil {
@@ -2753,9 +2755,11 @@ func (r *ClickHouseReader) GetLogFields(ctx context.Context) (*model.GetFieldsRe
 	return &response, nil
 }

-func (r *ClickHouseReader) extractSelectedAndInterestingFields(tableStatement string, fieldType string, fields *[]model.LogField, response *model.GetFieldsResponse) {
+func (r *ClickHouseReader) extractSelectedAndInterestingFields(tableStatement string, overrideFieldType string, fields *[]model.Field, response *model.GetFieldsResponse) {
 	for _, field := range *fields {
-		field.Type = fieldType
+		if overrideFieldType != "" {
+			field.Type = overrideFieldType
+		}
 		// all static fields are assumed to be selected as we don't allow changing them
 		if isColumn(r.useLogsNewSchema, tableStatement, field.Type, field.Name, field.DataType) {
 			response.Selected = append(response.Selected, field)
@@ -2945,6 +2949,165 @@ func (r *ClickHouseReader) UpdateLogField(ctx context.Context, field *model.Upda
 	return nil
 }

+func (r *ClickHouseReader) GetTraceFields(ctx context.Context) (*model.GetFieldsResponse, *model.ApiError) {
+	// response will contain top level fields from the otel trace model
+	response := model.GetFieldsResponse{
+		Selected:    []model.Field{},
+		Interesting: []model.Field{},
+	}
+
+	// get the top level selected fields
+	for _, field := range constants.NewStaticFieldsTraces {
+		if (v3.AttributeKey{} == field) {
+			continue
+		}
+		response.Selected = append(response.Selected, model.Field{
+			Name:     field.Key,
+			DataType: field.DataType.String(),
+			Type:     constants.Static,
+		})
+	}
+
+	// get attribute keys
+	attributes := []model.Field{}
+	query := fmt.Sprintf("SELECT tagKey, tagType, dataType from %s.%s group by tagKey, tagType, dataType", r.TraceDB, r.spanAttributesKeysTable)
+	rows, err := r.db.Query(ctx, query)
+	if err != nil {
+		return nil, &model.ApiError{Err: err, Typ: model.ErrorInternal}
+	}
+	defer rows.Close()
+
+	var tagKey string
+	var dataType string
+	var tagType string
+	for rows.Next() {
+		if err := rows.Scan(&tagKey, &tagType, &dataType); err != nil {
+			return nil, &model.ApiError{Err: err, Typ: model.ErrorInternal}
+		}
+		attributes = append(attributes, model.Field{
+			Name:     tagKey,
+			DataType: dataType,
+			Type:     tagType,
+		})
+	}
+
+	statements := []model.ShowCreateTableStatement{}
+	query = fmt.Sprintf("SHOW CREATE TABLE %s.%s", r.TraceDB, r.traceLocalTableName)
+	err = r.db.Select(ctx, &statements, query)
+	if err != nil {
+		return nil, &model.ApiError{Err: err, Typ: model.ErrorInternal}
+	}
+
+	r.extractSelectedAndInterestingFields(statements[0].Statement, "", &attributes, &response)
+
+	return &response, nil
+
+}
+
+func (r *ClickHouseReader) UpdateTraceField(ctx context.Context, field *model.UpdateField) *model.ApiError {
+	if !field.Selected {
+		return model.ForbiddenError(errors.New("removing a selected field is not allowed, please reach out to support."))
+	}
+
+	// name of the materialized column
+	colname := utils.GetClickhouseColumnNameV2(field.Type, field.DataType, field.Name)
+
+	field.DataType = strings.ToLower(field.DataType)
+
+	// dataType and chDataType of the materialized column
+	var dataTypeMap = map[string]string{
+		"string":  "string",
+		"bool":    "bool",
+		"int64":   "number",
+		"float64": "number",
+	}
+	var chDataTypeMap = map[string]string{
+		"string":  "String",
+		"bool":    "Bool",
+		"int64":   "Float64",
+		"float64": "Float64",
+	}
+	chDataType := chDataTypeMap[field.DataType]
+	dataType := dataTypeMap[field.DataType]
+
+	// typeName: tag => attributes, resource => resources
+	typeName := field.Type
+	if field.Type == string(v3.AttributeKeyTypeTag) {
+		typeName = constants.Attributes
+	} else if field.Type == string(v3.AttributeKeyTypeResource) {
+		typeName = constants.Resources
+	}
+
+	attrColName := fmt.Sprintf("%s_%s", typeName, dataType)
+	for _, table := range []string{r.traceLocalTableName, r.traceTableName} {
+		q := "ALTER TABLE %s.%s ON CLUSTER %s ADD COLUMN IF NOT EXISTS `%s` %s DEFAULT %s['%s'] CODEC(ZSTD(1))"
+		query := fmt.Sprintf(q,
+			r.TraceDB, table,
+			r.cluster,
+			colname, chDataType,
+			attrColName,
+			field.Name,
+		)
+		err := r.db.Exec(ctx, query)
+		if err != nil {
+			return &model.ApiError{Err: err, Typ: model.ErrorInternal}
+		}
+
+		query = fmt.Sprintf("ALTER TABLE %s.%s ON CLUSTER %s ADD COLUMN IF NOT EXISTS `%s_exists` bool DEFAULT if(mapContains(%s, '%s') != 0, true, false) CODEC(ZSTD(1))",
+			r.TraceDB, table,
+			r.cluster,
+			colname,
+			attrColName,
+			field.Name,
+		)
+		err = r.db.Exec(ctx, query)
+		if err != nil {
+			return &model.ApiError{Err: err, Typ: model.ErrorInternal}
+		}
+	}
+
+	// create the index
+	if strings.ToLower(field.DataType) == "bool" {
+		// there is no point in creating index for bool attributes as the cardinality is just 2
+		return nil
+	}
+
+	if field.IndexType == "" {
+		field.IndexType = constants.DefaultLogSkipIndexType
+	}
+	if field.IndexGranularity == 0 {
+		field.IndexGranularity = constants.DefaultLogSkipIndexGranularity
+	}
+	query := fmt.Sprintf("ALTER TABLE %s.%s ON CLUSTER %s ADD INDEX IF NOT EXISTS `%s_idx` (`%s`) TYPE %s GRANULARITY %d",
+		r.TraceDB, r.traceLocalTableName,
+		r.cluster,
+		colname,
+		colname,
+		field.IndexType,
+		field.IndexGranularity,
+	)
+	err := r.db.Exec(ctx, query)
+	if err != nil {
+		return &model.ApiError{Err: err, Typ: model.ErrorInternal}
+	}
+
+	// add a default minmax index for numbers
+	if dataType == "number" {
+		query = fmt.Sprintf("ALTER TABLE %s.%s ON CLUSTER %s ADD INDEX IF NOT EXISTS `%s_minmax_idx` (`%s`) TYPE minmax GRANULARITY 1",
+			r.TraceDB, r.traceLocalTableName,
+			r.cluster,
+			colname,
+			colname,
+		)
+		err = r.db.Exec(ctx, query)
+		if err != nil {
+			return &model.ApiError{Err: err, Typ: model.ErrorInternal}
+		}
+	}
+
+	return nil
+}
+
 func (r *ClickHouseReader) GetLogs(ctx context.Context, params *model.LogsFilterParams) (*[]model.SignozLog, *model.ApiError) {
 	response := []model.SignozLog{}
 	fields, apiErr := r.GetLogFields(ctx)
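To make the DDL concrete, here is a hedged sketch of the two ALTER statements the loop in UpdateTraceField generates for a hypothetical string tag `http.method`. The format strings are taken from the diff itself; the database, table, cluster, and materialized column names are assumptions for illustration (the real column name comes from utils.GetClickhouseColumnNameV2).

```go
package main

import "fmt"

func main() {
	// All identifiers below are hypothetical stand-ins, not values from the PR.
	const (
		db          = "signoz_traces"
		table       = "signoz_index_v3"
		cluster     = "cluster"
		colname     = "attribute_string_http_method" // assumed materialized column name
		chDataType  = "String"
		attrColName = "attributes_string" // typeName "attributes" + dataType "string"
		fieldName   = "http.method"
	)
	// Column holding the attribute value, defaulted from the attributes map.
	fmt.Printf("ALTER TABLE %s.%s ON CLUSTER %s ADD COLUMN IF NOT EXISTS `%s` %s DEFAULT %s['%s'] CODEC(ZSTD(1))\n",
		db, table, cluster, colname, chDataType, attrColName, fieldName)
	// Companion "_exists" column marking rows that actually carry the attribute.
	fmt.Printf("ALTER TABLE %s.%s ON CLUSTER %s ADD COLUMN IF NOT EXISTS `%s_exists` bool DEFAULT if(mapContains(%s, '%s') != 0, true, false) CODEC(ZSTD(1))\n",
		db, table, cluster, colname, attrColName, fieldName)
}
```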
@@ -5051,3 +5214,528 @@ func (r *ClickHouseReader) SubscribeToQueryProgress(
 ) (<-chan model.QueryProgress, func(), *model.ApiError) {
 	return r.queryProgressTracker.SubscribeToQueryProgress(queryId)
 }
+
+func (r *ClickHouseReader) GetAllMetricFilterAttributeKeys(ctx context.Context, req *metrics_explorer.FilterKeyRequest, skipDotNames bool) (*[]v3.AttributeKey, *model.ApiError) {
+	var rows driver.Rows
+	var response []v3.AttributeKey
+	query := fmt.Sprintf("SELECT arrayJoin(tagKeys) AS distinctTagKey FROM (SELECT JSONExtractKeys(labels) AS tagKeys FROM %s.%s WHERE unix_milli >= $1 GROUP BY tagKeys) WHERE distinctTagKey ILIKE $2 AND distinctTagKey NOT LIKE '\\_\\_%%' GROUP BY distinctTagKey", signozMetricDBName, signozTSTableNameV41Day)
+	if req.Limit != 0 {
+		query = query + fmt.Sprintf(" LIMIT %d;", req.Limit)
+	}
+	rows, err := r.db.Query(ctx, query, common.PastDayRoundOff(), fmt.Sprintf("%%%s%%", req.SearchText))
+	if err != nil {
+		zap.L().Error("Error while executing query", zap.Error(err))
+		return nil, &model.ApiError{Typ: "ClickHouseError", Err: err}
+	}
+
+	var attributeKey string
+	for rows.Next() {
+		if err := rows.Scan(&attributeKey); err != nil {
+			return nil, &model.ApiError{Typ: "ClickHouseError", Err: err}
+		}
+		if skipDotNames && strings.Contains(attributeKey, ".") {
+			continue
+		}
+		key := v3.AttributeKey{
+			Key:      attributeKey,
+			DataType: v3.AttributeKeyDataTypeString, // https://github.com/OpenObservability/OpenMetrics/blob/main/proto/openmetrics_data_model.proto#L64-L72.
+			Type:     v3.AttributeKeyTypeTag,
+			IsColumn: false,
+		}
+		response = append(response, key)
+	}
+	return &response, nil
+}
+
+func (r *ClickHouseReader) GetAllMetricFilterAttributeValues(ctx context.Context, req *metrics_explorer.FilterValueRequest) ([]string, *model.ApiError) {
+	var query string
+	var err error
+	var rows driver.Rows
+	var attributeValues []string
+
+	query = fmt.Sprintf("SELECT JSONExtractString(labels, $1) AS tagValue FROM %s.%s WHERE JSONExtractString(labels, $2) ILIKE $3 AND unix_milli >= $4 GROUP BY tagValue", signozMetricDBName, signozTSTableNameV41Day)
+	if req.Limit != 0 {
+		query = query + fmt.Sprintf(" LIMIT %d;", req.Limit)
+	}
+	rows, err = r.db.Query(ctx, query, req.FilterKey, req.FilterKey, fmt.Sprintf("%%%s%%", req.SearchText), common.PastDayRoundOff())
+
+	if err != nil {
+		zap.L().Error("Error while executing query", zap.Error(err))
+		return nil, &model.ApiError{Typ: "ClickHouseError", Err: err}
+	}
+	defer rows.Close()
+
+	var atrributeValue string
+	for rows.Next() {
+		if err := rows.Scan(&atrributeValue); err != nil {
+			return nil, &model.ApiError{Typ: "ClickHouseError", Err: err}
+		}
+		attributeValues = append(attributeValues, atrributeValue)
+	}
+	return attributeValues, nil
+}
+
+func (r *ClickHouseReader) GetAllMetricFilterUnits(ctx context.Context, req *metrics_explorer.FilterValueRequest) ([]string, *model.ApiError) {
+	var rows driver.Rows
+	var response []string
+	query := fmt.Sprintf("SELECT DISTINCT unit FROM %s.%s WHERE unit ILIKE $1 AND unit IS NOT NULL ORDER BY unit", signozMetricDBName, signozTSTableNameV41Day)
+	if req.Limit != 0 {
+		query = query + fmt.Sprintf(" LIMIT %d;", req.Limit)
+	}
+
+	rows, err := r.db.Query(ctx, query, fmt.Sprintf("%%%s%%", req.SearchText))
+	if err != nil {
+		zap.L().Error("Error while executing query", zap.Error(err))
+		return nil, &model.ApiError{Typ: "ClickHouseError", Err: err}
+	}
+
+	var attributeKey string
+	for rows.Next() {
+		if err := rows.Scan(&attributeKey); err != nil {
+			return nil, &model.ApiError{Typ: "ClickHouseError", Err: err}
+		}
+		response = append(response, attributeKey)
+	}
+	return response, nil
+}
+
+func (r *ClickHouseReader) GetMetricsDataPointsAndLastReceived(ctx context.Context, metricName string) (uint64, uint64, *model.ApiError) {
+	query := fmt.Sprintf("SELECT COUNT(*) AS data_points, MAX(unix_milli) AS last_received_time FROM %s.%s WHERE metric_name = ?", signozMetricDBName, signozSampleTableName)
+	var lastRecievedTimestamp int64 // Changed from uint64 to int64
+	var dataPoints uint64
+	err := r.db.QueryRow(ctx, query, metricName).Scan(&dataPoints, &lastRecievedTimestamp)
+	if err != nil {
+		return 0, 0, &model.ApiError{Typ: "ClickHouseError", Err: err}
+	}
+	return dataPoints, uint64(lastRecievedTimestamp), nil // Convert to uint64 before returning
+}
+
+func (r *ClickHouseReader) GetTotalTimeSeriesForMetricName(ctx context.Context, metricName string) (uint64, *model.ApiError) {
+	query := fmt.Sprintf(`SELECT
+		count(DISTINCT fingerprint) AS timeSeriesCount
+	FROM %s.%s
+	WHERE metric_name = ?;`, signozMetricDBName, signozTSTableNameV41Week)
+	var timeSeriesCount uint64
+	err := r.db.QueryRow(ctx, query, metricName).Scan(&timeSeriesCount)
+	if err != nil {
+		return 0, &model.ApiError{Typ: "ClickHouseError", Err: err}
+	}
+	return timeSeriesCount, nil
+}
+
+func (r *ClickHouseReader) GetAttributesForMetricName(ctx context.Context, metricName string) (*[]metrics_explorer.Attribute, *model.ApiError) {
+	query := fmt.Sprintf(`
+		SELECT
+			kv.1 AS key,
+			arrayMap(x -> trim(BOTH '\"' FROM x), groupUniqArray(kv.2)) AS values,
+			length(groupUniqArray(kv.2)) AS valueCount
+		FROM %s.%s
+		ARRAY JOIN arrayFilter(x -> NOT startsWith(x.1, '__'), JSONExtractKeysAndValuesRaw(labels)) AS kv
+		WHERE metric_name = ?
+		GROUP BY kv.1
+		ORDER BY valueCount DESC;
+	`, signozMetricDBName, signozTSTableNameV41Week)
+
+	rows, err := r.db.Query(ctx, query, metricName)
+	if err != nil {
+		return nil, &model.ApiError{Typ: "ClickHouseError", Err: err}
+	}
+	defer rows.Close() // Ensure the rows are closed
+
+	var attributesList []metrics_explorer.Attribute
+	for rows.Next() {
+		var key string
+		var values []string
+		var valueCount uint64
+
+		// Manually scan each value into its corresponding variable
+		if err := rows.Scan(&key, &values, &valueCount); err != nil {
+			return nil, &model.ApiError{Typ: "ClickHouseError", Err: err}
+		}
+
+		// Append the scanned values into the struct
+		attributesList = append(attributesList, metrics_explorer.Attribute{
+			Key:        key,
+			Value:      values,
+			ValueCount: valueCount,
+		})
+	}
+
+	// Handle any errors encountered while scanning rows
+	if err := rows.Err(); err != nil {
+		return nil, &model.ApiError{Typ: "ClickHouseError", Err: err}
+	}
+
+	return &attributesList, nil
+}
+
+func (r *ClickHouseReader) GetActiveTimeSeriesForMetricName(ctx context.Context, metricName string, duration time.Duration) (uint64, *model.ApiError) {
+	milli := time.Now().Add(-duration).UnixMilli()
+	query := fmt.Sprintf("SELECT count(DISTINCT fingerprint) FROM %s.%s WHERE metric_name = '%s' and unix_milli >= ?", signozMetricDBName, signozTSTableNameV4, metricName)
+	var timeSeries uint64
+	// Using QueryRow instead of Select since we're only expecting a single value
+	err := r.db.QueryRow(ctx, query, milli).Scan(&timeSeries)
+	if err != nil {
+		return 0, &model.ApiError{Typ: "ClickHouseError", Err: err}
+	}
+	return timeSeries, nil
+}
+
+func (r *ClickHouseReader) ListSummaryMetrics(ctx context.Context, req *metrics_explorer.SummaryListMetricsRequest) (*metrics_explorer.SummaryListMetricsResponse, *model.ApiError) {
+	var args []interface{}
+
+	conditions, _ := utils.BuildFilterConditions(&req.Filters, "t")
+	whereClause := ""
+	if conditions != nil {
+		whereClause = "AND " + strings.Join(conditions, " AND ")
+	}
+
+	orderByClauseFirstQuery := ""
+	firstQueryLimit := req.Limit
+	dataPointsOrder := false
+
+	if len(req.OrderBy) > 0 {
+		orderPartsFirstQuery := []string{}
+		for _, order := range req.OrderBy {
+			if order.ColumnName == "datapoints" {
+				dataPointsOrder = true
+				orderPartsFirstQuery = append(orderPartsFirstQuery, fmt.Sprintf("timeSeries %s", order.Order))
+				if req.Limit < 50 {
+					firstQueryLimit = 50
+				}
+			} else {
+				orderPartsFirstQuery = append(orderPartsFirstQuery, fmt.Sprintf("%s %s", order.ColumnName, order.Order))
+			}
+		}
+		orderByClauseFirstQuery = "ORDER BY " + strings.Join(orderPartsFirstQuery, ", ")
+	}
+
+	start, end, tsTable := utils.WhichTSTableToUse(req.StartDate, req.EndDate)
+	sampleTable, countExp := utils.WhichSampleTableToUse(req.StartDate, req.EndDate)
+
+	metricsQuery := fmt.Sprintf(
+		`SELECT
+			t.metric_name AS metric_name,
+			ANY_VALUE(t.description) AS description,
+			ANY_VALUE(t.type) AS type,
+			ANY_VALUE(t.unit),
+			COUNT(DISTINCT t.fingerprint) AS timeSeries
+		FROM %s.%s AS t
+		WHERE unix_milli BETWEEN ? AND ?
+		%s
+		GROUP BY t.metric_name
+		%s
+		LIMIT %d OFFSET %d;`,
+		signozMetricDBName, tsTable, whereClause, orderByClauseFirstQuery, firstQueryLimit, req.Offset)
+
+	args = append(args, start, end)
+	valueCtx := context.WithValue(ctx, "clickhouse_max_threads", 8)
+	rows, err := r.db.Query(valueCtx, metricsQuery, args...)
+	if err != nil {
+		zap.L().Error("Error executing metrics query", zap.Error(err))
+		return &metrics_explorer.SummaryListMetricsResponse{}, &model.ApiError{Typ: "ClickHouseError", Err: err}
+	}
+	defer rows.Close()
+
+	var response metrics_explorer.SummaryListMetricsResponse
+	var metricNames []string
+
+	for rows.Next() {
+		var metric metrics_explorer.MetricDetail
+		if err := rows.Scan(&metric.MetricName, &metric.Description, &metric.Type, &metric.Unit, &metric.TimeSeries); err != nil {
+			zap.L().Error("Error scanning metric row", zap.Error(err))
+			return &response, &model.ApiError{Typ: "ClickHouseError", Err: err}
+		}
+		metricNames = append(metricNames, metric.MetricName)
+		response.Metrics = append(response.Metrics, metric)
+	}
+	if err := rows.Err(); err != nil {
+		zap.L().Error("Error iterating over metric rows", zap.Error(err))
+		return &response, &model.ApiError{Typ: "ClickHouseError", Err: err}
+	}
+
+	if len(metricNames) == 0 {
+		return &response, nil
+	}
+
+	metricsList := "'" + strings.Join(metricNames, "', '") + "'"
+	if dataPointsOrder {
+		orderByClauseFirstQuery = fmt.Sprintf("ORDER BY s.samples %s", req.OrderBy[0].Order)
+	} else {
+		orderByClauseFirstQuery = ""
+	}
+
+	sampleQuery := fmt.Sprintf(
+		`SELECT
+			s.samples,
+			s.metric_name,
+			s.unix_milli AS lastReceived
+		FROM (
+			SELECT
+				metric_name,
+				%s AS samples,
+				max(unix_milli) as unix_milli
+			FROM %s.%s
+			WHERE fingerprint IN (
+				SELECT fingerprint
+				FROM %s.%s
+				WHERE unix_milli BETWEEN ? AND ?
+				%s
+				AND metric_name IN (%s)
+				GROUP BY fingerprint
+			)
+			AND metric_name in (%s)
+			GROUP BY metric_name
+		) AS s
+		%s
+		LIMIT %d OFFSET %d;`,
+		countExp, signozMetricDBName, sampleTable, signozMetricDBName, tsTable,
+		whereClause, metricsList, metricsList, orderByClauseFirstQuery,
+		req.Limit, req.Offset)
+
+	args = append(args, start, end)
+	rows, err = r.db.Query(valueCtx, sampleQuery, args...)
+	if err != nil {
+		zap.L().Error("Error executing samples query", zap.Error(err))
+		return &response, &model.ApiError{Typ: "ClickHouseError", Err: err}
+	}
+	defer rows.Close()
+
+	samplesMap := make(map[string]uint64)
+	lastReceivedMap := make(map[string]int64)
+
+	for rows.Next() {
+		var samples uint64
+		var metricName string
+		var lastReceived int64
+		if err := rows.Scan(&samples, &metricName, &lastReceived); err != nil {
+			zap.L().Error("Error scanning sample row", zap.Error(err))
+			return &response, &model.ApiError{Typ: "ClickHouseError", Err: err}
+		}
+		samplesMap[metricName] = samples
+		lastReceivedMap[metricName] = lastReceived
+	}
+	if err := rows.Err(); err != nil {
+		zap.L().Error("Error iterating over sample rows", zap.Error(err))
+		return &response, &model.ApiError{Typ: "ClickHouseError", Err: err}
+	}
+
+	var filteredMetrics []metrics_explorer.MetricDetail
+	for i := range response.Metrics {
+		if samples, exists := samplesMap[response.Metrics[i].MetricName]; exists {
+			response.Metrics[i].DataPoints = samples
+			if lastReceived, exists := lastReceivedMap[response.Metrics[i].MetricName]; exists {
+				response.Metrics[i].LastReceived = lastReceived
+			}
+			filteredMetrics = append(filteredMetrics, response.Metrics[i])
+		}
+	}
+	response.Metrics = filteredMetrics
+
+	if dataPointsOrder {
+		sort.Slice(response.Metrics, func(i, j int) bool {
+			return response.Metrics[i].DataPoints > response.Metrics[j].DataPoints
+		})
+	}
+
+	return &response, nil
+}
+
+func (r *ClickHouseReader) GetMetricsTimeSeriesPercentage(ctx context.Context, req *metrics_explorer.TreeMapMetricsRequest) (*[]metrics_explorer.TreeMapResponseItem, *model.ApiError) {
+	var args []interface{}
+
+	// Build filters dynamically
+	conditions, _ := utils.BuildFilterConditions(&req.Filters, "")
+	whereClause := ""
+	if len(conditions) > 0 {
+		whereClause = "AND " + strings.Join(conditions, " AND ")
+	}
+	start, end, tsTable := utils.WhichTSTableToUse(req.StartDate, req.EndDate)
+
+	// Construct the query without backticks
+	query := fmt.Sprintf(`
+		SELECT
+			metric_name,
+			total_value,
+			(total_value * 100.0 / total_time_series) AS percentage
+		FROM (
+			SELECT
+				metric_name,
+				uniqExact(fingerprint) AS total_value,
+				(SELECT uniqExact(fingerprint)
+				 FROM %s.%s
+				 WHERE unix_milli BETWEEN ? AND ?) AS total_time_series
+			FROM %s.%s
+			WHERE unix_milli BETWEEN ? AND ? %s
+			GROUP BY metric_name
+		)
+		ORDER BY percentage DESC
+		LIMIT %d;`,
+		signozMetricDBName,
+		tsTable,
+		signozMetricDBName,
+		tsTable,
+		whereClause,
+		req.Limit,
+	)
+
+	args = append(args,
+		start, end, // For total_cardinality subquery
+		start, end, // For main query
+	)
+
+	valueCtx := context.WithValue(ctx, "clickhouse_max_threads", 8)
+	rows, err := r.db.Query(valueCtx, query, args...)
+	if err != nil {
+		zap.L().Error("Error executing cardinality query", zap.Error(err), zap.String("query", query))
+		return nil, &model.ApiError{Typ: "ClickHouseError", Err: err}
+	}
+	defer rows.Close()
+
+	var heatmap []metrics_explorer.TreeMapResponseItem
+	for rows.Next() {
+		var item metrics_explorer.TreeMapResponseItem
+		if err := rows.Scan(&item.MetricName, &item.TotalValue, &item.Percentage); err != nil {
+			zap.L().Error("Error scanning row", zap.Error(err))
+			return nil, &model.ApiError{Typ: "ClickHouseError", Err: err}
+		}
+		heatmap = append(heatmap, item)
+	}
+
+	if err := rows.Err(); err != nil {
+		zap.L().Error("Error iterating over rows", zap.Error(err))
+		return nil, &model.ApiError{Typ: "ClickHouseError", Err: err}
+	}
+
+	return &heatmap, nil
+}
+
+func (r *ClickHouseReader) GetMetricsSamplesPercentage(ctx context.Context, req *metrics_explorer.TreeMapMetricsRequest) (*[]metrics_explorer.TreeMapResponseItem, *model.ApiError) {
+	var args []interface{}
+
+	// Build the filter conditions
+	conditions, _ := utils.BuildFilterConditions(&req.Filters, "t")
+	whereClause := ""
+	if conditions != nil {
+		whereClause = "AND " + strings.Join(conditions, " AND ")
+	}
+
+	// Determine time range and tables to use
+	start, end, tsTable := utils.WhichTSTableToUse(req.StartDate, req.EndDate)
+	sampleTable, countExp := utils.WhichSampleTableToUse(req.StartDate, req.EndDate)
+
+	// Construct the metrics query
+	queryLimit := 50 + req.Limit
+	metricsQuery := fmt.Sprintf(
+		`SELECT
+			t.metric_name AS metric_name,
+			COUNT(DISTINCT t.fingerprint) AS timeSeries
+		FROM %s.%s AS t
+		WHERE unix_milli BETWEEN ? AND ?
+		%s
+		GROUP BY t.metric_name
+		ORDER BY timeSeries DESC
+		LIMIT %d;`,
+		signozMetricDBName, tsTable, whereClause, queryLimit,
+	)
+
+	args = append(args, start, end)
+	valueCtx := context.WithValue(ctx, "clickhouse_max_threads", 8)
+
+	// Execute the metrics query
+	rows, err := r.db.Query(valueCtx, metricsQuery, args...)
+	if err != nil {
+		zap.L().Error("Error executing metrics query", zap.Error(err))
+		return nil, &model.ApiError{Typ: "ClickHouseError", Err: err}
+	}
+	defer rows.Close()
+
+	// Process the query results
+	var metricNames []string
+	for rows.Next() {
+		var metricName string
+		var timeSeries uint64
+		if err := rows.Scan(&metricName, &timeSeries); err != nil {
+			zap.L().Error("Error scanning metric row", zap.Error(err))
+			return nil, &model.ApiError{Typ: "ClickHouseError", Err: err}
+		}
+		metricNames = append(metricNames, metricName)
+	}
+	if err := rows.Err(); err != nil {
+		zap.L().Error("Error iterating over metric rows", zap.Error(err))
+		return nil, &model.ApiError{Typ: "ClickHouseError", Err: err}
+	}
+
+	// If no metrics found, return early
+	if len(metricNames) == 0 {
+		return nil, nil
+	}
+
+	// Format metric names for query
+	metricsList := "'" + strings.Join(metricNames, "', '") + "'"
+
+	// Construct the sample percentage query
+	sampleQuery := fmt.Sprintf(
+		`WITH TotalSamples AS (
+			SELECT %s AS total_samples
+			FROM %s.%s
+			WHERE unix_milli BETWEEN ? AND ?
+		)
+		SELECT
+			s.samples,
+			s.metric_name,
+			COALESCE((s.samples * 100.0 / t.total_samples), 0) AS percentage
+		FROM
+		(
+			SELECT
+				metric_name,
+				%s AS samples
+			FROM %s.%s
+			WHERE fingerprint IN
+			(
+				SELECT fingerprint
+				FROM %s.%s
+				WHERE unix_milli BETWEEN ? AND ?
+				%s
+				AND metric_name IN (%s)
+				GROUP BY fingerprint
+			)
+			AND metric_name IN (%s)
+			GROUP BY metric_name
+		) AS s
+		JOIN TotalSamples t ON 1 = 1
+		ORDER BY percentage DESC
+		LIMIT %d;`,
+		countExp, signozMetricDBName, sampleTable, // Total samples
+		countExp, signozMetricDBName, sampleTable, // Inner select samples
+		signozMetricDBName, tsTable, whereClause, metricsList, // Subquery conditions
+		metricsList, req.Limit, // Final conditions
+	)
+
+	args = append(args, start, end)
+
+	// Execute the sample percentage query
+	rows, err = r.db.Query(valueCtx, sampleQuery, args...)
+	if err != nil {
+		zap.L().Error("Error executing samples query", zap.Error(err))
+		return nil, &model.ApiError{Typ: "ClickHouseError", Err: err}
+	}
+	defer rows.Close()
+
+	// Process the results into a response slice
+	var heatmap []metrics_explorer.TreeMapResponseItem
+	for rows.Next() {
+		var item metrics_explorer.TreeMapResponseItem
+		if err := rows.Scan(&item.TotalValue, &item.MetricName, &item.Percentage); err != nil {
+			zap.L().Error("Error scanning row", zap.Error(err))
+			return nil, &model.ApiError{Typ: "ClickHouseError", Err: err}
+		}
+		heatmap = append(heatmap, item)
+	}
+	if err := rows.Err(); err != nil {
+		zap.L().Error("Error iterating over sample rows", zap.Error(err))
+		return nil, &model.ApiError{Typ: "ClickHouseError", Err: err}
+	}
+
+	return &heatmap, nil
+}
@@ -65,6 +65,10 @@ func (c clickhouseConnWrapper) addClickHouseSettings(ctx context.Context, query
 		settings["optimize_read_in_order"] = 0
 	}

+	if ctx.Value("clickhouse_max_threads") != nil {
+		settings["max_threads"] = ctx.Value("clickhouse_max_threads")
+	}
+
 	ctx = clickhouse.Context(ctx, clickhouse.WithSettings(settings))
 	return ctx
 }
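A minimal sketch of how a caller opts into the higher thread count through the wrapper above; the string context key mirrors the diff (the metrics explorer queries set it to 8), though a typed, unexported key would be the more idiomatic Go and avoids collisions:

```go
package main

import (
	"context"
	"fmt"
)

// buildSettings is a simplified stand-in for addClickHouseSettings above:
// it forwards a per-query max_threads override from the request context.
func buildSettings(ctx context.Context) map[string]interface{} {
	settings := map[string]interface{}{}
	if v := ctx.Value("clickhouse_max_threads"); v != nil {
		settings["max_threads"] = v
	}
	return settings
}

func main() {
	ctx := context.WithValue(context.Background(), "clickhouse_max_threads", 8)
	fmt.Println(buildSettings(ctx)) // map[max_threads:8]
}
```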
@@ -656,3 +656,87 @@ func countPanelsInDashboard(inputData map[string]interface{}) model.DashboardsIn
 		LogsPanelsWithAttrContainsOp: logsPanelsWithAttrContains,
 	}
 }
+
+func GetDashboardsWithMetricName(ctx context.Context, metricName string) ([]map[string]string, *model.ApiError) {
+	// Get all dashboards first
+	query := `SELECT uuid, data FROM dashboards`
+
+	type dashboardRow struct {
+		Uuid string          `db:"uuid"`
+		Data json.RawMessage `db:"data"`
+	}
+
+	var dashboards []dashboardRow
+	err := db.Select(&dashboards, query)
+	if err != nil {
+		zap.L().Error("Error in getting dashboards", zap.Error(err))
+		return nil, &model.ApiError{Typ: model.ErrorExec, Err: err}
+	}
+
+	// Process the JSON data in Go
+	var result []map[string]string
+	for _, dashboard := range dashboards {
+		var dashData map[string]interface{}
+		if err := json.Unmarshal(dashboard.Data, &dashData); err != nil {
+			continue
+		}
+
+		dashTitle, _ := dashData["title"].(string)
+		widgets, ok := dashData["widgets"].([]interface{})
+		if !ok {
+			continue
+		}
+
+		for _, w := range widgets {
+			widget, ok := w.(map[string]interface{})
+			if !ok {
+				continue
+			}
+
+			widgetTitle, _ := widget["title"].(string)
+			widgetID, _ := widget["id"].(string)
+
+			query, ok := widget["query"].(map[string]interface{})
+			if !ok {
+				continue
+			}
+
+			builder, ok := query["builder"].(map[string]interface{})
+			if !ok {
+				continue
+			}
+
+			queryData, ok := builder["queryData"].([]interface{})
+			if !ok {
+				continue
+			}
+
+			for _, qd := range queryData {
+				data, ok := qd.(map[string]interface{})
+				if !ok {
+					continue
+				}
+
+				if dataSource, ok := data["dataSource"].(string); !ok || dataSource != "metrics" {
+					continue
+				}
+
+				aggregateAttr, ok := data["aggregateAttribute"].(map[string]interface{})
+				if !ok {
+					continue
+				}
+
+				if key, ok := aggregateAttr["key"].(string); ok && strings.TrimSpace(key) == metricName {
+					result = append(result, map[string]string{
+						"dashboard_id":    dashboard.Uuid,
+						"widget_title":    widgetTitle,
+						"widget_id":       widgetID,
+						"dashboard_title": dashTitle,
+					})
+				}
+			}
+		}
+	}
+
+	return result, nil
+}
@@ -6,6 +6,7 @@ import (
 	"encoding/json"
 	"errors"
 	"fmt"
+	"go.signoz.io/signoz/pkg/query-service/app/metricsexplorer"
 	"io"
 	"math"
 	"net/http"
@@ -125,6 +126,10 @@ type APIHandler struct {
 	daemonsetsRepo   *inframetrics.DaemonSetsRepo
 	statefulsetsRepo *inframetrics.StatefulSetsRepo
 	jobsRepo         *inframetrics.JobsRepo
+
+	SummaryService *metricsexplorer.SummaryService
+
 	pvcsRepo *inframetrics.PvcsRepo
 }

 type APIHandlerOpts struct {
@@ -208,6 +213,9 @@ func NewAPIHandler(opts APIHandlerOpts) (*APIHandler, error) {
 	daemonsetsRepo := inframetrics.NewDaemonSetsRepo(opts.Reader, querierv2)
 	statefulsetsRepo := inframetrics.NewStatefulSetsRepo(opts.Reader, querierv2)
 	jobsRepo := inframetrics.NewJobsRepo(opts.Reader, querierv2)
 	pvcsRepo := inframetrics.NewPvcsRepo(opts.Reader, querierv2)
+	//explorerCache := metricsexplorer.NewExplorerCache(metricsexplorer.WithCache(opts.Cache))
+	summaryService := metricsexplorer.NewSummaryService(opts.Reader, querierv2)

 	aH := &APIHandler{
 		reader: opts.Reader,
@@ -237,6 +245,8 @@ func NewAPIHandler(opts APIHandlerOpts) (*APIHandler, error) {
 		daemonsetsRepo:   daemonsetsRepo,
 		statefulsetsRepo: statefulsetsRepo,
 		jobsRepo:         jobsRepo,
 		pvcsRepo:         pvcsRepo,
+		SummaryService:   summaryService,
 	}

 	logsQueryBuilder := logsv3.PrepareLogsQuery
@@ -408,6 +418,11 @@ func (aH *APIHandler) RegisterInfraMetricsRoutes(router *mux.Router, am *AuthMid
 	podsSubRouter.HandleFunc("/attribute_values", am.ViewAccess(aH.getPodAttributeValues)).Methods(http.MethodGet)
 	podsSubRouter.HandleFunc("/list", am.ViewAccess(aH.getPodList)).Methods(http.MethodPost)

+	pvcsSubRouter := router.PathPrefix("/api/v1/pvcs").Subrouter()
+	pvcsSubRouter.HandleFunc("/attribute_keys", am.ViewAccess(aH.getPvcAttributeKeys)).Methods(http.MethodGet)
+	pvcsSubRouter.HandleFunc("/attribute_values", am.ViewAccess(aH.getPvcAttributeValues)).Methods(http.MethodGet)
+	pvcsSubRouter.HandleFunc("/list", am.ViewAccess(aH.getPvcList)).Methods(http.MethodPost)
+
 	nodesSubRouter := router.PathPrefix("/api/v1/nodes").Subrouter()
 	nodesSubRouter.HandleFunc("/attribute_keys", am.ViewAccess(aH.getNodeAttributeKeys)).Methods(http.MethodGet)
 	nodesSubRouter.HandleFunc("/attribute_values", am.ViewAccess(aH.getNodeAttributeValues)).Methods(http.MethodGet)
@@ -527,6 +542,9 @@ func (aH *APIHandler) RegisterRoutes(router *mux.Router, am *AuthMiddleware) {
 	router.HandleFunc("/api/v1/settings/ingestion_key", am.AdminAccess(aH.insertIngestionKey)).Methods(http.MethodPost)
 	router.HandleFunc("/api/v1/settings/ingestion_key", am.ViewAccess(aH.getIngestionKeys)).Methods(http.MethodGet)

+	router.HandleFunc("/api/v2/traces/fields", am.ViewAccess(aH.traceFields)).Methods(http.MethodGet)
+	router.HandleFunc("/api/v2/traces/fields", am.EditAccess(aH.updateTraceField)).Methods(http.MethodPost)
+
 	router.HandleFunc("/api/v1/version", am.OpenAccess(aH.getVersion)).Methods(http.MethodGet)
 	router.HandleFunc("/api/v1/featureFlags", am.OpenAccess(aH.getFeatureFlags)).Methods(http.MethodGet)
 	router.HandleFunc("/api/v1/configs", am.OpenAccess(aH.getConfigs)).Methods(http.MethodGet)
@@ -587,6 +605,24 @@ func (aH *APIHandler) RegisterRoutes(router *mux.Router, am *AuthMiddleware) {
 	router.HandleFunc("/api/v1/changePassword/{id}", am.SelfAccess(aH.changePassword)).Methods(http.MethodPost)
 }

+func (ah *APIHandler) MetricExplorerRoutes(router *mux.Router, am *AuthMiddleware) {
+	router.HandleFunc("/api/v1/metrics/filters/keys",
+		am.ViewAccess(ah.FilterKeysSuggestion)).
+		Methods(http.MethodGet)
+	router.HandleFunc("/api/v1/metrics/filters/values",
+		am.ViewAccess(ah.FilterValuesSuggestion)).
+		Methods(http.MethodPost)
+	router.HandleFunc("/api/v1/metrics/{metric_name}/metadata",
+		am.ViewAccess(ah.GetMetricsDetails)).
+		Methods(http.MethodGet)
+	router.HandleFunc("/api/v1/metrics",
+		am.ViewAccess(ah.ListMetrics)).
+		Methods(http.MethodPost)
+	router.HandleFunc("/api/v1/metrics/treemap",
+		am.ViewAccess(ah.GetTreeMap)).
+		Methods(http.MethodPost)
+}
+
 func Intersection(a, b []int) (c []int) {
 	m := make(map[int]bool)
@@ -4075,10 +4111,9 @@ func (aH *APIHandler) CreateLogsPipeline(w http.ResponseWriter, r *http.Request)
 		zap.L().Warn("found no pipelines in the http request, this will delete all the pipelines")
 	}

-	for _, p := range postable {
-		if err := p.IsValid(); err != nil {
-			return nil, model.BadRequestStr(err.Error())
-		}
+	validationErr := aH.LogsParsingPipelineController.ValidatePipelines(ctx, postable)
+	if validationErr != nil {
+		return nil, validationErr
 	}

 	return aH.LogsParsingPipelineController.ApplyPipelines(ctx, postable)
@@ -4893,3 +4928,35 @@ func (aH *APIHandler) QueryRangeV4(w http.ResponseWriter, r *http.Request) {

  aH.queryRangeV4(r.Context(), queryRangeParams, w, r)
}

func (aH *APIHandler) traceFields(w http.ResponseWriter, r *http.Request) {
  fields, apiErr := aH.reader.GetTraceFields(r.Context())
  if apiErr != nil {
    RespondError(w, apiErr, "failed to fetch fields from the db")
    return
  }
  aH.WriteJSON(w, r, fields)
}

func (aH *APIHandler) updateTraceField(w http.ResponseWriter, r *http.Request) {
  field := model.UpdateField{}
  if err := json.NewDecoder(r.Body).Decode(&field); err != nil {
    apiErr := &model.ApiError{Typ: model.ErrorBadData, Err: err}
    RespondError(w, apiErr, "failed to decode payload")
    return
  }

  err := logs.ValidateUpdateFieldPayloadV2(&field)
  if err != nil {
    apiErr := &model.ApiError{Typ: model.ErrorBadData, Err: err}
    RespondError(w, apiErr, "incorrect payload")
    return
  }

  apiErr := aH.reader.UpdateTraceField(r.Context(), &field)
  if apiErr != nil {
    RespondError(w, apiErr, "failed to update field in the db")
    return
  }
  aH.WriteJSON(w, r, field)
}

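A rough sketch of how the two handlers above pair up in practice. The JSON keys are assumptions based on model.UpdateField and logs.ValidateUpdateFieldPayloadV2, not confirmed field names.

  // List current trace fields (GET), then update one (POST).
  resp, err := http.Get("http://localhost:8080/api/v2/traces/fields")
  if err != nil {
    log.Fatal(err)
  }
  defer resp.Body.Close()

  payload := bytes.NewBufferString(`{"name": "http.method", "selected": true}`) // assumed shape
  resp2, err := http.Post("http://localhost:8080/api/v2/traces/fields", "application/json", payload)
  if err != nil {
    log.Fatal(err)
  }
  defer resp2.Body.Close()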
@@ -544,3 +544,56 @@ func (aH *APIHandler) getJobList(w http.ResponseWriter, r *http.Request) {

  aH.Respond(w, jobList)
}

func (aH *APIHandler) getPvcList(w http.ResponseWriter, r *http.Request) {
  ctx := r.Context()
  req := model.VolumeListRequest{}

  err := json.NewDecoder(r.Body).Decode(&req)
  if err != nil {
    RespondError(w, &model.ApiError{Typ: model.ErrorInternal, Err: err}, nil)
    return
  }

  pvcList, err := aH.pvcsRepo.GetPvcList(ctx, req)
  if err != nil {
    RespondError(w, &model.ApiError{Typ: model.ErrorInternal, Err: err}, nil)
    return
  }

  aH.Respond(w, pvcList)
}

func (aH *APIHandler) getPvcAttributeKeys(w http.ResponseWriter, r *http.Request) {
  ctx := r.Context()
  req, err := parseFilterAttributeKeyRequest(r)
  if err != nil {
    RespondError(w, &model.ApiError{Typ: model.ErrorInternal, Err: err}, nil)
    return
  }

  keys, err := aH.pvcsRepo.GetPvcAttributeKeys(ctx, *req)
  if err != nil {
    RespondError(w, &model.ApiError{Typ: model.ErrorInternal, Err: err}, nil)
    return
  }

  aH.Respond(w, keys)
}

func (aH *APIHandler) getPvcAttributeValues(w http.ResponseWriter, r *http.Request) {
  ctx := r.Context()
  req, err := parseFilterAttributeValueRequest(r)
  if err != nil {
    RespondError(w, &model.ApiError{Typ: model.ErrorInternal, Err: err}, nil)
    return
  }

  values, err := aH.pvcsRepo.GetPvcAttributeValues(ctx, *req)
  if err != nil {
    RespondError(w, &model.ApiError{Typ: model.ErrorInternal, Err: err}, nil)
    return
  }

  aH.Respond(w, values)
}

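For reference, a hypothetical request to the new PVC list endpoint. The body mirrors model.VolumeListRequest as used above, but the exact JSON tags and timestamp units are assumptions.

  reqBody := bytes.NewBufferString(`{
    "start": 1700000000000,
    "end": 1700003600000,
    "orderBy": {"columnName": "usage", "order": "desc"}
  }`) // illustrative values; field names assumed from model.VolumeListRequest
  resp, err := http.Post("http://localhost:8080/api/v1/pvcs/list", "application/json", reqBody)
  if err != nil {
    log.Fatal(err)
  }
  defer resp.Body.Close()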
@@ -89,6 +89,10 @@ func getParamsForTopJobs(req model.JobListRequest) (int64, string, string) {
  return getParamsForTopItems(req.Start, req.End)
}

func getParamsForTopVolumes(req model.VolumeListRequest) (int64, string, string) {
  return getParamsForTopItems(req.Start, req.End)
}

// TODO(srikanthccv): remove this
// What is happening here?
// The `PrepareTimeseriesFilterQuery` uses the local time series table for sub-query because each fingerprint

@@ -23,10 +23,11 @@ var (
  }

  queryNamesForNamespaces = map[string][]string{
    "cpu": {"A"},
    "memory": {"D"},
    "cpu": {"A"},
    "memory": {"D"},
    "pod_phase": {"H", "I", "J", "K"},
  }
  namespaceQueryNames = []string{"A", "D"}
  namespaceQueryNames = []string{"A", "D", "H", "I", "J", "K"}

  attributesKeysForNamespaces = []v3.AttributeKey{
    {Key: "k8s_namespace_name"},

@@ -307,6 +308,19 @@ func (p *NamespacesRepo) GetNamespaceList(ctx context.Context, req model.NamespaceListRequest)
      record.MemoryUsage = memory
    }

    if pending, ok := row.Data["H"].(float64); ok {
      record.CountByPhase.Pending = int(pending)
    }
    if running, ok := row.Data["I"].(float64); ok {
      record.CountByPhase.Running = int(running)
    }
    if succeeded, ok := row.Data["J"].(float64); ok {
      record.CountByPhase.Succeeded = int(succeeded)
    }
    if failed, ok := row.Data["K"].(float64); ok {
      record.CountByPhase.Failed = int(failed)
    }

    record.Meta = map[string]string{}
    if _, ok := namespaceAttrs[record.NamespaceName]; ok {
      record.Meta = namespaceAttrs[record.NamespaceName]

@@ -17,7 +17,7 @@ import (
var (
  metricToUseForNodes = "k8s_node_cpu_utilization"

  nodeAttrsToEnrich = []string{"k8s_node_name", "k8s_node_uid"}
  nodeAttrsToEnrich = []string{"k8s_node_name", "k8s_node_uid", "k8s_cluster_name"}

  k8sNodeUIDAttrKey = "k8s_node_uid"

@@ -27,13 +27,14 @@ var (
    "memory": {"C"},
    "memory_allocatable": {"D"},
  }
  nodeQueryNames = []string{"A", "B", "C", "D"}
  nodeQueryNames = []string{"A", "B", "C", "D", "E", "F"}

  metricNamesForNodes = map[string]string{
    "cpu": "k8s_node_cpu_utilization",
    "cpu_allocatable": "k8s_node_allocatable_cpu",
    "memory": "k8s_node_memory_usage",
    "memory_allocatable": "k8s_node_allocatable_memory",
    "node_condition": "k8s_node_condition_ready",
  }
)

@@ -325,6 +326,14 @@ func (p *NodesRepo) GetNodeList(ctx context.Context, req model.NodeListRequest)
      record.NodeMemoryAllocatable = memory
    }

    if ready, ok := row.Data["E"].(float64); ok {
      record.CountByCondition.Ready = int(ready)
    }

    if notReady, ok := row.Data["F"].(float64); ok {
      record.CountByCondition.NotReady = int(notReady)
    }

    record.Meta = map[string]string{}
    if _, ok := nodeAttrs[record.NodeUID]; ok {
      record.Meta = nodeAttrs[record.NodeUID]

@@ -109,6 +109,74 @@ var NodesTableListQuery = v3.QueryRangeParamsV3{
      SpaceAggregation: v3.SpaceAggregationSum,
      Disabled: false,
    },
    // node conditions - Ready
    "E": {
      QueryName: "E",
      DataSource: v3.DataSourceMetrics,
      AggregateAttribute: v3.AttributeKey{
        Key: metricNamesForNodes["node_condition"],
        DataType: v3.AttributeKeyDataTypeFloat64,
      },
      Temporality: v3.Unspecified,
      Filters: &v3.FilterSet{
        Operator: "AND",
        Items: []v3.FilterItem{
          {
            Key: v3.AttributeKey{
              Key: "__value",
            },
            Operator: v3.FilterOperatorEqual,
            Value: 1,
          },
        },
      },
      GroupBy: []v3.AttributeKey{
        {
          Key: k8sNodeUIDAttrKey,
          DataType: v3.AttributeKeyDataTypeString,
          Type: v3.AttributeKeyTypeResource,
        },
      },
      Expression: "E",
      ReduceTo: v3.ReduceToOperatorAvg,
      TimeAggregation: v3.TimeAggregationAnyLast,
      SpaceAggregation: v3.SpaceAggregationSum,
      Disabled: false,
    },
    // node conditions - NotReady
    "F": {
      QueryName: "F",
      DataSource: v3.DataSourceMetrics,
      AggregateAttribute: v3.AttributeKey{
        Key: metricNamesForNodes["node_condition"],
        DataType: v3.AttributeKeyDataTypeFloat64,
      },
      Temporality: v3.Unspecified,
      Filters: &v3.FilterSet{
        Operator: "AND",
        Items: []v3.FilterItem{
          {
            Key: v3.AttributeKey{
              Key: "__value",
            },
            Operator: v3.FilterOperatorEqual,
            Value: 0,
          },
        },
      },
      GroupBy: []v3.AttributeKey{
        {
          Key: k8sNodeUIDAttrKey,
          DataType: v3.AttributeKeyDataTypeString,
          Type: v3.AttributeKeyTypeResource,
        },
      },
      Expression: "F",
      ReduceTo: v3.ReduceToOperatorAvg,
      TimeAggregation: v3.TimeAggregationAnyLast,
      SpaceAggregation: v3.SpaceAggregationSum,
      Disabled: false,
    },
  },
  PanelType: v3.PanelTypeTable,
  QueryType: v3.QueryTypeBuilder,

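Read informally, the two new builder queries pivot the ready-condition gauge into per-node counts; the special "__value" key filters on the sample value itself rather than on an attribute. A non-executable summary of what E and F compute, under that reading:

  // Interpretation of queries E and F above (informal):
  //   E: last value of k8s_node_condition_ready == 1, summed per node -> CountByCondition.Ready
  //   F: last value of k8s_node_condition_ready == 0, summed per node -> CountByCondition.NotReady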
@@ -27,6 +27,7 @@ var (
    "k8s_daemonset_name",
    "k8s_job_name",
    "k8s_cronjob_name",
    "k8s_cluster_name",
  }

  k8sPodUIDAttrKey = "k8s_pod_uid"

@@ -39,8 +40,9 @@ var (
    "memory_request": {"E", "D"},
    "memory_limit": {"F", "D"},
    "restarts": {"G", "A"},
    "pod_phase": {"H", "I", "J", "K"},
  }
  podQueryNames = []string{"A", "B", "C", "D", "E", "F", "G"}
  podQueryNames = []string{"A", "B", "C", "D", "E", "F", "G", "H", "I", "J", "K"}

  metricNamesForPods = map[string]string{
    "cpu": "k8s_pod_cpu_utilization",

@@ -50,6 +52,7 @@ var (
    "memory_request": "k8s_pod_memory_request_utilization",
    "memory_limit": "k8s_pod_memory_limit_utilization",
    "restarts": "k8s_container_restarts",
    "pod_phase": "k8s_pod_phase",
  }
)

@@ -365,6 +368,22 @@ func (p *PodsRepo) GetPodList(ctx context.Context, req model.PodListRequest) (model.PodListResponse, error) {
      record.RestartCount = int(restarts)
    }

    if pending, ok := row.Data["H"].(float64); ok {
      record.CountByPhase.Pending = int(pending)
    }

    if running, ok := row.Data["I"].(float64); ok {
      record.CountByPhase.Running = int(running)
    }

    if succeeded, ok := row.Data["J"].(float64); ok {
      record.CountByPhase.Succeeded = int(succeeded)
    }

    if failed, ok := row.Data["K"].(float64); ok {
      record.CountByPhase.Failed = int(failed)
    }

    record.Meta = map[string]string{}
    if _, ok := podAttrs[record.PodUID]; ok {
      record.Meta = podAttrs[record.PodUID]

@@ -54,7 +54,7 @@ var PodsTableListQuery = v3.QueryRangeParamsV3{
      Expression: "B",
      ReduceTo: v3.ReduceToOperatorAvg,
      TimeAggregation: v3.TimeAggregationAvg,
      SpaceAggregation: v3.SpaceAggregationSum,
      SpaceAggregation: v3.SpaceAggregationAvg,
      Disabled: false,
    },
    // pod cpu limit utilization

@@ -80,7 +80,7 @@ var PodsTableListQuery = v3.QueryRangeParamsV3{
      Expression: "C",
      ReduceTo: v3.ReduceToOperatorAvg,
      TimeAggregation: v3.TimeAggregationAvg,
      SpaceAggregation: v3.SpaceAggregationSum,
      SpaceAggregation: v3.SpaceAggregationAvg,
      Disabled: false,
    },
    // pod memory utilization

@@ -132,7 +132,7 @@ var PodsTableListQuery = v3.QueryRangeParamsV3{
      Expression: "E",
      ReduceTo: v3.ReduceToOperatorAvg,
      TimeAggregation: v3.TimeAggregationAvg,
      SpaceAggregation: v3.SpaceAggregationSum,
      SpaceAggregation: v3.SpaceAggregationAvg,
      Disabled: false,
    },
    // pod memory limit utilization

@@ -158,7 +158,7 @@ var PodsTableListQuery = v3.QueryRangeParamsV3{
      Expression: "F",
      ReduceTo: v3.ReduceToOperatorAvg,
      TimeAggregation: v3.TimeAggregationAvg,
      SpaceAggregation: v3.SpaceAggregationSum,
      SpaceAggregation: v3.SpaceAggregationAvg,
      Disabled: false,
    },
    "G": {

@@ -187,6 +187,142 @@ var PodsTableListQuery = v3.QueryRangeParamsV3{
      Functions: []v3.Function{{Name: v3.FunctionNameRunningDiff}},
      Disabled: false,
    },
    // pod phase pending
    "H": {
      QueryName: "H",
      DataSource: v3.DataSourceMetrics,
      AggregateAttribute: v3.AttributeKey{
        Key: metricNamesForPods["pod_phase"],
        DataType: v3.AttributeKeyDataTypeFloat64,
      },
      Temporality: v3.Unspecified,
      Filters: &v3.FilterSet{
        Operator: "AND",
        Items: []v3.FilterItem{
          {
            Key: v3.AttributeKey{
              Key: "__value",
            },
            Operator: v3.FilterOperatorEqual,
            Value: 1,
          },
        },
      },
      GroupBy: []v3.AttributeKey{
        {
          Key: k8sPodUIDAttrKey,
          DataType: v3.AttributeKeyDataTypeString,
          Type: v3.AttributeKeyTypeResource,
        },
      },
      Expression: "H",
      ReduceTo: v3.ReduceToOperatorLast,
      TimeAggregation: v3.TimeAggregationAnyLast,
      SpaceAggregation: v3.SpaceAggregationCount,
      Disabled: false,
    },
    // pod phase running
    "I": {
      QueryName: "I",
      DataSource: v3.DataSourceMetrics,
      AggregateAttribute: v3.AttributeKey{
        Key: metricNamesForPods["pod_phase"],
        DataType: v3.AttributeKeyDataTypeFloat64,
      },
      Temporality: v3.Unspecified,
      Filters: &v3.FilterSet{
        Operator: "AND",
        Items: []v3.FilterItem{
          {
            Key: v3.AttributeKey{
              Key: "__value",
            },
            Operator: v3.FilterOperatorEqual,
            Value: 2,
          },
        },
      },
      GroupBy: []v3.AttributeKey{
        {
          Key: k8sPodUIDAttrKey,
          DataType: v3.AttributeKeyDataTypeString,
          Type: v3.AttributeKeyTypeResource,
        },
      },
      Expression: "I",
      ReduceTo: v3.ReduceToOperatorLast,
      TimeAggregation: v3.TimeAggregationAnyLast,
      SpaceAggregation: v3.SpaceAggregationCount,
      Disabled: false,
    },
    // pod phase succeeded
    "J": {
      QueryName: "J",
      DataSource: v3.DataSourceMetrics,
      AggregateAttribute: v3.AttributeKey{
        Key: metricNamesForPods["pod_phase"],
        DataType: v3.AttributeKeyDataTypeFloat64,
      },
      Temporality: v3.Unspecified,
      Filters: &v3.FilterSet{
        Operator: "AND",
        Items: []v3.FilterItem{
          {
            Key: v3.AttributeKey{
              Key: "__value",
            },
            Operator: v3.FilterOperatorEqual,
            Value: 3,
          },
        },
      },
      GroupBy: []v3.AttributeKey{
        {
          Key: k8sPodUIDAttrKey,
          DataType: v3.AttributeKeyDataTypeString,
          Type: v3.AttributeKeyTypeResource,
        },
      },
      Expression: "J",
      ReduceTo: v3.ReduceToOperatorLast,
      TimeAggregation: v3.TimeAggregationAnyLast,
      SpaceAggregation: v3.SpaceAggregationCount,
      Disabled: false,
    },
    // pod phase failed
    "K": {
      QueryName: "K",
      DataSource: v3.DataSourceMetrics,
      AggregateAttribute: v3.AttributeKey{
        Key: metricNamesForPods["pod_phase"],
        DataType: v3.AttributeKeyDataTypeFloat64,
      },
      Temporality: v3.Unspecified,
      Filters: &v3.FilterSet{
        Operator: "AND",
        Items: []v3.FilterItem{
          {
            Key: v3.AttributeKey{
              Key: "__value",
            },
            Operator: v3.FilterOperatorEqual,
            Value: 4,
          },
        },
      },
      GroupBy: []v3.AttributeKey{
        {
          Key: k8sPodUIDAttrKey,
          DataType: v3.AttributeKeyDataTypeString,
          Type: v3.AttributeKeyTypeResource,
        },
      },
      Expression: "K",
      ReduceTo: v3.ReduceToOperatorLast,
      TimeAggregation: v3.TimeAggregationAnyLast,
      SpaceAggregation: v3.SpaceAggregationCount,
      Disabled: false,
    },
  },
  PanelType: v3.PanelTypeTable,
  QueryType: v3.QueryTypeBuilder,

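The literal values 1 through 4 in the H/I/J/K filters are the numeric encoding of the pod phase gauge. Assuming the collector emits k8s_pod_phase with the usual OpenTelemetry k8s cluster receiver encoding, the mapping behind these four queries is:

  // Assumed k8s_pod_phase value encoding behind queries H-K above:
  const (
    podPhasePending   = 1 // query H
    podPhaseRunning   = 2 // query I
    podPhaseSucceeded = 3 // query J
    podPhaseFailed    = 4 // query K
  )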
378 pkg/query-service/app/inframetrics/pvcs.go Normal file
@@ -0,0 +1,378 @@
package inframetrics

import (
  "context"
  "math"
  "sort"

  "go.signoz.io/signoz/pkg/query-service/app/metrics/v4/helpers"
  "go.signoz.io/signoz/pkg/query-service/common"
  "go.signoz.io/signoz/pkg/query-service/interfaces"
  "go.signoz.io/signoz/pkg/query-service/model"
  v3 "go.signoz.io/signoz/pkg/query-service/model/v3"
  "go.signoz.io/signoz/pkg/query-service/postprocess"
  "golang.org/x/exp/slices"
)

var (
  metricToUseForVolumes = "k8s_volume_available"

  volumeAttrsToEnrich = []string{
    "k8s_pod_uid",
    "k8s_pod_name",
    "k8s_namespace_name",
    "k8s_node_name",
    "k8s_statefulset_name",
    "k8s_cluster_name",
    "k8s_persistentvolumeclaim_name",
  }

  k8sPersistentVolumeClaimNameAttrKey = "k8s_persistentvolumeclaim_name"

  queryNamesForVolumes = map[string][]string{
    "available": {"A"},
    "capacity": {"B", "A"},
    "usage": {"F1", "B", "A"},
    "inodes": {"C", "A"},
    "inodes_free": {"D", "A"},
    "inodes_used": {"E", "A"},
  }

  volumeQueryNames = []string{"A", "B", "C", "D", "E", "F1"}

  metricNamesForVolumes = map[string]string{
    "available": "k8s_volume_available",
    "capacity": "k8s_volume_capacity",
    "inodes": "k8s_volume_inodes",
    "inodes_free": "k8s_volume_inodes_free",
    "inodes_used": "k8s_volume_inodes_used",
  }
)

type PvcsRepo struct {
  reader interfaces.Reader
  querierV2 interfaces.Querier
}

func NewPvcsRepo(reader interfaces.Reader, querierV2 interfaces.Querier) *PvcsRepo {
  return &PvcsRepo{reader: reader, querierV2: querierV2}
}

func (p *PvcsRepo) GetPvcAttributeKeys(ctx context.Context, req v3.FilterAttributeKeyRequest) (*v3.FilterAttributeKeyResponse, error) {
  req.DataSource = v3.DataSourceMetrics
  req.AggregateAttribute = metricToUseForVolumes
  if req.Limit == 0 {
    req.Limit = 50
  }

  attributeKeysResponse, err := p.reader.GetMetricAttributeKeys(ctx, &req)
  if err != nil {
    return nil, err
  }

  // TODO(srikanthccv): only return resource attributes when we have a way to
  // distinguish between resource attributes and other attributes.
  filteredKeys := []v3.AttributeKey{}
  for _, key := range attributeKeysResponse.AttributeKeys {
    if slices.Contains(pointAttrsToIgnore, key.Key) {
      continue
    }
    filteredKeys = append(filteredKeys, key)
  }

  return &v3.FilterAttributeKeyResponse{AttributeKeys: filteredKeys}, nil
}

func (p *PvcsRepo) GetPvcAttributeValues(ctx context.Context, req v3.FilterAttributeValueRequest) (*v3.FilterAttributeValueResponse, error) {
  req.DataSource = v3.DataSourceMetrics
  req.AggregateAttribute = metricToUseForVolumes
  if req.Limit == 0 {
    req.Limit = 50
  }

  attributeValuesResponse, err := p.reader.GetMetricAttributeValues(ctx, &req)
  if err != nil {
    return nil, err
  }
  return attributeValuesResponse, nil
}

func (p *PvcsRepo) getMetadataAttributes(ctx context.Context, req model.VolumeListRequest) (map[string]map[string]string, error) {
  volumeAttrs := map[string]map[string]string{}

  for _, key := range volumeAttrsToEnrich {
    hasKey := false
    for _, groupByKey := range req.GroupBy {
      if groupByKey.Key == key {
        hasKey = true
        break
      }
    }
    if !hasKey {
      req.GroupBy = append(req.GroupBy, v3.AttributeKey{Key: key})
    }
  }

  mq := v3.BuilderQuery{
    DataSource: v3.DataSourceMetrics,
    AggregateAttribute: v3.AttributeKey{
      Key: metricToUseForVolumes,
      DataType: v3.AttributeKeyDataTypeFloat64,
    },
    Temporality: v3.Unspecified,
    GroupBy: req.GroupBy,
  }

  query, err := helpers.PrepareTimeseriesFilterQuery(req.Start, req.End, &mq)
  if err != nil {
    return nil, err
  }

  query = localQueryToDistributedQuery(query)

  attrsListResponse, err := p.reader.GetListResultV3(ctx, query)
  if err != nil {
    return nil, err
  }

  for _, row := range attrsListResponse {
    stringData := map[string]string{}
    for key, value := range row.Data {
      if str, ok := value.(string); ok {
        stringData[key] = str
      } else if strPtr, ok := value.(*string); ok {
        stringData[key] = *strPtr
      }
    }

    volumeName := stringData[k8sPersistentVolumeClaimNameAttrKey]
    if _, ok := volumeAttrs[volumeName]; !ok {
      volumeAttrs[volumeName] = map[string]string{}
    }

    for _, key := range req.GroupBy {
      volumeAttrs[volumeName][key.Key] = stringData[key.Key]
    }
  }

  return volumeAttrs, nil
}

func (p *PvcsRepo) getTopVolumeGroups(ctx context.Context, req model.VolumeListRequest, q *v3.QueryRangeParamsV3) ([]map[string]string, []map[string]string, error) {
  step, timeSeriesTableName, samplesTableName := getParamsForTopVolumes(req)

  queryNames := queryNamesForVolumes[req.OrderBy.ColumnName]
  topVolumeGroupsQueryRangeParams := &v3.QueryRangeParamsV3{
    Start: req.Start,
    End: req.End,
    Step: step,
    CompositeQuery: &v3.CompositeQuery{
      BuilderQueries: map[string]*v3.BuilderQuery{},
      QueryType: v3.QueryTypeBuilder,
      PanelType: v3.PanelTypeTable,
    },
  }

  for _, queryName := range queryNames {
    query := q.CompositeQuery.BuilderQueries[queryName].Clone()
    query.StepInterval = step
    query.MetricTableHints = &v3.MetricTableHints{
      TimeSeriesTableName: timeSeriesTableName,
      SamplesTableName: samplesTableName,
    }
    if req.Filters != nil && len(req.Filters.Items) > 0 {
      if query.Filters == nil {
        query.Filters = &v3.FilterSet{Operator: "AND", Items: []v3.FilterItem{}}
      }
      query.Filters.Items = append(query.Filters.Items, req.Filters.Items...)
    }
    topVolumeGroupsQueryRangeParams.CompositeQuery.BuilderQueries[queryName] = query
  }

  queryResponse, _, err := p.querierV2.QueryRange(ctx, topVolumeGroupsQueryRangeParams)
  if err != nil {
    return nil, nil, err
  }
  formattedResponse, err := postprocess.PostProcessResult(queryResponse, topVolumeGroupsQueryRangeParams)
  if err != nil {
    return nil, nil, err
  }

  if len(formattedResponse) == 0 || len(formattedResponse[0].Series) == 0 {
    return nil, nil, nil
  }

  if req.OrderBy.Order == v3.DirectionDesc {
    sort.Slice(formattedResponse[0].Series, func(i, j int) bool {
      return formattedResponse[0].Series[i].Points[0].Value > formattedResponse[0].Series[j].Points[0].Value
    })
  } else {
    sort.Slice(formattedResponse[0].Series, func(i, j int) bool {
      return formattedResponse[0].Series[i].Points[0].Value < formattedResponse[0].Series[j].Points[0].Value
    })
  }

  limit := math.Min(float64(req.Offset+req.Limit), float64(len(formattedResponse[0].Series)))

  paginatedTopVolumeGroupsSeries := formattedResponse[0].Series[req.Offset:int(limit)]

  topVolumeGroups := []map[string]string{}
  for _, series := range paginatedTopVolumeGroupsSeries {
    topVolumeGroups = append(topVolumeGroups, series.Labels)
  }
  allVolumeGroups := []map[string]string{}
  for _, series := range formattedResponse[0].Series {
    allVolumeGroups = append(allVolumeGroups, series.Labels)
  }

  return topVolumeGroups, allVolumeGroups, nil
}

func (p *PvcsRepo) GetPvcList(ctx context.Context, req model.VolumeListRequest) (model.VolumeListResponse, error) {
  resp := model.VolumeListResponse{}

  if req.Limit == 0 {
    req.Limit = 10
  }

  if req.OrderBy == nil {
    req.OrderBy = &v3.OrderBy{ColumnName: "usage", Order: v3.DirectionDesc}
  }

  if req.GroupBy == nil {
    req.GroupBy = []v3.AttributeKey{{Key: k8sPersistentVolumeClaimNameAttrKey}}
    resp.Type = model.ResponseTypeList
  } else {
    resp.Type = model.ResponseTypeGroupedList
  }

  step := int64(math.Max(float64(common.MinAllowedStepInterval(req.Start, req.End)), 60))

  query := PvcsTableListQuery.Clone()

  query.Start = req.Start
  query.End = req.End
  query.Step = step

  for _, query := range query.CompositeQuery.BuilderQueries {
    query.StepInterval = step
    if req.Filters != nil && len(req.Filters.Items) > 0 {
      if query.Filters == nil {
        query.Filters = &v3.FilterSet{Operator: "AND", Items: []v3.FilterItem{}}
      }
      query.Filters.Items = append(query.Filters.Items, req.Filters.Items...)
    }
    query.GroupBy = req.GroupBy
  }

  volumeAttrs, err := p.getMetadataAttributes(ctx, req)
  if err != nil {
    return resp, err
  }

  topVolumeGroups, allVolumeGroups, err := p.getTopVolumeGroups(ctx, req, query)
  if err != nil {
    return resp, err
  }

  groupFilters := map[string][]string{}
  for _, topVolumeGroup := range topVolumeGroups {
    for k, v := range topVolumeGroup {
      groupFilters[k] = append(groupFilters[k], v)
    }
  }

  for groupKey, groupValues := range groupFilters {
    hasGroupFilter := false
    if req.Filters != nil && len(req.Filters.Items) > 0 {
      for _, filter := range req.Filters.Items {
        if filter.Key.Key == groupKey {
          hasGroupFilter = true
          break
        }
      }
    }

    if !hasGroupFilter {
      for _, query := range query.CompositeQuery.BuilderQueries {
        query.Filters.Items = append(query.Filters.Items, v3.FilterItem{
          Key: v3.AttributeKey{Key: groupKey},
          Value: groupValues,
          Operator: v3.FilterOperatorIn,
        })
      }
    }
  }

  queryResponse, _, err := p.querierV2.QueryRange(ctx, query)
  if err != nil {
    return resp, err
  }

  formattedResponse, err := postprocess.PostProcessResult(queryResponse, query)
  if err != nil {
    return resp, err
  }

  records := []model.VolumeListRecord{}

  for _, result := range formattedResponse {
    for _, row := range result.Table.Rows {

      record := model.VolumeListRecord{
        VolumeUsage: -1,
        VolumeAvailable: -1,
        VolumeCapacity: -1,
        VolumeInodes: -1,
        VolumeInodesFree: -1,
        VolumeInodesUsed: -1,
        Meta: map[string]string{},
      }

      if volumeName, ok := row.Data[k8sPersistentVolumeClaimNameAttrKey].(string); ok {
        record.PersistentVolumeClaimName = volumeName
      }

      if volumeAvailable, ok := row.Data["A"].(float64); ok {
        record.VolumeAvailable = volumeAvailable
      }
      if volumeCapacity, ok := row.Data["B"].(float64); ok {
        record.VolumeCapacity = volumeCapacity
      }

      if volumeInodes, ok := row.Data["C"].(float64); ok {
        record.VolumeInodes = volumeInodes
      }

      if volumeInodesFree, ok := row.Data["D"].(float64); ok {
        record.VolumeInodesFree = volumeInodesFree
      }

      if volumeInodesUsed, ok := row.Data["E"].(float64); ok {
        record.VolumeInodesUsed = volumeInodesUsed
      }

      record.VolumeUsage = record.VolumeCapacity - record.VolumeAvailable

      record.Meta = map[string]string{}
      if _, ok := volumeAttrs[record.PersistentVolumeClaimName]; ok {
        record.Meta = volumeAttrs[record.PersistentVolumeClaimName]
      }

      for k, v := range row.Data {
        if slices.Contains(volumeQueryNames, k) {
          continue
        }
        if labelValue, ok := v.(string); ok {
          record.Meta[k] = labelValue
        }
      }

      records = append(records, record)
    }
  }
  resp.Total = len(allVolumeGroups)
  resp.Records = records

  return resp, nil
}
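GetPvcList follows the same two-phase pattern as the other infra lists: getTopVolumeGroups first ranks and paginates group labels (with table hints pointing the sub-queries at the right time series and samples tables), and the main query is then narrowed to those winners. The narrowing boils down to one IN filter per group key, as in this sketch with illustrative values:

  // Sketch of the narrowing step performed in the loop above.
  query.Filters.Items = append(query.Filters.Items, v3.FilterItem{
    Key: v3.AttributeKey{Key: k8sPersistentVolumeClaimNameAttrKey},
    Value: []string{"data-postgres-0", "data-postgres-1"}, // labels of the top-ranked groups
    Operator: v3.FilterOperatorIn,
  })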
204 pkg/query-service/app/inframetrics/pvcs_query.go Normal file
@@ -0,0 +1,204 @@
package inframetrics

import v3 "go.signoz.io/signoz/pkg/query-service/model/v3"

var PvcsTableListQuery = v3.QueryRangeParamsV3{
  CompositeQuery: &v3.CompositeQuery{
    BuilderQueries: map[string]*v3.BuilderQuery{
      // k8s.volume.available
      "A": {
        QueryName: "A",
        DataSource: v3.DataSourceMetrics,
        AggregateAttribute: v3.AttributeKey{
          Key: metricNamesForVolumes["available"],
          DataType: v3.AttributeKeyDataTypeFloat64,
        },
        Temporality: v3.Unspecified,
        Filters: &v3.FilterSet{
          Operator: "AND",
          Items: []v3.FilterItem{
            {
              Key: v3.AttributeKey{
                Key: k8sPersistentVolumeClaimNameAttrKey,
                DataType: v3.AttributeKeyDataTypeString,
                Type: v3.AttributeKeyTypeResource,
              },
              Operator: v3.FilterOperatorNotEqual,
              Value: "",
            },
          },
        },
        GroupBy: []v3.AttributeKey{
          {
            Key: k8sPersistentVolumeClaimNameAttrKey,
            DataType: v3.AttributeKeyDataTypeString,
            Type: v3.AttributeKeyTypeResource,
          },
        },
        Expression: "A",
        ReduceTo: v3.ReduceToOperatorLast,
        TimeAggregation: v3.TimeAggregationAvg,
        SpaceAggregation: v3.SpaceAggregationSum,
        Disabled: false,
      },
      // k8s.volume.capacity
      "B": {
        QueryName: "B",
        DataSource: v3.DataSourceMetrics,
        AggregateAttribute: v3.AttributeKey{
          Key: metricNamesForVolumes["capacity"],
          DataType: v3.AttributeKeyDataTypeFloat64,
        },
        Temporality: v3.Unspecified,
        Filters: &v3.FilterSet{
          Operator: "AND",
          Items: []v3.FilterItem{
            {
              Key: v3.AttributeKey{
                Key: k8sPersistentVolumeClaimNameAttrKey,
                DataType: v3.AttributeKeyDataTypeString,
                Type: v3.AttributeKeyTypeResource,
              },
              Operator: v3.FilterOperatorNotEqual,
              Value: "",
            },
          },
        },
        GroupBy: []v3.AttributeKey{
          {
            Key: k8sPersistentVolumeClaimNameAttrKey,
            DataType: v3.AttributeKeyDataTypeString,
            Type: v3.AttributeKeyTypeResource,
          },
        },
        Expression: "B",
        ReduceTo: v3.ReduceToOperatorLast,
        TimeAggregation: v3.TimeAggregationAvg,
        SpaceAggregation: v3.SpaceAggregationSum,
        Disabled: false,
      },
      "F1": {
        QueryName: "F1",
        DataSource: v3.DataSourceMetrics,
        Expression: "B - A",
        Filters: &v3.FilterSet{
          Operator: "AND",
          Items: []v3.FilterItem{},
        },
        ReduceTo: v3.ReduceToOperatorLast,
      },
      // k8s.volume.inodes
      "C": {
        QueryName: "C",
        DataSource: v3.DataSourceMetrics,
        AggregateAttribute: v3.AttributeKey{
          Key: metricNamesForVolumes["inodes"],
          DataType: v3.AttributeKeyDataTypeFloat64,
        },
        Temporality: v3.Unspecified,
        Filters: &v3.FilterSet{
          Operator: "AND",
          Items: []v3.FilterItem{
            {
              Key: v3.AttributeKey{
                Key: k8sPersistentVolumeClaimNameAttrKey,
                DataType: v3.AttributeKeyDataTypeString,
                Type: v3.AttributeKeyTypeResource,
              },
              Operator: v3.FilterOperatorNotEqual,
              Value: "",
            },
          },
        },
        GroupBy: []v3.AttributeKey{
          {
            Key: k8sPersistentVolumeClaimNameAttrKey,
            DataType: v3.AttributeKeyDataTypeString,
            Type: v3.AttributeKeyTypeResource,
          },
        },
        Expression: "C",
        ReduceTo: v3.ReduceToOperatorLast,
        TimeAggregation: v3.TimeAggregationAvg,
        SpaceAggregation: v3.SpaceAggregationSum,
        Disabled: false,
      },
      // k8s.volume.inodes_free
      "D": {
        QueryName: "D",
        DataSource: v3.DataSourceMetrics,
        AggregateAttribute: v3.AttributeKey{
          Key: metricNamesForVolumes["inodes_free"],
          DataType: v3.AttributeKeyDataTypeFloat64,
        },
        Temporality: v3.Unspecified,
        Filters: &v3.FilterSet{
          Operator: "AND",
          Items: []v3.FilterItem{
            {
              Key: v3.AttributeKey{
                Key: k8sPersistentVolumeClaimNameAttrKey,
                DataType: v3.AttributeKeyDataTypeString,
                Type: v3.AttributeKeyTypeResource,
              },
              Operator: v3.FilterOperatorNotEqual,
              Value: "",
            },
          },
        },
        GroupBy: []v3.AttributeKey{
          {
            Key: k8sPersistentVolumeClaimNameAttrKey,
            DataType: v3.AttributeKeyDataTypeString,
            Type: v3.AttributeKeyTypeResource,
          },
        },
        Expression: "D",
        ReduceTo: v3.ReduceToOperatorLast,
        TimeAggregation: v3.TimeAggregationAvg,
        SpaceAggregation: v3.SpaceAggregationSum,
        Disabled: false,
      },
      // k8s.volume.inodes_used
      "E": {
        QueryName: "E",
        DataSource: v3.DataSourceMetrics,
        AggregateAttribute: v3.AttributeKey{
          Key: metricNamesForVolumes["inodes_used"],
          DataType: v3.AttributeKeyDataTypeFloat64,
        },
        Temporality: v3.Unspecified,
        Filters: &v3.FilterSet{
          Operator: "AND",
          Items: []v3.FilterItem{
            {
              Key: v3.AttributeKey{
                Key: k8sPersistentVolumeClaimNameAttrKey,
                DataType: v3.AttributeKeyDataTypeString,
                Type: v3.AttributeKeyTypeResource,
              },
              Operator: v3.FilterOperatorNotEqual,
              Value: "",
            },
          },
        },
        GroupBy: []v3.AttributeKey{
          {
            Key: k8sPersistentVolumeClaimNameAttrKey,
            DataType: v3.AttributeKeyDataTypeString,
            Type: v3.AttributeKeyTypeResource,
          },
        },
        Expression: "E",
        ReduceTo: v3.ReduceToOperatorLast,
        TimeAggregation: v3.TimeAggregationAvg,
        SpaceAggregation: v3.SpaceAggregationSum,
        Disabled: false,
      },
    },
    PanelType: v3.PanelTypeTable,
    QueryType: v3.QueryTypeBuilder,
  },
  Version: "v4",
  FormatForWeb: true,
}
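Note that "usage" is not read from a metric at all: F1 is a formula query whose Expression "B - A" derives it, which is also why queryNamesForVolumes maps "usage" to {"F1", "B", "A"}: ordering by usage needs both inputs present. The derivation is simply:

  // Derivation performed by F1 above:
  //   usage = capacity - available   // Expression: "B - A"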
@@ -4,13 +4,13 @@ import v3 "go.signoz.io/signoz/pkg/query-service/model/v3"

var (
  metricNamesForWorkloads = map[string]string{
    "cpu": "k8s_pod_cpu_utilization",
    "cpu_req": "k8s_pod_cpu_request_utilization",
    "cpu_limit": "k8s_pod_cpu_limit_utilization",
    "memory": "k8s_pod_memory_usage",
    "memory_req": "k8s_pod_memory_request_utilization",
    "memory_limit": "k8s_pod_memory_limit_utilization",
    "restarts": "k8s_container_restarts",
    "cpu": "k8s_pod_cpu_utilization",
    "cpu_request": "k8s_pod_cpu_request_utilization",
    "cpu_limit": "k8s_pod_cpu_limit_utilization",
    "memory": "k8s_pod_memory_usage",
    "memory_request": "k8s_pod_memory_request_utilization",
    "memory_limit": "k8s_pod_memory_limit_utilization",
    "restarts": "k8s_container_restarts",
  }
)

@@ -54,7 +54,7 @@ var WorkloadTableListQuery = v3.QueryRangeParamsV3{
      Expression: "B",
      ReduceTo: v3.ReduceToOperatorAvg,
      TimeAggregation: v3.TimeAggregationAvg,
      SpaceAggregation: v3.SpaceAggregationSum,
      SpaceAggregation: v3.SpaceAggregationAvg,
      Disabled: false,
    },
    // pod cpu limit utilization

@@ -74,7 +74,7 @@ var WorkloadTableListQuery = v3.QueryRangeParamsV3{
      Expression: "C",
      ReduceTo: v3.ReduceToOperatorAvg,
      TimeAggregation: v3.TimeAggregationAvg,
      SpaceAggregation: v3.SpaceAggregationSum,
      SpaceAggregation: v3.SpaceAggregationAvg,
      Disabled: false,
    },
    // pod memory utilization

@@ -114,7 +114,7 @@ var WorkloadTableListQuery = v3.QueryRangeParamsV3{
      Expression: "E",
      ReduceTo: v3.ReduceToOperatorAvg,
      TimeAggregation: v3.TimeAggregationAvg,
      SpaceAggregation: v3.SpaceAggregationSum,
      SpaceAggregation: v3.SpaceAggregationAvg,
      Disabled: false,
    },
    // pod memory limit utilization

@@ -134,7 +134,7 @@ var WorkloadTableListQuery = v3.QueryRangeParamsV3{
      Expression: "F",
      ReduceTo: v3.ReduceToOperatorAvg,
      TimeAggregation: v3.TimeAggregationAvg,
      SpaceAggregation: v3.SpaceAggregationSum,
      SpaceAggregation: v3.SpaceAggregationAvg,
      Disabled: false,
    },
    "G": {

@@ -6,28 +6,32 @@ import (

func generateConsumerSQL(start, end int64, topic, partition, consumerGroup, queueType string) string {
  timeRange := (end - start) / 1000000000
  tsBucketStart := (start / 1000000000) - 1800
  tsBucketEnd := end / 1000000000
  query := fmt.Sprintf(`
WITH consumer_query AS (
    SELECT
        serviceName,
        resource_string_service$$name,
        quantile(0.99)(durationNano) / 1000000 AS p99,
        COUNT(*) AS total_requests,
        sumIf(1, statusCode = 2) AS error_count,
        avg(CASE WHEN has(numberTagMap, 'messaging.message.body.size') THEN numberTagMap['messaging.message.body.size'] ELSE NULL END) AS avg_msg_size
    FROM signoz_traces.distributed_signoz_index_v2
        sumIf(1, status_code = 2) AS error_count,
        avg(CASE WHEN has(attributes_number, 'messaging.message.body.size') THEN attributes_number['messaging.message.body.size'] ELSE NULL END) AS avg_msg_size
    FROM signoz_traces.distributed_signoz_index_v3
    WHERE
        timestamp >= '%d'
        AND timestamp <= '%d'
        AND ts_bucket_start >= '%d'
        AND ts_bucket_start <= '%d'
        AND kind = 5
        AND msgSystem = '%s'
        AND stringTagMap['messaging.destination.name'] = '%s'
        AND stringTagMap['messaging.destination.partition.id'] = '%s'
        AND stringTagMap['messaging.kafka.consumer.group'] = '%s'
    GROUP BY serviceName
        AND attribute_string_messaging$$system = '%s'
        AND attributes_string['messaging.destination.name'] = '%s'
        AND attributes_string['messaging.destination.partition.id'] = '%s'
        AND attributes_string['messaging.kafka.consumer.group'] = '%s'
    GROUP BY resource_string_service$$name
)

SELECT
    serviceName AS service_name,
    resource_string_service$$name AS service_name,
    p99,
    COALESCE((error_count * 100.0) / total_requests, 0) AS error_rate,
    COALESCE(total_requests / %d, 0) AS throughput,

@@ -35,27 +39,31 @@ SELECT
FROM
    consumer_query
ORDER BY
    serviceName;
`, start, end, queueType, topic, partition, consumerGroup, timeRange)
    resource_string_service$$name;
`, start, end, tsBucketStart, tsBucketEnd, queueType, topic, partition, consumerGroup, timeRange)
  return query
}

// S1 landing
func generatePartitionLatencySQL(start, end int64, queueType string) string {
  timeRange := (end - start) / 1000000000
  tsBucketStart := (start / 1000000000) - 1800
  tsBucketEnd := end / 1000000000
  query := fmt.Sprintf(`
WITH partition_query AS (
    SELECT
        quantile(0.99)(durationNano) / 1000000 AS p99,
        count(*) AS total_requests,
        stringTagMap['messaging.destination.name'] AS topic,
        stringTagMap['messaging.destination.partition.id'] AS partition
    FROM signoz_traces.distributed_signoz_index_v2
        attributes_string['messaging.destination.name'] AS topic,
        attributes_string['messaging.destination.partition.id'] AS partition
    FROM signoz_traces.distributed_signoz_index_v3
    WHERE
        timestamp >= '%d'
        AND timestamp <= '%d'
        AND ts_bucket_start >= '%d'
        AND ts_bucket_start <= '%d'
        AND kind = 4
        AND msgSystem = '%s'
        AND attribute_string_messaging$$system = '%s'
    GROUP BY topic, partition
)

@@ -68,35 +76,39 @@ FROM
    partition_query
ORDER BY
    topic;
`, start, end, queueType, timeRange)
`, start, end, tsBucketStart, tsBucketEnd, queueType, timeRange)
  return query
}

// S1 consumer
func generateConsumerPartitionLatencySQL(start, end int64, topic, partition, queueType string) string {
  timeRange := (end - start) / 1000000000
  tsBucketStart := (start / 1000000000) - 1800
  tsBucketEnd := end / 1000000000
  query := fmt.Sprintf(`
WITH consumer_pl AS (
    SELECT
        stringTagMap['messaging.kafka.consumer.group'] AS consumer_group,
        serviceName,
        attributes_string['messaging.kafka.consumer.group'] AS consumer_group,
        resource_string_service$$name,
        quantile(0.99)(durationNano) / 1000000 AS p99,
        COUNT(*) AS total_requests,
        sumIf(1, statusCode = 2) AS error_count
    FROM signoz_traces.distributed_signoz_index_v2
        sumIf(1, status_code = 2) AS error_count
    FROM signoz_traces.distributed_signoz_index_v3
    WHERE
        timestamp >= '%d'
        AND timestamp <= '%d'
        AND ts_bucket_start >= '%d'
        AND ts_bucket_start <= '%d'
        AND kind = 5
        AND msgSystem = '%s'
        AND stringTagMap['messaging.destination.name'] = '%s'
        AND stringTagMap['messaging.destination.partition.id'] = '%s'
    GROUP BY consumer_group, serviceName
        AND attribute_string_messaging$$system = '%s'
        AND attributes_string['messaging.destination.name'] = '%s'
        AND attributes_string['messaging.destination.partition.id'] = '%s'
    GROUP BY consumer_group, resource_string_service$$name
)

SELECT
    consumer_group,
    serviceName AS service_name,
    resource_string_service$$name AS service_name,
    p99,
    COALESCE((error_count * 100.0) / total_requests, 0) AS error_rate,
    COALESCE(total_requests / %d, 0) AS throughput

@@ -104,61 +116,68 @@ FROM
    consumer_pl
ORDER BY
    consumer_group;
`, start, end, queueType, topic, partition, timeRange)
`, start, end, tsBucketStart, tsBucketEnd, queueType, topic, partition, timeRange)
  return query
}

// S3, producer overview
func generateProducerPartitionThroughputSQL(start, end int64, queueType string) string {
  timeRange := (end - start) / 1000000000
  // t, svc, rps, byte*, p99, err
  tsBucketStart := (start / 1000000000) - 1800
  tsBucketEnd := end / 1000000000 // t, svc, rps, byte*, p99, err
  query := fmt.Sprintf(`
WITH producer_latency AS (
    SELECT
        serviceName,
        resource_string_service$$name,
        quantile(0.99)(durationNano) / 1000000 AS p99,
        stringTagMap['messaging.destination.name'] AS topic,
        attributes_string['messaging.destination.name'] AS topic,
        COUNT(*) AS total_requests,
        sumIf(1, statusCode = 2) AS error_count
    FROM signoz_traces.distributed_signoz_index_v2
        sumIf(1, status_code = 2) AS error_count
    FROM signoz_traces.distributed_signoz_index_v3
    WHERE
        timestamp >= '%d'
        AND timestamp <= '%d'
        AND ts_bucket_start >= '%d'
        AND ts_bucket_start <= '%d'
        AND kind = 4
        AND msgSystem = '%s'
    GROUP BY topic, serviceName
        AND attribute_string_messaging$$system = '%s'
    GROUP BY topic, resource_string_service$$name
)

SELECT
    topic,
    serviceName AS service_name,
    resource_string_service$$name AS service_name,
    p99,
    COALESCE((error_count * 100.0) / total_requests, 0) AS error_rate,
    COALESCE(total_requests / %d, 0) AS throughput
FROM
    producer_latency
`, start, end, queueType, timeRange)
`, start, end, tsBucketStart, tsBucketEnd, queueType, timeRange)
  return query
}

// S3, producer topic/service overview
func generateProducerTopicLatencySQL(start, end int64, topic, service, queueType string) string {
  timeRange := (end - start) / 1000000000
  tsBucketStart := (start / 1000000000) - 1800
  tsBucketEnd := end / 1000000000
  query := fmt.Sprintf(`
WITH consumer_latency AS (
    SELECT
        quantile(0.99)(durationNano) / 1000000 AS p99,
        stringTagMap['messaging.destination.partition.id'] AS partition,
        attributes_string['messaging.destination.partition.id'] AS partition,
        COUNT(*) AS total_requests,
        sumIf(1, statusCode = 2) AS error_count
    FROM signoz_traces.distributed_signoz_index_v2
        sumIf(1, status_code = 2) AS error_count
    FROM signoz_traces.distributed_signoz_index_v3
    WHERE
        timestamp >= '%d'
        AND timestamp <= '%d'
        AND ts_bucket_start >= '%d'
        AND ts_bucket_start <= '%d'
        AND kind = 4
        AND serviceName = '%s'
        AND msgSystem = '%s'
        AND stringTagMap['messaging.destination.name'] = '%s'
        AND resource_string_service$$name = '%s'
        AND attribute_string_messaging$$system = '%s'
        AND attributes_string['messaging.destination.name'] = '%s'
    GROUP BY partition
)

@@ -169,34 +188,38 @@ SELECT
    COALESCE(total_requests / %d, 0) AS throughput
FROM
    consumer_latency
`, start, end, service, queueType, topic, timeRange)
`, start, end, tsBucketStart, tsBucketEnd, service, queueType, topic, timeRange)
  return query
}

// S3 consumer overview
func generateConsumerLatencySQL(start, end int64, queueType string) string {
  timeRange := (end - start) / 1000000000
  tsBucketStart := (start / 1000000000) - 1800
  tsBucketEnd := end / 1000000000
  query := fmt.Sprintf(`
WITH consumer_latency AS (
    SELECT
        serviceName,
        stringTagMap['messaging.destination.name'] AS topic,
        resource_string_service$$name,
        attributes_string['messaging.destination.name'] AS topic,
        quantile(0.99)(durationNano) / 1000000 AS p99,
        COUNT(*) AS total_requests,
        sumIf(1, statusCode = 2) AS error_count,
        SUM(numberTagMap['messaging.message.body.size']) AS total_bytes
    FROM signoz_traces.distributed_signoz_index_v2
        sumIf(1, status_code = 2) AS error_count,
        SUM(attributes_number['messaging.message.body.size']) AS total_bytes
    FROM signoz_traces.distributed_signoz_index_v3
    WHERE
        timestamp >= '%d'
        AND timestamp <= '%d'
        AND ts_bucket_start >= '%d'
        AND ts_bucket_start <= '%d'
        AND kind = 5
        AND msgSystem = '%s'
    GROUP BY topic, serviceName
        AND attribute_string_messaging$$system = '%s'
    GROUP BY topic, resource_string_service$$name
)

SELECT
    topic,
    serviceName AS service_name,
    resource_string_service$$name AS service_name,
    p99,
    COALESCE((error_count * 100.0) / total_requests, 0) AS error_rate,
    COALESCE(total_requests / %d, 0) AS ingestion_rate,

@@ -205,28 +228,32 @@ FROM
    consumer_latency
ORDER BY
    topic;
`, start, end, queueType, timeRange, timeRange)
`, start, end, tsBucketStart, tsBucketEnd, queueType, timeRange, timeRange)
  return query
}

// S3 consumer topic/service
func generateConsumerServiceLatencySQL(start, end int64, topic, service, queueType string) string {
  timeRange := (end - start) / 1000000000
  tsBucketStart := (start / 1000000000) - 1800
  tsBucketEnd := end / 1000000000
  query := fmt.Sprintf(`
WITH consumer_latency AS (
    SELECT
        quantile(0.99)(durationNano) / 1000000 AS p99,
        stringTagMap['messaging.destination.partition.id'] AS partition,
        attributes_string['messaging.destination.partition.id'] AS partition,
        COUNT(*) AS total_requests,
        sumIf(1, statusCode = 2) AS error_count
    FROM signoz_traces.distributed_signoz_index_v2
        sumIf(1, status_code = 2) AS error_count
    FROM signoz_traces.distributed_signoz_index_v3
    WHERE
        timestamp >= '%d'
        AND timestamp <= '%d'
        AND ts_bucket_start >= '%d'
        AND ts_bucket_start <= '%d'
        AND kind = 5
        AND serviceName = '%s'
        AND msgSystem = '%s'
        AND stringTagMap['messaging.destination.name'] = '%s'
        AND resource_string_service$$name = '%s'
        AND attribute_string_messaging$$system = '%s'
        AND attributes_string['messaging.destination.name'] = '%s'
    GROUP BY partition
)

@@ -237,7 +264,7 @@ SELECT
    COALESCE(total_requests / %d, 0) AS throughput
FROM
    consumer_latency
`, start, end, service, queueType, topic, timeRange)
`, start, end, tsBucketStart, tsBucketEnd, service, queueType, topic, timeRange)
  return query
}

@@ -246,26 +273,26 @@ func generateProducerConsumerEvalSQL(start, end int64, queueType string, evalTim
|
||||
query := fmt.Sprintf(`
|
||||
WITH trace_data AS (
|
||||
SELECT
|
||||
p.serviceName AS producer_service,
|
||||
c.serviceName AS consumer_service,
|
||||
p.traceID,
|
||||
p.resource_string_service$$name AS producer_service,
|
||||
c.resource_string_service$$name AS consumer_service,
|
||||
p.trace_id,
|
||||
p.timestamp AS producer_timestamp,
|
||||
c.timestamp AS consumer_timestamp,
|
||||
p.durationNano AS durationNano,
|
||||
(toUnixTimestamp64Nano(c.timestamp) - toUnixTimestamp64Nano(p.timestamp)) + p.durationNano AS time_difference
|
||||
FROM
|
||||
signoz_traces.distributed_signoz_index_v2 p
|
||||
signoz_traces.distributed_signoz_index_v3 p
|
||||
INNER JOIN
|
||||
signoz_traces.distributed_signoz_index_v2 c
|
||||
ON p.traceID = c.traceID
|
||||
AND c.parentSpanID = p.spanID
|
||||
signoz_traces.distributed_signoz_index_v3 c
|
||||
ON p.trace_id = c.trace_id
|
||||
AND c.parent_span_id = p.span_id
|
||||
WHERE
|
||||
p.kind = 4
|
||||
AND c.kind = 5
|
||||
AND toUnixTimestamp64Nano(p.timestamp) BETWEEN '%d' AND '%d'
|
||||
AND toUnixTimestamp64Nano(c.timestamp) BETWEEN '%d' AND '%d'
|
||||
AND c.msgSystem = '%s'
|
||||
AND p.msgSystem = '%s'
|
||||
AND c.attribute_string_messaging$$system = '%s'
|
||||
AND p.attribute_string_messaging$$system = '%s'
|
||||
)
|
||||
|
||||
SELECT
|
||||
@@ -278,7 +305,7 @@ SELECT
|
||||
arrayMap(x -> x.1,
|
||||
arraySort(
|
||||
x -> -x.2,
|
||||
groupArrayIf((traceID, time_difference), time_difference > '%d')
|
||||
groupArrayIf((trace_id, time_difference), time_difference > '%d')
|
||||
)
|
||||
),
|
||||
1, 10
|
||||
@@ -293,91 +320,107 @@ GROUP BY
|
||||
|
||||
func generateProducerSQL(start, end int64, topic, partition, queueType string) string {
|
||||
timeRange := (end - start) / 1000000000
|
||||
tsBucketStart := (start / 1000000000) - 1800
|
||||
tsBucketEnd := end / 1000000000
|
||||
query := fmt.Sprintf(`
|
||||
WITH producer_query AS (
|
||||
SELECT
|
||||
serviceName,
|
||||
resource_string_service$$name,
|
||||
quantile(0.99)(durationNano) / 1000000 AS p99,
|
||||
count(*) AS total_count,
|
||||
sumIf(1, statusCode = 2) AS error_count
|
||||
FROM signoz_traces.distributed_signoz_index_v2
|
||||
sumIf(1, status_code = 2) AS error_count
|
||||
FROM signoz_traces.distributed_signoz_index_v3
|
||||
WHERE
|
||||
timestamp >= '%d'
|
||||
AND timestamp <= '%d'
|
||||
AND ts_bucket_start >= '%d'
|
||||
AND ts_bucket_start <= '%d'
|
||||
AND kind = 4
|
||||
AND msgSystem = '%s'
|
||||
AND stringTagMap['messaging.destination.name'] = '%s'
|
||||
AND stringTagMap['messaging.destination.partition.id'] = '%s'
|
||||
GROUP BY serviceName
|
||||
AND attribute_string_messaging$$system = '%s'
|
||||
AND attributes_string['messaging.destination.name'] = '%s'
|
||||
AND attributes_string['messaging.destination.partition.id'] = '%s'
|
||||
GROUP BY resource_string_service$$name
|
||||
)
|
||||
|
||||
SELECT
|
||||
serviceName AS service_name,
|
||||
resource_string_service$$name AS service_name,
|
||||
p99,
|
||||
COALESCE((error_count * 100.0) / total_count, 0) AS error_percentage,
|
||||
COALESCE(total_count / %d, 0) AS throughput
|
||||
FROM
|
||||
producer_query
|
||||
ORDER BY
|
||||
serviceName;
|
||||
`, start, end, queueType, topic, partition, timeRange)
|
||||
resource_string_service$$name;
|
||||
`, start, end, tsBucketStart, tsBucketEnd, queueType, topic, partition, timeRange)
|
||||
return query
|
||||
}
|
||||
|
||||
func generateNetworkLatencyThroughputSQL(start, end int64, consumerGroup, partitionID, queueType string) string {
|
||||
timeRange := (end - start) / 1000000000
|
||||
tsBucketStart := (start / 1000000000) - 1800
|
||||
tsBucketEnd := end / 1000000000
|
||||
query := fmt.Sprintf(`
|
||||
SELECT
|
||||
stringTagMap['messaging.client_id'] AS client_id,
|
||||
stringTagMap['service.instance.id'] AS service_instance_id,
|
||||
serviceName AS service_name,
|
||||
attributes_string['messaging.client_id'] AS client_id,
|
||||
resources_string['service.instance.id'] AS service_instance_id,
|
||||
resource_string_service$$name AS service_name,
|
||||
count(*) / %d AS throughput
|
||||
FROM signoz_traces.distributed_signoz_index_v2
|
||||
FROM signoz_traces.distributed_signoz_index_v3
|
||||
WHERE
|
||||
timestamp >= '%d'
|
||||
AND timestamp <= '%d'
|
||||
AND ts_bucket_start >= '%d'
|
||||
AND ts_bucket_start <= '%d'
|
||||
AND kind = 5
|
||||
AND msgSystem = '%s'
|
||||
AND stringTagMap['messaging.kafka.consumer.group'] = '%s'
|
||||
AND stringTagMap['messaging.destination.partition.id'] = '%s'
|
||||
AND attribute_string_messaging$$system = '%s'
|
||||
AND attributes_string['messaging.kafka.consumer.group'] = '%s'
|
||||
AND attributes_string['messaging.destination.partition.id'] = '%s'
|
||||
GROUP BY service_name, client_id, service_instance_id
|
||||
ORDER BY throughput DESC
|
||||
`, timeRange, start, end, queueType, consumerGroup, partitionID)
|
||||
`, timeRange, start, end, tsBucketStart, tsBucketEnd, queueType, consumerGroup, partitionID)
|
||||
return query
|
||||
}
|
||||
|
||||
func onboardProducersSQL(start, end int64, queueType string) string {
|
||||
tsBucketStart := (start / 1000000000) - 1800
|
||||
tsBucketEnd := end / 1000000000
|
||||
query := fmt.Sprintf(`
|
||||
SELECT
|
||||
COUNT(*) = 0 AS entries,
|
||||
COUNT(IF(msgSystem = '%s', 1, NULL)) = 0 AS queue,
|
||||
COUNT(IF(attribute_string_messaging$$system = '%s', 1, NULL)) = 0 AS queue,
|
||||
COUNT(IF(kind = 4, 1, NULL)) = 0 AS kind,
|
||||
COUNT(IF(has(stringTagMap, 'messaging.destination.name'), 1, NULL)) = 0 AS destination,
|
||||
COUNT(IF(has(stringTagMap, 'messaging.destination.partition.id'), 1, NULL)) = 0 AS partition
|
||||
COUNT(IF(has(attributes_string, 'messaging.destination.name'), 1, NULL)) = 0 AS destination,
|
||||
COUNT(IF(has(attributes_string, 'messaging.destination.partition.id'), 1, NULL)) = 0 AS partition
|
||||
FROM
|
||||
signoz_traces.distributed_signoz_index_v2
|
||||
signoz_traces.distributed_signoz_index_v3
|
||||
WHERE
|
||||
timestamp >= '%d'
|
||||
AND timestamp <= '%d';`, queueType, start, end)
|
||||
AND timestamp <= '%d'
|
||||
AND ts_bucket_start >= '%d'
|
||||
AND ts_bucket_start <= '%d';`, queueType, start, end, tsBucketStart, tsBucketEnd)
|
||||
return query
|
||||
}
|
||||
|
||||
 func onboardConsumerSQL(start, end int64, queueType string) string {
+	tsBucketStart := (start / 1000000000) - 1800
+	tsBucketEnd := end / 1000000000
 	query := fmt.Sprintf(`
 	SELECT
 		COUNT(*) = 0 AS entries,
-		COUNT(IF(msgSystem = '%s', 1, NULL)) = 0 AS queue,
+		COUNT(IF(attribute_string_messaging$$system = '%s', 1, NULL)) = 0 AS queue,
 		COUNT(IF(kind = 5, 1, NULL)) = 0 AS kind,
-		COUNT(serviceName) = 0 AS svc,
-		COUNT(IF(has(stringTagMap, 'messaging.destination.name'), 1, NULL)) = 0 AS destination,
-		COUNT(IF(has(stringTagMap, 'messaging.destination.partition.id'), 1, NULL)) = 0 AS partition,
-		COUNT(IF(has(stringTagMap, 'messaging.kafka.consumer.group'), 1, NULL)) = 0 AS cgroup,
-		COUNT(IF(has(numberTagMap, 'messaging.message.body.size'), 1, NULL)) = 0 AS bodysize,
-		COUNT(IF(has(stringTagMap, 'messaging.client_id'), 1, NULL)) = 0 AS clientid,
-		COUNT(IF(has(stringTagMap, 'service.instance.id'), 1, NULL)) = 0 AS instanceid
-	FROM signoz_traces.distributed_signoz_index_v2
+		COUNT(resource_string_service$$name) = 0 AS svc,
+		COUNT(IF(has(attributes_string, 'messaging.destination.name'), 1, NULL)) = 0 AS destination,
+		COUNT(IF(has(attributes_string, 'messaging.destination.partition.id'), 1, NULL)) = 0 AS partition,
+		COUNT(IF(has(attributes_string, 'messaging.kafka.consumer.group'), 1, NULL)) = 0 AS cgroup,
+		COUNT(IF(has(attributes_number, 'messaging.message.body.size'), 1, NULL)) = 0 AS bodysize,
+		COUNT(IF(has(attributes_string, 'messaging.client_id'), 1, NULL)) = 0 AS clientid,
+		COUNT(IF(has(resources_string, 'service.instance.id'), 1, NULL)) = 0 AS instanceid
+	FROM signoz_traces.distributed_signoz_index_v3
 	WHERE
 		timestamp >= '%d'
-		AND timestamp <= '%d';`, queueType, start, end)
+		AND timestamp <= '%d'
+		AND ts_bucket_start >= '%d'
+		AND ts_bucket_start <= '%d';`, queueType, start, end, tsBucketStart, tsBucketEnd)
 	return query
 }

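The bucket bounds added above are plain integer arithmetic on epoch timestamps: the `timestamp` column is in nanoseconds, `ts_bucket_start` is in seconds, and the start bound is widened by 1800 s so spans that started in the previous 30-minute bucket are not missed. A minimal, self-contained sketch (the sample numbers are taken from the test expectations later in this diff):

```go
package main

import "fmt"

// bucketBounds mirrors the tsBucketStart/tsBucketEnd arithmetic above,
// assuming start and end are epoch timestamps in nanoseconds and the v3
// tables bucket rows into 30-minute (1800 s) windows of span start time.
func bucketBounds(start, end int64) (int64, int64) {
	tsBucketStart := (start / 1000000000) - 1800 // seconds, minus one bucket of slack
	tsBucketEnd := end / 1000000000              // seconds
	return tsBucketStart, tsBucketEnd
}

func main() {
	lo, hi := bucketBounds(1680066360726210000, 1680066458000000000)
	fmt.Println(lo, hi) // 1680064560 1680066458
}
```
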
@@ -94,6 +94,45 @@ func (ic *LogParsingPipelineController) ApplyPipelines(
 	return ic.GetPipelinesByVersion(ctx, cfg.Version)
 }
 
+func (ic *LogParsingPipelineController) ValidatePipelines(
+	ctx context.Context,
+	postedPipelines []PostablePipeline,
+) *model.ApiError {
+	for _, p := range postedPipelines {
+		if err := p.IsValid(); err != nil {
+			return model.BadRequestStr(err.Error())
+		}
+	}
+
+	// Also run a collector simulation to ensure config is fit
+	// for e2e use with a collector
+	pipelines := []Pipeline{}
+	for _, pp := range postedPipelines {
+		pipelines = append(pipelines, Pipeline{
+			Id:          uuid.New().String(),
+			OrderId:     pp.OrderId,
+			Enabled:     pp.Enabled,
+			Name:        pp.Name,
+			Alias:       pp.Alias,
+			Description: &pp.Description,
+			Filter:      pp.Filter,
+			Config:      pp.Config,
+		})
+	}
+
+	sampleLogs := []model.SignozLog{{Body: ""}}
+	_, _, simulationErr := SimulatePipelinesProcessing(
+		ctx, pipelines, sampleLogs,
+	)
+	if simulationErr != nil {
+		return model.BadRequest(fmt.Errorf(
+			"invalid pipelines config: %w", simulationErr.ToError(),
+		))
+	}
+
+	return nil
+}
+
 // Returns effective list of pipelines including user created
 // pipelines and pipelines for installed integrations
 func (ic *LogParsingPipelineController) getEffectivePipelinesByVersion(

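ValidatePipelines is two-stage: cheap static checks per pipeline first, then a single collector simulation over one empty sample log to prove the assembled config actually runs end to end. A self-contained sketch of the pattern (the types and names here are illustrative stand-ins, not the real ones):

```go
package main

import (
	"errors"
	"fmt"
)

type pipeline struct{ name string }

// isValid stands in for the cheap per-pipeline static check.
func (p pipeline) isValid() error {
	if p.name == "" {
		return errors.New("name is required")
	}
	return nil
}

// simulate stands in for the collector simulation over a sample log.
func simulate(ps []pipeline) error { return nil }

func validate(ps []pipeline) error {
	for _, p := range ps {
		if err := p.isValid(); err != nil {
			return fmt.Errorf("invalid pipeline: %w", err)
		}
	}
	// static checks passed; now prove the whole config runs e2e
	return simulate(ps)
}

func main() {
	fmt.Println(validate([]pipeline{{name: "pipeline 1"}})) // <nil>
}
```
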
@@ -228,8 +228,8 @@ func parseColumn(s string) (*string, error) {
 	return &colName, nil
 }
 
-func arrayToMap(fields []model.LogField) map[string]model.LogField {
-	res := map[string]model.LogField{}
+func arrayToMap(fields []model.Field) map[string]model.Field {
+	res := map[string]model.Field{}
 	for _, field := range fields {
 		res[field.Name] = field
 	}
@@ -251,7 +251,7 @@ func replaceInterestingFields(allFields *model.GetFieldsResponse, queryTokens []
 	return queryTokens, nil
 }
 
-func replaceFieldInToken(queryToken string, selectedFieldsLookup map[string]model.LogField, interestingFieldLookup map[string]model.LogField) (string, error) {
+func replaceFieldInToken(queryToken string, selectedFieldsLookup map[string]model.Field, interestingFieldLookup map[string]model.Field) (string, error) {
 	op := strings.TrimSpace(operatorRegex.FindString(queryToken))
 	opLower := strings.ToLower(op)
 
@@ -283,7 +283,7 @@ func replaceFieldInToken(queryToken string, selectedFieldsLookup map[string]mode
 		}
 	} else {
 		// creating the query token here as we have the metadata
-		field := model.LogField{}
+		field := model.Field{}
 
 		if sfield, ok := selectedFieldsLookup[sqlColName]; ok {
 			field = sfield

@@ -238,14 +238,14 @@ func TestParseColumn(t *testing.T) {
 func TestReplaceInterestingFields(t *testing.T) {
 	queryTokens := []string{"id.userid IN (100) ", "and id_key >= 50 ", `AND body ILIKE '%searchstring%'`}
 	allFields := model.GetFieldsResponse{
-		Selected: []model.LogField{
+		Selected: []model.Field{
 			{
 				Name:     "id_key",
 				DataType: "int64",
 				Type:     "attributes",
 			},
 		},
-		Interesting: []model.LogField{
+		Interesting: []model.Field{
 			{
 				Name:     "id.userid",
 				DataType: "int64",
@@ -326,7 +326,7 @@ func TestCheckIfPrevousPaginateAndModifyOrder(t *testing.T) {
 }
 
 var generateSQLQueryFields = model.GetFieldsResponse{
-	Selected: []model.LogField{
+	Selected: []model.Field{
 		{
 			Name:     "field1",
 			DataType: "int64",
@@ -348,7 +348,7 @@ var generateSQLQueryFields = model.GetFieldsResponse{
 			Type: "static",
 		},
 	},
-	Interesting: []model.LogField{
+	Interesting: []model.Field{
 		{
 			Name:     "FielD1",
 			DataType: "int64",

@@ -6,6 +6,7 @@ import (
 
 	"go.signoz.io/signoz/pkg/query-service/constants"
 	"go.signoz.io/signoz/pkg/query-service/model"
+	v3 "go.signoz.io/signoz/pkg/query-service/model/v3"
 )
 
 func ValidateUpdateFieldPayload(field *model.UpdateField) error {
@@ -38,3 +39,36 @@ func ValidateUpdateFieldPayload(field *model.UpdateField) error {
 	}
 	return nil
 }
+
+func ValidateUpdateFieldPayloadV2(field *model.UpdateField) error {
+	if field.Name == "" {
+		return fmt.Errorf("name cannot be empty")
+	}
+	if field.Type == "" {
+		return fmt.Errorf("type cannot be empty")
+	}
+	if field.DataType == "" {
+		return fmt.Errorf("dataType cannot be empty")
+	}
+
+	// the logs api uses the old names, i.e. attributes and resources, while traces use tag and attribute;
+	// update the log api to use tag and attribute.
+	matched, err := regexp.MatchString(fmt.Sprintf("^(%s|%s)$", v3.AttributeKeyTypeTag, v3.AttributeKeyTypeResource), field.Type)
+	if err != nil {
+		return err
+	}
+	if !matched {
+		return fmt.Errorf("type %s not supported", field.Type)
+	}
+
+	if field.IndexType != "" {
+		matched, err := regexp.MatchString(`^(minmax|set\([0-9]\)|bloom_filter\((0?.?[0-9]+|1)\)|tokenbf_v1\([0-9]+,[0-9]+,[0-9]+\)|ngrambf_v1\([0-9]+,[0-9]+,[0-9]+,[0-9]+\))$`, field.IndexType)
+		if err != nil {
+			return err
+		}
+		if !matched {
+			return fmt.Errorf("index type %s not supported", field.IndexType)
+		}
+	}
+	return nil
+}

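The IndexType regex admits a small family of ClickHouse skip-index declarations. A quick check of what it accepts (the pattern is copied from the diff; the sample inputs are mine):

```go
package main

import (
	"fmt"
	"regexp"
)

func main() {
	re := regexp.MustCompile(`^(minmax|set\([0-9]\)|bloom_filter\((0?.?[0-9]+|1)\)|tokenbf_v1\([0-9]+,[0-9]+,[0-9]+\)|ngrambf_v1\([0-9]+,[0-9]+,[0-9]+,[0-9]+\))$`)
	for _, s := range []string{
		"minmax",
		"set(5)", // only a single digit is allowed inside set(...)
		"bloom_filter(0.01)",
		"tokenbf_v1(10240,3,0)",
		"ngrambf_v1(4,1024,3,0)",
		"set(10)", // rejected: two digits
	} {
		fmt.Println(s, re.MatchString(s))
	}
}
```
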
@@ -5,9 +5,73 @@ import (
 	"reflect"
 	"strings"
 
 	v3 "go.signoz.io/signoz/pkg/query-service/model/v3"
 	"go.uber.org/zap"
 )
 
+func AddMetricValueFilter(mq *v3.BuilderQuery) *v3.MetricValueFilter {
+
+	var metricValueFilter *v3.MetricValueFilter = nil
+
+	if mq != nil && mq.Filters != nil && mq.Filters.Items != nil {
+		for _, item := range mq.Filters.Items {
+			if item.Key.Key == "__value" {
+				switch v := item.Value.(type) {
+				case float64:
+					metricValueFilter = &v3.MetricValueFilter{
+						Value: v,
+					}
+				case float32:
+					metricValueFilter = &v3.MetricValueFilter{
+						Value: float64(v),
+					}
+				case int:
+					metricValueFilter = &v3.MetricValueFilter{
+						Value: float64(v),
+					}
+				case int8:
+					metricValueFilter = &v3.MetricValueFilter{
+						Value: float64(v),
+					}
+				case int16:
+					metricValueFilter = &v3.MetricValueFilter{
+						Value: float64(v),
+					}
+				case int32:
+					metricValueFilter = &v3.MetricValueFilter{
+						Value: float64(v),
+					}
+				case int64:
+					metricValueFilter = &v3.MetricValueFilter{
+						Value: float64(v),
+					}
+				case uint:
+					metricValueFilter = &v3.MetricValueFilter{
+						Value: float64(v),
+					}
+				case uint8:
+					metricValueFilter = &v3.MetricValueFilter{
+						Value: float64(v),
+					}
+				case uint16:
+					metricValueFilter = &v3.MetricValueFilter{
+						Value: float64(v),
+					}
+				case uint32:
+					metricValueFilter = &v3.MetricValueFilter{
+						Value: float64(v),
+					}
+				case uint64:
+					metricValueFilter = &v3.MetricValueFilter{
+						Value: float64(v),
+					}
+				}
+			}
+		}
+	}
+	return metricValueFilter
+}
+
 // FormattedValue formats the value to be used in clickhouse query
 func FormattedValue(v interface{}) string {
 	switch x := v.(type) {

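The switch above exists only to normalize whatever numeric Go type the reserved `__value` filter item carries into a float64. A compact, self-contained restatement of that idea (the production code spells out every case explicitly; this condensed sketch is mine):

```go
package main

import "fmt"

// toFloat64 normalizes any common numeric type to float64, reporting
// whether the input was numeric at all.
func toFloat64(v interface{}) (float64, bool) {
	switch x := v.(type) {
	case float64:
		return x, true
	case float32:
		return float64(x), true
	case int:
		return float64(x), true
	case int64:
		return float64(x), true
	case uint64:
		return float64(x), true
	// ...the remaining int8/16/32 and uint/8/16/32 cases are elided here
	}
	return 0, false
}

func main() {
	if f, ok := toFloat64(int64(42)); ok {
		fmt.Println(f) // 42
	}
}
```
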
@@ -5,6 +5,7 @@ import (
 	"strings"
 	"time"
 
+	"go.signoz.io/signoz/pkg/query-service/app/metrics"
 	"go.signoz.io/signoz/pkg/query-service/app/metrics/v4/helpers"
 	"go.signoz.io/signoz/pkg/query-service/common"
 	"go.signoz.io/signoz/pkg/query-service/constants"
@@ -335,6 +336,10 @@ func PrepareMetricQuery(start, end int64, queryType v3.QueryType, panelType v3.P
 
 	start, end = common.AdjustedMetricTimeRange(start, end, mq.StepInterval, *mq)
 
+	if valFilter := metrics.AddMetricValueFilter(mq); valFilter != nil {
+		mq.MetricValueFilter = valFilter
+	}
+
 	// if the aggregate operator is a histogram quantile, and user has not forgotten
 	// the le tag in the group by then add the le tag to the group by
 	if mq.AggregateOperator == v3.AggregateOperatorHistQuant50 ||

@@ -20,11 +20,16 @@ func PrepareMetricQueryCumulativeTable(start, end, step int64, mq *v3.BuilderQue
 	orderBy := helpers.OrderByAttributeKeyTags(mq.OrderBy, mq.GroupBy)
 	selectLabels := helpers.GroupByAttributeKeyTags(mq.GroupBy...)
 
+	valueFilter := " WHERE isNaN(per_series_value) = 0"
+	if mq.MetricValueFilter != nil {
+		valueFilter += fmt.Sprintf(" AND per_series_value = %f", mq.MetricValueFilter.Value)
+	}
+
 	queryTmpl :=
 		"SELECT %s," +
 			" %s as value" +
 			" FROM (%s)" +
-			" WHERE isNaN(per_series_value) = 0" +
+			valueFilter +
 			" GROUP BY %s" +
 			" ORDER BY %s"

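The same five-line valueFilter guard is repeated in the cumulative time-series, delta table, and delta time-series builders below. A small sketch of the clause it produces (the value is mine; note that %f renders six decimal places in the generated ClickHouse SQL):

```go
package main

import "fmt"

func main() {
	valueFilter := " WHERE isNaN(per_series_value) = 0"
	v := 10.0
	filterValue := &v // stands in for mq.MetricValueFilter
	if filterValue != nil {
		valueFilter += fmt.Sprintf(" AND per_series_value = %f", *filterValue)
	}
	fmt.Println(valueFilter)
	// Output:  WHERE isNaN(per_series_value) = 0 AND per_series_value = 10.000000
}
```
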
@@ -190,11 +190,16 @@ func PrepareMetricQueryCumulativeTimeSeries(start, end, step int64, mq *v3.Build
 	orderBy := helpers.OrderByAttributeKeyTags(mq.OrderBy, mq.GroupBy)
 	selectLabels := helpers.GroupByAttributeKeyTags(mq.GroupBy...)
 
+	valueFilter := " WHERE isNaN(per_series_value) = 0"
+	if mq.MetricValueFilter != nil {
+		valueFilter += fmt.Sprintf(" AND per_series_value = %f", mq.MetricValueFilter.Value)
+	}
+
 	queryTmpl :=
 		"SELECT %s," +
 			" %s as value" +
 			" FROM (%s)" +
-			" WHERE isNaN(per_series_value) = 0" +
+			valueFilter +
 			" GROUP BY %s" +
 			" ORDER BY %s"

@@ -25,11 +25,16 @@ func PrepareMetricQueryDeltaTable(start, end, step int64, mq *v3.BuilderQuery) (
 	orderBy := helpers.OrderByAttributeKeyTags(mq.OrderBy, mq.GroupBy)
 	selectLabels := helpers.GroupByAttributeKeyTags(mq.GroupBy...)
 
+	valueFilter := " WHERE isNaN(per_series_value) = 0"
+	if mq.MetricValueFilter != nil {
+		valueFilter += fmt.Sprintf(" AND per_series_value = %f", mq.MetricValueFilter.Value)
+	}
+
 	queryTmpl :=
 		"SELECT %s," +
 			" %s as value" +
 			" FROM (%s)" +
-			" WHERE isNaN(per_series_value) = 0" +
+			valueFilter +
 			" GROUP BY %s" +
 			" ORDER BY %s"

@@ -142,11 +142,16 @@ func PrepareMetricQueryDeltaTimeSeries(start, end, step int64, mq *v3.BuilderQue
 	orderBy := helpers.OrderByAttributeKeyTags(mq.OrderBy, mq.GroupBy)
 	selectLabels := helpers.GroupByAttributeKeyTags(mq.GroupBy...)
 
+	valueFilter := " WHERE isNaN(per_series_value) = 0"
+	if mq.MetricValueFilter != nil {
+		valueFilter += fmt.Sprintf(" AND per_series_value = %f", mq.MetricValueFilter.Value)
+	}
+
 	queryTmpl :=
 		"SELECT %s," +
 			" %s as value" +
 			" FROM (%s)" +
-			" WHERE isNaN(per_series_value) = 0" +
+			valueFilter +
 			" GROUP BY %s" +
 			" ORDER BY %s"

@@ -270,6 +270,10 @@ func PrepareTimeseriesFilterQuery(start, end int64, mq *v3.BuilderQuery) (string
 
 	if fs != nil && len(fs.Items) != 0 {
 		for _, item := range fs.Items {
+			if item.Key.Key == "__value" {
+				continue
+			}
+
 			toFormat := item.Value
 			op := v3.FilterOperator(strings.ToLower(strings.TrimSpace(string(item.Operator))))
 			if op == v3.FilterOperatorContains || op == v3.FilterOperatorNotContains {

@@ -4,6 +4,7 @@ import (
 	"fmt"
 	"time"
 
+	"go.signoz.io/signoz/pkg/query-service/app/metrics"
 	metricsV3 "go.signoz.io/signoz/pkg/query-service/app/metrics/v3"
 	"go.signoz.io/signoz/pkg/query-service/app/metrics/v4/cumulative"
 	"go.signoz.io/signoz/pkg/query-service/app/metrics/v4/delta"
@@ -19,6 +20,9 @@ import (
 // step is in seconds
 func PrepareMetricQuery(start, end int64, queryType v3.QueryType, panelType v3.PanelType, mq *v3.BuilderQuery, options metricsV3.Options) (string, error) {
 
+	if valFilter := metrics.AddMetricValueFilter(mq); valFilter != nil {
+		mq.MetricValueFilter = valFilter
+	}
 	start, end = common.AdjustedMetricTimeRange(start, end, mq.StepInterval, *mq)
 
 	var quantile float64

74	pkg/query-service/app/metricsexplorer/parser.go	Normal file
@@ -0,0 +1,74 @@
package metricsexplorer

import (
	"encoding/json"
	"fmt"
	"net/http"
	"strconv"

	v3 "go.signoz.io/signoz/pkg/query-service/model/v3"

	"go.signoz.io/signoz/pkg/query-service/model"
	"go.signoz.io/signoz/pkg/query-service/model/metrics_explorer"
)

func ParseFilterKeySuggestions(r *http.Request) (*metrics_explorer.FilterKeyRequest, *model.ApiError) {

	searchText := r.URL.Query().Get("searchText")
	limit, err := strconv.Atoi(r.URL.Query().Get("limit"))
	if err != nil {
		limit = 50
	}

	return &metrics_explorer.FilterKeyRequest{Limit: limit, SearchText: searchText}, nil
}

func ParseFilterValueSuggestions(r *http.Request) (*metrics_explorer.FilterValueRequest, *model.ApiError) {
	var filterValueRequest metrics_explorer.FilterValueRequest

	// parse the request body
	if err := json.NewDecoder(r.Body).Decode(&filterValueRequest); err != nil {
		return nil, &model.ApiError{Typ: model.ErrorBadData, Err: fmt.Errorf("cannot parse the request body: %v", err)}
	}

	return &filterValueRequest, nil
}

func ParseSummaryListMetricsParams(r *http.Request) (*metrics_explorer.SummaryListMetricsRequest, *model.ApiError) {
	var listMetricsParams *metrics_explorer.SummaryListMetricsRequest

	// parse the request body
	if err := json.NewDecoder(r.Body).Decode(&listMetricsParams); err != nil {
		return nil, &model.ApiError{Typ: model.ErrorBadData, Err: fmt.Errorf("cannot parse the request body: %v", err)}
	}

	if len(listMetricsParams.OrderBy) > 1 {
		return nil, &model.ApiError{Typ: model.ErrorBadData, Err: fmt.Errorf("cannot parse the request body: more than 1 order")}
	} else if len(listMetricsParams.OrderBy) == 0 {
		var defaultOrderBy v3.OrderBy
		defaultOrderBy.ColumnName = "timeSeries" // DEFAULT ORDER BY
		defaultOrderBy.Order = v3.DirectionDesc
		listMetricsParams.OrderBy = append(listMetricsParams.OrderBy, defaultOrderBy)
	}

	if listMetricsParams.Limit == 0 {
		listMetricsParams.Limit = 10 // DEFAULT LIMIT
	}

	return listMetricsParams, nil
}

func ParseTreeMapMetricsParams(r *http.Request) (*metrics_explorer.TreeMapMetricsRequest, *model.ApiError) {
	var treeMapMetricParams *metrics_explorer.TreeMapMetricsRequest

	// parse the request body
	if err := json.NewDecoder(r.Body).Decode(&treeMapMetricParams); err != nil {
		return nil, &model.ApiError{Typ: model.ErrorBadData, Err: fmt.Errorf("cannot parse the request body: %v", err)}
	}

	if treeMapMetricParams.Limit == 0 {
		treeMapMetricParams.Limit = 10
	}

	return treeMapMetricParams, nil
}
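With an empty or partial body, ParseSummaryListMetricsParams falls back to ordering by timeSeries (descending) with a limit of 10. A hedged sketch of that defaulting (the struct and route here are stand-ins, not the real types or URL):

```go
package main

import (
	"encoding/json"
	"fmt"
	"net/http/httptest"
	"strings"
)

// listReq mirrors the shape of the summary list request for illustration.
type listReq struct {
	Offset  int              `json:"offset"`
	Limit   int              `json:"limit"`
	OrderBy []map[string]any `json:"orderBy"`
}

func main() {
	r := httptest.NewRequest("POST", "/metrics/summary/list", strings.NewReader(`{"offset":0}`))
	var req listReq
	_ = json.NewDecoder(r.Body).Decode(&req)
	if len(req.OrderBy) == 0 {
		req.OrderBy = append(req.OrderBy, map[string]any{"columnName": "timeSeries", "order": "desc"})
	}
	if req.Limit == 0 {
		req.Limit = 10
	}
	fmt.Printf("%+v\n", req)
	// {Offset:0 Limit:10 OrderBy:[map[columnName:timeSeries order:desc]]}
}
```
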
202	pkg/query-service/app/metricsexplorer/summary.go	Normal file
@@ -0,0 +1,202 @@
package metricsexplorer

import (
	"context"
	"encoding/json"
	"time"

	"go.uber.org/zap"

	"go.signoz.io/signoz/pkg/query-service/app/dashboards"
	"go.signoz.io/signoz/pkg/query-service/interfaces"
	"go.signoz.io/signoz/pkg/query-service/model"
	"go.signoz.io/signoz/pkg/query-service/model/metrics_explorer"
	v3 "go.signoz.io/signoz/pkg/query-service/model/v3"
	"golang.org/x/sync/errgroup"
)

type SummaryService struct {
	reader    interfaces.Reader
	querierV2 interfaces.Querier
}

func NewSummaryService(reader interfaces.Reader, querierV2 interfaces.Querier) *SummaryService {
	return &SummaryService{reader: reader, querierV2: querierV2}
}

func (receiver *SummaryService) FilterKeys(ctx context.Context, params *metrics_explorer.FilterKeyRequest) (*metrics_explorer.FilterKeyResponse, *model.ApiError) {
	var response metrics_explorer.FilterKeyResponse
	keys, apiError := receiver.reader.GetAllMetricFilterAttributeKeys(
		ctx,
		params,
		true,
	)
	if apiError != nil {
		return nil, apiError
	}
	response.AttributeKeys = *keys
	var availableColumnFilter []string
	for key := range metrics_explorer.AvailableColumnFilterMap {
		availableColumnFilter = append(availableColumnFilter, key)
	}
	response.MetricColumns = availableColumnFilter
	return &response, nil
}

func (receiver *SummaryService) FilterValues(ctx context.Context, params *metrics_explorer.FilterValueRequest) (*metrics_explorer.FilterValueResponse, *model.ApiError) {
	var response metrics_explorer.FilterValueResponse
	switch params.FilterKey {
	case "metric_name":
		var filterValues []string
		request := v3.AggregateAttributeRequest{DataSource: v3.DataSourceMetrics, SearchText: params.SearchText, Limit: params.Limit}
		attributes, err := receiver.reader.GetMetricAggregateAttributes(ctx, &request, true)
		if err != nil {
			return nil, model.InternalError(err)
		}
		for _, item := range attributes.AttributeKeys {
			filterValues = append(filterValues, item.Key)
		}
		response.FilterValues = filterValues
		return &response, nil
	case "unit":
		attributes, err := receiver.reader.GetAllMetricFilterUnits(ctx, params)
		if err != nil {
			return nil, err
		}
		response.FilterValues = attributes
		return &response, nil
	default:
		attributes, err := receiver.reader.GetAllMetricFilterAttributeValues(ctx, params)
		if err != nil {
			return nil, err
		}
		response.FilterValues = attributes
		return &response, nil
	}
}

func (receiver *SummaryService) GetMetricsSummary(ctx context.Context, metricName string) (metrics_explorer.MetricDetailsDTO, *model.ApiError) {
	var metricDetailsDTO metrics_explorer.MetricDetailsDTO
	g, ctx := errgroup.WithContext(ctx)

	// Call 1: GetMetricMetadata
	g.Go(func() error {
		metadata, err := receiver.reader.GetMetricMetadata(ctx, metricName, metricName)
		if err != nil {
			return &model.ApiError{Typ: "ClickHouseError", Err: err}
		}
		metricDetailsDTO.Name = metricName
		metricDetailsDTO.Unit = metadata.Unit
		metricDetailsDTO.Description = metadata.Description
		metricDetailsDTO.Type = metadata.Type
		metricDetailsDTO.Metadata.MetricType = metadata.Type
		metricDetailsDTO.Metadata.Description = metadata.Description
		metricDetailsDTO.Metadata.Unit = metadata.Unit
		return nil
	})

	// Call 2: GetMetricsDataPointsAndLastReceived
	g.Go(func() error {
		dataPoints, lastReceived, err := receiver.reader.GetMetricsDataPointsAndLastReceived(ctx, metricName)
		if err != nil {
			return err
		}
		metricDetailsDTO.DataPoints = dataPoints
		metricDetailsDTO.LastReceived = lastReceived
		return nil
	})

	// Call 3: GetTotalTimeSeriesForMetricName
	g.Go(func() error {
		totalSeries, err := receiver.reader.GetTotalTimeSeriesForMetricName(ctx, metricName)
		if err != nil {
			return err
		}
		metricDetailsDTO.TimeSeriesTotal = totalSeries
		return nil
	})

	// Call 4: GetActiveTimeSeriesForMetricName
	g.Go(func() error {
		activeSeries, err := receiver.reader.GetActiveTimeSeriesForMetricName(ctx, metricName, 120*time.Minute)
		if err != nil {
			return err
		}
		metricDetailsDTO.TimeSeriesActive = activeSeries
		return nil
	})

	// Call 5: GetAttributesForMetricName
	g.Go(func() error {
		attributes, err := receiver.reader.GetAttributesForMetricName(ctx, metricName)
		if err != nil {
			return err
		}
		if attributes != nil {
			metricDetailsDTO.Attributes = *attributes
		}
		return nil
	})

	// Call 6: GetDashboardsWithMetricName
	g.Go(func() error {
		data, err := dashboards.GetDashboardsWithMetricName(ctx, metricName)
		if err != nil {
			return err
		}
		if data != nil {
			jsonData, err := json.Marshal(data)
			if err != nil {
				zap.L().Error("Error marshalling data:", zap.Error(err))
				return &model.ApiError{Typ: "MarshallingErr", Err: err}
			}

			var dashboards []metrics_explorer.Dashboard
			err = json.Unmarshal(jsonData, &dashboards)
			if err != nil {
				zap.L().Error("Error unmarshalling data:", zap.Error(err))
				return &model.ApiError{Typ: "UnMarshallingErr", Err: err}
			}
			metricDetailsDTO.Dashboards = dashboards
		}
		return nil
	})

	// Wait for all goroutines and handle any errors
	if err := g.Wait(); err != nil {
		// Type assert to check if it's already an ApiError
		if apiErr, ok := err.(*model.ApiError); ok {
			return metrics_explorer.MetricDetailsDTO{}, apiErr
		}
		// If it's not an ApiError, wrap it in one
		return metrics_explorer.MetricDetailsDTO{}, &model.ApiError{Typ: "InternalError", Err: err}
	}

	return metricDetailsDTO, nil
}
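GetMetricsSummary fans six independent reads out on an errgroup: each goroutine writes a disjoint set of DTO fields (so no mutex is needed), and g.Wait returns the first error while cancelling the shared context for the rest. A minimal, self-contained sketch of that pattern:

```go
package main

import (
	"context"
	"fmt"

	"golang.org/x/sync/errgroup"
)

func main() {
	g, ctx := errgroup.WithContext(context.Background())
	var a, b int

	// each goroutine fills its own field; ctx is cancelled on first failure
	g.Go(func() error { a = 1; return ctx.Err() })
	g.Go(func() error { b = 2; return ctx.Err() })

	if err := g.Wait(); err != nil {
		fmt.Println("first failure:", err)
		return
	}
	fmt.Println(a, b) // 1 2
}
```
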

func (receiver *SummaryService) ListMetricsWithSummary(ctx context.Context, params *metrics_explorer.SummaryListMetricsRequest) (*metrics_explorer.SummaryListMetricsResponse, *model.ApiError) {
	return receiver.reader.ListSummaryMetrics(ctx, params)
}

func (receiver *SummaryService) GetMetricsTreemap(ctx context.Context, params *metrics_explorer.TreeMapMetricsRequest) (*metrics_explorer.TreeMap, *model.ApiError) {
	var response metrics_explorer.TreeMap
	switch params.Treemap {
	case metrics_explorer.CardinalityTreeMap:
		cardinality, apiError := receiver.reader.GetMetricsTimeSeriesPercentage(ctx, params)
		if apiError != nil {
			return nil, apiError
		}
		response.Cardinality = *cardinality
		return &response, nil
	case metrics_explorer.DataPointsTreeMap:
		dataPoints, apiError := receiver.reader.GetMetricsSamplesPercentage(ctx, params)
		if apiError != nil {
			return nil, apiError
		}
		response.DataPoints = *dataPoints
		return &response, nil
	default:
		return nil, nil
	}
}
@@ -190,7 +190,7 @@ func (q *querier) runBuilderQuery(
 			ch <- channelResult{Err: err, Name: queryName, Query: limitQuery, Series: nil}
 			return
 		}
-		query = fmt.Sprintf(placeholderQuery, limitQuery)
+		query = strings.Replace(placeholderQuery, "#LIMIT_PLACEHOLDER", limitQuery, 1)
 	} else {
 		query, err = tracesQueryBuilder(
 			start,

@@ -190,7 +190,7 @@ func (q *querier) runBuilderQuery(
 			ch <- channelResult{Err: err, Name: queryName, Query: limitQuery, Series: nil}
 			return
 		}
-		query = fmt.Sprintf(placeholderQuery, limitQuery)
+		query = strings.Replace(placeholderQuery, "#LIMIT_PLACEHOLDER", limitQuery, 1)
 	} else {
 		query, err = tracesQueryBuilder(
 			start,

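The reason for swapping fmt.Sprintf for strings.Replace in both queriers: the placeholder query can itself contain '%' characters (for example the resource-label LIKE patterns in the tests below), which Sprintf would consume as format verbs. A small demonstration:

```go
package main

import (
	"fmt"
	"strings"
)

func main() {
	placeholderQuery := "SELECT ... WHERE labels like '%hostname%server1%' AND (`function`) GLOBAL IN (#LIMIT_PLACEHOLDER)"
	limitQuery := "SELECT ..."

	fmt.Println(fmt.Sprintf(placeholderQuery, limitQuery))
	// garbled: the '%h' and '%s' inside the LIKE pattern are consumed as verbs

	fmt.Println(strings.Replace(placeholderQuery, "#LIMIT_PLACEHOLDER", limitQuery, 1))
	// wildcards intact; the placeholder token is substituted exactly once
}
```
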
104	pkg/query-service/app/summary.go	Normal file
@@ -0,0 +1,104 @@
package app

import (
	"bytes"
	"io"
	"net/http"

	"github.com/gorilla/mux"
	"go.signoz.io/signoz/pkg/query-service/model"

	explorer "go.signoz.io/signoz/pkg/query-service/app/metricsexplorer"
	"go.uber.org/zap"
)

func (aH *APIHandler) FilterKeysSuggestion(w http.ResponseWriter, r *http.Request) {
	bodyBytes, _ := io.ReadAll(r.Body)
	r.Body = io.NopCloser(bytes.NewBuffer(bodyBytes))
	ctx := r.Context()
	params, apiError := explorer.ParseFilterKeySuggestions(r)
	if apiError != nil {
		zap.L().Error("error parsing summary filter keys request", zap.Error(apiError.Err))
		RespondError(w, apiError, nil)
		return
	}
	keys, apiError := aH.SummaryService.FilterKeys(ctx, params)
	if apiError != nil {
		zap.L().Error("error getting filter keys", zap.Error(apiError.Err))
		RespondError(w, apiError, nil)
		return
	}
	aH.Respond(w, keys)
}

func (aH *APIHandler) FilterValuesSuggestion(w http.ResponseWriter, r *http.Request) {
	bodyBytes, _ := io.ReadAll(r.Body)
	r.Body = io.NopCloser(bytes.NewBuffer(bodyBytes))
	ctx := r.Context()
	params, apiError := explorer.ParseFilterValueSuggestions(r)
	if apiError != nil {
		zap.L().Error("error parsing summary filter values request", zap.Error(apiError.Err))
		RespondError(w, apiError, nil)
		return
	}

	values, apiError := aH.SummaryService.FilterValues(ctx, params)
	if apiError != nil {
		zap.L().Error("error getting filter values", zap.Error(apiError.Err))
		RespondError(w, apiError, nil)
		return
	}
	aH.Respond(w, values)
}

func (aH *APIHandler) GetMetricsDetails(w http.ResponseWriter, r *http.Request) {
	metricName := mux.Vars(r)["metric_name"]
	ctx := r.Context()
	metricsDetail, apiError := aH.SummaryService.GetMetricsSummary(ctx, metricName)
	if apiError != nil {
		zap.L().Error("error getting metrics summary", zap.Error(apiError.Err))
		RespondError(w, apiError, nil)
		return
	}
	aH.Respond(w, metricsDetail)
}

func (aH *APIHandler) ListMetrics(w http.ResponseWriter, r *http.Request) {
	bodyBytes, _ := io.ReadAll(r.Body)
	r.Body = io.NopCloser(bytes.NewBuffer(bodyBytes))
	ctx := r.Context()
	params, apiError := explorer.ParseSummaryListMetricsParams(r)
	if apiError != nil {
		zap.L().Error("error parsing metric list metric summary api request", zap.Error(apiError.Err))
		RespondError(w, model.BadRequest(apiError), nil)
		return
	}

	slmr, apiErr := aH.SummaryService.ListMetricsWithSummary(ctx, params)
	if apiErr != nil {
		zap.L().Error("error listing metrics with summary", zap.Error(apiErr.Err))
		RespondError(w, apiErr, nil)
		return
	}
	aH.Respond(w, slmr)
}

func (aH *APIHandler) GetTreeMap(w http.ResponseWriter, r *http.Request) {
	bodyBytes, _ := io.ReadAll(r.Body)
	r.Body = io.NopCloser(bytes.NewBuffer(bodyBytes))
	ctx := r.Context()
	params, apiError := explorer.ParseTreeMapMetricsParams(r)
	if apiError != nil {
		zap.L().Error("error parsing treemap metrics params", zap.Error(apiError.Err))
		RespondError(w, apiError, nil)
		return
	}
	result, apiError := aH.SummaryService.GetMetricsTreemap(ctx, params)
	if apiError != nil {
		zap.L().Error("error getting treemap data", zap.Error(apiError.Err))
		RespondError(w, apiError, nil)
		return
	}
	aH.Respond(w, result)
}
@@ -34,8 +34,8 @@ func enrichKeyWithMetadata(key v3.AttributeKey, keys map[string]v3.AttributeKey)
 		return v
 	}
 
-	for _, key := range utils.GenerateEnrichmentKeys(key) {
-		if val, ok := keys[key]; ok {
+	for _, tkey := range utils.GenerateEnrichmentKeys(key) {
+		if val, ok := keys[tkey]; ok {
 			return val
 		}
 	}

@@ -74,6 +74,19 @@ func getSelectLabels(groupBy []v3.AttributeKey) string {
 	return strings.Join(labels, ",")
 }
 
+// TODO(nitya): use the _exists columns as well in the future, similar to logs
+func existsSubQueryForFixedColumn(key v3.AttributeKey, op v3.FilterOperator) (string, error) {
+	if key.DataType == v3.AttributeKeyDataTypeString {
+		if op == v3.FilterOperatorExists {
+			return fmt.Sprintf("%s %s ''", getColumnName(key), tracesOperatorMappingV3[v3.FilterOperatorNotEqual]), nil
+		} else {
+			return fmt.Sprintf("%s %s ''", getColumnName(key), tracesOperatorMappingV3[v3.FilterOperatorEqual]), nil
+		}
+	} else {
+		return "", fmt.Errorf("unsupported operation, exists and not exists can only be applied on custom attributes or string type columns")
+	}
+}
+
 func buildTracesFilterQuery(fs *v3.FilterSet) (string, error) {
 	var conditions []string

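For fixed string columns, EXISTS/NOT EXISTS compile to an inequality/equality against the empty string, which is what the updated test expectation "`attribute_string_path` = ''" below encodes. A self-contained sketch (the operator mapping is assumed to resolve to != and =):

```go
package main

import "fmt"

// existsCondition mirrors what existsSubQueryForFixedColumn emits for a
// fixed string column: EXISTS -> col != '', NOT EXISTS -> col = ''.
func existsCondition(column string, exists bool) string {
	op := "="
	if exists {
		op = "!="
	}
	return fmt.Sprintf("%s %s ''", column, op)
}

func main() {
	fmt.Println(existsCondition("`attribute_string_path`", false)) // `attribute_string_path` = ''
	fmt.Println(existsCondition("http_url", true))                 // http_url != ''
}
```
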
@@ -110,7 +123,7 @@ func buildTracesFilterQuery(fs *v3.FilterSet) (string, error) {
 			conditions = append(conditions, fmt.Sprintf(operator, columnName, fmtVal))
 		case v3.FilterOperatorExists, v3.FilterOperatorNotExists:
 			if item.Key.IsColumn {
-				subQuery, err := tracesV3.ExistsSubQueryForFixedColumn(item.Key, item.Operator)
+				subQuery, err := existsSubQueryForFixedColumn(item.Key, item.Operator)
 				if err != nil {
 					return "", err
 				}
@@ -312,7 +325,7 @@ func buildTracesQuery(start, end, step int64, mq *v3.BuilderQuery, panelType v3.
 	}
 
 	if options.GraphLimitQtype == constants.SecondQueryGraphLimit {
-		filterSubQuery = filterSubQuery + " AND " + fmt.Sprintf("(%s) GLOBAL IN (", tracesV3.GetSelectKeys(mq.AggregateOperator, mq.GroupBy)) + "%s)"
+		filterSubQuery = filterSubQuery + " AND " + fmt.Sprintf("(%s) GLOBAL IN (", tracesV3.GetSelectKeys(mq.AggregateOperator, mq.GroupBy)) + "#LIMIT_PLACEHOLDER)"
 	}
 
 	switch mq.AggregateOperator {
@@ -350,7 +363,7 @@ func buildTracesQuery(start, end, step int64, mq *v3.BuilderQuery, panelType v3.
 	case v3.AggregateOperatorCount:
 		if mq.AggregateAttribute.Key != "" {
 			if mq.AggregateAttribute.IsColumn {
-				subQuery, err := tracesV3.ExistsSubQueryForFixedColumn(mq.AggregateAttribute, v3.FilterOperatorExists)
+				subQuery, err := existsSubQueryForFixedColumn(mq.AggregateAttribute, v3.FilterOperatorExists)
 				if err == nil {
 					filterSubQuery = fmt.Sprintf("%s AND %s", filterSubQuery, subQuery)
 				}

@@ -265,9 +265,11 @@ func Test_buildTracesFilterQuery(t *testing.T) {
 				{Key: v3.AttributeKey{Key: "isDone", DataType: v3.AttributeKeyDataTypeBool, Type: v3.AttributeKeyTypeTag}, Operator: v3.FilterOperatorNotExists},
 				{Key: v3.AttributeKey{Key: "host1", DataType: v3.AttributeKeyDataTypeString, Type: v3.AttributeKeyTypeTag}, Operator: v3.FilterOperatorNotExists},
 				{Key: v3.AttributeKey{Key: "path", DataType: v3.AttributeKeyDataTypeString, Type: v3.AttributeKeyTypeTag, IsColumn: true}, Operator: v3.FilterOperatorNotExists},
+				{Key: v3.AttributeKey{Key: "http_url", DataType: v3.AttributeKeyDataTypeString, Type: v3.AttributeKeyTypeTag, IsColumn: true}, Operator: v3.FilterOperatorNotExists},
+				{Key: v3.AttributeKey{Key: "http.route", DataType: v3.AttributeKeyDataTypeString, Type: v3.AttributeKeyTypeTag, IsColumn: true}, Operator: v3.FilterOperatorNotExists},
 			}},
 		},
-		want: "mapContains(attributes_string, 'host') AND mapContains(attributes_number, 'duration') AND NOT mapContains(attributes_bool, 'isDone') AND NOT mapContains(attributes_string, 'host1') AND path = ''",
+		want: "mapContains(attributes_string, 'host') AND mapContains(attributes_number, 'duration') AND NOT mapContains(attributes_bool, 'isDone') AND NOT mapContains(attributes_string, 'host1') AND `attribute_string_path` = '' AND http_url = '' AND `attribute_string_http$$route` = ''",
 	},
 }
 for _, tt := range tests {
@@ -683,7 +685,7 @@ func TestPrepareTracesQuery(t *testing.T) {
 			},
 		},
 		want: "SELECT attributes_string['function'] as `function`, toFloat64(count(distinct(name))) as value from signoz_traces.distributed_signoz_index_v3 where " +
-			"(timestamp >= '1680066360726210000' AND timestamp <= '1680066458000000000') AND (ts_bucket_start >= 1680064560 AND ts_bucket_start <= 1680066458) AND mapContains(attributes_string, 'function') AND (`function`) GLOBAL IN (%s) group by `function` order by value DESC",
+			"(timestamp >= '1680066360726210000' AND timestamp <= '1680066458000000000') AND (ts_bucket_start >= 1680064560 AND ts_bucket_start <= 1680066458) AND mapContains(attributes_string, 'function') AND (`function`) GLOBAL IN (#LIMIT_PLACEHOLDER) group by `function` order by value DESC",
 	},
 	{
 		name: "test with limit with resources- first",
@@ -766,7 +768,7 @@ func TestPrepareTracesQuery(t *testing.T) {
 		want: "SELECT `attribute_string_function` as `function`, serviceName as `serviceName`, toFloat64(count(distinct(name))) as value from signoz_traces.distributed_signoz_index_v3 " +
 			"where (timestamp >= '1680066360726210000' AND timestamp <= '1680066458000000000') AND (ts_bucket_start >= 1680064560 AND ts_bucket_start <= 1680066458) AND attributes_number['line'] = 100 " +
 			"AND (resource_fingerprint GLOBAL IN (SELECT fingerprint FROM signoz_traces.distributed_traces_v3_resource WHERE (seen_at_ts_bucket_start >= 1680064560) AND (seen_at_ts_bucket_start <= 1680066458) " +
-			"AND simpleJSONExtractString(labels, 'hostname') = 'server1' AND labels like '%hostname%server1%')) AND (`function`,`serviceName`) GLOBAL IN (%s) group by `function`,`serviceName` order by value DESC",
+			"AND simpleJSONExtractString(labels, 'hostname') = 'server1' AND labels like '%hostname%server1%')) AND (`function`,`serviceName`) GLOBAL IN (#LIMIT_PLACEHOLDER) group by `function`,`serviceName` order by value DESC",
 	},
 }

@@ -248,6 +248,9 @@ const (
 	SIGNOZ_TIMESERIES_v4_1DAY_LOCAL_TABLENAME  = "time_series_v4_1day"
+	SIGNOZ_TIMESERIES_v4_1WEEK_LOCAL_TABLENAME = "time_series_v4_1week"
 	SIGNOZ_TIMESERIES_v4_1DAY_TABLENAME        = "distributed_time_series_v4_1day"
+	SIGNOZ_TIMESERIES_v4_TABLENAME             = "distributed_time_series_v4"
+	SIGNOZ_TIMESERIES_v4_1WEEK_TABLENAME       = "distributed_time_series_v4_1week"
 	SIGNOZ_TIMESERIES_v4_6HRS_TABLENAME        = "distributed_time_series_v4_6hrs"
 )
 
 var TimeoutExcludedRoutes = map[string]bool{
@@ -290,7 +293,7 @@ const (
 	UINT8 = "Uint8"
 )
 
-var StaticSelectedLogFields = []model.LogField{
+var StaticSelectedLogFields = []model.Field{
 	{
 		Name:     "timestamp",
 		DataType: UINT32,

@@ -4,6 +4,8 @@ import (
 	"context"
 	"time"
 
+	"go.signoz.io/signoz/pkg/query-service/model/metrics_explorer"
+
 	"github.com/ClickHouse/clickhouse-go/v2"
 	"github.com/prometheus/prometheus/promql"
 	"github.com/prometheus/prometheus/storage"
@@ -109,6 +111,24 @@ type Reader interface {
 	SubscribeToQueryProgress(queryId string) (<-chan model.QueryProgress, func(), *model.ApiError)
 
 	GetCountOfThings(ctx context.Context, query string) (uint64, error)
 
+	//trace
+	GetTraceFields(ctx context.Context) (*model.GetFieldsResponse, *model.ApiError)
+	UpdateTraceField(ctx context.Context, field *model.UpdateField) *model.ApiError
+
+	GetAllMetricFilterAttributeValues(ctx context.Context, req *metrics_explorer.FilterValueRequest) ([]string, *model.ApiError)
+	GetAllMetricFilterUnits(ctx context.Context, req *metrics_explorer.FilterValueRequest) ([]string, *model.ApiError)
+	GetAllMetricFilterAttributeKeys(ctx context.Context, req *metrics_explorer.FilterKeyRequest, skipDotNames bool) (*[]v3.AttributeKey, *model.ApiError)
+
+	GetMetricsDataPointsAndLastReceived(ctx context.Context, metricName string) (uint64, uint64, *model.ApiError)
+	GetTotalTimeSeriesForMetricName(ctx context.Context, metricName string) (uint64, *model.ApiError)
+	GetActiveTimeSeriesForMetricName(ctx context.Context, metricName string, duration time.Duration) (uint64, *model.ApiError)
+	GetAttributesForMetricName(ctx context.Context, metricName string) (*[]metrics_explorer.Attribute, *model.ApiError)
+
+	ListSummaryMetrics(ctx context.Context, req *metrics_explorer.SummaryListMetricsRequest) (*metrics_explorer.SummaryListMetricsResponse, *model.ApiError)
+
+	GetMetricsTimeSeriesPercentage(ctx context.Context, request *metrics_explorer.TreeMapMetricsRequest) (*[]metrics_explorer.TreeMapResponseItem, *model.ApiError)
+	GetMetricsSamplesPercentage(ctx context.Context, req *metrics_explorer.TreeMapMetricsRequest) (*[]metrics_explorer.TreeMapResponseItem, *model.ApiError)
 }
 
 type Querier interface {

@@ -151,13 +151,20 @@ type NodeListResponse struct {
 	Total int `json:"total"`
 }
 
+type NodeCountByCondition struct {
+	Ready    int `json:"ready"`
+	NotReady int `json:"notReady"`
+	Unknown  int `json:"unknown"`
+}
+
 type NodeListRecord struct {
-	NodeUID               string            `json:"nodeUID,omitempty"`
-	NodeCPUUsage          float64           `json:"nodeCPUUsage"`
-	NodeCPUAllocatable    float64           `json:"nodeCPUAllocatable"`
-	NodeMemoryUsage       float64           `json:"nodeMemoryUsage"`
-	NodeMemoryAllocatable float64           `json:"nodeMemoryAllocatable"`
-	Meta                  map[string]string `json:"meta"`
+	NodeUID               string               `json:"nodeUID,omitempty"`
+	NodeCPUUsage          float64              `json:"nodeCPUUsage"`
+	NodeCPUAllocatable    float64              `json:"nodeCPUAllocatable"`
+	NodeMemoryUsage       float64              `json:"nodeMemoryUsage"`
+	NodeMemoryAllocatable float64              `json:"nodeMemoryAllocatable"`
+	CountByCondition      NodeCountByCondition `json:"countByCondition"`
+	Meta                  map[string]string    `json:"meta"`
 }
 
 type NamespaceListRequest struct {
@@ -180,6 +187,7 @@ type NamespaceListRecord struct {
 	NamespaceName string            `json:"namespaceName"`
 	CPUUsage      float64           `json:"cpuUsage"`
 	MemoryUsage   float64           `json:"memoryUsage"`
+	CountByPhase  PodCountByPhase   `json:"countByPhase"`
 	Meta          map[string]string `json:"meta"`
 }
 
@@ -329,3 +337,30 @@ type JobListRecord struct {
 	SuccessfulPods int               `json:"successfulPods"`
 	Meta           map[string]string `json:"meta"`
 }
+
+type VolumeListRequest struct {
+	Start   int64             `json:"start"` // epoch time in ms
+	End     int64             `json:"end"`   // epoch time in ms
+	Filters *v3.FilterSet     `json:"filters"`
+	GroupBy []v3.AttributeKey `json:"groupBy"`
+	OrderBy *v3.OrderBy       `json:"orderBy"`
+	Offset  int               `json:"offset"`
+	Limit   int               `json:"limit"`
+}
+
+type VolumeListResponse struct {
+	Type    ResponseType       `json:"type"`
+	Records []VolumeListRecord `json:"records"`
+	Total   int                `json:"total"`
+}
+
+type VolumeListRecord struct {
+	PersistentVolumeClaimName string            `json:"persistentVolumeClaimName"`
+	VolumeAvailable           float64           `json:"volumeAvailable"`
+	VolumeCapacity            float64           `json:"volumeCapacity"`
+	VolumeInodes              float64           `json:"volumeInodes"`
+	VolumeInodesFree          float64           `json:"volumeInodesFree"`
+	VolumeInodesUsed          float64           `json:"volumeInodesUsed"`
+	VolumeUsage               float64           `json:"volumeUsage"`
+	Meta                      map[string]string `json:"meta"`
+}

123	pkg/query-service/model/metrics_explorer/summary.go	Normal file
@@ -0,0 +1,123 @@
package metrics_explorer

import (
	v3 "go.signoz.io/signoz/pkg/query-service/model/v3"
)

type SummaryListMetricsRequest struct {
	Offset    int          `json:"offset"`
	Limit     int          `json:"limit"`
	OrderBy   []v3.OrderBy `json:"orderBy"`
	StartDate int64        `json:"startDate"`
	EndDate   int64        `json:"endDate"`
	Filters   v3.FilterSet `json:"filters"`
}

type TreeMapType string

const (
	CardinalityTreeMap TreeMapType = "cardinality"
	DataPointsTreeMap  TreeMapType = "datapoints"
)

type TreeMapMetricsRequest struct {
	Limit     int          `json:"limit"`
	Treemap   TreeMapType  `json:"treemap"`
	StartDate int64        `json:"startDate"`
	EndDate   int64        `json:"endDate"`
	Filters   v3.FilterSet `json:"filters"`
}

type MetricDetail struct {
	MetricName   string `json:"metric_name"`
	Description  string `json:"description"`
	Type         string `json:"type"`
	Unit         string `json:"unit"`
	TimeSeries   uint64 `json:"cardinality"`
	DataPoints   uint64 `json:"dataPoints"`
	LastReceived int64  `json:"lastReceived"`
}

type TreeMapResponseItem struct {
	Percentage float64 `json:"percentage"`
	TotalValue uint64  `json:"total_value"`
	MetricName string  `json:"metric_name"`
}

type TreeMap struct {
	Cardinality []TreeMapResponseItem `json:"cardinality"`
	DataPoints  []TreeMapResponseItem `json:"dataPoints"`
}

type SummaryListMetricsResponse struct {
	Metrics []MetricDetail `json:"metrics"`
	Total   uint64         `json:"total"`
}

type Attribute struct {
	Key        string   `json:"key" db:"key"`
	Value      []string `json:"value" db:"value"`
	ValueCount uint64   `json:"valueCount" db:"valueCount"`
}

// Metadata holds additional information about the metric.
type Metadata struct {
	MetricType  string `json:"metric_type"`
	Description string `json:"description"`
	Unit        string `json:"unit"`
}

// Alert represents individual alerts associated with the metric.
type Alert struct {
	AlertName string `json:"alert_name"`
	AlertID   string `json:"alert_id"`
}

// Dashboard represents individual dashboards associated with the metric.
type Dashboard struct {
	DashboardName string `json:"dashboard_name"`
	DashboardID   string `json:"dashboard_id"`
	WidgetID      string `json:"widget_id"`
	WidgetName    string `json:"widget_name"`
}

type MetricDetailsDTO struct {
	Name             string      `json:"name"`
	Description      string      `json:"description"`
	Type             string      `json:"type"`
	Unit             string      `json:"unit"`
	DataPoints       uint64      `json:"dataPoints"`
	TimeSeriesTotal  uint64      `json:"timeSeriesTotal"`
	TimeSeriesActive uint64      `json:"timeSeriesActive"`
	LastReceived     uint64      `json:"lastReceived"`
	Attributes       []Attribute `json:"attributes"`
	Metadata         Metadata    `json:"metadata"`
	Alerts           []Alert     `json:"alerts"`
	Dashboards       []Dashboard `json:"dashboards"`
}

type FilterKeyRequest struct {
	SearchText string `json:"searchText"`
	Limit      int    `json:"limit"`
}

type FilterValueRequest struct {
	FilterKey                  string                  `json:"filterKey"`
	FilterAttributeKeyDataType v3.AttributeKeyDataType `json:"filterAttributeKeyDataType"`
	SearchText                 string                  `json:"searchText"`
	Limit                      int                     `json:"limit"`
}

type FilterValueResponse struct {
	FilterValues []string `json:"filterValues"`
}

type FilterKeyResponse struct {
	MetricColumns []string          `json:"metricColumns"`
	AttributeKeys []v3.AttributeKey `json:"attributeKeys"`
}

var AvailableColumnFilterMap = map[string]bool{
	"metric_name": true,
	"unit":        true,
}
@@ -509,15 +509,15 @@ type ShowCreateTableStatement struct {
 	Statement string `json:"statement" ch:"statement"`
 }
 
-type LogField struct {
+type Field struct {
 	Name     string `json:"name" ch:"name"`
 	DataType string `json:"dataType" ch:"datatype"`
 	Type     string `json:"type"`
 }
 
 type GetFieldsResponse struct {
-	Selected    []LogField `json:"selected"`
-	Interesting []LogField `json:"interesting"`
+	Selected    []Field `json:"selected"`
+	Interesting []Field `json:"interesting"`
 }
 
 // Represents a log record in query service requests and responses.

@@ -770,6 +770,19 @@ type MetricTableHints struct {
 	SamplesTableName string
 }
 
+type MetricValueFilter struct {
+	Value float64
+}
+
+func (m *MetricValueFilter) Clone() *MetricValueFilter {
+	if m == nil {
+		return nil
+	}
+	return &MetricValueFilter{
+		Value: m.Value,
+	}
+}
+
 type BuilderQuery struct {
 	QueryName    string `json:"queryName"`
 	StepInterval int64  `json:"stepInterval"`
@@ -795,7 +808,8 @@ type BuilderQuery struct {
 	ShiftBy              int64
 	IsAnomaly            bool
 	QueriesUsedInFormula []string
-	MetricTableHints     *MetricTableHints `json:"-"`
+	MetricTableHints     *MetricTableHints  `json:"-"`
+	MetricValueFilter    *MetricValueFilter `json:"-"`
 }
 
 func (b *BuilderQuery) SetShiftByFromFunc() {
@@ -859,6 +873,7 @@ func (b *BuilderQuery) Clone() *BuilderQuery {
 		ShiftBy:              b.ShiftBy,
 		IsAnomaly:            b.IsAnomaly,
 		QueriesUsedInFormula: b.QueriesUsedInFormula,
+		MetricValueFilter:    b.MetricValueFilter.Clone(),
 	}
 }

@@ -350,6 +350,27 @@ func TestLogPipelinesValidation(t *testing.T) {
 			},
 		},
 		ExpectedResponseStatusCode: 400,
+	}, {
+		Name: "Invalid from field path",
+		Pipeline: logparsingpipeline.PostablePipeline{
+			OrderId: 1,
+			Name:    "pipeline 1",
+			Alias:   "pipeline1",
+			Enabled: true,
+			Filter:  validPipelineFilterSet,
+			Config: []logparsingpipeline.PipelineOperator{
+				{
+					OrderId: 1,
+					ID:      "move",
+					Type:    "move",
+					From:    `attributes.temp_parsed_body."@l"`,
+					To:      "attributes.test",
+					Enabled: true,
+					Name:    "test move",
+				},
+			},
+		},
+		ExpectedResponseStatusCode: 400,
 	},
 }

141	pkg/query-service/utils/filter_conditions.go	Normal file
@@ -0,0 +1,141 @@
package utils

import (
	"fmt"
	"strings"
	"time"

	"go.signoz.io/signoz/pkg/query-service/constants"
	"go.signoz.io/signoz/pkg/query-service/model/metrics_explorer"
	v3 "go.signoz.io/signoz/pkg/query-service/model/v3"
)

// skipKey is an optional parameter to skip processing of a specific key
func BuildFilterConditions(fs *v3.FilterSet, skipKey string) ([]string, error) {
	if fs == nil || len(fs.Items) == 0 {
		return nil, nil
	}

	var conditions []string

	for _, item := range fs.Items {
		if skipKey != "" && item.Key.Key == skipKey {
			continue
		}

		toFormat := item.Value
		op := v3.FilterOperator(strings.ToLower(strings.TrimSpace(string(item.Operator))))
		if op == v3.FilterOperatorContains || op == v3.FilterOperatorNotContains {
			toFormat = fmt.Sprintf("%%%s%%", toFormat)
		}
		fmtVal := ClickHouseFormattedValue(toFormat)

		// Determine if the key is a JSON key or a normal column
		isJSONKey := false
		if _, exists := metrics_explorer.AvailableColumnFilterMap[item.Key.Key]; exists {
			isJSONKey = false
		} else {
			isJSONKey = true
		}

		condition, err := buildSingleFilterCondition(item.Key.Key, op, fmtVal, isJSONKey)
		if err != nil {
			return nil, err
		}
		conditions = append(conditions, condition)
	}

	return conditions, nil
}

func buildSingleFilterCondition(key string, op v3.FilterOperator, fmtVal string, isJSONKey bool) (string, error) {
	var keyCondition string
	if isJSONKey {
		keyCondition = fmt.Sprintf("JSONExtractString(labels, '%s')", key)
	} else {
		keyCondition = key // Assuming normal column access
	}

	switch op {
	case v3.FilterOperatorEqual:
		return fmt.Sprintf("%s = %s", keyCondition, fmtVal), nil
	case v3.FilterOperatorNotEqual:
		return fmt.Sprintf("%s != %s", keyCondition, fmtVal), nil
	case v3.FilterOperatorIn:
		return fmt.Sprintf("%s IN %s", keyCondition, fmtVal), nil
	case v3.FilterOperatorNotIn:
		return fmt.Sprintf("%s NOT IN %s", keyCondition, fmtVal), nil
	case v3.FilterOperatorLike:
		return fmt.Sprintf("like(%s, %s)", keyCondition, fmtVal), nil
	case v3.FilterOperatorNotLike:
		return fmt.Sprintf("notLike(%s, %s)", keyCondition, fmtVal), nil
	case v3.FilterOperatorRegex:
		return fmt.Sprintf("match(%s, %s)", keyCondition, fmtVal), nil
	case v3.FilterOperatorNotRegex:
		return fmt.Sprintf("not match(%s, %s)", keyCondition, fmtVal), nil
	case v3.FilterOperatorGreaterThan:
		return fmt.Sprintf("%s > %s", keyCondition, fmtVal), nil
	case v3.FilterOperatorGreaterThanOrEq:
		return fmt.Sprintf("%s >= %s", keyCondition, fmtVal), nil
	case v3.FilterOperatorLessThan:
		return fmt.Sprintf("%s < %s", keyCondition, fmtVal), nil
	case v3.FilterOperatorLessThanOrEq:
		return fmt.Sprintf("%s <= %s", keyCondition, fmtVal), nil
	case v3.FilterOperatorContains:
		return fmt.Sprintf("like(%s, %s)", keyCondition, fmtVal), nil
	case v3.FilterOperatorNotContains:
		return fmt.Sprintf("notLike(%s, %s)", keyCondition, fmtVal), nil
	case v3.FilterOperatorExists:
		return fmt.Sprintf("has(JSONExtractKeys(labels), '%s')", key), nil
	case v3.FilterOperatorNotExists:
		return fmt.Sprintf("not has(JSONExtractKeys(labels), '%s')", key), nil
	default:
		return "", fmt.Errorf("unsupported filter operator: %s", op)
	}
}
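A sketch of the conditions these helpers emit, assuming (per AvailableColumnFilterMap above) that metric_name and unit are direct columns while every other key is looked up in the JSON labels blob:

```go
package main

import "fmt"

// condition mirrors the column-vs-JSON dispatch in buildSingleFilterCondition
// for the simple comparison operators.
func condition(key, op, val string, isColumn bool) string {
	target := fmt.Sprintf("JSONExtractString(labels, '%s')", key)
	if isColumn {
		target = key
	}
	return fmt.Sprintf("%s %s %s", target, op, val)
}

func main() {
	fmt.Println(condition("metric_name", "=", "'http_requests_total'", true))
	// metric_name = 'http_requests_total'
	fmt.Println(condition("k8s_cluster_name", "=", "'prod'", false))
	// JSONExtractString(labels, 'k8s_cluster_name') = 'prod'
}
```
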

var (
	sixHoursInMilliseconds = time.Hour.Milliseconds() * 6
	oneDayInMilliseconds   = time.Hour.Milliseconds() * 24
	oneWeekInMilliseconds  = oneDayInMilliseconds * 7
)

func WhichTSTableToUse(start, end int64) (int64, int64, string) {

	var tableName string
	if end-start < sixHoursInMilliseconds {
		// adjust the start time to nearest 1 hour
		start = start - (start % (time.Hour.Milliseconds() * 1))
		tableName = constants.SIGNOZ_TIMESERIES_v4_TABLENAME
	} else if end-start < oneDayInMilliseconds {
		// adjust the start time to nearest 6 hours
		start = start - (start % (time.Hour.Milliseconds() * 6))
		tableName = constants.SIGNOZ_TIMESERIES_v4_6HRS_TABLENAME
	} else if end-start < oneWeekInMilliseconds {
		// adjust the start time to nearest 1 day
		start = start - (start % (time.Hour.Milliseconds() * 24))
		tableName = constants.SIGNOZ_TIMESERIES_v4_1DAY_TABLENAME
	} else {
		if constants.UseMetricsPreAggregation() {
			// adjust the start time to nearest 1 week
			start = start - (start % (time.Hour.Milliseconds() * 24 * 7))
			tableName = constants.SIGNOZ_TIMESERIES_v4_1WEEK_TABLENAME
		} else {
			// continue to use the 1 day table
			start = start - (start % (time.Hour.Milliseconds() * 24))
			tableName = constants.SIGNOZ_TIMESERIES_v4_1DAY_TABLENAME
		}
	}

	return start, end, tableName
}

func WhichSampleTableToUse(start, end int64) (string, string) {
	if end-start < oneDayInMilliseconds {
		return constants.SIGNOZ_SAMPLES_V4_TABLENAME, "count(*)"
	} else if end-start < oneWeekInMilliseconds {
		return constants.SIGNOZ_SAMPLES_V4_AGG_5M_TABLENAME, "sum(count)"
	} else {
		return constants.SIGNOZ_SAMPLES_V4_AGG_30M_TABLENAME, "sum(count)"
	}
}
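The selection logic above trades granularity for scan size: the wider the window, the coarser the time-series table, with the start snapped down to that table's bucket boundary. A self-contained sketch with the table names from the constants in this diff inlined (pre-aggregation assumed off):

```go
package main

import (
	"fmt"
	"time"
)

func pickTSTable(start, end int64) (int64, string) {
	hour := time.Hour.Milliseconds()
	switch {
	case end-start < 6*hour:
		return start - start%hour, "distributed_time_series_v4"
	case end-start < 24*hour:
		return start - start%(6*hour), "distributed_time_series_v4_6hrs"
	default:
		return start - start%(24*hour), "distributed_time_series_v4_1day"
	}
}

func main() {
	// a 2-hour window: start snaps down to the hour, shortest-granularity table
	start := int64(1680066360000)
	end := start + 2*time.Hour.Milliseconds()
	s, table := pickTSTable(start, end)
	fmt.Println(s, table) // 1680066000000 distributed_time_series_v4
}
```
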
@@ -5,7 +5,7 @@ Follow the steps in this section to install a sample application named HotR.O.D,
 ```console
 kubectl create ns sample-application
 
-kubectl -n sample-application apply -f https://github.com/SigNoz/signoz/raw/develop/sample-apps/hotrod/hotrod.yaml
+kubectl -n sample-application apply -f https://github.com/SigNoz/signoz/raw/main/sample-apps/hotrod/hotrod.yaml
 ```
 
 In case you have installed SigNoz in a namespace other than `platform` or selected a Helm release name other than `my-release`, follow the steps below:
@@ -15,7 +15,7 @@ export HELM_RELEASE=my-release-2
 export SIGNOZ_NAMESPACE=platform-2
 export HOTROD_NAMESPACE=sample-application-2
 
-curl -sL https://github.com/SigNoz/signoz/raw/develop/sample-apps/hotrod/hotrod-install.sh | bash
+curl -sL https://github.com/SigNoz/signoz/raw/main/sample-apps/hotrod/hotrod-install.sh | bash
 ```
 
 To delete the sample application:
@@ -23,7 +23,7 @@ To delete sample application:
 ```console
 export HOTROD_NAMESPACE=sample-application-2
 
-curl -sL https://github.com/SigNoz/signoz/raw/develop/sample-apps/hotrod/hotrod-delete.sh | bash
+curl -sL https://github.com/SigNoz/signoz/raw/main/sample-apps/hotrod/hotrod-delete.sh | bash
 ```
 
 For testing with local scripts, you can use the following commands:

@@ -7,7 +7,7 @@ HOTROD_NAMESPACE=${HOTROD_NAMESPACE:-"sample-application"}
 if [[ "${HOTROD_NAMESPACE}" == "default" || "${HOTROD_NAMESPACE}" == "kube-system" || "${HOTROD_NAMESPACE}" == "platform" ]]; then
 	echo "Default k8s namespace and SigNoz namespace must not be deleted"
 	echo "Deleting components only"
-	kubectl delete --namespace="${HOTROD_NAMESPACE}" -f <(cat hotrod-template.yaml || curl -sL https://github.com/SigNoz/signoz/raw/develop/sample-apps/hotrod/hotrod-template.yaml)
+	kubectl delete --namespace="${HOTROD_NAMESPACE}" -f <(cat hotrod-template.yaml || curl -sL https://github.com/SigNoz/signoz/raw/main/sample-apps/hotrod/hotrod-template.yaml)
 else
 	echo "Delete HotROD sample app namespace ${HOTROD_NAMESPACE}"
 	kubectl delete namespace "${HOTROD_NAMESPACE}"

@@ -37,7 +37,7 @@ kubectl create namespace "$HOTROD_NAMESPACE" --save-config --dry-run -o yaml 2>/
 
 # Setup sample apps into specified namespace
 kubectl apply --namespace="${HOTROD_NAMESPACE}" -f <( \
-	(cat hotrod-template.yaml 2>/dev/null || curl -sL https://github.com/SigNoz/signoz/raw/develop/sample-apps/hotrod/hotrod-template.yaml) | \
+	(cat hotrod-template.yaml 2>/dev/null || curl -sL https://github.com/SigNoz/signoz/raw/main/sample-apps/hotrod/hotrod-template.yaml) | \
 	HOTROD_NAMESPACE="${HOTROD_NAMESPACE}" \
 	HOTROD_IMAGE="${HOTROD_IMAGE}" \
 	LOCUST_IMAGE="${LOCUST_IMAGE}" \