Compare commits
62 Commits
| Author | SHA1 | Date |
|---|---|---|
|  | 14ea7bd86a |  |
|  | 8bf0123370 |  |
|  | dc9ffcdd45 |  |
|  | c79223742f |  |
|  | d9a99827c0 |  |
|  | 1bf6faff8b |  |
|  | 95fb068bb0 |  |
|  | 0907ed280b |  |
|  | fc7a0a8354 |  |
|  | b27e30db58 |  |
|  | 1ab291f3e8 |  |
|  | 552d193cef |  |
|  | ba0f06f381 |  |
|  | bbbb1c1d60 |  |
|  | 32a09d4ca2 |  |
|  | 7ae43cf511 |  |
|  | d1887fdbfe |  |
|  | 5414a73b40 |  |
|  | 7f116d1597 |  |
|  | 2c1b530aa0 |  |
|  | 231b8467fd |  |
|  | 24910f6a39 |  |
|  | 03bf9afe03 |  |
|  | fbf047a477 |  |
|  | afc0559456 |  |
|  | 34e9247562 |  |
|  | bbd90bff0c |  |
|  | 3e0f5a866d |  |
|  | fb634303e8 |  |
|  | 5e828bf174 |  |
|  | 8f2ed0e46f |  |
|  | 19b25219f4 |  |
|  | 447700326a |  |
|  | 3ed4fb2b75 |  |
|  | 32750fa2af |  |
|  | 47b0671b27 |  |
|  | 9bc62d83d3 |  |
|  | 5e4cff7ae2 |  |
|  | 271ffbd1a1 |  |
|  | 5b691d26e4 |  |
|  | 6b6070fd45 |  |
|  | 28bf2fe3f7 |  |
|  | 849c3d1156 |  |
|  | b47a3e0932 |  |
|  | 4427f60708 |  |
|  | 319ca6af07 |  |
|  | 0f59baf740 |  |
|  | 4dfbdd2d63 |  |
|  | 73b5134971 |  |
|  | 20879dcf2e |  |
|  | e2a5729c5e |  |
|  | 556914f808 |  |
|  | c8830c9e3a |  |
|  | 865e487fc3 |  |
|  | 36e95332bc |  |
|  | f934f96dd8 |  |
|  | 148d7d99ed |  |
|  | 43369bdefb |  |
|  | da5bf3aea0 |  |
|  | 28c8df5e63 |  |
|  | 510815655f |  |
|  | 53d52254cb |  |
.gitignore (vendored, 3 changes)
```diff
@@ -14,6 +14,8 @@ frontend/coverage
 frontend/build
 frontend/.vscode
+frontend/.yarnclean
+frontend/.temp_cache

 # misc
 .DS_Store
 .env.local
@@ -33,7 +35,6 @@ frontend/cypress.env.json
 .idea

 **/.vscode
-*.tgz
 **/build
 **/storage
 **/locust-scripts/__pycache__/
```
```diff
@@ -16,7 +16,10 @@ Need to update [https://github.com/SigNoz/signoz/tree/main/frontend](https://git
 - `git clone https://github.com/SigNoz/signoz.git && cd signoz`
 - comment out frontend service section at `deploy/docker/clickhouse-setup/docker-compose.yaml#L38`
-- run `cd deploy && docker-compose -f docker/clickhouse-setup/docker-compose.yaml up -d` (this will install signoz locally without the frontend service)
+- run `cd deploy` to move to deploy directory
+- Install signoz locally without the frontend (see the sketch after this diff)
+- If you are using x86_64 processors (All Intel/AMD processors) run `sudo docker-compose --env-file ./docker/clickhouse-setup/env/x86_64.env -f docker/clickhouse-setup/docker-compose.yaml up -d`
+- If you are on arm64 processors (Apple M1 Macbooks) run `sudo docker-compose --env-file ./docker/clickhouse-setup/env/arm64.env -f docker/clickhouse-setup/docker-compose.yaml up -d`
 - `cd ../frontend` and change baseURL to `http://localhost:8080` in file `src/constants/env.ts`
 - `yarn install`
 - `yarn dev`
@@ -41,7 +44,9 @@ Need to update [https://github.com/SigNoz/signoz/tree/main/pkg/query-service](ht
 - `git clone https://github.com/SigNoz/signoz.git && cd signoz/deploy`
 - comment out frontend service section at `docker/clickhouse-setup/docker-compose.yaml#L38`
 - comment out query-service section at `docker/clickhouse-setup/docker-compose.yaml#L22`
-- Run `docker-compose -f docker/clickhouse-setup/docker-compose.yaml up -d` (this will install signoz locally without the frontend and query-service)
+- Install signoz locally without the frontend and query-service
+- If you are using x86_64 processors (All Intel/AMD processors) run `sudo docker-compose --env-file ./docker/clickhouse-setup/env/x86_64.env -f docker/clickhouse-setup/docker-compose.yaml up -d`
+- If you are on arm64 processors (Apple M1 Macbooks) run `sudo docker-compose --env-file ./docker/clickhouse-setup/env/arm64.env -f docker/clickhouse-setup/docker-compose.yaml up -d`
 - `STORAGE=clickhouse ClickHouseUrl=tcp://localhost:9001 go run main.go`

 **_Query Service should now be available at `http://localhost:8080`_**
```
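The contributor steps above now branch on CPU architecture. A minimal sketch of a wrapper that picks the right env file automatically, assuming it is run from the repository's `deploy/` directory (paths taken from the steps above):

```sh
#!/usr/bin/env bash
# Pick the env file that matches the host architecture, then bring SigNoz up.
env_file="./docker/clickhouse-setup/env/x86_64.env"
if [ "$(uname -m)" = "arm64" ]; then
    env_file="./docker/clickhouse-setup/env/arm64.env"
fi
sudo docker-compose --env-file "$env_file" \
    -f docker/clickhouse-setup/docker-compose.yaml up -d
```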
deploy/docker/clickhouse-setup/alertmanager.yml (new file, 35 lines)
```yaml
global:
  resolve_timeout: 1m
  slack_api_url: 'https://hooks.slack.com/services/xxx'

route:
  receiver: 'slack-notifications'

receivers:
- name: 'slack-notifications'
  slack_configs:
  - channel: '#alerts'
    send_resolved: true
    icon_url: https://avatars3.githubusercontent.com/u/3380462
    title: |-
      [{{ .Status | toUpper }}{{ if eq .Status "firing" }}:{{ .Alerts.Firing | len }}{{ end }}] {{ .CommonLabels.alertname }} for {{ .CommonLabels.job }}
      {{- if gt (len .CommonLabels) (len .GroupLabels) -}}
      {{" "}}(
      {{- with .CommonLabels.Remove .GroupLabels.Names }}
      {{- range $index, $label := .SortedPairs -}}
      {{ if $index }}, {{ end }}
      {{- $label.Name }}="{{ $label.Value -}}"
      {{- end }}
      {{- end -}}
      )
      {{- end }}
    text: >-
      {{ range .Alerts -}}
      *Alert:* {{ .Annotations.title }}{{ if .Labels.severity }} - `{{ .Labels.severity }}`{{ end }}

      *Description:* {{ .Annotations.description }}

      *Details:*
      {{ range .Labels.SortedPairs }} • *{{ .Name }}:* `{{ .Value }}`
      {{ end }}
      {{ end }}
```
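A minimal sketch for validating this file before (re)starting the stack, assuming `amtool` (which ships with Alertmanager) is available locally:

```sh
# Validate the Alertmanager configuration; exits non-zero on errors.
amtool check-config deploy/docker/clickhouse-setup/alertmanager.yml
```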
deploy/docker/clickhouse-setup/alerts.yml (new file, 11 lines)
```yaml
groups:
- name: ExampleCPULoadGroup
  rules:
  - alert: HighCpuLoad
    expr: system_cpu_load_average_1m > 0.1
    for: 0m
    labels:
      severity: warning
    annotations:
      summary: High CPU load
      description: "CPU load is > 0.1\n  VALUE = {{ $value }}\n  LABELS = {{ $labels }}"
```
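A minimal sketch for linting the rule file, assuming `promtool` (bundled with Prometheus) is installed:

```sh
# Check the alerting rules for syntax and template errors.
promtool check rules deploy/docker/clickhouse-setup/alerts.yml
```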
```diff
@@ -2,7 +2,7 @@ version: "2.4"

 services:
   clickhouse:
-    image: yandex/clickhouse-server
+    image: ${clickhouse_image}
     expose:
       - 8123
       - 9000
@@ -21,8 +21,19 @@ services:
       timeout: 5s
       retries: 3

+  alertmanager:
+    image: signoz/alertmanager:0.5.0
+    volumes:
+      - ./alertmanager.yml:/prometheus/alertmanager.yml
+      - ./data/alertmanager:/data
+    command:
+      - '--config.file=/prometheus/alertmanager.yml'
+      - '--storage.path=/data'
+    ports:
+      - 9093:9093
+
   query-service:
-    image: signoz/query-service:0.4.3
+    image: signoz/query-service:0.5.3
     container_name: query-service
     command: ["-config=/root/config/prometheus.yml"]
     ports:
@@ -43,7 +54,7 @@ services:
       condition: service_healthy

   frontend:
-    image: signoz/frontend:0.4.3
+    image: signoz/frontend:0.5.3
     container_name: frontend

     depends_on:
```
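The ClickHouse image is now the `${clickhouse_image}` variable, resolved from the env files added below. A minimal sketch for checking what it resolves to, run from the `deploy/` directory of a checkout:

```sh
# Render the effective compose file and show which images were interpolated.
docker-compose --env-file ./docker/clickhouse-setup/env/arm64.env \
    -f docker/clickhouse-setup/docker-compose.yaml config | grep 'image:'
```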
deploy/docker/clickhouse-setup/env/arm64.env (new vendored file, 1 line)

```
clickhouse_image=altinity/clickhouse-server:21.8.12.1.testingarm
```
deploy/docker/clickhouse-setup/env/x86_64.env (new vendored file, 1 line)

```
clickhouse_image=yandex/clickhouse-server
```
```diff
@@ -9,7 +9,7 @@ receivers:
     config:
       scrape_configs:
         - job_name: "otel-collector"
-          scrape_interval: 60s
+          scrape_interval: 30s
           static_configs:
             - targets: ["otel-collector:8889"]
 processors:
```
```diff
@@ -9,12 +9,13 @@ alerting:
   alertmanagers:
     - static_configs:
         - targets:
-          # - alertmanager:9093
+          - alertmanager:9093

 # Load rules once and periodically evaluate them according to the global 'evaluation_interval'.
 rule_files:
   # - "first_rules.yml"
   # - "second_rules.yml"
+  - 'alerts.yml'

 # A scrape configuration containing exactly one endpoint to scrape:
 # Here it's Prometheus itself.
```
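With the Alertmanager target uncommented and `alerts.yml` wired in, the whole Prometheus config can be validated in one pass. A sketch, with the path assumed from this setup's layout:

```sh
# Validates the config and every rule file it references.
promtool check config deploy/docker/clickhouse-setup/prometheus.yml
```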
```diff
@@ -16,6 +16,9 @@ server {
         index index.html index.htm;
         try_files $uri $uri/ /index.html;
       }
+      location /api/alertmanager{
+        proxy_pass http://alertmanager:9093/api/v2;
+      }
       location /api {
         proxy_pass http://query-service:8080/api;
```
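A sketch for exercising the new route once the stack is up, assuming the frontend is published on port 3301; nginx should proxy this to `alertmanager:9093/api/v2/status`:

```sh
# Hits Alertmanager's v2 status endpoint through the frontend proxy.
curl -s http://localhost:3301/api/alertmanager/status
```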
```diff
@@ -36,6 +36,10 @@ is_mac() {
     [[ $OSTYPE == darwin* ]]
 }

+is_arm64(){
+    [[ `uname -m` == 'arm64' ]]
+}
+
 check_os() {
     if is_mac; then
         package_manager="brew"
```
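The new `is_arm64` helper drives all the architecture branching that follows. A standalone sketch of how it behaves:

```sh
#!/usr/bin/env bash
# Standalone copy of the installer's helper: `uname -m` reports the machine
# hardware name, which is "arm64" on Apple Silicon macOS.
is_arm64(){
    [[ `uname -m` == 'arm64' ]]
}

if is_arm64; then
    echo "arm64 host: the installer will use env/arm64.env"
else
    echo "non-arm64 host: the installer will use env/x86_64.env"
fi
```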
```diff
@@ -160,8 +164,9 @@ install_docker() {
         echo
         echo "Amazon Linux detected ... "
         echo
-        sudo yum install docker
-        sudo service docker start
+        # sudo yum install docker
+        # sudo service docker start
+        sudo amazon-linux-extras install docker
     else

         yum_cmd="sudo yum --assumeyes --quiet"
@@ -266,7 +271,11 @@ bye() {  # Prints a friendly good bye message and exits the script.
         echo "🔴 The containers didn't seem to start correctly. Please run the following command to check containers that may have errored out:"
         echo ""
         if [ $setup_type == 'clickhouse' ]; then
-            echo -e "sudo docker-compose -f docker/clickhouse-setup/docker-compose.yaml ps -a"
+            if is_arm64; then
+                echo -e "sudo docker-compose --env-file ./docker/clickhouse-setup/env/arm64.env -f docker/clickhouse-setup/docker-compose.yaml ps -a"
+            else
+                echo -e "sudo docker-compose --env-file ./docker/clickhouse-setup/env/x86_64.env -f docker/clickhouse-setup/docker-compose.yaml ps -a"
+            fi
         else
             echo -e "sudo docker-compose -f docker/druid-kafka-setup/docker-compose-tiny.yaml ps -a"
         fi
```
```diff
@@ -313,28 +322,30 @@ check_os

 SIGNOZ_INSTALLATION_ID=$(curl -s 'https://api64.ipify.org')

-echo ""
+# echo ""

-echo -e "👉 ${RED}Two ways to go forward\n"
-echo -e "${RED}1) ClickHouse as database (default)\n"
-echo -e "${RED}2) Kafka + Druid as datastore \n"
-read -p "⚙️ Enter your preference (1/2):" choice_setup
+# echo -e "👉 ${RED}Two ways to go forward\n"
+# echo -e "${RED}1) ClickHouse as database (default)\n"
+# echo -e "${RED}2) Kafka + Druid as datastore \n"
+# read -p "⚙️ Enter your preference (1/2):" choice_setup

-while [[ $choice_setup != "1" && $choice_setup != "2" && $choice_setup != "" ]]
-do
-    # echo $choice_setup
-    echo -e "\n❌ ${CYAN}Please enter either 1 or 2"
-    read -p "⚙️ Enter your preference (1/2): " choice_setup
-    # echo $choice_setup
-done
+# while [[ $choice_setup != "1" && $choice_setup != "2" && $choice_setup != "" ]]
+# do
+# # echo $choice_setup
+# echo -e "\n❌ ${CYAN}Please enter either 1 or 2"
+# read -p "⚙️ Enter your preference (1/2): " choice_setup
+# # echo $choice_setup
+# done

-if [[ $choice_setup == "1" || $choice_setup == "" ]];then
-    setup_type='clickhouse'
-else
-    setup_type='druid'
-fi
+# if [[ $choice_setup == "1" || $choice_setup == "" ]];then
+# setup_type='clickhouse'
+# else
+# setup_type='druid'
+# fi

-echo -e "\n✅ ${CYAN}You have chosen: ${setup_type} setup\n"
+setup_type='clickhouse'
+
+# echo -e "\n✅ ${CYAN}You have chosen: ${setup_type} setup\n"

 # Run bye if failure happens
 trap bye EXIT
```
```diff
@@ -405,7 +416,11 @@ start_docker
 echo ""
 echo -e "\n🟡 Pulling the latest container images for SigNoz. To run as sudo it may ask for system password\n"
 if [ $setup_type == 'clickhouse' ]; then
-    sudo docker-compose -f ./docker/clickhouse-setup/docker-compose.yaml pull
+    if is_arm64; then
+        sudo docker-compose --env-file ./docker/clickhouse-setup/env/arm64.env -f ./docker/clickhouse-setup/docker-compose.yaml pull
+    else
+        sudo docker-compose --env-file ./docker/clickhouse-setup/env/x86_64.env -f ./docker/clickhouse-setup/docker-compose.yaml pull
+    fi
 else
     sudo docker-compose -f ./docker/druid-kafka-setup/docker-compose-tiny.yaml pull
 fi
@@ -417,7 +432,11 @@ echo
 # The docker-compose command does some nasty stuff for the `--detach` functionality. So we add a `|| true` so that the
 # script doesn't exit because this command looks like it failed to do it's thing.
 if [ $setup_type == 'clickhouse' ]; then
-    sudo docker-compose -f ./docker/clickhouse-setup/docker-compose.yaml up --detach --remove-orphans || true
+    if is_arm64; then
+        sudo docker-compose --env-file ./docker/clickhouse-setup/env/arm64.env -f ./docker/clickhouse-setup/docker-compose.yaml up --detach --remove-orphans || true
+    else
+        sudo docker-compose --env-file ./docker/clickhouse-setup/env/x86_64.env -f ./docker/clickhouse-setup/docker-compose.yaml up --detach --remove-orphans || true
+    fi
 else
     sudo docker-compose -f ./docker/druid-kafka-setup/docker-compose-tiny.yaml up --detach --remove-orphans || true
 fi
@@ -477,7 +496,7 @@ else
     echo ""

     if [ $setup_type == 'clickhouse' ]; then
-        echo "ℹ️ To bring down SigNoz and clean volumes : sudo docker-compose -f docker/clickhouse-setup/docker-compose.yaml down -v"
+        echo "ℹ️ To bring down SigNoz and clean volumes : sudo docker-compose --env-file ./docker/clickhouse-setup/env/arm64.env -f docker/clickhouse-setup/docker-compose.yaml down -v"
     else
         echo "ℹ️ To bring down SigNoz and clean volumes : sudo docker-compose -f docker/druid-kafka-setup/docker-compose-tiny.yaml down -v"
     fi
```
(deleted file, 7 lines)

```yaml
apiVersion: v1
kind: ConfigMap
metadata:
  name: retention-config
data:
  retention-spec.json: |
    [{"period":"P3D","includeFuture":true,"tieredReplicants":{"_default_tier":1},"type":"loadByPeriod"},{"type":"dropForever"}]
```
(deleted file, 29 lines)

```yaml
apiVersion: batch/v1
kind: Job
metadata:
  name: set-retention
  annotations:
    "helm.sh/hook": post-install,post-upgrade
spec:
  ttlSecondsAfterFinished: 100
  template:
    spec:
      containers:
      - name: set-retention
        image: theithollow/hollowapp-blog:curl
        volumeMounts:
        - name: retention-config-volume
          mountPath: /app/retention-spec.json
          subPath: retention-spec.json
        args:
        - /bin/sh
        - -c
        - "curl -X POST -H 'Content-Type: application/json' -d @/app/retention-spec.json http://signoz-druid-router:8888/druid/coordinator/v1/rules/flattened_spans"

      volumes:
      - name: retention-config-volume
        configMap:
          name: retention-config

      restartPolicy: Never
  backoffLimit: 8
```
(deleted file, 76 lines)

```yaml
apiVersion: v1
kind: ConfigMap
metadata:
  name: supervisor-config
data:
  supervisor-spec.json: |
    {
      "type": "kafka",
      "dataSchema": {
        "dataSource": "flattened_spans",
        "parser": {
          "type": "string",
          "parseSpec": {
            "format": "json",
            "timestampSpec": {
              "column": "StartTimeUnixNano",
              "format": "nano"
            },
            "dimensionsSpec": {
              "dimensions": [
                "TraceId",
                "SpanId",
                "ParentSpanId",
                "Name",
                "ServiceName",
                "References",
                "Tags",
                "ExternalHttpMethod",
                "ExternalHttpUrl",
                "Component",
                "DBSystem",
                "DBName",
                "DBOperation",
                "PeerService",
                {
                  "type": "string",
                  "name": "TagsKeys",
                  "multiValueHandling": "ARRAY"
                },
                {
                  "type": "string",
                  "name": "TagsValues",
                  "multiValueHandling": "ARRAY"
                },
                { "name": "DurationNano", "type": "Long" },
                { "name": "Kind", "type": "int" },
                { "name": "StatusCode", "type": "int" }
              ]
            }
          }
        },
        "metricsSpec" : [
          { "type": "quantilesDoublesSketch", "name": "QuantileDuration", "fieldName": "DurationNano" }
        ],
        "granularitySpec": {
          "type": "uniform",
          "segmentGranularity": "DAY",
          "queryGranularity": "NONE",
          "rollup": false
        }
      },
      "tuningConfig": {
        "type": "kafka",
        "reportParseExceptions": true
      },
      "ioConfig": {
        "topic": "flattened_spans",
        "replicas": 1,
        "taskDuration": "PT20M",
        "completionTimeout": "PT30M",
        "consumerProperties": {
          "bootstrap.servers": "signoz-kafka:9092"
        }
      }
    }
```
(deleted file, 27 lines)

```yaml
apiVersion: batch/v1
kind: Job
metadata:
  name: create-supervisor
  annotations:
    "helm.sh/hook": post-install,post-upgrade
spec:
  ttlSecondsAfterFinished: 100
  template:
    spec:
      containers:
      - name: create-supervisor
        image: theithollow/hollowapp-blog:curl
        volumeMounts:
        - name: supervisor-config-volume
          mountPath: /app/supervisor-spec.json
          subPath: supervisor-spec.json
        args:
        - /bin/sh
        - -c
        - "curl -X POST -H 'Content-Type: application/json' -d @/app/supervisor-spec.json http://signoz-druid-router:8888/druid/indexer/v1/supervisor"
      volumes:
      - name: supervisor-config-volume
        configMap:
          name: supervisor-config
      restartPolicy: Never
  backoffLimit: 8
```
(deleted file, 60 lines)

```yaml
apiVersion: v1
kind: ConfigMap
metadata:
  name: otel-collector-conf
  labels:
    app: opentelemetry
    component: otel-collector-conf
data:
  otel-collector-config: |
    receivers:
      otlp:
        protocols:
          grpc:
          http:
      jaeger:
        protocols:
          grpc:
          thrift_http:
    processors:
      batch:
        send_batch_size: 1000
        timeout: 10s
      memory_limiter:
        # Same as --mem-ballast-size-mib CLI argument
        ballast_size_mib: 683
        # 80% of maximum memory up to 2G
        limit_mib: 1500
        # 25% of limit up to 2G
        spike_limit_mib: 512
        check_interval: 5s
      queued_retry:
        num_workers: 4
        queue_size: 100
        retry_on_failure: true
    extensions:
      health_check: {}
      zpages: {}
    exporters:
      kafka/traces:
        brokers:
          - signoz-kafka:9092
        topic: 'otlp_spans'
        protocol_version: 2.0.0

      kafka/metrics:
        brokers:
          - signoz-kafka:9092
        topic: 'otlp_metrics'
        protocol_version: 2.0.0
    service:
      extensions: [health_check, zpages]
      pipelines:
        traces:
          receivers: [jaeger, otlp]
          processors: [memory_limiter, batch, queued_retry]
          exporters: [kafka/traces]
        metrics:
          receivers: [otlp]
          processors: [batch]
          exporters: [kafka/metrics]
```
```diff
@@ -2,20 +2,14 @@ dependencies:
 - name: zookeeper
   repository: https://charts.bitnami.com/bitnami
   version: 6.0.0
-- name: kafka
-  repository: https://charts.bitnami.com/bitnami
-  version: 12.0.0
-- name: druid
-  repository: https://charts.helm.sh/incubator
-  version: 0.2.18
-- name: flattener-processor
-  repository: file://./signoz-charts/flattener-processor
-  version: 0.3.6
 - name: query-service
   repository: file://./signoz-charts/query-service
-  version: 0.3.6
+  version: 0.5.3
 - name: frontend
   repository: file://./signoz-charts/frontend
-  version: 0.3.6
-digest: sha256:b160e903c630a90644683c512eb8ba018e18d2c08051e255edd3749cb9cc7228
-generated: "2021-08-23T12:06:37.231066+05:30"
+  version: 0.5.3
+- name: alertmanager
+  repository: file://./signoz-charts/alertmanager
+  version: 0.5.0
+digest: sha256:891df4e1a3d9fc7bf2b4b67beb4e505f1ec4da20952c2e294ad8b16fcc4eccb7
+generated: "2021-12-11T11:40:26.756851+05:30"
```
```diff
@@ -15,29 +15,23 @@ type: application
 # This is the chart version. This version number should be incremented each time you make changes
 # to the chart and its templates, including the app version.
 # Versions are expected to follow Semantic Versioning (https://semver.org/)
-version: 0.3.2
+version: 0.5.3

 # This is the version number of the application being deployed. This version number should be
 # incremented each time you make changes to the application. Versions are not expected to
 # follow Semantic Versioning. They should reflect the version the application is using.
-appVersion: 0.3.2
+appVersion: 0.5.3

 dependencies:
 - name: zookeeper
   repository: "https://charts.bitnami.com/bitnami"
   version: 6.0.0
-- name: kafka
-  repository: "https://charts.bitnami.com/bitnami"
-  version: 12.0.0
-- name: druid
-  repository: "https://charts.helm.sh/incubator"
-  version: 0.2.18
-- name: flattener-processor
-  repository: "file://./signoz-charts/flattener-processor"
-  version: 0.3.6
 - name: query-service
   repository: "file://./signoz-charts/query-service"
-  version: 0.3.6
+  version: 0.5.3
 - name: frontend
   repository: "file://./signoz-charts/frontend"
-  version: 0.3.6
+  version: 0.5.3
+- name: alertmanager
+  repository: "file://./signoz-charts/alertmanager"
+  version: 0.5.0
```
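After editing the dependency list in Chart.yaml, the lock file and the vendored archives under `charts/` are regenerated with the standard Helm workflow; a sketch, with the chart path assumed from the repo layout:

```sh
# Re-resolve dependencies, rewrite Chart.lock, and re-download the .tgz archives.
helm dependency update deploy/kubernetes/platform
```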
New binary chart archives (contents not shown):

- deploy/kubernetes/platform/charts/alertmanager-0.5.0.tgz
- deploy/kubernetes/platform/charts/frontend-0.5.3.tgz
- deploy/kubernetes/platform/charts/query-service-0.5.3.tgz
- deploy/kubernetes/platform/charts/zookeeper-6.0.0.tgz
deploy/kubernetes/platform/crds/clickhouse-operator-install.yaml (new file, 1223 lines; diff suppressed because it is too large)
```diff
@@ -21,3 +21,5 @@
 .idea/
 *.tmproj
 .vscode/
+
+unittests/
```
(new file, 7 lines)

```yaml
apiVersion: v2
name: alertmanager
description: The Alertmanager handles alerts sent by client applications such as the Prometheus server.
type: application
version: 0.5.0
appVersion: 0.5.0
```
(new file, 2 lines)

```yaml
configmapReload:
  enabled: true
```
```diff
@@ -2,20 +2,20 @@
 {{- if .Values.ingress.enabled }}
 {{- range $host := .Values.ingress.hosts }}
   {{- range .paths }}
-  http{{ if $.Values.ingress.tls }}s{{ end }}://{{ $host.host }}{{ . }}
+  http{{ if $.Values.ingress.tls }}s{{ end }}://{{ $host.host }}{{ .path }}
   {{- end }}
 {{- end }}
 {{- else if contains "NodePort" .Values.service.type }}
-  export NODE_PORT=$(kubectl get --namespace {{ .Release.Namespace }} -o jsonpath="{.spec.ports[0].nodePort}" services {{ include "flattener-processor.fullname" . }})
+  export NODE_PORT=$(kubectl get --namespace {{ .Release.Namespace }} -o jsonpath="{.spec.ports[0].nodePort}" services {{ include "alertmanager.fullname" . }})
   export NODE_IP=$(kubectl get nodes --namespace {{ .Release.Namespace }} -o jsonpath="{.items[0].status.addresses[0].address}")
   echo http://$NODE_IP:$NODE_PORT
 {{- else if contains "LoadBalancer" .Values.service.type }}
   NOTE: It may take a few minutes for the LoadBalancer IP to be available.
-  You can watch the status of by running 'kubectl get --namespace {{ .Release.Namespace }} svc -w {{ include "flattener-processor.fullname" . }}'
-  export SERVICE_IP=$(kubectl get svc --namespace {{ .Release.Namespace }} {{ include "flattener-processor.fullname" . }} --template "{{"{{ range (index .status.loadBalancer.ingress 0) }}{{.}}{{ end }}"}}")
+  You can watch the status of by running 'kubectl get --namespace {{ .Release.Namespace }} svc -w {{ include "alertmanager.fullname" . }}'
+  export SERVICE_IP=$(kubectl get svc --namespace {{ .Release.Namespace }} {{ include "alertmanager.fullname" . }} --template "{{"{{ range (index .status.loadBalancer.ingress 0) }}{{.}}{{ end }}"}}")
   echo http://$SERVICE_IP:{{ .Values.service.port }}
 {{- else if contains "ClusterIP" .Values.service.type }}
-  export POD_NAME=$(kubectl get pods --namespace {{ .Release.Namespace }} -l "app.kubernetes.io/name={{ include "flattener-processor.name" . }},app.kubernetes.io/instance={{ .Release.Name }}" -o jsonpath="{.items[0].metadata.name}")
-  echo "Visit http://127.0.0.1:8080 to use your application"
-  kubectl --namespace {{ .Release.Namespace }} port-forward $POD_NAME 8080:80
+  export POD_NAME=$(kubectl get pods --namespace {{ .Release.Namespace }} -l "app.kubernetes.io/name={{ include "alertmanager.name" . }},app.kubernetes.io/instance={{ .Release.Name }}" -o jsonpath="{.items[0].metadata.name}")
+  echo "Visit http://127.0.0.1:{{ .Values.service.port }} to use your application"
+  kubectl --namespace {{ .Release.Namespace }} port-forward $POD_NAME {{ .Values.service.port }}:80
 {{- end }}
```
```diff
@@ -2,7 +2,7 @@
 {{/*
 Expand the name of the chart.
 */}}
-{{- define "flattener-processor.name" -}}
+{{- define "alertmanager.name" -}}
 {{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}}
 {{- end -}}

@@ -11,7 +11,7 @@ Create a default fully qualified app name.
 We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
 If release name contains chart name it will be used as a full name.
 */}}
-{{- define "flattener-processor.fullname" -}}
+{{- define "alertmanager.fullname" -}}
 {{- if .Values.fullnameOverride -}}
 {{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}}
 {{- else -}}
@@ -27,16 +27,16 @@ If release name contains chart name it will be used as a full name.
 {{/*
 Create chart name and version as used by the chart label.
 */}}
-{{- define "flattener-processor.chart" -}}
+{{- define "alertmanager.chart" -}}
 {{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}}
 {{- end -}}

 {{/*
 Common labels
 */}}
-{{- define "flattener-processor.labels" -}}
-helm.sh/chart: {{ include "flattener-processor.chart" . }}
-{{ include "flattener-processor.selectorLabels" . }}
+{{- define "alertmanager.labels" -}}
+helm.sh/chart: {{ include "alertmanager.chart" . }}
+{{ include "alertmanager.selectorLabels" . }}
 {{- if .Chart.AppVersion }}
 app.kubernetes.io/version: {{ .Chart.AppVersion | quote }}
 {{- end }}
@@ -46,17 +46,17 @@ app.kubernetes.io/managed-by: {{ .Release.Service }}
 {{/*
 Selector labels
 */}}
-{{- define "flattener-processor.selectorLabels" -}}
-app.kubernetes.io/name: {{ include "flattener-processor.name" . }}
+{{- define "alertmanager.selectorLabels" -}}
+app.kubernetes.io/name: {{ include "alertmanager.name" . }}
 app.kubernetes.io/instance: {{ .Release.Name }}
 {{- end -}}

 {{/*
 Create the name of the service account to use
 */}}
-{{- define "flattener-processor.serviceAccountName" -}}
+{{- define "alertmanager.serviceAccountName" -}}
 {{- if .Values.serviceAccount.create -}}
-{{ default (include "flattener-processor.fullname" .) .Values.serviceAccount.name }}
+{{ default (include "alertmanager.fullname" .) .Values.serviceAccount.name }}
 {{- else -}}
 {{ default "default" .Values.serviceAccount.name }}
 {{- end -}}
```
(new file, 15 lines)

```yaml
{{- if .Values.config }}
apiVersion: v1
kind: ConfigMap
metadata:
  name: {{ include "alertmanager.fullname" . }}
  labels:
    {{- include "alertmanager.labels" . | nindent 4 }}
data:
  alertmanager.yml: |
    {{- toYaml .Values.config | default "{}" | nindent 4 }}
  {{- range $key, $value := .Values.templates }}
  {{ $key }}: |-
    {{- $value | nindent 4 }}
  {{- end }}
{{- end }}
```
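One way to eyeball the rendered ConfigMap is `helm template` with `--show-only`; a sketch, with an arbitrary release name and the chart path assumed from the repo layout:

```sh
# Render only the ConfigMap to verify .Values.config and .Values.templates
# land under the expected data keys.
helm template my-signoz deploy/kubernetes/platform/signoz-charts/alertmanager \
    --show-only templates/configmap.yaml
```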
(new file, 61 lines)

```yaml
{{- if .Values.ingress.enabled -}}
{{- $fullName := include "alertmanager.fullname" . -}}
{{- $svcPort := .Values.service.port -}}
{{- if and .Values.ingress.className (not (semverCompare ">=1.18-0" .Capabilities.KubeVersion.GitVersion)) }}
  {{- if not (hasKey .Values.ingress.annotations "kubernetes.io/ingress.class") }}
  {{- $_ := set .Values.ingress.annotations "kubernetes.io/ingress.class" .Values.ingress.className}}
  {{- end }}
{{- end }}
{{- if semverCompare ">=1.19-0" .Capabilities.KubeVersion.GitVersion -}}
apiVersion: networking.k8s.io/v1
{{- else if semverCompare ">=1.14-0" .Capabilities.KubeVersion.GitVersion -}}
apiVersion: networking.k8s.io/v1beta1
{{- else -}}
apiVersion: extensions/v1beta1
{{- end }}
kind: Ingress
metadata:
  name: {{ $fullName }}
  labels:
    {{- include "alertmanager.labels" . | nindent 4 }}
  {{- with .Values.ingress.annotations }}
  annotations:
    {{- toYaml . | nindent 4 }}
  {{- end }}
spec:
  {{- if and .Values.ingress.className (semverCompare ">=1.18-0" .Capabilities.KubeVersion.GitVersion) }}
  ingressClassName: {{ .Values.ingress.className }}
  {{- end }}
  {{- if .Values.ingress.tls }}
  tls:
    {{- range .Values.ingress.tls }}
    - hosts:
        {{- range .hosts }}
        - {{ . | quote }}
        {{- end }}
      secretName: {{ .secretName }}
    {{- end }}
  {{- end }}
  rules:
    {{- range .Values.ingress.hosts }}
    - host: {{ .host | quote }}
      http:
        paths:
          {{- range .paths }}
          - path: {{ .path }}
            {{- if and .pathType (semverCompare ">=1.18-0" $.Capabilities.KubeVersion.GitVersion) }}
            pathType: {{ .pathType }}
            {{- end }}
            backend:
              {{- if semverCompare ">=1.19-0" $.Capabilities.KubeVersion.GitVersion }}
              service:
                name: {{ $fullName }}
                port:
                  number: {{ $svcPort }}
              {{- else }}
              serviceName: {{ $fullName }}
              servicePort: {{ $svcPort }}
              {{- end }}
          {{- end }}
    {{- end }}
{{- end }}
```
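A sketch for rendering the Ingress with a class set, mirroring what the unit tests later in this diff assert for Kubernetes >= 1.19 (release name arbitrary, chart path assumed):

```sh
# Render only the Ingress with ingress enabled and a test class name.
helm template my-signoz deploy/kubernetes/platform/signoz-charts/alertmanager \
    --set ingress.enabled=true \
    --set ingress.className=nginx-test \
    --show-only templates/ingress.yaml
```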
(new file, 13 lines)

```yaml
{{- if .Values.podDisruptionBudget -}}
apiVersion: policy/v1beta1
kind: PodDisruptionBudget
metadata:
  name: {{ template "alertmanager.fullname" . }}
  labels:
    {{- include "alertmanager.labels" . | nindent 4 }}
spec:
  selector:
    matchLabels:
      {{- include "alertmanager.selectorLabels" . | nindent 6 }}
{{ toYaml .Values.podDisruptionBudget | indent 2 }}
{{- end -}}
```
```diff
@@ -2,9 +2,9 @@
 apiVersion: v1
 kind: ServiceAccount
 metadata:
-  name: {{ include "flattener-processor.serviceAccountName" . }}
+  name: {{ include "alertmanager.serviceAccountName" . }}
   labels:
-    {{- include "flattener-processor.labels" . | nindent 4 }}
+    {{- include "alertmanager.labels" . | nindent 4 }}
   {{- with .Values.serviceAccount.annotations }}
   annotations:
     {{- toYaml . | nindent 4 }}
```
(new file, 48 lines)

```yaml
apiVersion: v1
kind: Service
metadata:
  name: alertmanager
  labels:
    {{- include "alertmanager.labels" . | nindent 4 }}
  {{- if .Values.service.annotations }}
  annotations:
    {{- toYaml .Values.service.annotations | nindent 4 }}
  {{- end }}
spec:
  type: {{ .Values.service.type }}
  ports:
    - port: {{ .Values.service.port }}
      targetPort: http
      protocol: TCP
      name: http
      {{- if (and (eq .Values.service.type "NodePort") .Values.service.nodePort) }}
      nodePort: {{ .Values.service.nodePort }}
      {{- end }}
  selector:
    {{- include "alertmanager.selectorLabels" . | nindent 4 }}
---
apiVersion: v1
kind: Service
metadata:
  name: {{ include "alertmanager.fullname" . }}-headless
  labels:
    {{- include "alertmanager.labels" . | nindent 4 }}
spec:
  clusterIP: None
  ports:
    - port: {{ .Values.service.port }}
      targetPort: http
      protocol: TCP
      name: http
    {{- if or (gt .Values.replicaCount 1.0) (.Values.additionalPeers) }}
    - port: 9094
      targetPort: 9094
      protocol: TCP
      name: cluster-tcp
    - port: 9094
      targetPort: 9094
      protocol: UDP
      name: cluster-udp
    {{- end }}
  selector:
    {{- include "alertmanager.selectorLabels" . | nindent 4 }}
```
(new file, 152 lines)

```yaml
apiVersion: apps/v1
kind: StatefulSet
metadata:
  name: {{ include "alertmanager.fullname" . }}
  labels:
    {{- include "alertmanager.labels" . | nindent 4 }}
  {{- if .Values.statefulSet.annotations }}
  annotations:
    {{ toYaml .Values.statefulSet.annotations | nindent 4 }}
  {{- end }}
spec:
  replicas: {{ .Values.replicaCount }}
  selector:
    matchLabels:
      {{- include "alertmanager.selectorLabels" . | nindent 6 }}
  serviceName: {{ include "alertmanager.fullname" . }}-headless
  template:
    metadata:
      labels:
        {{- include "alertmanager.selectorLabels" . | nindent 8 }}
        {{- if .Values.podLabels }}
        {{ toYaml .Values.podLabels | nindent 8 }}
        {{- end }}
      annotations:
        {{- if not .Values.configmapReload.enabled }}
        checksum/config: {{ include (print $.Template.BasePath "/configmap.yaml") . | sha256sum }}
        {{- end }}
        {{- if .Values.podAnnotations }}
        {{- toYaml .Values.podAnnotations | nindent 8 }}
        {{- end }}
    spec:
      {{- with .Values.imagePullSecrets }}
      imagePullSecrets:
        {{- toYaml . | nindent 8 }}
      {{- end }}
      serviceAccountName: {{ include "alertmanager.serviceAccountName" . }}
      {{- with .Values.dnsConfig }}
      dnsConfig:
        {{- toYaml . | nindent 8 }}
      {{- end }}
      {{- with .Values.nodeSelector }}
      nodeSelector:
        {{- toYaml . | nindent 8 }}
      {{- end }}
      {{- with .Values.affinity }}
      affinity:
        {{- toYaml . | nindent 8 }}
      {{- end }}
      {{- with .Values.tolerations }}
      tolerations:
        {{- toYaml . | nindent 8 }}
      {{- end }}
      securityContext:
        {{- toYaml .Values.podSecurityContext | nindent 8 }}
      containers:
        {{- if and (.Values.configmapReload.enabled) (.Values.config) }}
        - name: {{ .Chart.Name }}-{{ .Values.configmapReload.name }}
          image: "{{ .Values.configmapReload.image.repository }}:{{ .Values.configmapReload.image.tag }}"
          imagePullPolicy: "{{ .Values.configmapReload.image.pullPolicy }}"
          args:
            - --volume-dir=/etc/alertmanager
            - --webhook-url=http://127.0.0.1:{{ .Values.service.port }}/-/reload
          resources:
            {{- toYaml .Values.configmapReload.resources | nindent 12 }}
          volumeMounts:
            - name: config
              mountPath: /etc/alertmanager
        {{- end }}
        - name: {{ .Chart.Name }}
          securityContext:
            {{- toYaml .Values.securityContext | nindent 12 }}
          image: "{{ .Values.image.repository }}:{{ .Values.image.tag | default .Chart.AppVersion }}"
          imagePullPolicy: {{ .Values.image.pullPolicy }}
          env:
            - name: POD_IP
              valueFrom:
                fieldRef:
                  apiVersion: v1
                  fieldPath: status.podIP
          {{- if .Values.command }}
          command:
            {{- toYaml .Values.command | nindent 12 }}
          {{- end }}
          args:
            - --storage.path=/alertmanager
            - --config.file=/etc/alertmanager/alertmanager.yml
            {{- if or (gt .Values.replicaCount 1.0) (.Values.additionalPeers) }}
            - --cluster.advertise-address=$(POD_IP):9094
            - --cluster.listen-address=0.0.0.0:9094
            {{- end }}
            {{- if gt .Values.replicaCount 1.0}}
            {{- $fullName := include "alertmanager.fullname" . }}
            {{- range $i := until (int .Values.replicaCount) }}
            - --cluster.peer={{ $fullName }}-{{ $i }}.{{ $fullName }}-headless:9094
            {{- end }}
            {{- end }}
            {{- if .Values.additionalPeers }}
            {{- range $item := .Values.additionalPeers }}
            - --cluster.peer={{ $item }}
            {{- end }}
            {{- end }}
            {{- range $key, $value := .Values.extraArgs }}
            - --{{ $key }}={{ $value }}
            {{- end }}
          ports:
            - name: http
              containerPort: 9093
              protocol: TCP
          livenessProbe:
            httpGet:
              path: /
              port: http
          readinessProbe:
            httpGet:
              path: /
              port: http
          resources:
            {{- toYaml .Values.resources | nindent 12 }}
          volumeMounts:
            {{- if .Values.config }}
            - name: config
              mountPath: /etc/alertmanager
            {{- end }}
            - name: storage
              mountPath: /alertmanager
      {{- if .Values.config }}
      volumes:
        - name: config
          configMap:
            name: {{ include "alertmanager.fullname" . }}
      {{- end }}
  {{- if .Values.persistence.enabled }}
  volumeClaimTemplates:
    - metadata:
        name: storage
      spec:
        accessModes:
          {{- toYaml .Values.persistence.accessModes | nindent 10 }}
        resources:
          requests:
            storage: {{ .Values.persistence.size }}
        {{- if .Values.persistence.storageClass }}
        {{- if (eq "-" .Values.persistence.storageClass) }}
        storageClassName: ""
        {{- else }}
        storageClassName: {{ .Values.persistence.storageClass }}
        {{- end }}
        {{- end }}
  {{- else }}
        - name: storage
          emptyDir: {}
  {{- end -}}
```
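The StatefulSet only emits gossip flags when more than one replica (or an additional peer) is configured. A sketch for checking that, with the chart path assumed:

```sh
# With replicaCount=3 the rendered args should contain three --cluster.peer
# entries pointing at the headless service.
helm template my-signoz deploy/kubernetes/platform/signoz-charts/alertmanager \
    --set replicaCount=3 \
    --show-only templates/statefulset.yaml | grep 'cluster.peer'
```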
(new file, 48 lines)

```yaml
should match snapshot of default values:
  1: |
    apiVersion: networking.k8s.io/v1
    kind: Ingress
    metadata:
      labels:
        app.kubernetes.io/instance: RELEASE-NAME
        app.kubernetes.io/managed-by: Helm
        app.kubernetes.io/name: alertmanager
        app.kubernetes.io/version: 1.0.0
        helm.sh/chart: alertmanager-1.0.0
      name: RELEASE-NAME-alertmanager
    spec:
      ingressClassName: nginx-test
      rules:
        - host: alertmanager.domain.com
          http:
            paths:
              - backend:
                  service:
                    name: RELEASE-NAME-alertmanager
                    port:
                      number: 9093
                path: /
                pathType: ImplementationSpecific
should match snapshot of default values with old kubernetes ingress:
  1: |
    apiVersion: networking.k8s.io/v1beta1
    kind: Ingress
    metadata:
      annotations:
        kubernetes.io/ingress.class: nginx-test
      labels:
        app.kubernetes.io/instance: RELEASE-NAME
        app.kubernetes.io/managed-by: Helm
        app.kubernetes.io/name: alertmanager
        app.kubernetes.io/version: 1.0.0
        helm.sh/chart: alertmanager-1.0.0
      name: RELEASE-NAME-alertmanager
    spec:
      rules:
        - host: alertmanager.domain.com
          http:
            paths:
              - backend:
                  serviceName: RELEASE-NAME-alertmanager
                  servicePort: 9093
                path: /
```
(new file, 81 lines)

```yaml
suite: test ingress
templates:
  - ingress.yaml
tests:
  - it: should be empty if ingress is not enabled
    asserts:
      - hasDocuments:
          count: 0
  - it: should have apiVersion extensions/v1beta1 for k8s < 1.14
    set:
      ingress.enabled: true
    capabilities:
      majorVersion: 1
      minorVersion: 13
    asserts:
      - hasDocuments:
          count: 1
      - isKind:
          of: Ingress
      - isAPIVersion:
          of: extensions/v1beta1
  - it: should have apiVersion networking.k8s.io/v1beta1 for k8s < 1.19
    set:
      ingress.enabled: true
    capabilities:
      majorVersion: 1
      minorVersion: 18
    asserts:
      - hasDocuments:
          count: 1
      - isKind:
          of: Ingress
      - isAPIVersion:
          of: networking.k8s.io/v1beta1
  - it: should have apiVersion networking.k8s.io/v1 for k8s >= 1.19
    set:
      ingress.enabled: true
    capabilities:
      majorVersion: 1
      minorVersion: 19
    asserts:
      - hasDocuments:
          count: 1
      - isKind:
          of: Ingress
      - isAPIVersion:
          of: networking.k8s.io/v1
  - it: should have an ingressClassName for k8s >= 1.19
    set:
      ingress.enabled: true
      ingress.className: nginx-test
    capabilities:
      majorVersion: 1
      minorVersion: 19
    asserts:
      - hasDocuments:
          count: 1
      - equal:
          path: spec.ingressClassName
          value: nginx-test
  - it: should match snapshot of default values
    set:
      ingress.enabled: true
      ingress.className: nginx-test
    chart:
      version: 1.0.0
      appVersion: 1.0.0
    asserts:
      - matchSnapshot: { }
  - it: should match snapshot of default values with old kubernetes ingress
    set:
      ingress.enabled: true
      ingress.className: nginx-test
    capabilities:
      majorVersion: 1
      minorVersion: 17
    chart:
      version: 1.0.0
      appVersion: 1.0.0
    asserts:
      - matchSnapshot: { }
```
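This suite uses the helm-unittest plugin's schema (`suite`, `templates`, `tests`, `matchSnapshot`). A sketch for running it, assuming the plugin is installed:

```sh
# One-time plugin install (one commonly used distribution of the plugin):
# helm plugin install https://github.com/quintush/helm-unittest
helm unittest deploy/kubernetes/platform/signoz-charts/alertmanager
```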
(new file, 189 lines)

```yaml
# Default values for alertmanager.
# This is a YAML-formatted file.
# Declare variables to be passed into your templates.

replicaCount: 1

image:
  repository: signoz/alertmanager
  pullPolicy: IfNotPresent
  # Overrides the image tag whose default is the chart appVersion.
  tag: "0.5.0"

extraArgs: {}

imagePullSecrets: []
nameOverride: ""
fullnameOverride: ""

serviceAccount:
  # Specifies whether a service account should be created
  create: true
  # Annotations to add to the service account
  annotations: {}
  # The name of the service account to use.
  # If not set and create is true, a name is generated using the fullname template
  name:

podSecurityContext:
  fsGroup: 65534
dnsConfig: {}
  # nameservers:
  #   - 1.2.3.4
  # searches:
  #   - ns1.svc.cluster-domain.example
  #   - my.dns.search.suffix
  # options:
  #   - name: ndots
  #     value: "2"
  #   - name: edns0
securityContext:
  # capabilities:
  #   drop:
  #   - ALL
  # readOnlyRootFilesystem: true
  runAsUser: 65534
  runAsNonRoot: true
  runAsGroup: 65534

additionalPeers: []

service:
  annotations: {}
  type: ClusterIP
  port: 9093
  # if you want to force a specific nodePort. Must be use with service.type=NodePort
  # nodePort:

ingress:
  enabled: false
  className: ""
  annotations: {}
    # kubernetes.io/ingress.class: nginx
    # kubernetes.io/tls-acme: "true"
  hosts:
    - host: alertmanager.domain.com
      paths:
        - path: /
          pathType: ImplementationSpecific
  tls: []
  #  - secretName: chart-example-tls
  #    hosts:
  #      - alertmanager.domain.com

resources: {}
  # We usually recommend not to specify default resources and to leave this as a conscious
  # choice for the user. This also increases chances charts run on environments with little
  # resources, such as Minikube. If you do want to specify resources, uncomment the following
  # lines, adjust them as necessary, and remove the curly braces after 'resources:'.
  # limits:
  #   cpu: 100m
  #   memory: 128Mi
  # requests:
  #   cpu: 10m
  #   memory: 32Mi

nodeSelector: {}

tolerations: []

affinity: {}

statefulSet:
  annotations: {}

podAnnotations: {}
podLabels: {}

# Ref: https://kubernetes.io/docs/tasks/run-application/configure-pdb/
podDisruptionBudget: {}
  # maxUnavailable: 1
  # minAvailable: 1

command: []

persistence:
  enabled: true
  ## Persistent Volume Storage Class
  ## If defined, storageClassName: <storageClass>
  ## If set to "-", storageClassName: "", which disables dynamic provisioning
  ## If undefined (the default) or set to null, no storageClassName spec is
  ## set, choosing the default provisioner.
  ##
  # storageClass: "-"
  accessModes:
    - ReadWriteOnce
  size: 100Mi

config:
  global:
    resolve_timeout: 1m
    slack_api_url: 'https://hooks.slack.com/services/xxx'

  templates:
    - '/etc/alertmanager/*.tmpl'

  receivers:
    - name: 'slack-notifications'
      slack_configs:
        - channel: '#alerts'
          send_resolved: true
          icon_url: https://avatars3.githubusercontent.com/u/3380462
          title: '{{ template "slack.title" . }}'
          text: '{{ template "slack.text" . }}'


  route:
    receiver: 'slack-notifications'

## Monitors ConfigMap changes and POSTs to a URL
## Ref: https://github.com/jimmidyson/configmap-reload
##
configmapReload:
  ## If false, the configmap-reload container will not be deployed
  ##
  enabled: false

  ## configmap-reload container name
  ##
  name: configmap-reload

  ## configmap-reload container image
  ##
  image:
    repository: jimmidyson/configmap-reload
    tag: v0.5.0
    pullPolicy: IfNotPresent

  ## configmap-reload resource requests and limits
  ## Ref: http://kubernetes.io/docs/user-guide/compute-resources/
  ##
  resources: {}

templates:
  title.tmpl: |-
    {{ define "slack.title" }}
    [{{ .Status | toUpper }}{{ if eq .Status "firing" }}:{{ .Alerts.Firing | len }}{{ end }}] {{ .CommonLabels.alertname }} for {{ .CommonLabels.job }}
    {{- if gt (len .CommonLabels) (len .GroupLabels) -}}
    {{" "}}(
    {{- with .CommonLabels.Remove .GroupLabels.Names }}
    {{- range $index, $label := .SortedPairs -}}
    {{ if $index }}, {{ end }}
    {{- $label.Name }}="{{ $label.Value -}}"
    {{- end }}
    {{- end -}}
    )
    {{- end }}
    {{ end }}
  text.tmpl: |-
    {{ define "slack.text" }}
    {{ range .Alerts -}}
    *Alert:* {{ .Annotations.title }}{{ if .Labels.severity }} - `{{ .Labels.severity }}`{{ end }}

    *Description:* {{ .Annotations.description }}

    *Details:*
    {{ range .Labels.SortedPairs }} • *{{ .Name }}:* `{{ .Value }}`
    {{ end }}
    {{ end }}
    {{ end }}
```
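The Slack webhook in these defaults is a placeholder. A sketch for overriding it at install time instead of committing a real URL, assuming the alertmanager chart is consumed as a subchart of the platform chart (so its values are namespaced under `alertmanager.`):

```sh
# Override the placeholder webhook without editing values.yaml.
helm upgrade --install my-signoz deploy/kubernetes/platform \
    --set alertmanager.config.global.slack_api_url=https://hooks.slack.com/services/REAL/HOOK/URL
```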
(deleted file, 21 lines)

```yaml
apiVersion: v2
name: flattener-processor
description: A Helm chart for Kubernetes

# A chart can be either an 'application' or a 'library' chart.
#
# Application charts are a collection of templates that can be packaged into versioned archives
# to be deployed.
#
# Library charts provide useful utilities or functions for the chart developer. They're included as
# a dependency of application charts to inject those utilities and functions into the rendering
# pipeline. Library charts do not define any templates and therefore cannot be deployed.
type: application

# This is the chart version. This version number should be incremented each time you make changes
# to the chart and its templates, including the app version.
version: 0.3.6

# This is the version number of the application being deployed. This version number should be
# incremented each time you make changes to the application.
appVersion: 0.3.6
```
(deleted file, 65 lines)

```yaml
apiVersion: apps/v1
kind: Deployment
metadata:
  name: {{ include "flattener-processor.fullname" . }}
  labels:
    {{- include "flattener-processor.labels" . | nindent 4 }}
spec:
  replicas: {{ .Values.replicaCount }}
  selector:
    matchLabels:
      {{- include "flattener-processor.selectorLabels" . | nindent 6 }}
  template:
    metadata:
      labels:
        {{- include "flattener-processor.selectorLabels" . | nindent 8 }}
    spec:
      {{- with .Values.imagePullSecrets }}
      imagePullSecrets:
        {{- toYaml . | nindent 8 }}
      {{- end }}
      serviceAccountName: {{ include "flattener-processor.serviceAccountName" . }}
      securityContext:
        {{- toYaml .Values.podSecurityContext | nindent 8 }}
      containers:
        - name: {{ .Chart.Name }}
          securityContext:
            {{- toYaml .Values.securityContext | nindent 12 }}
          image: "{{ .Values.image.repository }}:{{ .Chart.AppVersion }}"
          imagePullPolicy: {{ .Values.image.pullPolicy }}
          command:
            - "/root/flattener"
          ports:
            - name: http
              containerPort: 8080
              protocol: TCP
          env:
            - name: KAFKA_BROKER
              value: {{ .Values.configVars.KAFKA_BROKER }}
            - name: KAFKA_INPUT_TOPIC
              value: {{ .Values.configVars.KAFKA_INPUT_TOPIC }}
            - name: KAFKA_OUTPUT_TOPIC
              value: {{ .Values.configVars.KAFKA_OUTPUT_TOPIC }}

          # livenessProbe:
          #   httpGet:
          #     path: /
          #     port: http
          # readinessProbe:
          #   httpGet:
          #     path: /
          #     port: http
          resources:
            {{- toYaml .Values.resources | nindent 12 }}
      {{- with .Values.nodeSelector }}
      nodeSelector:
        {{- toYaml . | nindent 8 }}
      {{- end }}
      {{- with .Values.affinity }}
      affinity:
        {{- toYaml . | nindent 8 }}
      {{- end }}
      {{- with .Values.tolerations }}
      tolerations:
        {{- toYaml . | nindent 8 }}
      {{- end }}
```
(deleted file, 41 lines)

```yaml
{{- if .Values.ingress.enabled -}}
{{- $fullName := include "flattener-processor.fullname" . -}}
{{- $svcPort := .Values.service.port -}}
{{- if semverCompare ">=1.14-0" .Capabilities.KubeVersion.GitVersion -}}
apiVersion: networking.k8s.io/v1beta1
{{- else -}}
apiVersion: extensions/v1beta1
{{- end }}
kind: Ingress
metadata:
  name: {{ $fullName }}
  labels:
    {{- include "flattener-processor.labels" . | nindent 4 }}
  {{- with .Values.ingress.annotations }}
  annotations:
    {{- toYaml . | nindent 4 }}
  {{- end }}
spec:
  {{- if .Values.ingress.tls }}
  tls:
    {{- range .Values.ingress.tls }}
    - hosts:
        {{- range .hosts }}
        - {{ . | quote }}
        {{- end }}
      secretName: {{ .secretName }}
    {{- end }}
  {{- end }}
  rules:
    {{- range .Values.ingress.hosts }}
    - host: {{ .host | quote }}
      http:
        paths:
          {{- range .paths }}
          - path: {{ . }}
            backend:
              serviceName: {{ $fullName }}
              servicePort: {{ $svcPort }}
          {{- end }}
    {{- end }}
{{- end }}
```
(deleted file, 15 lines)

```yaml
apiVersion: v1
kind: Service
metadata:
  name: {{ include "flattener-processor.fullname" . }}
  labels:
    {{- include "flattener-processor.labels" . | nindent 4 }}
spec:
  type: {{ .Values.service.type }}
  ports:
    - port: {{ .Values.service.port }}
      targetPort: http
      protocol: TCP
      name: http
  selector:
    {{- include "flattener-processor.selectorLabels" . | nindent 4 }}
```
(deleted file, 15 lines)

```yaml
apiVersion: v1
kind: Pod
metadata:
  name: "{{ include "flattener-processor.fullname" . }}-test-connection"
  labels:
    {{- include "flattener-processor.labels" . | nindent 4 }}
  annotations:
    "helm.sh/hook": test-success
spec:
  containers:
    - name: wget
      image: busybox
      command: ['wget']
      args: ['{{ include "flattener-processor.fullname" . }}:{{ .Values.service.port }}']
  restartPolicy: Never
```
(deleted file, 74 lines)

```yaml
# Default values for flattener-processor.
# This is a YAML-formatted file.
# Declare variables to be passed into your templates.

replicaCount: 1

image:
  repository: signoz/flattener-processor
  pullPolicy: IfNotPresent

imagePullSecrets: []
nameOverride: ""
fullnameOverride: ""


configVars:
  KAFKA_BROKER: signoz-kafka:9092
  KAFKA_INPUT_TOPIC: otlp_spans
  KAFKA_OUTPUT_TOPIC: flattened_spans

serviceAccount:
  # Specifies whether a service account should be created
  create: false
  # Annotations to add to the service account
  annotations: {}
  # The name of the service account to use.
  # If not set and create is true, a name is generated using the fullname template
  name:

podSecurityContext: {}
  # fsGroup: 2000

securityContext: {}
  # capabilities:
  #   drop:
  #   - ALL
  # readOnlyRootFilesystem: true
  # runAsNonRoot: true
  # runAsUser: 1000

service:
  type: ClusterIP
  port: 8080

ingress:
  enabled: false
  annotations: {}
    # kubernetes.io/ingress.class: nginx
    # kubernetes.io/tls-acme: "true"
  hosts:
    - host: chart-example.local
      paths: []
  tls: []
  #  - secretName: chart-example-tls
  #    hosts:
  #      - chart-example.local

resources: {}
  # We usually recommend not to specify default resources and to leave this as a conscious
  # choice for the user. This also increases chances charts run on environments with little
  # resources, such as Minikube. If you do want to specify resources, uncomment the following
  # lines, adjust them as necessary, and remove the curly braces after 'resources:'.
  # limits:
  #   cpu: 100m
  #   memory: 128Mi
  # requests:
  #   cpu: 100m
  #   memory: 128Mi

nodeSelector: {}

tolerations: []

affinity: {}
```
```diff
@@ -14,8 +14,8 @@ type: application

 # This is the chart version. This version number should be incremented each time you make changes
 # to the chart and its templates, including the app version.
-version: 0.3.6
+version: 0.5.3

 # This is the version number of the application being deployed. This version number should be
 # incremented each time you make changes to the application.
-appVersion: 0.3.6
+appVersion: 0.5.3
```
```diff
@@ -24,6 +24,9 @@ data:
         index index.html index.htm;
         try_files $uri $uri/ /index.html;
       }
+      location /api/alertmanager{
+        proxy_pass http://{{ .Values.config.alertmanagerUrl }}/api/v2;
+      }
       location /api {
         proxy_pass http://{{ .Values.config.queryServiceUrl }}/api;
       }
```
```diff
@@ -17,6 +17,7 @@ configVars: {}
 config:
   name: signoz-nginx-config
   queryServiceUrl: signoz-query-service:8080
+  alertmanagerUrl: alertmanager:9093

 serviceAccount:
   # Specifies whether a service account should be created
```
```diff
@@ -14,8 +14,8 @@ type: application

 # This is the chart version. This version number should be incremented each time you make changes
 # to the chart and its templates, including the app version.
-version: 0.3.6
+version: 0.5.3

 # This is the version number of the application being deployed. This version number should be
 # incremented each time you make changes to the application.
-appVersion: 0.3.6
+appVersion: 0.5.3
```
@@ -0,0 +1,33 @@
apiVersion: v1
kind: ConfigMap
metadata:
  name: prometheus-config
data:
  prometheus.yml: |
    # my global config
    global:
      scrape_interval: 5s # Set the scrape interval to every 5 seconds. Default is every 1 minute.
      evaluation_interval: 15s # Evaluate rules every 15 seconds. The default is every 1 minute.
      # scrape_timeout is set to the global default (10s).

    # Alertmanager configuration
    alerting:
      alertmanagers:
        - static_configs:
            - targets:
                - alertmanager:9093

    # Load rules once and periodically evaluate them according to the global 'evaluation_interval'.
    rule_files:
      # - "first_rules.yml"
      # - "second_rules.yml"
      - 'alerts.yml'

    # A scrape configuration containing exactly one endpoint to scrape:
    # Here it's Prometheus itself.
    scrape_configs:

    remote_read:
      - url: tcp://signoz-clickhouse:9000/?database=signoz_metrics&username=clickhouse_operator&password=clickhouse_operator_password
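The rule_files entry above references alerts.yml, which is not included anywhere in this changeset. A minimal sketch of the expected Prometheus rule-file shape (the group name, alert name, and metric are illustrative assumptions, not taken from the diff):

# alerts.yml -- hypothetical example of the file the config expects
groups:
  - name: example
    rules:
      - alert: HighErrorRate
        # metric name is hypothetical; any PromQL over the stored metrics works here
        expr: rate(http_requests_total{status="500"}[5m]) > 0.5
        for: 5m
        labels:
          severity: warning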
@@ -1,10 +1,11 @@
 apiVersion: apps/v1
-kind: Deployment
+kind: StatefulSet
 metadata:
   name: {{ include "query-service.fullname" . }}
   labels:
     {{- include "query-service.labels" . | nindent 4 }}
 spec:
+  serviceName: query-service
   replicas: {{ .Values.replicaCount }}
   selector:
     matchLabels:
@@ -14,19 +15,17 @@ spec:
       labels:
         {{- include "query-service.selectorLabels" . | nindent 8 }}
     spec:
-      {{- with .Values.imagePullSecrets }}
-      imagePullSecrets:
-        {{- toYaml . | nindent 8 }}
-      {{- end }}
-      serviceAccountName: {{ include "query-service.serviceAccountName" . }}
-      securityContext:
-        {{- toYaml .Values.podSecurityContext | nindent 8 }}
       containers:
         - name: {{ .Chart.Name }}
+      {{- with .Values.imagePullSecrets }}
+      imagePullSecrets:
+        {{- toYaml . | nindent 8 }}
+      {{- end }}
          securityContext:
            {{- toYaml .Values.securityContext | nindent 12 }}
          image: "{{ .Values.image.repository }}:{{ .Chart.AppVersion }}"
          imagePullPolicy: {{ .Values.image.pullPolicy }}
+         args: ["-config=/root/config/prometheus.yml"]
          ports:
            - name: http
              containerPort: 8080
@@ -38,7 +37,10 @@ spec:
              value: {{ .Values.configVars.DruidDatasource }}
            - name: STORAGE
              value: {{ .Values.configVars.STORAGE }}
-
+           - name: ClickHouseUrl
+             value: {{ .Values.configVars.ClickHouseUrl }}
+           - name: GODEBUG
+             value: netdns=go
          # livenessProbe:
          #   httpGet:
          #     path: /
@@ -47,6 +49,13 @@ spec:
          #   httpGet:
          #     path: /
          #     port: http
+         volumeMounts:
+           - name: prometheus
+             mountPath: /root/config
+           - name: signoz-db
+             mountPath: /var/lib/signoz/
+           - name: dashboards
+             mountPath: /root/config/dashboards
          resources:
            {{- toYaml .Values.resources | nindent 12 }}
      {{- with .Values.nodeSelector }}
@@ -61,3 +70,18 @@ spec:
      tolerations:
        {{- toYaml . | nindent 8 }}
      {{- end }}
+     volumes:
+       - name: prometheus
+         configMap:
+           name: prometheus-config
+       - name: dashboards
+         emptyDir: {}
+
+  volumeClaimTemplates:
+    - metadata:
+        name: signoz-db
+      spec:
+        accessModes: [ "ReadWriteOnce" ]
+        resources:
+          requests:
+            storage: 1Gi
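Because the workload is now a StatefulSet, the volumeClaimTemplates entry above yields one PVC per replica with a stable name of the form <template-name>-<pod-name>. A sketch of what replica 0's claim would look like (the rendered fullname signoz-query-service is an assumption about the release name, not part of the diff):

apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: signoz-db-signoz-query-service-0   # <template>-<statefulset>-<ordinal>
spec:
  accessModes: ["ReadWriteOnce"]
  resources:
    requests:
      storage: 1Gi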
@@ -16,7 +16,8 @@ fullnameOverride: ""

 configVars:
-  DruidClientUrl: http://signoz-druid-router:8888
-  DruidDatasource: flattened_spans
-  STORAGE: druid
+  ClickHouseUrl: http://signoz-clickhouse:9000?username=clickhouse_operator&password=clickhouse_operator_password
+  STORAGE: clickhouse
   POSTHOG_API_KEY: "H-htDCae7CR3RV57gUzmol6IAKtm5IMCvbcm_fwnL-w"
deploy/kubernetes/platform/templates/clickhouse-config.yaml (new file, 33 lines)
@@ -0,0 +1,33 @@
apiVersion: v1
kind: ConfigMap
metadata:
  name: initdb-config
data:
  init-db.sql: |-
    CREATE TABLE IF NOT EXISTS signoz_index (
      timestamp DateTime64(9) CODEC(Delta, ZSTD(1)),
      traceID String CODEC(ZSTD(1)),
      spanID String CODEC(ZSTD(1)),
      parentSpanID String CODEC(ZSTD(1)),
      serviceName LowCardinality(String) CODEC(ZSTD(1)),
      name LowCardinality(String) CODEC(ZSTD(1)),
      kind Int32 CODEC(ZSTD(1)),
      durationNano UInt64 CODEC(ZSTD(1)),
      tags Array(String) CODEC(ZSTD(1)),
      tagsKeys Array(String) CODEC(ZSTD(1)),
      tagsValues Array(String) CODEC(ZSTD(1)),
      statusCode Int64 CODEC(ZSTD(1)),
      references String CODEC(ZSTD(1)),
      externalHttpMethod Nullable(String) CODEC(ZSTD(1)),
      externalHttpUrl Nullable(String) CODEC(ZSTD(1)),
      component Nullable(String) CODEC(ZSTD(1)),
      dbSystem Nullable(String) CODEC(ZSTD(1)),
      dbName Nullable(String) CODEC(ZSTD(1)),
      dbOperation Nullable(String) CODEC(ZSTD(1)),
      peerService Nullable(String) CODEC(ZSTD(1)),
      INDEX idx_tagsKeys tagsKeys TYPE bloom_filter(0.01) GRANULARITY 64,
      INDEX idx_tagsValues tagsValues TYPE bloom_filter(0.01) GRANULARITY 64,
      INDEX idx_duration durationNano TYPE minmax GRANULARITY 1
    ) ENGINE MergeTree()
    PARTITION BY toDate(timestamp)
    ORDER BY (serviceName, -toUnixTimestamp(timestamp))
deploy/kubernetes/platform/templates/clickhouse-instance.yaml (new file, 104 lines)
@@ -0,0 +1,104 @@
{{ if (eq (.Values.cloud | toString) "gcp" )}}
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
  name: gce-resizable
provisioner: kubernetes.io/gce-pd
parameters:
  type: pd-standard
  fstype: ext4
  replication-type: none
reclaimPolicy: Retain
#volumeBindingMode: Immediate
allowVolumeExpansion: true
{{- else if (eq (.Values.cloud | toString) "aws") }}
#
# AWS resizable disk example
#
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
  name: gp2-resizable
provisioner: kubernetes.io/aws-ebs
parameters:
  type: gp2
reclaimPolicy: Retain
#volumeBindingMode: Immediate
allowVolumeExpansion: true
{{- end }}
---
apiVersion: "clickhouse.altinity.com/v1"
kind: "ClickHouseInstallation"
metadata:
  name: signoz
spec:
  defaults:
    templates:
      dataVolumeClaimTemplate: default-volume-claim
      # logVolumeClaimTemplate: default-volume-claim
      serviceTemplate: chi-service-template
  configuration:
    zookeeper:
      nodes:
        - host: signoz-zookeeper
          port: 2181
          session_timeout_ms: 6000
    clusters:
      - name: cluster
        # Templates are specified for this cluster explicitly
        templates:
          dataVolumeClaimTemplate: default-volume-claim
          # logVolumeClaimTemplate: default-volume-claim
          podTemplate: pod-template-with-volume
        layout:
          shardsCount: 1
          replicasCount: 1
  templates:
    hostTemplates:
      - name: port-distribution
        portDistribution:
          - type: ClusterScopeIndex
        spec:
          tcpPort: 9000
          httpPort: 8123
          interserverHTTPPort: 9009

    podTemplates:
      - name: pod-template-with-volume
        spec:
          containers:
            - name: clickhouse
              image: yandex/clickhouse-server:21.7
              volumeMounts:
                - name: default-volume-claim
                  mountPath: /var/lib/clickhouse
                - name: initdb
                  mountPath: /docker-entrypoint-initdb.d
          volumes:
            - name: initdb
              configMap:
                name: initdb-config
    serviceTemplates:
      - name: chi-service-template
        generateName: signoz-clickhouse
        spec:
          ports:
            - name: http
              port: 8123
            - name: tcp
              port: 9000
          type: {{ .Values.clickhouseOperator.serviceType }}
    volumeClaimTemplates:
      - name: default-volume-claim
        reclaimPolicy: Retain
        spec:
          {{- if (eq (.Values.cloud | toString) "gcp" )}}
          storageClassName: gce-resizable
          {{- else if (eq (.Values.cloud | toString) "aws") }}
          storageClassName: gp2-resizable
          {{- end }}
          accessModes:
            - ReadWriteOnce
          resources:
            requests:
              storage: {{ .Values.clickhouseOperator.storage | quote }}
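The StorageClass branch and the volume claim above are driven entirely by two chart values. A hypothetical override for a GCP cluster (file name and the specific values are illustrative, not part of this changeset):

# my-values.yaml -- sketch of a user-supplied override
cloud: gcp                  # selects the gce-resizable StorageClass branch above
clickhouseOperator:
  enabled: true
  storage: 50Gi             # flows into the default-volume-claim storage request
  serviceType: ClusterIP    # used by chi-service-template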
@@ -0,0 +1,157 @@
{{- if .Values.clickhouseOperator.enabled }}
# Template Parameters:
#
# NAMESPACE=posthog
# COMMENT=#
# ROLE_KIND=ClusterRole
# ROLE_NAME=clickhouse-operator-posthog
# ROLE_BINDING_KIND=ClusterRoleBinding
# ROLE_BINDING_NAME=clickhouse-operator-posthog
#
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  name: clickhouse-operator-posthog
  namespace: {{ .Values.clickhouseOperator.namespace | default .Release.Namespace }}
rules:
  - apiGroups:
      - ""
    resources:
      - configmaps
      - services
    verbs:
      - create
      - delete
      - get
      - patch
      - update
      - list
      - watch
  - apiGroups:
      - ""
    resources:
      - endpoints
    verbs:
      - get
      - list
      - watch
  - apiGroups:
      - ""
    resources:
      - events
    verbs:
      - create
  - apiGroups:
      - ""
    resources:
      - persistentvolumeclaims
    verbs:
      - delete
      - get
      - list
      - patch
      - update
      - watch
  - apiGroups:
      - ""
    resources:
      - persistentvolumes
      - pods
    verbs:
      - get
      - list
      - patch
      - update
      - watch
  - apiGroups:
      - apps
    resources:
      - statefulsets
    verbs:
      - create
      - delete
      - get
      - patch
      - update
      - list
      - watch
  - apiGroups:
      - apps
    resources:
      - replicasets
    verbs:
      - delete
      - get
      - patch
      - update
  - apiGroups:
      - apps
    resourceNames:
      - clickhouse-operator
    resources:
      - deployments
    verbs:
      - get
      - patch
      - update
      - delete
  - apiGroups:
      - policy
    resources:
      - poddisruptionbudgets
    verbs:
      - create
      - delete
      - get
      - patch
      - update
      - list
      - watch
  - apiGroups:
      - clickhouse.altinity.com
    resources:
      - clickhouseinstallations
    verbs:
      - delete
      - get
      - patch
      - update
  - apiGroups:
      - clickhouse.altinity.com
    resources:
      - clickhouseinstallations
      - clickhouseinstallationtemplates
      - clickhouseoperatorconfigurations
    verbs:
      - get
      - list
      - watch
  - apiGroups:
      - clickhouse.altinity.com
    resources:
      - clickhouseinstallations/finalizers
      - clickhouseinstallationtemplates/finalizers
      - clickhouseoperatorconfigurations/finalizers
    verbs:
      - update
  - apiGroups:
      - clickhouse.altinity.com
    resources:
      - clickhouseinstallations/status
      - clickhouseinstallationtemplates/status
      - clickhouseoperatorconfigurations/status
    verbs:
      - create
      - delete
      - get
      - update
      - patch
  - apiGroups:
      - ""
    resources:
      - secrets
    verbs:
      - get
      - list

{{- end }}
@@ -0,0 +1,18 @@
{{- if .Values.clickhouseOperator.enabled }}
# Setup ClusterRoleBinding between ClusterRole and ServiceAccount.
# ClusterRoleBinding is namespace-less and must have unique name
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: clickhouse-operator-posthog
  namespace: {{ .Values.clickhouseOperator.namespace | default .Release.Namespace }}
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: clickhouse-operator-posthog
subjects:
  - kind: ServiceAccount
    name: clickhouse-operator
    namespace: {{ .Values.clickhouseOperator.namespace | default .Release.Namespace }}

{{- end }}
@@ -0,0 +1,418 @@
{{- if .Values.clickhouseOperator.enabled }}
# Template Parameters:
#
# NAME=etc-clickhouse-operator-files
# NAMESPACE=posthog
# COMMENT=
#
apiVersion: v1
kind: ConfigMap
metadata:
  name: etc-clickhouse-operator-files
  namespace: {{ .Values.clickhouseOperator.namespace | default .Release.Namespace }}
  labels:
    app: clickhouse-operator
data:
  config.yaml: |
    ################################################
    ##
    ## Watch Namespaces Section
    ##
    ################################################

    # List of namespaces where clickhouse-operator watches for events.
    # Concurrently running operators should watch on different namespaces
    #watchNamespaces: ["dev", "test"]
    watchNamespaces: []

    ################################################
    ##
    ## Additional Configuration Files Section
    ##
    ################################################

    # Path to folder where ClickHouse configuration files common for all instances within CHI are located.
    chCommonConfigsPath: config.d

    # Path to folder where ClickHouse configuration files unique for each instance (host) within CHI are located.
    chHostConfigsPath: conf.d

    # Path to folder where ClickHouse configuration files with users settings are located.
    # Files are common for all instances within CHI
    chUsersConfigsPath: users.d

    # Path to folder where ClickHouseInstallation .yaml manifests are located.
    # Manifests are applied in sorted alpha-numeric order
    chiTemplatesPath: templates.d

    ################################################
    ##
    ## Cluster Create/Update/Delete Objects Section
    ##
    ################################################

    # How many seconds to wait for created/updated StatefulSet to be Ready
    statefulSetUpdateTimeout: 300

    # How many seconds to wait between checks for created/updated StatefulSet status
    statefulSetUpdatePollPeriod: 5

    # What to do in case created StatefulSet is not Ready after `statefulSetUpdateTimeout` seconds
    # Possible options:
    # 1. abort - do nothing, just break the process and wait for admin
    # 2. delete - delete newly created problematic StatefulSet
    # 3. ignore - ignore error, pretend nothing happened and move on to the next StatefulSet
    onStatefulSetCreateFailureAction: ignore

    # What to do in case updated StatefulSet is not Ready after `statefulSetUpdateTimeout` seconds
    # Possible options:
    # 1. abort - do nothing, just break the process and wait for admin
    # 2. rollback - delete Pod and rollback StatefulSet to previous Generation.
    #    Pod would be recreated by StatefulSet based on rollback-ed configuration
    # 3. ignore - ignore error, pretend nothing happened and move on to the next StatefulSet
    onStatefulSetUpdateFailureAction: rollback

    ################################################
    ##
    ## ClickHouse Settings Section
    ##
    ################################################

    # Default values for ClickHouse user configuration
    # 1. user/profile - string
    # 2. user/quota - string
    # 3. user/networks/ip - multiple strings
    # 4. user/password - string
    chConfigUserDefaultProfile: default
    chConfigUserDefaultQuota: default
    chConfigUserDefaultNetworksIP:
      - "::1"
      - "127.0.0.1"
    chConfigUserDefaultPassword: "default"

    # Default host_regexp to limit network connectivity from outside
    chConfigNetworksHostRegexpTemplate: "(chi-{chi}-[^.]+\\d+-\\d+|clickhouse\\-{chi})\\.{namespace}\\.svc\\.cluster\\.local$"

    ################################################
    ##
    ## Access to ClickHouse instances
    ##
    ################################################

    # ClickHouse credentials (username, password and port) to be used by operator to connect to ClickHouse instances
    # for:
    # 1. Metrics requests
    # 2. Schema maintenance
    # 3. DROP DNS CACHE
    # User with such credentials can be specified in additional ClickHouse .xml config files,
    # located in `chUsersConfigsPath` folder
    chUsername: "clickhouse_operator"
    chPassword: "clickhouse_operator_password"

    # Location of k8s Secret with username and password to be used by operator to connect to ClickHouse instances
    # Can be used instead of explicitly specified username and password
    chCredentialsSecretNamespace: ""
    chCredentialsSecretName: ""

    # Port where to connect to ClickHouse instances to
    chPort: 8123

    ################################################
    ##
    ## Log parameters
    ##
    ################################################

    logtostderr: "true"
    alsologtostderr: "false"
    v: "1"
    stderrthreshold: ""
    vmodule: ""
    log_backtrace_at: ""

    ################################################
    ##
    ## Runtime parameters
    ##
    ################################################

    # Max number of concurrent reconciles in progress
    reconcileThreadsNumber: 10
    reconcileWaitExclude: true
    reconcileWaitInclude: false

    ################################################
    ##
    ## Labels management parameters
    ##
    ################################################

    # When propagating labels from the chi's `metadata.labels` section to child objects' `metadata.labels`,
    # exclude labels from the following list:
    #excludeFromPropagationLabels:
    #  - "labelA"
    #  - "labelB"

    # Whether to append *Scope* labels to StatefulSet and Pod.
    # For the full list of available *scope* labels, check labeler.go:
    #  LabelShardScopeIndex
    #  LabelReplicaScopeIndex
    #  LabelCHIScopeIndex
    #  LabelCHIScopeCycleSize
    #  LabelCHIScopeCycleIndex
    #  LabelCHIScopeCycleOffset
    #  LabelClusterScopeIndex
    #  LabelClusterScopeCycleSize
    #  LabelClusterScopeCycleIndex
    #  LabelClusterScopeCycleOffset
    appendScopeLabels: "no"

    ################################################
    ##
    ## Pod management parameters
    ##
    ################################################
    # Grace period for Pod termination.
    # How many seconds to wait between sending
    # SIGTERM and SIGKILL during Pod termination process.
    # Increase this number in case of slow shutdown.
    terminationGracePeriod: 30

---

# Template Parameters:
#
# NAME=etc-clickhouse-operator-confd-files
# NAMESPACE=posthog
# COMMENT=
#
apiVersion: v1
kind: ConfigMap
metadata:
  name: etc-clickhouse-operator-confd-files
  namespace: {{ .Values.clickhouseOperator.namespace | default .Release.Namespace }}
  labels:
    app: clickhouse-operator
data:

---

# Template Parameters:
#
# NAME=etc-clickhouse-operator-configd-files
# NAMESPACE=posthog
# COMMENT=
#
apiVersion: v1
kind: ConfigMap
metadata:
  name: etc-clickhouse-operator-configd-files
  namespace: {{ .Values.clickhouseOperator.namespace | default .Release.Namespace }}
  labels:
    app: clickhouse-operator
data:
  01-clickhouse-01-listen.xml: |
    <yandex>
      <!-- Listen wildcard address to allow accepting connections from other containers and host network. -->
      <listen_host>::</listen_host>
      <listen_host>0.0.0.0</listen_host>
      <listen_try>1</listen_try>
    </yandex>

  01-clickhouse-02-logger.xml: |
    <yandex>
      <logger>
        <!-- Possible levels: https://github.com/pocoproject/poco/blob/develop/Foundation/include/Poco/Logger.h#L105 -->
        <level>debug</level>
        <log>/var/log/clickhouse-server/clickhouse-server.log</log>
        <errorlog>/var/log/clickhouse-server/clickhouse-server.err.log</errorlog>
        <size>1000M</size>
        <count>10</count>
        <!-- Default behavior is autodetection (log to console if not daemon mode and is tty) -->
        <console>1</console>
      </logger>
    </yandex>

  01-clickhouse-03-query_log.xml: |
    <yandex>
      <query_log replace="1">
        <database>system</database>
        <table>query_log</table>
        <engine>Engine = MergeTree PARTITION BY event_date ORDER BY event_time TTL event_date + interval 30 day</engine>
        <flush_interval_milliseconds>7500</flush_interval_milliseconds>
      </query_log>
      <query_thread_log remove="1"/>
    </yandex>

  01-clickhouse-04-part_log.xml: |
    <yandex>
      <part_log replace="1">
        <database>system</database>
        <table>part_log</table>
        <engine>Engine = MergeTree PARTITION BY event_date ORDER BY event_time TTL event_date + interval 30 day</engine>
        <flush_interval_milliseconds>7500</flush_interval_milliseconds>
      </part_log>
    </yandex>

---

# Template Parameters:
#
# NAME=etc-clickhouse-operator-templatesd-files
# NAMESPACE=posthog
# COMMENT=
#
apiVersion: v1
kind: ConfigMap
metadata:
  name: etc-clickhouse-operator-templatesd-files
  namespace: {{ .Values.clickhouseOperator.namespace | default .Release.Namespace }}
  labels:
    app: clickhouse-operator
data:
  001-templates.json.example: |
    {
      "apiVersion": "clickhouse.altinity.com/v1",
      "kind": "ClickHouseInstallationTemplate",
      "metadata": {
        "name": "01-default-volumeclaimtemplate"
      },
      "spec": {
        "templates": {
          "volumeClaimTemplates": [
            {
              "name": "chi-default-volume-claim-template",
              "spec": {
                "accessModes": [
                  "ReadWriteOnce"
                ],
                "resources": {
                  "requests": {
                    "storage": "2Gi"
                  }
                }
              }
            }
          ],
          "podTemplates": [
            {
              "name": "chi-default-oneperhost-pod-template",
              "distribution": "OnePerHost",
              "spec": {
                "containers": [
                  {
                    "name": "clickhouse",
                    "image": "yandex/clickhouse-server:19.3.7",
                    "ports": [
                      {
                        "name": "http",
                        "containerPort": 8123
                      },
                      {
                        "name": "client",
                        "containerPort": 9000
                      },
                      {
                        "name": "interserver",
                        "containerPort": 9009
                      }
                    ]
                  }
                ]
              }
            }
          ]
        }
      }
    }

  default-pod-template.yaml.example: |
    apiVersion: "clickhouse.altinity.com/v1"
    kind: "ClickHouseInstallationTemplate"
    metadata:
      name: "default-oneperhost-pod-template"
    spec:
      templates:
        podTemplates:
          - name: default-oneperhost-pod-template
            distribution: "OnePerHost"
  default-storage-template.yaml.example: |
    apiVersion: "clickhouse.altinity.com/v1"
    kind: "ClickHouseInstallationTemplate"
    metadata:
      name: "default-storage-template-2Gi"
    spec:
      templates:
        volumeClaimTemplates:
          - name: default-storage-template-2Gi
            spec:
              accessModes:
                - ReadWriteOnce
              resources:
                requests:
                  storage: 2Gi

  readme: |
    Templates in this folder are packaged with an operator and available via 'useTemplate'

---

# Template Parameters:
#
# NAME=etc-clickhouse-operator-usersd-files
# NAMESPACE=posthog
# COMMENT=
#
apiVersion: v1
kind: ConfigMap
metadata:
  name: etc-clickhouse-operator-usersd-files
  namespace: {{ .Values.clickhouseOperator.namespace | default .Release.Namespace }}
  labels:
    app: clickhouse-operator
data:
  01-clickhouse-user.xml: |
    <yandex>
      <users>
        <clickhouse_operator>
          <networks>
            <ip>127.0.0.1</ip>
            <ip>0.0.0.0/0</ip>
            <ip>::/0</ip>
          </networks>
          <password_sha256_hex>716b36073a90c6fe1d445ac1af85f4777c5b7a155cea359961826a030513e448</password_sha256_hex>
          <profile>clickhouse_operator</profile>
          <quota>default</quota>
        </clickhouse_operator>
      </users>
      <profiles>
        <clickhouse_operator>
          <log_queries>0</log_queries>
          <skip_unavailable_shards>1</skip_unavailable_shards>
          <http_connection_timeout>10</http_connection_timeout>
        </clickhouse_operator>
      </profiles>
    </yandex>

  02-clickhouse-default-profile.xml: |
    <yandex>
      <profiles>
        <default>
          <log_queries>1</log_queries>
          <connect_timeout_with_failover_ms>1000</connect_timeout_with_failover_ms>
          <distributed_aggregation_memory_efficient>1</distributed_aggregation_memory_efficient>
          <parallel_view_processing>1</parallel_view_processing>
        </default>
      </profiles>
    </yandex>
  03-database-ordinary.xml: |
    <!-- Remove it for ClickHouse versions before 20.4 -->
    <yandex>
      <profiles>
        <default>
          <default_database_engine>Ordinary</default_database_engine>
        </default>
      </profiles>
    </yandex>

{{- end }}
@@ -0,0 +1,129 @@
{{- if .Values.clickhouseOperator.enabled }}
# Template Parameters:
#
# NAMESPACE=posthog
# COMMENT=
# OPERATOR_IMAGE=altinity/clickhouse-operator:latest
# METRICS_EXPORTER_IMAGE=altinity/metrics-exporter:latest
#
# Setup Deployment for clickhouse-operator
# Deployment would be created in kubectl-specified namespace
kind: Deployment
apiVersion: apps/v1
metadata:
  name: clickhouse-operator
  namespace: {{ .Values.clickhouseOperator.namespace | default .Release.Namespace }}
  labels:
    app: clickhouse-operator
spec:
  replicas: 1
  selector:
    matchLabels:
      app: clickhouse-operator
  template:
    metadata:
      labels:
        app: clickhouse-operator
      annotations:
        prometheus.io/port: '8888'
        prometheus.io/scrape: 'true'
    spec:
      serviceAccountName: clickhouse-operator
      volumes:
        - name: etc-clickhouse-operator-folder
          configMap:
            name: etc-clickhouse-operator-files
        - name: etc-clickhouse-operator-confd-folder
          configMap:
            name: etc-clickhouse-operator-confd-files
        - name: etc-clickhouse-operator-configd-folder
          configMap:
            name: etc-clickhouse-operator-configd-files
        - name: etc-clickhouse-operator-templatesd-folder
          configMap:
            name: etc-clickhouse-operator-templatesd-files
        - name: etc-clickhouse-operator-usersd-folder
          configMap:
            name: etc-clickhouse-operator-usersd-files
      containers:
        - name: clickhouse-operator
          image: altinity/clickhouse-operator:latest
          imagePullPolicy: Always
          volumeMounts:
            - name: etc-clickhouse-operator-folder
              mountPath: /etc/clickhouse-operator
            - name: etc-clickhouse-operator-confd-folder
              mountPath: /etc/clickhouse-operator/conf.d
            - name: etc-clickhouse-operator-configd-folder
              mountPath: /etc/clickhouse-operator/config.d
            - name: etc-clickhouse-operator-templatesd-folder
              mountPath: /etc/clickhouse-operator/templates.d
            - name: etc-clickhouse-operator-usersd-folder
              mountPath: /etc/clickhouse-operator/users.d
          env:
            # Pod-specific
            # spec.nodeName: ip-172-20-52-62.ec2.internal
            - name: OPERATOR_POD_NODE_NAME
              valueFrom:
                fieldRef:
                  fieldPath: spec.nodeName
            # metadata.name: clickhouse-operator-6f87589dbb-ftcsf
            - name: OPERATOR_POD_NAME
              valueFrom:
                fieldRef:
                  fieldPath: metadata.name
            # metadata.namespace: kube-system
            - name: OPERATOR_POD_NAMESPACE
              valueFrom:
                fieldRef:
                  fieldPath: metadata.namespace
            # status.podIP: 100.96.3.2
            - name: OPERATOR_POD_IP
              valueFrom:
                fieldRef:
                  fieldPath: status.podIP
            # spec.serviceAccount: clickhouse-operator
            # spec.serviceAccountName: clickhouse-operator
            - name: OPERATOR_POD_SERVICE_ACCOUNT
              valueFrom:
                fieldRef:
                  fieldPath: spec.serviceAccountName

            # Container-specific
            - name: OPERATOR_CONTAINER_CPU_REQUEST
              valueFrom:
                resourceFieldRef:
                  containerName: clickhouse-operator
                  resource: requests.cpu
            - name: OPERATOR_CONTAINER_CPU_LIMIT
              valueFrom:
                resourceFieldRef:
                  containerName: clickhouse-operator
                  resource: limits.cpu
            - name: OPERATOR_CONTAINER_MEM_REQUEST
              valueFrom:
                resourceFieldRef:
                  containerName: clickhouse-operator
                  resource: requests.memory
            - name: OPERATOR_CONTAINER_MEM_LIMIT
              valueFrom:
                resourceFieldRef:
                  containerName: clickhouse-operator
                  resource: limits.memory

        - name: metrics-exporter
          image: altinity/metrics-exporter:latest
          imagePullPolicy: Always
          volumeMounts:
            - name: etc-clickhouse-operator-folder
              mountPath: /etc/clickhouse-operator
            - name: etc-clickhouse-operator-confd-folder
              mountPath: /etc/clickhouse-operator/conf.d
            - name: etc-clickhouse-operator-configd-folder
              mountPath: /etc/clickhouse-operator/config.d
            - name: etc-clickhouse-operator-templatesd-folder
              mountPath: /etc/clickhouse-operator/templates.d
            - name: etc-clickhouse-operator-usersd-folder
              mountPath: /etc/clickhouse-operator/users.d

{{- end }}
@@ -0,0 +1,26 @@
{{- if .Values.clickhouseOperator.enabled }}
# Template Parameters:
#
# NAMESPACE=posthog
# COMMENT=
#
# Setup ClusterIP Service to provide monitoring metrics for Prometheus
# Service would be created in kubectl-specified namespace
# In order to get access outside of k8s it should be exposed as:
# kubectl --namespace prometheus port-forward service/prometheus 9090
# and point browser to localhost:9090
kind: Service
apiVersion: v1
metadata:
  name: clickhouse-operator-metrics
  namespace: {{ .Values.clickhouseOperator.namespace | default .Release.Namespace }}
  labels:
    app: clickhouse-operator
spec:
  ports:
    - port: 8888
      name: clickhouse-operator-metrics
  selector:
    app: clickhouse-operator

{{- end }}
@@ -0,0 +1,15 @@
{{- if .Values.clickhouseOperator.enabled }}
# Template Parameters:
#
# COMMENT=
# NAMESPACE=posthog
# NAME=clickhouse-operator
#
# Setup ServiceAccount
apiVersion: v1
kind: ServiceAccount
metadata:
  name: clickhouse-operator
  namespace: {{ .Values.clickhouseOperator.namespace | default .Release.Namespace }}

{{- end }}
@@ -0,0 +1,53 @@
apiVersion: v1
kind: ConfigMap
metadata:
  name: otel-collector-metrics-conf
  labels:
    app: opentelemetry
    component: otel-collector-metrics-conf
data:
  otel-collector-metrics-config: |
    receivers:
      otlp:
        protocols:
          grpc:
          http:

      # Data sources: metrics
      prometheus:
        config:
          scrape_configs:
            - job_name: "otel-collector"
              scrape_interval: 30s
              static_configs:
                - targets: ["otel-collector:8889"]
    processors:
      batch:
        send_batch_size: 1000
        timeout: 10s
      memory_limiter:
        # Same as --mem-ballast-size-mib CLI argument
        ballast_size_mib: 683
        # 80% of maximum memory up to 2G
        limit_mib: 1500
        # 25% of limit up to 2G
        spike_limit_mib: 512
        check_interval: 5s
      # queued_retry:
      #   num_workers: 4
      #   queue_size: 100
      #   retry_on_failure: true
    extensions:
      health_check: {}
      zpages: {}
    exporters:
      clickhousemetricswrite:
        endpoint: tcp://signoz-clickhouse:9000/?database=signoz_metrics&username=clickhouse_operator&password=clickhouse_operator_password

    service:
      extensions: [health_check, zpages]
      pipelines:
        metrics:
          receivers: [otlp, prometheus]
          processors: [batch]
          exporters: [clickhousemetricswrite]
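Worth noting: memory_limiter is declared under processors above but never referenced in the pipeline, so it is inert as written. A sketch of what wiring it in would look like (this is an assumption about intent, not part of the diff; the collector applies processors in list order, and memory_limiter is conventionally placed first):

service:
  pipelines:
    metrics:
      receivers: [otlp, prometheus]
      processors: [memory_limiter, batch]   # memory_limiter before batch
      exporters: [clickhousemetricswrite]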
@@ -0,0 +1,72 @@
apiVersion: apps/v1
kind: Deployment
metadata:
  name: otel-collector-metrics
  labels:
    app: opentelemetry
    component: otel-collector-metrics
spec:
  selector:
    matchLabels:
      app: opentelemetry
      component: otel-collector-metrics
  minReadySeconds: 5
  progressDeadlineSeconds: 120
  replicas: 1 # TODO - adjust this to your own requirements
  template:
    metadata:
      labels:
        app: opentelemetry
        component: otel-collector-metrics
    spec:
      containers:
        - command:
            - "/otelcontribcol"
            - "--config=/conf/otel-collector-metrics-config.yaml"
            # Memory Ballast size should be max 1/3 to 1/2 of memory.
            - "--mem-ballast-size-mib=683"
          image: signoz/otelcontribcol:0.4.2
          name: otel-collector
          resources:
            limits:
              cpu: 1
              memory: 2Gi
            requests:
              cpu: 200m
              memory: 400Mi
          ports:
            - containerPort: 55679 # Default endpoint for ZPages.
            - containerPort: 55680 # Default endpoint for OpenTelemetry receiver.
            - containerPort: 55681 # Default endpoint for OpenTelemetry HTTP/1.0 receiver.
            - containerPort: 4317 # Default endpoint for OpenTelemetry GRPC receiver.
            - containerPort: 14250 # Default endpoint for Jaeger GRPC receiver.
            - containerPort: 14268 # Default endpoint for Jaeger HTTP receiver.
            - containerPort: 9411 # Default endpoint for Zipkin receiver.
            - containerPort: 8888 # Default endpoint for querying metrics.
          volumeMounts:
            - name: otel-collector-metrics-config-vol
              mountPath: /conf
            # - name: otel-collector-secrets
            #   mountPath: /secrets
          livenessProbe:
            httpGet:
              path: /
              port: 13133 # Health Check extension default port.
          readinessProbe:
            httpGet:
              path: /
              port: 13133 # Health Check extension default port.
      volumes:
        - configMap:
            name: otel-collector-metrics-conf
            items:
              - key: otel-collector-metrics-config
                path: otel-collector-metrics-config.yaml
          name: otel-collector-metrics-config-vol
        # - secret:
        #     name: otel-collector-secrets
        #     items:
        #       - key: cert.pem
        #         path: cert.pem
        #       - key: key.pem
        #         path: key.pem
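The ballast flag and the "1/3 to 1/2 of memory" comment above fit together as simple arithmetic (the derivation below is a sketch based on those comments, not stated anywhere in the diff):

# resources.limits.memory = 2Gi = 2048 MiB
# 2048 MiB * 1/3 ≈ 683 MiB              -> --mem-ballast-size-mib=683
# memory_limiter.limit_mib = 1500       (~75% of 2048, per the "80% up to 2G" note)
# memory_limiter.spike_limit_mib = 512  (25% of the 2Gi cap)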
@@ -0,0 +1,31 @@
apiVersion: v1
kind: Service
metadata:
  name: otel-collector-metrics
  labels:
    app: opentelemetry
    component: otel-collector-metrics
spec:
  ports:
    - name: otlp # Default endpoint for OpenTelemetry receiver.
      port: 55680
      protocol: TCP
      targetPort: 55680
    - name: otlp-http-legacy # Default endpoint for OpenTelemetry receiver.
      port: 55681
      protocol: TCP
      targetPort: 55681
    - name: otlp-grpc # Default endpoint for OpenTelemetry receiver.
      port: 4317
      protocol: TCP
      targetPort: 4317
    - name: jaeger-grpc # Default endpoint for Jaeger gRPC receiver.
      port: 14250
    - name: jaeger-thrift-http # Default endpoint for Jaeger HTTP receiver.
      port: 14268
    - name: zipkin # Default endpoint for Zipkin receiver.
      port: 9411
    - name: metrics # Default endpoint for querying metrics.
      port: 8888
  selector:
    component: otel-collector-metrics
@@ -0,0 +1,67 @@
apiVersion: v1
kind: ConfigMap
metadata:
  name: otel-collector-conf
  labels:
    app: opentelemetry
    component: otel-collector-conf
data:
  otel-collector-config: |
    receivers:
      otlp/spanmetrics:
        protocols:
          grpc:
            endpoint: "localhost:12345"
      otlp:
        protocols:
          grpc:
          http:
      jaeger:
        protocols:
          grpc:
          thrift_http:
    processors:
      batch:
        send_batch_size: 1000
        timeout: 10s
      signozspanmetrics/prometheus:
        metrics_exporter: prometheus
        latency_histogram_buckets: [100us, 1ms, 2ms, 6ms, 10ms, 50ms, 100ms, 250ms, 500ms, 1000ms, 1400ms, 2000ms, 5s, 10s, 20s, 40s, 60s]
      memory_limiter:
        # Same as --mem-ballast-size-mib CLI argument
        ballast_size_mib: 683
        # 80% of maximum memory up to 2G
        limit_mib: 1500
        # 25% of limit up to 2G
        spike_limit_mib: 512
        check_interval: 5s
      # queued_retry:
      #   num_workers: 4
      #   queue_size: 100
      #   retry_on_failure: true
    extensions:
      health_check: {}
      zpages: {}
    exporters:
      clickhouse:
        datasource: tcp://signoz-clickhouse:9000?username=clickhouse_operator&password=clickhouse_operator_password
      clickhousemetricswrite:
        endpoint: tcp://signoz-clickhouse:9000/?database=signoz_metrics&username=clickhouse_operator&password=clickhouse_operator_password
        resource_to_telemetry_conversion:
          enabled: true
      prometheus:
        endpoint: "0.0.0.0:8889"
    service:
      extensions: [health_check, zpages]
      pipelines:
        traces:
          receivers: [jaeger, otlp]
          processors: [signozspanmetrics/prometheus, batch]
          exporters: [clickhouse]
        metrics:
          receivers: [otlp]
          processors: [batch]
          exporters: [clickhousemetricswrite]
        metrics/spanmetrics:
          receivers: [otlp/spanmetrics]
          exporters: [prometheus]
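With the otlp and jaeger receivers above, an instrumented workload only needs to point its exporter at the collector Service. A minimal sketch (the service name otel-collector and gRPC port 4317 are taken from the surrounding manifests; the app container itself is hypothetical):

# snippet from a hypothetical application Deployment
containers:
  - name: my-instrumented-app
    image: my-app:latest            # placeholder image
    env:
      - name: OTEL_EXPORTER_OTLP_ENDPOINT
        value: "http://otel-collector:4317"
      - name: OTEL_RESOURCE_ATTRIBUTES
        value: "service.name=my-app"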
@@ -21,11 +21,11 @@ spec:
     spec:
       containers:
         - command:
-            - "/otelcol"
+            - "/otelcontribcol"
             - "--config=/conf/otel-collector-config.yaml"
             # Memory Ballast size should be max 1/3 to 1/2 of memory.
             - "--mem-ballast-size-mib=683"
-          image: otel/opentelemetry-collector:0.18.0
+          image: signoz/otelcontribcol:0.4.2
           name: otel-collector
           resources:
             limits:
@@ -43,6 +43,7 @@ spec:
         - containerPort: 14268 # Default endpoint for Jaeger HTTP receiver.
         - containerPort: 9411 # Default endpoint for Zipkin receiver.
         - containerPort: 8888 # Default endpoint for querying metrics.
+        - containerPort: 8889 # Default endpoint for prometheus exported metrics.
       volumeMounts:
         - name: otel-collector-config-vol
           mountPath: /conf
@@ -27,5 +27,7 @@ spec:
       port: 9411
     - name: metrics # Default endpoint for querying metrics.
       port: 8888
+    - name: prometheus-metrics # Default endpoint for querying prometheus metrics.
+      port: 8889
   selector:
     component: otel-collector
@@ -1,51 +1,15 @@
 zookeeper:
   autopurge:
     purgeInterval: 1

-kafka:
-  zookeeper:
-    enabled: false
-  externalZookeeper:
-    servers: ["signoz-zookeeper:2181"]
-  zookeeperConnectionTimeoutMs: 6000
-
-druid:
-  image:
-    tag: 0.21.1-rc2
-
-  configVars:
-
-    # To store data on local disks attached
-    druid_extensions_loadList: '["druid-histogram", "druid-datasketches", "druid-lookups-cached-global", "postgresql-metadata-storage", "druid-kafka-indexing-service"]'
-    druid_storage_type: local
-
-    # # To store data in S3
-    # druid_extensions_loadList: '["druid-histogram", "druid-datasketches", "druid-lookups-cached-global", "postgresql-metadata-storage", "druid-kafka-indexing-service", "druid-s3-extensions"]'
-    # druid_storage_type: s3
-    # druid_storage_bucket: signoz-druid
-    # druid_storage_baseKey: baseKey
-    # AWS_ACCESS_KEY_ID: <your secret id>
-    # AWS_SECRET_ACCESS_KEY: <your secret key>
-    # AWS_REGION: <your region>
-
-  historical:
-    persistence:
-      size: "20Gi"
-
-  zkHosts: "signoz-zookeeper:2181"
-
-  zookeeper:
-    enabled: false
-
-flattener-processor:
-  configVars:
-    KAFKA_BROKER: signoz-kafka:9092
-    KAFKA_INPUT_TOPIC: otlp_spans
-    KAFKA_OUTPUT_TOPIC: flattened_spans

 query-service:
   configVars:
-    DruidClientUrl: http://signoz-druid-router:8888
-    DruidDatasource: flattened_spans
-    STORAGE: druid
+    ClickHouseUrl: http://signoz-clickhouse:9000?username=clickhouse_operator&password=clickhouse_operator_password
+    STORAGE: clickhouse
+
+cloud: aws
+
+clickhouseOperator:
+  enabled: true
+  storage: 20Gi
+  serviceType: ClusterIP
@@ -41,7 +41,8 @@ module.exports = {
 		'react/prop-types': 'off',
 		'@typescript-eslint/explicit-function-return-type': 'error',
 		'@typescript-eslint/no-var-requires': 0,
-		'linebreak-style': ['error', 'unix'],
+		'react/no-array-index-key': 2,
+		'linebreak-style': ['error', process.platform === 'win32' ? 'windows' : 'unix'],

 		// simple sort error
 		'simple-import-sort/imports': 'error',
frontend/bundlesize.config.json (new file, 8 lines)
@@ -0,0 +1,8 @@
{
	"files": [
		{
			"path": "./build/**.js",
			"maxSize": "1.2MB"
		}
	]
}
frontend/cypress/fixtures/defaultAllChannels.json (new file, 21 lines)
@@ -0,0 +1,21 @@
{
	"data": [
		{
			"created_at": 1638083159246,
			"data": "{}",
			"id": 1,
			"name": "First Channels",
			"type": "slack",
			"updated_at": 1638083159246
		},
		{
			"created_at": 1638083159246,
			"data": "{}",
			"id": 2,
			"name": "Second Channels",
			"type": "Slack",
			"updated_at": 1638083159246
		}
	],
	"message": "Success"
}
frontend/cypress/fixtures/defaultRules.json (new file, 28 lines)
@@ -0,0 +1,28 @@
{
	"status": "success",
	"data": {
		"rules": [
			{
				"labels": { "severity": "warning" },
				"annotations": {},
				"state": "firing",
				"name": "First Rule",
				"id": 1
			},
			{
				"labels": { "severity": "warning" },
				"annotations": {},
				"state": "firing",
				"name": "Second Rule",
				"id": 2
			},
			{
				"labels": { "severity": "P0" },
				"annotations": {},
				"state": "firing",
				"name": "Third Rule",
				"id": 3
			}
		]
	}
}
frontend/cypress/integration/channels/index.spec.ts (new file, 52 lines)
@@ -0,0 +1,52 @@
/// <reference types="cypress" />

import ROUTES from 'constants/routes';

import defaultAllChannels from '../../fixtures/defaultAllChannels.json';

describe('Channels', () => {
	beforeEach(() => {
		window.localStorage.setItem('isLoggedIn', 'yes');

		cy.visit(Cypress.env('baseUrl') + ROUTES.ALL_CHANNELS);
	});

	it('Channels', () => {
		cy
			.intercept('**channels**', {
				statusCode: 200,
				fixture: 'defaultAllChannels',
			})
			.as('All Channels');

		cy.wait('@All Channels');

		cy
			.get('.ant-tabs-tab')
			.children()
			.then((e) => {
				const child = e.get();

				const secondChild = child[1];

				expect(secondChild.outerText).to.be.equals('Alert Channels');

				expect(secondChild.ariaSelected).to.be.equals('true');
			});

		cy
			.get('tbody')
			.should('be.visible')
			.then((e) => {
				const allChildren = e.children().get();
				expect(allChildren.length).to.be.equals(defaultAllChannels.data.length);

				allChildren.forEach((e, index) => {
					expect(e.firstChild?.textContent).not.null;
					expect(e.firstChild?.textContent).to.be.equals(
						defaultAllChannels.data[index].name,
					);
				});
			});
	});
});
@@ -20,7 +20,7 @@ describe('default time', () => {

 	it('Trace Page default time', () => {
 		cy.checkDefaultGlobalOption({
-			route: ROUTES.TRACES,
+			route: ROUTES.TRACE,
 		});
 	});
frontend/cypress/integration/rules/index.spec.ts (new file, 128 lines)
@@ -0,0 +1,128 @@
/// <reference types="cypress" />

import ROUTES from 'constants/routes';

import defaultRules from '../../fixtures/defaultRules.json';

describe('Alerts', () => {
	beforeEach(() => {
		window.localStorage.setItem('isLoggedIn', 'yes');

		cy
			.intercept('get', '*rules*', {
				fixture: 'defaultRules',
			})
			.as('defaultRules');

		cy.visit(Cypress.env('baseUrl') + `${ROUTES.LIST_ALL_ALERT}`);

		cy.wait('@defaultRules');
	});

	it('Edit Rules Page Failure', async () => {
		cy
			.intercept('**/rules/**', {
				statusCode: 500,
			})
			.as('Get Rules Error');

		cy.get('button.ant-btn.ant-btn-link:nth-child(2)').then((e) => {
			const firstDelete = e[0];
			firstDelete.click();

			cy.waitFor('@Get Rules Error');

			cy
				.window()
				.location()
				.then((e) => {
					expect(e.pathname).to.be.equals(`/alerts/edit/1`);
				});

			cy.findByText('Something went wrong').then((e) => {
				expect(e.length).to.be.equals(1);
			});
		});
	});

	it('Edit Rules Page Success', async () => {
		const text = 'this is the sample value';

		cy
			.intercept('**/rules/**', {
				statusCode: 200,
				body: {
					data: {
						data: text,
					},
				},
			})
			.as('Get Rules Success');

		cy.get('button.ant-btn.ant-btn-link:nth-child(2)').then((e) => {
			const firstDelete = e[0];
			firstDelete.click();

			cy.waitFor('@Get Rules Success');

			cy.wait(1000);

			cy.findByText('Save').then((e) => {
				const [el] = e.get();

				el.click();
			});
		});
	});

	it('All Rules are rendered correctly', async () => {
		cy
			.window()
			.location()
			.then(({ pathname }) => {
				expect(pathname).to.be.equals(ROUTES.LIST_ALL_ALERT);

				cy.get('tbody').then((e) => {
					const tarray = e.children().get();

					expect(tarray.length).to.be.equals(3);

					tarray.forEach(({ children }, index) => {
						const name = children[1]?.textContent;
						const label = children[2]?.textContent;

						expect(name).to.be.equals(defaultRules.data.rules[index].name);

						const defaultLabels = defaultRules.data.rules[index].labels;

						expect(label).to.be.equals(defaultLabels['severity']);
					});
				});
			});
	});

	it('Rules are Deleted', async () => {
		cy
			.intercept('**/rules/**', {
				body: {
					data: 'Deleted',
					message: 'Success',
				},
				statusCode: 200,
			})
			.as('deleteRules');

		cy.get('button.ant-btn.ant-btn-link:first-child').then((e) => {
			const firstDelete = e[0];

			firstDelete.click();
		});

		cy.wait('@deleteRules');

		cy.get('tbody').then((e) => {
			const trray = e.children().get();
			expect(trray.length).to.be.equals(2);
		});
	});
});
@@ -19,6 +19,8 @@
 /**
  * @type {Cypress.PluginConfig}
  */
-module.exports = (on, config: Cypress.ConfigOptions): void => {};
+module.exports = (): void => {
+	return undefined;
+};

+export {};
@@ -6,7 +6,7 @@
 		"noEmit": true,
 		// be explicit about types included
 		// to avoid clashing with Jest types
-		"types": ["cypress", "@testing-library/cypress"],
+		"types": ["cypress", "@testing-library/cypress", "node"],
 		"isolatedModules": false
 	},
 	"include": ["../node_modules/cypress", "./**/*.ts"]
@@ -1,28 +0,0 @@
-const gulp = require('gulp');
-const gulpless = require('gulp-less');
-const postcss = require('gulp-postcss');
-const debug = require('gulp-debug');
-var csso = require('gulp-csso');
-const autteoprefixer = require('autoprefixer');
-const NpmImportPlugin = require('less-plugin-npm-import');
-
-gulp.task('less', function () {
-	const plugins = [autteoprefixer()];
-
-	return gulp
-		.src('src/themes/*-theme.less')
-		.pipe(debug({ title: 'Less files:' }))
-		.pipe(
-			gulpless({
-				javascriptEnabled: true,
-				plugins: [new NpmImportPlugin({ prefix: '~' })],
-			}),
-		)
-		.pipe(postcss(plugins))
-		.pipe(
-			csso({
-				debug: true,
-			}),
-		)
-		.pipe(gulp.dest('./public'));
-});
@@ -13,7 +13,8 @@
 		"cypress:run": "cypress run",
 		"jest": "jest",
 		"jest:coverage": "jest --coverage",
-		"jest:watch": "jest --watch"
+		"jest:watch": "jest --watch",
+		"bundle:size": "bundlesize"
 	},
 	"engines": {
 		"node": ">=12.13.0"
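The new bundle:size script pairs with the bundlesize.config.json file added earlier in this changeset. A hypothetical CI step showing how it would be invoked after a build (the workflow wiring is an assumption; no CI config is part of this diff):

# e.g. a GitHub Actions step (illustrative only)
- name: Check bundle size
  run: |
    yarn build
    yarn bundle:size   # fails if any build/**.js exceeds the 1.2MB budget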
@@ -22,21 +23,9 @@
	"license": "ISC",
	"dependencies": {
		"@ant-design/icons": "^4.6.2",
		"@auth0/auth0-react": "^1.2.0",
		"@pmmmwh/react-refresh-webpack-plugin": "0.4.2",
		"@svgr/webpack": "5.4.0",
		"@testing-library/jest-dom": "^5.11.4",
		"@testing-library/react": "^11.1.0",
		"@testing-library/user-event": "^12.1.10",
		"@types/d3": "^6.2.0",
		"@types/jest": "^26.0.15",
		"@types/react": "^17.0.0",
		"@types/react-dom": "^16.9.9",
		"@types/react-redux": "^7.1.11",
		"@types/react-router-dom": "^5.1.6",
		"@types/redux": "^3.6.0",
		"@types/styled-components": "^5.1.4",
		"@types/vis": "^4.21.21",
		"antd": "^4.16.13",
		"axios": "^0.21.0",
		"babel-eslint": "^10.1.0",
@@ -45,72 +34,40 @@
		"babel-plugin-named-asset-import": "^0.3.7",
		"babel-preset-minify": "^0.5.1",
		"babel-preset-react-app": "^10.0.0",
		"bfj": "^7.0.2",
		"camelcase": "^6.1.0",
		"case-sensitive-paths-webpack-plugin": "2.3.0",
		"chart.js": "^3.4.0",
		"chartjs-adapter-date-fns": "^2.0.0",
		"css-loader": "4.3.0",
		"css-minimizer-webpack-plugin": "^3.2.0",
		"d3": "^6.2.0",
		"d3-flame-graph": "^3.1.1",
		"d3-tip": "^0.9.1",
		"dotenv": "8.2.0",
		"dotenv-expand": "5.1.0",
		"eslint-config-react-app": "^6.0.0",
		"eslint-plugin-flowtype": "^5.2.0",
		"eslint-plugin-jest": "^24.1.0",
		"eslint-plugin-jsx-a11y": "^6.3.1",
		"eslint-plugin-react-hooks": "^4.2.0",
		"eslint-plugin-testing-library": "^3.9.2",
		"eslint-webpack-plugin": "^2.1.0",
		"file-loader": "6.1.1",
		"fs-extra": "^9.0.1",
		"history": "4.10.1",
		"html-webpack-plugin": "5.1.0",
		"identity-obj-proxy": "3.0.0",
		"jest": "26.6.0",
		"jest-circus": "26.6.0",
		"jest-resolve": "26.6.0",
		"jest-watch-typeahead": "0.6.1",
		"pnp-webpack-plugin": "1.6.4",
		"postcss-loader": "3.0.0",
		"postcss-normalize": "8.0.1",
		"postcss-preset-env": "6.7.0",
		"postcss-safe-parser": "5.0.2",
		"prop-types": "^15.6.2",
		"mini-css-extract-plugin": "^2.4.5",
		"monaco-editor": "^0.30.0",
		"react": "17.0.0",
		"react-app-polyfill": "^2.0.0",
		"react-chips": "^0.8.0",
		"react-css-theme-switcher": "^0.1.6",
		"react-dev-utils": "^11.0.0",
		"react-dom": "17.0.0",
		"react-force-graph": "^1.41.0",
		"react-graph-vis": "^1.0.5",
		"react-grid-layout": "^1.2.5",
		"react-modal": "^3.12.1",
		"react-redux": "^7.2.2",
		"react-refresh": "^0.8.3",
		"react-router-dom": "^5.2.0",
		"react-vis": "^1.11.7",
		"redux": "^4.0.5",
		"redux-thunk": "^2.3.0",
		"resolve": "1.18.1",
		"resolve-url-loader": "^3.1.2",
		"sass-loader": "8.0.2",
		"semver": "7.3.2",
		"style-loader": "1.3.0",
		"styled-components": "^5.2.1",
		"terser-webpack-plugin": "4.2.3",
		"ts-pnp": "1.2.0",
		"terser-webpack-plugin": "^5.2.5",
		"ts-node": "^10.2.1",
		"tsconfig-paths-webpack-plugin": "^3.5.1",
		"typescript": "^4.0.5",
		"url-loader": "4.1.1",
		"uuid": "^8.3.2",
		"web-vitals": "^0.2.4",
		"webpack": "^5.23.0",
		"webpack-dev-server": "^3.11.2",
		"webpack-manifest-plugin": "2.2.0",
		"workbox-webpack-plugin": "5.1.4"
		"webpack-dev-server": "^4.3.1"
	},
	"browserslist": {
		"production": [
@@ -132,18 +89,33 @@
		"@babel/preset-react": "^7.12.13",
		"@babel/preset-typescript": "^7.12.17",
		"@testing-library/cypress": "^8.0.0",
		"@types/compression-webpack-plugin": "^9.0.0",
		"@types/copy-webpack-plugin": "^8.0.1",
		"@types/d3": "^6.2.0",
		"@types/d3-tip": "^3.5.5",
		"@types/jest": "^26.0.15",
		"@types/lodash-es": "^4.17.4",
		"@types/node": "^14.17.12",
		"@types/node": "^16.10.3",
		"@types/react": "^17.0.0",
		"@types/react-dom": "^16.9.9",
		"@types/react-grid-layout": "^1.1.2",
		"@types/react-redux": "^7.1.11",
		"@types/react-router-dom": "^5.1.6",
		"@types/redux": "^3.6.0",
		"@types/styled-components": "^5.1.4",
		"@types/uuid": "^8.3.1",
		"@types/vis": "^4.21.21",
		"@types/webpack": "^5.28.0",
		"@types/webpack-dev-server": "^4.3.0",
		"@typescript-eslint/eslint-plugin": "^4.28.2",
		"@typescript-eslint/parser": "^4.28.2",
		"@welldone-software/why-did-you-render": "^6.2.1",
		"autoprefixer": "^9.0.0",
		"babel-plugin-styled-components": "^1.12.0",
		"compression-webpack-plugin": "^8.0.0",
		"copy-webpack-plugin": "^7.0.0",
		"cypress": "8.6.0",
		"bundlesize": "^0.18.1",
		"compression-webpack-plugin": "^9.0.0",
		"copy-webpack-plugin": "^8.1.0",
		"cypress": "^8.3.0",
		"eslint": "^7.30.0",
		"eslint-config-prettier": "^8.3.0",
		"eslint-config-standard": "^16.0.3",
@@ -153,11 +125,6 @@
		"eslint-plugin-promise": "^5.1.0",
		"eslint-plugin-react": "^7.24.0",
		"eslint-plugin-simple-import-sort": "^7.0.0",
		"gulp": "^4.0.2",
		"gulp-csso": "^4.0.1",
		"gulp-debug": "^4.0.0",
		"gulp-less": "^4.0.1",
		"gulp-postcss": "^9.0.0",
		"husky": "4.3.8",
		"less-plugin-npm-import": "^2.1.0",
		"lint-staged": "10.5.3",
@@ -165,7 +132,6 @@
		"portfinder-sync": "^0.0.2",
		"prettier": "2.2.1",
		"react-hot-loader": "^4.13.0",
		"react-is": "^17.0.1",
		"ts-node": "^10.2.1",
		"webpack-cli": "^4.5.0"
	}
@@ -1,27 +1,46 @@
 import NotFound from 'components/NotFound';
 import Spinner from 'components/Spinner';
+import ROUTES from 'constants/routes';
 import AppLayout from 'container/AppLayout';
 import history from 'lib/history';
 import React, { Suspense } from 'react';
-import { Route, Router, Switch } from 'react-router-dom';
+import { useSelector } from 'react-redux';
+import { Redirect, Route, Router, Switch } from 'react-router-dom';
+import { AppState } from 'store/reducers';
+import AppReducer from 'types/reducer/app';

 import routes from './routes';

-const App = (): JSX.Element => (
-	<Router history={history}>
-		<AppLayout>
-			<Suspense fallback={<Spinner size="large" tip="Loading..." />}>
-				<Switch>
-					{routes.map(({ path, component, exact }, index) => {
-						return (
+const App = (): JSX.Element => {
+	const { isLoggedIn } = useSelector<AppState, AppReducer>((state) => state.app);
+
+	return (
+		<Router history={history}>
+			<AppLayout>
+				<Suspense fallback={<Spinner size="large" tip="Loading..." />}>
+					<Switch>
+						{routes.map(({ path, component, exact }, index) => (
 							<Route key={index} exact={exact} path={path} component={component} />
-						);
-					})}
-					<Route path="*" exact component={NotFound} />
-				</Switch>
-			</Suspense>
-		</AppLayout>
-	</Router>
-);
+						))}
+						<Route
+							path="/"
+							exact
+							render={(): JSX.Element =>
+								isLoggedIn ? (
+									<Redirect to={ROUTES.APPLICATION} />
+								) : (
+									<Redirect to={ROUTES.SIGN_UP} />
+								)
+							}
+						/>
+						<Route path="*" component={NotFound} />
+					</Switch>
+				</Suspense>
+			</AppLayout>
+		</Router>
+	);
+};

 export default App;
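The new root route redirects on `isLoggedIn`. For orientation, a minimal sketch of the shape `constants/routes` is assumed to have; the concrete path strings below are illustrative assumptions, not taken from this diff:

```typescript
// Hypothetical sketch of constants/routes.ts; only the keys used by the
// redirect above are shown, and the path values are assumed.
const ROUTES = {
	APPLICATION: '/application',
	SIGN_UP: '/signup',
} as const;

export default ROUTES;
```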
@@ -18,11 +18,8 @@ export const ServiceMapPage = Loadable(
 	),
 );

-export const TraceDetailPage = Loadable(
-	() =>
-		import(
-			/* webpackChunkName: "TraceDetailPage" */ 'modules/Traces/TraceDetail'
-		),
+export const TraceDetailPages = Loadable(
+	() => import(/* webpackChunkName: "TraceDetailPage" */ 'pages/TraceDetails'),
 );

 export const TraceGraphPage = Loadable(
@@ -66,3 +63,28 @@ export const DashboardWidget = Loadable(
 	() =>
 		import(/* webpackChunkName: "DashboardWidgetPage" */ 'pages/DashboardWidget'),
 );
+
+export const EditRulesPage = Loadable(
+	() => import(/* webpackChunkName: "Alerts Edit Page" */ 'pages/EditRules'),
+);
+
+export const ListAllALertsPage = Loadable(
+	() => import(/* webpackChunkName: "All Alerts Page" */ 'pages/AlertList'),
+);
+
+export const CreateNewAlerts = Loadable(
+	() => import(/* webpackChunkName: "Create Alerts" */ 'pages/CreateAlert'),
+);
+
+export const CreateAlertChannelAlerts = Loadable(
+	() =>
+		import(/* webpackChunkName: "Create Channels" */ 'pages/AlertChannelCreate'),
+);
+
+export const EditAlertChannelsAlerts = Loadable(
+	() => import(/* webpackChunkName: "Edit Channels" */ 'pages/ChannelsEdit'),
+);
+
+export const AllAlertChannels = Loadable(
+	() => import(/* webpackChunkName: "All Channels" */ 'pages/AllAlertChannels'),
+);
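Every page above goes through `Loadable`. The helper itself is not shown in this diff; a plausible sketch of what it is assumed to do, namely wrap a dynamic import in `React.lazy` so each page becomes its own named webpack chunk:

```typescript
import React from 'react';

// Assumed behavior of the project's Loadable helper (hypothetical sketch):
// defer loading a page component until it is first rendered, letting webpack
// split it into the chunk named by the inline webpackChunkName comment.
function Loadable<T extends React.ComponentType<any>>(
	importFn: () => Promise<{ default: T }>,
): React.LazyExoticComponent<T> {
	return React.lazy(importFn);
}

export default Loadable;
```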
@@ -3,15 +3,21 @@ import DashboardWidget from 'pages/DashboardWidget';
 import { RouteProps } from 'react-router-dom';

 import {
+	AllAlertChannels,
+	CreateAlertChannelAlerts,
+	CreateNewAlerts,
 	DashboardPage,
+	EditAlertChannelsAlerts,
+	EditRulesPage,
 	InstrumentationPage,
+	ListAllALertsPage,
 	NewDashboardPage,
 	ServiceMapPage,
 	ServiceMetricsPage,
 	ServicesTablePage,
 	SettingsPage,
 	SignupPage,
-	TraceDetailPage,
+	TraceDetailPages,
 	TraceGraphPage,
 	UsageExplorerPage,
 } from './pageComponents';
@@ -57,11 +63,6 @@ const routes: AppRoutes[] = [
 		exact: true,
 		component: InstrumentationPage,
 	},
-	{
-		path: ROUTES.TRACES,
-		exact: true,
-		component: TraceDetailPage,
-	},
 	{
 		path: ROUTES.ALL_DASHBOARD,
 		exact: true,
@@ -77,6 +78,41 @@ const routes: AppRoutes[] = [
 		exact: true,
 		component: DashboardWidget,
 	},
+	{
+		path: ROUTES.EDIT_ALERTS,
+		exact: true,
+		component: EditRulesPage,
+	},
+	{
+		path: ROUTES.LIST_ALL_ALERT,
+		exact: true,
+		component: ListAllALertsPage,
+	},
+	{
+		path: ROUTES.ALERTS_NEW,
+		exact: true,
+		component: CreateNewAlerts,
+	},
+	{
+		path: ROUTES.TRACE,
+		exact: true,
+		component: TraceDetailPages,
+	},
+	{
+		path: ROUTES.CHANNELS_NEW,
+		exact: true,
+		component: CreateAlertChannelAlerts,
+	},
+	{
+		path: ROUTES.CHANNELS_EDIT,
+		exact: true,
+		component: EditAlertChannelsAlerts,
+	},
+	{
+		path: ROUTES.ALL_CHANNELS,
+		exact: true,
+		component: AllAlertChannels,
+	},
 ];

 interface AppRoutes {
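The listing cuts off at `interface AppRoutes {`. From the route objects above, its fields are evidently `path`, `exact`, and `component`; a sketch inferred from that usage, not the verbatim source:

```typescript
import { RouteProps } from 'react-router-dom';

// Inferred from the route objects above; the real interface may carry more.
interface AppRoutes {
	path: RouteProps['path'];
	exact: RouteProps['exact'];
	component: RouteProps['component'];
}
```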
frontend/src/api/alerts/create.ts (new file, 26 lines)
@@ -0,0 +1,26 @@
+import axios from 'api';
+import { ErrorResponseHandler } from 'api/ErrorResponseHandler';
+import { AxiosError } from 'axios';
+import { ErrorResponse, SuccessResponse } from 'types/api';
+import { PayloadProps, Props } from 'types/api/alerts/create';
+
+const create = async (
+	props: Props,
+): Promise<SuccessResponse<PayloadProps> | ErrorResponse> => {
+	try {
+		const response = await axios.post('/rules', {
+			data: props.query,
+		});
+
+		return {
+			statusCode: 200,
+			error: null,
+			message: response.data.status,
+			payload: response.data.data,
+		};
+	} catch (error) {
+		return ErrorResponseHandler(error as AxiosError);
+	}
+};
+
+export default create;
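All of the API wrappers in this diff return a `SuccessResponse | ErrorResponse` union instead of throwing, so callers branch on the result. A usage sketch (the calling function and its wiring are hypothetical):

```typescript
import createAlertRule from 'api/alerts/create';

// Hypothetical caller: save a rule and branch on the normalized result.
async function onSaveRule(query: string): Promise<void> {
	const response = await createAlertRule({ query });

	if (response.statusCode === 200) {
		// Success path: payload carries the created rule.
		console.log('created:', response.payload);
	} else {
		// Failure path: ErrorResponseHandler already normalized the error.
		console.error(response.error);
	}
}
```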
frontend/src/api/alerts/delete.ts (new file, 24 lines)
@@ -0,0 +1,24 @@
+import axios from 'api';
+import { ErrorResponseHandler } from 'api/ErrorResponseHandler';
+import { AxiosError } from 'axios';
+import { ErrorResponse, SuccessResponse } from 'types/api';
+import { PayloadProps, Props } from 'types/api/alerts/delete';
+
+const deleteAlerts = async (
+	props: Props,
+): Promise<SuccessResponse<PayloadProps> | ErrorResponse> => {
+	try {
+		const response = await axios.delete(`/rules/${props.id}`);
+
+		return {
+			statusCode: 200,
+			error: null,
+			message: response.data.status,
+			payload: response.data.data.rules,
+		};
+	} catch (error) {
+		return ErrorResponseHandler(error as AxiosError);
+	}
+};
+
+export default deleteAlerts;
frontend/src/api/alerts/get.ts (new file, 24 lines)
@@ -0,0 +1,24 @@
+import axios from 'api';
+import { ErrorResponseHandler } from 'api/ErrorResponseHandler';
+import { AxiosError } from 'axios';
+import { ErrorResponse, SuccessResponse } from 'types/api';
+import { PayloadProps, Props } from 'types/api/alerts/get';
+
+const get = async (
+	props: Props,
+): Promise<SuccessResponse<PayloadProps> | ErrorResponse> => {
+	try {
+		const response = await axios.get(`/rules/${props.id}`);
+
+		return {
+			statusCode: 200,
+			error: null,
+			message: response.data.status,
+			payload: response.data.data,
+		};
+	} catch (error) {
+		return ErrorResponseHandler(error as AxiosError);
+	}
+};
+
+export default get;
frontend/src/api/alerts/getAll.ts (new file, 24 lines)
@@ -0,0 +1,24 @@
+import axios from 'api';
+import { ErrorResponseHandler } from 'api/ErrorResponseHandler';
+import { AxiosError } from 'axios';
+import { ErrorResponse, SuccessResponse } from 'types/api';
+import { PayloadProps } from 'types/api/alerts/getAll';
+
+const getAll = async (): Promise<
+	SuccessResponse<PayloadProps> | ErrorResponse
+> => {
+	try {
+		const response = await axios.get('/rules');
+
+		return {
+			statusCode: 200,
+			error: null,
+			message: response.data.status,
+			payload: response.data.data.rules,
+		};
+	} catch (error) {
+		return ErrorResponseHandler(error as AxiosError);
+	}
+};
+
+export default getAll;
frontend/src/api/alerts/getGroup.ts (new file, 30 lines)
@@ -0,0 +1,30 @@
+import { AxiosAlertManagerInstance } from 'api';
+import { ErrorResponseHandler } from 'api/ErrorResponseHandler';
+import { AxiosError } from 'axios';
+import { ErrorResponse, SuccessResponse } from 'types/api';
+import { PayloadProps, Props } from 'types/api/alerts/getGroups';
+
+const getGroups = async (
+	props: Props,
+): Promise<SuccessResponse<PayloadProps> | ErrorResponse> => {
+	try {
+		const queryParams = Object.keys(props)
+			.map((e) => `${e}=${props[e]}`)
+			.join('&');
+
+		const response = await AxiosAlertManagerInstance.get(
+			`/alerts/groups?${queryParams}`,
+		);
+
+		return {
+			statusCode: 200,
+			error: null,
+			message: response.data.status,
+			payload: response.data,
+		};
+	} catch (error) {
+		return ErrorResponseHandler(error as AxiosError);
+	}
+};
+
+export default getGroups;
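`getGroups` builds its query string by joining raw `key=value` pairs, which leaves values unencoded. An encoding-safe construction of the same string using the standard `URLSearchParams` API (an alternative sketch, not what this diff ships):

```typescript
// Encoding-safe alternative to the manual join above (sketch).
function buildQuery(props: Record<string, string | number>): string {
	return new URLSearchParams(
		Object.entries(props).map(([key, value]) => [key, String(value)]),
	).toString();
}
```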
frontend/src/api/alerts/put.ts (new file, 26 lines)
@@ -0,0 +1,26 @@
+import axios from 'api';
+import { ErrorResponseHandler } from 'api/ErrorResponseHandler';
+import { AxiosError } from 'axios';
+import { ErrorResponse, SuccessResponse } from 'types/api';
+import { PayloadProps, Props } from 'types/api/alerts/put';
+
+const put = async (
+	props: Props,
+): Promise<SuccessResponse<PayloadProps> | ErrorResponse> => {
+	try {
+		const response = await axios.put(`/rules/${props.id}`, {
+			data: props.data,
+		});
+
+		return {
+			statusCode: 200,
+			error: null,
+			message: response.data.status,
+			payload: response.data.data,
+		};
+	} catch (error) {
+		return ErrorResponseHandler(error as AxiosError);
+	}
+};
+
+export default put;
@@ -1,3 +1,4 @@
 const apiV1 = '/api/v1/';
+export const apiV2 = '/api/alertmanager';

 export default apiV1;
@@ -1,5 +1,10 @@
 const get = (key: string): string | null => {
-	return localStorage.getItem(key);
+	try {
+		const value = localStorage.getItem(key);
+		return value;
+	} catch (e) {
+		return '';
+	}
 };

 export default get;
@@ -1,5 +1,10 @@
-const remove = (key: string): void => {
-	window.localStorage.removeItem(key);
+const remove = (key: string): boolean => {
+	try {
+		window.localStorage.removeItem(key);
+		return true;
+	} catch (e) {
+		return false;
+	}
 };

 export default remove;
@@ -1,5 +1,10 @@
-const set = (key: string, value: string): void => {
-	localStorage.setItem(key, value);
+const set = (key: string, value: string): boolean => {
+	try {
+		localStorage.setItem(key, value);
+		return true;
+	} catch (e) {
+		return false;
+	}
 };

 export default set;
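With the try/catch versions, `set` and `remove` now report success as a boolean and `get` degrades to an empty string, so callers can survive environments where `localStorage` throws (private browsing, disabled storage). A usage sketch (the import paths and key are assumptions):

```typescript
import get from 'api/browser/localstorage/get'; // assumed module paths
import set from 'api/browser/localstorage/set';

// Persist a value only if storage is actually available.
if (set('THEME', 'dark')) {
	console.log('persisted theme:', get('THEME'));
} else {
	console.warn('localStorage unavailable; keeping theme in memory only');
}
```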
frontend/src/api/channels/createSlack.ts (new file, 35 lines)
@@ -0,0 +1,35 @@
+import axios from 'api';
+import { ErrorResponseHandler } from 'api/ErrorResponseHandler';
+import { AxiosError } from 'axios';
+import { ErrorResponse, SuccessResponse } from 'types/api';
+import { PayloadProps, Props } from 'types/api/channels/createSlack';
+
+const create = async (
+	props: Props,
+): Promise<SuccessResponse<PayloadProps> | ErrorResponse> => {
+	try {
+		const response = await axios.post('/channels', {
+			name: props.name,
+			slack_configs: [
+				{
+					send_resolved: true,
+					api_url: props.api_url,
+					channel: props.channel,
+					title: props.title,
+					text: props.text,
+				},
+			],
+		});
+
+		return {
+			statusCode: 200,
+			error: null,
+			message: 'Success',
+			payload: response.data.data,
+		};
+	} catch (error) {
+		return ErrorResponseHandler(error as AxiosError);
+	}
+};
+
+export default create;
frontend/src/api/channels/delete.ts (new file, 24 lines)
@@ -0,0 +1,24 @@
+import axios from 'api';
+import { ErrorResponseHandler } from 'api/ErrorResponseHandler';
+import { AxiosError } from 'axios';
+import { ErrorResponse, SuccessResponse } from 'types/api';
+import { PayloadProps, Props } from 'types/api/channels/delete';
+
+const deleteChannel = async (
+	props: Props,
+): Promise<SuccessResponse<PayloadProps> | ErrorResponse> => {
+	try {
+		const response = await axios.delete(`/channels/${props.id}`);
+
+		return {
+			statusCode: 200,
+			error: null,
+			message: 'Success',
+			payload: response.data.data,
+		};
+	} catch (error) {
+		return ErrorResponseHandler(error as AxiosError);
+	}
+};
+
+export default deleteChannel;
frontend/src/api/channels/editSlack.ts (new file, 35 lines)
@@ -0,0 +1,35 @@
+import axios from 'api';
+import { ErrorResponseHandler } from 'api/ErrorResponseHandler';
+import { AxiosError } from 'axios';
+import { ErrorResponse, SuccessResponse } from 'types/api';
+import { PayloadProps, Props } from 'types/api/channels/editSlack';
+
+const editSlack = async (
+	props: Props,
+): Promise<SuccessResponse<PayloadProps> | ErrorResponse> => {
+	try {
+		const response = await axios.put(`/channels/${props.id}`, {
+			name: props.name,
+			slack_configs: [
+				{
+					send_resolved: true,
+					api_url: props.api_url,
+					channel: props.channel,
+					title: props.title,
+					text: props.text,
+				},
+			],
+		});
+
+		return {
+			statusCode: 200,
+			error: null,
+			message: 'Success',
+			payload: response.data.data,
+		};
+	} catch (error) {
+		return ErrorResponseHandler(error as AxiosError);
+	}
+};
+
+export default editSlack;
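`createSlack` and `editSlack` post an identical `slack_configs` body and differ only in HTTP verb and URL, so the shared payload could be factored out. A refactoring sketch (the helper name is hypothetical, not part of this diff):

```typescript
import { Props } from 'types/api/channels/createSlack';

// Hypothetical helper: both Slack-channel wrappers could build their
// request body here instead of duplicating it.
function buildSlackPayload(props: Props): Record<string, unknown> {
	return {
		name: props.name,
		slack_configs: [
			{
				send_resolved: true,
				api_url: props.api_url,
				channel: props.channel,
				title: props.title,
				text: props.text,
			},
		],
	};
}
```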
frontend/src/api/channels/get.ts (new file, 24 lines)
@@ -0,0 +1,24 @@
+import axios from 'api';
+import { ErrorResponseHandler } from 'api/ErrorResponseHandler';
+import { AxiosError } from 'axios';
+import { ErrorResponse, SuccessResponse } from 'types/api';
+import { PayloadProps, Props } from 'types/api/channels/get';
+
+const get = async (
+	props: Props,
+): Promise<SuccessResponse<PayloadProps> | ErrorResponse> => {
+	try {
+		const response = await axios.get(`/channels/${props.id}`);
+
+		return {
+			statusCode: 200,
+			error: null,
+			message: 'Success',
+			payload: response.data.data,
+		};
+	} catch (error) {
+		return ErrorResponseHandler(error as AxiosError);
+	}
+};
+
+export default get;
frontend/src/api/channels/getAll.ts (new file, 24 lines)
@@ -0,0 +1,24 @@
+import axios from 'api';
+import { ErrorResponseHandler } from 'api/ErrorResponseHandler';
+import { AxiosError } from 'axios';
+import { ErrorResponse, SuccessResponse } from 'types/api';
+import { PayloadProps } from 'types/api/channels/getAll';
+
+const getAll = async (): Promise<
+	SuccessResponse<PayloadProps> | ErrorResponse
+> => {
+	try {
+		const response = await axios.get('/channels');
+
+		return {
+			statusCode: 200,
+			error: null,
+			message: 'Success',
+			payload: response.data.data,
+		};
+	} catch (error) {
+		return ErrorResponseHandler(error as AxiosError);
+	}
+};
+
+export default getAll;
@@ -1,10 +1,14 @@
 import axios from 'axios';
 import { ENVIRONMENT } from 'constants/env';

-import apiV1 from './apiV1';
+import apiV1, { apiV2 } from './apiV1';

 export default axios.create({
 	baseURL: `${ENVIRONMENT.baseURL}${apiV1}`,
 });

+export const AxiosAlertManagerInstance = axios.create({
+	baseURL: `${ENVIRONMENT.baseURL}${apiV2}`,
+});
+
 export { apiV1 };
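`ErrorResponseHandler`, used by every wrapper in this diff, is not itself shown. A sketch of what it is assumed to do, i.e. map an `AxiosError` onto the same response shape; the field names are guessed to mirror `SuccessResponse`, so treat this as hypothetical:

```typescript
import { AxiosError } from 'axios';
import { ErrorResponse } from 'types/api';

// Assumed behavior (hypothetical sketch; the real module may differ):
// normalize an Axios failure into the ErrorResponse union member so that
// callers never need their own try/catch around API calls.
export function ErrorResponseHandler(error: AxiosError): ErrorResponse {
	return {
		statusCode: error.response?.status ?? 500,
		error: error.message,
		message: null,
		payload: null,
	};
}
```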
frontend/src/api/settings/getRetention.ts (new file, 24 lines)
@@ -0,0 +1,24 @@
+import axios from 'api';
+import { ErrorResponseHandler } from 'api/ErrorResponseHandler';
+import { AxiosError } from 'axios';
+import { ErrorResponse, SuccessResponse } from 'types/api';
+import { PayloadProps } from 'types/api/settings/getRetention';
+
+const getRetention = async (): Promise<
+	SuccessResponse<PayloadProps> | ErrorResponse
+> => {
+	try {
+		const response = await axios.get<PayloadProps>(`/settings/ttl`);
+
+		return {
+			statusCode: 200,
+			error: null,
+			message: 'Success',
+			payload: response.data,
+		};
+	} catch (error) {
+		return ErrorResponseHandler(error as AxiosError);
+	}
+};
+
+export default getRetention;
frontend/src/api/settings/setRetention.ts (new file, 26 lines)
@@ -0,0 +1,26 @@
+import axios from 'api';
+import { ErrorResponseHandler } from 'api/ErrorResponseHandler';
+import { AxiosError } from 'axios';
+import { ErrorResponse, SuccessResponse } from 'types/api';
+import { PayloadProps, Props } from 'types/api/settings/setRetention';
+
+const setRetention = async (
+	props: Props,
+): Promise<SuccessResponse<PayloadProps> | ErrorResponse> => {
+	try {
+		const response = await axios.post<PayloadProps>(
+			`/settings/ttl?duration=${props.duration}&type=${props.type}`,
+		);
+
+		return {
+			statusCode: 200,
+			error: null,
+			message: 'Success',
+			payload: response.data,
+		};
+	} catch (error) {
+		return ErrorResponseHandler(error as AxiosError);
+	}
+};
+
+export default setRetention;
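`setRetention` passes `duration` and `type` as query parameters on a POST with no body. A usage sketch; the concrete values below are illustrative assumptions, since the accepted units and type names come from the backend:

```typescript
import setRetention from 'api/settings/setRetention';

// Hypothetical caller: apply a retention (TTL) setting and report the result.
async function applyTraceRetention(): Promise<void> {
	const response = await setRetention({ duration: '720h', type: 'traces' });

	if (response.statusCode === 200) {
		console.log('retention updated');
	} else {
		console.error(response.error);
	}
}
```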
frontend/src/api/trace/getServiceList.ts (new file, 24 lines)
@@ -0,0 +1,24 @@
+import axios from 'api';
+import { ErrorResponseHandler } from 'api/ErrorResponseHandler';
+import { AxiosError } from 'axios';
+import { ErrorResponse, SuccessResponse } from 'types/api';
+import { PayloadProps } from 'types/api/trace/getServiceList';
+
+const getServiceList = async (): Promise<
+	SuccessResponse<PayloadProps> | ErrorResponse
+> => {
+	try {
+		const response = await axios.get('/services/list');
+
+		return {
+			statusCode: 200,
+			error: null,
+			message: 'Success',
+			payload: response.data,
+		};
+	} catch (error) {
+		return ErrorResponseHandler(error as AxiosError);
+	}
+};
+
+export default getServiceList;
frontend/src/api/trace/getServiceOperation.ts (new file, 24 lines)
@@ -0,0 +1,24 @@
+import axios from 'api';
+import { ErrorResponseHandler } from 'api/ErrorResponseHandler';
+import { AxiosError } from 'axios';
+import { ErrorResponse, SuccessResponse } from 'types/api';
+import { PayloadProps, Props } from 'types/api/trace/getServiceOperation';
+
+const getServiceOperation = async (
+	props: Props,
+): Promise<SuccessResponse<PayloadProps> | ErrorResponse> => {
+	try {
+		const response = await axios.get(`/service/${props.service}/operations`);
+
+		return {
+			statusCode: 200,
+			error: null,
+			message: 'Success',
+			payload: response.data,
+		};
+	} catch (error) {
+		return ErrorResponseHandler(error as AxiosError);
+	}
+};
+
+export default getServiceOperation;
frontend/src/api/trace/getSpan.ts (new file, 26 lines)
@@ -0,0 +1,26 @@
+import axios from 'api';
+import { ErrorResponseHandler } from 'api/ErrorResponseHandler';
+import { AxiosError } from 'axios';
+import { ErrorResponse, SuccessResponse } from 'types/api';
+import { PayloadProps, Props } from 'types/api/trace/getSpans';
+
+const getSpans = async (
+	props: Props,
+): Promise<SuccessResponse<PayloadProps> | ErrorResponse> => {
+	try {
+		const response = await axios.get(
+			`/spans?&start=${props.start}&end=${props.end}&kind=${props.kind}&lookback=${props.lookback}&maxDuration=${props.maxDuration}&minDuration=${props.minDuration}&operation=${props.operation}&service=${props.service}&limit=${props.limit}&tags=${props.tags}`,
+		);
+
+		return {
+			statusCode: 200,
+			error: null,
+			message: 'Success',
+			payload: response.data,
+		};
+	} catch (error) {
+		return ErrorResponseHandler(error as AxiosError);
+	}
+};
+
+export default getSpans;
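The `/spans` URL above begins with `?&` (a harmless but stray ampersand) and interpolates every parameter, including `tags`, without URL-encoding. An encoding-safe construction of the same request path (an alternative sketch, not what this diff ships):

```typescript
import { Props } from 'types/api/trace/getSpans';

// Builds the same /spans query with proper encoding and no stray '&'.
function buildSpansQuery(props: Props): string {
	const params = new URLSearchParams();
	Object.entries(props).forEach(([key, value]) => {
		params.append(key, String(value));
	});
	return `/spans?${params.toString()}`;
}
```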
Some files were not shown because too many files have changed in this diff.