Mirror of https://github.com/SigNoz/signoz.git (synced 2025-12-27 18:54:27 +00:00)

Compare commits: v0.69.0-cl...config (36 commits)
Commits (SHA1):

ba749a12ad
b59744a4e0
fbfc2d3626
fb1157515d
8e7ede0642
4a068eb68c
0aba107436
e99d3427ec
44cbe53705
1ccc0b3c48
e695f89c85
f080bcd3ee
a5635b10e1
1ab9018641
c3153012a6
8ba479d3bb
4d398b1bb1
8874da0cf6
756c9d7364
f48a919945
f0b58cd5ae
3095db106b
0f06ea1a0c
188d8a4302
db95840260
c0bf5f5b0a
35ecd38cef
6bd1e1387c
6680622762
f3f315726d
513629e02d
b180999a71
040c0d708b
64c62896f8
a1160b990d
79d99f21f8
conf/defaults.yaml (new normal file, 32 lines)

@@ -0,0 +1,32 @@
##################### SigNoz Configuration Defaults #####################
#
# Do not modify this file
#

##################### Web #####################
web:
  # The prefix to serve web on
  prefix: /
  # The directory containing the static build files.
  directory: /etc/signoz/web

##################### Cache #####################
cache:
  # specifies the caching provider to use.
  provider: memory
  # memory: Uses in-memory caching.
  memory:
    # Time-to-live for cache entries in memory. Specify the duration in ns
    ttl: 60000000000
    # The interval at which the cache will be cleaned up
    cleanupInterval:
  # redis: Uses Redis as the caching backend.
  redis:
    # The hostname or IP address of the Redis server.
    host: localhost
    # The port on which the Redis server is running. Default is usually 6379.
    port: 6379
    # The password for authenticating with the Redis server, if required.
    password:
    # The Redis database number to use
    db: 0
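Note: cache.memory.ttl is a duration in nanoseconds, so 60000000000 ns is 60 s. A quick TypeScript sanity check of that value (illustrative only, not part of this change):

// cache.memory.ttl from conf/defaults.yaml, in nanoseconds
const ttlNs = 60_000_000_000;
// 1 s = 1e9 ns
const ttlSeconds = ttlNs / 1e9;
console.log(`cache TTL: ${ttlSeconds}s`); // cache TTL: 60s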
@@ -1,70 +0,0 @@
##################### SigNoz Configuration Example #####################
#
# Do not modify this file
#

##################### Instrumentation #####################
instrumentation:
  logs:
    level: info
    enabled: false
    processors:
      batch:
        exporter:
          otlp:
            endpoint: localhost:4317
  traces:
    enabled: false
    processors:
      batch:
        exporter:
          otlp:
            endpoint: localhost:4317
  metrics:
    enabled: true
    readers:
      pull:
        exporter:
          prometheus:
            host: "0.0.0.0"
            port: 9090

##################### Web #####################
web:
  # Whether to enable the web frontend
  enabled: true
  # The prefix to serve web on
  prefix: /
  # The directory containing the static build files.
  directory: /etc/signoz/web

##################### Cache #####################
cache:
  # specifies the caching provider to use.
  provider: memory
  # memory: Uses in-memory caching.
  memory:
    # Time-to-live for cache entries in memory. Specify the duration in ns
    ttl: 60000000000
    # The interval at which the cache will be cleaned up
    cleanupInterval: 1m
  # redis: Uses Redis as the caching backend.
  redis:
    # The hostname or IP address of the Redis server.
    host: localhost
    # The port on which the Redis server is running. Default is usually 6379.
    port: 6379
    # The password for authenticating with the Redis server, if required.
    password:
    # The Redis database number to use
    db: 0

##################### SQLStore #####################
sqlstore:
  # specifies the SQLStore provider to use.
  provider: sqlite
  # The maximum number of open connections to the database.
  max_open_conns: 100
  sqlite:
    # The path to the SQLite database file.
    path: /var/lib/signoz/signoz.db
@@ -32,11 +32,6 @@ has_cmd() {
    command -v "$1" > /dev/null 2>&1
}

# Check if docker compose plugin is present
has_docker_compose_plugin() {
    docker compose version > /dev/null 2>&1
}

is_mac() {
    [[ $OSTYPE == darwin* ]]
}

@@ -188,7 +183,9 @@ install_docker() {
        $sudo_cmd yum-config-manager --add-repo https://download.docker.com/linux/$os/docker-ce.repo
        echo "Installing docker"
        $yum_cmd install docker-ce docker-ce-cli containerd.io

    fi

}

@@ -230,6 +227,12 @@ start_docker() {
        echo "Starting docker service"
        $sudo_cmd systemctl start docker.service
    fi
    # if [[ -z $sudo_cmd ]]; then
    #     docker ps > /dev/null && true
    #     if [[ $? -ne 0 ]]; then
    #         request_sudo
    #     fi
    # fi
    if [[ -z $sudo_cmd ]]; then
        if ! docker ps > /dev/null && true; then
            request_sudo

@@ -262,7 +265,7 @@ bye() { # Prints a friendly good bye message and exits the script.

    echo "🔴 The containers didn't seem to start correctly. Please run the following command to check containers that may have errored out:"
    echo ""
    echo -e "$sudo_cmd $docker_compose_cmd -f ./docker/clickhouse-setup/docker-compose.yaml ps -a"
    echo -e "$sudo_cmd docker-compose -f ./docker/clickhouse-setup/docker-compose.yaml ps -a"

    echo "Please read our troubleshooting guide https://signoz.io/docs/install/troubleshooting/"
    echo "or reach us for support in #help channel in our Slack Community https://signoz.io/slack"

@@ -293,6 +296,11 @@ request_sudo() {
    if (( $EUID != 0 )); then
        sudo_cmd="sudo"
        echo -e "Please enter your sudo password, if prompted."
        # $sudo_cmd -l | grep -e "NOPASSWD: ALL" > /dev/null
        # if [[ $? -ne 0 ]] && ! $sudo_cmd -v; then
        #     echo "Need sudo privileges to proceed with the installation."
        #     exit 1;
        # fi
        if ! $sudo_cmd -l | grep -e "NOPASSWD: ALL" > /dev/null && ! $sudo_cmd -v; then
            echo "Need sudo privileges to proceed with the installation."
            exit 1;

@@ -309,7 +317,6 @@ echo -e "👋 Thank you for trying out SigNoz! "
echo ""

sudo_cmd=""
docker_compose_cmd=""

# Check sudo permissions
if (( $EUID != 0 )); then

@@ -355,8 +362,28 @@ else
    SIGNOZ_INSTALLATION_ID=$(echo "$sysinfo" | $digest_cmd | grep -E -o '[a-zA-Z0-9]{64}')
fi

# echo ""

# echo -e "👉 ${RED}Two ways to go forward\n"
# echo -e "${RED}1) ClickHouse as database (default)\n"
# read -p "⚙️ Enter your preference (1/2):" choice_setup

# while [[ $choice_setup != "1" && $choice_setup != "2" && $choice_setup != "" ]]
# do
#     # echo $choice_setup
#     echo -e "\n❌ ${CYAN}Please enter either 1 or 2"
#     read -p "⚙️ Enter your preference (1/2): " choice_setup
#     # echo $choice_setup
# done

# if [[ $choice_setup == "1" || $choice_setup == "" ]];then
#     setup_type='clickhouse'
# fi

setup_type='clickhouse'

# echo -e "\n✅ ${CYAN}You have chosen: ${setup_type} setup\n"

# Run bye if failure happens
trap bye EXIT

@@ -428,6 +455,8 @@ if [[ $desired_os -eq 0 ]]; then
    send_event "os_not_supported"
fi

# check_ports_occupied

# Check is Docker daemon is installed and available. If not, the install & start Docker for Linux machines. We cannot automatically install Docker Desktop on Mac OS
if ! is_command_present docker; then

@@ -457,39 +486,27 @@ if ! is_command_present docker; then
    fi
fi

if has_docker_compose_plugin; then
    echo "docker compose plugin is present, using it"
    docker_compose_cmd="docker compose"
# Install docker-compose
else
    docker_compose_cmd="docker-compose"
    if ! is_command_present docker-compose; then
        request_sudo
        install_docker_compose
    fi
if ! is_command_present docker-compose; then
    request_sudo
    install_docker_compose
fi

start_docker

# check for open ports, if signoz is not installed
if is_command_present docker-compose; then
    if $sudo_cmd $docker_compose_cmd -f ./docker/clickhouse-setup/docker-compose.yaml ps | grep "signoz-query-service" | grep -q "healthy" > /dev/null 2>&1; then
        echo "SigNoz already installed, skipping the occupied ports check"
    else
        check_ports_occupied
    fi
fi
# $sudo_cmd docker-compose -f ./docker/clickhouse-setup/docker-compose.yaml up -d --remove-orphans || true


echo ""
echo -e "\n🟡 Pulling the latest container images for SigNoz.\n"
$sudo_cmd $docker_compose_cmd -f ./docker/clickhouse-setup/docker-compose.yaml pull
$sudo_cmd docker-compose -f ./docker/clickhouse-setup/docker-compose.yaml pull

echo ""
echo "🟡 Starting the SigNoz containers. It may take a few minutes ..."
echo
# The $docker_compose_cmd command does some nasty stuff for the `--detach` functionality. So we add a `|| true` so that the
# The docker-compose command does some nasty stuff for the `--detach` functionality. So we add a `|| true` so that the
# script doesn't exit because this command looks like it failed to do it's thing.
$sudo_cmd $docker_compose_cmd -f ./docker/clickhouse-setup/docker-compose.yaml up --detach --remove-orphans || true
$sudo_cmd docker-compose -f ./docker/clickhouse-setup/docker-compose.yaml up --detach --remove-orphans || true

wait_for_containers_start 60
echo ""

@@ -499,7 +516,7 @@ if [[ $status_code -ne 200 ]]; then
    echo "🔴 The containers didn't seem to start correctly. Please run the following command to check containers that may have errored out:"
    echo ""

    echo -e "$sudo_cmd $docker_compose_cmd -f ./docker/clickhouse-setup/docker-compose.yaml ps -a"
    echo -e "$sudo_cmd docker-compose -f ./docker/clickhouse-setup/docker-compose.yaml ps -a"

    echo "Please read our troubleshooting guide https://signoz.io/docs/install/troubleshooting/"
    echo "or reach us on SigNoz for support https://signoz.io/slack"

@@ -520,7 +537,7 @@ else
    echo "ℹ️ By default, retention period is set to 15 days for logs and traces, and 30 days for metrics."
    echo -e "To change this, navigate to the General tab on the Settings page of SigNoz UI. For more details, refer to https://signoz.io/docs/userguide/retention-period \n"

    echo "ℹ️ To bring down SigNoz and clean volumes : $sudo_cmd $docker_compose_cmd -f ./docker/clickhouse-setup/docker-compose.yaml down -v"
    echo "ℹ️ To bring down SigNoz and clean volumes : $sudo_cmd docker-compose -f ./docker/clickhouse-setup/docker-compose.yaml down -v"

    echo ""
    echo "+++++++++++++++++++++++++++++++++++++++++++++++++"
@@ -30,6 +30,7 @@ import (
	"go.signoz.io/signoz/ee/query-service/interfaces"
	"go.signoz.io/signoz/ee/query-service/rules"
	baseauth "go.signoz.io/signoz/pkg/query-service/auth"
	"go.signoz.io/signoz/pkg/query-service/migrate"
	v3 "go.signoz.io/signoz/pkg/query-service/model/v3"
	"go.signoz.io/signoz/pkg/signoz"
	"go.signoz.io/signoz/pkg/web"

@@ -81,6 +82,7 @@ type ServerOptions struct {
	GatewayUrl        string
	UseLogsNewSchema  bool
	UseTraceNewSchema bool
	SkipWebFrontend   bool
}

// Server runs HTTP api service

@@ -200,6 +202,13 @@ func NewServer(serverOptions *ServerOptions) (*Server, error) {
		return nil, err
	}

	go func() {
		err = migrate.ClickHouseMigrate(reader.GetConn(), serverOptions.Cluster)
		if err != nil {
			zap.L().Error("error while running clickhouse migrations", zap.Error(err))
		}
	}()

	// initiate opamp
	_, err = opAmpModel.InitDB(localDB)
	if err != nil {

@@ -387,9 +396,11 @@ func (s *Server) createPublicServer(apiHandler *api.APIHandler, web web.Web) (*h

	handler = handlers.CompressHandler(handler)

	err := web.AddToRouter(r)
	if err != nil {
		return nil, err
	if !s.serverOptions.SkipWebFrontend {
		err := web.AddToRouter(r)
		if err != nil {
			return nil, err
		}
	}

	return &http.Server{

@@ -10,12 +10,12 @@ import (
	"syscall"
	"time"

	"go.opentelemetry.io/collector/confmap"
	"go.opentelemetry.io/otel/sdk/resource"
	semconv "go.opentelemetry.io/otel/semconv/v1.4.0"
	"go.signoz.io/signoz/ee/query-service/app"
	"go.signoz.io/signoz/pkg/config"
	"go.signoz.io/signoz/pkg/config/envprovider"
	"go.signoz.io/signoz/pkg/config/fileprovider"
	signozconfig "go.signoz.io/signoz/pkg/config"
	"go.signoz.io/signoz/pkg/confmap/provider/signozenvprovider"
	"go.signoz.io/signoz/pkg/query-service/auth"
	baseconst "go.signoz.io/signoz/pkg/query-service/constants"
	"go.signoz.io/signoz/pkg/query-service/migrate"

@@ -108,6 +108,7 @@ func main() {
	var dialTimeout time.Duration
	var gatewayUrl string
	var useLicensesV3 bool
	var skipWebFrontend bool

	flag.BoolVar(&useLogsNewSchema, "use-logs-new-schema", false, "use logs_v2 schema for logs")
	flag.BoolVar(&useTraceNewSchema, "use-trace-new-schema", false, "use new schema for traces")

@@ -125,6 +126,7 @@ func main() {
	flag.StringVar(&cluster, "cluster", "cluster", "(cluster name - defaults to 'cluster')")
	flag.StringVar(&gatewayUrl, "gateway-url", "", "(url to the gateway)")
	flag.BoolVar(&useLicensesV3, "use-licenses-v3", false, "use licenses_v3 schema for licenses")
	flag.BoolVar(&skipWebFrontend, "skip-web-frontend", false, "skip web frontend")
	flag.Parse()

	loggerMgr := initZapLog(enableQueryServiceLogOTLPExport)

@@ -134,18 +136,19 @@ func main() {

	version.PrintVersion()

	config, err := signoz.NewConfig(context.Background(), config.ResolverConfig{
		Uris: []string{"env:"},
		ProviderFactories: []config.ProviderFactory{
			envprovider.NewFactory(),
			fileprovider.NewFactory(),
	config, err := signozconfig.New(context.Background(), signozconfig.ProviderSettings{
		ResolverSettings: confmap.ResolverSettings{
			URIs: []string{"signozenv:"},
			ProviderFactories: []confmap.ProviderFactory{
				signozenvprovider.NewFactory(),
			},
		},
	})
	if err != nil {
		zap.L().Fatal("Failed to create config", zap.Error(err))
	}

	signoz, err := signoz.New(context.Background(), config, signoz.NewProviderConfig())
	signoz, err := signoz.New(config, skipWebFrontend)
	if err != nil {
		zap.L().Fatal("Failed to create signoz struct", zap.Error(err))
	}

@@ -168,6 +171,7 @@ func main() {
		GatewayUrl:        gatewayUrl,
		UseLogsNewSchema:  useLogsNewSchema,
		UseTraceNewSchema: useTraceNewSchema,
		SkipWebFrontend:   skipWebFrontend,
	}

	// Read the jwt secret key
@@ -1,4 +1,4 @@
import axios from 'api';
import { ApiBaseInstance } from 'api';
import { ErrorResponseHandler } from 'api/ErrorResponseHandler';
import { AxiosError } from 'axios';
import { ErrorResponse, SuccessResponse } from 'types/api';

@@ -47,7 +47,7 @@ export const getK8sNodesList = async (
	headers?: Record<string, string>,
): Promise<SuccessResponse<K8sNodesListResponse> | ErrorResponse> => {
	try {
		const response = await axios.post('/nodes/list', props, {
		const response = await ApiBaseInstance.post('/nodes/list', props, {
			signal,
			headers,
		});

@@ -1,4 +1,4 @@
import axios from 'api';
import { ApiBaseInstance } from 'api';
import { ErrorResponseHandler } from 'api/ErrorResponseHandler';
import { AxiosError } from 'axios';
import { ErrorResponse, SuccessResponse } from 'types/api';

@@ -75,7 +75,7 @@ export const getK8sPodsList = async (
	headers?: Record<string, string>,
): Promise<SuccessResponse<K8sPodsListResponse> | ErrorResponse> => {
	try {
		const response = await axios.post('/pods/list', props, {
		const response = await ApiBaseInstance.post('/pods/list', props, {
			signal,
			headers,
		});
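Both list handlers above now post through ApiBaseInstance instead of the default axios export from 'api'. A minimal sketch of the resulting call shape, assuming ApiBaseInstance is a preconfigured axios instance exported by the frontend's 'api' module (the payload fields here are hypothetical; the real types are K8sNodesListPayload / K8sPodsListPayload):

import { ApiBaseInstance } from 'api';

// Hypothetical list request over the last 15 minutes.
async function fetchNodesList(signal?: AbortSignal): Promise<unknown> {
	const props = { start: Date.now() - 15 * 60 * 1000, end: Date.now() };
	const response = await ApiBaseInstance.post('/nodes/list', props, {
		signal, // lets callers cancel the in-flight request
	});
	return response.data;
}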
@@ -219,14 +219,12 @@ function ListLogView({
	<LogStateIndicator type={logType} fontSize={fontSize} />
	<div>
		<LogContainer fontSize={fontSize}>
			{updatedSelecedFields.some((field) => field.name === 'body') && (
				<LogGeneralField
					fieldKey="Log"
					fieldValue={flattenLogData.body}
					linesPerRow={linesPerRow}
					fontSize={fontSize}
				/>
			)}
			<LogGeneralField
				fieldKey="Log"
				fieldValue={flattenLogData.body}
				linesPerRow={linesPerRow}
				fontSize={fontSize}
			/>
			{flattenLogData.stream && (
				<LogGeneralField
					fieldKey="Stream"

@@ -234,27 +232,23 @@ function ListLogView({
					fontSize={fontSize}
				/>
			)}
			{updatedSelecedFields.some((field) => field.name === 'timestamp') && (
				<LogGeneralField
					fieldKey="Timestamp"
					fieldValue={timestampValue}
					fontSize={fontSize}
				/>
			)}
			<LogGeneralField
				fieldKey="Timestamp"
				fieldValue={timestampValue}
				fontSize={fontSize}
			/>

			{updatedSelecedFields
				.filter((field) => !['timestamp', 'body'].includes(field.name))
				.map((field) =>
					isValidLogField(flattenLogData[field.name] as never) ? (
						<LogSelectedField
							key={field.name}
							fieldKey={field.name}
							fieldValue={flattenLogData[field.name] as never}
							onAddToQuery={onAddToQuery}
							fontSize={fontSize}
						/>
					) : null,
				)}
			{updatedSelecedFields.map((field) =>
				isValidLogField(flattenLogData[field.name] as never) ? (
					<LogSelectedField
						key={field.name}
						fieldKey={field.name}
						fieldValue={flattenLogData[field.name] as never}
						onAddToQuery={onAddToQuery}
						fontSize={fontSize}
					/>
				) : null,
			)}
		</LogContainer>
	</div>
</div>

@@ -73,7 +73,6 @@ function RawLogView({
	);

	const attributesValues = updatedSelecedFields
		.filter((field) => !['timestamp', 'body'].includes(field.name))
		.map((field) => flattenLogData[field.name])
		.filter((attribute) => {
			// loadash isEmpty doesnot work with numbers

@@ -93,40 +92,19 @@ function RawLogView({
	const { formatTimezoneAdjustedTimestamp } = useTimezone();

	const text = useMemo(() => {
		const parts = [];
		const date =
			typeof data.timestamp === 'string'
				? formatTimezoneAdjustedTimestamp(data.timestamp, 'YYYY-MM-DD HH:mm:ss.SSS')
				: formatTimezoneAdjustedTimestamp(
						data.timestamp / 1e6,
						'YYYY-MM-DD HH:mm:ss.SSS',
					);

		// Check if timestamp is selected
		const showTimestamp = selectedFields.some(
			(field) => field.name === 'timestamp',
		);
		if (showTimestamp) {
			const date =
				typeof data.timestamp === 'string'
					? formatTimezoneAdjustedTimestamp(
							data.timestamp,
							'YYYY-MM-DD HH:mm:ss.SSS',
						)
					: formatTimezoneAdjustedTimestamp(
							data.timestamp / 1e6,
							'YYYY-MM-DD HH:mm:ss.SSS',
						);
			parts.push(date);
		}

		// Check if body is selected
		const showBody = selectedFields.some((field) => field.name === 'body');
		if (showBody) {
			parts.push(`${attributesText} ${data.body}`);
		} else {
			parts.push(attributesText);
		}

		return parts.join(' | ');
		return `${date} | ${attributesText} ${data.body}`;
	}, [
		selectedFields,
		attributesText,
		data.timestamp,
		data.body,
		attributesText,
		formatTimezoneAdjustedTimestamp,
	]);

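The data.timestamp / 1e6 branch above turns a nanosecond epoch into the millisecond epoch the formatter expects (1 ms = 1e6 ns). A standalone sketch of that conversion, with dayjs standing in for formatTimezoneAdjustedTimestamp (assumption: logs carry either an ISO string or a nanosecond epoch number):

import dayjs from 'dayjs';

function formatLogTimestamp(timestamp: string | number): string {
	const msEpoch =
		typeof timestamp === 'string'
			? dayjs(timestamp).valueOf() // ISO string -> ms epoch
			: timestamp / 1e6; // ns epoch -> ms epoch
	return dayjs(msEpoch).format('YYYY-MM-DD HH:mm:ss.SSS');
}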
@@ -48,7 +48,7 @@ export const useTableView = (props: UseTableViewProps): UseTableViewResult => {

	const columns: ColumnsType<Record<string, unknown>> = useMemo(() => {
		const fieldColumns: ColumnsType<Record<string, unknown>> = fields
			.filter((e) => !['id', 'body', 'timestamp'].includes(e.name))
			.filter((e) => e.name !== 'id')
			.map(({ name }) => ({
				title: name,
				dataIndex: name,

@@ -91,67 +91,55 @@ export const useTableView = (props: UseTableViewProps): UseTableViewResult => {
					),
				}),
			},
			...(fields.some((field) => field.name === 'timestamp')
				? [
						{
							title: 'timestamp',
							dataIndex: 'timestamp',
							key: 'timestamp',
							// https://github.com/ant-design/ant-design/discussions/36886
							render: (
								field: string | number,
							): ColumnTypeRender<Record<string, unknown>> => {
								const date =
									typeof field === 'string'
										? formatTimezoneAdjustedTimestamp(field, 'YYYY-MM-DD HH:mm:ss.SSS')
										: formatTimezoneAdjustedTimestamp(
												field / 1e6,
												'YYYY-MM-DD HH:mm:ss.SSS',
											);
								return {
									children: (
										<div className="table-timestamp">
											<Typography.Paragraph ellipsis className={cx('text', fontSize)}>
												{date}
											</Typography.Paragraph>
										</div>
									),
								};
							},
						},
					]
				: []),
			{
				title: 'timestamp',
				dataIndex: 'timestamp',
				key: 'timestamp',
				// https://github.com/ant-design/ant-design/discussions/36886
				render: (field): ColumnTypeRender<Record<string, unknown>> => {
					const date =
						typeof field === 'string'
							? formatTimezoneAdjustedTimestamp(field, 'YYYY-MM-DD HH:mm:ss.SSS')
							: formatTimezoneAdjustedTimestamp(
									field / 1e6,
									'YYYY-MM-DD HH:mm:ss.SSS',
								);
					return {
						children: (
							<div className="table-timestamp">
								<Typography.Paragraph ellipsis className={cx('text', fontSize)}>
									{date}
								</Typography.Paragraph>
							</div>
						),
					};
				},
			},
			...(appendTo === 'center' ? fieldColumns : []),
			...(fields.some((field) => field.name === 'body')
				? [
						{
							title: 'body',
							dataIndex: 'body',
							key: 'body',
							render: (
								field: string | number,
							): ColumnTypeRender<Record<string, unknown>> => ({
								props: {
									style: defaultTableStyle,
								},
								children: (
									<TableBodyContent
										dangerouslySetInnerHTML={{
											__html: convert.toHtml(
												dompurify.sanitize(unescapeString(field as string), {
													FORBID_TAGS: [...FORBID_DOM_PURIFY_TAGS],
												}),
											),
										}}
										fontSize={fontSize}
										linesPerRow={linesPerRow}
										isDarkMode={isDarkMode}
									/>
			{
				title: 'body',
				dataIndex: 'body',
				key: 'body',
				render: (field): ColumnTypeRender<Record<string, unknown>> => ({
					props: {
						style: defaultTableStyle,
					},
					children: (
						<TableBodyContent
							dangerouslySetInnerHTML={{
								__html: convert.toHtml(
									dompurify.sanitize(unescapeString(field), {
										FORBID_TAGS: [...FORBID_DOM_PURIFY_TAGS],
									}),
								),
									}),
								},
						]
					: []),
							}}
							fontSize={fontSize}
							linesPerRow={linesPerRow}
							isDarkMode={isDarkMode}
						/>
					),
				}),
			},
			...(appendTo === 'end' ? fieldColumns : []),
		];
	}, [
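The column list above toggles whole entries on and off with the spread-an-array-or-nothing idiom, so a column disappears entirely when its field is not selected. A condensed sketch of the pattern (antd's ColumnsType as in the diff; the field list is hypothetical):

import type { ColumnsType } from 'antd/es/table';

type Row = Record<string, unknown>;

function buildColumns(selected: { name: string }[]): ColumnsType<Row> {
	const has = (name: string): boolean =>
		selected.some((field) => field.name === name);

	return [
		// Spreading [] removes the column when the field is not selected.
		...(has('timestamp')
			? [{ title: 'timestamp', dataIndex: 'timestamp', key: 'timestamp' }]
			: []),
		...(has('body') ? [{ title: 'body', dataIndex: 'body', key: 'body' }] : []),
	];
}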
@@ -1,7 +1,8 @@
import 'uplot/dist/uPlot.min.css';
import './AnomalyAlertEvaluationView.styles.scss';

import { Checkbox, Input, Typography } from 'antd';
import { Checkbox, Typography } from 'antd';
import Search from 'antd/es/input/Search';
import { useIsDarkMode } from 'hooks/useDarkMode';
import useDebouncedFn from 'hooks/useDebouncedFunction';
import { useResizeObserver } from 'hooks/useDimensions';

@@ -15,8 +16,6 @@ import uPlot from 'uplot';

import tooltipPlugin from './tooltipPlugin';

const { Search } = Input;

function UplotChart({
	data,
	options,
@@ -1,146 +0,0 @@
import ROUTES from 'constants/routes';
import CreateAlertPage from 'pages/CreateAlert';
import { MemoryRouter, Route } from 'react-router-dom';
import { act, fireEvent, render } from 'tests/test-utils';
import { AlertTypes } from 'types/api/alerts/alertTypes';

import { ALERT_TYPE_TO_TITLE, ALERT_TYPE_URL_MAP } from './constants';

jest.mock('react-router-dom', () => ({
	...jest.requireActual('react-router-dom'),
	useLocation: (): { pathname: string } => ({
		pathname: `${process.env.FRONTEND_API_ENDPOINT}${ROUTES.ALERTS_NEW}`,
	}),
}));

jest.mock('uplot', () => {
	const paths = {
		spline: jest.fn(),
		bars: jest.fn(),
	};
	const uplotMock = jest.fn(() => ({
		paths,
	}));
	return {
		paths,
		default: uplotMock,
	};
});

let mockWindowOpen: jest.Mock;

window.ResizeObserver =
	window.ResizeObserver ||
	jest.fn().mockImplementation(() => ({
		disconnect: jest.fn(),
		observe: jest.fn(),
		unobserve: jest.fn(),
	}));

function findLinkForAlertType(
	links: HTMLElement[],
	alertType: AlertTypes,
): HTMLElement {
	const link = links.find(
		(el) =>
			el.closest('[data-testid]')?.getAttribute('data-testid') ===
			`alert-type-card-${alertType}`,
	);
	expect(link).toBeTruthy();
	return link as HTMLElement;
}

function clickLinkAndVerifyRedirect(
	link: HTMLElement,
	expectedUrl: string,
): void {
	fireEvent.click(link);
	expect(mockWindowOpen).toHaveBeenCalledWith(expectedUrl, '_blank');
}

describe('Alert rule documentation redirection', () => {
	let renderResult: ReturnType<typeof render>;

	beforeAll(() => {
		mockWindowOpen = jest.fn();
		window.open = mockWindowOpen;
	});

	beforeEach(() => {
		act(() => {
			renderResult = render(
				<MemoryRouter initialEntries={['/alerts/new']}>
					<Route path={ROUTES.ALERTS_NEW}>
						<CreateAlertPage />
					</Route>
				</MemoryRouter>,
			);
		});
	});

	it('should render alert type cards', () => {
		const { getByText, getAllByText } = renderResult;

		// Check for the heading
		expect(getByText('choose_alert_type')).toBeInTheDocument();

		// Check for alert type titles and descriptions
		Object.values(AlertTypes).forEach((alertType) => {
			const title = ALERT_TYPE_TO_TITLE[alertType];
			expect(getByText(title)).toBeInTheDocument();
			expect(getByText(`${title}_desc`)).toBeInTheDocument();
		});

		const clickHereLinks = getAllByText(
			'Click here to see how to create a sample alert.',
		);

		expect(clickHereLinks).toHaveLength(5);
	});

	it('should redirect to correct documentation for each alert type', () => {
		const { getAllByText } = renderResult;

		const clickHereLinks = getAllByText(
			'Click here to see how to create a sample alert.',
		);
		const alertTypeCount = Object.keys(AlertTypes).length;

		expect(clickHereLinks).toHaveLength(alertTypeCount);

		Object.values(AlertTypes).forEach((alertType) => {
			const linkForAlertType = findLinkForAlertType(clickHereLinks, alertType);
			const expectedUrl = ALERT_TYPE_URL_MAP[alertType];

			clickLinkAndVerifyRedirect(linkForAlertType, expectedUrl.selection);
		});

		expect(mockWindowOpen).toHaveBeenCalledTimes(alertTypeCount);
	});

	Object.values(AlertTypes)
		.filter((type) => type !== AlertTypes.ANOMALY_BASED_ALERT)
		.forEach((alertType) => {
			it(`should redirect to create alert page for ${alertType} and "Check an example alert" should redirect to the correct documentation`, () => {
				const { getByTestId, getByRole } = renderResult;

				const alertTypeLink = getByTestId(`alert-type-card-${alertType}`);

				act(() => {
					fireEvent.click(alertTypeLink);
				});

				act(() => {
					fireEvent.click(
						getByRole('button', {
							name: /alert setup guide/i,
						}),
					);
				});

				expect(mockWindowOpen).toHaveBeenCalledWith(
					ALERT_TYPE_URL_MAP[alertType].creation,
					'_blank',
				);
			});
		});
});
@@ -1,71 +0,0 @@
import ROUTES from 'constants/routes';
import CreateAlertPage from 'pages/CreateAlert';
import { MemoryRouter, Route } from 'react-router-dom';
import { act, fireEvent, render } from 'tests/test-utils';
import { AlertTypes } from 'types/api/alerts/alertTypes';

import { ALERT_TYPE_URL_MAP } from './constants';

jest.mock('react-router-dom', () => ({
	...jest.requireActual('react-router-dom'),
	useLocation: (): { pathname: string; search: string } => ({
		pathname: `${process.env.FRONTEND_API_ENDPOINT}${ROUTES.ALERTS_NEW}`,
		search: 'ruleType=anomaly_rule',
	}),
}));

jest.mock('uplot', () => {
	const paths = {
		spline: jest.fn(),
		bars: jest.fn(),
	};
	const uplotMock = jest.fn(() => ({
		paths,
	}));
	return {
		paths,
		default: uplotMock,
	};
});

window.ResizeObserver =
	window.ResizeObserver ||
	jest.fn().mockImplementation(() => ({
		disconnect: jest.fn(),
		observe: jest.fn(),
		unobserve: jest.fn(),
	}));

describe('Anomaly Alert Documentation Redirection', () => {
	let mockWindowOpen: jest.Mock;

	beforeAll(() => {
		mockWindowOpen = jest.fn();
		window.open = mockWindowOpen;
	});

	it('should handle anomaly alert documentation redirection correctly', () => {
		const { getByRole } = render(
			<MemoryRouter initialEntries={['/alerts/new']}>
				<Route path={ROUTES.ALERTS_NEW}>
					<CreateAlertPage />
				</Route>
			</MemoryRouter>,
		);

		const alertType = AlertTypes.ANOMALY_BASED_ALERT;

		act(() => {
			fireEvent.click(
				getByRole('button', {
					name: /alert setup guide/i,
				}),
			);
		});

		expect(mockWindowOpen).toHaveBeenCalledWith(
			ALERT_TYPE_URL_MAP[alertType].creation,
			'_blank',
		);
	});
});
@@ -1,47 +0,0 @@
import { AlertTypes } from 'types/api/alerts/alertTypes';

// since we don't have a card in alert creation for anomaly based alert

export const ALERT_TYPE_URL_MAP: Record<
	AlertTypes,
	{ selection: string; creation: string }
> = {
	[AlertTypes.METRICS_BASED_ALERT]: {
		selection:
			'https://signoz.io/docs/alerts-management/metrics-based-alerts/?utm_source=product&utm_medium=alert-source-selection-page#examples',
		creation:
			'https://signoz.io/docs/alerts-management/metrics-based-alerts/?utm_source=product&utm_medium=alert-creation-page',
	},
	[AlertTypes.LOGS_BASED_ALERT]: {
		selection:
			'https://signoz.io/docs/alerts-management/log-based-alerts/?utm_source=product&utm_medium=alert-source-selection-page#examples',
		creation:
			'https://signoz.io/docs/alerts-management/log-based-alerts/?utm_source=product&utm_medium=alert-creation-page',
	},
	[AlertTypes.TRACES_BASED_ALERT]: {
		selection:
			'https://signoz.io/docs/alerts-management/trace-based-alerts/?utm_source=product&utm_medium=alert-source-selection-page#examples',
		creation:
			'https://signoz.io/docs/alerts-management/trace-based-alerts/?utm_source=product&utm_medium=alert-creation-page',
	},
	[AlertTypes.EXCEPTIONS_BASED_ALERT]: {
		selection:
			'https://signoz.io/docs/alerts-management/exceptions-based-alerts/?utm_source=product&utm_medium=alert-source-selection-page#examples',
		creation:
			'https://signoz.io/docs/alerts-management/exceptions-based-alerts/?utm_source=product&utm_medium=alert-creation-page',
	},
	[AlertTypes.ANOMALY_BASED_ALERT]: {
		selection:
			'https://signoz.io/docs/alerts-management/anomaly-based-alerts/?utm_source=product&utm_medium=alert-source-selection-page#examples',
		creation:
			'https://signoz.io/docs/alerts-management/anomaly-based-alerts/?utm_source=product&utm_medium=alert-creation-page',
	},
};

export const ALERT_TYPE_TO_TITLE: Record<AlertTypes, string> = {
	[AlertTypes.METRICS_BASED_ALERT]: 'metric_based_alert',
	[AlertTypes.LOGS_BASED_ALERT]: 'log_based_alert',
	[AlertTypes.TRACES_BASED_ALERT]: 'traces_based_alert',
	[AlertTypes.EXCEPTIONS_BASED_ALERT]: 'exceptions_based_alert',
	[AlertTypes.ANOMALY_BASED_ALERT]: 'anomaly_based_alert',
};
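For reference, the removed tests exercised this map by asserting on window.open; roughly (names as in the deleted files):

import { AlertTypes } from 'types/api/alerts/alertTypes';

import { ALERT_TYPE_URL_MAP } from './constants';

// Open the creation docs for an alert type, as the deleted
// redirection tests asserted via window.open(url, '_blank').
function openCreationDocs(alertType: AlertTypes): void {
	window.open(ALERT_TYPE_URL_MAP[alertType].creation, '_blank');
}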
@@ -39,6 +39,10 @@
	.ant-collapse-header {
		border-bottom: 1px solid var(--bg-slate-400);
		padding: 12px 8px;

		&[aria-expanded='true'] {
			background: var(--bg-ink-400);
		}
	}

	.ant-collapse-content-box {

@@ -267,6 +271,8 @@

	.group-by-label {
		min-width: max-content;

		color: var(--bg-vanilla-100, #c0c1c3);
		font-size: 13px;
		font-style: normal;
		font-weight: 400;

@@ -276,6 +282,7 @@
		border-radius: 2px 0px 0px 2px;
		border: 1px solid var(--bg-slate-400, #1d212d);
		border-right: none;
		background: var(--bg-ink-100, #16181d);
		border-top-right-radius: 0px;
		border-bottom-right-radius: 0px;

@@ -481,7 +488,7 @@
	.expanded-table-container {
		border: 1px solid var(--bg-ink-400);
		overflow-x: auto;
		padding-left: 48px;
		padding-left: 16px;

		&::-webkit-scrollbar {
			width: 0.1rem;

@@ -703,34 +710,8 @@
	}

	.ant-table-cell {
		min-width: 140px !important;
		max-width: 140px !important;
	}

	.ant-table-cell {
		&:has(.pod-name-header) {
			min-width: 250px !important;
			max-width: 250px !important;
		}
	}

	.ant-table-cell {
		&:has(.med-col) {
			min-width: 180px !important;
			max-width: 180px !important;
		}
	}

	.expanded-k8s-list-table {
		.ant-table-cell {
			min-width: 180px !important;
			max-width: 180px !important;
		}

		.ant-table-row-expand-icon-cell {
			min-width: 30px !important;
			max-width: 30px !important;
		}
		min-width: 170px !important;
		max-width: 170px !important;
	}

	.ant-table-row-expand-icon-cell {

@@ -827,24 +808,6 @@
}

.lightMode {
	.infra-monitoring-container {
		.k8s-list-table {
			.ant-table-expanded-row {
				&:hover {
					background: var(--bg-vanilla-100) !important;
				}

				.ant-table-cell {
					background: var(--bg-vanilla-100) !important;
				}

				.ant-table .ant-table-thead > tr > th {
					padding: 4px 16px !important;
				}
			}
		}
	}

	.event-content-container {
		.ant-table {
			background: var(--bg-vanilla-100);

@@ -868,11 +831,4 @@
		}
	}
}

	.entity-group-header {
		.ant-tag {
			background-color: var(--bg-vanilla-300) !important;
			color: var(--bg-slate-400) !important;
		}
	}
}
@@ -9,7 +9,7 @@ import { useQueryBuilder } from 'hooks/queryBuilder/useQueryBuilder';
import { useQueryOperations } from 'hooks/queryBuilder/useQueryBuilderOperations';
import { Container, Workflow } from 'lucide-react';
import ErrorBoundaryFallback from 'pages/ErrorBoundaryFallback/ErrorBoundaryFallback';
import { useState } from 'react';
import { useCallback, useState } from 'react';
import { Query } from 'types/api/queryBuilder/queryBuilderData';

import {

@@ -24,7 +24,6 @@ export default function InfraMonitoringK8s(): JSX.Element {
	const [showFilters, setShowFilters] = useState(true);

	const [selectedCategory, setSelectedCategory] = useState(K8sCategories.PODS);
	const [quickFiltersLastUpdated, setQuickFiltersLastUpdated] = useState(-1);

	const { currentQuery } = useQueryBuilder();

@@ -38,12 +37,14 @@ export default function InfraMonitoringK8s(): JSX.Element {
		entityVersion: '',
	});

	const handleFilterChange = (query: Query): void => {
		// update the current query with the new filters
		// in infra monitoring k8s, we are using only one query, hence updating the 0th index of queryData
		handleChangeQueryData('filters', query.builder.queryData[0].filters);
		setQuickFiltersLastUpdated(Date.now());
	};
	const handleFilterChange = useCallback(
		(query: Query): void => {
			// update the current query with the new filters
			// in infra monitoring k8s, we are using only one query, hence updating the 0th index of queryData
			handleChangeQueryData('filters', query.builder.queryData[0].filters);
		},
		[handleChangeQueryData],
	);

	const items: CollapseProps['items'] = [
		{

@@ -261,8 +262,6 @@ export default function InfraMonitoringK8s(): JSX.Element {
	const handleCategoryChange = (key: string | string[]): void => {
		if (Array.isArray(key) && key.length > 0) {
			setSelectedCategory(key[0] as string);
			// Reset filters
			handleChangeQueryData('filters', { items: [], op: 'and' });
		}
	};

@@ -303,7 +302,6 @@ export default function InfraMonitoringK8s(): JSX.Element {
	<K8sPodLists
		isFiltersVisible={showFilters}
		handleFilterVisibilityChange={handleFilterVisibilityChange}
		quickFiltersLastUpdated={quickFiltersLastUpdated}
	/>
)}

@@ -311,7 +309,6 @@ export default function InfraMonitoringK8s(): JSX.Element {
	<K8sNodesList
		isFiltersVisible={showFilters}
		handleFilterVisibilityChange={handleFilterVisibilityChange}
		quickFiltersLastUpdated={quickFiltersLastUpdated}
	/>
)}
</div>
@@ -7,7 +7,7 @@ import { Button, Input } from 'antd';
import { GripVertical, TableColumnsSplit, X } from 'lucide-react';
import { useEffect, useRef, useState } from 'react';

import { IEntityColumn } from '../utils';
import { IPodColumn } from '../utils';

function K8sFiltersSidePanel({
	defaultAddedColumns,

@@ -17,12 +17,12 @@ function K8sFiltersSidePanel({
	onAddColumn = () => {},
	onRemoveColumn = () => {},
}: {
	defaultAddedColumns: IEntityColumn[];
	defaultAddedColumns: IPodColumn[];
	onClose: () => void;
	addedColumns?: IEntityColumn[];
	availableColumns?: IEntityColumn[];
	onAddColumn?: (column: IEntityColumn) => void;
	onRemoveColumn?: (column: IEntityColumn) => void;
	addedColumns?: IPodColumn[];
	availableColumns?: IPodColumn[];
	onAddColumn?: (column: IPodColumn) => void;
	onRemoveColumn?: (column: IPodColumn) => void;
}): JSX.Element {
	const [searchValue, setSearchValue] = useState('');
	const sidePanelRef = useRef<HTMLDivElement>(null);
@@ -12,7 +12,7 @@ import { IBuilderQuery } from 'types/api/queryBuilder/queryBuilderData';

import { K8sCategory } from './constants';
import K8sFiltersSidePanel from './K8sFiltersSidePanel/K8sFiltersSidePanel';
import { IEntityColumn } from './utils';
import { IPodColumn } from './utils';

interface K8sHeaderProps {
	selectedGroupBy: BaseAutocompleteData[];

@@ -20,11 +20,11 @@ interface K8sHeaderProps {
	isLoadingGroupByFilters: boolean;
	handleFiltersChange: (value: IBuilderQuery['filters']) => void;
	handleGroupByChange: (value: IBuilderQuery['groupBy']) => void;
	defaultAddedColumns: IEntityColumn[];
	addedColumns?: IEntityColumn[];
	availableColumns?: IEntityColumn[];
	onAddColumn?: (column: IEntityColumn) => void;
	onRemoveColumn?: (column: IEntityColumn) => void;
	defaultAddedColumns: IPodColumn[];
	addedColumns?: IPodColumn[];
	availableColumns?: IPodColumn[];
	onAddColumn?: (column: IPodColumn) => void;
	onRemoveColumn?: (column: IPodColumn) => void;
	handleFilterVisibilityChange: () => void;
	isFiltersVisible: boolean;
	entity: K8sCategory;
@@ -45,11 +45,9 @@ import {
function K8sNodesList({
	isFiltersVisible,
	handleFilterVisibilityChange,
	quickFiltersLastUpdated,
}: {
	isFiltersVisible: boolean;
	handleFilterVisibilityChange: () => void;
	quickFiltersLastUpdated: number;
}): JSX.Element {
	const { maxTime, minTime } = useSelector<AppState, GlobalReducer>(
		(state) => state.globalTime,

@@ -62,7 +60,7 @@ function K8sNodesList({
	const [orderBy, setOrderBy] = useState<{
		columnName: string;
		order: 'asc' | 'desc';
	} | null>({ columnName: 'cpu', order: 'desc' });
	} | null>(null);

	const [selectedNodeUID, setselectedNodeUID] = useState<string | null>(null);

@@ -78,28 +76,12 @@ function K8sNodesList({
		{ value: string; label: string }[]
	>([]);

	const { currentQuery } = useQueryBuilder();

	const queryFilters = useMemo(
		() =>
			currentQuery?.builder?.queryData[0]?.filters || {
				items: [],
				op: 'and',
			},
		[currentQuery?.builder?.queryData],
	);

	// Reset pagination every time quick filters are changed
	useEffect(() => {
		setCurrentPage(1);
	}, [quickFiltersLastUpdated]);

	const createFiltersForSelectedRowData = (
		selectedRowData: K8sNodesRowData,
		groupBy: IBuilderQuery['groupBy'],
	): IBuilderQuery['filters'] => {
		const baseFilters: IBuilderQuery['filters'] = {
			items: [...queryFilters.items],
			items: [],
			op: 'and',
		};

@@ -138,7 +120,6 @@ function K8sNodesList({
			end: Math.floor(maxTime / 1000000),
			orderBy,
		};
		// eslint-disable-next-line react-hooks/exhaustive-deps
	}, [minTime, maxTime, orderBy, selectedRowData, groupBy]);

	const {

@@ -152,6 +133,8 @@ function K8sNodesList({
		enabled: !!fetchGroupedByRowDataQuery && !!selectedRowData,
	});

	const { currentQuery } = useQueryBuilder();

	const {
		data: groupByFiltersData,
		isLoading: isLoadingGroupByFilters,

@@ -170,6 +153,15 @@ function K8sNodesList({
		K8sCategory.NODES,
	);

	const queryFilters = useMemo(
		() =>
			currentQuery?.builder?.queryData[0]?.filters || {
				items: [],
				op: 'and',
			},
		[currentQuery?.builder?.queryData],
	);

	const query = useMemo(() => {
		const baseQuery = getK8sNodesListQuery();
		const queryPayload = {

@@ -316,7 +308,6 @@ function K8sNodesList({
	) : (
		<div className="expanded-table">
			<Table
				className="expanded-table-view"
				columns={nestedColumns as ColumnType<K8sNodesRowData>[]}
				dataSource={formattedGroupedByNodesData}
				pagination={false}

@@ -391,6 +382,18 @@ function K8sNodesList({
		setselectedNodeUID(null);
	};

	const showsNodesTable =
		!isError &&
		!isLoading &&
		!isFetching &&
		!(formattedNodesData.length === 0 && queryFilters.items.length > 0);

	const showNoFilteredNodesMessage =
		!isFetching &&
		!isLoading &&
		formattedNodesData.length === 0 &&
		queryFilters.items.length > 0;

	const handleGroupByChange = useCallback(
		(value: IBuilderQuery['groupBy']) => {
			const groupBy = [];

@@ -439,53 +442,54 @@ function K8sNodesList({
	/>
	{isError && <Typography>{data?.error || 'Something went wrong'}</Typography>}

	<Table
		className="k8s-list-table nodes-list-table"
		dataSource={isFetching || isLoading ? [] : formattedNodesData}
		columns={columns}
		pagination={{
			current: currentPage,
			pageSize,
			total: totalCount,
			showSizeChanger: false,
			hideOnSinglePage: true,
		}}
		scroll={{ x: true }}
		loading={{
			spinning: isFetching || isLoading,
			indicator: <Spin indicator={<LoadingOutlined size={14} spin />} />,
		}}
		locale={{
			emptyText:
				isFetching || isLoading ? null : (
					<div className="no-filtered-hosts-message-container">
						<div className="no-filtered-hosts-message-content">
							<img
								src="/Icons/emptyState.svg"
								alt="thinking-emoji"
								className="empty-state-svg"
							/>
	{showNoFilteredNodesMessage && (
		<div className="no-filtered-hosts-message-container">
			<div className="no-filtered-hosts-message-content">
				<img
					src="/Icons/emptyState.svg"
					alt="thinking-emoji"
					className="empty-state-svg"
				/>

							<Typography.Text className="no-filtered-hosts-message">
								This query had no results. Edit your query and try again!
							</Typography.Text>
						</div>
					</div>
				),
		}}
		tableLayout="fixed"
		onChange={handleTableChange}
		onRow={(record): { onClick: () => void; className: string } => ({
			onClick: (): void => handleRowClick(record),
			className: 'clickable-row',
		})}
		expandable={{
			expandedRowRender: isGroupedByAttribute ? expandedRowRender : undefined,
			expandIcon: expandRowIconRenderer,
			expandedRowKeys,
		}}
	/>
				<Typography.Text className="no-filtered-hosts-message">
					This query had no results. Edit your query and try again!
				</Typography.Text>
			</div>
		</div>
	)}

	{(isFetching || isLoading) && <LoadingContainer />}

	{showsNodesTable && (
		<Table
			className="k8s-list-table nodes-list-table"
			dataSource={isFetching || isLoading ? [] : formattedNodesData}
			columns={columns}
			pagination={{
				current: currentPage,
				pageSize,
				total: totalCount,
				showSizeChanger: false,
				hideOnSinglePage: true,
			}}
			scroll={{ x: true }}
			loading={{
				spinning: isFetching || isLoading,
				indicator: <Spin indicator={<LoadingOutlined size={14} spin />} />,
			}}
			tableLayout="fixed"
			onChange={handleTableChange}
			onRow={(record): { onClick: () => void; className: string } => ({
				onClick: (): void => handleRowClick(record),
				className: 'clickable-row',
			})}
			expandable={{
				expandedRowRender: isGroupedByAttribute ? expandedRowRender : undefined,
				expandIcon: expandRowIconRenderer,
				expandedRowKeys,
			}}
		/>
	)}
	<NodeDetails
		node={selectedNodeData}
		isModalTimeSelection
@@ -155,7 +155,6 @@ export default function Events({
			id: event.data.id,
			key: event.data.id,
			resources_string: event.data.resources_string,
			attributes_string: event.data.attributes_string,
		}),
	);

@@ -175,9 +174,7 @@ export default function Events({
	}, [eventsData]);

	const handleExpandRow = (record: EventDataType): JSX.Element => (
		<EventContents
			data={{ ...record.attributes_string, ...record.resources_string }}
		/>
		<EventContents data={record.resources_string} />
	);

	const handlePrev = (): void => {
@@ -12,7 +12,6 @@ import {
	initialQueryState,
} from 'constants/queryBuilder';
import ROUTES from 'constants/routes';
import { filterDuplicateFilters } from 'container/InfraMonitoringK8s/entityDetailUtils';
import {
	CustomTimeType,
	Time,

@@ -98,9 +97,22 @@ function NodeDetails({
				op: '=',
				value: node?.meta.k8s_node_name || '',
			},
			{
				id: uuidv4(),
				key: {
					key: QUERY_KEYS.K8S_CLUSTER_NAME,
					dataType: DataTypes.String,
					type: 'resource',
					isColumn: false,
					isJSON: false,
					id: 'k8s_node_name--string--resource--false',
				},
				op: '=',
				value: node?.meta.k8s_cluster_name || '',
			},
		],
	}),
	[node?.meta.k8s_node_name],
	[node?.meta.k8s_node_name, node?.meta.k8s_cluster_name],
);

const initialEventsFilters = useMemo(

@@ -227,13 +239,11 @@ function NodeDetails({

			return {
				op: 'AND',
				items: filterDuplicateFilters(
					[
						...primaryFilters,
						...newFilters,
						...(paginationFilter ? [paginationFilter] : []),
					].filter((item): item is TagFilterItem => item !== undefined),
				),
				items: [
					...primaryFilters,
					...newFilters,
					...(paginationFilter ? [paginationFilter] : []),
				].filter((item): item is TagFilterItem => item !== undefined),
			};
		});
	},

@@ -256,14 +266,12 @@ function NodeDetails({

			return {
				op: 'AND',
				items: filterDuplicateFilters(
					[
						...primaryFilters,
						...value.items.filter(
							(item) => item.key?.key !== QUERY_KEYS.K8S_NODE_NAME,
						),
					].filter((item): item is TagFilterItem => item !== undefined),
				),
				items: [
					...primaryFilters,
					...value.items.filter(
						(item) => item.key?.key !== QUERY_KEYS.K8S_NODE_NAME,
					),
				].filter((item): item is TagFilterItem => item !== undefined),
			};
		});
	},
@@ -64,7 +64,7 @@ export interface K8sNodesRowData {

const nodeGroupColumnConfig = {
	title: (
		<div className="column-header entity-group-header">
		<div className="column-header node-group-header">
			<Group size={14} /> NODE GROUP
		</div>
	),

@@ -74,7 +74,6 @@ const nodeGroupColumnConfig = {
	width: 150,
	align: 'left',
	sorter: false,
	className: 'column entity-group-header',
};

export const getK8sNodesListQuery = (): K8sNodesListPayload => ({

@@ -87,7 +86,7 @@ export const getK8sNodesListQuery = (): K8sNodesListPayload => ({

const columnsConfig = [
	{
		title: <div className="column-header-left name-header">Node Name</div>,
		title: <div className="column-header-left">Node Name</div>,
		dataIndex: 'nodeName',
		key: 'nodeName',
		ellipsis: true,

@@ -96,7 +95,7 @@ const columnsConfig = [
		align: 'left',
	},
	{
		title: <div className="column-header-left name-header">Cluster Name</div>,
		title: <div className="column-header-left">Cluster Name</div>,
		dataIndex: 'clusterName',
		key: 'clusterName',
		ellipsis: true,
@@ -15,7 +15,6 @@ import get from 'api/browser/localstorage/get';
import set from 'api/browser/localstorage/set';
import logEvent from 'api/common/logEvent';
import { K8sPodsListPayload } from 'api/infraMonitoring/getK8sPodsList';
import classNames from 'classnames';
import { useGetK8sPodsList } from 'hooks/infraMonitoring/useGetK8sPodsList';
import { useGetAggregateKeys } from 'hooks/queryBuilder/useGetAggregateKeys';
import { useQueryBuilder } from 'hooks/queryBuilder/useQueryBuilder';

@@ -39,7 +38,7 @@ import {
	formatDataForTable,
	getK8sPodsListColumns,
	getK8sPodsListQuery,
	IEntityColumn,
	IPodColumn,
	K8sPodsRowData,
} from '../utils';
import PodDetails from './PodDetails/PodDetails';

@@ -48,11 +47,9 @@ import PodDetails from './PodDetails/PodDetails';
function K8sPodsList({
	isFiltersVisible,
	handleFilterVisibilityChange,
	quickFiltersLastUpdated,
}: {
	isFiltersVisible: boolean;
	handleFilterVisibilityChange: () => void;
	quickFiltersLastUpdated: number;
}): JSX.Element {
	const { maxTime, minTime } = useSelector<AppState, GlobalReducer>(
		(state) => state.globalTime,

@@ -60,9 +57,9 @@ function K8sPodsList({

	const [currentPage, setCurrentPage] = useState(1);

	const [addedColumns, setAddedColumns] = useState<IEntityColumn[]>([]);
	const [addedColumns, setAddedColumns] = useState<IPodColumn[]>([]);

	const [availableColumns, setAvailableColumns] = useState<IEntityColumn[]>(
	const [availableColumns, setAvailableColumns] = useState<IPodColumn[]>(
		defaultAvailableColumns,
	);

@@ -107,11 +104,6 @@ function K8sPodsList({
		K8sCategory.PODS, // infraMonitoringEntity
	);

	// Reset pagination every time quick filters are changed
	useEffect(() => {
		setCurrentPage(1);
	}, [quickFiltersLastUpdated]);

	useEffect(() => {
		const addedColumns = JSON.parse(get('k8sPodsAddedColumns') ?? '[]');

@@ -132,7 +124,7 @@ function K8sPodsList({
	const [orderBy, setOrderBy] = useState<{
		columnName: string;
		order: 'asc' | 'desc';
	} | null>({ columnName: 'cpu', order: 'desc' });
	} | null>(null);

	const [selectedPodUID, setSelectedPodUID] = useState<string | null>(null);

@@ -170,7 +162,7 @@ function K8sPodsList({
		selectedRowData: K8sPodsRowData,
	): IBuilderQuery['filters'] => {
		const baseFilters: IBuilderQuery['filters'] = {
			items: [...query.filters.items],
			items: [],
			op: 'and',
		};

@@ -209,7 +201,6 @@ function K8sPodsList({
			end: Math.floor(maxTime / 1000000),
			orderBy,
		};
		// eslint-disable-next-line react-hooks/exhaustive-deps
	}, [minTime, maxTime, orderBy, selectedRowData]);

	const {

@@ -347,8 +338,20 @@ function K8sPodsList({
		setSelectedPodUID(null);
	};

	const showPodsTable =
		!isError &&
		!isLoading &&
		!isFetching &&
		!(formattedPodsData.length === 0 && queryFilters.items.length > 0);

	const showNoFilteredPodsMessage =
		!isFetching &&
		!isLoading &&
		formattedPodsData.length === 0 &&
		queryFilters.items.length > 0;

	const handleAddColumn = useCallback(
		(column: IEntityColumn): void => {
		(column: IPodColumn): void => {
			setAddedColumns((prev) => [...prev, column]);

			setAvailableColumns((prev) => prev.filter((c) => c.value !== column.value));

@@ -375,7 +378,7 @@ function K8sPodsList({
	}, [groupByFiltersData]);

	const handleRemoveColumn = useCallback(
		(column: IEntityColumn): void => {
		(column: IPodColumn): void => {
			setAddedColumns((prev) => prev.filter((c) => c.value !== column.value));

			setAvailableColumns((prev) => [...prev, column]);

@@ -502,54 +505,54 @@ function K8sPodsList({
	/>
	{isError && <Typography>{data?.error || 'Something went wrong'}</Typography>}

	<Table
		className={classNames('k8s-list-table', {
			'expanded-k8s-list-table': isGroupedByAttribute,
		})}
		dataSource={isFetching || isLoading ? [] : formattedPodsData}
		columns={columns}
		pagination={{
			current: currentPage,
			pageSize,
			total: totalCount,
			showSizeChanger: false,
			hideOnSinglePage: true,
		}}
		loading={{
			spinning: isFetching || isLoading,
			indicator: <Spin indicator={<LoadingOutlined size={14} spin />} />,
		}}
		locale={{
			emptyText:
				isFetching || isLoading ? null : (
					<div className="no-filtered-hosts-message-container">
						<div className="no-filtered-hosts-message-content">
							<img
								src="/Icons/emptyState.svg"
								alt="thinking-emoji"
								className="empty-state-svg"
							/>
	{showNoFilteredPodsMessage && (
		<div className="no-filtered-hosts-message-container">
			<div className="no-filtered-hosts-message-content">
				<img
					src="/Icons/emptyState.svg"
					alt="thinking-emoji"
					className="empty-state-svg"
				/>

							<Typography.Text className="no-filtered-hosts-message">
								This query had no results. Edit your query and try again!
							</Typography.Text>
						</div>
					</div>
				),
		}}
		scroll={{ x: true }}
		tableLayout="fixed"
		onChange={handleTableChange}
		onRow={(record): { onClick: () => void; className: string } => ({
			onClick: (): void => handleRowClick(record),
			className: 'clickable-row',
		})}
		expandable={{
			expandedRowRender: isGroupedByAttribute ? expandedRowRender : undefined,
			expandIcon: expandRowIconRenderer,
			expandedRowKeys,
		}}
	/>
				<Typography.Text className="no-filtered-hosts-message">
					This query had no results. Edit your query and try again!
				</Typography.Text>
			</div>
		</div>
	)}

	{(isFetching || isLoading) && <LoadingContainer />}

	{showPodsTable && (
		<Table
			className="k8s-list-table"
			dataSource={isFetching || isLoading ? [] : formattedPodsData}
			columns={columns}
			pagination={{
				current: currentPage,
				pageSize,
				total: totalCount,
				showSizeChanger: false,
				hideOnSinglePage: true,
			}}
			loading={{
				spinning: isFetching || isLoading,
				indicator: <Spin indicator={<LoadingOutlined size={14} spin />} />,
			}}
			scroll={{ x: true }}
			tableLayout="fixed"
			onChange={handleTableChange}
			onRow={(record): { onClick: () => void; className: string } => ({
				onClick: (): void => handleRowClick(record),
				className: 'clickable-row',
			})}
			expandable={{
				expandedRowRender: isGroupedByAttribute ? expandedRowRender : undefined,
				expandIcon: expandRowIconRenderer,
				expandedRowKeys,
			}}
		/>
	)}

	{selectedPodData && (
		<PodDetails
@@ -155,7 +155,6 @@ export default function Events({
|
||||
id: event.data.id,
|
||||
key: event.data.id,
|
||||
resources_string: event.data.resources_string,
|
||||
attributes_string: event.data.attributes_string,
|
||||
}),
|
||||
);
|
||||
|
||||
@@ -175,9 +174,7 @@ export default function Events({
|
||||
}, [eventsData]);
|
||||
|
||||
const handleExpandRow = (record: EventDataType): JSX.Element => (
|
||||
<EventContents
|
||||
data={{ ...record.attributes_string, ...record.resources_string }}
|
||||
/>
|
||||
<EventContents data={record.resources_string} />
|
||||
);
|
||||
|
||||
const handlePrev = (): void => {
|
||||
|
||||
@@ -13,7 +13,6 @@ import {
|
||||
initialQueryState,
|
||||
} from 'constants/queryBuilder';
|
||||
import ROUTES from 'constants/routes';
|
||||
import { filterDuplicateFilters } from 'container/InfraMonitoringK8s/entityDetailUtils';
|
||||
import {
|
||||
CustomTimeType,
|
||||
Time,
|
||||
@@ -51,7 +50,7 @@ import { PodDetailProps } from './PodDetail.interfaces';
|
||||
import PodLogsDetailedView from './PodLogs/PodLogsDetailedView';
|
||||
import PodTraces from './PodTraces/PodTraces';
|
||||
|
||||
const TimeRangeOffset = 1000000000;
|
||||
const TimeRangeOffset = 1000000;
|
||||
|
||||
// eslint-disable-next-line sonarjs/cognitive-complexity
|
||||
function PodDetails({
|
||||
@@ -102,6 +101,19 @@ function PodDetails({
|
||||
op: '=',
|
||||
value: pod?.meta.k8s_pod_name || '',
|
||||
},
|
||||
{
|
||||
id: uuidv4(),
|
||||
key: {
|
||||
key: QUERY_KEYS.K8S_CLUSTER_NAME,
|
||||
dataType: DataTypes.String,
|
||||
type: 'resource',
|
||||
isColumn: false,
|
||||
isJSON: false,
|
||||
id: 'k8s_pod_name--string--resource--false',
|
||||
},
|
||||
op: '=',
|
||||
value: pod?.meta.k8s_cluster_name || '',
|
||||
},
|
||||
{
|
||||
id: uuidv4(),
|
||||
key: {
|
||||
@@ -117,7 +129,11 @@ function PodDetails({
|
||||
},
|
||||
],
|
||||
}),
|
||||
[pod?.meta.k8s_namespace_name, pod?.meta.k8s_pod_name],
|
||||
[
|
||||
pod?.meta.k8s_cluster_name,
|
||||
pod?.meta.k8s_namespace_name,
|
||||
pod?.meta.k8s_pod_name,
|
||||
],
|
||||
);
|
||||
|
||||
const initialEventsFilters = useMemo(
|
||||
@@ -246,13 +262,11 @@ function PodDetails({
|
||||
|
||||
return {
|
||||
op: 'AND',
|
||||
items: filterDuplicateFilters(
|
||||
[
|
||||
...primaryFilters,
|
||||
...newFilters,
|
||||
...(paginationFilter ? [paginationFilter] : []),
|
||||
].filter((item): item is TagFilterItem => item !== undefined),
|
||||
),
|
||||
items: [
|
||||
...primaryFilters,
|
||||
...newFilters,
|
||||
...(paginationFilter ? [paginationFilter] : []),
|
||||
].filter((item): item is TagFilterItem => item !== undefined),
|
||||
};
|
||||
});
|
||||
},
|
||||
@@ -277,14 +291,12 @@ function PodDetails({
|
||||
|
||||
return {
|
||||
op: 'AND',
|
||||
items: filterDuplicateFilters(
|
||||
[
|
||||
...primaryFilters,
|
||||
...value.items.filter(
|
||||
(item) => item.key?.key !== QUERY_KEYS.K8S_POD_NAME,
|
||||
),
|
||||
].filter((item): item is TagFilterItem => item !== undefined),
|
||||
),
|
||||
items: [
|
||||
...primaryFilters,
|
||||
...value.items.filter(
|
||||
(item) => item.key?.key !== QUERY_KEYS.K8S_POD_NAME,
|
||||
),
|
||||
].filter((item): item is TagFilterItem => item !== undefined),
|
||||
};
|
||||
});
|
||||
},
|
||||
|
||||
@@ -78,6 +78,8 @@ function PodTraces({
|
||||
[currentQuery],
|
||||
);
|
||||
|
||||
console.log({ updatedCurrentQuery });
|
||||
|
||||
const query = updatedCurrentQuery?.builder?.queryData[0] || null;
|
||||
|
||||
const { queryData: paginationQueryData } = useUrlQueryData<Pagination>(
|
||||
|
||||
@@ -100,13 +100,7 @@ export function getStrokeColorForLimitUtilization(value: number): string {
|
||||
export const getProgressBarText = (percent: number): React.ReactNode =>
|
||||
`${percent}%`;
|
||||
|
||||
export function EntityProgressBar({
|
||||
value,
|
||||
type,
|
||||
}: {
|
||||
value: number;
|
||||
type: 'request' | 'limit';
|
||||
}): JSX.Element {
|
||||
export function EntityProgressBar({ value }: { value: number }): JSX.Element {
|
||||
const percentage = Number((value * 100).toFixed(1));
|
||||
|
||||
return (
|
||||
@@ -116,11 +110,7 @@ export function EntityProgressBar({
|
||||
strokeLinecap="butt"
|
||||
size="small"
|
||||
status="normal"
|
||||
strokeColor={
|
||||
type === 'limit'
|
||||
? getStrokeColorForLimitUtilization(value)
|
||||
: getStrokeColorForRequestUtilization(value)
|
||||
}
|
||||
strokeColor={getStrokeColorForLimitUtilization(value)}
|
||||
className="progress-bar"
|
||||
showInfo={false}
|
||||
/>
|
||||
|
||||
@@ -150,8 +150,6 @@ export const PodsQuickFiltersConfig: IQuickFiltersConfig[] = [
|
||||
isColumn: false,
|
||||
isJSON: false,
|
||||
},
|
||||
aggregateOperator: 'noop',
|
||||
aggregateAttribute: 'k8s_pod_cpu_utilization',
|
||||
dataSource: DataSource.METRICS,
|
||||
defaultOpen: false,
|
||||
},
|
||||
|
||||
@@ -1,18 +0,0 @@
|
||||
import { TagFilterItem } from 'types/api/queryBuilder/queryBuilderData';
|
||||
|
||||
export const filterDuplicateFilters = (
|
||||
filters: TagFilterItem[],
|
||||
): TagFilterItem[] => {
|
||||
const uniqueFilters = [];
|
||||
const seenIds = new Set();
|
||||
|
||||
// eslint-disable-next-line no-restricted-syntax
|
||||
for (const filter of filters) {
|
||||
if (!seenIds.has(filter.id)) {
|
||||
seenIds.add(filter.id);
|
||||
uniqueFilters.push(filter);
|
||||
}
|
||||
}
|
||||
|
||||
return uniqueFilters;
|
||||
};
|
||||
@@ -26,9 +26,16 @@ export interface IEntityColumn {
|
||||
canRemove: boolean;
|
||||
}
|
||||
|
||||
export interface IPodColumn {
|
||||
label: string;
|
||||
value: string;
|
||||
id: string;
|
||||
canRemove: boolean;
|
||||
}
|
||||
|
||||
const columnProgressBarClassName = 'column-progress-bar';
|
||||
|
||||
export const defaultAddedColumns: IEntityColumn[] = [
|
||||
export const defaultAddedColumns: IPodColumn[] = [
|
||||
{
|
||||
label: 'Pod name',
|
||||
value: 'podName',
|
||||
@@ -71,13 +78,12 @@ export const defaultAddedColumns: IEntityColumn[] = [
|
||||
id: 'memory',
|
||||
canRemove: false,
|
||||
},
|
||||
// TODO - Re-enable the column once backend issue is fixed
|
||||
// {
|
||||
// label: 'Restarts',
|
||||
// value: 'restarts',
|
||||
// id: 'restarts',
|
||||
// canRemove: false,
|
||||
// },
|
||||
{
|
||||
label: 'Restarts',
|
||||
value: 'restarts',
|
||||
id: 'restarts',
|
||||
canRemove: false,
|
||||
},
|
||||
];
|
||||
|
||||
export const defaultAvailableColumns = [
|
||||
@@ -125,7 +131,7 @@ export const getK8sPodsListQuery = (): K8sPodsListPayload => ({
|
||||
|
||||
const podGroupColumnConfig = {
|
||||
title: (
|
||||
<div className="column-header entity-group-header">
|
||||
<div className="column-header pod-group-header">
|
||||
<Group size={14} /> POD GROUP
|
||||
</div>
|
||||
),
|
||||
@@ -134,7 +140,7 @@ const podGroupColumnConfig = {
|
||||
ellipsis: true,
|
||||
width: 180,
|
||||
sorter: false,
|
||||
className: 'column entity-group-header',
|
||||
className: 'column column-pod-group',
|
||||
};
|
||||
|
||||
export const dummyColumnConfig = {
|
||||
@@ -154,11 +160,11 @@ const columnsConfig = [
|
||||
key: 'podName',
|
||||
width: 180,
|
||||
ellipsis: true,
|
||||
sorter: false,
|
||||
sorter: true,
|
||||
className: 'column column-pod-name',
|
||||
},
|
||||
{
|
||||
title: <div className="column-header med-col">CPU Req Usage (%)</div>,
|
||||
title: <div className="column-header">CPU Req Usage (%)</div>,
|
||||
dataIndex: 'cpu_request',
|
||||
key: 'cpu_request',
|
||||
width: 180,
|
||||
@@ -168,7 +174,7 @@ const columnsConfig = [
|
||||
className: `column ${columnProgressBarClassName}`,
|
||||
},
|
||||
{
|
||||
title: <div className="column-header med-col">CPU Limit Usage (%)</div>,
|
||||
title: <div className="column-header">CPU Limit Usage (%)</div>,
|
||||
dataIndex: 'cpu_limit',
|
||||
key: 'cpu_limit',
|
||||
width: 120,
|
||||
@@ -186,7 +192,7 @@ const columnsConfig = [
|
||||
className: `column ${columnProgressBarClassName}`,
|
||||
},
|
||||
{
|
||||
title: <div className="column-heade med-col">Mem Req Usage (%)</div>,
|
||||
title: <div className="column-header">Mem Req Usage (%)</div>,
|
||||
dataIndex: 'memory_request',
|
||||
key: 'memory_request',
|
||||
width: 120,
|
||||
@@ -195,7 +201,7 @@ const columnsConfig = [
|
||||
className: `column ${columnProgressBarClassName}`,
|
||||
},
|
||||
{
|
||||
title: <div className="column-header med-col">Mem Limit Usage (%)</div>,
|
||||
title: <div className="column-header">Mem Limit Usage (%)</div>,
|
||||
dataIndex: 'memory_limit',
|
||||
key: 'memory_limit',
|
||||
width: 120,
|
||||
@@ -213,21 +219,20 @@ const columnsConfig = [
|
||||
align: 'left',
|
||||
className: `column ${columnProgressBarClassName}`,
|
||||
},
|
||||
// TODO - Re-enable the column once backend issue is fixed
|
||||
// {
|
||||
// title: (
|
||||
// <div className="column-header">
|
||||
// <Tooltip title="Container Restarts">Restarts</Tooltip>
|
||||
// </div>
|
||||
// ),
|
||||
// dataIndex: 'restarts',
|
||||
// key: 'restarts',
|
||||
// width: 40,
|
||||
// ellipsis: true,
|
||||
// sorter: true,
|
||||
// align: 'left',
|
||||
// className: `column ${columnProgressBarClassName}`,
|
||||
// },
|
||||
{
|
||||
title: (
|
||||
<div className="column-header">
|
||||
<Tooltip title="Container Restarts">Restarts</Tooltip>
|
||||
</div>
|
||||
),
|
||||
dataIndex: 'restarts',
|
||||
key: 'restarts',
|
||||
width: 40,
|
||||
ellipsis: true,
|
||||
sorter: true,
|
||||
align: 'left',
|
||||
className: `column ${columnProgressBarClassName}`,
|
||||
},
|
||||
];
|
||||
|
||||
export const namespaceColumnConfig = {
|
||||
@@ -246,7 +251,7 @@ export const nodeColumnConfig = {
|
||||
dataIndex: 'node',
|
||||
key: 'node',
|
||||
width: 100,
|
||||
sorter: false,
|
||||
sorter: true,
|
||||
ellipsis: true,
|
||||
align: 'left',
|
||||
className: 'column column-node',
|
||||
@@ -257,7 +262,7 @@ export const clusterColumnConfig = {
|
||||
dataIndex: 'cluster',
|
||||
key: 'cluster',
|
||||
width: 100,
|
||||
sorter: false,
|
||||
sorter: true,
|
||||
ellipsis: true,
|
||||
align: 'left',
|
||||
className: 'column column-cluster',
|
||||
@@ -270,7 +275,7 @@ export const columnConfigMap = {
|
||||
};
|
||||
|
||||
export const getK8sPodsListColumns = (
|
||||
addedColumns: IEntityColumn[],
|
||||
addedColumns: IPodColumn[],
|
||||
groupBy: IBuilderQuery['groupBy'],
|
||||
): ColumnType<K8sPodsRowData>[] => {
|
||||
const updatedColumnsConfig = [...columnsConfig];
|
||||
@@ -336,7 +341,7 @@ export const formatDataForTable = (
|
||||
attribute="CPU Request"
|
||||
>
|
||||
<div className="progress-container">
|
||||
<EntityProgressBar value={pod.podCPURequest} type="request" />
|
||||
<EntityProgressBar value={pod.podCPURequest} />
|
||||
</div>
|
||||
</ValidateColumnValueWrapper>
|
||||
),
|
||||
@@ -347,7 +352,7 @@ export const formatDataForTable = (
|
||||
attribute="CPU Limit"
|
||||
>
|
||||
<div className="progress-container">
|
||||
<EntityProgressBar value={pod.podCPULimit} type="limit" />
|
||||
<EntityProgressBar value={pod.podCPULimit} />
|
||||
</div>
|
||||
</ValidateColumnValueWrapper>
|
||||
),
|
||||
@@ -363,7 +368,7 @@ export const formatDataForTable = (
|
||||
attribute="Memory Request"
|
||||
>
|
||||
<div className="progress-container">
|
||||
<EntityProgressBar value={pod.podMemoryRequest} type="request" />
|
||||
<EntityProgressBar value={pod.podMemoryRequest} />
|
||||
</div>
|
||||
</ValidateColumnValueWrapper>
|
||||
),
|
||||
@@ -374,7 +379,7 @@ export const formatDataForTable = (
|
||||
attribute="Memory Limit"
|
||||
>
|
||||
<div className="progress-container">
|
||||
<EntityProgressBar value={pod.podMemoryLimit} type="limit" />
|
||||
<EntityProgressBar value={pod.podMemoryLimit} />
|
||||
</div>
|
||||
</ValidateColumnValueWrapper>
|
||||
),
|
||||
|
||||
@@ -121,25 +121,23 @@ const InfinityTable = forwardRef<TableVirtuosoHandle, InfinityTableProps>(
|
||||
const tableHeader = useCallback(
|
||||
() => (
|
||||
<tr>
|
||||
{tableColumns
|
||||
.filter((column) => column.key)
|
||||
.map((column) => {
|
||||
const isDragColumn = column.key !== 'expand';
|
||||
{tableColumns.map((column) => {
|
||||
const isDragColumn = column.key !== 'expand';
|
||||
|
||||
return (
|
||||
<TableHeaderCellStyled
|
||||
$isLogIndicator={column.key === 'state-indicator'}
|
||||
$isDarkMode={isDarkMode}
|
||||
$isDragColumn={isDragColumn}
|
||||
key={column.key}
|
||||
fontSize={tableViewProps?.fontSize}
|
||||
// eslint-disable-next-line react/jsx-props-no-spreading
|
||||
{...(isDragColumn && { className: 'dragHandler' })}
|
||||
>
|
||||
{(column.title as string).replace(/^\w/, (c) => c.toUpperCase())}
|
||||
</TableHeaderCellStyled>
|
||||
);
|
||||
})}
|
||||
return (
|
||||
<TableHeaderCellStyled
|
||||
$isLogIndicator={column.key === 'state-indicator'}
|
||||
$isDarkMode={isDarkMode}
|
||||
$isDragColumn={isDragColumn}
|
||||
key={column.key}
|
||||
fontSize={tableViewProps?.fontSize}
|
||||
// eslint-disable-next-line react/jsx-props-no-spreading
|
||||
{...(isDragColumn && { className: 'dragHandler' })}
|
||||
>
|
||||
{(column.title as string).replace(/^\w/, (c) => c.toUpperCase())}
|
||||
</TableHeaderCellStyled>
|
||||
);
|
||||
})}
|
||||
</tr>
|
||||
),
|
||||
[tableColumns, isDarkMode, tableViewProps?.fontSize],
|
||||
|
||||
@@ -29,7 +29,7 @@ export const TableCellStyled = styled.td<TableHeaderCellStyledProps>`
|
||||
props.$isDarkMode ? 'inherit' : themeColors.whiteCream};
|
||||
|
||||
${({ $isLogIndicator }): string =>
|
||||
$isLogIndicator ? 'padding: 0 0 0 8px;width: 15px;' : ''}
|
||||
$isLogIndicator ? 'padding: 0 0 0 8px;' : ''}
|
||||
color: ${(props): string =>
|
||||
props.$isDarkMode ? themeColors.white : themeColors.bckgGrey};
|
||||
`;
|
||||
|
||||
@@ -76,7 +76,6 @@ receivers:
|
||||
azureeventhub:
|
||||
connection: Endpoint=sb://namespace.servicebus.windows.net/;SharedAccessKeyName=RootManageSharedAccessKey;SharedAccessKey=superSecret1234=;EntityPath=hubName
|
||||
format: "azure"
|
||||
apply_semantic_conventions: true
|
||||
azuremonitor:
|
||||
subscription_id: "<Subscription ID>"
|
||||
tenant_id: "<AD Tenant ID>"
|
||||
|
||||
@@ -22,7 +22,6 @@ receivers:
|
||||
azureeventhub:
|
||||
connection: <Primary Connection String>
|
||||
format: "azure"
|
||||
apply_semantic_conventions: true
|
||||
azuremonitor:
|
||||
subscription_id: "<Subscription ID>"
|
||||
tenant_id: "<AD Tenant ID>"
|
||||
|
||||
@@ -22,7 +22,6 @@ receivers:
|
||||
azureeventhub:
|
||||
connection: <Primary Connection String>
|
||||
format: "azure"
|
||||
apply_semantic_conventions: true
|
||||
azuremonitor:
|
||||
subscription_id: "<Subscription ID>"
|
||||
tenant_id: "<AD Tenant ID>"
|
||||
|
||||
@@ -22,7 +22,6 @@ receivers:
|
||||
azureeventhub:
|
||||
connection: <Primary Connection String>
|
||||
format: "azure"
|
||||
apply_semantic_conventions: true
|
||||
azuremonitor:
|
||||
subscription_id: "<Subscription ID>"
|
||||
tenant_id: "<AD Tenant ID>"
|
||||
|
||||
@@ -22,7 +22,6 @@ receivers:
|
||||
azureeventhub:
|
||||
connection: <Primary Connection String>
|
||||
format: "azure"
|
||||
apply_semantic_conventions: true
|
||||
azuremonitor:
|
||||
subscription_id: "<Subscription ID>"
|
||||
tenant_id: "<AD Tenant ID>"
|
||||
|
||||
@@ -22,7 +22,6 @@ receivers:
|
||||
azureeventhub:
|
||||
connection: <Primary Connection String>
|
||||
format: "azure"
|
||||
apply_semantic_conventions: true
|
||||
azuremonitor:
|
||||
subscription_id: "<Subscription ID>"
|
||||
tenant_id: "<AD Tenant ID>"
|
||||
|
||||
@@ -22,7 +22,6 @@ receivers:
|
||||
azureeventhub:
|
||||
connection: <Primary Connection String>
|
||||
format: "azure"
|
||||
apply_semantic_conventions: true
|
||||
azuremonitor:
|
||||
subscription_id: "<Subscription ID>"
|
||||
tenant_id: "<AD Tenant ID>"
|
||||
|
||||
@@ -5,26 +5,7 @@ import { FontSize, OptionsQuery } from './types';
|
||||
export const URL_OPTIONS = 'options';
|
||||
|
||||
export const defaultOptionsQuery: OptionsQuery = {
|
||||
selectColumns: [
|
||||
{
|
||||
key: 'timestamp',
|
||||
dataType: DataTypes.String,
|
||||
type: 'tag',
|
||||
isColumn: true,
|
||||
isJSON: false,
|
||||
id: 'timestamp--string--tag--true',
|
||||
isIndexed: false,
|
||||
},
|
||||
{
|
||||
key: 'body',
|
||||
dataType: DataTypes.String,
|
||||
type: 'tag',
|
||||
isColumn: true,
|
||||
isJSON: false,
|
||||
id: 'body--string--tag--true',
|
||||
isIndexed: false,
|
||||
},
|
||||
],
|
||||
selectColumns: [],
|
||||
maxLines: 2,
|
||||
format: 'raw',
|
||||
fontSize: FontSize.SMALL,
|
||||
|
||||
@@ -169,15 +169,6 @@ const useOptionsMenu = ({
|
||||
|
||||
const searchedAttributeKeys = useMemo(() => {
|
||||
if (searchedAttributesData?.payload?.attributeKeys?.length) {
|
||||
if (dataSource === DataSource.LOGS) {
|
||||
// add timestamp and body to the list of attributes
|
||||
return [
|
||||
...defaultOptionsQuery.selectColumns,
|
||||
...searchedAttributesData.payload.attributeKeys.filter(
|
||||
(attribute) => attribute.key !== 'body',
|
||||
),
|
||||
];
|
||||
}
|
||||
return searchedAttributesData.payload.attributeKeys;
|
||||
}
|
||||
if (dataSource === DataSource.TRACES) {
|
||||
@@ -207,17 +198,12 @@ const useOptionsMenu = ({
|
||||
);
|
||||
|
||||
const optionsFromAttributeKeys = useMemo(() => {
|
||||
const filteredAttributeKeys = searchedAttributeKeys.filter((item) => {
|
||||
// For other data sources, only filter out 'body' if it exists
|
||||
if (dataSource !== DataSource.LOGS) {
|
||||
return item.key !== 'body';
|
||||
}
|
||||
// For LOGS, keep all keys
|
||||
return true;
|
||||
});
|
||||
const filteredAttributeKeys = searchedAttributeKeys.filter(
|
||||
(item) => item.key !== 'body',
|
||||
);
|
||||
|
||||
return getOptionsFromKeys(filteredAttributeKeys, selectedColumnKeys);
|
||||
}, [dataSource, searchedAttributeKeys, selectedColumnKeys]);
|
||||
}, [searchedAttributeKeys, selectedColumnKeys]);
|
||||
|
||||
const handleRedirectWithOptionsData = useCallback(
|
||||
(newQueryData: OptionsQuery) => {
|
||||
|
||||
@@ -95,7 +95,6 @@ function QueryBuilderSearch({
|
||||
isMulti,
|
||||
isFetching,
|
||||
setSearchKey,
|
||||
setSearchValue,
|
||||
searchKey,
|
||||
key,
|
||||
exampleQueries,
|
||||
@@ -146,11 +145,7 @@ function QueryBuilderSearch({
|
||||
|
||||
const tagEditHandler = (value: string): void => {
|
||||
updateTag(value);
|
||||
if (isInfraMonitoring) {
|
||||
setSearchValue(value);
|
||||
} else {
|
||||
handleSearch(value);
|
||||
}
|
||||
handleSearch(value);
|
||||
};
|
||||
|
||||
const isDisabled = !!searchValue;
|
||||
|
||||
@@ -153,7 +153,6 @@ export const useAutoComplete = (
|
||||
isMulti,
|
||||
isFetching,
|
||||
setSearchKey,
|
||||
setSearchValue,
|
||||
searchKey,
|
||||
key,
|
||||
exampleQueries,
|
||||
@@ -173,7 +172,6 @@ interface IAutoComplete {
|
||||
isMulti: boolean;
|
||||
isFetching: boolean;
|
||||
setSearchKey: (value: string) => void;
|
||||
setSearchValue: (value: string) => void;
|
||||
searchKey: string;
|
||||
key: string;
|
||||
exampleQueries: TagFilter[];
|
||||
|
||||
@@ -5,12 +5,12 @@ import { TabRoutes } from 'components/RouteTab/types';
|
||||
import history from 'lib/history';
|
||||
import { useLocation } from 'react-use';
|
||||
|
||||
import { Hosts, Kubernetes } from './constants';
|
||||
import { Hosts } from './constants';
|
||||
|
||||
export default function InfrastructureMonitoringPage(): JSX.Element {
|
||||
const { pathname } = useLocation();
|
||||
|
||||
const routes: TabRoutes[] = [Hosts, Kubernetes];
|
||||
const routes: TabRoutes[] = [Hosts];
|
||||
|
||||
return (
|
||||
<div className="infra-monitoring-module-container">
|
||||
|
||||
14 pkg/cache/config.go vendored
@@ -4,9 +4,12 @@ import (
"time"

go_cache "github.com/patrickmn/go-cache"
"go.signoz.io/signoz/pkg/factory"
"go.signoz.io/signoz/pkg/confmap"
)

// Config satisfies the confmap.Config interface
var _ confmap.Config = (*Config)(nil)

type Memory struct {
TTL time.Duration `mapstructure:"ttl"`
CleanupInterval time.Duration `mapstructure:"cleanupInterval"`
@@ -25,11 +28,7 @@ type Config struct {
Redis Redis `mapstructure:"redis"`
}

func NewConfigFactory() factory.ConfigFactory {
return factory.NewConfigFactory(factory.MustNewName("cache"), newConfig)
}

func newConfig() factory.Config {
func (c *Config) NewWithDefaults() confmap.Config {
return &Config{
Provider: "memory",
Memory: Memory{
@@ -43,9 +42,8 @@ func newConfig() factory.Config {
DB: 0,
},
}

}

func (c Config) Validate() error {
func (c *Config) Validate() error {
return nil
}
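The diff above moves the cache defaults behind the new confmap.Config contract instead of a config factory. A minimal sketch of how a caller might exercise it, assuming the import path shown in the diff; the type assertion reflects that NewWithDefaults returns the interface type:

```go
package main

import (
	"fmt"

	"go.signoz.io/signoz/pkg/cache"
)

func main() {
	// NewWithDefaults returns the confmap.Config interface, so callers
	// assert back to the concrete *cache.Config to reach its fields.
	cfg := (&cache.Config{}).NewWithDefaults().(*cache.Config)

	// Validate currently always succeeds, but it is part of the contract.
	if err := cfg.Validate(); err != nil {
		panic(err)
	}

	fmt.Println(cfg.Provider) // "memory", per the defaults constructed above
}
```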
29 pkg/cache/memorycache/provider.go vendored
@@ -7,20 +7,15 @@ import (
"time"

go_cache "github.com/patrickmn/go-cache"
"go.signoz.io/signoz/pkg/cache"
"go.signoz.io/signoz/pkg/factory"
_cache "go.signoz.io/signoz/pkg/cache"
)

type provider struct {
cc *go_cache.Cache
}

func NewFactory() factory.ProviderFactory[cache.Cache, cache.Config] {
return factory.NewProviderFactory(factory.MustNewName("memory"), New)
}

func New(ctx context.Context, settings factory.ProviderSettings, config cache.Config) (cache.Cache, error) {
return &provider{cc: go_cache.New(config.Memory.TTL, config.Memory.CleanupInterval)}, nil
func New(opts *_cache.Memory) *provider {
return &provider{cc: go_cache.New(opts.TTL, opts.CleanupInterval)}
}

// Connect does nothing
@@ -29,11 +24,11 @@ func (c *provider) Connect(_ context.Context) error {
}

// Store stores the data in the cache
func (c *provider) Store(_ context.Context, cacheKey string, data cache.CacheableEntity, ttl time.Duration) error {
func (c *provider) Store(_ context.Context, cacheKey string, data _cache.CacheableEntity, ttl time.Duration) error {
// check if the data being passed is a pointer and is not nil
rv := reflect.ValueOf(data)
if rv.Kind() != reflect.Pointer || rv.IsNil() {
return cache.WrapCacheableEntityErrors(reflect.TypeOf(data), "inmemory")
return _cache.WrapCacheableEntityErrors(reflect.TypeOf(data), "inmemory")
}

c.cc.Set(cacheKey, data, ttl)
@@ -41,32 +36,32 @@ func (c *provider) Store(_ context.Context, cacheKey string, data cache.Cacheabl
}

// Retrieve retrieves the data from the cache
func (c *provider) Retrieve(_ context.Context, cacheKey string, dest cache.CacheableEntity, allowExpired bool) (cache.RetrieveStatus, error) {
func (c *provider) Retrieve(_ context.Context, cacheKey string, dest _cache.CacheableEntity, allowExpired bool) (_cache.RetrieveStatus, error) {
// check if the destination being passed is a pointer and is not nil
dstv := reflect.ValueOf(dest)
if dstv.Kind() != reflect.Pointer || dstv.IsNil() {
return cache.RetrieveStatusError, cache.WrapCacheableEntityErrors(reflect.TypeOf(dest), "inmemory")
return _cache.RetrieveStatusError, _cache.WrapCacheableEntityErrors(reflect.TypeOf(dest), "inmemory")
}

// check if the destination value is settable
if !dstv.Elem().CanSet() {
return cache.RetrieveStatusError, fmt.Errorf("destination value is not settable, %s", dstv.Elem())
return _cache.RetrieveStatusError, fmt.Errorf("destination value is not settable, %s", dstv.Elem())
}

data, found := c.cc.Get(cacheKey)
if !found {
return cache.RetrieveStatusKeyMiss, nil
return _cache.RetrieveStatusKeyMiss, nil
}

// check the type compatbility between the src and dest
srcv := reflect.ValueOf(data)
if !srcv.Type().AssignableTo(dstv.Type()) {
return cache.RetrieveStatusError, fmt.Errorf("src type is not assignable to dst type")
return _cache.RetrieveStatusError, fmt.Errorf("src type is not assignable to dst type")
}

// set the value to from src to dest
dstv.Elem().Set(srcv.Elem())
return cache.RetrieveStatusHit, nil
return _cache.RetrieveStatusHit, nil
}

// SetTTL sets the TTL for the cache entry
@@ -96,6 +91,6 @@ func (c *provider) Close(_ context.Context) error {
}

// Configuration returns the cache configuration
func (c *provider) Configuration() *cache.Memory {
func (c *provider) Configuration() *_cache.Memory {
return nil
}
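With the factory indirection removed, the in-memory provider is constructed directly from its Memory options. A small usage sketch; the entity type here is hypothetical and assumes CacheableEntity is the binary-marshaling pair exercised by the tests below:

```go
package main

import (
	"context"
	"time"

	"go.signoz.io/signoz/pkg/cache"
	"go.signoz.io/signoz/pkg/cache/memorycache"
)

// entity is a hypothetical CacheableEntity; the interface is assumed to be
// the MarshalBinary/UnmarshalBinary pair used by the test file below.
type entity struct{ Value int }

func (e *entity) MarshalBinary() ([]byte, error) { return []byte{byte(e.Value)}, nil }
func (e *entity) UnmarshalBinary(b []byte) error { e.Value = int(b[0]); return nil }

func main() {
	// The provider is now built directly from its Memory options,
	// with no provider factory or settings in between.
	c := memorycache.New(&cache.Memory{
		TTL:             time.Minute,
		CleanupInterval: 10 * time.Minute,
	})

	ctx := context.Background()
	// Store requires a non-nil pointer, per the reflection checks above.
	if err := c.Store(ctx, "key", &entity{Value: 1}, time.Minute); err != nil {
		panic(err)
	}

	got := new(entity)
	status, err := c.Retrieve(ctx, "key", got, false)
	_ = status // cache.RetrieveStatusHit on success
	if err != nil {
		panic(err)
	}
}
```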
84 pkg/cache/memorycache/provider_test.go vendored
@@ -7,21 +7,18 @@ import (
"time"

"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"go.signoz.io/signoz/pkg/cache"
"go.signoz.io/signoz/pkg/factory/providertest"
_cache "go.signoz.io/signoz/pkg/cache"
)

// TestNew tests the New function
func TestNew(t *testing.T) {
opts := cache.Memory{
opts := &_cache.Memory{
TTL: 10 * time.Second,
CleanupInterval: 10 * time.Second,
}
c, err := New(context.Background(), providertest.NewSettings(), cache.Config{Provider: "memory", Memory: opts})
require.NoError(t, err)
c := New(opts)
assert.NotNil(t, c)
assert.NotNil(t, c.(*provider).cc)
assert.NotNil(t, c.cc)
assert.NoError(t, c.Connect(context.Background()))
}

@@ -56,35 +53,32 @@ func (dce DCacheableEntity) UnmarshalBinary(data []byte) error {
// TestStore tests the Store function
// this should fail because of nil pointer error
func TestStoreWithNilPointer(t *testing.T) {
opts := cache.Memory{
opts := &_cache.Memory{
TTL: 10 * time.Second,
CleanupInterval: 10 * time.Second,
}
c, err := New(context.Background(), providertest.NewSettings(), cache.Config{Provider: "memory", Memory: opts})
require.NoError(t, err)
c := New(opts)
var storeCacheableEntity *CacheableEntity
assert.Error(t, c.Store(context.Background(), "key", storeCacheableEntity, 10*time.Second))
}

// this should fail because of no pointer error
func TestStoreWithStruct(t *testing.T) {
opts := cache.Memory{
opts := &_cache.Memory{
TTL: 10 * time.Second,
CleanupInterval: 10 * time.Second,
}
c, err := New(context.Background(), providertest.NewSettings(), cache.Config{Provider: "memory", Memory: opts})
require.NoError(t, err)
c := New(opts)
var storeCacheableEntity CacheableEntity
assert.Error(t, c.Store(context.Background(), "key", storeCacheableEntity, 10*time.Second))
}

func TestStoreWithNonNilPointer(t *testing.T) {
opts := cache.Memory{
opts := &_cache.Memory{
TTL: 10 * time.Second,
CleanupInterval: 10 * time.Second,
}
c, err := New(context.Background(), providertest.NewSettings(), cache.Config{Provider: "memory", Memory: opts})
require.NoError(t, err)
c := New(opts)
storeCacheableEntity := &CacheableEntity{
Key: "some-random-key",
Value: 1,
@@ -95,12 +89,11 @@ func TestStoreWithNonNilPointer(t *testing.T) {

// TestRetrieve tests the Retrieve function
func TestRetrieveWithNilPointer(t *testing.T) {
opts := cache.Memory{
opts := &_cache.Memory{
TTL: 10 * time.Second,
CleanupInterval: 10 * time.Second,
}
c, err := New(context.Background(), providertest.NewSettings(), cache.Config{Provider: "memory", Memory: opts})
require.NoError(t, err)
c := New(opts)
storeCacheableEntity := &CacheableEntity{
Key: "some-random-key",
Value: 1,
@@ -112,16 +105,15 @@ func TestRetrieveWithNilPointer(t *testing.T) {

retrieveStatus, err := c.Retrieve(context.Background(), "key", retrieveCacheableEntity, false)
assert.Error(t, err)
assert.Equal(t, retrieveStatus, cache.RetrieveStatusError)
assert.Equal(t, retrieveStatus, _cache.RetrieveStatusError)
}

func TestRetrieveWitNonPointer(t *testing.T) {
opts := cache.Memory{
opts := &_cache.Memory{
TTL: 10 * time.Second,
CleanupInterval: 10 * time.Second,
}
c, err := New(context.Background(), providertest.NewSettings(), cache.Config{Provider: "memory", Memory: opts})
require.NoError(t, err)
c := New(opts)
storeCacheableEntity := &CacheableEntity{
Key: "some-random-key",
Value: 1,
@@ -133,16 +125,15 @@ func TestRetrieveWitNonPointer(t *testing.T) {

retrieveStatus, err := c.Retrieve(context.Background(), "key", retrieveCacheableEntity, false)
assert.Error(t, err)
assert.Equal(t, retrieveStatus, cache.RetrieveStatusError)
assert.Equal(t, retrieveStatus, _cache.RetrieveStatusError)
}

func TestRetrieveWithDifferentTypes(t *testing.T) {
opts := cache.Memory{
opts := &_cache.Memory{
TTL: 10 * time.Second,
CleanupInterval: 10 * time.Second,
}
c, err := New(context.Background(), providertest.NewSettings(), cache.Config{Provider: "memory", Memory: opts})
require.NoError(t, err)
c := New(opts)
storeCacheableEntity := &CacheableEntity{
Key: "some-random-key",
Value: 1,
@@ -153,16 +144,15 @@ func TestRetrieveWithDifferentTypes(t *testing.T) {
retrieveCacheableEntity := new(DCacheableEntity)
retrieveStatus, err := c.Retrieve(context.Background(), "key", retrieveCacheableEntity, false)
assert.Error(t, err)
assert.Equal(t, retrieveStatus, cache.RetrieveStatusError)
assert.Equal(t, retrieveStatus, _cache.RetrieveStatusError)
}

func TestRetrieveWithSameTypes(t *testing.T) {
opts := cache.Memory{
opts := &_cache.Memory{
TTL: 10 * time.Second,
CleanupInterval: 10 * time.Second,
}
c, err := New(context.Background(), providertest.NewSettings(), cache.Config{Provider: "memory", Memory: opts})
require.NoError(t, err)
c := New(opts)
storeCacheableEntity := &CacheableEntity{
Key: "some-random-key",
Value: 1,
@@ -173,14 +163,13 @@ func TestRetrieveWithSameTypes(t *testing.T) {
retrieveCacheableEntity := new(CacheableEntity)
retrieveStatus, err := c.Retrieve(context.Background(), "key", retrieveCacheableEntity, false)
assert.NoError(t, err)
assert.Equal(t, retrieveStatus, cache.RetrieveStatusHit)
assert.Equal(t, retrieveStatus, _cache.RetrieveStatusHit)
assert.Equal(t, storeCacheableEntity, retrieveCacheableEntity)
}

// TestSetTTL tests the SetTTL function
func TestSetTTL(t *testing.T) {
c, err := New(context.Background(), providertest.NewSettings(), cache.Config{Provider: "memory", Memory: cache.Memory{TTL: 10 * time.Second, CleanupInterval: 1 * time.Second}})
require.NoError(t, err)
c := New(&_cache.Memory{TTL: 10 * time.Second, CleanupInterval: 1 * time.Second})
storeCacheableEntity := &CacheableEntity{
Key: "some-random-key",
Value: 1,
@@ -191,7 +180,7 @@ func TestSetTTL(t *testing.T) {
time.Sleep(3 * time.Second)
retrieveStatus, err := c.Retrieve(context.Background(), "key", retrieveCacheableEntity, false)
assert.NoError(t, err)
assert.Equal(t, retrieveStatus, cache.RetrieveStatusKeyMiss)
assert.Equal(t, retrieveStatus, _cache.RetrieveStatusKeyMiss)
assert.Equal(t, new(CacheableEntity), retrieveCacheableEntity)

assert.NoError(t, c.Store(context.Background(), "key", storeCacheableEntity, 2*time.Second))
@@ -199,18 +188,17 @@ func TestSetTTL(t *testing.T) {
time.Sleep(3 * time.Second)
retrieveStatus, err = c.Retrieve(context.Background(), "key", retrieveCacheableEntity, false)
assert.NoError(t, err)
assert.Equal(t, retrieveStatus, cache.RetrieveStatusHit)
assert.Equal(t, retrieveStatus, _cache.RetrieveStatusHit)
assert.Equal(t, retrieveCacheableEntity, storeCacheableEntity)
}

// TestRemove tests the Remove function
func TestRemove(t *testing.T) {
opts := cache.Memory{
opts := &_cache.Memory{
TTL: 10 * time.Second,
CleanupInterval: 10 * time.Second,
}
c, err := New(context.Background(), providertest.NewSettings(), cache.Config{Provider: "memory", Memory: opts})
require.NoError(t, err)
c := New(opts)
storeCacheableEntity := &CacheableEntity{
Key: "some-random-key",
Value: 1,
@@ -222,18 +210,17 @@ func TestRemove(t *testing.T) {

retrieveStatus, err := c.Retrieve(context.Background(), "key", retrieveCacheableEntity, false)
assert.NoError(t, err)
assert.Equal(t, retrieveStatus, cache.RetrieveStatusKeyMiss)
assert.Equal(t, retrieveStatus, _cache.RetrieveStatusKeyMiss)
assert.Equal(t, new(CacheableEntity), retrieveCacheableEntity)
}

// TestBulkRemove tests the BulkRemove function
func TestBulkRemove(t *testing.T) {
opts := cache.Memory{
opts := &_cache.Memory{
TTL: 10 * time.Second,
CleanupInterval: 10 * time.Second,
}
c, err := New(context.Background(), providertest.NewSettings(), cache.Config{Provider: "memory", Memory: opts})
require.NoError(t, err)
c := New(opts)
storeCacheableEntity := &CacheableEntity{
Key: "some-random-key",
Value: 1,
@@ -246,23 +233,22 @@ func TestBulkRemove(t *testing.T) {

retrieveStatus, err := c.Retrieve(context.Background(), "key1", retrieveCacheableEntity, false)
assert.NoError(t, err)
assert.Equal(t, retrieveStatus, cache.RetrieveStatusKeyMiss)
assert.Equal(t, retrieveStatus, _cache.RetrieveStatusKeyMiss)
assert.Equal(t, new(CacheableEntity), retrieveCacheableEntity)

retrieveStatus, err = c.Retrieve(context.Background(), "key2", retrieveCacheableEntity, false)
assert.NoError(t, err)
assert.Equal(t, retrieveStatus, cache.RetrieveStatusKeyMiss)
assert.Equal(t, retrieveStatus, _cache.RetrieveStatusKeyMiss)
assert.Equal(t, new(CacheableEntity), retrieveCacheableEntity)
}

// TestCache tests the cache
func TestCache(t *testing.T) {
opts := cache.Memory{
opts := &_cache.Memory{
TTL: 10 * time.Second,
CleanupInterval: 10 * time.Second,
}
c, err := New(context.Background(), providertest.NewSettings(), cache.Config{Provider: "memory", Memory: opts})
require.NoError(t, err)
c := New(opts)
storeCacheableEntity := &CacheableEntity{
Key: "some-random-key",
Value: 1,
@@ -272,7 +258,7 @@ func TestCache(t *testing.T) {
assert.NoError(t, c.Store(context.Background(), "key", storeCacheableEntity, 10*time.Second))
retrieveStatus, err := c.Retrieve(context.Background(), "key", retrieveCacheableEntity, false)
assert.NoError(t, err)
assert.Equal(t, retrieveStatus, cache.RetrieveStatusHit)
assert.Equal(t, retrieveStatus, _cache.RetrieveStatusHit)
assert.Equal(t, storeCacheableEntity, retrieveCacheableEntity)
c.Remove(context.Background(), "key")
}
28 pkg/cache/rediscache/provider.go vendored
@@ -7,22 +7,17 @@ import (
"time"

"github.com/go-redis/redis/v8"
"go.signoz.io/signoz/pkg/cache"
"go.signoz.io/signoz/pkg/factory"
_cache "go.signoz.io/signoz/pkg/cache"
"go.uber.org/zap"
)

type provider struct {
client *redis.Client
opts cache.Redis
opts *_cache.Redis
}

func NewFactory() factory.ProviderFactory[cache.Cache, cache.Config] {
return factory.NewProviderFactory(factory.MustNewName("redis"), New)
}

func New(ctx context.Context, settings factory.ProviderSettings, config cache.Config) (cache.Cache, error) {
return &provider{opts: config.Redis}, nil
func New(opts *_cache.Redis) *provider {
return &provider{opts: opts}
}

// WithClient creates a new cache with the given client
@@ -41,20 +36,20 @@ func (c *provider) Connect(_ context.Context) error {
}

// Store stores the data in the cache
func (c *provider) Store(ctx context.Context, cacheKey string, data cache.CacheableEntity, ttl time.Duration) error {
func (c *provider) Store(ctx context.Context, cacheKey string, data _cache.CacheableEntity, ttl time.Duration) error {
return c.client.Set(ctx, cacheKey, data, ttl).Err()
}

// Retrieve retrieves the data from the cache
func (c *provider) Retrieve(ctx context.Context, cacheKey string, dest cache.CacheableEntity, allowExpired bool) (cache.RetrieveStatus, error) {
func (c *provider) Retrieve(ctx context.Context, cacheKey string, dest _cache.CacheableEntity, allowExpired bool) (_cache.RetrieveStatus, error) {
err := c.client.Get(ctx, cacheKey).Scan(dest)
if err != nil {
if errors.Is(err, redis.Nil) {
return cache.RetrieveStatusKeyMiss, nil
return _cache.RetrieveStatusKeyMiss, nil
}
return cache.RetrieveStatusError, err
return _cache.RetrieveStatusError, err
}
return cache.RetrieveStatusHit, nil
return _cache.RetrieveStatusHit, nil
}

// SetTTL sets the TTL for the cache entry
@@ -92,6 +87,11 @@ func (c *provider) GetClient() *redis.Client {
return c.client
}

// GetOptions returns the options
func (c *provider) GetOptions() *_cache.Redis {
return c.opts
}

// GetTTL returns the TTL for the cache entry
func (c *provider) GetTTL(ctx context.Context, cacheKey string) time.Duration {
ttl, err := c.client.TTL(ctx, cacheKey).Result()
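The Redis provider follows the same pattern: options in, provider out. A sketch under stated assumptions; the Redis field names (Host, Port, DB) and the behavior of Connect are not shown in this hunk and are assumed here:

```go
package main

import (
	"context"

	"go.signoz.io/signoz/pkg/cache"
	"go.signoz.io/signoz/pkg/cache/rediscache"
)

func main() {
	// Field names below are assumptions; the Redis struct's fields
	// are elided in the diff above.
	c := rediscache.New(&cache.Redis{
		Host: "localhost",
		Port: 6379,
		DB:   0,
	})

	// Connect is assumed to dial the client from the stored options;
	// its body is elided in the hunk above.
	if err := c.Connect(context.Background()); err != nil {
		panic(err)
	}

	_ = c.GetTTL(context.Background(), "key") // returns a time.Duration
}
```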
@@ -30,18 +30,6 @@ func TestGetWithStrings(t *testing.T) {
assert.Equal(t, expected, actual.All())
}

func TestGetWithNoPrefix(t *testing.T) {
t.Setenv("K1_K2", "string")
t.Setenv("K3_K4", "string")
expected := map[string]any{}

provider := New(config.ProviderConfig{})
actual, err := provider.Get(context.Background(), config.MustNewUri("env:"))
require.NoError(t, err)

assert.Equal(t, expected, actual.All())
}

func TestGetWithGoTypes(t *testing.T) {
t.Setenv("SIGNOZ_BOOL", "true")
t.Setenv("SIGNOZ_STRING", "string")
9 pkg/confmap/config.go Normal file
@@ -0,0 +1,9 @@
package confmap

// Config is an interface that defines methods for creating and validating configurations.
type Config interface {
// New creates a new instance of the configuration with default values.
NewWithDefaults() Config
// Validate the configuration and returns an error if invalid.
Validate() error
}
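Any config block satisfies this interface by pairing a defaults constructor with a validator. A minimal hypothetical implementer, mirroring the compile-time-check pattern used by the cache and server configs in this changeset:

```go
package example

import (
	"fmt"

	"go.signoz.io/signoz/pkg/confmap"
)

// Config is a hypothetical block used only to illustrate the interface.
type Config struct {
	Port int `mapstructure:"port"`
}

// Compile-time check, mirroring the cache and server configs above and below.
var _ confmap.Config = (*Config)(nil)

func (c *Config) NewWithDefaults() confmap.Config {
	return &Config{Port: 8080} // hypothetical default
}

func (c *Config) Validate() error {
	if c.Port <= 0 {
		return fmt.Errorf("port must be positive, got %d", c.Port)
	}
	return nil
}
```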
3 pkg/confmap/doc.go Normal file
@@ -0,0 +1,3 @@
// Package confmap is a wrapper on top of the confmap defined here:
// https://github.com/open-telemetry/opentelemetry-collector/blob/main/otelcol/configprovider.go/
package confmap
94 pkg/confmap/provider/signozenvprovider/provider.go Normal file
@@ -0,0 +1,94 @@
package signozenvprovider

import (
"context"
"fmt"
"os"
"regexp"
"sort"
"strings"

"go.opentelemetry.io/collector/confmap"
"go.uber.org/zap"
"gopkg.in/yaml.v3"
)

const (
schemeName string = "signozenv"
envPrefix string = "signoz"
separator string = "__"
envPrefixWithOneSeparator string = "signoz_"
envRegexString string = `^[a-zA-Z][a-zA-Z0-9_]*$`
)

var (
envRegex = regexp.MustCompile(envRegexString)
)

type provider struct {
logger *zap.Logger
}

// NewFactory returns a factory for a confmap.Provider that reads the configuration from the environment.
// All variables starting with `SIGNOZ__` are read from the environment.
// The separator is `__` (2 underscores) in order to incorporate env variables having keys with a single `_`
func NewFactory() confmap.ProviderFactory {
return confmap.NewProviderFactory(newProvider)
}

func newProvider(settings confmap.ProviderSettings) confmap.Provider {
return &provider{
logger: settings.Logger,
}
}

func (provider *provider) Retrieve(_ context.Context, uri string, _ confmap.WatcherFunc) (*confmap.Retrieved, error) {
if !strings.HasPrefix(uri, schemeName+":") {
return nil, fmt.Errorf("%q uri is not supported by %q provider", uri, schemeName)
}

// Read and Sort environment variables for consistent output
envvars := os.Environ()
sort.Strings(envvars)

// Create a map m containing key value pairs
m := make(map[string]any)
for _, envvar := range envvars {
parts := strings.SplitN(envvar, "=", 2)
if len(parts) != 2 {
continue
}
key := strings.ToLower(parts[0])
val := parts[1]

if strings.HasPrefix(key, envPrefixWithOneSeparator) {
// Remove the envPrefix from the key
key = strings.Replace(key, envPrefix+separator, "", 1)

// Check whether the resulting key matches with the regex
if !envRegex.MatchString(key) {
provider.logger.Warn("Configuration references invalid environment variable key", zap.String("key", key))
continue
}

// Convert key into yaml format
key = strings.ToLower(strings.ReplaceAll(key, separator, confmap.KeyDelimiter))
m[key] = val
}
}

out, err := yaml.Marshal(m)
if err != nil {
return nil, err
}

return confmap.NewRetrievedFromYAML(out)
}

func (*provider) Scheme() string {
return schemeName
}

func (*provider) Shutdown(context.Context) error {
return nil
}
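The net effect of Retrieve: drop the SIGNOZ prefix, lower-case the rest, and turn each double underscore into the :: key delimiter, so a single underscore survives inside a key segment. A small sketch mirroring the test file below; the variable name is illustrative:

```go
package main

import (
	"context"
	"fmt"
	"os"

	"go.opentelemetry.io/collector/confmap/confmaptest"

	"go.signoz.io/signoz/pkg/confmap/provider/signozenvprovider"
)

func main() {
	// SIGNOZ__CACHE__REDIS__HOST maps to the key cache::redis::host.
	os.Setenv("SIGNOZ__CACHE__REDIS__HOST", "localhost")

	p := signozenvprovider.NewFactory().Create(confmaptest.NewNopProviderSettings())
	retrieved, err := p.Retrieve(context.Background(), "signozenv:", nil)
	if err != nil {
		panic(err)
	}

	conf, err := retrieved.AsConf()
	if err != nil {
		panic(err)
	}
	fmt.Println(conf.ToStringMap()) // contains the cache::redis::host entry
}
```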
40
pkg/confmap/provider/signozenvprovider/provider_test.go
Normal file
40
pkg/confmap/provider/signozenvprovider/provider_test.go
Normal file
@@ -0,0 +1,40 @@
|
||||
package signozenvprovider
|
||||
|
||||
import (
|
||||
"context"
|
||||
"testing"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
"go.opentelemetry.io/collector/confmap"
|
||||
"go.opentelemetry.io/collector/confmap/confmaptest"
|
||||
)
|
||||
|
||||
func createProvider() confmap.Provider {
|
||||
return NewFactory().Create(confmaptest.NewNopProviderSettings())
|
||||
}
|
||||
|
||||
func TestValidateProviderScheme(t *testing.T) {
|
||||
assert.NoError(t, confmaptest.ValidateProviderScheme(createProvider()))
|
||||
}
|
||||
|
||||
func TestRetrieve(t *testing.T) {
|
||||
t.Setenv("SIGNOZ__STORAGE__DSN", "localhost:9000")
|
||||
t.Setenv("SIGNOZ__SIGNOZ_ENABLED", "true")
|
||||
t.Setenv("SIGNOZ__INSTRUMENTATION__LOGS__ENABLED", "true")
|
||||
expected := confmap.NewFromStringMap(map[string]any{
|
||||
"storage::dsn": "localhost:9000",
|
||||
"signoz_enabled": "true",
|
||||
"instrumentation::logs::enabled": "true",
|
||||
})
|
||||
|
||||
signoz := createProvider()
|
||||
retrieved, err := signoz.Retrieve(context.Background(), schemeName+":", nil)
|
||||
require.NoError(t, err)
|
||||
|
||||
actual, err := retrieved.AsConf()
|
||||
require.NoError(t, err)
|
||||
|
||||
assert.Equal(t, expected.ToStringMap(), actual.ToStringMap())
|
||||
assert.NoError(t, signoz.Shutdown(context.Background()))
|
||||
}
|
||||
@@ -1,8 +1,6 @@
|
||||
package factory
|
||||
|
||||
import (
|
||||
"context"
|
||||
)
|
||||
import "context"
|
||||
|
||||
type Provider = any
|
||||
|
||||
@@ -23,17 +21,10 @@ func (factory *providerFactory[P, C]) Name() Name {
|
||||
return factory.name
|
||||
}
|
||||
|
||||
func (factory *providerFactory[P, C]) New(ctx context.Context, settings ProviderSettings, config C) (p P, err error) {
|
||||
provider, err := factory.newProviderFunc(ctx, settings, config)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
p = provider
|
||||
return
|
||||
func (factory *providerFactory[P, C]) New(ctx context.Context, settings ProviderSettings, config C) (P, error) {
|
||||
return factory.newProviderFunc(ctx, settings, config)
|
||||
}
|
||||
|
||||
// NewProviderFactory creates a new provider factory.
|
||||
func NewProviderFactory[P Provider, C Config](name Name, newProviderFunc NewProviderFunc[P, C]) ProviderFactory[P, C] {
|
||||
return &providerFactory[P, C]{
|
||||
name: name,
|
||||
@@ -41,8 +32,7 @@ func NewProviderFactory[P Provider, C Config](name Name, newProviderFunc NewProv
|
||||
}
|
||||
}
|
||||
|
||||
// NewProviderFromNamedMap creates a new provider from a factory based on the input key.
|
||||
func NewProviderFromNamedMap[P Provider, C Config](ctx context.Context, settings ProviderSettings, config C, factories NamedMap[ProviderFactory[P, C]], key string) (p P, err error) {
|
||||
func NewFromFactory[P Provider, C Config](ctx context.Context, settings ProviderSettings, config C, factories NamedMap[ProviderFactory[P, C]], key string) (p P, err error) {
|
||||
providerFactory, err := factories.Get(key)
|
||||
if err != nil {
|
||||
return
|
||||
|
||||
@@ -32,10 +32,10 @@ func TestNewProviderFactoryFromFactory(t *testing.T) {
|
||||
|
||||
m := MustNewNamedMap(pf)
|
||||
assert.Equal(t, MustNewName("p1"), pf.Name())
|
||||
p, err := NewProviderFromNamedMap(context.Background(), ProviderSettings{}, pc1{}, m, "p1")
|
||||
p, err := NewFromFactory(context.Background(), ProviderSettings{}, pc1{}, m, "p1")
|
||||
assert.NoError(t, err)
|
||||
assert.IsType(t, p1{}, p)
|
||||
|
||||
_, err = NewProviderFromNamedMap(context.Background(), ProviderSettings{}, pc1{}, m, "p2")
|
||||
_, err = NewFromFactory(context.Background(), ProviderSettings{}, pc1{}, m, "p2")
|
||||
assert.Error(t, err)
|
||||
}
|
||||
|
||||
@@ -1,10 +0,0 @@
|
||||
package providertest
|
||||
|
||||
import (
|
||||
"go.signoz.io/signoz/pkg/factory"
|
||||
"go.signoz.io/signoz/pkg/instrumentation/instrumentationtest"
|
||||
)
|
||||
|
||||
func NewSettings() factory.ProviderSettings {
|
||||
return instrumentationtest.New().ToProviderSettings()
|
||||
}
|
||||
@@ -1,5 +1,12 @@
|
||||
package server
|
||||
|
||||
import (
|
||||
"go.signoz.io/signoz/pkg/confmap"
|
||||
)
|
||||
|
||||
// Config satisfies the confmap.Config interface
|
||||
var _ confmap.Config = (*Config)(nil)
|
||||
|
||||
// Config holds the configuration for http.
|
||||
type Config struct {
|
||||
//Address specifies the TCP address for the server to listen on, in the form "host:port".
|
||||
@@ -7,3 +14,14 @@ type Config struct {
|
||||
// See net.Dial for details of the address format.
|
||||
Address string `mapstructure:"address"`
|
||||
}
|
||||
|
||||
func (c *Config) NewWithDefaults() confmap.Config {
|
||||
return &Config{
|
||||
Address: "0.0.0.0:8080",
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
func (c *Config) Validate() error {
|
||||
return nil
|
||||
}
|
||||
|
||||
@@ -6,20 +6,21 @@ import (
|
||||
"net/http"
|
||||
"time"
|
||||
|
||||
"go.signoz.io/signoz/pkg/factory"
|
||||
"go.signoz.io/signoz/pkg/registry"
|
||||
"go.uber.org/zap"
|
||||
)
|
||||
|
||||
var _ factory.Service = (*Server)(nil)
|
||||
var _ registry.NamedService = (*Server)(nil)
|
||||
|
||||
type Server struct {
|
||||
srv *http.Server
|
||||
logger *zap.Logger
|
||||
handler http.Handler
|
||||
cfg Config
|
||||
name string
|
||||
}
|
||||
|
||||
func New(logger *zap.Logger, cfg Config, handler http.Handler) (*Server, error) {
|
||||
func New(logger *zap.Logger, name string, cfg Config, handler http.Handler) (*Server, error) {
|
||||
if handler == nil {
|
||||
return nil, fmt.Errorf("cannot build http server, handler is required")
|
||||
}
|
||||
@@ -28,6 +29,10 @@ func New(logger *zap.Logger, cfg Config, handler http.Handler) (*Server, error)
|
||||
return nil, fmt.Errorf("cannot build http server, logger is required")
|
||||
}
|
||||
|
||||
if name == "" {
|
||||
return nil, fmt.Errorf("cannot build http server, name is required")
|
||||
}
|
||||
|
||||
srv := &http.Server{
|
||||
Addr: cfg.Address,
|
||||
Handler: handler,
|
||||
@@ -41,9 +46,14 @@ func New(logger *zap.Logger, cfg Config, handler http.Handler) (*Server, error)
|
||||
logger: logger.Named("go.signoz.io/pkg/http/server"),
|
||||
handler: handler,
|
||||
cfg: cfg,
|
||||
name: name,
|
||||
}, nil
|
||||
}
|
||||
|
||||
func (server *Server) Name() string {
|
||||
return server.name
|
||||
}
|
||||
|
||||
func (server *Server) Start(ctx context.Context) error {
|
||||
server.logger.Info("starting http server", zap.String("address", server.srv.Addr))
|
||||
if err := server.srv.ListenAndServe(); err != nil {
|
||||
|
||||
@@ -218,7 +218,6 @@ func NewReaderFromClickhouseConnection(
|
||||
MaxBytesToRead: os.Getenv("ClickHouseMaxBytesToRead"),
|
||||
OptimizeReadInOrderRegex: os.Getenv("ClickHouseOptimizeReadInOrderRegex"),
|
||||
OptimizeReadInOrderRegexCompiled: regexCompiled,
|
||||
MaxResultRowsForCHQuery: constants.MaxResultRowsForCHQuery,
|
||||
},
|
||||
}
|
||||
|
||||
@@ -4199,26 +4198,9 @@ func (r *ClickHouseReader) GetListResultV3(ctx context.Context, query string) ([
|
||||
var t time.Time
|
||||
for idx, v := range vars {
|
||||
if columnNames[idx] == "timestamp" {
|
||||
switch v := v.(type) {
|
||||
case *uint64:
|
||||
t = time.Unix(0, int64(*v))
|
||||
case *time.Time:
|
||||
t = *v
|
||||
}
|
||||
t = time.Unix(0, int64(*v.(*uint64)))
|
||||
} else if columnNames[idx] == "timestamp_datetime" {
|
||||
t = *v.(*time.Time)
|
||||
} else if columnNames[idx] == "events" {
|
||||
var events []map[string]interface{}
|
||||
eventsFromDB, ok := v.(*[]string)
|
||||
if !ok {
|
||||
continue
|
||||
}
|
||||
for _, event := range *eventsFromDB {
|
||||
var eventMap map[string]interface{}
|
||||
json.Unmarshal([]byte(event), &eventMap)
|
||||
events = append(events, eventMap)
|
||||
}
|
||||
row[columnNames[idx]] = events
|
||||
} else {
|
||||
row[columnNames[idx]] = v
|
||||
}
|
||||
|
||||
@@ -17,7 +17,6 @@ type ClickhouseQuerySettings struct {
|
||||
MaxBytesToRead string
|
||||
OptimizeReadInOrderRegex string
|
||||
OptimizeReadInOrderRegexCompiled *regexp.Regexp
|
||||
MaxResultRowsForCHQuery int
|
||||
}
|
||||
|
||||
type clickhouseConnWrapper struct {
|
||||
@@ -45,10 +44,6 @@ func (c clickhouseConnWrapper) addClickHouseSettings(ctx context.Context, query
|
||||
settings["log_comment"] = logComment
|
||||
}
|
||||
|
||||
if ctx.Value("enforce_max_result_rows") != nil {
|
||||
settings["max_result_rows"] = c.settings.MaxResultRowsForCHQuery
|
||||
}
|
||||
|
||||
if c.settings.MaxBytesToRead != "" {
|
||||
settings["max_bytes_to_read"] = c.settings.MaxBytesToRead
|
||||
}
|
||||
|
||||
@@ -288,7 +288,11 @@ func GetDashboard(ctx context.Context, uuid string) (*Dashboard, *model.ApiError
|
||||
if err != nil {
|
||||
return nil, &model.ApiError{Typ: model.ErrorNotFound, Err: fmt.Errorf("no dashboard found with uuid: %s", uuid)}
|
||||
}
|
||||
|
||||
|
||||
if dashboard.Data["title"] == "Ingestion" && dashboard.Data["description"] != nil {
|
||||
dashboard.Data["description"] = "This dashboard is deprecated. Please use the new Ingestion V2 dashboard. " + dashboard.Data["description"].(string)
|
||||
}
|
||||
|
||||
return &dashboard, nil
|
||||
}
|
||||
|
||||
|
||||
@@ -338,7 +338,5 @@ func (p *ClustersRepo) GetClusterList(ctx context.Context, req model.ClusterList
|
||||
resp.Total = len(allClusterGroups)
|
||||
resp.Records = records
|
||||
|
||||
resp.SortBy(req.OrderBy)
|
||||
|
||||
return resp, nil
|
||||
}
|
||||
|
||||
@@ -440,7 +440,5 @@ func (d *DaemonSetsRepo) GetDaemonSetList(ctx context.Context, req model.DaemonS
|
||||
resp.Total = len(allDaemonSetGroups)
|
||||
resp.Records = records
|
||||
|
||||
resp.SortBy(req.OrderBy)
|
||||
|
||||
return resp, nil
|
||||
}
|
||||
|
||||
@@ -440,7 +440,5 @@ func (d *DeploymentsRepo) GetDeploymentList(ctx context.Context, req model.Deplo
|
||||
resp.Total = len(allDeploymentGroups)
|
||||
resp.Records = records
|
||||
|
||||
resp.SortBy(req.OrderBy)
|
||||
|
||||
return resp, nil
|
||||
}
|
||||
|
||||
@@ -494,7 +494,5 @@ func (d *JobsRepo) GetJobList(ctx context.Context, req model.JobListRequest) (mo
|
||||
resp.Total = len(allJobGroups)
|
||||
resp.Records = records
|
||||
|
||||
resp.SortBy(req.OrderBy)
|
||||
|
||||
return resp, nil
|
||||
}
|
||||
|
||||
@@ -341,7 +341,5 @@ func (p *NamespacesRepo) GetNamespaceList(ctx context.Context, req model.Namespa
|
||||
resp.Total = len(allNamespaceGroups)
|
||||
resp.Records = records
|
||||
|
||||
resp.SortBy(req.OrderBy)
|
||||
|
||||
return resp, nil
|
||||
}
|
||||
|
||||
@@ -19,7 +19,7 @@ var (

	nodeAttrsToEnrich = []string{"k8s_node_name", "k8s_node_uid", "k8s_cluster_name"}

	k8sNodeGroupAttrKey = "k8s_node_name"
	k8sNodeUIDAttrKey   = "k8s_node_uid"

	queryNamesForNodes = map[string][]string{
		"cpu": {"A"},

@@ -125,7 +125,7 @@ func (p *NodesRepo) getMetadataAttributes(ctx context.Context, req model.NodeLis
		}
	}

	nodeUID := stringData[k8sNodeGroupAttrKey]
	nodeUID := stringData[k8sNodeUIDAttrKey]
	if _, ok := nodeAttrs[nodeUID]; !ok {
		nodeAttrs[nodeUID] = map[string]string{}
	}

@@ -220,7 +220,7 @@ func (p *NodesRepo) GetNodeList(ctx context.Context, req model.NodeListRequest)
	}

	if req.GroupBy == nil {
		req.GroupBy = []v3.AttributeKey{{Key: k8sNodeGroupAttrKey}}
		req.GroupBy = []v3.AttributeKey{{Key: k8sNodeUIDAttrKey}}
		resp.Type = model.ResponseTypeList
	} else {
		resp.Type = model.ResponseTypeGroupedList

@@ -306,7 +306,7 @@ func (p *NodesRepo) GetNodeList(ctx context.Context, req model.NodeListRequest)
		NodeMemoryAllocatable: -1,
	}

	if nodeUID, ok := row.Data[k8sNodeGroupAttrKey].(string); ok {
	if nodeUID, ok := row.Data[k8sNodeUIDAttrKey].(string); ok {
		record.NodeUID = nodeUID
	}

@@ -354,6 +354,5 @@ func (p *NodesRepo) GetNodeList(ctx context.Context, req model.NodeListRequest)
	resp.Total = len(allNodeGroups)
	resp.Records = records

	resp.SortBy(req.OrderBy)
	return resp, nil
}

@@ -20,7 +20,7 @@ var NodesTableListQuery = v3.QueryRangeParamsV3{
	},
	GroupBy: []v3.AttributeKey{
		{
			Key:      k8sNodeGroupAttrKey,
			Key:      k8sNodeUIDAttrKey,
			DataType: v3.AttributeKeyDataTypeString,
			Type:     v3.AttributeKeyTypeResource,
		},

@@ -46,7 +46,7 @@ var NodesTableListQuery = v3.QueryRangeParamsV3{
	},
	GroupBy: []v3.AttributeKey{
		{
			Key:      k8sNodeGroupAttrKey,
			Key:      k8sNodeUIDAttrKey,
			DataType: v3.AttributeKeyDataTypeString,
			Type:     v3.AttributeKeyTypeResource,
		},

@@ -72,7 +72,7 @@ var NodesTableListQuery = v3.QueryRangeParamsV3{
	},
	GroupBy: []v3.AttributeKey{
		{
			Key:      k8sNodeGroupAttrKey,
			Key:      k8sNodeUIDAttrKey,
			DataType: v3.AttributeKeyDataTypeString,
			Type:     v3.AttributeKeyTypeResource,
		},

@@ -98,7 +98,7 @@ var NodesTableListQuery = v3.QueryRangeParamsV3{
	},
	GroupBy: []v3.AttributeKey{
		{
			Key:      k8sNodeGroupAttrKey,
			Key:      k8sNodeUIDAttrKey,
			DataType: v3.AttributeKeyDataTypeString,
			Type:     v3.AttributeKeyTypeResource,
		},

@@ -132,7 +132,7 @@ var NodesTableListQuery = v3.QueryRangeParamsV3{
	},
	GroupBy: []v3.AttributeKey{
		{
			Key:      k8sNodeGroupAttrKey,
			Key:      k8sNodeUIDAttrKey,
			DataType: v3.AttributeKeyDataTypeString,
			Type:     v3.AttributeKeyTypeResource,
		},

@@ -166,7 +166,7 @@ var NodesTableListQuery = v3.QueryRangeParamsV3{
	},
	GroupBy: []v3.AttributeKey{
		{
			Key:      k8sNodeGroupAttrKey,
			Key:      k8sNodeUIDAttrKey,
			DataType: v3.AttributeKeyDataTypeString,
			Type:     v3.AttributeKeyTypeResource,
		},
@@ -404,7 +404,5 @@ func (p *PodsRepo) GetPodList(ctx context.Context, req model.PodListRequest) (mo
	resp.Total = len(allPodGroups)
	resp.Records = records

	resp.SortBy(req.OrderBy)

	return resp, nil
}

@@ -374,7 +374,5 @@ func (p *PvcsRepo) GetPvcList(ctx context.Context, req model.VolumeListRequest)
	resp.Total = len(allVolumeGroups)
	resp.Records = records

	resp.SortBy(req.OrderBy)

	return resp, nil
}

@@ -440,7 +440,5 @@ func (d *StatefulSetsRepo) GetStatefulSetList(ctx context.Context, req model.Sta
	resp.Total = len(allStatefulSetGroups)
	resp.Records = records

	resp.SortBy(req.OrderBy)

	return resp, nil
}
@@ -167,22 +167,6 @@ func jsonFilterEnrich(filter v3.FilterItem) v3.FilterItem {
	// check if the value is an int, float, string, or bool
	valueType := ""
	switch filter.Value.(type) {
	// even if the filter value is an array, the actual type of the value is string.
	case []interface{}:
		// check the type of the first value in the array and use that
		if len(filter.Value.([]interface{})) > 0 {
			firstVal := filter.Value.([]interface{})[0]
			switch firstVal.(type) {
			case uint8, uint16, uint32, uint64, int, int8, int16, int32, int64:
				valueType = "int64"
			case float32, float64:
				valueType = "float64"
			case bool:
				valueType = "bool"
			default:
				valueType = "string"
			}
		}
	case uint8, uint16, uint32, uint64, int, int8, int16, int32, int64:
		valueType = "int64"
	case float32, float64:
@@ -563,50 +563,6 @@ var testJSONFilterEnrichData = []struct {
		Value: 10.0,
	},
},
{
	Name: "check IN",
	Filter: v3.FilterItem{
		Key: v3.AttributeKey{
			Key:      "body.attr",
			DataType: v3.AttributeKeyDataTypeUnspecified,
			Type:     v3.AttributeKeyTypeUnspecified,
		},
		Operator: "IN",
		Value:    []interface{}{"hello", "world"},
	},
	Result: v3.FilterItem{
		Key: v3.AttributeKey{
			Key:      "body.attr",
			DataType: v3.AttributeKeyDataTypeString,
			Type:     v3.AttributeKeyTypeUnspecified,
			IsJSON:   true,
		},
		Operator: "IN",
		Value:    []interface{}{"hello", "world"},
	},
},
{
	Name: "check NOT_IN",
	Filter: v3.FilterItem{
		Key: v3.AttributeKey{
			Key:      "body.attr",
			DataType: v3.AttributeKeyDataTypeUnspecified,
			Type:     v3.AttributeKeyTypeUnspecified,
		},
		Operator: "NOT_IN",
		Value:    []interface{}{10, 20},
	},
	Result: v3.FilterItem{
		Key: v3.AttributeKey{
			Key:      "body.attr",
			DataType: v3.AttributeKeyDataTypeInt64,
			Type:     v3.AttributeKeyTypeUnspecified,
			IsJSON:   true,
		},
		Operator: "NOT_IN",
		Value:    []interface{}{10, 20},
	},
},
}

func TestJsonEnrich(t *testing.T) {
@@ -183,71 +183,6 @@ var testGetJSONFilterData = []struct {
	},
	Filter: "lower(body) like lower('%message%') AND JSON_EXISTS(body, '$.\"message\"')",
},
{
	Name: "test json in array string",
	FilterItem: v3.FilterItem{
		Key: v3.AttributeKey{
			Key:      "body.name",
			DataType: "string",
			IsJSON:   true,
		},
		Operator: "in",
		Value:    []interface{}{"hello", "world"},
	},
	Filter: "lower(body) like lower('%name%') AND JSON_EXISTS(body, '$.\"name\"') AND JSON_VALUE(body, '$.\"name\"') IN ['hello','world']",
},
{
	Name: "test json in array number",
	FilterItem: v3.FilterItem{
		Key: v3.AttributeKey{
			Key:      "body.value",
			DataType: "int64",
			IsJSON:   true,
		},
		Operator: "in",
		Value:    []interface{}{10, 11},
	},
	Filter: "lower(body) like lower('%value%') AND JSON_EXISTS(body, '$.\"value\"') AND JSONExtract(JSON_VALUE(body, '$.\"value\"'), 'Int64') IN [10,11]",
},
{
	Name: "test json in array mixed data- allow",
	FilterItem: v3.FilterItem{
		Key: v3.AttributeKey{
			Key:      "body.value",
			DataType: "int64",
			IsJSON:   true,
		},
		Operator: "in",
		Value:    []interface{}{11, "11"},
	},
	Filter: "lower(body) like lower('%value%') AND JSON_EXISTS(body, '$.\"value\"') AND JSONExtract(JSON_VALUE(body, '$.\"value\"'), 'Int64') IN [11,11]",
},
{
	Name: "test json in array mixed data- fail",
	FilterItem: v3.FilterItem{
		Key: v3.AttributeKey{
			Key:      "body.value",
			DataType: "int64",
			IsJSON:   true,
		},
		Operator: "in",
		Value:    []interface{}{11, "11", "hello"},
	},
	Error: true,
},
{
	Name: "test json in array mixed data- allow",
	FilterItem: v3.FilterItem{
		Key: v3.AttributeKey{
			Key:      "body.value",
			DataType: "string",
			IsJSON:   true,
		},
		Operator: "in",
		Value:    []interface{}{"hello", 11},
	},
	Filter: "lower(body) like lower('%value%') AND JSON_EXISTS(body, '$.\"value\"') AND JSON_VALUE(body, '$.\"value\"') IN ['hello','11']",
},
}

func TestGetJSONFilter(t *testing.T) {
@@ -465,15 +465,7 @@ func (q *querier) runBuilderListQueries(ctx context.Context, params *v3.QueryRan
		}
	}

	queries := make(map[string]string)
	var err error
	if params.CompositeQuery.QueryType == v3.QueryTypeBuilder {
		queries, err = q.builder.PrepareQueries(params)
	} else if params.CompositeQuery.QueryType == v3.QueryTypeClickHouseSQL {
		for name, chQuery := range params.CompositeQuery.ClickHouseQueries {
			queries[name] = chQuery.Query
		}
	}
	queries, err := q.builder.PrepareQueries(params)

	if err != nil {
		return nil, nil, err

@@ -542,12 +534,7 @@ func (q *querier) QueryRange(ctx context.Context, params *v3.QueryRangeParamsV3)
	case v3.QueryTypePromQL:
		results, errQueriesByName, err = q.runPromQueries(ctx, params)
	case v3.QueryTypeClickHouseSQL:
		ctx = context.WithValue(ctx, "enforce_max_result_rows", true)
		if params.CompositeQuery.PanelType == v3.PanelTypeList || params.CompositeQuery.PanelType == v3.PanelTypeTrace {
			results, errQueriesByName, err = q.runBuilderListQueries(ctx, params)
		} else {
			results, errQueriesByName, err = q.runClickHouseQueries(ctx, params)
		}
		results, errQueriesByName, err = q.runClickHouseQueries(ctx, params)
	default:
		err = fmt.Errorf("invalid query type")
	}

@@ -548,7 +548,6 @@ func (q *querier) QueryRange(ctx context.Context, params *v3.QueryRangeParamsV3)
	if params.CompositeQuery.PanelType == v3.PanelTypeList || params.CompositeQuery.PanelType == v3.PanelTypeTrace {
		results, errQueriesByName, err = q.runBuilderListQueries(ctx, params)
	} else {
		ctx = context.WithValue(ctx, "enforce_max_result_rows", true)
		results, errQueriesByName, err = q.runClickHouseQueries(ctx, params)
	}
	default:
@@ -33,9 +33,8 @@ import (
	opAmpModel "go.signoz.io/signoz/pkg/query-service/app/opamp/model"
	"go.signoz.io/signoz/pkg/query-service/app/preferences"
	"go.signoz.io/signoz/pkg/query-service/common"
	"go.signoz.io/signoz/pkg/query-service/migrate"
	v3 "go.signoz.io/signoz/pkg/query-service/model/v3"
	"go.signoz.io/signoz/pkg/signoz"
	"go.signoz.io/signoz/pkg/web"

	"go.signoz.io/signoz/pkg/query-service/app/explorer"
	"go.signoz.io/signoz/pkg/query-service/auth"

@@ -71,7 +70,6 @@ type ServerOptions struct {
	Cluster           string
	UseLogsNewSchema  bool
	UseTraceNewSchema bool
	SigNoz            *signoz.SigNoz
}

// Server runs HTTP, Mux and a grpc server

@@ -168,6 +166,13 @@ func NewServer(serverOptions *ServerOptions) (*Server, error) {
		return nil, err
	}

	go func() {
		err = migrate.ClickHouseMigrate(reader.GetConn(), serverOptions.Cluster)
		if err != nil {
			zap.L().Error("error while running clickhouse migrations", zap.Error(err))
		}
	}()

	fluxInterval, err := time.ParseDuration(serverOptions.FluxInterval)
	if err != nil {
		return nil, err

@@ -221,7 +226,7 @@ func NewServer(serverOptions *ServerOptions) (*Server, error) {
		unavailableChannel: make(chan healthcheck.Status),
	}

	httpServer, err := s.createPublicServer(apiHandler, serverOptions.SigNoz.Web)
	httpServer, err := s.createPublicServer(apiHandler)

	if err != nil {
		return nil, err

@@ -285,7 +290,7 @@ func (s *Server) createPrivateServer(api *APIHandler) (*http.Server, error) {
	}, nil
}

func (s *Server) createPublicServer(api *APIHandler, web web.Web) (*http.Server, error) {
func (s *Server) createPublicServer(api *APIHandler) (*http.Server, error) {

	r := NewRouter()

@@ -330,11 +335,6 @@ func (s *Server) createPublicServer(api *APIHandler, web web.Web) (*http.Server,

	handler = handlers.CompressHandler(handler)

	err := web.AddToRouter(r)
	if err != nil {
		return nil, err
	}

	return &http.Server{
		Handler: handler,
	}, nil
@@ -93,8 +93,8 @@ func buildTracesFilterQuery(fs *v3.FilterSet) (string, error) {
	if fs != nil && len(fs.Items) != 0 {
		for _, item := range fs.Items {

			// skip if it's a resource attribute or Span search scope attribute
			if item.Key.Type == v3.AttributeKeyTypeResource || item.Key.Type == v3.AttributeKeyTypeSpanSearchScope {
			// skip if it's a resource attribute
			if item.Key.Type == v3.AttributeKeyTypeResource {
				continue
			}

@@ -213,31 +213,6 @@ func orderByAttributeKeyTags(panelType v3.PanelType, items []v3.OrderBy, tags []
	return str
}

func buildSpanScopeQuery(fs *v3.FilterSet) (string, error) {
	var query string
	if fs == nil || len(fs.Items) == 0 {
		return "", nil
	}
	for _, item := range fs.Items {
		// skip anything other than Span Search scope attribute
		if item.Key.Type != v3.AttributeKeyTypeSpanSearchScope {
			continue
		}
		keyName := strings.ToLower(item.Key.Key)

		if keyName == constants.SpanSearchScopeRoot {
			query = "parent_span_id = '' "
			return query, nil
		} else if keyName == constants.SpanSearchScopeEntryPoint {
			query = "((name, `resource_string_service$$name`) IN ( SELECT DISTINCT name, serviceName from " + constants.SIGNOZ_TRACE_DBNAME + "." + constants.SIGNOZ_TOP_LEVEL_OPERATIONS_TABLENAME + " )) "
			return query, nil
		} else {
			return "", fmt.Errorf("invalid scope item type: %s", item.Key.Type)
		}
	}
	return "", nil
}
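For reference, a hedged usage sketch of the removed helper: a span-scope filter compiles into a standalone SQL fragment rather than a column predicate (an illustrative fragment, not a complete program; the v3 types are from the query-service model package):

fs := &v3.FilterSet{Operator: "AND", Items: []v3.FilterItem{{
	Key:      v3.AttributeKey{Key: "isRoot", Type: v3.AttributeKeyTypeSpanSearchScope},
	Operator: v3.FilterOperatorEqual,
	Value:    true,
}}}

frag, err := buildSpanScopeQuery(fs)
// frag == "parent_span_id = '' ", err == nil; buildTracesQuery then
// appends the fragment to filterSubQuery with " AND ".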
func buildTracesQuery(start, end, step int64, mq *v3.BuilderQuery, panelType v3.PanelType, options v3.QBOptions) (string, error) {
	tracesStart := utils.GetEpochNanoSecs(start)
	tracesEnd := utils.GetEpochNanoSecs(end)

@@ -273,11 +248,6 @@ func buildTracesQuery(start, end, step int64, mq *v3.BuilderQuery, panelType v3.
		filterSubQuery = filterSubQuery + " AND (resource_fingerprint GLOBAL IN " + resourceSubQuery + ")"
	}

	spanScopeSubQuery, err := buildSpanScopeQuery(mq.Filters)
	if spanScopeSubQuery != "" {
		filterSubQuery = filterSubQuery + " AND " + spanScopeSubQuery
	}

	// timerange will be sent in epoch millisecond
	selectLabels := getSelectLabels(mq.GroupBy)
	if selectLabels != "" {

@@ -304,8 +274,8 @@ func buildTracesQuery(start, end, step int64, mq *v3.BuilderQuery, panelType v3.
		if len(mq.SelectColumns) == 0 {
			return "", fmt.Errorf("select columns cannot be empty for panelType %s", panelType)
		}
		selectLabels = getSelectLabels(mq.SelectColumns)
		// add it to the select labels
		selectLabels = getSelectLabels(mq.SelectColumns)
		queryNoOpTmpl := fmt.Sprintf("SELECT timestamp as timestamp_datetime, spanID, traceID,%s ", selectLabels) + "from " + constants.SIGNOZ_TRACE_DBNAME + "." + constants.SIGNOZ_SPAN_INDEX_V3 + " where %s %s" + "%s"
		query = fmt.Sprintf(queryNoOpTmpl, timeFilter, filterSubQuery, orderBy)
	} else {

@@ -552,70 +552,6 @@ func Test_buildTracesQuery(t *testing.T) {
	want: "SELECT timestamp as timestamp_datetime, spanID, traceID, name as `name` from signoz_traces.distributed_signoz_index_v3 where (timestamp >= '1680066360726210000' AND timestamp <= '1680066458000000000') " +
		"AND (ts_bucket_start >= 1680064560 AND ts_bucket_start <= 1680066458) order by timestamp ASC",
},
{
	name: "test noop list view with entry_point_spans",
	args: args{
		panelType: v3.PanelTypeList,
		start:     1680066360726210000,
		end:       1680066458000000000,
		mq: &v3.BuilderQuery{
			AggregateOperator: v3.AggregateOperatorNoOp,
			Filters:           &v3.FilterSet{Operator: "AND", Items: []v3.FilterItem{{Key: v3.AttributeKey{Key: "isEntryPoint", Type: v3.AttributeKeyTypeSpanSearchScope, IsColumn: false}, Value: true, Operator: v3.FilterOperatorEqual}}},
			SelectColumns:     []v3.AttributeKey{{Key: "name", DataType: v3.AttributeKeyDataTypeString, Type: v3.AttributeKeyTypeTag, IsColumn: true}},
			OrderBy:           []v3.OrderBy{{ColumnName: "timestamp", Order: "ASC"}},
		},
	},
	want: "SELECT timestamp as timestamp_datetime, spanID, traceID, name as `name` from signoz_traces.distributed_signoz_index_v3 where (timestamp >= '1680066360726210000' AND timestamp <= '1680066458000000000') " +
		"AND (ts_bucket_start >= 1680064560 AND ts_bucket_start <= 1680066458) AND ((name, `resource_string_service$$name`) IN ( SELECT DISTINCT name, serviceName from signoz_traces.distributed_top_level_operations )) order by timestamp ASC",
},
{
	name: "test noop list view with root_spans",
	args: args{
		panelType: v3.PanelTypeList,
		start:     1680066360726210000,
		end:       1680066458000000000,
		mq: &v3.BuilderQuery{
			AggregateOperator: v3.AggregateOperatorNoOp,
			Filters:           &v3.FilterSet{Operator: "AND", Items: []v3.FilterItem{{Key: v3.AttributeKey{Key: "isRoot", Type: v3.AttributeKeyTypeSpanSearchScope, IsColumn: false}, Value: true, Operator: v3.FilterOperatorEqual}}},
			SelectColumns:     []v3.AttributeKey{{Key: "name", DataType: v3.AttributeKeyDataTypeString, Type: v3.AttributeKeyTypeTag, IsColumn: true}},
			OrderBy:           []v3.OrderBy{{ColumnName: "timestamp", Order: "ASC"}},
		},
	},
	want: "SELECT timestamp as timestamp_datetime, spanID, traceID, name as `name` from signoz_traces.distributed_signoz_index_v3 where (timestamp >= '1680066360726210000' AND timestamp <= '1680066458000000000') " +
		"AND (ts_bucket_start >= 1680064560 AND ts_bucket_start <= 1680066458) AND parent_span_id = '' order by timestamp ASC",
},
{
	name: "test noop list view with root_spans and entry_point_spans both existing",
	args: args{
		panelType: v3.PanelTypeList,
		start:     1680066360726210000,
		end:       1680066458000000000,
		mq: &v3.BuilderQuery{
			AggregateOperator: v3.AggregateOperatorNoOp,
			Filters:           &v3.FilterSet{Operator: "AND", Items: []v3.FilterItem{{Key: v3.AttributeKey{Key: "isRoot", Type: v3.AttributeKeyTypeSpanSearchScope, IsColumn: false}, Value: true, Operator: v3.FilterOperatorEqual}, {Key: v3.AttributeKey{Key: "isEntryPoint", Type: v3.AttributeKeyTypeSpanSearchScope, IsColumn: false}, Value: true, Operator: v3.FilterOperatorEqual}}},
			SelectColumns:     []v3.AttributeKey{{Key: "name", DataType: v3.AttributeKeyDataTypeString, Type: v3.AttributeKeyTypeTag, IsColumn: true}},
			OrderBy:           []v3.OrderBy{{ColumnName: "timestamp", Order: "ASC"}},
		},
	},
	want: "SELECT timestamp as timestamp_datetime, spanID, traceID, name as `name` from signoz_traces.distributed_signoz_index_v3 where (timestamp >= '1680066360726210000' AND timestamp <= '1680066458000000000') " +
		"AND (ts_bucket_start >= 1680064560 AND ts_bucket_start <= 1680066458) AND parent_span_id = '' order by timestamp ASC",
},
{
	name: "test noop list view with root_spans with other attributes",
	args: args{
		panelType: v3.PanelTypeList,
		start:     1680066360726210000,
		end:       1680066458000000000,
		mq: &v3.BuilderQuery{
			AggregateOperator: v3.AggregateOperatorNoOp,
			Filters:           &v3.FilterSet{Operator: "AND", Items: []v3.FilterItem{{Key: v3.AttributeKey{Key: "isRoot", Type: v3.AttributeKeyTypeSpanSearchScope, IsColumn: false}, Value: true, Operator: v3.FilterOperatorEqual}, {Key: v3.AttributeKey{Key: "service.name", Type: v3.AttributeKeyTypeResource, IsColumn: true, DataType: v3.AttributeKeyDataTypeString}, Value: "cartservice", Operator: v3.FilterOperatorEqual}}},
			SelectColumns:     []v3.AttributeKey{{Key: "name", DataType: v3.AttributeKeyDataTypeString, Type: v3.AttributeKeyTypeTag, IsColumn: true}},
			OrderBy:           []v3.OrderBy{{ColumnName: "timestamp", Order: "ASC"}},
		},
	},
	want: "SELECT timestamp as timestamp_datetime, spanID, traceID, name as `name` from signoz_traces.distributed_signoz_index_v3 where (timestamp >= '1680066360726210000' AND timestamp <= '1680066458000000000') " +
		"AND (ts_bucket_start >= 1680064560 AND ts_bucket_start <= 1680066458) AND (resource_fingerprint GLOBAL IN (SELECT fingerprint FROM signoz_traces.distributed_traces_v3_resource WHERE (seen_at_ts_bucket_start >= 1680064560) AND (seen_at_ts_bucket_start <= 1680066458) AND simpleJSONExtractString(labels, 'service.name') = 'cartservice' AND labels like '%service.name%cartservice%')) AND parent_span_id = '' order by timestamp ASC",
},
{
	name: "test noop list view-without ts",
	args: args{
@@ -54,9 +54,6 @@ const DurationSort = "DurationSort"
const TimestampSort = "TimestampSort"
const PreferRPM = "PreferRPM"

const SpanSearchScopeRoot = "isroot"
const SpanSearchScopeEntryPoint = "isentrypoint"

func GetAlertManagerApiPrefix() string {
	if os.Getenv("ALERTMANAGER_API_PREFIX") != "" {
		return os.Getenv("ALERTMANAGER_API_PREFIX")

@@ -251,7 +248,6 @@ const (
	SIGNOZ_TIMESERIES_v4_1DAY_LOCAL_TABLENAME  = "time_series_v4_1day"
	SIGNOZ_TIMESERIES_v4_1WEEK_LOCAL_TABLENAME = "time_series_v4_1week"
	SIGNOZ_TIMESERIES_v4_1DAY_TABLENAME        = "distributed_time_series_v4_1day"
	SIGNOZ_TOP_LEVEL_OPERATIONS_TABLENAME      = "distributed_top_level_operations"
)

var TimeoutExcludedRoutes = map[string]bool{

@@ -738,5 +734,3 @@ func init() {
}

const TRACE_V4_MAX_PAGINATION_LIMIT = 10000

const MaxResultRowsForCHQuery = 1_000_000
@@ -9,15 +9,11 @@ import (
	"time"

	prommodel "github.com/prometheus/common/model"
	"go.signoz.io/signoz/pkg/config"
	"go.signoz.io/signoz/pkg/config/envprovider"
	"go.signoz.io/signoz/pkg/config/fileprovider"
	"go.signoz.io/signoz/pkg/query-service/app"
	"go.signoz.io/signoz/pkg/query-service/auth"
	"go.signoz.io/signoz/pkg/query-service/constants"
	"go.signoz.io/signoz/pkg/query-service/migrate"
	"go.signoz.io/signoz/pkg/query-service/version"
	"go.signoz.io/signoz/pkg/signoz"

	"go.uber.org/zap"
	"go.uber.org/zap/zapcore"

@@ -78,22 +74,6 @@ func main() {
	logger := loggerMgr.Sugar()
	version.PrintVersion()

	config, err := signoz.NewConfig(context.Background(), config.ResolverConfig{
		Uris: []string{"env:"},
		ProviderFactories: []config.ProviderFactory{
			envprovider.NewFactory(),
			fileprovider.NewFactory(),
		},
	})
	if err != nil {
		zap.L().Fatal("Failed to create config", zap.Error(err))
	}

	signoz, err := signoz.New(context.Background(), config, signoz.NewProviderConfig())
	if err != nil {
		zap.L().Fatal("Failed to create signoz struct", zap.Error(err))
	}

	serverOptions := &app.ServerOptions{
		HTTPHostPort:   constants.HTTPHostPort,
		PromConfigPath: promConfigPath,

@@ -110,7 +90,6 @@ func main() {
		Cluster:           cluster,
		UseLogsNewSchema:  useLogsNewSchema,
		UseTraceNewSchema: useTraceNewSchema,
		SigNoz:            signoz,
	}

	// Read the jwt secret key
@@ -1,8 +1,11 @@
package migrate

import (
	"context"
	"database/sql"
	"fmt"

	"github.com/ClickHouse/clickhouse-go/v2/lib/driver"
	"github.com/jmoiron/sqlx"
)

@@ -52,3 +55,90 @@ func Migrate(dsn string) error {

	return nil
}

func ClickHouseMigrate(conn driver.Conn, cluster string) error {

	database := "CREATE DATABASE IF NOT EXISTS signoz_analytics ON CLUSTER %s"

	localTable := `CREATE TABLE IF NOT EXISTS signoz_analytics.rule_state_history_v0 ON CLUSTER %s
	(
		_retention_days UInt32 DEFAULT 180,
		rule_id LowCardinality(String),
		rule_name LowCardinality(String),
		overall_state LowCardinality(String),
		overall_state_changed Bool,
		state LowCardinality(String),
		state_changed Bool,
		unix_milli Int64 CODEC(Delta(8), ZSTD(1)),
		fingerprint UInt64 CODEC(ZSTD(1)),
		value Float64 CODEC(Gorilla, ZSTD(1)),
		labels String CODEC(ZSTD(5)),
	)
	ENGINE = MergeTree
	PARTITION BY toDate(unix_milli / 1000)
	ORDER BY (rule_id, unix_milli)
	TTL toDateTime(unix_milli / 1000) + toIntervalDay(_retention_days)
	SETTINGS ttl_only_drop_parts = 1, index_granularity = 8192`

	distributedTable := `CREATE TABLE IF NOT EXISTS signoz_analytics.distributed_rule_state_history_v0 ON CLUSTER %s
	(
		rule_id LowCardinality(String),
		rule_name LowCardinality(String),
		overall_state LowCardinality(String),
		overall_state_changed Bool,
		state LowCardinality(String),
		state_changed Bool,
		unix_milli Int64 CODEC(Delta(8), ZSTD(1)),
		fingerprint UInt64 CODEC(ZSTD(1)),
		value Float64 CODEC(Gorilla, ZSTD(1)),
		labels String CODEC(ZSTD(5)),
	)
	ENGINE = Distributed(%s, signoz_analytics, rule_state_history_v0, cityHash64(rule_id, rule_name, fingerprint))`

	// check if db exists
	dbExists := `SELECT count(*) FROM system.databases WHERE name = 'signoz_analytics'`
	var count uint64
	err := conn.QueryRow(context.Background(), dbExists).Scan(&count)
	if err != nil {
		return err
	}

	if count == 0 {
		err = conn.Exec(context.Background(), fmt.Sprintf(database, cluster))
		if err != nil {
			return err
		}
	}

	// check if table exists
	tableExists := `SELECT count(*) FROM system.tables WHERE name = 'rule_state_history_v0' AND database = 'signoz_analytics'`
	var tableCount uint64
	err = conn.QueryRow(context.Background(), tableExists).Scan(&tableCount)
	if err != nil {
		return err
	}

	if tableCount == 0 {
		err = conn.Exec(context.Background(), fmt.Sprintf(localTable, cluster))
		if err != nil {
			return err
		}
	}

	// check if distributed table exists
	distributedTableExists := `SELECT count(*) FROM system.tables WHERE name = 'distributed_rule_state_history_v0' AND database = 'signoz_analytics'`
	var distributedTableCount uint64
	err = conn.QueryRow(context.Background(), distributedTableExists).Scan(&distributedTableCount)
	if err != nil {
		return err
	}

	if distributedTableCount == 0 {
		err = conn.Exec(context.Background(), fmt.Sprintf(distributedTable, cluster, cluster))
		if err != nil {
			return err
		}
	}

	return nil
}
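ClickHouseMigrate is written to be idempotent: the DDL uses IF NOT EXISTS and the function additionally counts rows in system.databases and system.tables before executing anything, so running it on every startup (as NewServer does above, inside a goroutine) is safe. A hedged sketch that factors out the repeated existence check; this helper is illustrative and not part of the diff:

package migrate

import (
	"context"

	"github.com/ClickHouse/clickhouse-go/v2/lib/driver"
)

// tableExists reports whether a table is already present, using the same
// count-based probe that ClickHouseMigrate repeats for the local and
// distributed tables.
func tableExists(ctx context.Context, conn driver.Conn, database, table string) (bool, error) {
	var count uint64
	query := `SELECT count(*) FROM system.tables WHERE database = ? AND name = ?`
	if err := conn.QueryRow(ctx, query, database, table).Scan(&count); err != nil {
		return false, err
	}
	return count > 0, nil
}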
@@ -114,47 +114,6 @@ type PodListResponse struct {
	Total int `json:"total"`
}

func (r *PodListResponse) SortBy(orderBy *v3.OrderBy) {
	switch orderBy.ColumnName {
	case "cpu":
		sort.Slice(r.Records, func(i, j int) bool {
			return r.Records[i].PodCPU > r.Records[j].PodCPU
		})
	case "cpu_request":
		sort.Slice(r.Records, func(i, j int) bool {
			return r.Records[i].PodCPURequest > r.Records[j].PodCPURequest
		})
	case "cpu_limit":
		sort.Slice(r.Records, func(i, j int) bool {
			return r.Records[i].PodCPULimit > r.Records[j].PodCPULimit
		})
	case "memory":
		sort.Slice(r.Records, func(i, j int) bool {
			return r.Records[i].PodMemory > r.Records[j].PodMemory
		})
	case "memory_request":
		sort.Slice(r.Records, func(i, j int) bool {
			return r.Records[i].PodMemoryRequest > r.Records[j].PodMemoryRequest
		})
	case "memory_limit":
		sort.Slice(r.Records, func(i, j int) bool {
			return r.Records[i].PodMemoryLimit > r.Records[j].PodMemoryLimit
		})
	case "restarts":
		sort.Slice(r.Records, func(i, j int) bool {
			return r.Records[i].RestartCount > r.Records[j].RestartCount
		})
	}

	// the default is descending
	if orderBy.Order == v3.DirectionAsc {
		// reverse the list
		for i, j := 0, len(r.Records)-1; i < j; i, j = i+1, j-1 {
			r.Records[i], r.Records[j] = r.Records[j], r.Records[i]
		}
	}
}
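Every SortBy removed in this file follows the same shape: sort descending on the requested column, then reverse the slice in place when ascending order is requested. A generic sketch of that pattern (uses the Go 1.21+ slices package; the original relies on sort.Slice with one closure per column):

package main

import (
	"fmt"
	"slices"
)

// sortDesc sorts records descending by key, then reverses for ascending,
// mirroring the "default is descending" convention of the removed methods.
func sortDesc[T any](records []T, key func(T) float64, asc bool) {
	slices.SortFunc(records, func(a, b T) int {
		switch {
		case key(a) > key(b):
			return -1
		case key(a) < key(b):
			return 1
		default:
			return 0
		}
	})
	if asc {
		slices.Reverse(records)
	}
}

func main() {
	type rec struct{ CPU float64 }
	rs := []rec{{1}, {3}, {2}}
	sortDesc(rs, func(r rec) float64 { return r.CPU }, false)
	fmt.Println(rs) // [{3} {2} {1}]
}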
type PodListRecord struct {
	PodUID string  `json:"podUID,omitempty"`
	PodCPU float64 `json:"podCPU"`

@@ -192,35 +151,6 @@ type NodeListResponse struct {
	Total int `json:"total"`
}

func (r *NodeListResponse) SortBy(orderBy *v3.OrderBy) {
	switch orderBy.ColumnName {
	case "cpu":
		sort.Slice(r.Records, func(i, j int) bool {
			return r.Records[i].NodeCPUUsage > r.Records[j].NodeCPUUsage
		})
	case "cpu_allocatable":
		sort.Slice(r.Records, func(i, j int) bool {
			return r.Records[i].NodeCPUAllocatable > r.Records[j].NodeCPUAllocatable
		})
	case "memory":
		sort.Slice(r.Records, func(i, j int) bool {
			return r.Records[i].NodeMemoryUsage > r.Records[j].NodeMemoryUsage
		})
	case "memory_allocatable":
		sort.Slice(r.Records, func(i, j int) bool {
			return r.Records[i].NodeMemoryAllocatable > r.Records[j].NodeMemoryAllocatable
		})
	}

	// the default is descending
	if orderBy.Order == v3.DirectionAsc {
		// reverse the list
		for i, j := 0, len(r.Records)-1; i < j; i, j = i+1, j-1 {
			r.Records[i], r.Records[j] = r.Records[j], r.Records[i]
		}
	}
}

type NodeCountByCondition struct {
	Ready    int `json:"ready"`
	NotReady int `json:"notReady"`

@@ -253,31 +183,6 @@ type NamespaceListResponse struct {
	Total int `json:"total"`
}

func (r *NamespaceListResponse) SortBy(orderBy *v3.OrderBy) {
	switch orderBy.ColumnName {
	case "cpu":
		sort.Slice(r.Records, func(i, j int) bool {
			return r.Records[i].CPUUsage > r.Records[j].CPUUsage
		})
	case "memory":
		sort.Slice(r.Records, func(i, j int) bool {
			return r.Records[i].MemoryUsage > r.Records[j].MemoryUsage
		})
	case "pod_phase":
		sort.Slice(r.Records, func(i, j int) bool {
			return r.Records[i].CountByPhase.Pending > r.Records[j].CountByPhase.Pending
		})
	}

	// the default is descending
	if orderBy.Order == v3.DirectionAsc {
		// reverse the list
		for i, j := 0, len(r.Records)-1; i < j; i, j = i+1, j-1 {
			r.Records[i], r.Records[j] = r.Records[j], r.Records[i]
		}
	}
}

type NamespaceListRecord struct {
	NamespaceName string  `json:"namespaceName"`
	CPUUsage      float64 `json:"cpuUsage"`

@@ -302,35 +207,6 @@ type ClusterListResponse struct {
	Total int `json:"total"`
}

func (r *ClusterListResponse) SortBy(orderBy *v3.OrderBy) {
	switch orderBy.ColumnName {
	case "cpu":
		sort.Slice(r.Records, func(i, j int) bool {
			return r.Records[i].CPUUsage > r.Records[j].CPUUsage
		})
	case "cpu_allocatable":
		sort.Slice(r.Records, func(i, j int) bool {
			return r.Records[i].CPUAllocatable > r.Records[j].CPUAllocatable
		})
	case "memory":
		sort.Slice(r.Records, func(i, j int) bool {
			return r.Records[i].MemoryUsage > r.Records[j].MemoryUsage
		})
	case "memory_allocatable":
		sort.Slice(r.Records, func(i, j int) bool {
			return r.Records[i].MemoryAllocatable > r.Records[j].MemoryAllocatable
		})
	}

	// the default is descending
	if orderBy.Order == v3.DirectionAsc {
		// reverse the list
		for i, j := 0, len(r.Records)-1; i < j; i, j = i+1, j-1 {
			r.Records[i], r.Records[j] = r.Records[j], r.Records[i]
		}
	}
}

type ClusterListRecord struct {
	ClusterUID string  `json:"clusterUID"`
	CPUUsage   float64 `json:"cpuUsage"`

@@ -356,55 +232,6 @@ type DeploymentListResponse struct {
	Total int `json:"total"`
}

func (r *DeploymentListResponse) SortBy(orderBy *v3.OrderBy) {
	switch orderBy.ColumnName {
	case "cpu":
		sort.Slice(r.Records, func(i, j int) bool {
			return r.Records[i].CPUUsage > r.Records[j].CPUUsage
		})
	case "cpu_request":
		sort.Slice(r.Records, func(i, j int) bool {
			return r.Records[i].CPURequest > r.Records[j].CPURequest
		})
	case "cpu_limit":
		sort.Slice(r.Records, func(i, j int) bool {
			return r.Records[i].CPULimit > r.Records[j].CPULimit
		})
	case "memory":
		sort.Slice(r.Records, func(i, j int) bool {
			return r.Records[i].MemoryUsage > r.Records[j].MemoryUsage
		})
	case "memory_request":
		sort.Slice(r.Records, func(i, j int) bool {
			return r.Records[i].MemoryRequest > r.Records[j].MemoryRequest
		})
	case "memory_limit":
		sort.Slice(r.Records, func(i, j int) bool {
			return r.Records[i].MemoryLimit > r.Records[j].MemoryLimit
		})
	case "desired_pods":
		sort.Slice(r.Records, func(i, j int) bool {
			return r.Records[i].DesiredPods > r.Records[j].DesiredPods
		})
	case "available_pods":
		sort.Slice(r.Records, func(i, j int) bool {
			return r.Records[i].AvailablePods > r.Records[j].AvailablePods
		})
	case "restarts":
		sort.Slice(r.Records, func(i, j int) bool {
			return r.Records[i].Restarts > r.Records[j].Restarts
		})
	}

	// the default is descending
	if orderBy.Order == v3.DirectionAsc {
		// reverse the list
		for i, j := 0, len(r.Records)-1; i < j; i, j = i+1, j-1 {
			r.Records[i], r.Records[j] = r.Records[j], r.Records[i]
		}
	}
}

type DeploymentListRecord struct {
	DeploymentName string  `json:"deploymentName"`
	CPUUsage       float64 `json:"cpuUsage"`

@@ -435,55 +262,6 @@ type DaemonSetListResponse struct {
	Total int `json:"total"`
}

func (r *DaemonSetListResponse) SortBy(orderBy *v3.OrderBy) {
	switch orderBy.ColumnName {
	case "cpu":
		sort.Slice(r.Records, func(i, j int) bool {
			return r.Records[i].CPUUsage > r.Records[j].CPUUsage
		})
	case "cpu_request":
		sort.Slice(r.Records, func(i, j int) bool {
			return r.Records[i].CPURequest > r.Records[j].CPURequest
		})
	case "cpu_limit":
		sort.Slice(r.Records, func(i, j int) bool {
			return r.Records[i].CPULimit > r.Records[j].CPULimit
		})
	case "memory":
		sort.Slice(r.Records, func(i, j int) bool {
			return r.Records[i].MemoryUsage > r.Records[j].MemoryUsage
		})
	case "memory_request":
		sort.Slice(r.Records, func(i, j int) bool {
			return r.Records[i].MemoryRequest > r.Records[j].MemoryRequest
		})
	case "memory_limit":
		sort.Slice(r.Records, func(i, j int) bool {
			return r.Records[i].MemoryLimit > r.Records[j].MemoryLimit
		})
	case "restarts":
		sort.Slice(r.Records, func(i, j int) bool {
			return r.Records[i].Restarts > r.Records[j].Restarts
		})
	case "desired_nodes":
		sort.Slice(r.Records, func(i, j int) bool {
			return r.Records[i].DesiredNodes > r.Records[j].DesiredNodes
		})
	case "available_nodes":
		sort.Slice(r.Records, func(i, j int) bool {
			return r.Records[i].AvailableNodes > r.Records[j].AvailableNodes
		})
	}

	// the default is descending
	if orderBy.Order == v3.DirectionAsc {
		// reverse the list
		for i, j := 0, len(r.Records)-1; i < j; i, j = i+1, j-1 {
			r.Records[i], r.Records[j] = r.Records[j], r.Records[i]
		}
	}
}

type DaemonSetListRecord struct {
	DaemonSetName string  `json:"daemonSetName"`
	CPUUsage      float64 `json:"cpuUsage"`

@@ -514,55 +292,6 @@ type StatefulSetListResponse struct {
	Total int `json:"total"`
}

func (r *StatefulSetListResponse) SortBy(orderBy *v3.OrderBy) {
	switch orderBy.ColumnName {
	case "cpu":
		sort.Slice(r.Records, func(i, j int) bool {
			return r.Records[i].CPUUsage > r.Records[j].CPUUsage
		})
	case "cpu_request":
		sort.Slice(r.Records, func(i, j int) bool {
			return r.Records[i].CPURequest > r.Records[j].CPURequest
		})
	case "cpu_limit":
		sort.Slice(r.Records, func(i, j int) bool {
			return r.Records[i].CPULimit > r.Records[j].CPULimit
		})
	case "memory":
		sort.Slice(r.Records, func(i, j int) bool {
			return r.Records[i].MemoryUsage > r.Records[j].MemoryUsage
		})
	case "memory_request":
		sort.Slice(r.Records, func(i, j int) bool {
			return r.Records[i].MemoryRequest > r.Records[j].MemoryRequest
		})
	case "memory_limit":
		sort.Slice(r.Records, func(i, j int) bool {
			return r.Records[i].MemoryLimit > r.Records[j].MemoryLimit
		})
	case "restarts":
		sort.Slice(r.Records, func(i, j int) bool {
			return r.Records[i].Restarts > r.Records[j].Restarts
		})
	case "desired_pods":
		sort.Slice(r.Records, func(i, j int) bool {
			return r.Records[i].DesiredPods > r.Records[j].DesiredPods
		})
	case "available_pods":
		sort.Slice(r.Records, func(i, j int) bool {
			return r.Records[i].AvailablePods > r.Records[j].AvailablePods
		})
	}

	// the default is descending
	if orderBy.Order == v3.DirectionAsc {
		// reverse the list
		for i, j := 0, len(r.Records)-1; i < j; i, j = i+1, j-1 {
			r.Records[i], r.Records[j] = r.Records[j], r.Records[i]
		}
	}
}

type StatefulSetListRecord struct {
	StatefulSetName string  `json:"statefulSetName"`
	CPUUsage        float64 `json:"cpuUsage"`

@@ -593,63 +322,6 @@ type JobListResponse struct {
	Total int `json:"total"`
}

func (r *JobListResponse) SortBy(orderBy *v3.OrderBy) {
	switch orderBy.ColumnName {
	case "cpu":
		sort.Slice(r.Records, func(i, j int) bool {
			return r.Records[i].CPUUsage > r.Records[j].CPUUsage
		})
	case "cpu_request":
		sort.Slice(r.Records, func(i, j int) bool {
			return r.Records[i].CPURequest > r.Records[j].CPURequest
		})
	case "cpu_limit":
		sort.Slice(r.Records, func(i, j int) bool {
			return r.Records[i].CPULimit > r.Records[j].CPULimit
		})
	case "memory":
		sort.Slice(r.Records, func(i, j int) bool {
			return r.Records[i].MemoryUsage > r.Records[j].MemoryUsage
		})
	case "memory_request":
		sort.Slice(r.Records, func(i, j int) bool {
			return r.Records[i].MemoryRequest > r.Records[j].MemoryRequest
		})
	case "memory_limit":
		sort.Slice(r.Records, func(i, j int) bool {
			return r.Records[i].MemoryLimit > r.Records[j].MemoryLimit
		})
	case "restarts":
		sort.Slice(r.Records, func(i, j int) bool {
			return r.Records[i].Restarts > r.Records[j].Restarts
		})
	case "desired_pods":
		sort.Slice(r.Records, func(i, j int) bool {
			return r.Records[i].DesiredSuccessfulPods > r.Records[j].DesiredSuccessfulPods
		})
	case "active_pods":
		sort.Slice(r.Records, func(i, j int) bool {
			return r.Records[i].ActivePods > r.Records[j].ActivePods
		})
	case "failed_pods":
		sort.Slice(r.Records, func(i, j int) bool {
			return r.Records[i].FailedPods > r.Records[j].FailedPods
		})
	case "successful_pods":
		sort.Slice(r.Records, func(i, j int) bool {
			return r.Records[i].SuccessfulPods > r.Records[j].SuccessfulPods
		})
	}

	// the default is descending
	if orderBy.Order == v3.DirectionAsc {
		// reverse the list
		for i, j := 0, len(r.Records)-1; i < j; i, j = i+1, j-1 {
			r.Records[i], r.Records[j] = r.Records[j], r.Records[i]
		}
	}
}

type JobListRecord struct {
	JobName  string  `json:"jobName"`
	CPUUsage float64 `json:"cpuUsage"`

@@ -682,43 +354,6 @@ type VolumeListResponse struct {
	Total int `json:"total"`
}

func (r *VolumeListResponse) SortBy(orderBy *v3.OrderBy) {
	switch orderBy.ColumnName {
	case "available":
		sort.Slice(r.Records, func(i, j int) bool {
			return r.Records[i].VolumeAvailable > r.Records[j].VolumeAvailable
		})
	case "capacity":
		sort.Slice(r.Records, func(i, j int) bool {
			return r.Records[i].VolumeCapacity > r.Records[j].VolumeCapacity
		})
	case "usage":
		sort.Slice(r.Records, func(i, j int) bool {
			return r.Records[i].VolumeUsage > r.Records[j].VolumeUsage
		})
	case "inodes":
		sort.Slice(r.Records, func(i, j int) bool {
			return r.Records[i].VolumeInodes > r.Records[j].VolumeInodes
		})
	case "inodes_free":
		sort.Slice(r.Records, func(i, j int) bool {
			return r.Records[i].VolumeInodesFree > r.Records[j].VolumeInodesFree
		})
	case "inodes_used":
		sort.Slice(r.Records, func(i, j int) bool {
			return r.Records[i].VolumeInodesUsed > r.Records[j].VolumeInodesUsed
		})
	}

	// the default is descending
	if orderBy.Order == v3.DirectionAsc {
		// reverse the list
		for i, j := 0, len(r.Records)-1; i < j; i, j = i+1, j-1 {
			r.Records[i], r.Records[j] = r.Records[j], r.Records[i]
		}
	}
}

type VolumeListRecord struct {
	PersistentVolumeClaimName string  `json:"persistentVolumeClaimName"`
	VolumeAvailable           float64 `json:"volumeAvailable"`
@@ -322,7 +322,6 @@ const (
	AttributeKeyTypeTag                  AttributeKeyType = "tag"
	AttributeKeyTypeResource             AttributeKeyType = "resource"
	AttributeKeyTypeInstrumentationScope AttributeKeyType = "scope"
	AttributeKeyTypeSpanSearchScope      AttributeKeyType = "spanSearchScope"
)

func (t AttributeKeyType) String() string {
@@ -8,19 +8,18 @@ import (
	"os/signal"
	"syscall"

	"go.signoz.io/signoz/pkg/factory"
	"go.uber.org/zap"
)

type Registry struct {
	services []factory.Service
	services []NamedService
	logger   *zap.Logger
	startCh  chan error
	stopCh   chan error
}

// New creates a new registry of services. It needs at least one service in the input.
func New(logger *zap.Logger, services ...factory.Service) (*Registry, error) {
func New(logger *zap.Logger, services ...NamedService) (*Registry, error) {
	if logger == nil {
		return nil, fmt.Errorf("cannot build registry, logger is required")
	}

@@ -39,7 +38,7 @@ func New(logger *zap.Logger, services ...factory.Service) (*Registry, error) {

func (r *Registry) Start(ctx context.Context) error {
	for _, s := range r.services {
		go func(s factory.Service) {
		go func(s Service) {
			err := s.Start(ctx)
			r.startCh <- err
		}(s)

@@ -67,7 +66,7 @@ func (r *Registry) Wait(ctx context.Context) error {

func (r *Registry) Stop(ctx context.Context) error {
	for _, s := range r.services {
		go func(s factory.Service) {
		go func(s Service) {
			err := s.Stop(ctx)
			r.stopCh <- err
		}(s)

@@ -6,15 +6,14 @@ import (
	"testing"

	"github.com/stretchr/testify/require"
	"go.signoz.io/signoz/pkg/factory/servicetest"
	"go.uber.org/zap"
)

func TestRegistryWith2HttpServers(t *testing.T) {
	http1, err := servicetest.NewHttpService("http1")
	http1, err := newHttpService("http1")
	require.NoError(t, err)

	http2, err := servicetest.NewHttpService("http2")
	http2, err := newHttpService("http2")
	require.NoError(t, err)

	registry, err := New(zap.NewNop(), http1, http2)

@@ -35,10 +34,10 @@ func TestRegistryWith2HttpServers(t *testing.T) {
}

func TestRegistryWith2HttpServersWithoutWait(t *testing.T) {
	http1, err := servicetest.NewHttpService("http1")
	http1, err := newHttpService("http1")
	require.NoError(t, err)

	http2, err := servicetest.NewHttpService("http2")
	http2, err := newHttpService("http2")
	require.NoError(t, err)

	registry, err := New(zap.NewNop(), http1, http2)
16
pkg/registry/service.go
Normal file
@@ -0,0 +1,16 @@
package registry

import "context"

type Service interface {
	// Starts a service. The service should return an error if it cannot be started.
	Start(context.Context) error
	// Stops a service.
	Stop(context.Context) error
}

type NamedService interface {
	// Identifier of a service. It should be unique across all services.
	Name() string
	Service
}
49
pkg/registry/service_test.go
Normal file
@@ -0,0 +1,49 @@
package registry

import (
	"context"
	"net"
	"net/http"
)

var _ NamedService = (*httpService)(nil)

type httpService struct {
	Listener net.Listener
	Server   *http.Server
	name     string
}

func newHttpService(name string) (*httpService, error) {
	return &httpService{
		name:   name,
		Server: &http.Server{},
	}, nil
}

func (service *httpService) Name() string {
	return service.name
}

func (service *httpService) Start(ctx context.Context) error {
	listener, err := net.Listen("tcp", "localhost:0")
	if err != nil {
		return err
	}
	service.Listener = listener

	if err := service.Server.Serve(service.Listener); err != nil {
		if err != http.ErrServerClosed {
			return err
		}
	}
	return nil
}

func (service *httpService) Stop(ctx context.Context) error {
	if err := service.Server.Shutdown(ctx); err != nil {
		return err
	}

	return nil
}
@@ -7,36 +7,24 @@ import (
	"go.signoz.io/signoz/pkg/config"
	"go.signoz.io/signoz/pkg/factory"
	"go.signoz.io/signoz/pkg/instrumentation"
	"go.signoz.io/signoz/pkg/sqlmigrator"
	"go.signoz.io/signoz/pkg/sqlstore"
	"go.signoz.io/signoz/pkg/web"
)

// Config defines the entire input configuration of signoz.
// Config defines the entire configuration of signoz.
type Config struct {
	// Instrumentation config
	Instrumentation instrumentation.Config `mapstructure:"instrumentation"`

	// Web config
	Web web.Config `mapstructure:"web"`

	// Cache config
	Cache cache.Config `mapstructure:"cache"`

	// SQLStore config
	SQLStore sqlstore.Config `mapstructure:"sqlstore"`

	// SQLMigrator config
	SQLMigrator sqlmigrator.Config `mapstructure:"sqlmigrator"`
	Web      web.Config      `mapstructure:"web"`
	Cache    cache.Config    `mapstructure:"cache"`
	SQLStore sqlstore.Config `mapstructure:"sqlstore"`
}

func NewConfig(ctx context.Context, resolverConfig config.ResolverConfig) (Config, error) {
	configFactories := []factory.ConfigFactory{
		instrumentation.NewConfigFactory(),
		web.NewConfigFactory(),
		cache.NewConfigFactory(),
		sqlstore.NewConfigFactory(),
		sqlmigrator.NewConfigFactory(),
		cache.NewConfigFactory(),
	}

	conf, err := config.New(ctx, resolverConfig, configFactories)
@@ -1,54 +0,0 @@
package signoz

import (
	"go.signoz.io/signoz/pkg/cache"
	"go.signoz.io/signoz/pkg/cache/memorycache"
	"go.signoz.io/signoz/pkg/cache/rediscache"
	"go.signoz.io/signoz/pkg/factory"
	"go.signoz.io/signoz/pkg/sqlmigration"
	"go.signoz.io/signoz/pkg/sqlstore"
	"go.signoz.io/signoz/pkg/sqlstore/sqlitesqlstore"
	"go.signoz.io/signoz/pkg/web"
	"go.signoz.io/signoz/pkg/web/noopweb"
	"go.signoz.io/signoz/pkg/web/routerweb"
)

type ProviderConfig struct {
	// Map of all cache provider factories
	CacheProviderFactories factory.NamedMap[factory.ProviderFactory[cache.Cache, cache.Config]]

	// Map of all web provider factories
	WebProviderFactories factory.NamedMap[factory.ProviderFactory[web.Web, web.Config]]

	// Map of all sqlstore provider factories
	SQLStoreProviderFactories factory.NamedMap[factory.ProviderFactory[sqlstore.SQLStore, sqlstore.Config]]

	// Map of all sql migration provider factories
	SQLMigrationProviderFactories factory.NamedMap[factory.ProviderFactory[sqlmigration.SQLMigration, sqlmigration.Config]]
}

func NewProviderConfig() ProviderConfig {
	return ProviderConfig{
		CacheProviderFactories: factory.MustNewNamedMap(
			memorycache.NewFactory(),
			rediscache.NewFactory(),
		),
		WebProviderFactories: factory.MustNewNamedMap(
			routerweb.NewFactory(),
			noopweb.NewFactory(),
		),
		SQLStoreProviderFactories: factory.MustNewNamedMap(
			sqlitesqlstore.NewFactory(),
		),
		SQLMigrationProviderFactories: factory.MustNewNamedMap(
			sqlmigration.NewAddDataMigrationsFactory(),
			sqlmigration.NewAddOrganizationFactory(),
			sqlmigration.NewAddPreferencesFactory(),
			sqlmigration.NewAddDashboardsFactory(),
			sqlmigration.NewAddSavedViewsFactory(),
			sqlmigration.NewAddAgentsFactory(),
			sqlmigration.NewAddPipelinesFactory(),
			sqlmigration.NewAddIntegrationsFactory(),
		),
	}
}

@@ -1,16 +0,0 @@
package signoz

import (
	"testing"

	"github.com/stretchr/testify/assert"
)

func TestNewProviderConfig(t *testing.T) {
	// This is a test to ensure that provider factories can be created without panicking since
	// we are using the factory.MustNewNamedMap function to initialize the provider factories.
	// It also helps us catch these errors during testing instead of runtime.
	assert.NotPanics(t, func() {
		NewProviderConfig()
	})
}
@@ -1,14 +1,13 @@
package signoz

import (
	"context"

	"go.signoz.io/signoz/pkg/cache"
	"go.signoz.io/signoz/pkg/factory"
	"go.signoz.io/signoz/pkg/instrumentation"
	"go.signoz.io/signoz/pkg/version"

	"go.signoz.io/signoz/pkg/cache/memorycache"
	"go.signoz.io/signoz/pkg/cache/rediscache"
	"go.signoz.io/signoz/pkg/config"
	"go.signoz.io/signoz/pkg/web"
	"go.signoz.io/signoz/pkg/web/routerweb"
	"go.uber.org/zap"
)

type SigNoz struct {

@@ -16,41 +15,19 @@ type SigNoz struct {
	Web web.Web
}

func New(
	ctx context.Context,
	config Config,
	providerConfig ProviderConfig,
) (*SigNoz, error) {
	// Initialize instrumentation
	instrumentation, err := instrumentation.New(ctx, version.Build{}, config.Instrumentation)
	if err != nil {
		return nil, err
func New(config config.Config, skipWebFrontend bool) (*SigNoz, error) {
	var cache cache.Cache

	// init for the cache
	switch config.Cache.Provider {
	case "memory":
		cache = memorycache.New(&config.Cache.Memory)
	case "redis":
		cache = rediscache.New(&config.Cache.Redis)
	}

	// Get the provider settings from instrumentation
	providerSettings := instrumentation.ToProviderSettings()

	// Initialize cache from the available cache provider factories
	cache, err := factory.NewProviderFromNamedMap(
		ctx,
		providerSettings,
		config.Cache,
		providerConfig.CacheProviderFactories,
		config.Cache.Provider,
	)
	if err != nil {
		return nil, err
	}

	// Initialize web from the available web provider factories
	web, err := factory.NewProviderFromNamedMap(
		ctx,
		providerSettings,
		config.Web,
		providerConfig.WebProviderFactories,
		config.Web.Provider(),
	)
	if err != nil {
	web, err := routerweb.New(zap.L(), config.Web)
	if err != nil && !skipWebFrontend {
		return nil, err
	}

@@ -1,19 +0,0 @@
package sqlmigration

import (
	"go.signoz.io/signoz/pkg/factory"
)

type Config struct{}

func NewConfigFactory() factory.ConfigFactory {
	return factory.NewConfigFactory(factory.MustNewName("sqlmigration"), newConfig)
}

func newConfig() factory.Config {
	return Config{}
}

func (c Config) Validate() error {
	return nil
}
@@ -1,4 +1,4 @@
|
||||
package sqlmigrationtest
|
||||
package sqlmigrator
|
||||
|
||||
import (
|
||||
"context"
|
||||
@@ -6,13 +6,12 @@ import (
|
||||
"github.com/uptrace/bun"
|
||||
"github.com/uptrace/bun/migrate"
|
||||
"go.signoz.io/signoz/pkg/factory"
|
||||
"go.signoz.io/signoz/pkg/sqlmigration"
|
||||
)
|
||||
|
||||
type noopMigration struct{}
|
||||
|
||||
func NoopMigrationFactory() factory.ProviderFactory[sqlmigration.SQLMigration, sqlmigration.Config] {
|
||||
return factory.NewProviderFactory(factory.MustNewName("noop"), func(_ context.Context, _ factory.ProviderSettings, _ sqlmigration.Config) (sqlmigration.SQLMigration, error) {
|
||||
func NoopMigrationFactory() factory.ProviderFactory[SQLMigration, Config] {
|
||||
return factory.NewProviderFactory(factory.MustNewName("noop"), func(ctx context.Context, ps factory.ProviderSettings, c Config) (SQLMigration, error) {
|
||||
return &noopMigration{}, nil
|
||||
})
|
||||
}
|
||||
@@ -1,4 +1,4 @@
|
||||
package sqlmigration
|
||||
package sqlmigrator
|
||||
|
||||
import (
|
||||
"context"
|
||||
@@ -11,22 +11,11 @@ import (
 	"go.signoz.io/signoz/pkg/factory"
 )

-// SQLMigration is the interface for a single migration.
-type SQLMigration interface {
-	// Register registers the migration with the given migrations. Each migration needs to be registered
-	// in a dedicated `*.go` file so that the correct migration semantics can be detected.
-	Register(*migrate.Migrations) error
-	// Up runs the migration.
-	Up(context.Context, *bun.DB) error
-	// Down rolls back the migration.
-	Down(context.Context, *bun.DB) error
-}
-
 var (
 	ErrNoExecute = errors.New("no execute")
 )

-func New(
+func NewMigrations(
 	ctx context.Context,
 	settings factory.ProviderSettings,
 	config Config,
@@ -49,13 +38,13 @@ func New(
 	return migrations, nil
 }

-func MustNew(
+func MustNewMigrations(
 	ctx context.Context,
 	settings factory.ProviderSettings,
 	config Config,
 	factories factory.NamedMap[factory.ProviderFactory[SQLMigration, Config]],
 ) *migrate.Migrations {
-	migrations, err := New(ctx, settings, config, factories)
+	migrations, err := NewMigrations(ctx, settings, config, factories)
 	if err != nil {
 		panic(err)
 	}
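For reference, a concrete migration satisfying the SQLMigration interface moved out of this file (the noop factory above shows it now lives in the sqlmigrator package) might look like the sketch below. The table and column names are illustrative only; it assumes bun's migrate.Migrations.Register accepts the up and down functions, which is consistent with the interface doc's requirement that each migration sit in its own *.go file (bun derives the migration name from the registering file).

package migration

import (
	"context"

	"github.com/uptrace/bun"
	"github.com/uptrace/bun/migrate"
)

type addExampleColumn struct{}

// Register adds the migration to the set; per the interface docs, each
// migration lives in a dedicated *.go file.
func (m *addExampleColumn) Register(migrations *migrate.Migrations) error {
	return migrations.Register(m.Up, m.Down)
}

// Up adds an illustrative column.
func (m *addExampleColumn) Up(ctx context.Context, db *bun.DB) error {
	_, err := db.NewAddColumn().
		Table("examples").
		ColumnExpr("notes TEXT").
		Exec(ctx)
	return err
}

// Down drops the column again.
func (m *addExampleColumn) Down(ctx context.Context, db *bun.DB) error {
	_, err := db.NewDropColumn().
		Table("examples").
		Column("notes").
		Exec(ctx)
	return err
}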
@@ -1,4 +1,4 @@
-package sqlmigration
+package migration

 import (
 	"context"
@@ -6,15 +6,16 @@ import (
 	"github.com/uptrace/bun"
 	"github.com/uptrace/bun/migrate"
 	"go.signoz.io/signoz/pkg/factory"
+	"go.signoz.io/signoz/pkg/sqlmigrator"
 )

 type addDataMigrations struct{}

-func NewAddDataMigrationsFactory() factory.ProviderFactory[SQLMigration, Config] {
+func NewAddDataMigrationsFactory() factory.ProviderFactory[sqlmigrator.SQLMigration, sqlmigrator.Config] {
 	return factory.NewProviderFactory(factory.MustNewName("add_data_migrations"), newAddDataMigrations)
 }

-func newAddDataMigrations(_ context.Context, _ factory.ProviderSettings, _ Config) (SQLMigration, error) {
+func newAddDataMigrations(_ context.Context, _ factory.ProviderSettings, _ sqlmigrator.Config) (sqlmigrator.SQLMigration, error) {
 	return &addDataMigrations{}, nil
 }
@@ -1,4 +1,4 @@
-package sqlmigration
+package migration

 import (
 	"context"
@@ -6,15 +6,16 @@ import (
 	"github.com/uptrace/bun"
 	"github.com/uptrace/bun/migrate"
 	"go.signoz.io/signoz/pkg/factory"
+	"go.signoz.io/signoz/pkg/sqlmigrator"
 )

 type addOrganization struct{}

-func NewAddOrganizationFactory() factory.ProviderFactory[SQLMigration, Config] {
+func NewAddOrganizationFactory() factory.ProviderFactory[sqlmigrator.SQLMigration, sqlmigrator.Config] {
 	return factory.NewProviderFactory(factory.MustNewName("add_organization"), newAddOrganization)
 }

-func newAddOrganization(_ context.Context, _ factory.ProviderSettings, _ Config) (SQLMigration, error) {
+func newAddOrganization(_ context.Context, _ factory.ProviderSettings, _ sqlmigrator.Config) (sqlmigrator.SQLMigration, error) {
 	return &addOrganization{}, nil
 }
@@ -1,4 +1,4 @@
-package sqlmigration
+package migration

 import (
 	"context"
@@ -6,15 +6,16 @@ import (
 	"github.com/uptrace/bun"
 	"github.com/uptrace/bun/migrate"
 	"go.signoz.io/signoz/pkg/factory"
+	"go.signoz.io/signoz/pkg/sqlmigrator"
 )

 type addPreferences struct{}

-func NewAddPreferencesFactory() factory.ProviderFactory[SQLMigration, Config] {
+func NewAddPreferencesFactory() factory.ProviderFactory[sqlmigrator.SQLMigration, sqlmigrator.Config] {
 	return factory.NewProviderFactory(factory.MustNewName("add_preferences"), newAddPreferences)
 }

-func newAddPreferences(_ context.Context, _ factory.ProviderSettings, _ Config) (SQLMigration, error) {
+func newAddPreferences(_ context.Context, _ factory.ProviderSettings, _ sqlmigrator.Config) (sqlmigrator.SQLMigration, error) {
 	return &addPreferences{}, nil
 }
@@ -1,4 +1,4 @@
-package sqlmigration
+package migration

 import (
 	"context"
@@ -6,15 +6,16 @@ import (
 	"github.com/uptrace/bun"
 	"github.com/uptrace/bun/migrate"
 	"go.signoz.io/signoz/pkg/factory"
+	"go.signoz.io/signoz/pkg/sqlmigrator"
 )

 type addDashboards struct{}

-func NewAddDashboardsFactory() factory.ProviderFactory[SQLMigration, Config] {
+func NewAddDashboardsFactory() factory.ProviderFactory[sqlmigrator.SQLMigration, sqlmigrator.Config] {
 	return factory.NewProviderFactory(factory.MustNewName("add_dashboards"), newAddDashboards)
 }

-func newAddDashboards(_ context.Context, _ factory.ProviderSettings, _ Config) (SQLMigration, error) {
+func newAddDashboards(_ context.Context, _ factory.ProviderSettings, _ sqlmigrator.Config) (sqlmigrator.SQLMigration, error) {
 	return &addDashboards{}, nil
 }
@@ -95,8 +96,8 @@ func (migration *addDashboards) Up(ctx context.Context, db *bun.DB) error {
 		NewAddColumn().
 		Table("rules").
 		ColumnExpr("created_at datetime").
-		Apply(WrapIfNotExists(ctx, db, "rules", "created_at")).
-		Exec(ctx); err != nil && err != ErrNoExecute {
+		Apply(sqlmigrator.WrapIfNotExists(ctx, db, "rules", "created_at")).
+		Exec(ctx); err != nil && err != sqlmigrator.ErrNoExecute {
 		return err
 	}

@@ -105,8 +106,8 @@ func (migration *addDashboards) Up(ctx context.Context, db *bun.DB) error {
 		NewAddColumn().
 		Table("rules").
 		ColumnExpr("created_by TEXT").
-		Apply(WrapIfNotExists(ctx, db, "rules", "created_by")).
-		Exec(ctx); err != nil && err != ErrNoExecute {
+		Apply(sqlmigrator.WrapIfNotExists(ctx, db, "rules", "created_by")).
+		Exec(ctx); err != nil && err != sqlmigrator.ErrNoExecute {
 		return err
 	}

@@ -115,8 +116,8 @@ func (migration *addDashboards) Up(ctx context.Context, db *bun.DB) error {
 		NewAddColumn().
 		Table("rules").
 		ColumnExpr("updated_by TEXT").
-		Apply(WrapIfNotExists(ctx, db, "rules", "updated_by")).
-		Exec(ctx); err != nil && err != ErrNoExecute {
+		Apply(sqlmigrator.WrapIfNotExists(ctx, db, "rules", "updated_by")).
+		Exec(ctx); err != nil && err != sqlmigrator.ErrNoExecute {
 		return err
 	}

@@ -125,8 +126,8 @@ func (migration *addDashboards) Up(ctx context.Context, db *bun.DB) error {
 		NewAddColumn().
 		Table("dashboards").
 		ColumnExpr("created_by TEXT").
-		Apply(WrapIfNotExists(ctx, db, "dashboards", "created_by")).
-		Exec(ctx); err != nil && err != ErrNoExecute {
+		Apply(sqlmigrator.WrapIfNotExists(ctx, db, "dashboards", "created_by")).
+		Exec(ctx); err != nil && err != sqlmigrator.ErrNoExecute {
 		return err
 	}

@@ -135,8 +136,8 @@ func (migration *addDashboards) Up(ctx context.Context, db *bun.DB) error {
 		NewAddColumn().
 		Table("dashboards").
 		ColumnExpr("updated_by TEXT").
-		Apply(WrapIfNotExists(ctx, db, "dashboards", "updated_by")).
-		Exec(ctx); err != nil && err != ErrNoExecute {
+		Apply(sqlmigrator.WrapIfNotExists(ctx, db, "dashboards", "updated_by")).
+		Exec(ctx); err != nil && err != sqlmigrator.ErrNoExecute {
 		return err
 	}

@@ -145,8 +146,8 @@ func (migration *addDashboards) Up(ctx context.Context, db *bun.DB) error {
 		NewAddColumn().
 		Table("dashboards").
 		ColumnExpr("locked INTEGER DEFAULT 0").
-		Apply(WrapIfNotExists(ctx, db, "dashboards", "locked")).
-		Exec(ctx); err != nil && err != ErrNoExecute {
+		Apply(sqlmigrator.WrapIfNotExists(ctx, db, "dashboards", "locked")).
+		Exec(ctx); err != nil && err != sqlmigrator.ErrNoExecute {
 		return err
 	}
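The repeated Apply/ErrNoExecute pattern above implies a guard helper: WrapIfNotExists must arrange for Exec to fail with the ErrNoExecute sentinel when the column already exists, so the caller can treat that error as "nothing to do". Below is a plausible standalone sketch, not the actual SigNoz implementation: it assumes a SQLite backend (pragma_table_info) and that bun's AddColumnQuery exposes an Err method for short-circuiting Exec.

package sqlmigrator

import (
	"context"
	"errors"

	"github.com/uptrace/bun"
)

// ErrNoExecute is the sentinel callers compare against, as in the diff above.
var ErrNoExecute = errors.New("no execute")

// WrapIfNotExists returns an Apply() hook that poisons the ADD COLUMN query
// with ErrNoExecute when the column already exists (sketch; assumptions noted
// in the lead-in above).
func WrapIfNotExists(ctx context.Context, db *bun.DB, table string, column string) func(*bun.AddColumnQuery) *bun.AddColumnQuery {
	return func(q *bun.AddColumnQuery) *bun.AddColumnQuery {
		var count int
		if err := db.NewRaw(
			"SELECT COUNT(*) FROM pragma_table_info(?) WHERE name = ?",
			table, column,
		).Scan(ctx, &count); err != nil {
			// If the existence check itself fails, let the ADD COLUMN run
			// and surface its own error.
			return q
		}
		if count > 0 {
			return q.Err(ErrNoExecute)
		}
		return q
	}
}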
@@ -1,4 +1,4 @@
-package sqlmigration
+package migration

 import (
 	"context"
@@ -6,15 +6,16 @@ import (
 	"github.com/uptrace/bun"
 	"github.com/uptrace/bun/migrate"
 	"go.signoz.io/signoz/pkg/factory"
+	"go.signoz.io/signoz/pkg/sqlmigrator"
 )

 type addSavedViews struct{}

-func NewAddSavedViewsFactory() factory.ProviderFactory[SQLMigration, Config] {
+func NewAddSavedViewsFactory() factory.ProviderFactory[sqlmigrator.SQLMigration, sqlmigrator.Config] {
 	return factory.NewProviderFactory(factory.MustNewName("add_saved_views"), newAddSavedViews)
 }

-func newAddSavedViews(_ context.Context, _ factory.ProviderSettings, _ Config) (SQLMigration, error) {
+func newAddSavedViews(_ context.Context, _ factory.ProviderSettings, _ sqlmigrator.Config) (sqlmigrator.SQLMigration, error) {
 	return &addSavedViews{}, nil
 }
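Taken together, a caller would assemble these per-migration factories into a named map, build the migration set with MustNewMigrations, and hand it to bun's migrator. A hypothetical wiring sketch follows; factory.MustNewNamedMap and the sqlmigrator/migration import paths are assumptions based on the identifiers seen in this diff, not confirmed paths.

package main

import (
	"context"

	"github.com/uptrace/bun"
	"github.com/uptrace/bun/migrate"

	"go.signoz.io/signoz/pkg/factory"
	"go.signoz.io/signoz/pkg/sqlmigrator"
	"go.signoz.io/signoz/pkg/sqlmigrator/migration"
)

// runMigrations collects the factories from this diff and executes them with
// bun's migrator (sketch; see assumptions above).
func runMigrations(ctx context.Context, db *bun.DB, settings factory.ProviderSettings) error {
	factories := factory.MustNewNamedMap(
		migration.NewAddDataMigrationsFactory(),
		migration.NewAddOrganizationFactory(),
		migration.NewAddPreferencesFactory(),
		migration.NewAddDashboardsFactory(),
		migration.NewAddSavedViewsFactory(),
	)
	migrations := sqlmigrator.MustNewMigrations(ctx, settings, sqlmigrator.Config{}, factories)

	migrator := migrate.NewMigrator(db, migrations)
	if err := migrator.Init(ctx); err != nil {
		return err
	}
	_, err := migrator.Migrate(ctx)
	return err
}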
Some files were not shown because too many files have changed in this diff.