Compare commits

...

22 Commits

Author SHA1 Message Date
nityanandagohain
99edf96910 fix: use new error in pipelines handler 2025-07-11 17:38:27 +05:30
Nityananda Gohain
552d44d208 chore: send email on role update (#8489)
* chore: send email on role update

* fix: minor changes

* fix: update template

* fix: minor changes

* fix: return updated user
2025-07-10 15:17:04 +00:00
SagarRajput-7
497315579f chore: added got at 11.8.5 patch to fix image-webpack-loader vulnerability (#8500) 2025-07-10 20:22:39 +05:30
SagarRajput-7
bfaac15ccb chore: replace image-webpack-loader (deprecated) with image-minimizer-webpack-plugin (#8498)
* chore: replace image-webpack-loader (deprecated) with image-minimizer-webpack-plugin

* chore: used sharp

* chore: remove got resolution
2025-07-10 19:17:02 +05:30
SagarRajput-7
5e18be6a23 chore: added got at 11.8.5 patch to fix image-webpack-loader vulnerability (#8493) 2025-07-10 16:07:10 +05:30
Yunus M
1793706f87 feat: show ingestion keys to self hosted users (#8490) 2025-07-10 14:51:53 +05:30
aniketio-ctrl
da2a3c738a fix(aws-elastic-cache): corrected variable query for elastic cache (#8487)
* fix(aws-elastic-cache): corrected variable query for elastic cache overview.json

* fix(aws-elastic-cache): corrected variable query for elastic cache overview.json

---------

Co-authored-by: Piyush Singariya <piyushsingariya@gmail.com>
2025-07-09 10:21:15 +00:00
primus-bot[bot]
d17dab9a1d chore(release): bump to v0.89.0 (#8482) 2025-07-09 12:06:47 +05:30
Srikanth Chekuri
88b75d4e72 fix(apdex): use right metric name for metadata (#8463) 2025-07-09 09:08:40 +05:30
Sahil Khan
6327ab5ec6 fix: allowed user to select text in json body field in log details (#8450) 2025-07-08 21:28:05 +05:30
Sahil Khan
5b09490ad7 fix: trace details v2 ui bugs (#8448) 2025-07-08 13:51:40 +00:00
Nageshbansal
b50127b567 feat(statsreporter): add railway platform detection (#8467) 2025-07-08 13:01:21 +00:00
Vishal Sharma
ba2ed3ad22 chore: only log telemetry query for explorer, rule and dashboard pages (#8464)
* chore: only log telemetry query for explorer, rule and dashboard pages

* chore: add dashboard and rule properties for no telemetry result
2025-07-08 11:32:46 +00:00
0xflotus
eb3dfbf63b docs: fixed small typo error (#8458)
Co-authored-by: Vibhu Pandey <vibhupandey28@gmail.com>
2025-07-08 16:46:43 +05:30
Nageshbansal
c3e048470d fix: add DOT_METRICS_ENABLED and remove clickhousemetricswrite (#8461) 2025-07-08 15:36:32 +05:30
Vibhu Pandey
4563ff0e62 fix(users): skip sending email if frontend base url is not set (#8459)
skip sending email if frontend base url is not set
2025-07-08 01:47:37 +05:30
Vibhu Pandey
c9e48b6de9 feat(sqlschema): add sqlschema (#8384)
## 📄 Summary

- add sqlschema package
- add unique index on email,org_id in users and user_invite
2025-07-08 00:21:26 +05:30
Amlan Kumar Nandy
06ef9ff384 fix: resolve ui full reload on auto-refresh (#8383) 2025-07-07 16:51:06 +00:00
Amlan Kumar Nandy
26d55875f5 chore: fix metrics explorer events (#8411) 2025-07-07 16:34:17 +00:00
Srikanth Chekuri
b1864ee328 chore: use {k8s.pod/k8s.node/container}.cpu.usage metric for metadata and CPU usage charts (#8398) 2025-07-07 11:25:20 +00:00
Amlan Kumar Nandy
8b62c8dced chore: fix regex issue in route tab (#8440) 2025-07-07 16:55:23 +07:00
aniketio-ctrl
273452352d chore(2354): added preloaded metrics metadata at first api call (#8229)
* chore(2354): added preloaded metrics metadata at first api call
2025-07-06 17:09:29 +05:30
121 changed files with 5559 additions and 1781 deletions

View File

@@ -174,7 +174,7 @@ services:
# - ../common/clickhouse/storage.xml:/etc/clickhouse-server/config.d/storage.xml
signoz:
!!merge <<: *db-depend
- image: signoz/signoz:v0.88.1
+ image: signoz/signoz:v0.89.0
command:
- --config=/root/config/prometheus.yml
ports:
@@ -194,6 +194,7 @@ services:
- TELEMETRY_ENABLED=true
- DEPLOYMENT_TYPE=docker-swarm
- SIGNOZ_JWT_SECRET=secret
+ - DOT_METRICS_ENABLED=true
healthcheck:
test:
- CMD

View File

@@ -115,7 +115,7 @@ services:
# - ../common/clickhouse/storage.xml:/etc/clickhouse-server/config.d/storage.xml
signoz:
!!merge <<: *db-depend
- image: signoz/signoz:v0.88.1
+ image: signoz/signoz:v0.89.0
command:
- --config=/root/config/prometheus.yml
ports:
@@ -135,6 +135,7 @@ services:
- GODEBUG=netdns=go
- TELEMETRY_ENABLED=true
- DEPLOYMENT_TYPE=docker-swarm
+ - DOT_METRICS_ENABLED=true
healthcheck:
test:
- CMD

View File

@@ -26,7 +26,7 @@ processors:
detectors: [env, system]
timeout: 2s
signozspanmetrics/delta:
- metrics_exporter: clickhousemetricswrite, signozclickhousemetrics
+ metrics_exporter: signozclickhousemetrics
metrics_flush_interval: 60s
latency_histogram_buckets: [100us, 1ms, 2ms, 6ms, 10ms, 50ms, 100ms, 250ms, 500ms, 1000ms, 1400ms, 2000ms, 5s, 10s, 20s, 40s, 60s ]
dimensions_cache_size: 100000
@@ -60,14 +60,6 @@ exporters:
datasource: tcp://clickhouse:9000/signoz_traces
low_cardinal_exception_grouping: ${env:LOW_CARDINAL_EXCEPTION_GROUPING}
use_new_schema: true
- clickhousemetricswrite:
- endpoint: tcp://clickhouse:9000/signoz_metrics
- resource_to_telemetry_conversion:
- enabled: true
- disable_v2: true
- clickhousemetricswrite/prometheus:
- endpoint: tcp://clickhouse:9000/signoz_metrics
- disable_v2: true
signozclickhousemetrics:
dsn: tcp://clickhouse:9000/signoz_metrics
clickhouselogsexporter:
@@ -89,11 +81,11 @@ service:
metrics:
receivers: [otlp]
processors: [batch]
- exporters: [clickhousemetricswrite, signozclickhousemetrics]
+ exporters: [signozclickhousemetrics]
metrics/prometheus:
receivers: [prometheus]
processors: [batch]
- exporters: [clickhousemetricswrite/prometheus, signozclickhousemetrics]
+ exporters: [signozclickhousemetrics]
logs:
receivers: [otlp]
processors: [batch]

View File

@@ -177,7 +177,7 @@ services:
# - ../common/clickhouse/storage.xml:/etc/clickhouse-server/config.d/storage.xml
signoz:
!!merge <<: *db-depend
- image: signoz/signoz:${VERSION:-v0.88.1}
+ image: signoz/signoz:${VERSION:-v0.89.0}
container_name: signoz
command:
- --config=/root/config/prometheus.yml
@@ -197,6 +197,7 @@ services:
- GODEBUG=netdns=go
- TELEMETRY_ENABLED=true
- DEPLOYMENT_TYPE=docker-standalone-amd
+ - DOT_METRICS_ENABLED=true
healthcheck:
test:
- CMD

View File

@@ -110,7 +110,7 @@ services:
# - ../common/clickhouse/storage.xml:/etc/clickhouse-server/config.d/storage.xml
signoz:
!!merge <<: *db-depend
- image: signoz/signoz:${VERSION:-v0.88.1}
+ image: signoz/signoz:${VERSION:-v0.89.0}
container_name: signoz
command:
- --config=/root/config/prometheus.yml
@@ -130,6 +130,7 @@ services:
- GODEBUG=netdns=go
- TELEMETRY_ENABLED=true
- DEPLOYMENT_TYPE=docker-standalone-amd
+ - DOT_METRICS_ENABLED=true
healthcheck:
test:
- CMD

View File

@@ -26,7 +26,7 @@ processors:
detectors: [env, system]
timeout: 2s
signozspanmetrics/delta:
- metrics_exporter: clickhousemetricswrite, signozclickhousemetrics
+ metrics_exporter: signozclickhousemetrics
metrics_flush_interval: 60s
latency_histogram_buckets: [100us, 1ms, 2ms, 6ms, 10ms, 50ms, 100ms, 250ms, 500ms, 1000ms, 1400ms, 2000ms, 5s, 10s, 20s, 40s, 60s ]
dimensions_cache_size: 100000
@@ -60,14 +60,6 @@ exporters:
datasource: tcp://clickhouse:9000/signoz_traces
low_cardinal_exception_grouping: ${env:LOW_CARDINAL_EXCEPTION_GROUPING}
use_new_schema: true
- clickhousemetricswrite:
- endpoint: tcp://clickhouse:9000/signoz_metrics
- disable_v2: true
- resource_to_telemetry_conversion:
- enabled: true
- clickhousemetricswrite/prometheus:
- endpoint: tcp://clickhouse:9000/signoz_metrics
- disable_v2: true
signozclickhousemetrics:
dsn: tcp://clickhouse:9000/signoz_metrics
clickhouselogsexporter:
@@ -89,11 +81,11 @@ service:
metrics:
receivers: [otlp]
processors: [batch]
- exporters: [clickhousemetricswrite, signozclickhousemetrics]
+ exporters: [signozclickhousemetrics]
metrics/prometheus:
receivers: [prometheus]
processors: [batch]
- exporters: [clickhousemetricswrite/prometheus, signozclickhousemetrics]
+ exporters: [signozclickhousemetrics]
logs:
receivers: [otlp]
processors: [batch]

View File

@@ -16,7 +16,7 @@ __Table of Contents__
- [Prerequisites](#prerequisites-1)
- [Install Helm Repo and Charts](#install-helm-repo-and-charts)
- [Start the OpenTelemetry Demo App](#start-the-opentelemetry-demo-app-1)
- - [Moniitor with SigNoz (Kubernetes)](#monitor-with-signoz-kubernetes)
+ - [Monitor with SigNoz (Kubernetes)](#monitor-with-signoz-kubernetes)
- [What's next](#whats-next)

View File

@@ -203,17 +203,6 @@ func NewServer(config signoz.Config, signoz *signoz.SigNoz, jwt *authtypes.JWT)
&opAmpModel.AllAgents, agentConfMgr, signoz.Instrumentation,
)
- orgs, err := apiHandler.Signoz.Modules.OrgGetter.ListByOwnedKeyRange(context.Background())
- if err != nil {
- return nil, err
- }
- for _, org := range orgs {
- errorList := reader.PreloadMetricsMetadata(context.Background(), org.ID)
- for _, er := range errorList {
- zap.L().Error("failed to preload metrics metadata", zap.Error(er))
- }
- }
return s, nil
}

View File

@@ -9,6 +9,7 @@ import (
"github.com/SigNoz/signoz/ee/licensing"
"github.com/SigNoz/signoz/ee/licensing/httplicensing"
"github.com/SigNoz/signoz/ee/query-service/app"
"github.com/SigNoz/signoz/ee/sqlschema/postgressqlschema"
"github.com/SigNoz/signoz/ee/sqlstore/postgressqlstore"
"github.com/SigNoz/signoz/ee/zeus"
"github.com/SigNoz/signoz/ee/zeus/httpzeus"
@@ -21,6 +22,7 @@ import (
"github.com/SigNoz/signoz/pkg/modules/organization"
baseconst "github.com/SigNoz/signoz/pkg/query-service/constants"
"github.com/SigNoz/signoz/pkg/signoz"
"github.com/SigNoz/signoz/pkg/sqlschema"
"github.com/SigNoz/signoz/pkg/sqlstore"
"github.com/SigNoz/signoz/pkg/sqlstore/sqlstorehook"
"github.com/SigNoz/signoz/pkg/types/authtypes"
@@ -145,6 +147,14 @@ func main() {
signoz.NewEmailingProviderFactories(),
signoz.NewCacheProviderFactories(),
signoz.NewWebProviderFactories(),
+ func(sqlstore sqlstore.SQLStore) factory.NamedMap[factory.ProviderFactory[sqlschema.SQLSchema, sqlschema.Config]] {
+ existingFactories := signoz.NewSQLSchemaProviderFactories(sqlstore)
+ if err := existingFactories.Add(postgressqlschema.NewFactory(sqlstore)); err != nil {
+ zap.L().Fatal("Failed to add postgressqlschema factory", zap.Error(err))
+ }
+ return existingFactories
+ },
sqlStoreFactories,
signoz.NewTelemetryStoreProviderFactories(),
)

View File

@@ -0,0 +1,36 @@
package postgressqlschema
import (
"strings"
"github.com/SigNoz/signoz/pkg/sqlschema"
)
type Formatter struct {
sqlschema.Formatter
}
func (formatter Formatter) SQLDataTypeOf(dataType sqlschema.DataType) string {
if dataType == sqlschema.DataTypeTimestamp {
return "TIMESTAMPTZ"
}
return strings.ToUpper(dataType.String())
}
func (formatter Formatter) DataTypeOf(dataType string) sqlschema.DataType {
switch strings.ToUpper(dataType) {
case "TIMESTAMPTZ", "TIMESTAMP", "TIMESTAMP WITHOUT TIME ZONE", "TIMESTAMP WITH TIME ZONE":
return sqlschema.DataTypeTimestamp
case "INT8":
return sqlschema.DataTypeBigInt
case "INT2", "INT4", "SMALLINT", "INTEGER":
return sqlschema.DataTypeInteger
case "BOOL", "BOOLEAN":
return sqlschema.DataTypeBoolean
case "VARCHAR", "CHARACTER VARYING", "CHARACTER":
return sqlschema.DataTypeText
}
return formatter.Formatter.DataTypeOf(dataType)
}

View File

@@ -0,0 +1,285 @@
package postgressqlschema
import (
"context"
"github.com/SigNoz/signoz/pkg/factory"
"github.com/SigNoz/signoz/pkg/sqlschema"
"github.com/SigNoz/signoz/pkg/sqlstore"
"github.com/uptrace/bun"
)
type provider struct {
settings factory.ScopedProviderSettings
fmter sqlschema.SQLFormatter
sqlstore sqlstore.SQLStore
operator sqlschema.SQLOperator
}
func NewFactory(sqlstore sqlstore.SQLStore) factory.ProviderFactory[sqlschema.SQLSchema, sqlschema.Config] {
return factory.NewProviderFactory(factory.MustNewName("postgres"), func(ctx context.Context, providerSettings factory.ProviderSettings, config sqlschema.Config) (sqlschema.SQLSchema, error) {
return New(ctx, providerSettings, config, sqlstore)
})
}
func New(ctx context.Context, providerSettings factory.ProviderSettings, config sqlschema.Config, sqlstore sqlstore.SQLStore) (sqlschema.SQLSchema, error) {
settings := factory.NewScopedProviderSettings(providerSettings, "github.com/SigNoz/signoz/pkg/sqlschema/postgressqlschema")
fmter := Formatter{Formatter: sqlschema.NewFormatter(sqlstore.BunDB().Dialect())}
return &provider{
sqlstore: sqlstore,
fmter: fmter,
settings: settings,
operator: sqlschema.NewOperator(fmter, sqlschema.OperatorSupport{
DropConstraint: true,
ColumnIfNotExistsExists: true,
AlterColumnSetNotNull: true,
}),
}, nil
}
func (provider *provider) Formatter() sqlschema.SQLFormatter {
return provider.fmter
}
func (provider *provider) Operator() sqlschema.SQLOperator {
return provider.operator
}
func (provider *provider) GetTable(ctx context.Context, tableName sqlschema.TableName) (*sqlschema.Table, []*sqlschema.UniqueConstraint, error) {
rows, err := provider.
sqlstore.
BunDB().
QueryContext(ctx, `
SELECT
c.column_name,
c.is_nullable = 'YES',
c.udt_name,
c.column_default
FROM
information_schema.columns AS c
WHERE
c.table_name = ?`, string(tableName))
if err != nil {
return nil, nil, err
}
defer func() {
if err := rows.Close(); err != nil {
provider.settings.Logger().ErrorContext(ctx, "error closing rows", "error", err)
}
}()
columns := make([]*sqlschema.Column, 0)
for rows.Next() {
var (
name string
sqlDataType string
nullable bool
defaultVal *string
)
if err := rows.Scan(&name, &nullable, &sqlDataType, &defaultVal); err != nil {
return nil, nil, err
}
columnDefault := ""
if defaultVal != nil {
columnDefault = *defaultVal
}
columns = append(columns, &sqlschema.Column{
Name: sqlschema.ColumnName(name),
Nullable: nullable,
DataType: provider.fmter.DataTypeOf(sqlDataType),
Default: columnDefault,
})
}
constraintsRows, err := provider.
sqlstore.
BunDB().
QueryContext(ctx, `
SELECT
c.column_name,
constraint_name,
constraint_type
FROM
information_schema.table_constraints tc
JOIN information_schema.constraint_column_usage AS ccu USING (constraint_schema, constraint_catalog, table_name, constraint_name)
JOIN information_schema.columns AS c ON c.table_schema = tc.constraint_schema AND tc.table_name = c.table_name AND ccu.column_name = c.column_name
WHERE
c.table_name = ?`, string(tableName))
if err != nil {
return nil, nil, err
}
defer func() {
if err := constraintsRows.Close(); err != nil {
provider.settings.Logger().ErrorContext(ctx, "error closing rows", "error", err)
}
}()
var primaryKeyConstraint *sqlschema.PrimaryKeyConstraint
uniqueConstraintsMap := make(map[string]*sqlschema.UniqueConstraint)
for constraintsRows.Next() {
var (
name string
constraintName string
constraintType string
)
if err := constraintsRows.Scan(&name, &constraintName, &constraintType); err != nil {
return nil, nil, err
}
if constraintType == "PRIMARY KEY" {
if primaryKeyConstraint == nil {
primaryKeyConstraint = (&sqlschema.PrimaryKeyConstraint{
ColumnNames: []sqlschema.ColumnName{sqlschema.ColumnName(name)},
}).Named(constraintName).(*sqlschema.PrimaryKeyConstraint)
} else {
primaryKeyConstraint.ColumnNames = append(primaryKeyConstraint.ColumnNames, sqlschema.ColumnName(name))
}
}
if constraintType == "UNIQUE" {
if _, ok := uniqueConstraintsMap[constraintName]; !ok {
uniqueConstraintsMap[constraintName] = (&sqlschema.UniqueConstraint{
ColumnNames: []sqlschema.ColumnName{sqlschema.ColumnName(name)},
}).Named(constraintName).(*sqlschema.UniqueConstraint)
} else {
uniqueConstraintsMap[constraintName].ColumnNames = append(uniqueConstraintsMap[constraintName].ColumnNames, sqlschema.ColumnName(name))
}
}
}
foreignKeyConstraintsRows, err := provider.
sqlstore.
BunDB().
QueryContext(ctx, `
SELECT
tc.constraint_name,
kcu.table_name AS referencing_table,
kcu.column_name AS referencing_column,
ccu.table_name AS referenced_table,
ccu.column_name AS referenced_column
FROM
information_schema.key_column_usage kcu
JOIN information_schema.table_constraints tc ON kcu.constraint_name = tc.constraint_name AND kcu.table_schema = tc.table_schema
JOIN information_schema.constraint_column_usage ccu ON ccu.constraint_name = tc.constraint_name AND ccu.table_schema = tc.table_schema
WHERE
tc.constraint_type = ?
AND kcu.table_name = ?`, "FOREIGN KEY", string(tableName))
if err != nil {
return nil, nil, err
}
defer func() {
if err := foreignKeyConstraintsRows.Close(); err != nil {
provider.settings.Logger().ErrorContext(ctx, "error closing rows", "error", err)
}
}()
foreignKeyConstraints := make([]*sqlschema.ForeignKeyConstraint, 0)
for foreignKeyConstraintsRows.Next() {
var (
constraintName string
referencingTable string
referencingColumn string
referencedTable string
referencedColumn string
)
if err := foreignKeyConstraintsRows.Scan(&constraintName, &referencingTable, &referencingColumn, &referencedTable, &referencedColumn); err != nil {
return nil, nil, err
}
foreignKeyConstraints = append(foreignKeyConstraints, (&sqlschema.ForeignKeyConstraint{
ReferencingColumnName: sqlschema.ColumnName(referencingColumn),
ReferencedTableName: sqlschema.TableName(referencedTable),
ReferencedColumnName: sqlschema.ColumnName(referencedColumn),
}).Named(constraintName).(*sqlschema.ForeignKeyConstraint))
}
uniqueConstraints := make([]*sqlschema.UniqueConstraint, 0)
for _, uniqueConstraint := range uniqueConstraintsMap {
uniqueConstraints = append(uniqueConstraints, uniqueConstraint)
}
return &sqlschema.Table{
Name: tableName,
Columns: columns,
PrimaryKeyConstraint: primaryKeyConstraint,
ForeignKeyConstraints: foreignKeyConstraints,
}, uniqueConstraints, nil
}
func (provider *provider) GetIndices(ctx context.Context, name sqlschema.TableName) ([]sqlschema.Index, error) {
rows, err := provider.
sqlstore.
BunDB().
QueryContext(ctx, `
SELECT
ct.relname AS table_name,
ci.relname AS index_name,
i.indisunique AS unique,
i.indisprimary AS primary,
a.attname AS column_name
FROM
pg_index i
LEFT JOIN pg_class ct ON ct.oid = i.indrelid
LEFT JOIN pg_class ci ON ci.oid = i.indexrelid
LEFT JOIN pg_attribute a ON a.attrelid = ct.oid
LEFT JOIN pg_constraint con ON con.conindid = i.indexrelid
WHERE
a.attnum = ANY(i.indkey)
AND con.oid IS NULL
AND ct.relkind = 'r'
AND ct.relname = ?`, string(name))
if err != nil {
return nil, err
}
defer func() {
if err := rows.Close(); err != nil {
provider.settings.Logger().ErrorContext(ctx, "error closing rows", "error", err)
}
}()
uniqueIndicesMap := make(map[string]*sqlschema.UniqueIndex)
for rows.Next() {
var (
tableName string
indexName string
unique bool
primary bool
columnName string
)
if err := rows.Scan(&tableName, &indexName, &unique, &primary, &columnName); err != nil {
return nil, err
}
if unique {
if _, ok := uniqueIndicesMap[indexName]; !ok {
uniqueIndicesMap[indexName] = &sqlschema.UniqueIndex{
TableName: name,
ColumnNames: []sqlschema.ColumnName{sqlschema.ColumnName(columnName)},
}
} else {
uniqueIndicesMap[indexName].ColumnNames = append(uniqueIndicesMap[indexName].ColumnNames, sqlschema.ColumnName(columnName))
}
}
}
indices := make([]sqlschema.Index, 0)
for _, index := range uniqueIndicesMap {
indices = append(indices, index)
}
return indices, nil
}
func (provider *provider) ToggleFKEnforcement(_ context.Context, _ bun.IDB, _ bool) error {
return nil
}

View File

@@ -213,7 +213,9 @@
"eslint-plugin-simple-import-sort": "^7.0.0",
"eslint-plugin-sonarjs": "^0.12.0",
"husky": "^7.0.4",
"image-webpack-loader": "8.1.0",
"image-minimizer-webpack-plugin": "^4.0.0",
"imagemin": "^8.0.1",
"imagemin-svgo": "^10.0.1",
"is-ci": "^3.0.1",
"jest-styled-components": "^7.0.8",
"lint-staged": "^12.5.0",
@@ -230,6 +232,7 @@
"redux-mock-store": "1.5.4",
"sass": "1.66.1",
"sass-loader": "13.3.2",
"sharp": "^0.33.4",
"ts-jest": "^27.1.5",
"ts-node": "^10.2.1",
"typescript-plugin-css-modules": "5.0.1",
@@ -254,6 +257,7 @@
"cross-spawn": "7.0.5",
"cookie": "^0.7.1",
"serialize-javascript": "6.0.2",
"prismjs": "1.30.0"
"prismjs": "1.30.0",
"got": "11.8.5"
}
}

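For context on the image-webpack-loader swap above: image-minimizer-webpack-plugin attaches to webpack's optimization.minimizer rather than a loader rule. A minimal sketch, assuming a standalone webpack config (the repo's real config is not part of this diff, so the surrounding shape is illustrative):

import ImageMinimizerPlugin from 'image-minimizer-webpack-plugin';

export default {
  optimization: {
    minimizer: [
      // sharp minifies raster assets (png/jpeg/webp), matching the "used sharp" commit
      new ImageMinimizerPlugin({
        minimizer: { implementation: ImageMinimizerPlugin.sharpMinify },
      }),
      // imagemin + imagemin-svgo cover SVGs, which sharp does not handle
      new ImageMinimizerPlugin({
        minimizer: {
          implementation: ImageMinimizerPlugin.imageminMinify,
          options: { plugins: ['imagemin-svgo'] },
        },
      }),
    ],
  },
};
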
View File

@@ -194,7 +194,7 @@ function HostMetricTraces({
{!isError && traces.length > 0 && (
<div className="host-metric-traces-table">
<TraceExplorerControls
- isLoading={isFetching}
+ isLoading={isFetching && traces.length === 0}
totalCount={totalCount}
perPageOptions={PER_PAGE_OPTIONS}
showSizeChanger={false}
@@ -203,7 +203,7 @@ function HostMetricTraces({
tableLayout="fixed"
pagination={false}
scroll={{ x: true }}
- loading={isFetching}
+ loading={isFetching && traces.length === 0}
dataSource={traces}
columns={traceListColumns}
onRow={(): Record<string, unknown> => ({

View File

@@ -37,7 +37,7 @@ import {
ScrollText,
X,
} from 'lucide-react';
- import { useCallback, useEffect, useMemo, useState } from 'react';
+ import { useCallback, useEffect, useMemo, useRef, useState } from 'react';
import { useSelector } from 'react-redux';
import { useSearchParams } from 'react-router-dom-v5-compat';
import { AppState } from 'store/reducers';
@@ -86,8 +86,12 @@ function HostMetricsDetails({
endTime: endMs,
}));
+ const lastSelectedInterval = useRef<Time | null>(null);
const [selectedInterval, setSelectedInterval] = useState<Time>(
- selectedTime as Time,
+ lastSelectedInterval.current
+ ? lastSelectedInterval.current
+ : (selectedTime as Time),
);
const [selectedView, setSelectedView] = useState<VIEWS>(
@@ -150,10 +154,11 @@ function HostMetricsDetails({
}, [initialFilters]);
useEffect(() => {
- setSelectedInterval(selectedTime as Time);
+ const currentSelectedInterval = lastSelectedInterval.current || selectedTime;
+ setSelectedInterval(currentSelectedInterval as Time);
- if (selectedTime !== 'custom') {
- const { maxTime, minTime } = GetMinMax(selectedTime);
+ if (currentSelectedInterval !== 'custom') {
+ const { maxTime, minTime } = GetMinMax(currentSelectedInterval);
setModalTimeRange({
startTime: Math.floor(minTime / 1000000000),
@@ -181,6 +186,7 @@ function HostMetricsDetails({
const handleTimeChange = useCallback(
(interval: Time | CustomTimeType, dateTimeRange?: [number, number]): void => {
+ lastSelectedInterval.current = interval as Time;
setSelectedInterval(interval as Time);
if (interval === 'custom' && dateTimeRange) {
@@ -356,6 +362,7 @@ function HostMetricsDetails({
const handleClose = (): void => {
setSelectedInterval(selectedTime as Time);
+ lastSelectedInterval.current = null;
setSearchParams({});
if (selectedTime !== 'custom') {

View File

@@ -15,11 +15,12 @@ import {
} from 'container/TopNav/DateTimeSelectionV2/config';
import { useIsDarkMode } from 'hooks/useDarkMode';
import { useResizeObserver } from 'hooks/useDimensions';
+ import { useMultiIntersectionObserver } from 'hooks/useMultiIntersectionObserver';
import { GetMetricQueryRange } from 'lib/dashboard/getQueryResults';
import { getUPlotChartOptions } from 'lib/uPlotLib/getUplotChartOptions';
import { getUPlotChartData } from 'lib/uPlotLib/utils/getUplotChartData';
import { useCallback, useEffect, useMemo, useRef, useState } from 'react';
- import { useQueries, UseQueryResult } from 'react-query';
+ import { QueryFunctionContext, useQueries, UseQueryResult } from 'react-query';
import { SuccessResponse } from 'types/api';
import { MetricRangePayloadProps } from 'types/api/metrics/getQueryRange';
@@ -53,6 +54,11 @@ function Metrics({
featureFlags?.find((flag) => flag.name === FeatureKeys.DOT_METRICS_ENABLED)
?.active || false;
+ const {
+ visibilities,
+ setElement,
+ } = useMultiIntersectionObserver(hostWidgetInfo.length, { threshold: 0.1 });
const queryPayloads = useMemo(
() =>
getHostQueryPayload(
@@ -65,11 +71,15 @@ function Metrics({
);
const queries = useQueries(
- queryPayloads.map((payload) => ({
+ queryPayloads.map((payload, index) => ({
queryKey: ['host-metrics', payload, ENTITY_VERSION_V4, 'HOST'],
- queryFn: (): Promise<SuccessResponse<MetricRangePayloadProps>> =>
- GetMetricQueryRange(payload, ENTITY_VERSION_V4),
- enabled: !!payload,
+ queryFn: ({
+ signal,
+ }: QueryFunctionContext): Promise<
+ SuccessResponse<MetricRangePayloadProps>
+ > => GetMetricQueryRange(payload, ENTITY_VERSION_V4, signal),
+ enabled: !!payload && visibilities[index],
+ keepPreviousData: true,
})),
);
@@ -143,7 +153,7 @@ function Metrics({
query: UseQueryResult<SuccessResponse<MetricRangePayloadProps>, unknown>,
idx: number,
): JSX.Element => {
- if (query.isLoading) {
+ if ((!query.data && query.isLoading) || !visibilities[idx]) {
return <Skeleton />;
}
@@ -181,7 +191,7 @@ function Metrics({
</div>
<Row gutter={24} className="host-metrics-container">
{queries.map((query, idx) => (
- <Col span={12} key={hostWidgetInfo[idx].title}>
+ <Col ref={setElement(idx)} span={12} key={hostWidgetInfo[idx].title}>
<Typography.Text>{hostWidgetInfo[idx].title}</Typography.Text>
<Card bordered className="host-metrics-card" ref={graphRef}>
{renderCardContent(query, idx)}

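A note on the lazy-loading change above: useMultiIntersectionObserver is an in-repo hook that this diff only calls, never defines. From its call site (a visibilities boolean array plus a setElement ref-callback factory, with each chart query gated on its card being visible), a plausible sketch of its shape, offered as an assumption rather than the actual implementation:

import { useCallback, useRef, useState } from 'react';

export function useMultiIntersectionObserver(
  count: number,
  options?: IntersectionObserverInit,
): {
  visibilities: boolean[];
  setElement: (index: number) => (el: Element | null) => void;
} {
  const [visibilities, setVisibilities] = useState<boolean[]>(() =>
    Array(count).fill(false),
  );
  const observers = useRef<(IntersectionObserver | null)[]>([]);

  // Returns a callback ref per card; each observed element gets its own observer.
  const setElement = useCallback(
    (index: number) => (el: Element | null): void => {
      observers.current[index]?.disconnect();
      if (!el) return;
      const observer = new IntersectionObserver(([entry]) => {
        setVisibilities((prev) => {
          if (prev[index] === entry.isIntersecting) return prev;
          const next = [...prev];
          next[index] = entry.isIntersecting;
          return next;
        });
      }, options);
      observer.observe(el);
      observers.current[index] = observer;
    },
    [options],
  );

  return { visibilities, setElement };
}

Combined with enabled: !!payload && visibilities[index] and the abort signal now passed through to GetMetricQueryRange, each chart only fetches once its card scrolls into view.
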
View File

@@ -1,5 +1,10 @@
import { Tabs, TabsProps } from 'antd';
- import { useLocation, useParams } from 'react-router-dom';
+ import {
+ generatePath,
+ matchPath,
+ useLocation,
+ useParams,
+ } from 'react-router-dom';
import { RouteTabProps } from './types';
@@ -17,20 +22,13 @@ function RouteTab({
const params = useParams<Params>();
const location = useLocation();
- // Replace dynamic parameters in routes
- const routesWithParams = routes.map((route) => ({
- ...route,
- route: route.route.replace(
- /:(\w+)/g,
- (match, param) => params[param] || match,
- ),
- }));
// Find the matching route for the current pathname
- const currentRoute = routesWithParams.find((route) => {
- const routePattern = route.route.replace(/:(\w+)/g, '([^/]+)');
- const regex = new RegExp(`^${routePattern}$`);
- return regex.test(location.pathname);
+ const currentRoute = routes.find((route) => {
+ const routePath = route.route.split('?')[0];
+ return matchPath(location.pathname, {
+ path: routePath,
+ exact: true,
+ });
});
const onChange = (activeRoute: string): void => {
@@ -38,14 +36,15 @@ function RouteTab({
onChangeHandler(activeRoute);
}
- const selectedRoute = routesWithParams.find((e) => e.key === activeRoute);
+ const selectedRoute = routes.find((e) => e.key === activeRoute);
if (selectedRoute) {
- history.push(selectedRoute.route);
+ const resolvedRoute = generatePath(selectedRoute.route, params);
+ history.push(resolvedRoute);
}
};
- const items = routesWithParams.map(({ Component, name, route, key }) => ({
+ const items = routes.map(({ Component, name, route, key }) => ({
label: name,
key,
tabKey: route,

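The RouteTab refactor above replaces a hand-rolled :param regex with react-router v5's own helpers. Roughly, the two behave like this (paths and values here are illustrative, not taken from the diff):

import { generatePath, matchPath } from 'react-router-dom';

// matchPath parses ':param' segments itself, so no regex is needed;
// it returns null when the pathname does not match the pattern.
const match = matchPath('/host/my-host-1/metrics', {
  path: '/host/:hostName/metrics',
  exact: true,
}); // => { params: { hostName: 'my-host-1' }, isExact: true, ... }

// generatePath is the inverse: it substitutes params back into the
// pattern, replacing the old route.replace(/:(\w+)/g, ...) logic.
const url = generatePath('/host/:hostName/metrics', {
  hostName: 'my-host-1',
}); // => '/host/my-host-1/metrics'

Splitting off the query string before matching (route.route.split('?')[0]) lets routes that carry query params still match on their path portion.
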
View File

@@ -46,4 +46,5 @@ export enum QueryParams {
msgSystem = 'msgSystem',
destination = 'destination',
kindString = 'kindString',
+ tab = 'tab',
}

View File

@@ -96,11 +96,41 @@ function HostsList(): JSX.Element {
};
}, [pageSize, currentPage, filters, minTime, maxTime, orderBy]);
+ const queryKey = useMemo(() => {
+ if (selectedHostName) {
+ return [
+ 'hostList',
+ String(pageSize),
+ String(currentPage),
+ JSON.stringify(filters),
+ JSON.stringify(orderBy),
+ ];
+ }
+ return [
+ 'hostList',
+ String(pageSize),
+ String(currentPage),
+ JSON.stringify(filters),
+ JSON.stringify(orderBy),
+ String(minTime),
+ String(maxTime),
+ ];
+ }, [
+ pageSize,
+ currentPage,
+ filters,
+ orderBy,
+ selectedHostName,
+ minTime,
+ maxTime,
+ ]);
const { data, isFetching, isLoading, isError } = useGetHostList(
query as HostListPayload,
{
- queryKey: ['hostList', query],
+ queryKey,
enabled: !!query,
+ keepPreviousData: true,
},
);
@@ -212,6 +242,7 @@ function HostsList(): JSX.Element {
<HostsListControls
filters={filters}
handleFiltersChange={handleFiltersChange}
+ showAutoRefresh={!selectedHostData}
/>
</div>
<HostsListTable

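The queryKey memo above is the core of the auto-refresh fix, and the same pattern repeats in the clusters, daemonsets, deployments, jobs, and namespaces lists below: while a details drawer is open (selectedHostName set), minTime/maxTime are dropped from the react-query key, so auto-refresh ticks that move the global time range no longer invalidate the list query behind the drawer. A condensed sketch of the idea with hypothetical names (fetchEntityList is a stand-in, not an API from the repo):

import { useMemo } from 'react';
import { useQuery, UseQueryResult } from 'react-query';

declare function fetchEntityList(
  filters: string,
  minTime: number,
  maxTime: number,
): Promise<unknown[]>;

function useEntityList(
  filters: string,
  minTime: number,
  maxTime: number,
  selectedId?: string,
): UseQueryResult<unknown[]> {
  const queryKey = useMemo(
    () =>
      selectedId
        ? // drawer open: the time range is deliberately left out of the key
          ['entityList', filters]
        : ['entityList', filters, String(minTime), String(maxTime)],
    [selectedId, filters, minTime, maxTime],
  );

  // keepPreviousData keeps stale rows on screen instead of flashing a loader
  return useQuery(queryKey, () => fetchEntityList(filters, minTime, maxTime), {
    keepPreviousData: true,
  });
}
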
View File

@@ -11,9 +11,11 @@ import { DataSource } from 'types/common/queryBuilder';
function HostsListControls({
handleFiltersChange,
filters,
+ showAutoRefresh,
}: {
handleFiltersChange: (value: IBuilderQuery['filters']) => void;
filters: IBuilderQuery['filters'];
+ showAutoRefresh: boolean;
}): JSX.Element {
const currentQuery = initialQueriesMap[DataSource.METRICS];
const updatedCurrentQuery = useMemo(
@@ -58,7 +60,7 @@ function HostsListControls({
<div className="time-selector">
<DateTimeSelectionV2
- showAutoRefresh
+ showAutoRefresh={showAutoRefresh}
showRefreshText={false}
hideShareModal
/>

View File

@@ -93,9 +93,13 @@ export default function HostsListTable({
const showHostsEmptyState =
!isFetching &&
!isLoading &&
+ formattedHostMetricsData.length === 0 &&
(!sentAnyHostMetricsData || isSendingIncorrectK8SAgentMetrics) &&
!filters.items.length;
+ const showTableLoadingState =
+ (isLoading || isFetching) && formattedHostMetricsData.length === 0;
if (isError) {
return <Typography>{data?.error || 'Something went wrong'}</Typography>;
}
@@ -127,7 +131,7 @@ export default function HostsListTable({
);
}
- if (isLoading || isFetching) {
+ if (showTableLoadingState) {
return (
<div className="hosts-list-loading-state">
<Skeleton.Input
@@ -155,7 +159,7 @@ export default function HostsListTable({
return (
<Table
className="hosts-list-table"
- dataSource={isLoading || isFetching ? [] : formattedHostMetricsData}
+ dataSource={showTableLoadingState ? [] : formattedHostMetricsData}
columns={columns}
pagination={{
current: currentPage,
@@ -170,7 +174,7 @@ export default function HostsListTable({
}}
scroll={{ x: true }}
loading={{
- spinning: isFetching || isLoading,
+ spinning: showTableLoadingState,
indicator: <Spin indicator={<LoadingOutlined size={14} spin />} />,
}}
tableLayout="fixed"

View File

@@ -28,6 +28,7 @@ describe('HostsListControls', () => {
<HostsListControls
handleFiltersChange={mockHandleFiltersChange}
filters={mockFilters}
+ showAutoRefresh={false}
/>,
);

View File

@@ -59,13 +59,27 @@ describe('HostsListTable', () => {
setPageSize: mockSetPageSize,
} as any;
- it('renders loading state if isLoading is true', () => {
- const { container } = render(<HostsListTable {...mockProps} isLoading />);
+ it('renders loading state if isLoading is true and tableData is empty', () => {
+ const { container } = render(
+ <HostsListTable
+ {...mockProps}
+ isLoading
+ hostMetricsData={[]}
+ tableData={{ payload: { data: { hosts: [] } } }}
+ />,
+ );
expect(container.querySelector('.hosts-list-loading-state')).toBeTruthy();
});
- it('renders loading state if isFetching is true', () => {
- const { container } = render(<HostsListTable {...mockProps} isFetching />);
+ it('renders loading state if isFetching is true and tableData is empty', () => {
+ const { container } = render(
+ <HostsListTable
+ {...mockProps}
+ isFetching
+ hostMetricsData={[]}
+ tableData={{ payload: { data: { hosts: [] } } }}
+ />,
+ );
expect(container.querySelector('.hosts-list-loading-state')).toBeTruthy();
});
@@ -75,7 +89,17 @@ describe('HostsListTable', () => {
});
it('renders empty state if no hosts are found', () => {
- const { container } = render(<HostsListTable {...mockProps} />);
+ const { container } = render(
+ <HostsListTable
+ {...mockProps}
+ hostMetricsData={[]}
+ tableData={{
+ payload: {
+ data: { hosts: [] },
+ },
+ }}
+ />,
+ );
expect(container.querySelector(EMPTY_STATE_CONTAINER_CLASS)).toBeTruthy();
});
@@ -83,6 +107,7 @@ describe('HostsListTable', () => {
const { container } = render(
<HostsListTable
{...mockProps}
+ hostMetricsData={[]}
tableData={{
...mockTableData,
payload: {
@@ -90,6 +115,7 @@ describe('HostsListTable', () => {
data: {
...mockTableData.payload.data,
sentAnyHostMetricsData: false,
+ hosts: [],
},
},
}}
@@ -102,6 +128,7 @@ describe('HostsListTable', () => {
const { container } = render(
<HostsListTable
{...mockProps}
+ hostMetricsData={[]}
tableData={{
...mockTableData,
payload: {
@@ -109,6 +136,7 @@ describe('HostsListTable', () => {
data: {
...mockTableData.payload.data,
isSendingIncorrectK8SAgentMetrics: true,
+ hosts: [],
},
},
}}

View File

@@ -38,7 +38,7 @@ import {
ScrollText,
X,
} from 'lucide-react';
- import { useCallback, useEffect, useMemo, useState } from 'react';
+ import { useCallback, useEffect, useMemo, useRef, useState } from 'react';
import { useSelector } from 'react-redux';
import { useSearchParams } from 'react-router-dom-v5-compat';
import { AppState } from 'store/reducers';
@@ -85,8 +85,12 @@ function ClusterDetails({
endTime: endMs,
}));
+ const lastSelectedInterval = useRef<Time | null>(null);
const [selectedInterval, setSelectedInterval] = useState<Time>(
- selectedTime as Time,
+ lastSelectedInterval.current
+ ? lastSelectedInterval.current
+ : (selectedTime as Time),
);
const [searchParams, setSearchParams] = useSearchParams();
@@ -195,10 +199,11 @@ function ClusterDetails({
}, [initialFilters, initialEventsFilters]);
useEffect(() => {
- setSelectedInterval(selectedTime as Time);
+ const currentSelectedInterval = lastSelectedInterval.current || selectedTime;
+ setSelectedInterval(currentSelectedInterval as Time);
- if (selectedTime !== 'custom') {
- const { maxTime, minTime } = GetMinMax(selectedTime);
+ if (currentSelectedInterval !== 'custom') {
+ const { maxTime, minTime } = GetMinMax(currentSelectedInterval);
setModalTimeRange({
startTime: Math.floor(minTime / 1000000000),
@@ -226,6 +231,7 @@ function ClusterDetails({
const handleTimeChange = useCallback(
(interval: Time | CustomTimeType, dateTimeRange?: [number, number]): void => {
+ lastSelectedInterval.current = interval as Time;
setSelectedInterval(interval as Time);
if (interval === 'custom' && dateTimeRange) {
@@ -462,6 +468,7 @@ function ClusterDetails({
};
const handleClose = (): void => {
+ lastSelectedInterval.current = null;
setSelectedInterval(selectedTime as Time);
if (selectedTime !== 'custom') {

View File

@@ -51,8 +51,8 @@ export const getClusterMetricsQueryPayload = (
const getKey = (dotKey: string, underscoreKey: string): string =>
dotMetricsEnabled ? dotKey : underscoreKey;
const k8sPodCpuUtilizationKey = getKey(
- 'k8s.pod.cpu.utilization',
- 'k8s_pod_cpu_utilization',
+ 'k8s.pod.cpu.usage',
+ 'k8s_pod_cpu_usage',
);
const k8sNodeAllocatableCpuKey = getKey(
'k8s.node.allocatable_cpu',
@@ -146,7 +146,7 @@ export const getClusterMetricsQueryPayload = (
{
aggregateAttribute: {
dataType: DataTypes.Float64,
- id: 'k8s_pod_cpu_utilization--float64--Gauge--true',
+ id: 'k8s_pod_cpu_usage--float64--Gauge--true',
isColumn: true,
isJSON: false,
key: k8sPodCpuUtilizationKey,
@@ -189,7 +189,7 @@ export const getClusterMetricsQueryPayload = (
{
aggregateAttribute: {
dataType: DataTypes.Float64,
- id: 'k8s_pod_cpu_utilization--float64--Gauge--true',
+ id: 'k8s_pod_cpu_usage--float64--Gauge--true',
isColumn: true,
isJSON: false,
key: k8sPodCpuUtilizationKey,
@@ -232,7 +232,7 @@ export const getClusterMetricsQueryPayload = (
{
aggregateAttribute: {
dataType: DataTypes.Float64,
- id: 'k8s_pod_cpu_utilization--float64--Gauge--true',
+ id: 'k8s_pod_cpu_usage--float64--Gauge--true',
isColumn: true,
isJSON: false,
key: k8sPodCpuUtilizationKey,

View File

@@ -189,6 +189,32 @@ function K8sClustersList({
// eslint-disable-next-line react-hooks/exhaustive-deps
}, [minTime, maxTime, orderBy, selectedRowData, groupBy]);
+ const groupedByRowDataQueryKey = useMemo(() => {
+ if (selectedClusterName) {
+ return [
+ 'clusterList',
+ JSON.stringify(queryFilters),
+ JSON.stringify(orderBy),
+ JSON.stringify(selectedRowData),
+ ];
+ }
+ return [
+ 'clusterList',
+ JSON.stringify(queryFilters),
+ JSON.stringify(orderBy),
+ JSON.stringify(selectedRowData),
+ String(minTime),
+ String(maxTime),
+ ];
+ }, [
+ queryFilters,
+ orderBy,
+ selectedClusterName,
+ minTime,
+ maxTime,
+ selectedRowData,
+ ]);
const {
data: groupedByRowData,
isFetching: isFetchingGroupedByRowData,
@@ -198,7 +224,7 @@ function K8sClustersList({
} = useGetK8sClustersList(
fetchGroupedByRowDataQuery as K8sClustersListPayload,
{
- queryKey: ['clusterList', fetchGroupedByRowDataQuery],
+ queryKey: groupedByRowDataQueryKey,
enabled: !!fetchGroupedByRowDataQuery && !!selectedRowData,
},
undefined,
@@ -254,11 +280,44 @@ function K8sClustersList({
return groupedByRowData?.payload?.data?.records || [];
}, [groupedByRowData, selectedRowData]);
+ const queryKey = useMemo(() => {
+ if (selectedClusterName) {
+ return [
+ 'clusterList',
+ String(pageSize),
+ String(currentPage),
+ JSON.stringify(queryFilters),
+ JSON.stringify(orderBy),
+ JSON.stringify(groupBy),
+ ];
+ }
+ return [
+ 'clusterList',
+ String(pageSize),
+ String(currentPage),
+ JSON.stringify(queryFilters),
+ JSON.stringify(orderBy),
+ JSON.stringify(groupBy),
+ String(minTime),
+ String(maxTime),
+ ];
+ }, [
+ selectedClusterName,
+ pageSize,
+ currentPage,
+ queryFilters,
+ orderBy,
+ groupBy,
+ minTime,
+ maxTime,
+ ]);
const { data, isFetching, isLoading, isError } = useGetK8sClustersList(
query as K8sClustersListPayload,
{
- queryKey: ['clusterList', query],
+ queryKey,
enabled: !!query,
+ keepPreviousData: true,
},
undefined,
dotMetricsEnabled,
@@ -583,6 +642,9 @@ function K8sClustersList({
});
};
+ const showTableLoadingState =
+ (isFetching || isLoading) && formattedClustersData.length === 0;
return (
<div className="k8s-list">
<K8sHeader
@@ -595,12 +657,13 @@ function K8sClustersList({
handleGroupByChange={handleGroupByChange}
selectedGroupBy={groupBy}
entity={K8sCategory.NODES}
+ showAutoRefresh={!selectedClusterData}
/>
{isError && <Typography>{data?.error || 'Something went wrong'}</Typography>}
<Table
className="k8s-list-table clusters-list-table"
- dataSource={isFetching || isLoading ? [] : formattedClustersData}
+ dataSource={showTableLoadingState ? [] : formattedClustersData}
columns={columns}
pagination={{
current: currentPage,
@@ -612,26 +675,25 @@ function K8sClustersList({
}}
scroll={{ x: true }}
loading={{
- spinning: isFetching || isLoading,
+ spinning: showTableLoadingState,
indicator: <Spin indicator={<LoadingOutlined size={14} spin />} />,
}}
locale={{
emptyText:
isFetching || isLoading ? null : (
<div className="no-filtered-hosts-message-container">
<div className="no-filtered-hosts-message-content">
<img
src="/Icons/emptyState.svg"
alt="thinking-emoji"
className="empty-state-svg"
/>
emptyText: showTableLoadingState ? null : (
<div className="no-filtered-hosts-message-container">
<div className="no-filtered-hosts-message-content">
<img
src="/Icons/emptyState.svg"
alt="thinking-emoji"
className="empty-state-svg"
/>
<Typography.Text className="no-filtered-hosts-message">
This query had no results. Edit your query and try again!
</Typography.Text>
</div>
<Typography.Text className="no-filtered-hosts-message">
This query had no results. Edit your query and try again!
</Typography.Text>
</div>
),
</div>
),
}}
tableLayout="fixed"
onChange={handleTableChange}

View File

@@ -33,7 +33,7 @@ import {
ScrollText,
X,
} from 'lucide-react';
- import { useCallback, useEffect, useMemo, useState } from 'react';
+ import { useCallback, useEffect, useMemo, useRef, useState } from 'react';
import { useSelector } from 'react-redux';
import { useSearchParams } from 'react-router-dom-v5-compat';
import { AppState } from 'store/reducers';
@@ -84,8 +84,12 @@ function DaemonSetDetails({
endTime: endMs,
}));
+ const lastSelectedInterval = useRef<Time | null>(null);
const [selectedInterval, setSelectedInterval] = useState<Time>(
- selectedTime as Time,
+ lastSelectedInterval.current
+ ? lastSelectedInterval.current
+ : (selectedTime as Time),
);
const [searchParams, setSearchParams] = useSearchParams();
@@ -211,10 +215,11 @@ function DaemonSetDetails({
}, [initialFilters, initialEventsFilters]);
useEffect(() => {
- setSelectedInterval(selectedTime as Time);
+ const currentSelectedInterval = lastSelectedInterval.current || selectedTime;
+ setSelectedInterval(currentSelectedInterval as Time);
- if (selectedTime !== 'custom') {
- const { maxTime, minTime } = GetMinMax(selectedTime);
+ if (currentSelectedInterval !== 'custom') {
+ const { maxTime, minTime } = GetMinMax(currentSelectedInterval);
setModalTimeRange({
startTime: Math.floor(minTime / 1000000000),
@@ -242,6 +247,7 @@ function DaemonSetDetails({
const handleTimeChange = useCallback(
(interval: Time | CustomTimeType, dateTimeRange?: [number, number]): void => {
+ lastSelectedInterval.current = interval as Time;
setSelectedInterval(interval as Time);
if (interval === 'custom' && dateTimeRange) {
@@ -476,6 +482,7 @@ function DaemonSetDetails({
};
const handleClose = (): void => {
+ lastSelectedInterval.current = null;
setSelectedInterval(selectedTime as Time);
if (selectedTime !== 'custom') {

View File

@@ -33,8 +33,8 @@ export const getDaemonSetMetricsQueryPayload = (
dotMetricsEnabled: boolean,
): GetQueryResultsProps[] => {
const k8sPodCpuUtilizationKey = dotMetricsEnabled
- ? 'k8s.pod.cpu.utilization'
- : 'k8s_pod_cpu_utilization';
+ ? 'k8s.pod.cpu.usage'
+ : 'k8s_pod_cpu_usage';
const k8sContainerCpuRequestKey = dotMetricsEnabled
? 'k8s.container.cpu_request'
@@ -84,7 +84,7 @@ export const getDaemonSetMetricsQueryPayload = (
{
aggregateAttribute: {
dataType: DataTypes.Float64,
- id: 'k8s_pod_cpu_utilization--float64--Gauge--true',
+ id: 'k8s_pod_cpu_usage--float64--Gauge--true',
isColumn: true,
isJSON: false,
key: k8sPodCpuUtilizationKey,

View File

@@ -191,6 +191,32 @@ function K8sDaemonSetsList({
// eslint-disable-next-line react-hooks/exhaustive-deps
}, [minTime, maxTime, orderBy, selectedRowData, groupBy]);
+ const groupedByRowDataQueryKey = useMemo(() => {
+ if (selectedDaemonSetUID) {
+ return [
+ 'daemonSetList',
+ JSON.stringify(queryFilters),
+ JSON.stringify(orderBy),
+ JSON.stringify(selectedRowData),
+ ];
+ }
+ return [
+ 'daemonSetList',
+ JSON.stringify(queryFilters),
+ JSON.stringify(orderBy),
+ JSON.stringify(selectedRowData),
+ String(minTime),
+ String(maxTime),
+ ];
+ }, [
+ queryFilters,
+ orderBy,
+ selectedDaemonSetUID,
+ minTime,
+ maxTime,
+ selectedRowData,
+ ]);
const {
data: groupedByRowData,
isFetching: isFetchingGroupedByRowData,
@@ -200,7 +226,7 @@ function K8sDaemonSetsList({
} = useGetK8sDaemonSetsList(
fetchGroupedByRowDataQuery as K8sDaemonSetsListPayload,
{
- queryKey: ['daemonSetList', fetchGroupedByRowDataQuery],
+ queryKey: groupedByRowDataQueryKey,
enabled: !!fetchGroupedByRowDataQuery && !!selectedRowData,
},
undefined,
@@ -251,11 +277,44 @@ function K8sDaemonSetsList({
[groupedByRowData, groupBy],
);
+ const queryKey = useMemo(() => {
+ if (selectedDaemonSetUID) {
+ return [
+ 'daemonSetList',
+ String(pageSize),
+ String(currentPage),
+ JSON.stringify(queryFilters),
+ JSON.stringify(orderBy),
+ JSON.stringify(groupBy),
+ ];
+ }
+ return [
+ 'daemonSetList',
+ String(pageSize),
+ String(currentPage),
+ JSON.stringify(queryFilters),
+ JSON.stringify(orderBy),
+ JSON.stringify(groupBy),
+ String(minTime),
+ String(maxTime),
+ ];
+ }, [
+ selectedDaemonSetUID,
+ pageSize,
+ currentPage,
+ queryFilters,
+ orderBy,
+ groupBy,
+ minTime,
+ maxTime,
+ ]);
const { data, isFetching, isLoading, isError } = useGetK8sDaemonSetsList(
query as K8sDaemonSetsListPayload,
{
- queryKey: ['daemonSetList', query],
+ queryKey,
enabled: !!query,
+ keepPreviousData: true,
},
undefined,
dotMetricsEnabled,
@@ -591,6 +650,9 @@ function K8sDaemonSetsList({
});
};
+ const showTableLoadingState =
+ (isFetching || isLoading) && formattedDaemonSetsData.length === 0;
return (
<div className="k8s-list">
<K8sHeader
@@ -603,6 +665,7 @@ function K8sDaemonSetsList({
handleGroupByChange={handleGroupByChange}
selectedGroupBy={groupBy}
entity={K8sCategory.DAEMONSETS}
+ showAutoRefresh={!selectedDaemonSetData}
/>
{isError && <Typography>{data?.error || 'Something went wrong'}</Typography>}
@@ -610,7 +673,7 @@ function K8sDaemonSetsList({
className={classNames('k8s-list-table', 'daemonSets-list-table', {
'expanded-daemonsets-list-table': isGroupedByAttribute,
})}
- dataSource={isFetching || isLoading ? [] : formattedDaemonSetsData}
+ dataSource={showTableLoadingState ? [] : formattedDaemonSetsData}
columns={columns}
pagination={{
current: currentPage,
@@ -622,26 +685,25 @@ function K8sDaemonSetsList({
}}
scroll={{ x: true }}
loading={{
- spinning: isFetching || isLoading,
+ spinning: showTableLoadingState,
indicator: <Spin indicator={<LoadingOutlined size={14} spin />} />,
}}
locale={{
emptyText:
isFetching || isLoading ? null : (
<div className="no-filtered-hosts-message-container">
<div className="no-filtered-hosts-message-content">
<img
src="/Icons/emptyState.svg"
alt="thinking-emoji"
className="empty-state-svg"
/>
emptyText: showTableLoadingState ? null : (
<div className="no-filtered-hosts-message-container">
<div className="no-filtered-hosts-message-content">
<img
src="/Icons/emptyState.svg"
alt="thinking-emoji"
className="empty-state-svg"
/>
<Typography.Text className="no-filtered-hosts-message">
This query had no results. Edit your query and try again!
</Typography.Text>
</div>
<Typography.Text className="no-filtered-hosts-message">
This query had no results. Edit your query and try again!
</Typography.Text>
</div>
),
</div>
),
}}
tableLayout="fixed"
onChange={handleTableChange}

View File

@@ -38,7 +38,7 @@ import {
ScrollText,
X,
} from 'lucide-react';
- import { useCallback, useEffect, useMemo, useState } from 'react';
+ import { useCallback, useEffect, useMemo, useRef, useState } from 'react';
import { useSelector } from 'react-redux';
import { useSearchParams } from 'react-router-dom-v5-compat';
import { AppState } from 'store/reducers';
@@ -88,8 +88,12 @@ function DeploymentDetails({
endTime: endMs,
}));
+ const lastSelectedInterval = useRef<Time | null>(null);
const [selectedInterval, setSelectedInterval] = useState<Time>(
- selectedTime as Time,
+ lastSelectedInterval.current
+ ? lastSelectedInterval.current
+ : (selectedTime as Time),
);
const [searchParams, setSearchParams] = useSearchParams();
@@ -215,10 +219,11 @@ function DeploymentDetails({
}, [initialFilters, initialEventsFilters]);
useEffect(() => {
- setSelectedInterval(selectedTime as Time);
+ const currentSelectedInterval = lastSelectedInterval.current || selectedTime;
+ setSelectedInterval(currentSelectedInterval as Time);
- if (selectedTime !== 'custom') {
- const { maxTime, minTime } = GetMinMax(selectedTime);
+ if (currentSelectedInterval !== 'custom') {
+ const { maxTime, minTime } = GetMinMax(currentSelectedInterval);
setModalTimeRange({
startTime: Math.floor(minTime / 1000000000),
@@ -246,6 +251,7 @@ function DeploymentDetails({
const handleTimeChange = useCallback(
(interval: Time | CustomTimeType, dateTimeRange?: [number, number]): void => {
+ lastSelectedInterval.current = interval as Time;
setSelectedInterval(interval as Time);
if (interval === 'custom' && dateTimeRange) {
@@ -487,6 +493,7 @@ function DeploymentDetails({
};
const handleClose = (): void => {
+ lastSelectedInterval.current = null;
setSelectedInterval(selectedTime as Time);
if (selectedTime !== 'custom') {

View File

@@ -33,8 +33,8 @@ export const getDeploymentMetricsQueryPayload = (
dotMetricsEnabled: boolean,
): GetQueryResultsProps[] => {
const k8sPodCpuUtilizationKey = dotMetricsEnabled
- ? 'k8s.pod.cpu.utilization'
- : 'k8s_pod_cpu_utilization';
+ ? 'k8s.pod.cpu.usage'
+ : 'k8s_pod_cpu_usage';
const k8sContainerCpuRequestKey = dotMetricsEnabled
? 'k8s.container.cpu_request'
@@ -80,7 +80,7 @@ export const getDeploymentMetricsQueryPayload = (
{
aggregateAttribute: {
dataType: DataTypes.Float64,
- id: 'k8s_pod_cpu_utilization--float64--Gauge--true',
+ id: 'k8s_pod_cpu_usage--float64--Gauge--true',
isColumn: true,
isJSON: false,
key: k8sPodCpuUtilizationKey,

View File

@@ -192,6 +192,32 @@ function K8sDeploymentsList({
// eslint-disable-next-line react-hooks/exhaustive-deps
}, [minTime, maxTime, orderBy, selectedRowData, groupBy]);
+ const groupedByRowDataQueryKey = useMemo(() => {
+ if (selectedDeploymentUID) {
+ return [
+ 'deploymentList',
+ JSON.stringify(queryFilters),
+ JSON.stringify(orderBy),
+ JSON.stringify(selectedRowData),
+ ];
+ }
+ return [
+ 'deploymentList',
+ JSON.stringify(queryFilters),
+ JSON.stringify(orderBy),
+ JSON.stringify(selectedRowData),
+ String(minTime),
+ String(maxTime),
+ ];
+ }, [
+ queryFilters,
+ orderBy,
+ selectedDeploymentUID,
+ minTime,
+ maxTime,
+ selectedRowData,
+ ]);
const {
data: groupedByRowData,
isFetching: isFetchingGroupedByRowData,
@@ -201,7 +227,7 @@ function K8sDeploymentsList({
} = useGetK8sDeploymentsList(
fetchGroupedByRowDataQuery as K8sDeploymentsListPayload,
{
- queryKey: ['deploymentList', fetchGroupedByRowDataQuery],
+ queryKey: groupedByRowDataQueryKey,
enabled: !!fetchGroupedByRowDataQuery && !!selectedRowData,
},
undefined,
@@ -252,11 +278,44 @@ function K8sDeploymentsList({
[groupedByRowData, groupBy],
);
+ const queryKey = useMemo(() => {
+ if (selectedDeploymentUID) {
+ return [
+ 'deploymentList',
+ String(pageSize),
+ String(currentPage),
+ JSON.stringify(queryFilters),
+ JSON.stringify(orderBy),
+ JSON.stringify(groupBy),
+ ];
+ }
+ return [
+ 'deploymentList',
+ String(pageSize),
+ String(currentPage),
+ JSON.stringify(queryFilters),
+ JSON.stringify(orderBy),
+ JSON.stringify(groupBy),
+ String(minTime),
+ String(maxTime),
+ ];
+ }, [
+ selectedDeploymentUID,
+ pageSize,
+ currentPage,
+ queryFilters,
+ orderBy,
+ groupBy,
+ minTime,
+ maxTime,
+ ]);
const { data, isFetching, isLoading, isError } = useGetK8sDeploymentsList(
query as K8sDeploymentsListPayload,
{
- queryKey: ['deploymentList', query],
+ queryKey,
enabled: !!query,
+ keepPreviousData: true,
},
undefined,
dotMetricsEnabled,
@@ -596,6 +655,9 @@ function K8sDeploymentsList({
});
};
+ const showTableLoadingState =
+ (isFetching || isLoading) && formattedDeploymentsData.length === 0;
return (
<div className="k8s-list">
<K8sHeader
@@ -608,6 +670,7 @@ function K8sDeploymentsList({
handleGroupByChange={handleGroupByChange}
selectedGroupBy={groupBy}
entity={K8sCategory.NODES}
+ showAutoRefresh={!selectedDeploymentData}
/>
{isError && <Typography>{data?.error || 'Something went wrong'}</Typography>}
@@ -615,7 +678,7 @@ function K8sDeploymentsList({
className={classNames('k8s-list-table', 'deployments-list-table', {
'expanded-deployments-list-table': isGroupedByAttribute,
})}
- dataSource={isFetching || isLoading ? [] : formattedDeploymentsData}
+ dataSource={showTableLoadingState ? [] : formattedDeploymentsData}
columns={columns}
pagination={{
current: currentPage,
@@ -627,26 +690,25 @@ function K8sDeploymentsList({
}}
scroll={{ x: true }}
loading={{
- spinning: isFetching || isLoading,
+ spinning: showTableLoadingState,
indicator: <Spin indicator={<LoadingOutlined size={14} spin />} />,
}}
locale={{
emptyText:
isFetching || isLoading ? null : (
<div className="no-filtered-hosts-message-container">
<div className="no-filtered-hosts-message-content">
<img
src="/Icons/emptyState.svg"
alt="thinking-emoji"
className="empty-state-svg"
/>
emptyText: showTableLoadingState ? null : (
<div className="no-filtered-hosts-message-container">
<div className="no-filtered-hosts-message-content">
<img
src="/Icons/emptyState.svg"
alt="thinking-emoji"
className="empty-state-svg"
/>
<Typography.Text className="no-filtered-hosts-message">
This query had no results. Edit your query and try again!
</Typography.Text>
</div>
<Typography.Text className="no-filtered-hosts-message">
This query had no results. Edit your query and try again!
</Typography.Text>
</div>
),
</div>
),
}}
tableLayout="fixed"
onChange={handleTableChange}

View File

@@ -270,7 +270,7 @@ export default function Events({
</div>
</div>
- {isLoading && <LoadingContainer />}
+ {isLoading && formattedEntityEvents.length === 0 && <LoadingContainer />}
{!isLoading && !isError && formattedEntityEvents.length === 0 && (
<EntityDetailsEmptyContainer category={category} view="events" />

View File

@@ -24,12 +24,13 @@ import {
import { getUPlotChartOptions } from 'lib/uPlotLib/getUplotChartOptions';
import { getUPlotChartData } from 'lib/uPlotLib/utils/getUplotChartData';
import { useCallback, useEffect, useMemo, useRef, useState } from 'react';
- import { useQueries, UseQueryResult } from 'react-query';
+ import { QueryFunctionContext, useQueries, UseQueryResult } from 'react-query';
import { SuccessResponse } from 'types/api';
import { MetricRangePayloadProps } from 'types/api/metrics/getQueryRange';
import { Options } from 'uplot';
import { FeatureKeys } from '../../../../constants/features';
+ import { useMultiIntersectionObserver } from '../../../../hooks/useMultiIntersectionObserver';
import { useAppContext } from '../../../../providers/App/App';
interface EntityMetricsProps<T> {
@@ -73,6 +74,12 @@ function EntityMetrics<T>({
const dotMetricsEnabled =
featureFlags?.find((flag) => flag.name === FeatureKeys.DOT_METRICS_ENABLED)
?.active || false;
+ const {
+ visibilities,
+ setElement,
+ } = useMultiIntersectionObserver(entityWidgetInfo.length, { threshold: 0.1 });
const queryPayloads = useMemo(
() =>
getEntityQueryPayload(
@@ -91,11 +98,15 @@ function EntityMetrics<T>({
);
const queries = useQueries(
- queryPayloads.map((payload) => ({
+ queryPayloads.map((payload, index) => ({
queryKey: [queryKey, payload, ENTITY_VERSION_V4, category],
- queryFn: (): Promise<SuccessResponse<MetricRangePayloadProps>> =>
- GetMetricQueryRange(payload, ENTITY_VERSION_V4),
- enabled: !!payload,
+ queryFn: ({
+ signal,
+ }: QueryFunctionContext): Promise<
+ SuccessResponse<MetricRangePayloadProps>
+ > => GetMetricQueryRange(payload, ENTITY_VERSION_V4, signal),
+ enabled: !!payload && visibilities[index],
+ keepPreviousData: true,
})),
);
@@ -186,7 +197,7 @@ function EntityMetrics<T>({
query: UseQueryResult<SuccessResponse<MetricRangePayloadProps>, unknown>,
idx: number,
): JSX.Element => {
- if (query.isLoading) {
+ if ((!query.data && query.isLoading) || !visibilities[idx]) {
return <Skeleton />;
}
@@ -196,7 +207,7 @@ function EntityMetrics<T>({
return <div>{errorMessage}</div>;
}
- const { panelType } = (query.data?.params as any).compositeQuery;
+ const panelType = (query.data?.params as any)?.compositeQuery?.panelType;
return (
<div
@@ -234,7 +245,7 @@ function EntityMetrics<T>({
</div>
<Row gutter={24} className="entity-metrics-container">
{queries.map((query, idx) => (
- <Col span={12} key={entityWidgetInfo[idx].title}>
+ <Col ref={setElement(idx)} span={12} key={entityWidgetInfo[idx].title}>
<Typography.Text>{entityWidgetInfo[idx].title}</Typography.Text>
<Card bordered className="entity-metrics-card" ref={graphRef}>
{renderCardContent(query, idx)}

View File

@@ -203,7 +203,7 @@ function EntityTraces({
{!isError && traces.length > 0 && (
<div className="entity-traces-table">
<TraceExplorerControls
- isLoading={isFetching}
+ isLoading={isFetching && traces.length === 0}
totalCount={totalCount}
perPageOptions={PER_PAGE_OPTIONS}
showSizeChanger={false}
@@ -212,7 +212,7 @@ function EntityTraces({
tableLayout="fixed"
pagination={false}
scroll={{ x: true }}
- loading={isFetching}
+ loading={isFetching && traces.length === 0}
dataSource={traces}
columns={traceListColumns}
onRow={(): Record<string, unknown> => ({

View File

@@ -33,7 +33,7 @@ import {
ScrollText,
X,
} from 'lucide-react';
- import { useCallback, useEffect, useMemo, useState } from 'react';
+ import { useCallback, useEffect, useMemo, useRef, useState } from 'react';
import { useSelector } from 'react-redux';
import { useSearchParams } from 'react-router-dom-v5-compat';
import { AppState } from 'store/reducers';
@@ -81,8 +81,12 @@ function JobDetails({
endTime: endMs,
}));
+ const lastSelectedInterval = useRef<Time | null>(null);
const [selectedInterval, setSelectedInterval] = useState<Time>(
- selectedTime as Time,
+ lastSelectedInterval.current
+ ? lastSelectedInterval.current
+ : (selectedTime as Time),
);
const [searchParams, setSearchParams] = useSearchParams();
@@ -204,10 +208,11 @@ function JobDetails({
}, [initialFilters, initialEventsFilters]);
useEffect(() => {
- setSelectedInterval(selectedTime as Time);
+ const currentSelectedInterval = lastSelectedInterval.current || selectedTime;
+ setSelectedInterval(currentSelectedInterval as Time);
- if (selectedTime !== 'custom') {
- const { maxTime, minTime } = GetMinMax(selectedTime);
+ if (currentSelectedInterval !== 'custom') {
+ const { maxTime, minTime } = GetMinMax(currentSelectedInterval);
setModalTimeRange({
startTime: Math.floor(minTime / 1000000000),
@@ -235,6 +240,7 @@ function JobDetails({
const handleTimeChange = useCallback(
(interval: Time | CustomTimeType, dateTimeRange?: [number, number]): void => {
+ lastSelectedInterval.current = interval as Time;
setSelectedInterval(interval as Time);
if (interval === 'custom' && dateTimeRange) {
@@ -469,6 +475,7 @@ function JobDetails({
};
const handleClose = (): void => {
+ lastSelectedInterval.current = null;
setSelectedInterval(selectedTime as Time);
if (selectedTime !== 'custom') {

View File

@@ -33,8 +33,8 @@ export const getJobMetricsQueryPayload = (
dotMetricsEnabled: boolean,
): GetQueryResultsProps[] => {
const k8sPodCpuUtilizationKey = dotMetricsEnabled
- ? 'k8s.pod.cpu.utilization'
- : 'k8s_pod_cpu_utilization';
+ ? 'k8s.pod.cpu.usage'
+ : 'k8s_pod_cpu_usage';
const k8sPodMemoryUsageKey = dotMetricsEnabled
? 'k8s.pod.memory.usage'
: 'k8s_pod_memory_usage';
@@ -59,7 +59,7 @@ export const getJobMetricsQueryPayload = (
{
aggregateAttribute: {
dataType: DataTypes.Float64,
- id: 'k8s_pod_cpu_utilization--float64--Gauge--true',
+ id: 'k8s_pod_cpu_usage--float64--Gauge--true',
isColumn: true,
isJSON: false,
key: k8sPodCpuUtilizationKey,

View File

@@ -186,6 +186,25 @@ function K8sJobsList({
// eslint-disable-next-line react-hooks/exhaustive-deps
}, [minTime, maxTime, orderBy, selectedRowData, groupBy]);
+ const groupedByRowDataQueryKey = useMemo(() => {
+ if (selectedJobUID) {
+ return [
+ 'jobList',
+ JSON.stringify(queryFilters),
+ JSON.stringify(orderBy),
+ JSON.stringify(selectedRowData),
+ ];
+ }
+ return [
+ 'jobList',
+ JSON.stringify(queryFilters),
+ JSON.stringify(orderBy),
+ JSON.stringify(selectedRowData),
+ String(minTime),
+ String(maxTime),
+ ];
+ }, [queryFilters, orderBy, selectedJobUID, minTime, maxTime, selectedRowData]);
const {
data: groupedByRowData,
isFetching: isFetchingGroupedByRowData,
@@ -195,7 +214,7 @@ function K8sJobsList({
} = useGetK8sJobsList(
fetchGroupedByRowDataQuery as K8sJobsListPayload,
{
- queryKey: ['jobList', fetchGroupedByRowDataQuery],
+ queryKey: groupedByRowDataQueryKey,
enabled: !!fetchGroupedByRowDataQuery && !!selectedRowData,
},
undefined,
@@ -251,11 +270,44 @@ function K8sJobsList({
return groupedByRowData?.payload?.data?.records || [];
}, [groupedByRowData, selectedRowData]);
const queryKey = useMemo(() => {
if (selectedJobUID) {
return [
'jobList',
String(pageSize),
String(currentPage),
JSON.stringify(queryFilters),
JSON.stringify(orderBy),
JSON.stringify(groupBy),
];
}
return [
'jobList',
String(pageSize),
String(currentPage),
JSON.stringify(queryFilters),
JSON.stringify(orderBy),
JSON.stringify(groupBy),
String(minTime),
String(maxTime),
];
}, [
selectedJobUID,
pageSize,
currentPage,
queryFilters,
orderBy,
groupBy,
minTime,
maxTime,
]);
const { data, isFetching, isLoading, isError } = useGetK8sJobsList(
query as K8sJobsListPayload,
{
queryKey: ['jobList', query],
queryKey,
enabled: !!query,
keepPreviousData: true,
},
undefined,
dotMetricsEnabled,
@@ -581,6 +633,7 @@ function K8sJobsList({
handleGroupByChange={handleGroupByChange}
selectedGroupBy={groupBy}
entity={K8sCategory.JOBS}
showAutoRefresh={!selectedJobData}
/>
{isError && <Typography>{data?.error || 'Something went wrong'}</Typography>}

View File
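Note: the memoized `queryKey` above drops `minTime`/`maxTime` from the React Query key while a row's detail view is open (`selectedJobUID` set), so background auto-refresh cannot trigger a refetch of the list underneath the drawer. A condensed sketch of the idea, with hypothetical parameter names:

```ts
import { useMemo } from 'react';

// Sketch: freeze the list query while a detail drawer is open by leaving the
// time range out of the React Query key. `detailOpen`, `filters` and
// `orderBy` are stand-ins for the real component state.
function useListQueryKey(
	detailOpen: boolean,
	filters: unknown,
	orderBy: unknown,
	minTime: number,
	maxTime: number,
): readonly unknown[] {
	return useMemo(() => {
		const base = ['jobList', JSON.stringify(filters), JSON.stringify(orderBy)];
		// With the drawer open, time updates do not change the key, so the
		// cached list keeps being served instead of refetched.
		return detailOpen ? base : [...base, String(minTime), String(maxTime)];
	}, [detailOpen, filters, orderBy, minTime, maxTime]);
}
```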

@@ -30,6 +30,7 @@ interface K8sHeaderProps {
handleFilterVisibilityChange: () => void;
isFiltersVisible: boolean;
entity: K8sCategory;
showAutoRefresh: boolean;
}
function K8sHeader({
@@ -46,6 +47,7 @@ function K8sHeader({
handleFilterVisibilityChange,
isFiltersVisible,
entity,
showAutoRefresh,
}: K8sHeaderProps): JSX.Element {
const [isFiltersSidePanelOpen, setIsFiltersSidePanelOpen] = useState(false);
const [searchParams, setSearchParams] = useSearchParams();
@@ -136,7 +138,7 @@ function K8sHeader({
<div className="k8s-list-controls-right">
<DateTimeSelectionV2
showAutoRefresh
showAutoRefresh={showAutoRefresh}
showRefreshText={false}
hideShareModal
/>

View File

@@ -190,6 +190,32 @@ function K8sNamespacesList({
// eslint-disable-next-line react-hooks/exhaustive-deps
}, [minTime, maxTime, orderBy, selectedRowData, groupBy]);
const groupedByRowDataQueryKey = useMemo(() => {
if (selectedNamespaceUID) {
return [
'namespaceList',
JSON.stringify(queryFilters),
JSON.stringify(orderBy),
JSON.stringify(selectedRowData),
];
}
return [
'namespaceList',
JSON.stringify(queryFilters),
JSON.stringify(orderBy),
JSON.stringify(selectedRowData),
String(minTime),
String(maxTime),
];
}, [
queryFilters,
orderBy,
selectedNamespaceUID,
minTime,
maxTime,
selectedRowData,
]);
const {
data: groupedByRowData,
isFetching: isFetchingGroupedByRowData,
@@ -199,7 +225,7 @@ function K8sNamespacesList({
} = useGetK8sNamespacesList(
fetchGroupedByRowDataQuery as K8sNamespacesListPayload,
{
queryKey: ['namespaceList', fetchGroupedByRowDataQuery],
queryKey: groupedByRowDataQueryKey,
enabled: !!fetchGroupedByRowDataQuery && !!selectedRowData,
},
undefined,
@@ -250,11 +276,44 @@ function K8sNamespacesList({
[groupedByRowData, groupBy],
);
const queryKey = useMemo(() => {
if (selectedNamespaceUID) {
return [
'namespaceList',
String(pageSize),
String(currentPage),
JSON.stringify(queryFilters),
JSON.stringify(orderBy),
JSON.stringify(groupBy),
];
}
return [
'namespaceList',
String(pageSize),
String(currentPage),
JSON.stringify(queryFilters),
JSON.stringify(orderBy),
JSON.stringify(groupBy),
String(minTime),
String(maxTime),
];
}, [
selectedNamespaceUID,
pageSize,
currentPage,
queryFilters,
orderBy,
groupBy,
minTime,
maxTime,
]);
const { data, isFetching, isLoading, isError } = useGetK8sNamespacesList(
query as K8sNamespacesListPayload,
{
queryKey: ['namespaceList', query],
queryKey,
enabled: !!query,
keepPreviousData: true,
},
undefined,
dotMetricsEnabled,
@@ -592,6 +651,9 @@ function K8sNamespacesList({
});
};
const showTableLoadingState =
(isFetching || isLoading) && formattedNamespacesData.length === 0;
return (
<div className="k8s-list">
<K8sHeader
@@ -604,12 +666,13 @@ function K8sNamespacesList({
handleGroupByChange={handleGroupByChange}
selectedGroupBy={groupBy}
entity={K8sCategory.NODES}
showAutoRefresh={!selectedNamespaceData}
/>
{isError && <Typography>{data?.error || 'Something went wrong'}</Typography>}
<Table
className="k8s-list-table namespaces-list-table"
dataSource={isFetching || isLoading ? [] : formattedNamespacesData}
dataSource={showTableLoadingState ? [] : formattedNamespacesData}
columns={columns}
pagination={{
current: currentPage,
@@ -621,26 +684,25 @@ function K8sNamespacesList({
}}
scroll={{ x: true }}
loading={{
spinning: isFetching || isLoading,
spinning: showTableLoadingState,
indicator: <Spin indicator={<LoadingOutlined size={14} spin />} />,
}}
locale={{
emptyText:
isFetching || isLoading ? null : (
<div className="no-filtered-hosts-message-container">
<div className="no-filtered-hosts-message-content">
<img
src="/Icons/emptyState.svg"
alt="thinking-emoji"
className="empty-state-svg"
/>
emptyText: showTableLoadingState ? null : (
<div className="no-filtered-hosts-message-container">
<div className="no-filtered-hosts-message-content">
<img
src="/Icons/emptyState.svg"
alt="thinking-emoji"
className="empty-state-svg"
/>
<Typography.Text className="no-filtered-hosts-message">
This query had no results. Edit your query and try again!
</Typography.Text>
</div>
<Typography.Text className="no-filtered-hosts-message">
This query had no results. Edit your query and try again!
</Typography.Text>
</div>
),
</div>
),
}}
tableLayout="fixed"
onChange={handleTableChange}

View File
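Note: the `showTableLoadingState` change above pairs with `keepPreviousData: true` so that refetches keep rendering the stale rows; the spinner and the empty `dataSource` are reserved for the very first load. A sketch of the guard:

```ts
// Sketch: show the table's loading state only when there is nothing to
// render yet. With keepPreviousData, subsequent refetches keep the old rows
// on screen instead of blanking the table.
function getShowTableLoadingState(
	isFetching: boolean,
	isLoading: boolean,
	rowCount: number,
): boolean {
	return (isFetching || isLoading) && rowCount === 0;
}
```

The table then uses `dataSource={show ? [] : rows}` and `loading={{ spinning: show }}`, as in the diffs above.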

@@ -35,7 +35,7 @@ import {
ScrollText,
X,
} from 'lucide-react';
import { useCallback, useEffect, useMemo, useState } from 'react';
import { useCallback, useEffect, useMemo, useRef, useState } from 'react';
import { useSelector } from 'react-redux';
import { useSearchParams } from 'react-router-dom-v5-compat';
import { AppState } from 'store/reducers';
@@ -85,8 +85,12 @@ function NamespaceDetails({
endTime: endMs,
}));
const lastSelectedInterval = useRef<Time | null>(null);
const [selectedInterval, setSelectedInterval] = useState<Time>(
selectedTime as Time,
lastSelectedInterval.current
? lastSelectedInterval.current
: (selectedTime as Time),
);
const [searchParams, setSearchParams] = useSearchParams();
@@ -195,10 +199,11 @@ function NamespaceDetails({
}, [initialFilters, initialEventsFilters]);
useEffect(() => {
setSelectedInterval(selectedTime as Time);
const currentSelectedInterval = lastSelectedInterval.current || selectedTime;
setSelectedInterval(currentSelectedInterval as Time);
if (selectedTime !== 'custom') {
const { maxTime, minTime } = GetMinMax(selectedTime);
if (currentSelectedInterval !== 'custom') {
const { maxTime, minTime } = GetMinMax(currentSelectedInterval);
setModalTimeRange({
startTime: Math.floor(minTime / 1000000000),
@@ -226,6 +231,7 @@ function NamespaceDetails({
const handleTimeChange = useCallback(
(interval: Time | CustomTimeType, dateTimeRange?: [number, number]): void => {
lastSelectedInterval.current = interval as Time;
setSelectedInterval(interval as Time);
if (interval === 'custom' && dateTimeRange) {
@@ -461,6 +467,7 @@ function NamespaceDetails({
};
const handleClose = (): void => {
lastSelectedInterval.current = null;
setSelectedInterval(selectedTime as Time);
if (selectedTime !== 'custom') {

View File

@@ -59,8 +59,8 @@ export const getNamespaceMetricsQueryPayload = (
const getKey = (dotKey: string, underscoreKey: string): string =>
dotMetricsEnabled ? dotKey : underscoreKey;
const k8sPodCpuUtilizationKey = getKey(
'k8s.pod.cpu.utilization',
'k8s_pod_cpu_utilization',
'k8s.pod.cpu.usage',
'k8s_pod_cpu_usage',
);
const k8sContainerCpuRequestKey = getKey(
'k8s.container.cpu_request',

View File

@@ -184,6 +184,32 @@ function K8sNodesList({
// eslint-disable-next-line react-hooks/exhaustive-deps
}, [minTime, maxTime, orderBy, selectedRowData, groupBy]);
const groupedByRowDataQueryKey = useMemo(() => {
if (selectedNodeUID) {
return [
'nodeList',
JSON.stringify(queryFilters),
JSON.stringify(orderBy),
JSON.stringify(selectedRowData),
];
}
return [
'nodeList',
JSON.stringify(queryFilters),
JSON.stringify(orderBy),
JSON.stringify(selectedRowData),
String(minTime),
String(maxTime),
];
}, [
queryFilters,
orderBy,
selectedNodeUID,
minTime,
maxTime,
selectedRowData,
]);
const {
data: groupedByRowData,
isFetching: isFetchingGroupedByRowData,
@@ -193,7 +219,7 @@ function K8sNodesList({
} = useGetK8sNodesList(
fetchGroupedByRowDataQuery as K8sNodesListPayload,
{
queryKey: ['nodeList', fetchGroupedByRowDataQuery],
queryKey: groupedByRowDataQueryKey,
enabled: !!fetchGroupedByRowDataQuery && !!selectedRowData,
},
undefined,
@@ -249,11 +275,44 @@ function K8sNodesList({
[groupedByRowData, groupBy],
);
const queryKey = useMemo(() => {
if (selectedNodeUID) {
return [
'nodeList',
String(pageSize),
String(currentPage),
JSON.stringify(queryFilters),
JSON.stringify(orderBy),
JSON.stringify(groupBy),
];
}
return [
'nodeList',
String(pageSize),
String(currentPage),
JSON.stringify(queryFilters),
JSON.stringify(orderBy),
JSON.stringify(groupBy),
String(minTime),
String(maxTime),
];
}, [
selectedNodeUID,
pageSize,
currentPage,
queryFilters,
orderBy,
groupBy,
minTime,
maxTime,
]);
const { data, isFetching, isLoading, isError } = useGetK8sNodesList(
query as K8sNodesListPayload,
{
queryKey: ['nodeList', query],
queryKey,
enabled: !!query,
keepPreviousData: true,
},
undefined,
dotMetricsEnabled,
@@ -571,6 +630,9 @@ function K8sNodesList({
});
};
const showTableLoadingState =
(isFetching || isLoading) && formattedNodesData.length === 0;
return (
<div className="k8s-list">
<K8sHeader
@@ -583,12 +645,13 @@ function K8sNodesList({
handleGroupByChange={handleGroupByChange}
selectedGroupBy={groupBy}
entity={K8sCategory.NODES}
showAutoRefresh={!selectedNodeData}
/>
{isError && <Typography>{data?.error || 'Something went wrong'}</Typography>}
<Table
className="k8s-list-table nodes-list-table"
dataSource={isFetching || isLoading ? [] : formattedNodesData}
dataSource={showTableLoadingState ? [] : formattedNodesData}
columns={columns}
pagination={{
current: currentPage,
@@ -600,26 +663,25 @@ function K8sNodesList({
}}
scroll={{ x: true }}
loading={{
spinning: isFetching || isLoading,
spinning: showTableLoadingState,
indicator: <Spin indicator={<LoadingOutlined size={14} spin />} />,
}}
locale={{
emptyText:
isFetching || isLoading ? null : (
<div className="no-filtered-hosts-message-container">
<div className="no-filtered-hosts-message-content">
<img
src="/Icons/emptyState.svg"
alt="thinking-emoji"
className="empty-state-svg"
/>
emptyText: showTableLoadingState ? null : (
<div className="no-filtered-hosts-message-container">
<div className="no-filtered-hosts-message-content">
<img
src="/Icons/emptyState.svg"
alt="thinking-emoji"
className="empty-state-svg"
/>
<Typography.Text className="no-filtered-hosts-message">
This query had no results. Edit your query and try again!
</Typography.Text>
</div>
<Typography.Text className="no-filtered-hosts-message">
This query had no results. Edit your query and try again!
</Typography.Text>
</div>
),
</div>
),
}}
tableLayout="fixed"
onChange={handleTableChange}

View File

@@ -38,7 +38,7 @@ import {
ScrollText,
X,
} from 'lucide-react';
import { useCallback, useEffect, useMemo, useState } from 'react';
import { useCallback, useEffect, useMemo, useRef, useState } from 'react';
import { useSelector } from 'react-redux';
import { useSearchParams } from 'react-router-dom-v5-compat';
import { AppState } from 'store/reducers';
@@ -85,8 +85,12 @@ function NodeDetails({
endTime: endMs,
}));
const lastSelectedInterval = useRef<Time | null>(null);
const [selectedInterval, setSelectedInterval] = useState<Time>(
selectedTime as Time,
lastSelectedInterval.current
? lastSelectedInterval.current
: (selectedTime as Time),
);
const [searchParams, setSearchParams] = useSearchParams();
@@ -195,10 +199,11 @@ function NodeDetails({
}, [initialFilters, initialEventsFilters]);
useEffect(() => {
setSelectedInterval(selectedTime as Time);
const currentSelectedInterval = lastSelectedInterval.current || selectedTime;
setSelectedInterval(currentSelectedInterval as Time);
if (selectedTime !== 'custom') {
const { maxTime, minTime } = GetMinMax(selectedTime);
if (currentSelectedInterval !== 'custom') {
const { maxTime, minTime } = GetMinMax(currentSelectedInterval);
setModalTimeRange({
startTime: Math.floor(minTime / 1000000000),
@@ -226,6 +231,7 @@ function NodeDetails({
const handleTimeChange = useCallback(
(interval: Time | CustomTimeType, dateTimeRange?: [number, number]): void => {
lastSelectedInterval.current = interval as Time;
setSelectedInterval(interval as Time);
if (interval === 'custom' && dateTimeRange) {
@@ -464,6 +470,7 @@ function NodeDetails({
};
const handleClose = (): void => {
lastSelectedInterval.current = null;
setSelectedInterval(selectedTime as Time);
if (selectedTime !== 'custom') {

View File

@@ -59,8 +59,8 @@ export const getNodeMetricsQueryPayload = (
const getKey = (dotKey: string, underscoreKey: string): string =>
dotMetricsEnabled ? dotKey : underscoreKey;
const k8sNodeCpuUtilizationKey = getKey(
'k8s.node.cpu.utilization',
'k8s_node_cpu_utilization',
'k8s.node.cpu.usage',
'k8s_node_cpu_usage',
);
const k8sNodeAllocatableCpuKey = getKey(
@@ -99,8 +99,8 @@ export const getNodeMetricsQueryPayload = (
);
const k8sPodCpuUtilizationKey = getKey(
'k8s.pod.cpu.utilization',
'k8s_pod_cpu_utilization',
'k8s.pod.cpu.usage',
'k8s_pod_cpu_usage',
);
const k8sPodMemoryUsageKey = getKey(
@@ -147,7 +147,7 @@ export const getNodeMetricsQueryPayload = (
{
aggregateAttribute: {
dataType: DataTypes.Float64,
id: 'k8s_node_cpu_utilization--float64--Gauge--true',
id: 'k8s_node_cpu_usage--float64--Gauge--true',
isColumn: true,
isJSON: false,
key: k8sNodeCpuUtilizationKey,
@@ -276,7 +276,7 @@ export const getNodeMetricsQueryPayload = (
{
aggregateAttribute: {
dataType: DataTypes.Float64,
id: 'k8s_node_cpu_utilization--float64--Gauge--true',
id: 'k8s_node_cpu_usage--float64--Gauge--true',
isColumn: true,
isJSON: false,
key: k8sNodeCpuUtilizationKey,
@@ -319,7 +319,7 @@ export const getNodeMetricsQueryPayload = (
{
aggregateAttribute: {
dataType: DataTypes.Float64,
id: 'k8s_node_cpu_utilization--float64--Gauge--true',
id: 'k8s_node_cpu_usage--float64--Gauge--true',
isColumn: true,
isJSON: false,
key: k8sNodeCpuUtilizationKey,
@@ -729,7 +729,7 @@ export const getNodeMetricsQueryPayload = (
{
aggregateAttribute: {
dataType: DataTypes.Float64,
id: 'k8s_node_cpu_utilization--float64--Gauge--true',
id: 'k8s_node_cpu_usage--float64--Gauge--true',
isColumn: true,
isJSON: false,
key: k8sNodeCpuUtilizationKey,
@@ -1079,7 +1079,7 @@ export const getNodeMetricsQueryPayload = (
{
aggregateAttribute: {
dataType: DataTypes.Float64,
id: 'k8s_pod_cpu_utilization--float64--Gauge--true',
id: 'k8s_pod_cpu_usage--float64--Gauge--true',
isColumn: true,
isJSON: false,
key: k8sPodCpuUtilizationKey,

View File

@@ -205,11 +205,44 @@ function K8sPodsList({
return queryPayload;
}, [pageSize, currentPage, queryFilters, minTime, maxTime, orderBy, groupBy]);
const queryKey = useMemo(() => {
if (selectedPodUID) {
return [
'podList',
String(pageSize),
String(currentPage),
JSON.stringify(queryFilters),
JSON.stringify(orderBy),
JSON.stringify(groupBy),
];
}
return [
'podList',
String(pageSize),
String(currentPage),
JSON.stringify(queryFilters),
JSON.stringify(orderBy),
JSON.stringify(groupBy),
String(minTime),
String(maxTime),
];
}, [
selectedPodUID,
pageSize,
currentPage,
queryFilters,
orderBy,
groupBy,
minTime,
maxTime,
]);
const { data, isFetching, isLoading, isError } = useGetK8sPodsList(
query as K8sPodsListPayload,
{
queryKey: ['hostList', query],
queryKey,
enabled: !!query,
keepPreviousData: true,
},
undefined,
dotMetricsEnabled,
@@ -261,6 +294,25 @@ function K8sPodsList({
// eslint-disable-next-line react-hooks/exhaustive-deps
}, [minTime, maxTime, orderBy, selectedRowData]);
const groupedByRowDataQueryKey = useMemo(() => {
if (selectedPodUID) {
return [
'podList',
JSON.stringify(queryFilters),
JSON.stringify(orderBy),
JSON.stringify(selectedRowData),
];
}
return [
'podList',
JSON.stringify(queryFilters),
JSON.stringify(orderBy),
JSON.stringify(selectedRowData),
String(minTime),
String(maxTime),
];
}, [queryFilters, orderBy, selectedPodUID, minTime, maxTime, selectedRowData]);
const {
data: groupedByRowData,
isFetching: isFetchingGroupedByRowData,
@@ -270,7 +322,7 @@ function K8sPodsList({
} = useGetK8sPodsList(
fetchGroupedByRowDataQuery as K8sPodsListPayload,
{
queryKey: ['hostList', fetchGroupedByRowDataQuery],
queryKey: groupedByRowDataQueryKey,
enabled: !!fetchGroupedByRowDataQuery && !!selectedRowData,
},
undefined,
@@ -629,6 +681,9 @@ function K8sPodsList({
});
};
const showTableLoadingState =
(isFetching || isLoading) && formattedPodsData.length === 0;
return (
<div className="k8s-list">
<K8sHeader
@@ -645,6 +700,7 @@ function K8sPodsList({
onAddColumn={handleAddColumn}
onRemoveColumn={handleRemoveColumn}
entity={K8sCategory.PODS}
showAutoRefresh={!selectedPodData}
/>
{isError && <Typography>{data?.error || 'Something went wrong'}</Typography>}
@@ -652,7 +708,7 @@ function K8sPodsList({
className={classNames('k8s-list-table', {
'expanded-k8s-list-table': isGroupedByAttribute,
})}
dataSource={isFetching || isLoading ? [] : formattedPodsData}
dataSource={showTableLoadingState ? [] : formattedPodsData}
columns={columns}
pagination={{
current: currentPage,
@@ -663,26 +719,25 @@ function K8sPodsList({
onChange: onPaginationChange,
}}
loading={{
spinning: isFetching || isLoading,
spinning: showTableLoadingState,
indicator: <Spin indicator={<LoadingOutlined size={14} spin />} />,
}}
locale={{
emptyText:
isFetching || isLoading ? null : (
<div className="no-filtered-hosts-message-container">
<div className="no-filtered-hosts-message-content">
<img
src="/Icons/emptyState.svg"
alt="thinking-emoji"
className="empty-state-svg"
/>
emptyText: showTableLoadingState ? null : (
<div className="no-filtered-hosts-message-container">
<div className="no-filtered-hosts-message-content">
<img
src="/Icons/emptyState.svg"
alt="thinking-emoji"
className="empty-state-svg"
/>
<Typography.Text className="no-filtered-hosts-message">
This query had no results. Edit your query and try again!
</Typography.Text>
</div>
<Typography.Text className="no-filtered-hosts-message">
This query had no results. Edit your query and try again!
</Typography.Text>
</div>
),
</div>
),
}}
scroll={{ x: true }}
tableLayout="fixed"

View File

@@ -39,7 +39,7 @@ import {
ScrollText,
X,
} from 'lucide-react';
import { useCallback, useEffect, useMemo, useState } from 'react';
import { useCallback, useEffect, useMemo, useRef, useState } from 'react';
import { useSelector } from 'react-redux';
import { useSearchParams } from 'react-router-dom-v5-compat';
import { AppState } from 'store/reducers';
@@ -89,8 +89,12 @@ function PodDetails({
endTime: endMs,
}));
const lastSelectedInterval = useRef<Time | null>(null);
const [selectedInterval, setSelectedInterval] = useState<Time>(
selectedTime as Time,
lastSelectedInterval.current
? lastSelectedInterval.current
: (selectedTime as Time),
);
const [searchParams, setSearchParams] = useSearchParams();
@@ -212,10 +216,11 @@ function PodDetails({
}, [initialFilters, initialEventsFilters]);
useEffect(() => {
setSelectedInterval(selectedTime as Time);
const currentSelectedInterval = lastSelectedInterval.current || selectedTime;
setSelectedInterval(currentSelectedInterval as Time);
if (selectedTime !== 'custom') {
const { maxTime, minTime } = GetMinMax(selectedTime);
if (currentSelectedInterval !== 'custom') {
const { maxTime, minTime } = GetMinMax(currentSelectedInterval);
setModalTimeRange({
startTime: Math.floor(minTime / TimeRangeOffset),
@@ -243,6 +248,7 @@ function PodDetails({
const handleTimeChange = useCallback(
(interval: Time | CustomTimeType, dateTimeRange?: [number, number]): void => {
lastSelectedInterval.current = interval as Time;
setSelectedInterval(interval as Time);
if (interval === 'custom' && dateTimeRange) {
@@ -485,6 +491,7 @@ function PodDetails({
};
const handleClose = (): void => {
lastSelectedInterval.current = null;
setSelectedInterval(selectedTime as Time);
if (selectedTime !== 'custom') {

View File

@@ -72,10 +72,7 @@ export const getPodMetricsQueryPayload = (
dotMetricsEnabled ? dotKey : underscoreKey;
const k8sContainerNameKey = getKey('k8s.container.name', 'k8s_container_name');
const k8sPodCpuUtilKey = getKey(
'k8s.pod.cpu.utilization',
'k8s_pod_cpu_utilization',
);
const k8sPodCpuUtilKey = getKey('k8s.pod.cpu.usage', 'k8s_pod_cpu_usage');
const k8sPodCpuReqUtilKey = getKey(
'k8s.pod.cpu_request_utilization',
@@ -115,8 +112,8 @@ export const getPodMetricsQueryPayload = (
);
const containerCpuUtilKey = getKey(
'container.cpu.utilization',
'container_cpu_utilization',
'container.cpu.usage',
'container_cpu_usage',
);
const k8sContainerCpuRequestKey = getKey(
@@ -189,7 +186,7 @@ export const getPodMetricsQueryPayload = (
{
aggregateAttribute: {
dataType: DataTypes.Float64,
id: 'k8s_pod_cpu_utilization--float64--Gauge--true',
id: 'k8s_pod_cpu_usage--float64--Gauge--true',
isColumn: true,
isJSON: false,
key: k8sPodCpuUtilKey,
@@ -245,7 +242,7 @@ export const getPodMetricsQueryPayload = (
{
aggregateAttribute: {
dataType: DataTypes.Float64,
id: 'k8s_pod_cpu_utilization--float64--Gauge--true',
id: 'k8s_pod_cpu_usage--float64--Gauge--true',
isColumn: true,
isJSON: false,
key: k8sPodCpuUtilKey,
@@ -301,7 +298,7 @@ export const getPodMetricsQueryPayload = (
{
aggregateAttribute: {
dataType: DataTypes.Float64,
id: 'k8s_pod_cpu_utilization--float64--Gauge--true',
id: 'k8s_pod_cpu_usage--float64--Gauge--true',
isColumn: true,
isJSON: false,
key: k8sPodCpuUtilKey,
@@ -1570,7 +1567,7 @@ export const getPodMetricsQueryPayload = (
{
aggregateAttribute: {
dataType: DataTypes.Float64,
id: 'container_cpu_utilization--float64--Gauge--true',
id: 'container_cpu_usage--float64--Gauge--true',
isColumn: true,
isJSON: false,
key: containerCpuUtilKey,
@@ -1668,7 +1665,7 @@ export const getPodMetricsQueryPayload = (
{
aggregateAttribute: {
dataType: DataTypes.Float64,
id: 'container_cpu_utilization--float64--Gauge--true',
id: 'container_cpu_usage--float64--Gauge--true',
isColumn: true,
isJSON: false,
key: containerCpuUtilKey,

View File

@@ -191,6 +191,32 @@ function K8sStatefulSetsList({
// eslint-disable-next-line react-hooks/exhaustive-deps
}, [minTime, maxTime, orderBy, selectedRowData, groupBy]);
const groupedByRowDataQueryKey = useMemo(() => {
if (selectedStatefulSetUID) {
return [
'statefulSetList',
JSON.stringify(queryFilters),
JSON.stringify(orderBy),
JSON.stringify(selectedRowData),
];
}
return [
'statefulSetList',
JSON.stringify(queryFilters),
JSON.stringify(orderBy),
JSON.stringify(selectedRowData),
String(minTime),
String(maxTime),
];
}, [
queryFilters,
orderBy,
selectedStatefulSetUID,
minTime,
maxTime,
selectedRowData,
]);
const {
data: groupedByRowData,
isFetching: isFetchingGroupedByRowData,
@@ -200,7 +226,7 @@ function K8sStatefulSetsList({
} = useGetK8sStatefulSetsList(
fetchGroupedByRowDataQuery as K8sStatefulSetsListPayload,
{
queryKey: ['statefulSetList', fetchGroupedByRowDataQuery],
queryKey: groupedByRowDataQueryKey,
enabled: !!fetchGroupedByRowDataQuery && !!selectedRowData,
},
undefined,
@@ -256,11 +282,44 @@ function K8sStatefulSetsList({
return groupedByRowData?.payload?.data?.records || [];
}, [groupedByRowData, selectedRowData]);
const queryKey = useMemo(() => {
if (selectedStatefulSetUID) {
return [
'statefulSetList',
String(pageSize),
String(currentPage),
JSON.stringify(queryFilters),
JSON.stringify(orderBy),
JSON.stringify(groupBy),
];
}
return [
'statefulSetList',
String(pageSize),
String(currentPage),
JSON.stringify(queryFilters),
JSON.stringify(orderBy),
JSON.stringify(groupBy),
String(minTime),
String(maxTime),
];
}, [
selectedStatefulSetUID,
pageSize,
currentPage,
queryFilters,
orderBy,
groupBy,
minTime,
maxTime,
]);
const { data, isFetching, isLoading, isError } = useGetK8sStatefulSetsList(
query as K8sStatefulSetsListPayload,
{
queryKey: ['statefulSetList', query],
queryKey,
enabled: !!query,
keepPreviousData: true,
},
undefined,
dotMetricsEnabled,
@@ -592,6 +651,9 @@ function K8sStatefulSetsList({
});
};
const showTableLoadingState =
(isFetching || isLoading) && formattedStatefulSetsData.length === 0;
return (
<div className="k8s-list">
<K8sHeader
@@ -604,6 +666,7 @@ function K8sStatefulSetsList({
handleGroupByChange={handleGroupByChange}
selectedGroupBy={groupBy}
entity={K8sCategory.STATEFULSETS}
showAutoRefresh={!selectedStatefulSetData}
/>
{isError && <Typography>{data?.error || 'Something went wrong'}</Typography>}
@@ -611,7 +674,7 @@ function K8sStatefulSetsList({
className={classNames('k8s-list-table', 'statefulSets-list-table', {
'expanded-statefulsets-list-table': isGroupedByAttribute,
})}
dataSource={isFetching || isLoading ? [] : formattedStatefulSetsData}
dataSource={showTableLoadingState ? [] : formattedStatefulSetsData}
columns={columns}
pagination={{
current: currentPage,
@@ -623,26 +686,25 @@ function K8sStatefulSetsList({
}}
scroll={{ x: true }}
loading={{
spinning: isFetching || isLoading,
spinning: showTableLoadingState,
indicator: <Spin indicator={<LoadingOutlined size={14} spin />} />,
}}
locale={{
emptyText:
isFetching || isLoading ? null : (
<div className="no-filtered-hosts-message-container">
<div className="no-filtered-hosts-message-content">
<img
src="/Icons/emptyState.svg"
alt="thinking-emoji"
className="empty-state-svg"
/>
emptyText: showTableLoadingState ? null : (
<div className="no-filtered-hosts-message-container">
<div className="no-filtered-hosts-message-content">
<img
src="/Icons/emptyState.svg"
alt="thinking-emoji"
className="empty-state-svg"
/>
<Typography.Text className="no-filtered-hosts-message">
This query had no results. Edit your query and try again!
</Typography.Text>
</div>
<Typography.Text className="no-filtered-hosts-message">
This query had no results. Edit your query and try again!
</Typography.Text>
</div>
),
</div>
),
}}
tableLayout="fixed"
onChange={handleTableChange}

View File

@@ -38,7 +38,7 @@ import {
ScrollText,
X,
} from 'lucide-react';
import { useCallback, useEffect, useMemo, useState } from 'react';
import { useCallback, useEffect, useMemo, useRef, useState } from 'react';
import { useSelector } from 'react-redux';
import { useSearchParams } from 'react-router-dom-v5-compat';
import { AppState } from 'store/reducers';
@@ -84,8 +84,12 @@ function StatefulSetDetails({
endTime: endMs,
}));
const lastSelectedInterval = useRef<Time | null>(null);
const [selectedInterval, setSelectedInterval] = useState<Time>(
selectedTime as Time,
lastSelectedInterval.current
? lastSelectedInterval.current
: (selectedTime as Time),
);
const [searchParams, setSearchParams] = useSearchParams();
@@ -211,10 +215,11 @@ function StatefulSetDetails({
}, [initialFilters, initialEventsFilters]);
useEffect(() => {
setSelectedInterval(selectedTime as Time);
const currentSelectedInterval = lastSelectedInterval.current || selectedTime;
setSelectedInterval(currentSelectedInterval as Time);
if (selectedTime !== 'custom') {
const { maxTime, minTime } = GetMinMax(selectedTime);
if (currentSelectedInterval !== 'custom') {
const { maxTime, minTime } = GetMinMax(currentSelectedInterval);
setModalTimeRange({
startTime: Math.floor(minTime / 1000000000),
@@ -242,6 +247,7 @@ function StatefulSetDetails({
const handleTimeChange = useCallback(
(interval: Time | CustomTimeType, dateTimeRange?: [number, number]): void => {
lastSelectedInterval.current = interval as Time;
setSelectedInterval(interval as Time);
if (interval === 'custom' && dateTimeRange) {
@@ -477,6 +483,7 @@ function StatefulSetDetails({
};
const handleClose = (): void => {
lastSelectedInterval.current = null;
setSelectedInterval(selectedTime as Time);
if (selectedTime !== 'custom') {

View File

@@ -49,8 +49,8 @@ export const getStatefulSetMetricsQueryPayload = (
const k8sPodNameKey = dotMetricsEnabled ? 'k8s.pod.name' : 'k8s_pod_name';
const k8sPodCpuUtilKey = dotMetricsEnabled
? 'k8s.pod.cpu.utilization'
: 'k8s_pod_cpu_utilization';
? 'k8s.pod.cpu.usage'
: 'k8s_pod_cpu_usage';
const k8sContainerCpuRequestKey = dotMetricsEnabled
? 'k8s.container.cpu_request'
: 'k8s_container_cpu_request';

View File

@@ -574,6 +574,9 @@ function K8sVolumesList({
});
};
const showTableLoadingState =
(isFetching || isLoading) && formattedVolumesData.length === 0;
return (
<div className="k8s-list">
<K8sHeader
@@ -586,6 +589,7 @@ function K8sVolumesList({
handleGroupByChange={handleGroupByChange}
selectedGroupBy={groupBy}
entity={K8sCategory.NODES}
showAutoRefresh={!selectedVolumeData}
/>
{isError && <Typography>{data?.error || 'Something went wrong'}</Typography>}
@@ -593,7 +597,7 @@ function K8sVolumesList({
className={classNames('k8s-list-table', 'volumes-list-table', {
'expanded-volumes-list-table': isGroupedByAttribute,
})}
dataSource={isFetching || isLoading ? [] : formattedVolumesData}
dataSource={showTableLoadingState ? [] : formattedVolumesData}
columns={columns}
pagination={{
current: currentPage,
@@ -605,26 +609,25 @@ function K8sVolumesList({
}}
scroll={{ x: true }}
loading={{
spinning: isFetching || isLoading,
spinning: showTableLoadingState,
indicator: <Spin indicator={<LoadingOutlined size={14} spin />} />,
}}
locale={{
emptyText:
isFetching || isLoading ? null : (
<div className="no-filtered-hosts-message-container">
<div className="no-filtered-hosts-message-content">
<img
src="/Icons/emptyState.svg"
alt="thinking-emoji"
className="empty-state-svg"
/>
emptyText: showTableLoadingState ? null : (
<div className="no-filtered-hosts-message-container">
<div className="no-filtered-hosts-message-content">
<img
src="/Icons/emptyState.svg"
alt="thinking-emoji"
className="empty-state-svg"
/>
<Typography.Text className="no-filtered-hosts-message">
This query had no results. Edit your query and try again!
</Typography.Text>
</div>
<Typography.Text className="no-filtered-hosts-message">
This query had no results. Edit your query and try again!
</Typography.Text>
</div>
),
</div>
),
}}
tableLayout="fixed"
onChange={handleTableChange}

View File

@@ -13,7 +13,7 @@ import {
import { useIsDarkMode } from 'hooks/useDarkMode';
import GetMinMax from 'lib/getMinMax';
import { X } from 'lucide-react';
import { useCallback, useEffect, useMemo, useState } from 'react';
import { useCallback, useEffect, useMemo, useRef, useState } from 'react';
import { useSelector } from 'react-redux';
import { AppState } from 'store/reducers';
import { GlobalReducer } from 'types/reducer/globalTime';
@@ -44,8 +44,12 @@ function VolumeDetails({
endTime: endMs,
}));
const lastSelectedInterval = useRef<Time | null>(null);
const [selectedInterval, setSelectedInterval] = useState<Time>(
selectedTime as Time,
lastSelectedInterval.current
? lastSelectedInterval.current
: (selectedTime as Time),
);
const isDarkMode = useIsDarkMode();
@@ -62,10 +66,11 @@ function VolumeDetails({
}, [volume]);
useEffect(() => {
setSelectedInterval(selectedTime as Time);
const currentSelectedInterval = lastSelectedInterval.current || selectedTime;
setSelectedInterval(currentSelectedInterval as Time);
if (selectedTime !== 'custom') {
const { maxTime, minTime } = GetMinMax(selectedTime);
if (currentSelectedInterval !== 'custom') {
const { maxTime, minTime } = GetMinMax(currentSelectedInterval);
setModalTimeRange({
startTime: Math.floor(minTime / 1000000000),
@@ -76,6 +81,7 @@ function VolumeDetails({
const handleTimeChange = useCallback(
(interval: Time | CustomTimeType, dateTimeRange?: [number, number]): void => {
lastSelectedInterval.current = interval as Time;
setSelectedInterval(interval as Time);
if (interval === 'custom' && dateTimeRange) {
@@ -104,6 +110,7 @@ function VolumeDetails({
);
const handleClose = (): void => {
lastSelectedInterval.current = null;
setSelectedInterval(selectedTime as Time);
if (selectedTime !== 'custom') {

View File

@@ -38,28 +38,28 @@ export const K8sCategories = {
export const underscoreMap = {
[K8sCategory.HOSTS]: 'system_cpu_load_average_15m',
[K8sCategory.PODS]: 'k8s_pod_cpu_utilization',
[K8sCategory.NODES]: 'k8s_node_cpu_utilization',
[K8sCategory.NAMESPACES]: 'k8s_pod_cpu_utilization',
[K8sCategory.CLUSTERS]: 'k8s_node_cpu_utilization',
[K8sCategory.DEPLOYMENTS]: 'k8s_pod_cpu_utilization',
[K8sCategory.STATEFULSETS]: 'k8s_pod_cpu_utilization',
[K8sCategory.DAEMONSETS]: 'k8s_pod_cpu_utilization',
[K8sCategory.CONTAINERS]: 'k8s_pod_cpu_utilization',
[K8sCategory.PODS]: 'k8s_pod_cpu_usage',
[K8sCategory.NODES]: 'k8s_node_cpu_usage',
[K8sCategory.NAMESPACES]: 'k8s_pod_cpu_usage',
[K8sCategory.CLUSTERS]: 'k8s_node_cpu_usage',
[K8sCategory.DEPLOYMENTS]: 'k8s_pod_cpu_usage',
[K8sCategory.STATEFULSETS]: 'k8s_pod_cpu_usage',
[K8sCategory.DAEMONSETS]: 'k8s_pod_cpu_usage',
[K8sCategory.CONTAINERS]: 'k8s_pod_cpu_usage',
[K8sCategory.JOBS]: 'k8s_job_desired_successful_pods',
[K8sCategory.VOLUMES]: 'k8s_volume_capacity',
};
export const dotMap = {
[K8sCategory.HOSTS]: 'system.cpu.load_average.15m',
[K8sCategory.PODS]: 'k8s.pod.cpu.utilization',
[K8sCategory.NODES]: 'k8s.node.cpu.utilization',
[K8sCategory.NAMESPACES]: 'k8s.pod.cpu.utilization',
[K8sCategory.CLUSTERS]: 'k8s.node.cpu.utilization',
[K8sCategory.DEPLOYMENTS]: 'k8s.pod.cpu.utilization',
[K8sCategory.STATEFULSETS]: 'k8s.pod.cpu.utilization',
[K8sCategory.DAEMONSETS]: 'k8s.pod.cpu.utilization',
[K8sCategory.CONTAINERS]: 'k8s.pod.cpu.utilization',
[K8sCategory.PODS]: 'k8s.pod.cpu.usage',
[K8sCategory.NODES]: 'k8s.node.cpu.usage',
[K8sCategory.NAMESPACES]: 'k8s.pod.cpu.usage',
[K8sCategory.CLUSTERS]: 'k8s.node.cpu.usage',
[K8sCategory.DEPLOYMENTS]: 'k8s.pod.cpu.usage',
[K8sCategory.STATEFULSETS]: 'k8s.pod.cpu.usage',
[K8sCategory.DAEMONSETS]: 'k8s.pod.cpu.usage',
[K8sCategory.CONTAINERS]: 'k8s.pod.cpu.usage',
[K8sCategory.JOBS]: 'k8s.job.desired_successful_pods',
[K8sCategory.VOLUMES]: 'k8s.volume.capacity',
};
@@ -96,8 +96,8 @@ export function GetPodsQuickFiltersConfig(
// Define aggregate attribute (metric) name
const cpuUtilizationMetric = dotMetricsEnabled
? 'k8s.pod.cpu.utilization'
: 'k8s_pod_cpu_utilization';
? 'k8s.pod.cpu.usage'
: 'k8s_pod_cpu_usage';
return [
{
@@ -252,8 +252,8 @@ export function GetNodesQuickFiltersConfig(
// Define aggregate metric name for node CPU utilization
const cpuUtilMetric = dotMetricsEnabled
? 'k8s.node.cpu.utilization'
: 'k8s_node_cpu_utilization';
? 'k8s.node.cpu.usage'
: 'k8s_node_cpu_usage';
const environmentKey = dotMetricsEnabled
? 'deployment.environment'
: 'deployment_environment';
@@ -314,8 +314,8 @@ export function GetNamespaceQuickFiltersConfig(
: 'k8s_namespace_name';
const clusterKey = dotMetricsEnabled ? 'k8s.cluster.name' : 'k8s_cluster_name';
const cpuUtilMetric = dotMetricsEnabled
? 'k8s.pod.cpu.utilization'
: 'k8s_pod_cpu_utilization';
? 'k8s.pod.cpu.usage'
: 'k8s_pod_cpu_usage';
const environmentKey = dotMetricsEnabled
? 'deployment.environment'
: 'deployment_environment';
@@ -373,8 +373,8 @@ export function GetClustersQuickFiltersConfig(
): IQuickFiltersConfig[] {
const clusterKey = dotMetricsEnabled ? 'k8s.cluster.name' : 'k8s_cluster_name';
const cpuUtilMetric = dotMetricsEnabled
? 'k8s.node.cpu.utilization'
: 'k8s_node_cpu_utilization';
? 'k8s.node.cpu.usage'
: 'k8s_node_cpu_usage';
const environmentKey = dotMetricsEnabled
? 'deployment.environment'
: 'deployment_environment';
@@ -541,9 +541,7 @@ export function GetDeploymentsQuickFiltersConfig(
? 'k8s.namespace.name'
: 'k8s_namespace_name';
const clusterKey = dotMetricsEnabled ? 'k8s.cluster.name' : 'k8s_cluster_name';
const metric = dotMetricsEnabled
? 'k8s.pod.cpu.utilization'
: 'k8s_pod_cpu_utilization';
const metric = dotMetricsEnabled ? 'k8s.pod.cpu.usage' : 'k8s_pod_cpu_usage';
const environmentKey = dotMetricsEnabled
? 'deployment.environment'
: 'deployment_environment';
@@ -622,9 +620,7 @@ export function GetStatefulsetsQuickFiltersConfig(
? 'k8s.namespace.name'
: 'k8s_namespace_name';
const clusterKey = dotMetricsEnabled ? 'k8s.cluster.name' : 'k8s_cluster_name';
const metric = dotMetricsEnabled
? 'k8s.pod.cpu.utilization'
: 'k8s_pod_cpu_utilization';
const metric = dotMetricsEnabled ? 'k8s.pod.cpu.usage' : 'k8s_pod_cpu_usage';
const environmentKey = dotMetricsEnabled
? 'deployment.environment'
: 'deployment_environment';
@@ -704,8 +700,8 @@ export function GetDaemonsetsQuickFiltersConfig(
: 'k8s_namespace_name';
const clusterKey = dotMetricsEnabled ? 'k8s.cluster.name' : 'k8s_cluster_name';
const metricName = dotMetricsEnabled
? 'k8s.pod.cpu.utilization'
: 'k8s_pod_cpu_utilization';
? 'k8s.pod.cpu.usage'
: 'k8s_pod_cpu_usage';
const environmentKey = dotMetricsEnabled
? 'deployment.environment'
: 'deployment_environment';
@@ -781,8 +777,8 @@ export function GetJobsQuickFiltersConfig(
: 'k8s_namespace_name';
const clusterKey = dotMetricsEnabled ? 'k8s.cluster.name' : 'k8s_cluster_name';
const metricName = dotMetricsEnabled
? 'k8s.pod.cpu.utilization'
: 'k8s_pod_cpu_utilization';
? 'k8s.pod.cpu.usage'
: 'k8s_pod_cpu_usage';
const environmentKey = dotMetricsEnabled
? 'deployment.environment'
: 'deployment_environment';

View File

@@ -38,6 +38,7 @@ import dayjs from 'dayjs';
import { useGetDeploymentsData } from 'hooks/CustomDomain/useGetDeploymentsData';
import { useGetAllIngestionsKeys } from 'hooks/IngestionKeys/useGetAllIngestionKeys';
import useDebouncedFn from 'hooks/useDebouncedFunction';
import { useGetTenantLicense } from 'hooks/useGetTenantLicense';
import { useNotifications } from 'hooks/useNotifications';
import { isNil, isUndefined } from 'lodash-es';
import {
@@ -167,6 +168,8 @@ function MultiIngestionSettings(): JSX.Element {
const [totalIngestionKeys, setTotalIngestionKeys] = useState(0);
const { isEnterpriseSelfHostedUser } = useGetTenantLicense();
const [
hasCreateLimitForIngestionKeyError,
setHasCreateLimitForIngestionKeyError,
@@ -293,7 +296,7 @@ function MultiIngestionSettings(): JSX.Element {
isLoading: isLoadingDeploymentsData,
isFetching: isFetchingDeploymentsData,
isError: isErrorDeploymentsData,
} = useGetDeploymentsData(true);
} = useGetDeploymentsData(!isEnterpriseSelfHostedUser);
const {
mutate: createIngestionKey,
@@ -1308,7 +1311,8 @@ function MultiIngestionSettings(): JSX.Element {
{!isErrorDeploymentsData &&
!isLoadingDeploymentsData &&
!isFetchingDeploymentsData && (
!isFetchingDeploymentsData &&
deploymentsData && (
<div className="ingestion-setup-details-links">
<div className="ingestion-key-url-container">
<div className="ingestion-key-url-label">Ingestion URL</div>

View File

@@ -1,6 +1,9 @@
import styled from 'styled-components';
export const TitleWrapper = styled.span`
user-select: text !important;
cursor: text;
.hover-reveal {
visibility: hidden;
}

View File

@@ -71,8 +71,13 @@ function BodyTitleRenderer({
onClick: onClickHandler,
};
const handleTextSelection = (e: React.MouseEvent): void => {
// Prevent tree node click when user is trying to select text
e.stopPropagation();
};
return (
<TitleWrapper>
<TitleWrapper onMouseDown={handleTextSelection}>
<Dropdown menu={menu} trigger={['click']}>
<SettingOutlined style={{ marginRight: 8 }} className="hover-reveal" />
</Dropdown>

View File
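Note: the `onMouseDown` handler above stops the event before the tree node's own click/selection handling runs, which is what makes the text selectable. A self-contained sketch of the same idea, with hypothetical names:

```tsx
import React from 'react';

// Sketch: allow text selection inside an otherwise clickable wrapper by
// stopping mousedown from reaching the parent's handlers.
function SelectableLabel({ text }: { text: string }): JSX.Element {
	return (
		<span
			style={{ userSelect: 'text', cursor: 'text' }}
			onMouseDown={(e: React.MouseEvent): void => e.stopPropagation()}
		>
			{text}
		</span>
	);
}
```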

@@ -17,8 +17,8 @@ export const getPodQueryPayload = (
: 'k8s_cluster_name';
const k8sPodNameKey = dotMetricsEnabled ? 'k8s.pod.name' : 'k8s_pod_name';
const containerCpuUtilKey = dotMetricsEnabled
? 'container.cpu.utilization'
: 'container_cpu_utilization';
? 'container.cpu.usage'
: 'container_cpu_usage';
const containerMemUsageKey = dotMetricsEnabled
? 'container.memory.usage'
: 'container_memory_usage';
@@ -63,7 +63,7 @@ export const getPodQueryPayload = (
{
aggregateAttribute: {
dataType: DataTypes.Float64,
id: 'container_cpu_utilization--float64--Gauge--true',
id: 'container_cpu_usage--float64--Gauge--true',
isColumn: true,
isJSON: false,
key: containerCpuUtilKey,
@@ -231,7 +231,7 @@ export const getPodQueryPayload = (
{
aggregateAttribute: {
dataType: DataTypes.Float64,
id: 'container_cpu_utilization--float64--Gauge--true',
id: 'container_cpu_usage--float64--Gauge--true',
isColumn: true,
isJSON: false,
key: containerCpuUtilKey,
@@ -385,7 +385,7 @@ export const getPodQueryPayload = (
{
aggregateAttribute: {
dataType: DataTypes.Float64,
id: 'container_cpu_utilization--float64--Gauge--true',
id: 'container_cpu_usage--float64--Gauge--true',
isColumn: true,
isJSON: false,
key: containerCpuUtilKey,

View File

@@ -11,6 +11,18 @@
}
}
.selectable-tree {
.ant-tree-node-content-wrapper {
user-select: text !important;
cursor: text !important;
}
.ant-tree-title {
user-select: text !important;
cursor: text !important;
}
}
.table-view-actions-content {
.ant-popover-inner {
border-radius: 4px;

View File

@@ -53,7 +53,12 @@ const convert = new Convert();
// Memoized Tree Component
const MemoizedTree = React.memo<{ treeData: any[] }>(({ treeData }) => (
<Tree defaultExpandAll showLine treeData={treeData} />
<Tree
defaultExpandAll
showLine
treeData={treeData}
className="selectable-tree"
/>
));
MemoizedTree.displayName = 'MemoizedTree';

View File

@@ -3,9 +3,11 @@ import { useGetMetricMeta } from 'hooks/apDex/useGetMetricMeta';
import useErrorNotification from 'hooks/useErrorNotification';
import { useParams } from 'react-router-dom';
import { FeatureKeys } from '../../../../../constants/features';
import { useAppContext } from '../../../../../providers/App/App';
import { WidgetKeys } from '../../../constant';
import { IServiceName } from '../../types';
import ApDexMetrics from './ApDexMetrics';
import { metricMeta } from './constants';
import { ApDexDataSwitcherProps } from './types';
function ApDexMetricsApplication({
@@ -18,7 +20,19 @@ function ApDexMetricsApplication({
const { servicename: encodedServiceName } = useParams<IServiceName>();
const servicename = decodeURIComponent(encodedServiceName);
const { data, isLoading, error } = useGetMetricMeta(metricMeta, servicename);
const { featureFlags } = useAppContext();
const dotMetricsEnabled =
featureFlags?.find((flag) => flag.name === FeatureKeys.DOT_METRICS_ENABLED)
?.active || false;
const signozLatencyBucketMetrics = dotMetricsEnabled
? WidgetKeys.Signoz_latency_bucket
: WidgetKeys.Signoz_latency_bucket_norm;
const { data, isLoading, error } = useGetMetricMeta(
signozLatencyBucketMetrics,
servicename,
);
useErrorNotification(error);
if (isLoading) {

View File
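Note: the change above replaces the hardcoded `metricMeta` constant with a feature-flag-driven widget key. The generic shape of the lookup, with the flag name and metric strings taken from the diffs in this compare:

```ts
// Sketch of the recurring flag-driven key selection; FeatureFlag is an
// assumed minimal shape of the entries in the app context.
interface FeatureFlag {
	name: string;
	active: boolean;
}

function getMetricKey(
	featureFlags: FeatureFlag[] | undefined,
	dotKey: string,
	underscoreKey: string,
): string {
	const dotMetricsEnabled =
		featureFlags?.find((flag) => flag.name === 'DOT_METRICS_ENABLED')?.active ??
		false;
	return dotMetricsEnabled ? dotKey : underscoreKey;
}

// e.g. getMetricKey(flags, 'k8s.pod.cpu.usage', 'k8s_pod_cpu_usage')
```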

@@ -1 +0,0 @@
export const metricMeta = 'signoz_latency_bucket';

View File

@@ -75,15 +75,14 @@ function Summary(): JSX.Element {
useEffect(() => {
logEvent(MetricsExplorerEvents.TabChanged, {
[MetricsExplorerEventKeys.Tab]: 'summary',
[MetricsExplorerEventKeys.TimeRange]: {
startTime: convertNanoToMilliseconds(minTime),
endTime: convertNanoToMilliseconds(maxTime),
},
});
// eslint-disable-next-line react-hooks/exhaustive-deps
}, []);
useEffect(() => {
logEvent(MetricsExplorerEvents.TimeUpdated, {
[MetricsExplorerEventKeys.Tab]: 'summary',
});
}, [maxTime, minTime]);
// This is used to avoid the filters from being serialized with the id
const queryFiltersWithoutId = useMemo(() => {
const filtersWithoutId = {
@@ -168,9 +167,11 @@ function Summary(): JSX.Element {
[SUMMARY_FILTERS_KEY]: JSON.stringify(value),
});
setCurrentPage(1);
logEvent(MetricsExplorerEvents.FilterApplied, {
[MetricsExplorerEventKeys.Tab]: 'summary',
});
if (value.items.length > 0) {
logEvent(MetricsExplorerEvents.FilterApplied, {
[MetricsExplorerEventKeys.Tab]: 'summary',
});
}
},
[setSearchParams, searchParams],
);

View File

@@ -6,7 +6,6 @@ export enum MetricsExplorerEvents {
ModalOpened = 'Metrics Explorer: Modal opened',
MetricClicked = 'Metrics Explorer: Metric clicked',
FilterApplied = 'Metrics Explorer: Filter applied',
TimeUpdated = 'Metrics Explorer: Time updated',
TreemapViewChanged = 'Metrics Explorer: Treemap view changed',
PageNumberChanged = 'Metrics Explorer: Page number changed',
PageSizeChanged = 'Metrics Explorer: Page size changed',
@@ -48,4 +47,5 @@ export enum MetricsExplorerEventKeys {
YAxisUnit = 'yAxisUnit',
ViewName = 'viewName',
Filters = 'filters',
TimeRange = 'timeRange',
}

View File

@@ -215,6 +215,27 @@ export function SpanDuration({
setHasActionButtons(false);
};
// Calculate text positioning to handle overflow cases
const textStyle = useMemo(() => {
const spanRightEdge = leftOffset + width;
const textWidthApprox = 8; // Approximate text width in percentage
// If span would cause text overflow, right-align text to span end
if (leftOffset > 100 - textWidthApprox) {
return {
right: `${100 - spanRightEdge}%`,
color,
textAlign: 'right' as const,
};
}
// Default: left-align text to span start
return {
left: `${leftOffset}%`,
color,
};
}, [leftOffset, width, color]);
return (
<div
className={cx(
@@ -270,7 +291,7 @@ export function SpanDuration({
<Typography.Text
className="span-line-text"
ellipsis
style={{ left: `${leftOffset}%`, color }}
style={textStyle}
>{`${toFixed(time, 2)} ${timeUnitName}`}</Typography.Text>
</Tooltip>
</div>
@@ -311,6 +332,16 @@ function getWaterfallColumns({
/>
),
size: 450,
/**
* Note: The TanStack table currently does not support percentage-based column sizing.
* Therefore, we specify both `minSize` and `maxSize` for the "span-name" column to ensure
* that its width remains between 240px and 900px. Setting a `maxSize` here is important
* because the "span-duration" column has column resizing disabled, making it difficult
* to enforce a minimum width for that column. By constraining the "span-name" column,
* we indirectly control the minimum width available for the "span-duration" column.
*/
minSize: 240,
maxSize: 900,
}),
columnDefHelper.display({
id: 'span-duration',

View File
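Note: the `textStyle` memo above switches the duration label from left-anchored to right-anchored when the span starts near the right edge of the timeline. A worked example with the names from the diff:

```ts
// Worked example for the overflow branch: a span starting at 95% of the
// timeline with 3% width.
const leftOffset = 95;
const width = 3;
const textWidthApprox = 8; // approximate label width in percent

const spanRightEdge = leftOffset + width; // 98
// leftOffset (95) > 100 - textWidthApprox (92), so the label is anchored to
// the span's right edge instead: right = 100 - 98 = 2%.
const style =
	leftOffset > 100 - textWidthApprox
		? { right: `${100 - spanRightEdge}%`, textAlign: 'right' as const }
		: { left: `${leftOffset}%` };
```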

@@ -0,0 +1,54 @@
import { useCallback, useEffect, useRef, useState } from 'react';
type SetElement = (el: Element | null) => void;
// To manage intersection observers for multiple items
export function useMultiIntersectionObserver(
itemCount: number,
options: IntersectionObserverInit = { threshold: 0.1 },
): {
visibilities: boolean[];
setElement: (index: number) => SetElement;
} {
const elementsRef = useRef<(Element | null)[]>([]);
const [everVisibles, setEverVisibles] = useState<boolean[]>(
new Array(itemCount).fill(false),
);
const setElement = useCallback<(index: number) => SetElement>(
(index) => (el): void => {
elementsRef.current[index] = el;
},
[],
);
useEffect(() => {
if (!elementsRef.current.length) return;
const observer = new IntersectionObserver((entries) => {
setEverVisibles((prev) => {
const newVis = [...prev];
let changed = false;
entries.forEach((entry) => {
const idx = elementsRef.current.indexOf(entry.target);
if (idx !== -1 && entry.isIntersecting && !newVis[idx]) {
newVis[idx] = true;
changed = true;
}
});
return changed ? newVis : prev;
});
}, options);
elementsRef.current.forEach((el) => {
if (el) observer.observe(el);
});
return (): void => {
observer.disconnect();
};
}, [itemCount, options]);
return { visibilities: everVisibles, setElement };
}

View File
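Note: the new `useMultiIntersectionObserver` hook tracks, per index, whether an element has ever been visible. A hypothetical consumer (the import path and `ExpensivePanel` are stand-ins) that mounts heavy panels only after they have scrolled into view once:

```tsx
import React from 'react';
import { useMultiIntersectionObserver } from './useMultiIntersectionObserver';

// Stand-in for an expensive chart or panel component.
function ExpensivePanel({ name }: { name: string }): JSX.Element {
	return <div>{name}</div>;
}

function LazyPanels({ items }: { items: string[] }): JSX.Element {
	const { visibilities, setElement } = useMultiIntersectionObserver(
		items.length,
	);
	return (
		<>
			{items.map((item, i) => (
				// setElement(i) returns a ref callback that registers this element
				// with the shared IntersectionObserver.
				<div key={item} ref={setElement(i)} style={{ minHeight: 240 }}>
					{visibilities[i] ? <ExpensivePanel name={item} /> : null}
				</div>
			))}
		</>
	);
}
```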

@@ -1,3 +1,26 @@
.service-route-tab {
margin-bottom: 64px;
.ant-tabs-nav {
&::before {
border-bottom: 1px solid var(--bg-slate-400);
}
.ant-tabs-nav-wrap {
.ant-tabs-nav-list {
.ant-tabs-ink-bar {
background-color: var(--bg-robin-500) !important;
}
.ant-tabs-tab {
font-size: 13px;
font-family: 'Inter';
color: var(--text-vanilla-100);
line-height: 20px;
letter-spacing: -0.07px;
gap: 10px;
}
.ant-tabs-tab.ant-tabs-tab-active .ant-tabs-tab-btn {
color: var(--bg-robin-500);
}
}
}
}
}

View File

@@ -1,15 +1,14 @@
import './MetricsApplication.styles.scss';
import RouteTab from 'components/RouteTab';
import ROUTES from 'constants/routes';
import { Tabs, TabsProps } from 'antd/lib';
import { QueryParams } from 'constants/query';
import DBCall from 'container/MetricsApplication/Tabs/DBCall';
import External from 'container/MetricsApplication/Tabs/External';
import Overview from 'container/MetricsApplication/Tabs/Overview';
import ResourceAttributesFilter from 'container/ResourceAttributesFilter';
import { useSafeNavigate } from 'hooks/useSafeNavigate';
import useUrlQuery from 'hooks/useUrlQuery';
import history from 'lib/history';
import { useCallback, useMemo } from 'react';
import { generatePath, useParams } from 'react-router-dom';
import { useParams } from 'react-router-dom';
import ApDexApplication from './ApDex/ApDexApplication';
import { MetricsApplicationTab, TAB_KEY_VS_LABEL } from './types';
@@ -25,50 +24,41 @@ function MetricsApplication(): JSX.Element {
const activeKey = useMetricsApplicationTabKey();
const urlQuery = useUrlQuery();
const { safeNavigate } = useSafeNavigate();
const getRouteUrl = useCallback(
(tab: MetricsApplicationTab): string => {
urlQuery.set('tab', tab);
return `${generatePath(ROUTES.SERVICE_METRICS, {
servicename,
})}?${urlQuery.toString()}`;
const items: TabsProps['items'] = [
{
label: TAB_KEY_VS_LABEL[MetricsApplicationTab.OVER_METRICS],
key: MetricsApplicationTab.OVER_METRICS,
children: <Overview />,
},
[servicename, urlQuery],
);
{
label: TAB_KEY_VS_LABEL[MetricsApplicationTab.DB_CALL_METRICS],
key: MetricsApplicationTab.DB_CALL_METRICS,
children: <DBCall />,
},
{
label: TAB_KEY_VS_LABEL[MetricsApplicationTab.EXTERNAL_METRICS],
key: MetricsApplicationTab.EXTERNAL_METRICS,
children: <External />,
},
];
const routes = useMemo(
() => [
{
Component: Overview,
name: TAB_KEY_VS_LABEL[MetricsApplicationTab.OVER_METRICS],
route: getRouteUrl(MetricsApplicationTab.OVER_METRICS),
key: MetricsApplicationTab.OVER_METRICS,
},
{
Component: DBCall,
name: TAB_KEY_VS_LABEL[MetricsApplicationTab.DB_CALL_METRICS],
route: getRouteUrl(MetricsApplicationTab.DB_CALL_METRICS),
key: MetricsApplicationTab.DB_CALL_METRICS,
},
{
Component: External,
name: TAB_KEY_VS_LABEL[MetricsApplicationTab.EXTERNAL_METRICS],
route: getRouteUrl(MetricsApplicationTab.EXTERNAL_METRICS),
key: MetricsApplicationTab.EXTERNAL_METRICS,
},
],
[getRouteUrl],
);
const onTabChange = (tab: string): void => {
urlQuery.set(QueryParams.tab, tab);
safeNavigate(`/services/${servicename}?${urlQuery.toString()}`);
};
return (
<div className="metrics-application-container">
<ResourceAttributesFilter />
<ApDexApplication />
<RouteTab
routes={routes}
history={history}
<Tabs
items={items}
activeKey={activeKey}
className="service-route-tab"
destroyInactiveTabPane
onChange={onTabChange}
/>
</div>
);

View File
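Note: the change above swaps the route-driven `RouteTab` for plain antd `Tabs` whose active key is synced to the `tab` query param via `safeNavigate`. A minimal, self-contained sketch of the URL-synced tabs pattern (component and prop names are hypothetical):

```tsx
import { Tabs, TabsProps } from 'antd';
import React from 'react';
import { useSearchParams } from 'react-router-dom-v5-compat';

// Sketch: keep the active tab in the URL so deep links and back/forward work.
function UrlSyncedTabs({ items }: { items: TabsProps['items'] }): JSX.Element {
	const [searchParams, setSearchParams] = useSearchParams();
	const activeKey = searchParams.get('tab') ?? items?.[0]?.key?.toString();
	return (
		<Tabs
			items={items}
			activeKey={activeKey}
			destroyInactiveTabPane
			onChange={(tab): void => {
				searchParams.set('tab', tab);
				setSearchParams(searchParams);
			}}
		/>
	);
}
```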

@@ -110,7 +110,7 @@ function SettingsPage(): JSX.Element {
item.key === ROUTES.INTEGRATIONS ||
item.key === ROUTES.API_KEYS ||
item.key === ROUTES.ORG_SETTINGS ||
item.key === ROUTES.SHORTCUTS
item.key === ROUTES.INGESTION_SETTINGS
? true
: item.isEnabled,
}));
@@ -120,7 +120,11 @@ function SettingsPage(): JSX.Element {
// eslint-disable-next-line sonarjs/no-identical-functions
updatedItems = updatedItems.map((item) => ({
...item,
isEnabled: item.key === ROUTES.INTEGRATIONS ? true : item.isEnabled,
isEnabled:
item.key === ROUTES.INTEGRATIONS ||
item.key === ROUTES.INGESTION_SETTINGS
? true
: item.isEnabled,
}));
}
}
@@ -130,9 +134,7 @@ function SettingsPage(): JSX.Element {
updatedItems = updatedItems.map((item) => ({
...item,
isEnabled:
item.key === ROUTES.API_KEYS ||
item.key === ROUTES.ORG_SETTINGS ||
item.key === ROUTES.SHORTCUTS
item.key === ROUTES.API_KEYS || item.key === ROUTES.ORG_SETTINGS
? true
: item.isEnabled,
}));

View File

@@ -119,24 +119,8 @@ const config = {
],
},
{
test: /\.(png|jpe?g|gif|svg)$/i,
use: [
{
loader: 'file-loader',
},
{
loader: 'image-webpack-loader',
options: {
bypassOnDebug: true,
optipng: {
optimizationLevel: 7,
},
gifsicle: {
interlaced: false,
},
},
},
],
test: /\.(jpe?g|png|gif|svg)$/i,
type: 'asset',
},
{
test: /\.(ttf|eot|woff|woff2)$/,

View File
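Note: the webpack change above replaces the deprecated `file-loader` + `image-webpack-loader` pair with webpack 5 asset modules (`type: 'asset'`), which emit a file or inline a data URI automatically. A sketch of the rule; the `dataUrlCondition` threshold is an assumption, not part of the diff:

```ts
// Sketch: asset modules replace file-loader/url-loader. An optional size
// threshold controls when small images are inlined as data URIs.
module.exports = {
	module: {
		rules: [
			{
				test: /\.(jpe?g|png|gif|svg)$/i,
				type: 'asset', // emits a file, or inlines below the threshold
				parser: {
					dataUrlCondition: { maxSize: 4 * 1024 }, // inline files under 4 KiB
				},
			},
		],
	},
};
```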

@@ -14,6 +14,7 @@ const CssMinimizerPlugin = require('css-minimizer-webpack-plugin');
const MiniCssExtractPlugin = require('mini-css-extract-plugin');
const { BundleAnalyzerPlugin } = require('webpack-bundle-analyzer');
const { RetryChunkLoadPlugin } = require('webpack-retry-chunk-load-plugin');
const ImageMinimizerPlugin = require('image-minimizer-webpack-plugin');
dotenv.config();
@@ -135,24 +136,8 @@ const config = {
],
},
{
test: /\.(png|jpe?g|gif|svg)$/i,
use: [
{
loader: 'file-loader',
},
{
loader: 'image-webpack-loader',
options: {
bypassOnDebug: true,
optipng: {
optimizationLevel: 7,
},
gifsicle: {
interlaced: false,
},
},
},
],
test: /\.(jpe?g|png|gif|svg)$/i,
type: 'asset',
},
{
@@ -212,6 +197,55 @@ const config = {
},
}),
new CssMinimizerPlugin(),
new ImageMinimizerPlugin({
minimizer: [
{
implementation: ImageMinimizerPlugin.sharpMinify,
options: {
encodeOptions: {
jpeg: {
quality: 80,
},
webp: {
lossless: true,
},
avif: {
lossless: true,
},
png: {},
gif: {},
},
},
},
{
implementation: ImageMinimizerPlugin.imageminMinify,
options: {
plugins: [
[
'svgo',
{
plugins: [
{
name: 'preset-default',
params: {
overrides: {
removeViewBox: false,
addAttributesToSVGElement: {
params: {
attributes: [{ xmlns: 'http://www.w3.org/2000/svg' }],
},
},
},
},
},
],
},
],
],
},
},
],
}),
],
},
performance: {

File diff suppressed because it is too large

View File

@@ -289,43 +289,6 @@ func (h *handler) UpdateUser(w http.ResponseWriter, r *http.Request) {
return
}
existingUser, err := h.module.GetUserByID(ctx, claims.OrgID, id)
if err != nil {
render.Error(w, err)
return
}
// only displayName, role can be updated
if user.DisplayName == "" {
user.DisplayName = existingUser.DisplayName
}
if user.Role == "" {
user.Role = existingUser.Role
}
if user.Role != existingUser.Role && claims.Role != types.RoleAdmin {
render.Error(w, errors.New(errors.TypeForbidden, errors.CodeForbidden, "only admins can change roles"))
return
}
// Make sure that the request is not demoting the last admin user.
// also an admin user can only change role of their own or other user
if user.Role != existingUser.Role && existingUser.Role == types.RoleAdmin.String() {
adminUsers, err := h.module.GetUsersByRoleInOrg(ctx, claims.OrgID, types.RoleAdmin)
if err != nil {
render.Error(w, err)
return
}
if len(adminUsers) == 1 {
render.Error(w, errors.New(errors.TypeForbidden, errors.CodeForbidden, "cannot demote the last admin"))
return
}
}
user.UpdatedAt = time.Now()
updatedUser, err := h.module.UpdateUser(ctx, claims.OrgID, id, &user, claims.UserID)
if err != nil {
render.Error(w, err)

View File

@@ -91,10 +91,15 @@ func (m *Module) CreateBulkInvite(ctx context.Context, orgID, userID string, bul
return nil, err
}
// send telemetry event
for i := 0; i < len(invites); i++ {
m.analytics.TrackUser(ctx, orgID, creator.ID.String(), "Invite Sent", map[string]any{"invitee_email": invites[i].Email, "invitee_role": invites[i].Role})
// if the frontend base url is not provided, we don't send the email
if bulkInvites.Invites[i].FrontendBaseUrl == "" {
m.settings.Logger().InfoContext(ctx, "frontend base url is not provided, skipping email", "invitee_email", invites[i].Email)
continue
}
if err := m.emailing.SendHTML(ctx, invites[i].Email, "You are invited to join a team in SigNoz", emailtypes.TemplateNameInvitationEmail, map[string]any{
"CustomerName": invites[i].Name,
"InviterName": creator.DisplayName,
@@ -171,18 +176,69 @@ func (m *Module) ListUsers(ctx context.Context, orgID string) ([]*types.Gettable
}
func (m *Module) UpdateUser(ctx context.Context, orgID string, id string, user *types.User, updatedBy string) (*types.User, error) {
user, err := m.store.UpdateUser(ctx, orgID, id, user)
existingUser, err := m.GetUserByID(ctx, orgID, id)
if err != nil {
return nil, err
}
traits := types.NewTraitsFromUser(user)
requestor, err := m.GetUserByID(ctx, orgID, updatedBy)
if err != nil {
return nil, err
}
// only displayName, role can be updated
if user.DisplayName == "" {
user.DisplayName = existingUser.DisplayName
}
if user.Role == "" {
user.Role = existingUser.Role
}
if user.Role != existingUser.Role && requestor.Role != types.RoleAdmin.String() {
return nil, errors.New(errors.TypeForbidden, errors.CodeForbidden, "only admins can change roles")
}
// Make sure that the request is not demoting the last admin user.
// also an admin user can only change role of their own or other user
if user.Role != existingUser.Role && existingUser.Role == types.RoleAdmin.String() {
adminUsers, err := m.GetUsersByRoleInOrg(ctx, orgID, types.RoleAdmin)
if err != nil {
return nil, err
}
if len(adminUsers) == 1 {
return nil, errors.New(errors.TypeForbidden, errors.CodeForbidden, "cannot demote the last admin")
}
}
user.UpdatedAt = time.Now()
updatedUser, err := m.store.UpdateUser(ctx, orgID, id, user)
if err != nil {
return nil, err
}
traits := types.NewTraitsFromUser(updatedUser)
m.analytics.IdentifyUser(ctx, user.OrgID, user.ID.String(), traits)
traits["updated_by"] = updatedBy
m.analytics.TrackUser(ctx, user.OrgID, user.ID.String(), "User Updated", traits)
return user, nil
// if the role is updated then send an email
if existingUser.Role != updatedUser.Role {
if err := m.emailing.SendHTML(ctx, existingUser.Email, "Your Role is updated in SigNoz", emailtypes.TemplateNameUpdateRole, map[string]any{
"CustomerName": existingUser.DisplayName,
"UpdatedByEmail": requestor.Email,
"OldRole": existingUser.Role,
"NewRole": updatedUser.Role,
}); err != nil {
m.settings.Logger().ErrorContext(ctx, "failed to send email", "error", err)
}
}
return updatedUser, nil
}
func (m *Module) DeleteUser(ctx context.Context, orgID string, id string, deletedBy string) error {

View File

@@ -2300,41 +2300,24 @@ func (r *ClickHouseReader) getPrevErrorID(ctx context.Context, queryParams *mode
func (r *ClickHouseReader) FetchTemporality(ctx context.Context, orgID valuer.UUID, metricNames []string) (map[string]map[v3.Temporality]bool, error) {
metricNameToTemporality := make(map[string]map[v3.Temporality]bool)
var metricNamesToQuery []string
for _, metricName := range metricNames {
updatedMetadata, cacheErr := r.GetUpdatedMetricsMetadata(ctx, orgID, metricName)
if cacheErr != nil {
zap.L().Info("Error in getting metrics cached metadata", zap.Error(cacheErr))
}
if metadata, exist := updatedMetadata[metricName]; exist {
if _, exists := metricNameToTemporality[metricName]; !exists {
metricNameToTemporality[metricName] = make(map[v3.Temporality]bool)
}
metricNameToTemporality[metricName][metadata.Temporality] = true
} else {
metricNamesToQuery = append(metricNamesToQuery, metricName)
}
// Batch fetch all metadata at once
metadataMap, apiErr := r.GetUpdatedMetricsMetadata(ctx, orgID, metricNames...)
if apiErr != nil {
zap.L().Warn("Failed to fetch updated metrics metadata", zap.Error(apiErr))
return nil, apiErr
}
query := fmt.Sprintf(`SELECT DISTINCT metric_name, temporality FROM %s.%s WHERE metric_name IN $1`, signozMetricDBName, signozTSTableNameV41Day)
rows, err := r.db.Query(ctx, query, metricNames)
if err != nil {
return nil, err
}
defer rows.Close()
for rows.Next() {
var metricName, temporality string
err := rows.Scan(&metricName, &temporality)
if err != nil {
return nil, err
for metricName, metadata := range metadataMap {
if metadata == nil {
continue
}
if _, ok := metricNameToTemporality[metricName]; !ok {
if _, exists := metricNameToTemporality[metricName]; !exists {
metricNameToTemporality[metricName] = make(map[v3.Temporality]bool)
}
metricNameToTemporality[metricName][v3.Temporality(temporality)] = true
metricNameToTemporality[metricName][metadata.Temporality] = true
}
return metricNameToTemporality, nil
}
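The refactor replaces a per-metric cache lookup plus a ClickHouse scan with one batched GetUpdatedMetricsMetadata call, after which building the temporality map is a pure fold over the result. A minimal sketch of that fold, with simplified stand-in types:

package main

import "fmt"

type Temporality string

type Metadata struct{ Temporality Temporality }

// buildTemporalityMap mirrors the new FetchTemporality shape: one batched
// metadata fetch, then a fold into metricName -> temporality set, skipping
// nil entries exactly as the loop above does.
func buildTemporalityMap(metadata map[string]*Metadata) map[string]map[Temporality]bool {
	out := make(map[string]map[Temporality]bool)
	for name, md := range metadata {
		if md == nil {
			continue
		}
		if _, ok := out[name]; !ok {
			out[name] = make(map[Temporality]bool)
		}
		out[name][md.Temporality] = true
	}
	return out
}

func main() {
	m := map[string]*Metadata{
		"http.server.duration": {Temporality: "Delta"},
		"missing.metadata":     nil,
	}
	fmt.Println(buildTemporalityMap(m))
}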
@@ -2673,67 +2656,77 @@ func (r *ClickHouseReader) QueryDashboardVars(ctx context.Context, query string)
}
func (r *ClickHouseReader) GetMetricAggregateAttributes(ctx context.Context, orgID valuer.UUID, req *v3.AggregateAttributeRequest, skipSignozMetrics bool) (*v3.AggregateAttributeResponse, error) {
var query string
var err error
var rows driver.Rows
var response v3.AggregateAttributeResponse
normalized := true
if constants.IsDotMetricsEnabled {
normalized = false
}
query = fmt.Sprintf("SELECT metric_name, type, is_monotonic, temporality FROM %s.%s WHERE metric_name ILIKE $1 and __normalized = $2 GROUP BY metric_name, type, is_monotonic, temporality", signozMetricDBName, signozTSTableNameV41Day)
// Query all relevant metric names from time_series_v4, but leave metadata retrieval to cache/db
query := fmt.Sprintf(
`SELECT DISTINCT metric_name
FROM %s.%s
WHERE metric_name ILIKE $1 AND __normalized = $2`,
signozMetricDBName, signozTSTableNameV41Day)
if req.Limit != 0 {
query = query + fmt.Sprintf(" LIMIT %d;", req.Limit)
}
rows, err = r.db.Query(ctx, query, fmt.Sprintf("%%%s%%", req.SearchText), normalized)
rows, err := r.db.Query(ctx, query, fmt.Sprintf("%%%s%%", req.SearchText), normalized)
if err != nil {
zap.L().Error("Error while executing query", zap.Error(err))
return nil, fmt.Errorf("error while executing query: %s", err.Error())
zap.L().Error("Error while querying metric names", zap.Error(err))
return nil, fmt.Errorf("error while executing metric name query: %s", err.Error())
}
defer rows.Close()
seen := make(map[string]struct{})
var metricName, typ, temporality string
var isMonotonic bool
var metricNames []string
for rows.Next() {
if err := rows.Scan(&metricName, &typ, &isMonotonic, &temporality); err != nil {
return nil, fmt.Errorf("error while scanning rows: %s", err.Error())
var name string
if err := rows.Scan(&name); err != nil {
return nil, fmt.Errorf("error while scanning metric name: %s", err.Error())
}
if skipSignozMetrics && strings.HasPrefix(metricName, "signoz") {
if skipSignozMetrics && strings.HasPrefix(name, "signoz") {
continue
}
metricNames = append(metricNames, name)
}
metadata, apiError := r.GetUpdatedMetricsMetadata(ctx, orgID, metricName)
if apiError != nil {
zap.L().Error("Error in getting metrics cached metadata", zap.Error(apiError))
}
if updatedMetadata, exist := metadata[metricName]; exist {
typ = string(updatedMetadata.MetricType)
isMonotonic = updatedMetadata.IsMonotonic
temporality = string(updatedMetadata.Temporality)
}
if len(metricNames) == 0 {
return &response, nil
}
// Get all metadata in one shot
metadataMap, apiError := r.GetUpdatedMetricsMetadata(ctx, orgID, metricNames...)
if apiError != nil {
return &response, fmt.Errorf("error getting updated metrics metadata: %s", apiError.Error())
}
seen := make(map[string]struct{})
for _, name := range metricNames {
metadata := metadataMap[name]
typ := string(metadata.MetricType)
temporality := string(metadata.Temporality)
isMonotonic := metadata.IsMonotonic
// Non-monotonic cumulative sums are treated as gauges
if typ == "Sum" && !isMonotonic && temporality == string(v3.Cumulative) {
typ = "Gauge"
}
// unlike the `tag`/`resource` types used for traces/logs, `Type` here carries the metric type
key := v3.AttributeKey{
Key: metricName,
Key: name,
DataType: v3.AttributeKeyDataTypeFloat64,
Type: v3.AttributeKeyType(typ),
IsColumn: true,
}
// remove duplicates
if _, ok := seen[metricName+typ]; ok {
if _, ok := seen[name+typ]; ok {
continue
}
seen[metricName+typ] = struct{}{}
seen[name+typ] = struct{}{}
response.AttributeKeys = append(response.AttributeKeys, key)
}
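The classification rule buried in that loop, treating a non-monotonic cumulative Sum as a Gauge, is worth isolating. A small sketch of it (the string constants are assumed to mirror the v3.MetricType*/v3.Cumulative values):

package main

import "fmt"

// normalizeMetricType applies the rule from the loop above: a non-monotonic
// cumulative Sum behaves like a Gauge for querying purposes.
func normalizeMetricType(typ string, isMonotonic bool, temporality string) string {
	if typ == "Sum" && !isMonotonic && temporality == "Cumulative" {
		return "Gauge"
	}
	return typ
}

func main() {
	fmt.Println(normalizeMetricType("Sum", false, "Cumulative"))  // Gauge
	fmt.Println(normalizeMetricType("Sum", true, "Cumulative"))   // Sum
	fmt.Println(normalizeMetricType("Histogram", false, "Delta")) // Histogram
}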
@@ -2825,72 +2818,69 @@ func (r *ClickHouseReader) GetMetricMetadata(ctx context.Context, orgID valuer.U
unixMilli := common.PastDayRoundOff()
// Note: metric metadata should be accessible regardless of the time range selection
// our standard retention period is 30 days, so we are querying the table v4_1_day to reduce the
// amount of data scanned
query := fmt.Sprintf("SELECT temporality, description, type, unit, is_monotonic from %s.%s WHERE metric_name=$1 AND unix_milli >= $2 GROUP BY temporality, description, type, unit, is_monotonic", signozMetricDBName, signozTSTableNameV41Day)
rows, err := r.db.Query(ctx, query, metricName, unixMilli)
if err != nil {
zap.L().Error("Error while fetching metric metadata", zap.Error(err))
return nil, fmt.Errorf("error while fetching metric metadata: %s", err.Error())
}
defer rows.Close()
var deltaExists, isMonotonic bool
var temporality, description, metricType, unit string
for rows.Next() {
if err := rows.Scan(&temporality, &description, &metricType, &unit, &isMonotonic); err != nil {
return nil, fmt.Errorf("error while scanning rows: %s", err.Error())
}
if temporality == string(v3.Delta) {
deltaExists = true
}
}
metadata, apiError := r.GetUpdatedMetricsMetadata(ctx, orgID, metricName)
// 1. Fetch metadata from cache/db using unified function
metadataMap, apiError := r.GetUpdatedMetricsMetadata(ctx, orgID, metricName)
if apiError != nil {
zap.L().Error("Error in getting metric cached metadata", zap.Error(apiError))
}
if updatedMetadata, exist := metadata[metricName]; exist {
metricType = string(updatedMetadata.MetricType)
temporality = string(updatedMetadata.Temporality)
if temporality == string(v3.Delta) {
deltaExists = true
}
isMonotonic = updatedMetadata.IsMonotonic
if updatedMetadata.Description != "" {
description = updatedMetadata.Description
}
if updatedMetadata.Unit != "" {
unit = updatedMetadata.Unit
}
return nil, fmt.Errorf("error fetching metric metadata: %s", apiError.Err.Error())
}
query = fmt.Sprintf("SELECT JSONExtractString(labels, 'le') as le from %s.%s WHERE metric_name=$1 AND unix_milli >= $2 AND type = 'Histogram' AND JSONExtractString(labels, 'service_name') = $3 GROUP BY le ORDER BY le", signozMetricDBName, signozTSTableNameV41Day)
rows, err = r.db.Query(ctx, query, metricName, unixMilli, serviceName)
if err != nil {
zap.L().Error("Error while executing query", zap.Error(err))
return nil, fmt.Errorf("error while executing query: %s", err.Error())
}
defer rows.Close()
// Defaults in case metadata is not found
var (
deltaExists bool
isMonotonic bool
temporality string
description string
metricType string
unit string
)
metadata, ok := metadataMap[metricName]
if !ok {
return nil, fmt.Errorf("metric metadata not found: %s", metricName)
}
metricType = string(metadata.MetricType)
temporality = string(metadata.Temporality)
isMonotonic = metadata.IsMonotonic
description = metadata.Description
unit = metadata.Unit
if temporality == string(v3.Delta) {
deltaExists = true
}
// 2. Only for Histograms, get `le` buckets
var leFloat64 []float64
for rows.Next() {
var leStr string
if err := rows.Scan(&leStr); err != nil {
return nil, fmt.Errorf("error while scanning rows: %s", err.Error())
}
le, err := strconv.ParseFloat(leStr, 64)
// ignore the error and continue if the value is not a float
// ideally this should not happen but we have seen ClickHouse
// returning empty string for some values
if metricType == string(v3.MetricTypeHistogram) {
query := fmt.Sprintf(`
SELECT JSONExtractString(labels, 'le') AS le
FROM %s.%s
WHERE metric_name = $1
AND unix_milli >= $2
AND type = 'Histogram'
AND (JSONExtractString(labels, 'service_name') = $3 OR JSONExtractString(labels, 'service.name') = $4)
GROUP BY le
ORDER BY le`, signozMetricDBName, signozTSTableNameV41Day)
rows, err := r.db.Query(ctx, query, metricName, unixMilli, serviceName, serviceName)
if err != nil {
zap.L().Error("error while parsing le value", zap.Error(err))
continue
zap.L().Error("Error while querying histogram buckets", zap.Error(err))
return nil, fmt.Errorf("error while querying histogram buckets: %s", err.Error())
}
if math.IsInf(le, 0) {
continue
defer rows.Close()
for rows.Next() {
var leStr string
if err := rows.Scan(&leStr); err != nil {
return nil, fmt.Errorf("error while scanning le: %s", err.Error())
}
le, err := strconv.ParseFloat(leStr, 64)
if err != nil || math.IsInf(le, 0) {
zap.L().Error("Invalid 'le' bucket value", zap.String("value", leStr), zap.Error(err))
continue
}
leFloat64 = append(leFloat64, le)
}
leFloat64 = append(leFloat64, le)
}
return &v3.MetricMetadataResponse{
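The histogram branch collects `le` bucket bounds defensively: unparseable strings (ClickHouse has been seen returning empty ones) and the +Inf bucket are skipped. A standalone sketch of that parsing, with sorting added for clarity (parseLeBuckets is illustrative):

package main

import (
	"fmt"
	"math"
	"sort"
	"strconv"
)

// parseLeBuckets converts raw `le` label values into sorted bucket bounds,
// dropping empty or unparseable strings and the +Inf bucket, as the
// histogram branch above does.
func parseLeBuckets(raw []string) []float64 {
	var out []float64
	for _, s := range raw {
		le, err := strconv.ParseFloat(s, 64)
		if err != nil || math.IsInf(le, 0) {
			continue // ClickHouse occasionally returns empty strings here
		}
		out = append(out, le)
	}
	sort.Float64s(out)
	return out
}

func main() {
	fmt.Println(parseLeBuckets([]string{"0.5", "0.1", "+Inf", ""})) // [0.1 0.5]
}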
@@ -5811,7 +5801,7 @@ VALUES ( ?, ?, ?, ?, ?, ?, ?);`, signozMetricDBName, signozUpdatedMetricsMetadat
if err != nil {
return &model.ApiError{Typ: "ClickHouseError", Err: err}
}
err = r.cache.Set(ctx, orgID, constants.UpdatedMetricsMetadataCachePrefix+req.MetricName, req, -1)
err = r.cache.Set(ctx, orgID, constants.UpdatedMetricsMetadataCachePrefix+req.MetricName, req, 0)
if err != nil {
return &model.ApiError{Typ: "CachingErr", Err: err}
}
@@ -5852,33 +5842,11 @@ func (r *ClickHouseReader) CheckForLabelsInMetric(ctx context.Context, metricNam
return hasLE, nil
}
func (r *ClickHouseReader) PreloadMetricsMetadata(ctx context.Context, orgID valuer.UUID) []error {
var allMetricsMetadata []model.UpdateMetricsMetadata
var errorList []error
// Fetch all rows from ClickHouse
query := fmt.Sprintf(`SELECT metric_name, type, description , temporality, is_monotonic, unit
FROM %s.%s;`, signozMetricDBName, signozUpdatedMetricsMetadataTable)
valueCtx := context.WithValue(ctx, "clickhouse_max_threads", constants.MetricsExplorerClickhouseThreads)
err := r.db.Select(valueCtx, &allMetricsMetadata, query)
if err != nil {
errorList = append(errorList, err)
return errorList
}
for _, m := range allMetricsMetadata {
err := r.cache.Set(ctx, orgID, constants.UpdatedMetricsMetadataCachePrefix+m.MetricName, &m, -1)
if err != nil {
errorList = append(errorList, err)
}
}
return errorList
}
func (r *ClickHouseReader) GetUpdatedMetricsMetadata(ctx context.Context, orgID valuer.UUID, metricNames ...string) (map[string]*model.UpdateMetricsMetadata, *model.ApiError) {
cachedMetadata := make(map[string]*model.UpdateMetricsMetadata)
var missingMetrics []string
// First, try retrieving each metric from cache.
// 1. Try cache
for _, metricName := range metricNames {
metadata := new(model.UpdateMetricsMetadata)
cacheKey := constants.UpdatedMetricsMetadataCachePrefix + metricName
@@ -5890,10 +5858,10 @@ func (r *ClickHouseReader) GetUpdatedMetricsMetadata(ctx context.Context, orgID
}
}
// If there are any metrics missing in the cache, query them from the database.
// 2. Try updated_metrics_metadata table
var stillMissing []string
if len(missingMetrics) > 0 {
// Join the missing metric names; ensure proper quoting if needed.
metricList := "'" + strings.Join(metricNames, "', '") + "'"
metricList := "'" + strings.Join(missingMetrics, "', '") + "'"
query := fmt.Sprintf(`SELECT metric_name, type, description, temporality, is_monotonic, unit
FROM %s.%s
WHERE metric_name IN (%s);`, signozMetricDBName, signozUpdatedMetricsMetadataTable, metricList)
@@ -5905,6 +5873,7 @@ func (r *ClickHouseReader) GetUpdatedMetricsMetadata(ctx context.Context, orgID
}
defer rows.Close()
found := make(map[string]struct{})
for rows.Next() {
metadata := new(model.UpdateMetricsMetadata)
if err := rows.Scan(
@@ -5918,15 +5887,57 @@ func (r *ClickHouseReader) GetUpdatedMetricsMetadata(ctx context.Context, orgID
return cachedMetadata, &model.ApiError{Typ: "ClickhouseErr", Err: fmt.Errorf("error scanning metrics metadata: %v", err)}
}
// Cache the result for future requests.
cacheKey := constants.UpdatedMetricsMetadataCachePrefix + metadata.MetricName
if cacheErr := r.cache.Set(ctx, orgID, cacheKey, metadata, -1); cacheErr != nil {
if cacheErr := r.cache.Set(ctx, orgID, cacheKey, metadata, 0); cacheErr != nil {
zap.L().Error("Failed to store metrics metadata in cache", zap.String("metric_name", metadata.MetricName), zap.Error(cacheErr))
}
cachedMetadata[metadata.MetricName] = metadata
found[metadata.MetricName] = struct{}{}
}
// Determine which metrics are still missing
for _, m := range missingMetrics {
if _, ok := found[m]; !ok {
stillMissing = append(stillMissing, m)
}
}
}
// 3. Fallback: try the time_series_v4 table
if len(stillMissing) > 0 {
metricList := "'" + strings.Join(stillMissing, "', '") + "'"
query := fmt.Sprintf(`SELECT DISTINCT metric_name, type, description, temporality, is_monotonic, unit
FROM %s.%s
WHERE metric_name IN (%s)`, signozMetricDBName, signozTSTableNameV4, metricList)
valueCtx := context.WithValue(ctx, "clickhouse_max_threads", constants.MetricsExplorerClickhouseThreads)
rows, err := r.db.Query(valueCtx, query)
if err != nil {
return cachedMetadata, &model.ApiError{Typ: "ClickhouseErr", Err: fmt.Errorf("error querying time_series_v4 to get metrics metadata: %v", err)}
}
defer rows.Close()
for rows.Next() {
metadata := new(model.UpdateMetricsMetadata)
if err := rows.Scan(
&metadata.MetricName,
&metadata.MetricType,
&metadata.Description,
&metadata.Temporality,
&metadata.IsMonotonic,
&metadata.Unit,
); err != nil {
return cachedMetadata, &model.ApiError{Typ: "ClickhouseErr", Err: fmt.Errorf("error scanning fallback metadata: %v", err)}
}
cacheKey := constants.UpdatedMetricsMetadataCachePrefix + metadata.MetricName
if cacheErr := r.cache.Set(ctx, orgID, cacheKey, metadata, 0); cacheErr != nil {
zap.L().Error("Failed to cache fallback metadata", zap.String("metric_name", metadata.MetricName), zap.Error(cacheErr))
}
cachedMetadata[metadata.MetricName] = metadata
}
if err := rows.Err(); err != nil {
return cachedMetadata, &model.ApiError{Typ: "ClickhouseErr", Err: fmt.Errorf("error iterating fallback metadata rows: %v", err)}
}
}
return cachedMetadata, nil
}
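GetUpdatedMetricsMetadata now resolves metadata through an ordered chain: cache, then the updated-metadata table, then the raw time-series table, writing whatever each tier finds back into the cache. A generic sketch of that fall-through shape, with string values standing in for *model.UpdateMetricsMetadata (lookup and its parameters are illustrative):

package main

import "fmt"

// lookup resolves keys through an ordered chain of sources, caching hits as
// it goes — the same cache -> metadata table -> time-series fallback shape
// as GetUpdatedMetricsMetadata.
func lookup(keys []string, cache map[string]string, sources []func([]string) map[string]string) map[string]string {
	result := make(map[string]string)
	var missing []string
	// 1. cache
	for _, k := range keys {
		if v, ok := cache[k]; ok {
			result[k] = v
		} else {
			missing = append(missing, k)
		}
	}
	// 2..n: fall through the remaining sources, caching whatever they find
	for _, src := range sources {
		if len(missing) == 0 {
			break
		}
		found := src(missing)
		var still []string
		for _, k := range missing {
			if v, ok := found[k]; ok {
				result[k] = v
				cache[k] = v
			} else {
				still = append(still, k)
			}
		}
		missing = still
	}
	return result
}

func main() {
	cache := map[string]string{"a": "cached"}
	table := func(keys []string) map[string]string { return map[string]string{"b": "from-metadata-table"} }
	fallback := func(keys []string) map[string]string { return map[string]string{"c": "from-time-series"} }
	fmt.Println(lookup([]string{"a", "b", "c"}, cache, []func([]string) map[string]string{table, fallback}))
}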


@@ -175,7 +175,7 @@
"multiSelect": false,
"name": "Account",
"order": 0,
"queryValue": "SELECT DISTINCT JSONExtractString(labels, 'cloud.account.id') AS cloud.account.id FROM signoz_metrics.distributed_time_series_v4_1day WHERE metric_name = 'aws_ElastiCache_CPUUtilization_max' GROUP BY cloud.account.id",
"queryValue": "SELECT DISTINCT JSONExtractString(labels, 'cloud.account.id') AS `cloud.account.id` FROM signoz_metrics.distributed_time_series_v4_1day WHERE metric_name = 'aws_ElastiCache_CPUUtilization_max' GROUP BY `cloud.account.id`",
"showALLOption": false,
"sort": "DISABLED",
"textboxValue": "",


@@ -4041,7 +4041,7 @@ func (aH *APIHandler) PreviewLogsPipelinesHandler(w http.ResponseWriter, r *http
req := logparsingpipeline.PipelinesPreviewRequest{}
if err := json.NewDecoder(r.Body).Decode(&req); err != nil {
RespondError(w, model.BadRequest(err), nil)
render.Error(w, err)
return
}
@@ -4050,7 +4050,7 @@ func (aH *APIHandler) PreviewLogsPipelinesHandler(w http.ResponseWriter, r *http
)
if apiErr != nil {
RespondError(w, apiErr, nil)
render.Error(w, apiErr)
return
}
@@ -4072,7 +4072,7 @@ func (aH *APIHandler) ListLogsPipelinesHandler(w http.ResponseWriter, r *http.Re
version, err := parseAgentConfigVersion(r)
if err != nil {
RespondError(w, model.WrapApiError(err, "Failed to parse agent config version"), nil)
render.Error(w, err)
return
}
@@ -4086,7 +4086,7 @@ func (aH *APIHandler) ListLogsPipelinesHandler(w http.ResponseWriter, r *http.Re
}
if apierr != nil {
RespondError(w, apierr, payload)
render.Error(w, apierr)
return
}
aH.Respond(w, payload)
@@ -4164,7 +4164,7 @@ func (aH *APIHandler) CreateLogsPipeline(w http.ResponseWriter, r *http.Request)
req := pipelinetypes.PostablePipelines{}
if err := json.NewDecoder(r.Body).Decode(&req); err != nil {
RespondError(w, model.BadRequest(err), nil)
render.Error(w, err)
return
}
@@ -4186,7 +4186,7 @@ func (aH *APIHandler) CreateLogsPipeline(w http.ResponseWriter, r *http.Request)
res, err := createPipeline(r.Context(), req.Pipelines)
if err != nil {
RespondError(w, err, nil)
render.Error(w, err)
return
}
@@ -4527,6 +4527,56 @@ func (aH *APIHandler) sendQueryResultEvents(r *http.Request, result []*v3.Result
}
properties := queryInfoResult.ToMap()
referrer := r.Header.Get("Referer")
if referrer == "" {
return
}
properties["referrer"] = referrer
logsExplorerMatched, _ := regexp.MatchString(`/logs/logs-explorer(?:\?.*)?$`, referrer)
traceExplorerMatched, _ := regexp.MatchString(`/traces-explorer(?:\?.*)?$`, referrer)
metricsExplorerMatched, _ := regexp.MatchString(`/metrics-explorer/explorer(?:\?.*)?$`, referrer)
dashboardMatched, _ := regexp.MatchString(`/dashboard/[a-zA-Z0-9\-]+/(new|edit)(?:\?.*)?$`, referrer)
alertMatched, _ := regexp.MatchString(`/alerts/(new|edit)(?:\?.*)?$`, referrer)
switch {
case dashboardMatched:
properties["module_name"] = "dashboard"
case alertMatched:
properties["module_name"] = "rule"
case metricsExplorerMatched:
properties["module_name"] = "metrics-explorer"
case logsExplorerMatched:
properties["module_name"] = "logs-explorer"
case traceExplorerMatched:
properties["module_name"] = "traces-explorer"
default:
return
}
if dashboardMatched {
if dashboardIDRegex, err := regexp.Compile(`/dashboard/([a-f0-9\-]+)/`); err == nil {
if matches := dashboardIDRegex.FindStringSubmatch(referrer); len(matches) > 1 {
properties["dashboard_id"] = matches[1]
}
}
if widgetIDRegex, err := regexp.Compile(`widgetId=([a-f0-9\-]+)`); err == nil {
if matches := widgetIDRegex.FindStringSubmatch(referrer); len(matches) > 1 {
properties["widget_id"] = matches[1]
}
}
}
if alertMatched {
if alertIDRegex, err := regexp.Compile(`ruleId=(\d+)`); err == nil {
if matches := alertIDRegex.FindStringSubmatch(referrer); len(matches) > 1 {
properties["rule_id"] = matches[1]
}
}
}
// Check if result is empty or has no data
if len(result) == 0 {
@@ -4551,47 +4601,6 @@ func (aH *APIHandler) sendQueryResultEvents(r *http.Request, result []*v3.Result
}
}
referrer := r.Header.Get("Referer")
if referrer == "" {
aH.Signoz.Analytics.TrackUser(r.Context(), claims.OrgID, claims.UserID, "Telemetry Query Returned Results", properties)
return
}
properties["referrer"] = referrer
if matched, _ := regexp.MatchString(`/dashboard/[a-zA-Z0-9\-]+/(new|edit)(?:\?.*)?$`, referrer); matched {
if dashboardIDRegex, err := regexp.Compile(`/dashboard/([a-f0-9\-]+)/`); err == nil {
if matches := dashboardIDRegex.FindStringSubmatch(referrer); len(matches) > 1 {
properties["dashboard_id"] = matches[1]
}
}
if widgetIDRegex, err := regexp.Compile(`widgetId=([a-f0-9\-]+)`); err == nil {
if matches := widgetIDRegex.FindStringSubmatch(referrer); len(matches) > 1 {
properties["widget_id"] = matches[1]
}
}
properties["module_name"] = "dashboard"
aH.Signoz.Analytics.TrackUser(r.Context(), claims.OrgID, claims.UserID, "Telemetry Query Returned Results", properties)
return
}
if matched, _ := regexp.MatchString(`/alerts/(new|edit)(?:\?.*)?$`, referrer); matched {
if alertIDRegex, err := regexp.Compile(`ruleId=(\d+)`); err == nil {
if matches := alertIDRegex.FindStringSubmatch(referrer); len(matches) > 1 {
properties["alert_id"] = matches[1]
}
}
properties["module_name"] = "rule"
aH.Signoz.Analytics.TrackUser(r.Context(), claims.OrgID, claims.UserID, "Telemetry Query Returned Results", properties)
return
}
aH.Signoz.Analytics.TrackUser(r.Context(), claims.OrgID, claims.UserID, "Telemetry Query Returned Results", properties)
}
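The rewritten handler classifies the referrer once up front and returns early for unknown pages, instead of duplicating the tracking call in every branch. A compact sketch of that referrer-to-module mapping, reusing the patterns from the switch (classifyReferrer is illustrative):

package main

import (
	"fmt"
	"regexp"
)

// modulePatterns pairs each telemetry module name with its referrer pattern,
// in the same precedence order as the switch above.
var modulePatterns = []struct {
	name string
	re   *regexp.Regexp
}{
	{"dashboard", regexp.MustCompile(`/dashboard/[a-zA-Z0-9\-]+/(new|edit)(?:\?.*)?$`)},
	{"rule", regexp.MustCompile(`/alerts/(new|edit)(?:\?.*)?$`)},
	{"metrics-explorer", regexp.MustCompile(`/metrics-explorer/explorer(?:\?.*)?$`)},
	{"logs-explorer", regexp.MustCompile(`/logs/logs-explorer(?:\?.*)?$`)},
	{"traces-explorer", regexp.MustCompile(`/traces-explorer(?:\?.*)?$`)},
}

// classifyReferrer returns the module name for a referrer URL, or false when
// no known page matches (the handler returns without tracking in that case).
func classifyReferrer(referrer string) (string, bool) {
	for _, p := range modulePatterns {
		if p.re.MatchString(referrer) {
			return p.name, true
		}
	}
	return "", false
}

func main() {
	fmt.Println(classifyReferrer("https://signoz.example/logs/logs-explorer?x=1"))
	fmt.Println(classifyReferrer("https://signoz.example/services"))
}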


@@ -16,7 +16,7 @@ import (
)
var (
metricToUseForClusters = GetDotMetrics("k8s_node_cpu_utilization")
metricToUseForClusters = GetDotMetrics("k8s_node_cpu_usage")
clusterAttrsToEnrich = []string{GetDotMetrics("k8s_cluster_name")}


@@ -16,8 +16,6 @@ var dotMetricMap = map[string]string{
"host_name": "host.name",
"k8s_cluster_name": "k8s.cluster.name",
"k8s_node_name": "k8s.node.name",
"k8s_node_cpu_utilization": "k8s.node.cpu.utilization",
"k8s_pod_cpu_utilization": "k8s.pod.cpu.utilization",
"k8s_pod_memory_usage": "k8s.pod.memory.usage",
"k8s_pod_cpu_request_utilization": "k8s.pod.cpu_request_utilization",
"k8s_pod_memory_request_utilization": "k8s.pod.memory_request_utilization",
@@ -107,7 +105,6 @@ var dotMetricMap = map[string]string{
"k8s_pod_uptime": "k8s.pod.uptime",
"container_cpu_usage": "container.cpu.usage",
"container_cpu_utilization": "container.cpu.utilization",
"container_cpu_time": "container.cpu.time",
"container_memory_available": "container.memory.available",
"container_memory_usage": "container.memory.usage",
@@ -209,7 +206,7 @@ WHERE metric_name IN (%s)
var (
// TODO(srikanthccv): import metadata yaml from receivers and use generated files to check the metrics
podMetricNamesToCheck = []string{
GetDotMetrics("k8s_pod_cpu_utilization"),
GetDotMetrics("k8s_pod_cpu_usage"),
GetDotMetrics("k8s_pod_memory_usage"),
GetDotMetrics("k8s_pod_cpu_request_utilization"),
GetDotMetrics("k8s_pod_memory_request_utilization"),
@@ -219,7 +216,7 @@ var (
GetDotMetrics("k8s_pod_phase"),
}
nodeMetricNamesToCheck = []string{
GetDotMetrics("k8s_node_cpu_utilization"),
GetDotMetrics("k8s_node_cpu_usage"),
GetDotMetrics("k8s_node_allocatable_cpu"),
GetDotMetrics("k8s_node_memory_usage"),
GetDotMetrics("k8s_node_allocatable_memory"),


@@ -16,7 +16,7 @@ import (
)
var (
metricToUseForDaemonSets = GetDotMetrics("k8s_pod_cpu_utilization")
metricToUseForDaemonSets = GetDotMetrics("k8s_pod_cpu_usage")
k8sDaemonSetNameAttrKey = GetDotMetrics("k8s_daemonset_name")
metricNamesForDaemonSets = map[string]string{


@@ -16,7 +16,7 @@ import (
)
var (
metricToUseForDeployments = GetDotMetrics("k8s_pod_cpu_utilization")
metricToUseForDeployments = GetDotMetrics("k8s_pod_cpu_usage")
k8sDeploymentNameAttrKey = GetDotMetrics("k8s_deployment_name")
metricNamesForDeployments = map[string]string{

View File

@@ -16,7 +16,7 @@ import (
)
var (
metricToUseForNamespaces = GetDotMetrics("k8s_pod_cpu_utilization")
metricToUseForNamespaces = GetDotMetrics("k8s_pod_cpu_usage")
namespaceAttrsToEnrich = []string{
GetDotMetrics("k8s_namespace_name"),

View File

@@ -3,6 +3,10 @@ package inframetrics
import (
"context"
"fmt"
"math"
"sort"
"strings"
"github.com/SigNoz/signoz/pkg/query-service/app/metrics/v4/helpers"
"github.com/SigNoz/signoz/pkg/query-service/common"
"github.com/SigNoz/signoz/pkg/query-service/constants"
@@ -12,13 +16,10 @@ import (
"github.com/SigNoz/signoz/pkg/query-service/postprocess"
"github.com/SigNoz/signoz/pkg/valuer"
"golang.org/x/exp/slices"
"math"
"sort"
"strings"
)
var (
metricToUseForNodes = GetDotMetrics("k8s_node_cpu_utilization")
metricToUseForNodes = GetDotMetrics("k8s_node_cpu_usage")
nodeAttrsToEnrich = []string{GetDotMetrics("k8s_node_name"), GetDotMetrics("k8s_node_uid"), GetDotMetrics("k8s_cluster_name")}
@@ -33,7 +34,7 @@ var (
nodeQueryNames = []string{"A", "B", "C", "D", "E", "F"}
metricNamesForNodes = map[string]string{
"cpu": GetDotMetrics("k8s_node_cpu_utilization"),
"cpu": GetDotMetrics("k8s_node_cpu_usage"),
"cpu_allocatable": GetDotMetrics("k8s_node_allocatable_cpu"),
"memory": GetDotMetrics("k8s_node_memory_usage"),
"memory_allocatable": GetDotMetrics("k8s_node_allocatable_memory"),


@@ -19,7 +19,7 @@ import (
)
var (
metricToUseForPods = GetDotMetrics("k8s_pod_cpu_utilization")
metricToUseForPods = GetDotMetrics("k8s_pod_cpu_usage")
podAttrsToEnrich = []string{
GetDotMetrics("k8s_pod_uid"),
@@ -49,7 +49,7 @@ var (
podQueryNames = []string{"A", "B", "C", "D", "E", "F", "G", "H", "I", "J", "K"}
metricNamesForPods = map[string]string{
"cpu": GetDotMetrics("k8s_pod_cpu_utilization"),
"cpu": GetDotMetrics("k8s_pod_cpu_usage"),
"cpu_request": GetDotMetrics("k8s_pod_cpu_request_utilization"),
"cpu_limit": GetDotMetrics("k8s_pod_cpu_limit_utilization"),
"memory": GetDotMetrics("k8s_pod_memory_usage"),


@@ -16,7 +16,7 @@ import (
)
var (
metricToUseForStatefulSets = GetDotMetrics("k8s_pod_cpu_utilization")
metricToUseForStatefulSets = GetDotMetrics("k8s_pod_cpu_usage")
k8sStatefulSetNameAttrKey = GetDotMetrics("k8s_statefulset_name")
metricNamesForStatefulSets = map[string]string{


@@ -4,7 +4,7 @@ import v3 "github.com/SigNoz/signoz/pkg/query-service/model/v3"
var (
metricNamesForWorkloads = map[string]string{
"cpu": GetDotMetrics("k8s_pod_cpu_utilization"),
"cpu": GetDotMetrics("k8s_pod_cpu_usage"),
"cpu_request": GetDotMetrics("k8s_pod_cpu_request_utilization"),
"cpu_limit": GetDotMetrics("k8s_pod_cpu_limit_utilization"),
"memory": GetDotMetrics("k8s_pod_memory_usage"),


@@ -213,17 +213,6 @@ func (q *querier) runBuilderQuery(
return
}
if builderQuery.DataSource == v3.DataSourceMetrics && !q.testingMode {
metadata, apiError := q.reader.GetUpdatedMetricsMetadata(ctx, orgID, builderQuery.AggregateAttribute.Key)
if apiError != nil {
zap.L().Error("Error in getting metrics cached metadata", zap.Error(apiError))
}
if updatedMetadata, exist := metadata[builderQuery.AggregateAttribute.Key]; exist {
builderQuery.AggregateAttribute.Type = v3.AttributeKeyType(updatedMetadata.MetricType)
builderQuery.Temporality = updatedMetadata.Temporality
}
}
// What is happening here?
// We are only caching the graph panel queries. A non-existent cache key means that the query is not cached.
// If the query is not cached, we execute the query and return the result without caching it.


@@ -214,17 +214,6 @@ func (q *querier) runBuilderQuery(
return
}
if builderQuery.DataSource == v3.DataSourceMetrics && !q.testingMode {
metadata, apiError := q.reader.GetUpdatedMetricsMetadata(ctx, orgID, builderQuery.AggregateAttribute.Key)
if apiError != nil {
zap.L().Error("Error in getting metrics cached metadata", zap.Error(apiError))
}
if updatedMetadata, exist := metadata[builderQuery.AggregateAttribute.Key]; exist {
builderQuery.AggregateAttribute.Type = v3.AttributeKeyType(updatedMetadata.MetricType)
builderQuery.Temporality = updatedMetadata.Temporality
}
}
// What is happening here?
// We are only caching the graph panel queries. A non-existent cache key means that the query is not cached.
// If the query is not cached, we execute the query and return the result without caching it.


@@ -164,18 +164,7 @@ func NewServer(config signoz.Config, signoz *signoz.SigNoz, jwt *authtypes.JWT)
agentConfMgr,
signoz.Instrumentation,
)
orgs, err := apiHandler.Signoz.Modules.OrgGetter.ListByOwnedKeyRange(context.Background())
if err != nil {
return nil, err
}
for _, org := range orgs {
errorList := reader.PreloadMetricsMetadata(context.Background(), org.ID)
for _, er := range errorList {
zap.L().Error("failed to preload metrics metadata", zap.Error(er))
}
}
return s, nil
}


@@ -17,6 +17,7 @@ import (
"github.com/SigNoz/signoz/pkg/query-service/app"
"github.com/SigNoz/signoz/pkg/query-service/constants"
"github.com/SigNoz/signoz/pkg/signoz"
"github.com/SigNoz/signoz/pkg/sqlschema"
"github.com/SigNoz/signoz/pkg/sqlstore"
"github.com/SigNoz/signoz/pkg/types/authtypes"
"github.com/SigNoz/signoz/pkg/version"
@@ -137,6 +138,9 @@ func main() {
signoz.NewEmailingProviderFactories(),
signoz.NewCacheProviderFactories(),
signoz.NewWebProviderFactories(),
func(sqlstore sqlstore.SQLStore) factory.NamedMap[factory.ProviderFactory[sqlschema.SQLSchema, sqlschema.Config]] {
return signoz.NewSQLSchemaProviderFactories(sqlstore)
},
signoz.NewSQLStoreProviderFactories(),
signoz.NewTelemetryStoreProviderFactories(),
)


@@ -2,7 +2,6 @@ package rules
import (
"context"
"fmt"
"strings"
"testing"
"time"
@@ -1224,7 +1223,6 @@ func TestThresholdRuleUnitCombinations(t *testing.T) {
for idx, c := range cases {
rows := cmock.NewRows(cols, c.values)
telemetryStore.Mock().ExpectQuery(".*").WillReturnError(fmt.Errorf("error"))
// We are testing the eval logic after the query is run
// so we don't care about the query string here
queryString := "SELECT any"
@@ -1322,7 +1320,6 @@ func TestThresholdRuleNoData(t *testing.T) {
for idx, c := range cases {
rows := cmock.NewRows(cols, c.values)
telemetryStore.Mock().ExpectQuery(".*").WillReturnError(fmt.Errorf("error"))
// We are testing the eval logic after the query is run
// so we don't care about the query string here


@@ -235,7 +235,7 @@ func ClickHouseFormattedMetricNames(v interface{}) string {
if name, ok := v.(string); ok {
transitionedMetrics := metrics.GetTransitionedMetric(name, !constants.IsDotMetricsEnabled)
if transitionedMetrics != name {
return ClickHouseFormattedValue([]interface{}{name, transitionedMetrics})
return ClickHouseFormattedValue([]interface{}{transitionedMetrics})
} else {
return ClickHouseFormattedValue([]interface{}{name})
}


@@ -24,6 +24,7 @@ import (
"github.com/SigNoz/signoz/pkg/sharder"
"github.com/SigNoz/signoz/pkg/sqlmigration"
"github.com/SigNoz/signoz/pkg/sqlmigrator"
"github.com/SigNoz/signoz/pkg/sqlschema"
"github.com/SigNoz/signoz/pkg/sqlstore"
"github.com/SigNoz/signoz/pkg/statsreporter"
"github.com/SigNoz/signoz/pkg/telemetrystore"
@@ -57,6 +58,9 @@ type Config struct {
// SQLMigrator config
SQLMigrator sqlmigrator.Config `mapstructure:"sqlmigrator"`
// SQLSchema config
SQLSchema sqlschema.Config `mapstructure:"sqlschema"`
// API Server config
APIServer apiserver.Config `mapstructure:"apiserver"`
@@ -111,6 +115,7 @@ func NewConfig(ctx context.Context, resolverConfig config.ResolverConfig, deprec
cache.NewConfigFactory(),
sqlstore.NewConfigFactory(),
sqlmigrator.NewConfigFactory(),
sqlschema.NewConfigFactory(),
apiserver.NewConfigFactory(),
telemetrystore.NewConfigFactory(),
prometheus.NewConfigFactory(),


@@ -26,6 +26,8 @@ import (
"github.com/SigNoz/signoz/pkg/sharder/noopsharder"
"github.com/SigNoz/signoz/pkg/sharder/singlesharder"
"github.com/SigNoz/signoz/pkg/sqlmigration"
"github.com/SigNoz/signoz/pkg/sqlschema"
"github.com/SigNoz/signoz/pkg/sqlschema/sqlitesqlschema"
"github.com/SigNoz/signoz/pkg/sqlstore"
"github.com/SigNoz/signoz/pkg/sqlstore/sqlitesqlstore"
"github.com/SigNoz/signoz/pkg/sqlstore/sqlstorehook"
@@ -69,7 +71,13 @@ func NewSQLStoreProviderFactories() factory.NamedMap[factory.ProviderFactory[sql
)
}
func NewSQLMigrationProviderFactories(sqlstore sqlstore.SQLStore) factory.NamedMap[factory.ProviderFactory[sqlmigration.SQLMigration, sqlmigration.Config]] {
func NewSQLSchemaProviderFactories(sqlstore sqlstore.SQLStore) factory.NamedMap[factory.ProviderFactory[sqlschema.SQLSchema, sqlschema.Config]] {
return factory.MustNewNamedMap(
sqlitesqlschema.NewFactory(sqlstore),
)
}
func NewSQLMigrationProviderFactories(sqlstore sqlstore.SQLStore, sqlschema sqlschema.SQLSchema) factory.NamedMap[factory.ProviderFactory[sqlmigration.SQLMigration, sqlmigration.Config]] {
return factory.MustNewNamedMap(
sqlmigration.NewAddDataMigrationsFactory(),
sqlmigration.NewAddOrganizationFactory(),
@@ -112,6 +120,10 @@ func NewSQLMigrationProviderFactories(sqlstore sqlstore.SQLStore) factory.NamedM
sqlmigration.NewDropFeatureSetFactory(),
sqlmigration.NewDropDeprecatedTablesFactory(),
sqlmigration.NewUpdateAgentsFactory(sqlstore),
sqlmigration.NewUpdateUsersFactory(sqlstore, sqlschema),
sqlmigration.NewUpdateUserInviteFactory(sqlstore, sqlschema),
sqlmigration.NewUpdateOrgDomainFactory(sqlstore, sqlschema),
sqlmigration.NewAddFactorIndexesFactory(sqlstore, sqlschema),
)
}


@@ -8,6 +8,8 @@ import (
"github.com/SigNoz/signoz/pkg/instrumentation/instrumentationtest"
"github.com/SigNoz/signoz/pkg/modules/organization/implorganization"
"github.com/SigNoz/signoz/pkg/modules/user/impluser"
"github.com/SigNoz/signoz/pkg/sqlschema"
"github.com/SigNoz/signoz/pkg/sqlschema/sqlschematest"
"github.com/SigNoz/signoz/pkg/sqlstore"
"github.com/SigNoz/signoz/pkg/sqlstore/sqlstoretest"
"github.com/SigNoz/signoz/pkg/statsreporter"
@@ -38,7 +40,7 @@ func TestNewProviderFactories(t *testing.T) {
})
assert.NotPanics(t, func() {
NewSQLMigrationProviderFactories(sqlstoretest.New(sqlstore.Config{Provider: "sqlite"}, sqlmock.QueryMatcherEqual))
NewSQLMigrationProviderFactories(sqlstoretest.New(sqlstore.Config{Provider: "sqlite"}, sqlmock.QueryMatcherEqual), sqlschematest.New(map[string]*sqlschema.Table{}, map[string][]*sqlschema.UniqueConstraint{}, map[string]sqlschema.Index{}))
})
assert.NotPanics(t, func() {


@@ -19,6 +19,7 @@ import (
"github.com/SigNoz/signoz/pkg/sharder"
"github.com/SigNoz/signoz/pkg/sqlmigration"
"github.com/SigNoz/signoz/pkg/sqlmigrator"
"github.com/SigNoz/signoz/pkg/sqlschema"
"github.com/SigNoz/signoz/pkg/sqlstore"
"github.com/SigNoz/signoz/pkg/statsreporter"
"github.com/SigNoz/signoz/pkg/telemetrystore"
@@ -61,6 +62,7 @@ func New(
emailingProviderFactories factory.NamedMap[factory.ProviderFactory[emailing.Emailing, emailing.Config]],
cacheProviderFactories factory.NamedMap[factory.ProviderFactory[cache.Cache, cache.Config]],
webProviderFactories factory.NamedMap[factory.ProviderFactory[web.Web, web.Config]],
sqlSchemaProviderFactories func(sqlstore.SQLStore) factory.NamedMap[factory.ProviderFactory[sqlschema.SQLSchema, sqlschema.Config]],
sqlstoreProviderFactories factory.NamedMap[factory.ProviderFactory[sqlstore.SQLStore, sqlstore.Config]],
telemetrystoreProviderFactories factory.NamedMap[factory.ProviderFactory[telemetrystore.TelemetryStore, telemetrystore.Config]],
) (*SigNoz, error) {
@@ -183,12 +185,23 @@ func New(
return nil, err
}
sqlschema, err := factory.NewProviderFromNamedMap(
ctx,
providerSettings,
config.SQLSchema,
sqlSchemaProviderFactories(sqlstore),
config.SQLStore.Provider,
)
if err != nil {
return nil, err
}
// Run migrations on the sqlstore
sqlmigrations, err := sqlmigration.New(
ctx,
providerSettings,
config.SQLMigration,
NewSQLMigrationProviderFactories(sqlstore),
NewSQLMigrationProviderFactories(sqlstore, sqlschema),
)
if err != nil {
return nil, err


@@ -0,0 +1,85 @@
package sqlmigration
import (
"context"
"github.com/SigNoz/signoz/pkg/factory"
"github.com/SigNoz/signoz/pkg/sqlschema"
"github.com/SigNoz/signoz/pkg/sqlstore"
"github.com/uptrace/bun"
"github.com/uptrace/bun/migrate"
)
type updateUsers struct {
sqlstore sqlstore.SQLStore
sqlschema sqlschema.SQLSchema
}
func NewUpdateUsersFactory(sqlstore sqlstore.SQLStore, sqlschema sqlschema.SQLSchema) factory.ProviderFactory[SQLMigration, Config] {
return factory.NewProviderFactory(factory.MustNewName("update_users"), func(ctx context.Context, providerSettings factory.ProviderSettings, config Config) (SQLMigration, error) {
return newUpdateUsers(ctx, providerSettings, config, sqlstore, sqlschema)
})
}
func newUpdateUsers(_ context.Context, _ factory.ProviderSettings, _ Config, sqlstore sqlstore.SQLStore, sqlschema sqlschema.SQLSchema) (SQLMigration, error) {
return &updateUsers{
sqlstore: sqlstore,
sqlschema: sqlschema,
}, nil
}
func (migration *updateUsers) Register(migrations *migrate.Migrations) error {
if err := migrations.Register(migration.Up, migration.Down); err != nil {
return err
}
return nil
}
func (migration *updateUsers) Up(ctx context.Context, db *bun.DB) error {
if err := migration.sqlschema.ToggleFKEnforcement(ctx, db, false); err != nil {
return err
}
tx, err := db.BeginTx(ctx, nil)
if err != nil {
return err
}
defer func() {
_ = tx.Rollback()
}()
table, uniqueConstraints, err := migration.sqlschema.GetTable(ctx, sqlschema.TableName("users"))
if err != nil {
return err
}
sqls := [][]byte{}
dropSQLs := migration.sqlschema.Operator().DropConstraint(table, uniqueConstraints, &sqlschema.UniqueConstraint{ColumnNames: []sqlschema.ColumnName{"email"}})
sqls = append(sqls, dropSQLs...)
indexSQLs := migration.sqlschema.Operator().CreateIndex(&sqlschema.UniqueIndex{TableName: "users", ColumnNames: []sqlschema.ColumnName{"email", "org_id"}})
sqls = append(sqls, indexSQLs...)
for _, sql := range sqls {
if _, err := tx.ExecContext(ctx, string(sql)); err != nil {
return err
}
}
if err := tx.Commit(); err != nil {
return err
}
if err := migration.sqlschema.ToggleFKEnforcement(ctx, db, true); err != nil {
return err
}
return nil
}
func (migration *updateUsers) Down(ctx context.Context, db *bun.DB) error {
return nil
}


@@ -0,0 +1,88 @@
package sqlmigration
import (
"context"
"github.com/SigNoz/signoz/pkg/factory"
"github.com/SigNoz/signoz/pkg/sqlschema"
"github.com/SigNoz/signoz/pkg/sqlstore"
"github.com/uptrace/bun"
"github.com/uptrace/bun/migrate"
)
type updateUserInvite struct {
sqlstore sqlstore.SQLStore
sqlschema sqlschema.SQLSchema
}
func NewUpdateUserInviteFactory(sqlstore sqlstore.SQLStore, sqlschema sqlschema.SQLSchema) factory.ProviderFactory[SQLMigration, Config] {
return factory.NewProviderFactory(factory.MustNewName("update_user_invite"), func(ctx context.Context, providerSettings factory.ProviderSettings, config Config) (SQLMigration, error) {
return newUpdateUserInvite(ctx, providerSettings, config, sqlstore, sqlschema)
})
}
func newUpdateUserInvite(_ context.Context, _ factory.ProviderSettings, _ Config, sqlstore sqlstore.SQLStore, sqlschema sqlschema.SQLSchema) (SQLMigration, error) {
return &updateUserInvite{
sqlstore: sqlstore,
sqlschema: sqlschema,
}, nil
}
func (migration *updateUserInvite) Register(migrations *migrate.Migrations) error {
if err := migrations.Register(migration.Up, migration.Down); err != nil {
return err
}
return nil
}
func (migration *updateUserInvite) Up(ctx context.Context, db *bun.DB) error {
if err := migration.sqlschema.ToggleFKEnforcement(ctx, db, false); err != nil {
return err
}
tx, err := db.BeginTx(ctx, nil)
if err != nil {
return err
}
defer func() {
_ = tx.Rollback()
}()
table, uniqueConstraints, err := migration.sqlschema.GetTable(ctx, sqlschema.TableName("user_invite"))
if err != nil {
return err
}
sqls := [][]byte{}
dropSQLs := migration.sqlschema.Operator().DropConstraint(table, uniqueConstraints, &sqlschema.UniqueConstraint{ColumnNames: []sqlschema.ColumnName{"email"}})
sqls = append(sqls, dropSQLs...)
indexSQLs := migration.sqlschema.Operator().CreateIndex(&sqlschema.UniqueIndex{TableName: "user_invite", ColumnNames: []sqlschema.ColumnName{"email", "org_id"}})
sqls = append(sqls, indexSQLs...)
indexSQLs = migration.sqlschema.Operator().CreateIndex(&sqlschema.UniqueIndex{TableName: "user_invite", ColumnNames: []sqlschema.ColumnName{"token"}})
sqls = append(sqls, indexSQLs...)
for _, sql := range sqls {
if _, err := tx.ExecContext(ctx, string(sql)); err != nil {
return err
}
}
if err := tx.Commit(); err != nil {
return err
}
if err := migration.sqlschema.ToggleFKEnforcement(ctx, db, true); err != nil {
return err
}
return nil
}
func (migration *updateUserInvite) Down(ctx context.Context, db *bun.DB) error {
return nil
}


@@ -0,0 +1,85 @@
package sqlmigration
import (
"context"
"github.com/SigNoz/signoz/pkg/factory"
"github.com/SigNoz/signoz/pkg/sqlschema"
"github.com/SigNoz/signoz/pkg/sqlstore"
"github.com/uptrace/bun"
"github.com/uptrace/bun/migrate"
)
type updateOrgDomain struct {
sqlstore sqlstore.SQLStore
sqlschema sqlschema.SQLSchema
}
func NewUpdateOrgDomainFactory(sqlstore sqlstore.SQLStore, sqlschema sqlschema.SQLSchema) factory.ProviderFactory[SQLMigration, Config] {
return factory.NewProviderFactory(factory.MustNewName("update_org_domain"), func(ctx context.Context, providerSettings factory.ProviderSettings, config Config) (SQLMigration, error) {
return newUpdateOrgDomain(ctx, providerSettings, config, sqlstore, sqlschema)
})
}
func newUpdateOrgDomain(_ context.Context, _ factory.ProviderSettings, _ Config, sqlstore sqlstore.SQLStore, sqlschema sqlschema.SQLSchema) (SQLMigration, error) {
return &updateOrgDomain{
sqlstore: sqlstore,
sqlschema: sqlschema,
}, nil
}
func (migration *updateOrgDomain) Register(migrations *migrate.Migrations) error {
if err := migrations.Register(migration.Up, migration.Down); err != nil {
return err
}
return nil
}
func (migration *updateOrgDomain) Up(ctx context.Context, db *bun.DB) error {
if err := migration.sqlschema.ToggleFKEnforcement(ctx, db, false); err != nil {
return err
}
tx, err := db.BeginTx(ctx, nil)
if err != nil {
return err
}
defer func() {
_ = tx.Rollback()
}()
table, uniqueConstraints, err := migration.sqlschema.GetTable(ctx, sqlschema.TableName("org_domains"))
if err != nil {
return err
}
sqls := [][]byte{}
dropSQLs := migration.sqlschema.Operator().DropConstraint(table, uniqueConstraints, &sqlschema.UniqueConstraint{ColumnNames: []sqlschema.ColumnName{"name"}})
sqls = append(sqls, dropSQLs...)
indexSQLs := migration.sqlschema.Operator().CreateIndex(&sqlschema.UniqueIndex{TableName: "org_domains", ColumnNames: []sqlschema.ColumnName{"name", "org_id"}})
sqls = append(sqls, indexSQLs...)
for _, sql := range sqls {
if _, err := tx.ExecContext(ctx, string(sql)); err != nil {
return err
}
}
if err := tx.Commit(); err != nil {
return err
}
if err := migration.sqlschema.ToggleFKEnforcement(ctx, db, true); err != nil {
return err
}
return nil
}
func (migration *updateOrgDomain) Down(ctx context.Context, db *bun.DB) error {
return nil
}


@@ -0,0 +1,75 @@
package sqlmigration
import (
"context"
"github.com/SigNoz/signoz/pkg/factory"
"github.com/SigNoz/signoz/pkg/sqlschema"
"github.com/SigNoz/signoz/pkg/sqlstore"
"github.com/uptrace/bun"
"github.com/uptrace/bun/migrate"
)
type addFactorIndexes struct {
sqlstore sqlstore.SQLStore
sqlschema sqlschema.SQLSchema
}
func NewAddFactorIndexesFactory(sqlstore sqlstore.SQLStore, sqlschema sqlschema.SQLSchema) factory.ProviderFactory[SQLMigration, Config] {
return factory.NewProviderFactory(factory.MustNewName("add_factor_indexes"), func(ctx context.Context, providerSettings factory.ProviderSettings, config Config) (SQLMigration, error) {
return newAddFactorIndexes(ctx, providerSettings, config, sqlstore, sqlschema)
})
}
func newAddFactorIndexes(_ context.Context, _ factory.ProviderSettings, _ Config, sqlstore sqlstore.SQLStore, sqlschema sqlschema.SQLSchema) (SQLMigration, error) {
return &addFactorIndexes{
sqlstore: sqlstore,
sqlschema: sqlschema,
}, nil
}
func (migration *addFactorIndexes) Register(migrations *migrate.Migrations) error {
if err := migrations.Register(migration.Up, migration.Down); err != nil {
return err
}
return nil
}
func (migration *addFactorIndexes) Up(ctx context.Context, db *bun.DB) error {
tx, err := db.BeginTx(ctx, nil)
if err != nil {
return err
}
defer func() {
_ = tx.Rollback()
}()
sqls := [][]byte{}
indexSQLs := migration.sqlschema.Operator().CreateIndex(&sqlschema.UniqueIndex{TableName: "factor_password", ColumnNames: []sqlschema.ColumnName{"user_id"}})
sqls = append(sqls, indexSQLs...)
indexSQLs = migration.sqlschema.Operator().CreateIndex(&sqlschema.UniqueIndex{TableName: "reset_password_token", ColumnNames: []sqlschema.ColumnName{"password_id"}})
sqls = append(sqls, indexSQLs...)
indexSQLs = migration.sqlschema.Operator().CreateIndex(&sqlschema.UniqueIndex{TableName: "reset_password_token", ColumnNames: []sqlschema.ColumnName{"token"}})
sqls = append(sqls, indexSQLs...)
for _, sql := range sqls {
if _, err := tx.ExecContext(ctx, string(sql)); err != nil {
return err
}
}
if err := tx.Commit(); err != nil {
return err
}
return nil
}
func (migration *addFactorIndexes) Down(ctx context.Context, db *bun.DB) error {
return nil
}


@@ -14,8 +14,10 @@ type SQLMigration interface {
// Register registers the migration with the given migrations. Each migration needs to be registered
// in a dedicated `*.go` file so that the correct migration semantics can be detected.
Register(*migrate.Migrations) error
// Up runs the migration.
Up(context.Context, *bun.DB) error
// Down rolls back the migration.
Down(context.Context, *bun.DB) error
}
@@ -66,5 +68,6 @@ func MustNew(
if err != nil {
panic(err)
}
return migrations
}
