Mirror of https://github.com/SigNoz/signoz.git (synced 2025-12-28 22:39:57 +00:00)

Compare commits: indirectDe...v0.96.0 (4 commits)

| Author | SHA1 | Date |
|---|---|---|
| | 340aa9ec21 | |
| | 5a47a4349b | |
| | 80f0c6dd92 | |
| | c0acc69f87 | |
First compose file: the schema-migrator sync/async images are bumped from v0.129.5 to v0.129.6.

```diff
@@ -42,7 +42,7 @@ services:
       timeout: 5s
       retries: 3
   schema-migrator-sync:
-    image: signoz/signoz-schema-migrator:v0.129.5
+    image: signoz/signoz-schema-migrator:v0.129.6
     container_name: schema-migrator-sync
     command:
       - sync
@@ -55,7 +55,7 @@ services:
         condition: service_healthy
     restart: on-failure
   schema-migrator-async:
-    image: signoz/signoz-schema-migrator:v0.129.5
+    image: signoz/signoz-schema-migrator:v0.129.6
     container_name: schema-migrator-async
     command:
       - async
```
Second compose file: signoz is bumped to v0.96.0; otel-collector and schema-migrator to v0.129.6.

```diff
@@ -176,7 +176,7 @@ services:
     #   - ../common/clickhouse/storage.xml:/etc/clickhouse-server/config.d/storage.xml
   signoz:
     !!merge <<: *db-depend
-    image: signoz/signoz:v0.95.0
+    image: signoz/signoz:v0.96.0
     command:
       - --config=/root/config/prometheus.yml
     ports:
@@ -209,7 +209,7 @@ services:
       retries: 3
   otel-collector:
     !!merge <<: *db-depend
-    image: signoz/signoz-otel-collector:v0.129.5
+    image: signoz/signoz-otel-collector:v0.129.6
     command:
       - --config=/etc/otel-collector-config.yaml
       - --manager-config=/etc/manager-config.yaml
@@ -233,7 +233,7 @@ services:
       - signoz
   schema-migrator:
     !!merge <<: *common
-    image: signoz/signoz-schema-migrator:v0.129.5
+    image: signoz/signoz-schema-migrator:v0.129.6
     deploy:
       restart_policy:
         condition: on-failure
```
The same bumps in a third compose file:

```diff
@@ -117,7 +117,7 @@ services:
     #   - ../common/clickhouse/storage.xml:/etc/clickhouse-server/config.d/storage.xml
   signoz:
     !!merge <<: *db-depend
-    image: signoz/signoz:v0.95.0
+    image: signoz/signoz:v0.96.0
     command:
       - --config=/root/config/prometheus.yml
     ports:
@@ -150,7 +150,7 @@ services:
       retries: 3
   otel-collector:
     !!merge <<: *db-depend
-    image: signoz/signoz-otel-collector:v0.129.5
+    image: signoz/signoz-otel-collector:v0.129.6
     command:
       - --config=/etc/otel-collector-config.yaml
       - --manager-config=/etc/manager-config.yaml
@@ -176,7 +176,7 @@ services:
       - signoz
   schema-migrator:
     !!merge <<: *common
-    image: signoz/signoz-schema-migrator:v0.129.5
+    image: signoz/signoz-schema-migrator:v0.129.6
     deploy:
       restart_policy:
         condition: on-failure
```
Fourth compose file, with parameterized tags: the fallback defaults for VERSION and OTELCOL_TAG are bumped.

```diff
@@ -179,7 +179,7 @@ services:
     #   - ../common/clickhouse/storage.xml:/etc/clickhouse-server/config.d/storage.xml
   signoz:
     !!merge <<: *db-depend
-    image: signoz/signoz:${VERSION:-v0.95.0}
+    image: signoz/signoz:${VERSION:-v0.96.0}
     container_name: signoz
     command:
       - --config=/root/config/prometheus.yml
@@ -213,7 +213,7 @@ services:
   # TODO: support otel-collector multiple replicas. Nginx/Traefik for loadbalancing?
   otel-collector:
     !!merge <<: *db-depend
-    image: signoz/signoz-otel-collector:${OTELCOL_TAG:-v0.129.5}
+    image: signoz/signoz-otel-collector:${OTELCOL_TAG:-v0.129.6}
     container_name: signoz-otel-collector
     command:
       - --config=/etc/otel-collector-config.yaml
@@ -239,7 +239,7 @@ services:
         condition: service_healthy
   schema-migrator-sync:
     !!merge <<: *common
-    image: signoz/signoz-schema-migrator:${OTELCOL_TAG:-v0.129.5}
+    image: signoz/signoz-schema-migrator:${OTELCOL_TAG:-v0.129.6}
     container_name: schema-migrator-sync
     command:
       - sync
@@ -250,7 +250,7 @@ services:
         condition: service_healthy
   schema-migrator-async:
     !!merge <<: *db-depend
-    image: signoz/signoz-schema-migrator:${OTELCOL_TAG:-v0.129.5}
+    image: signoz/signoz-schema-migrator:${OTELCOL_TAG:-v0.129.6}
     container_name: schema-migrator-async
     command:
       - async
```
The same parameterized bumps in the fifth compose file:

```diff
@@ -111,7 +111,7 @@ services:
     #   - ../common/clickhouse/storage.xml:/etc/clickhouse-server/config.d/storage.xml
   signoz:
     !!merge <<: *db-depend
-    image: signoz/signoz:${VERSION:-v0.95.0}
+    image: signoz/signoz:${VERSION:-v0.96.0}
     container_name: signoz
     command:
       - --config=/root/config/prometheus.yml
@@ -144,7 +144,7 @@ services:
       retries: 3
   otel-collector:
     !!merge <<: *db-depend
-    image: signoz/signoz-otel-collector:${OTELCOL_TAG:-v0.129.5}
+    image: signoz/signoz-otel-collector:${OTELCOL_TAG:-v0.129.6}
     container_name: signoz-otel-collector
     command:
       - --config=/etc/otel-collector-config.yaml
@@ -166,7 +166,7 @@ services:
        condition: service_healthy
   schema-migrator-sync:
     !!merge <<: *common
-    image: signoz/signoz-schema-migrator:${OTELCOL_TAG:-v0.129.5}
+    image: signoz/signoz-schema-migrator:${OTELCOL_TAG:-v0.129.6}
     container_name: schema-migrator-sync
     command:
       - sync
@@ -178,7 +178,7 @@ services:
     restart: on-failure
   schema-migrator-async:
     !!merge <<: *db-depend
-    image: signoz/signoz-schema-migrator:${OTELCOL_TAG:-v0.129.5}
+    image: signoz/signoz-schema-migrator:${OTELCOL_TAG:-v0.129.6}
     container_name: schema-migrator-async
     command:
       - async
```
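The last two files pin images through environment variables with defaults: in Compose, `${VERSION:-v0.96.0}` expands to `$VERSION` when it is set and non-empty, and to `v0.96.0` otherwise, so these changes only move the fallback. Below is a minimal Go sketch of that lookup rule; `envOrDefault` is a hypothetical helper for illustration, not code from the repo.

```go
package main

import (
	"fmt"
	"os"
)

// envOrDefault mirrors Compose's ${VAR:-default} substitution: use the
// environment variable when set and non-empty, else fall back to the default.
func envOrDefault(key, def string) string {
	if v, ok := os.LookupEnv(key); ok && v != "" {
		return v
	}
	return def
}

func main() {
	signozImage := "signoz/signoz:" + envOrDefault("VERSION", "v0.96.0")
	collectorImage := "signoz/signoz-otel-collector:" + envOrDefault("OTELCOL_TAG", "v0.129.6")
	fmt.Println(signozImage, collectorImage)
}
```

A consequence of the `:-` semantics: running the new file with `VERSION=v0.95.0` exported would still deploy the older image, since the bump only changes the default.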
Frontend: HeaderRightSection now gates the feedback button behind the tenant license.

```diff
@@ -2,6 +2,7 @@ import './HeaderRightSection.styles.scss';
 
 import { Button, Popover } from 'antd';
 import logEvent from 'api/common/logEvent';
+import { useGetTenantLicense } from 'hooks/useGetTenantLicense';
 import { Globe, Inbox, SquarePen } from 'lucide-react';
 import { useCallback, useState } from 'react';
 import { useLocation } from 'react-router-dom';
@@ -20,13 +21,15 @@ function HeaderRightSection({
 	enableAnnouncements,
 	enableShare,
 	enableFeedback,
-}: HeaderRightSectionProps): JSX.Element {
+}: HeaderRightSectionProps): JSX.Element | null {
 	const location = useLocation();
 
 	const [openFeedbackModal, setOpenFeedbackModal] = useState(false);
 	const [openShareURLModal, setOpenShareURLModal] = useState(false);
 	const [openAnnouncementsModal, setOpenAnnouncementsModal] = useState(false);
 
+	const { isCloudUser, isEnterpriseSelfHostedUser } = useGetTenantLicense();
+
 	const handleOpenFeedbackModal = useCallback((): void => {
 		logEvent('Feedback: Clicked', {
 			page: location.pathname,
@@ -63,9 +66,11 @@ function HeaderRightSection({
 		setOpenShareURLModal(open);
 	};
 
+	const isLicenseEnabled = isEnterpriseSelfHostedUser || isCloudUser;
+
 	return (
 		<div className="header-right-section-container">
-			{enableFeedback && (
+			{enableFeedback && isLicenseEnabled && (
 				<Popover
 					rootClassName="header-section-popover-root"
 					className="shareable-link-popover"
```
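In words: the component may now return null, and the feedback popover renders only when `enableFeedback` is set and the tenant is on Cloud or Enterprise self-hosted. A hypothetical Go restatement of that predicate follows; neither the struct nor the function exists in SigNoz, they just transcribe the component's logic.

```go
package main

import "fmt"

// TenantLicense mirrors the flags returned by the useGetTenantLicense hook
// (illustrative re-declaration, not part of the SigNoz codebase).
type TenantLicense struct {
	IsCloudUser                bool
	IsEnterpriseSelfHostedUser bool
	IsCommunityUser            bool
	IsCommunityEnterpriseUser  bool
}

// showFeedback restates `enableFeedback && isLicenseEnabled` from the diff.
func showFeedback(enableFeedback bool, l TenantLicense) bool {
	isLicenseEnabled := l.IsEnterpriseSelfHostedUser || l.IsCloudUser
	return enableFeedback && isLicenseEnabled
}

func main() {
	fmt.Println(showFeedback(true, TenantLicense{IsCloudUser: true}))     // true
	fmt.Println(showFeedback(true, TenantLicense{IsCommunityUser: true})) // false
}
```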
Tests for the new license gating:

```diff
@@ -5,6 +5,7 @@
 import { render, screen } from '@testing-library/react';
 import userEvent from '@testing-library/user-event';
 import logEvent from 'api/common/logEvent';
+import { useGetTenantLicense } from 'hooks/useGetTenantLicense';
 import { useLocation } from 'react-router-dom';
 
 import HeaderRightSection from '../HeaderRightSection';
@@ -44,8 +45,13 @@ jest.mock('../AnnouncementsModal', () => ({
 	),
 }));
 
+jest.mock('hooks/useGetTenantLicense', () => ({
+	useGetTenantLicense: jest.fn(),
+}));
+
 const mockLogEvent = logEvent as jest.Mock;
 const mockUseLocation = useLocation as jest.Mock;
+const mockUseGetTenantLicense = useGetTenantLicense as jest.Mock;
 
 const defaultProps = {
 	enableAnnouncements: true,
@@ -61,6 +67,13 @@ describe('HeaderRightSection', () => {
 	beforeEach(() => {
 		jest.clearAllMocks();
 		mockUseLocation.mockReturnValue(mockLocation);
+		// Default to licensed user (Enterprise or Cloud)
+		mockUseGetTenantLicense.mockReturnValue({
+			isCloudUser: true,
+			isEnterpriseSelfHostedUser: false,
+			isCommunityUser: false,
+			isCommunityEnterpriseUser: false,
+		});
 	});
 
 	it('should render all buttons when all features are enabled', () => {
@@ -189,4 +202,84 @@ describe('HeaderRightSection', () => {
 		expect(screen.getByTestId('feedback-modal')).toBeInTheDocument();
 		expect(screen.queryByTestId('share-modal')).not.toBeInTheDocument();
 	});
+
+	it('should show feedback button for Cloud users when feedback is enabled', () => {
+		mockUseGetTenantLicense.mockReturnValue({
+			isCloudUser: true,
+			isEnterpriseSelfHostedUser: false,
+			isCommunityUser: false,
+			isCommunityEnterpriseUser: false,
+		});
+
+		render(<HeaderRightSection {...defaultProps} />);
+
+		const feedbackButton = document.querySelector('.lucide-square-pen');
+		expect(feedbackButton).toBeInTheDocument();
+	});
+
+	it('should show feedback button for Enterprise self-hosted users when feedback is enabled', () => {
+		mockUseGetTenantLicense.mockReturnValue({
+			isCloudUser: false,
+			isEnterpriseSelfHostedUser: true,
+			isCommunityUser: false,
+			isCommunityEnterpriseUser: false,
+		});
+
+		render(<HeaderRightSection {...defaultProps} />);
+
+		const feedbackButton = document.querySelector('.lucide-square-pen');
+		expect(feedbackButton).toBeInTheDocument();
+	});
+
+	it('should hide feedback button for Community users even when feedback is enabled', () => {
+		mockUseGetTenantLicense.mockReturnValue({
+			isCloudUser: false,
+			isEnterpriseSelfHostedUser: false,
+			isCommunityUser: true,
+			isCommunityEnterpriseUser: false,
+		});
+
+		render(<HeaderRightSection {...defaultProps} />);
+
+		const feedbackButton = document.querySelector('.lucide-square-pen');
+		expect(feedbackButton).not.toBeInTheDocument();
+	});
+
+	it('should hide feedback button for Community Enterprise users even when feedback is enabled', () => {
+		mockUseGetTenantLicense.mockReturnValue({
+			isCloudUser: false,
+			isEnterpriseSelfHostedUser: false,
+			isCommunityUser: false,
+			isCommunityEnterpriseUser: true,
+		});
+
+		render(<HeaderRightSection {...defaultProps} />);
+
+		const feedbackButton = document.querySelector('.lucide-square-pen');
+		expect(feedbackButton).not.toBeInTheDocument();
+	});
+
+	it('should render correct number of buttons when feedback is hidden due to license', () => {
+		mockUseGetTenantLicense.mockReturnValue({
+			isCloudUser: false,
+			isEnterpriseSelfHostedUser: false,
+			isCommunityUser: true,
+			isCommunityEnterpriseUser: false,
+		});
+
+		render(<HeaderRightSection {...defaultProps} />);
+
+		// Should have 2 buttons (announcements + share) instead of 3
+		const buttons = screen.getAllByRole('button');
+		expect(buttons).toHaveLength(2);
+
+		// Verify which buttons are present
+		expect(screen.getByRole('button', { name: /share/i })).toBeInTheDocument();
+		const inboxIcon = document.querySelector('.lucide-inbox');
+		expect(inboxIcon).toBeInTheDocument();
+
+		// Verify feedback button is not present
+		const feedbackIcon = document.querySelector('.lucide-square-pen');
+		expect(feedbackIcon).not.toBeInTheDocument();
+	});
 });
```
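The four license permutations above would typically be written table-driven in Go. This sketch assumes the hypothetical TenantLicense type and showFeedback function from the previous example live in the same package.

```go
package main

import "testing"

// TestShowFeedback covers the same four tenant cases as the Jest tests above.
func TestShowFeedback(t *testing.T) {
	cases := []struct {
		name    string
		license TenantLicense
		want    bool
	}{
		{"cloud", TenantLicense{IsCloudUser: true}, true},
		{"enterprise self-hosted", TenantLicense{IsEnterpriseSelfHostedUser: true}, true},
		{"community", TenantLicense{IsCommunityUser: true}, false},
		{"community enterprise", TenantLicense{IsCommunityEnterpriseUser: true}, false},
	}
	for _, tc := range cases {
		t.Run(tc.name, func(t *testing.T) {
			if got := showFeedback(true, tc.license); got != tc.want {
				t.Errorf("got %v, want %v", got, tc.want)
			}
		})
	}
}
```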
RouteTab test: fireEvent, render, and screen now come from tests/test-utils instead of @testing-library/react.

```diff
@@ -1,6 +1,6 @@
-import { fireEvent, render, screen } from '@testing-library/react';
 import { createMemoryHistory } from 'history';
 import { Router } from 'react-router-dom';
+import { fireEvent, render, screen } from 'tests/test-utils';
 
 import RouteTab from './index';
 import { RouteTabProps } from './types';
```
useGetExplorerQueryRange: the queryKey is simplified.

```diff
@@ -66,13 +66,7 @@ export const useGetExplorerQueryRange = (
 		ENTITY_VERSION_V5,
 		{
 			...options,
-			queryKey: [
-				key,
-				selectedTimeInterval ?? globalSelectedInterval,
-				requestData,
-				minTime,
-				maxTime,
-			],
+			queryKey: [key, globalSelectedInterval, requestData, minTime, maxTime],
 			enabled: isEnabled,
 		},
 		headers,
```
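The hook previously folded `selectedTimeInterval ?? globalSelectedInterval` into react-query's cache key; the new key uses `globalSelectedInterval` alone, so a change to the locally selected interval by itself no longer creates a distinct cache entry. A rough Go analogue of composite cache keys, purely illustrative:

```go
package main

import "fmt"

// queryKey stands in for react-query's serialized key array: results are
// cached per distinct key value, so every field participates in identity.
type queryKey struct {
	key      string
	interval string
	request  string
}

func main() {
	cache := map[queryKey]string{}
	k1 := queryKey{"explorer", "5m", "req"}
	cache[k1] = "result A"

	// While the interval is part of the key, changing it misses the cache
	// and forces a fresh fetch; dropping it from the key removes that split.
	k2 := queryKey{"explorer", "15m", "req"}
	_, hit := cache[k2]
	fmt.Println(hit) // false
}
```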
Backend (ClickHouseReader.SetTTLV2): the TTL payload becomes a list of queries per table, adding a cold-storage retention column and a tiered MODIFY TTL clause.

```diff
@@ -1640,6 +1640,12 @@ func (r *ClickHouseReader) SetTTLV2(ctx context.Context, orgID string, params *m
 		return nil, err
 	}
 
+	// Calculate cold storage duration
+	coldStorageDuration := -1
+	if len(params.ColdStorageVolume) > 0 && params.ToColdStorageDuration > 0 {
+		coldStorageDuration = int(params.ToColdStorageDuration) // Already in days
+	}
+
 	tableNames := []string{
 		r.logsDB + "." + r.logsLocalTableV2,
 		r.logsDB + "." + r.logsResourceLocalTableV2,
@@ -1655,25 +1661,47 @@ func (r *ClickHouseReader) SetTTLV2(ctx context.Context, orgID string, params *m
 		}
 	}
 
 	// Build multiIf expressions for both tables
 	multiIfExpr := r.buildMultiIfExpression(params.TTLConditions, params.DefaultTTLDays, false)
 	resourceMultiIfExpr := r.buildMultiIfExpression(params.TTLConditions, params.DefaultTTLDays, true)
 
-	ttlPayload := map[string]string{
-		tableNames[0]: fmt.Sprintf(`ALTER TABLE %s ON CLUSTER %s MODIFY COLUMN _retention_days UInt16 DEFAULT %s`,
-			tableNames[0], r.cluster, multiIfExpr),
-		tableNames[1]: fmt.Sprintf(`ALTER TABLE %s ON CLUSTER %s MODIFY COLUMN _retention_days UInt16 DEFAULT %s`,
-			tableNames[1], r.cluster, resourceMultiIfExpr),
-	}
+	ttlPayload := make(map[string][]string)
+
+	queries := []string{
+		fmt.Sprintf(`ALTER TABLE %s ON CLUSTER %s MODIFY COLUMN _retention_days UInt16 DEFAULT %s`,
+			tableNames[0], r.cluster, multiIfExpr),
+	}
+
+	if len(params.ColdStorageVolume) > 0 && coldStorageDuration > 0 {
+		queries = append(queries, fmt.Sprintf(`ALTER TABLE %s ON CLUSTER %s MODIFY COLUMN _retention_days_cold UInt16 DEFAULT %d`,
+			tableNames[0], r.cluster, coldStorageDuration))
+
+		queries = append(queries, fmt.Sprintf(`ALTER TABLE %s ON CLUSTER %s MODIFY TTL toDateTime(timestamp / 1000000000) + toIntervalDay(_retention_days) DELETE, toDateTime(timestamp / 1000000000) + toIntervalDay(_retention_days_cold) TO VOLUME '%s'`,
+			tableNames[0], r.cluster, params.ColdStorageVolume))
+	}
+
+	ttlPayload[tableNames[0]] = queries
+
+	resourceQueries := []string{
+		fmt.Sprintf(`ALTER TABLE %s ON CLUSTER %s MODIFY COLUMN _retention_days UInt16 DEFAULT %s`,
+			tableNames[1], r.cluster, resourceMultiIfExpr),
+	}
+
+	if len(params.ColdStorageVolume) > 0 && coldStorageDuration > 0 {
+		resourceQueries = append(resourceQueries, fmt.Sprintf(`ALTER TABLE %s ON CLUSTER %s MODIFY COLUMN _retention_days_cold UInt16 DEFAULT %d`,
+			tableNames[1], r.cluster, coldStorageDuration))
+
+		resourceQueries = append(resourceQueries, fmt.Sprintf(`ALTER TABLE %s ON CLUSTER %s MODIFY TTL toDateTime(seen_at_ts_bucket_start) + toIntervalSecond(1800) + toIntervalDay(_retention_days) DELETE, toDateTime(seen_at_ts_bucket_start) + toIntervalSecond(1800) + toIntervalDay(_retention_days_cold) TO VOLUME '%s'`,
+			tableNames[1], r.cluster, params.ColdStorageVolume))
+	}
+
+	ttlPayload[tableNames[1]] = resourceQueries
 
 	ttlConditionsJSON, err := json.Marshal(params.TTLConditions)
 	if err != nil {
 		return nil, errorsV2.Wrapf(err, errorsV2.TypeInternal, errorsV2.CodeInternal, "error marshalling TTL condition")
 	}
 
-	// Execute the TTL modifications synchronously
-	for tableName, query := range ttlPayload {
+	// Store the operation in the database
+	for tableName, queries := range ttlPayload {
 		customTTL := types.TTLSetting{
 			Identifiable: types.Identifiable{
 				ID: valuer.GenerateUUID(),
@@ -1682,12 +1710,13 @@ func (r *ClickHouseReader) SetTTLV2(ctx context.Context, orgID string, params *m
 				CreatedAt: time.Now(),
 				UpdatedAt: time.Now(),
 			},
-			TransactionID: uuid,
-			TableName:     tableName,
-			TTL:           params.DefaultTTLDays,
-			Condition:     string(ttlConditionsJSON),
-			Status:        constants.StatusPending,
-			OrgID:         orgID,
+			TransactionID:  uuid,
+			TableName:      tableName,
+			TTL:            params.DefaultTTLDays,
+			Condition:      string(ttlConditionsJSON),
+			Status:         constants.StatusPending,
+			ColdStorageTTL: coldStorageDuration,
+			OrgID:          orgID,
 		}
 
 		// Insert TTL setting record
@@ -1697,19 +1726,24 @@ func (r *ClickHouseReader) SetTTLV2(ctx context.Context, orgID string, params *m
 			return nil, errorsV2.Wrapf(dbErr, errorsV2.TypeInternal, errorsV2.CodeInternal, "error inserting TTL settings")
 		}
 
-		zap.L().Debug("Executing custom retention TTL request: ", zap.String("request", query))
-
-		// Execute the ALTER TABLE query
-		if err := r.db.Exec(ctx, query); err != nil {
-			zap.L().Error("error while setting custom retention ttl", zap.Error(err))
-
-			// Update status to failed
-			r.updateCustomRetentionTTLStatus(ctx, orgID, tableName, constants.StatusFailed)
-
-			return nil, errorsV2.Wrapf(err, errorsV2.TypeInternal, errorsV2.CodeInternal, "error setting custom retention TTL for table %s", tableName)
-		}
+		if len(params.ColdStorageVolume) > 0 && coldStorageDuration > 0 {
+			err := r.setColdStorage(ctx, tableName, params.ColdStorageVolume)
+			if err != nil {
+				zap.L().Error("error in setting cold storage", zap.Error(err))
+				r.updateCustomRetentionTTLStatus(ctx, orgID, tableName, constants.StatusFailed)
+				return nil, errorsV2.Wrapf(err.Err, errorsV2.TypeInternal, errorsV2.CodeInternal, "error setting cold storage for table %s", tableName)
+			}
+		}
+
+		for i, query := range queries {
+			zap.L().Debug("Executing custom retention TTL request: ", zap.String("request", query), zap.Int("step", i+1))
+			if err := r.db.Exec(ctx, query); err != nil {
+				zap.L().Error("error while setting custom retention ttl", zap.Error(err))
+				r.updateCustomRetentionTTLStatus(ctx, orgID, tableName, constants.StatusFailed)
+				return nil, errorsV2.Wrapf(err, errorsV2.TypeInternal, errorsV2.CodeInternal, "error setting custom retention TTL for table %s, query: %s", tableName, query)
+			}
+		}
 
 		// Update status to success
 		r.updateCustomRetentionTTLStatus(ctx, orgID, tableName, constants.StatusSuccess)
 	}
@@ -1841,6 +1875,7 @@ func (r *ClickHouseReader) GetCustomRetentionTTL(ctx context.Context, orgID stri
 		response.DefaultTTLDays = customTTL.TTL
 		response.TTLConditions = ttlConditions
 		response.Status = customTTL.Status
+		response.ColdStorageTTLDays = customTTL.ColdStorageTTL
 
 	} else {
 		// V1 - Traditional TTL
```
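To make the generated DDL concrete, the sketch below prints the statement sequence the new loop issues for the logs table. The table and cluster names are placeholders, the multiIf expression is a made-up stand-in for the output of buildMultiIfExpression, and the 15-day duration and 'cold' volume are invented; only the query templates mirror the diff.

```go
package main

import "fmt"

func main() {
	table := "signoz_logs.logs_v2" // placeholder; real names come from r.logsDB and tableNames
	cluster := "cluster"           // placeholder for r.cluster
	// Assumed shape of buildMultiIfExpression's output: per-condition
	// retention days with a default fallback.
	multiIfExpr := "multiIf(severity_text = 'DEBUG', 7, 30)"
	coldStorageDuration := 15 // days before rows move to the cold volume
	coldVolume := "cold"

	// The three-step sequence built for tableNames[0] when cold storage is configured:
	queries := []string{
		fmt.Sprintf("ALTER TABLE %s ON CLUSTER %s MODIFY COLUMN _retention_days UInt16 DEFAULT %s",
			table, cluster, multiIfExpr),
		fmt.Sprintf("ALTER TABLE %s ON CLUSTER %s MODIFY COLUMN _retention_days_cold UInt16 DEFAULT %d",
			table, cluster, coldStorageDuration),
		fmt.Sprintf("ALTER TABLE %s ON CLUSTER %s MODIFY TTL toDateTime(timestamp / 1000000000) + toIntervalDay(_retention_days) DELETE, toDateTime(timestamp / 1000000000) + toIntervalDay(_retention_days_cold) TO VOLUME '%s'",
			table, cluster, coldVolume),
	}
	for i, q := range queries {
		fmt.Printf("step %d: %s\n", i+1, q)
	}
}
```

The ordering matters: the per-row `_retention_days` and `_retention_days_cold` column defaults must exist before the MODIFY TTL clause can reference them to delete rows and tier them to the cold volume.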
Response model: new cold-storage fields on the V2 response.

```diff
@@ -433,8 +433,10 @@ type GetCustomRetentionTTLResponse struct {
 	ExpectedLogsMoveTime int `json:"expected_logs_move_ttl_duration_hrs,omitempty"`
 
 	// V2 fields
-	DefaultTTLDays int                   `json:"default_ttl_days,omitempty"`
-	TTLConditions  []CustomRetentionRule `json:"ttl_conditions,omitempty"`
+	DefaultTTLDays     int                   `json:"default_ttl_days,omitempty"`
+	TTLConditions      []CustomRetentionRule `json:"ttl_conditions,omitempty"`
+	ColdStorageVolume  string                `json:"cold_storage_volume,omitempty"`
+	ColdStorageTTLDays int                   `json:"cold_storage_ttl_days,omitempty"`
 }
 
 type CustomRetentionTTLResponse struct {
```
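For reference, a sketch of what the new V2 fields look like on the wire. The struct here is a trimmed, local re-declaration carrying only the cold-storage additions, and the values are invented:

```go
package main

import (
	"encoding/json"
	"fmt"
)

// v2Response re-declares just the new fields with the same json tags,
// to show the serialized shape (omitempty hides unset fields).
type v2Response struct {
	DefaultTTLDays     int    `json:"default_ttl_days,omitempty"`
	ColdStorageVolume  string `json:"cold_storage_volume,omitempty"`
	ColdStorageTTLDays int    `json:"cold_storage_ttl_days,omitempty"`
}

func main() {
	b, _ := json.MarshalIndent(v2Response{
		DefaultTTLDays:     30,
		ColdStorageVolume:  "cold",
		ColdStorageTTLDays: 15,
	}, "", "  ")
	fmt.Println(string(b))
	// {
	//   "default_ttl_days": 30,
	//   "cold_storage_volume": "cold",
	//   "cold_storage_ttl_days": 15
	// }
}
```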