Compare commits

13 Commits: v0.81.0...remove-dea

| Author | SHA1 | Date |
|---|---|---|
|  | 807aa60906 |  |
|  | 208a5603a9 |  |
|  | 0de779a866 |  |
|  | 5cc833b73f |  |
|  | 3eee3bfec1 |  |
|  | 01b308d507 |  |
|  | dcf627a683 |  |
|  | 49c04eb9d9 |  |
|  | c89a8cbb0c |  |
|  | b6bb71f650 |  |
|  | af135aa068 |  |
|  | 4a4e4d6779 |  |
|  | fc604915ed |  |
@@ -66,8 +66,6 @@ func NewAPIHandler(opts APIHandlerOptions, signoz *signoz.SigNoz) (*APIHandler,
         LogsParsingPipelineController: opts.LogsParsingPipelineController,
         Cache: opts.Cache,
         FluxInterval: opts.FluxInterval,
-        UseLogsNewSchema: opts.UseLogsNewSchema,
-        UseTraceNewSchema: opts.UseTraceNewSchema,
         AlertmanagerAPI: alertmanager.NewAPI(signoz.Alertmanager),
         FieldsAPI: fields.NewAPI(signoz.TelemetryStore),
         Signoz: signoz,
@@ -23,12 +23,10 @@ func NewDataConnector(
     telemetryStore telemetrystore.TelemetryStore,
     prometheus prometheus.Prometheus,
     cluster string,
-    useLogsNewSchema bool,
-    useTraceNewSchema bool,
     fluxIntervalForTraceDetail time.Duration,
     cache cache.Cache,
 ) *ClickhouseReader {
-    chReader := basechr.NewReader(sqlDB, telemetryStore, prometheus, cluster, useLogsNewSchema, useTraceNewSchema, fluxIntervalForTraceDetail, cache)
+    chReader := basechr.NewReader(sqlDB, telemetryStore, prometheus, cluster, fluxIntervalForTraceDetail, cache)
     return &ClickhouseReader{
         conn: telemetryStore.ClickhouseDB(),
         appdb: sqlDB,
@@ -62,8 +62,6 @@ type ServerOptions struct {
     FluxIntervalForTraceDetail string
     Cluster string
     GatewayUrl string
-    UseLogsNewSchema bool
-    UseTraceNewSchema bool
     Jwt *authtypes.JWT
 }
 
@@ -132,8 +130,6 @@ func NewServer(serverOptions *ServerOptions) (*Server, error) {
         serverOptions.SigNoz.TelemetryStore,
         serverOptions.SigNoz.Prometheus,
         serverOptions.Cluster,
-        serverOptions.UseLogsNewSchema,
-        serverOptions.UseTraceNewSchema,
         fluxIntervalForTraceDetail,
         serverOptions.SigNoz.Cache,
     )
@@ -151,8 +147,6 @@ func NewServer(serverOptions *ServerOptions) (*Server, error) {
         serverOptions.SigNoz.SQLStore.SQLxDB(),
         reader,
         c,
-        serverOptions.UseLogsNewSchema,
-        serverOptions.UseTraceNewSchema,
         serverOptions.SigNoz.Alertmanager,
         serverOptions.SigNoz.SQLStore,
         serverOptions.SigNoz.TelemetryStore,
@@ -233,8 +227,6 @@ func NewServer(serverOptions *ServerOptions) (*Server, error) {
         FluxInterval: fluxInterval,
         Gateway: gatewayProxy,
         GatewayUrl: serverOptions.GatewayUrl,
-        UseLogsNewSchema: serverOptions.UseLogsNewSchema,
-        UseTraceNewSchema: serverOptions.UseTraceNewSchema,
         JWT: serverOptions.Jwt,
     }
 
@@ -244,8 +236,6 @@ func NewServer(serverOptions *ServerOptions) (*Server, error) {
     }
 
     s := &Server{
-        // logger: logger,
-        // tracer: tracer,
         ruleManager: rm,
         serverOptions: serverOptions,
         unavailableChannel: make(chan healthcheck.Status),
@@ -486,8 +476,6 @@ func makeRulesManager(
     db *sqlx.DB,
     ch baseint.Reader,
     cache cache.Cache,
-    useLogsNewSchema bool,
-    useTraceNewSchema bool,
     alertmanager alertmanager.Alertmanager,
     sqlstore sqlstore.SQLStore,
     telemetryStore telemetrystore.TelemetryStore,
@@ -504,8 +492,6 @@ func makeRulesManager(
         Cache: cache,
         EvalDelay: baseconst.GetEvalDelay(),
         PrepareTaskFunc: rules.PrepareTaskFunc,
-        UseLogsNewSchema: useLogsNewSchema,
-        UseTraceNewSchema: useTraceNewSchema,
         PrepareTestRuleFunc: rules.TestNotification,
         Alertmanager: alertmanager,
         SQLStore: sqlstore,
@@ -21,6 +21,7 @@ import (
     "go.uber.org/zap/zapcore"
 )
 
+// Deprecated: Please use the logger from pkg/instrumentation.
 func initZapLog() *zap.Logger {
     config := zap.NewProductionConfig()
     config.EncoderConfig.TimeKey = "timestamp"
@@ -50,7 +51,9 @@ func main() {
     var gatewayUrl string
     var useLicensesV3 bool
 
+    // Deprecated
     flag.BoolVar(&useLogsNewSchema, "use-logs-new-schema", false, "use logs_v2 schema for logs")
+    // Deprecated
     flag.BoolVar(&useTraceNewSchema, "use-trace-new-schema", false, "use new schema for traces")
     // Deprecated
     flag.StringVar(&promConfigPath, "config", "./config/prometheus.yml", "(prometheus config to read metrics)")
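The two schema flags above are now parse-only: they stay registered so existing launch commands keep working, but nothing reads their values anymore. A minimal sketch of that retirement pattern, assuming a stripped-down main() rather than the actual file:

```go
package main

import (
	"flag"
	"log"
)

func main() {
	// Deprecated: kept only so older invocations of the binary still parse.
	var useLogsNewSchema, useTraceNewSchema bool
	flag.BoolVar(&useLogsNewSchema, "use-logs-new-schema", false, "use logs_v2 schema for logs")
	flag.BoolVar(&useTraceNewSchema, "use-trace-new-schema", false, "use new schema for traces")
	flag.Parse()

	// The values are intentionally ignored: the new schemas are now the only
	// code path, so ServerOptions no longer carries these fields.
	log.Println("server starting; deprecated schema flags accepted but unused")
}
```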
@@ -136,8 +139,6 @@ func main() {
         FluxIntervalForTraceDetail: fluxIntervalForTraceDetail,
         Cluster: cluster,
         GatewayUrl: gatewayUrl,
-        UseLogsNewSchema: useLogsNewSchema,
-        UseTraceNewSchema: useTraceNewSchema,
         Jwt: jwt,
     }
 
@@ -25,8 +25,6 @@ func PrepareTaskFunc(opts baserules.PrepareTaskOptions) (baserules.Task, error)
         ruleId,
         opts.Rule,
         opts.Reader,
-        opts.UseLogsNewSchema,
-        opts.UseTraceNewSchema,
         baserules.WithEvalDelay(opts.ManagerOpts.EvalDelay),
         baserules.WithSQLStore(opts.SQLStore),
     )
@@ -123,8 +121,6 @@ func TestNotification(opts baserules.PrepareTestRuleOptions) (int, *basemodel.Ap
         alertname,
         parsedRule,
         opts.Reader,
-        opts.UseLogsNewSchema,
-        opts.UseTraceNewSchema,
         baserules.WithSendAlways(),
         baserules.WithSendUnmatched(),
         baserules.WithSQLStore(opts.SQLStore),
@@ -64,10 +64,6 @@ export const TraceDetail = Loadable(
     ),
 );
 
-export const UsageExplorerPage = Loadable(
-    () => import(/* webpackChunkName: "UsageExplorerPage" */ 'modules/Usage'),
-);
-
 export const SignupPage = Loadable(
     () => import(/* webpackChunkName: "SignupPage" */ 'pages/SignUp'),
 );
@@ -57,7 +57,6 @@ import {
     TracesFunnels,
     TracesSaveViews,
     UnAuthorized,
-    UsageExplorerPage,
     WorkspaceAccessRestricted,
     WorkspaceBlocked,
     WorkspaceSuspended,
@@ -155,13 +154,6 @@ const routes: AppRoutes[] = [
         isPrivate: true,
         key: 'SETTINGS',
     },
-    {
-        path: ROUTES.USAGE_EXPLORER,
-        exact: true,
-        component: UsageExplorerPage,
-        isPrivate: true,
-        key: 'USAGE_EXPLORER',
-    },
     {
         path: ROUTES.ALL_DASHBOARD,
         exact: true,
@@ -1,26 +0,0 @@
-import axios from 'api';
-import { ErrorResponseHandler } from 'api/ErrorResponseHandler';
-import { AxiosError } from 'axios';
-import { ErrorResponse, SuccessResponse } from 'types/api';
-import { PayloadProps, Props } from 'types/api/logs/getLogs';
-
-const GetLogs = async (
-    props: Props,
-): Promise<SuccessResponse<PayloadProps> | ErrorResponse> => {
-    try {
-        const data = await axios.get(`/logs`, {
-            params: props,
-        });
-
-        return {
-            statusCode: 200,
-            error: null,
-            message: '',
-            payload: data.data.results,
-        };
-    } catch (error) {
-        return ErrorResponseHandler(error as AxiosError);
-    }
-};
-
-export default GetLogs;
@@ -1,19 +0,0 @@
-import apiV1 from 'api/apiV1';
-import getLocalStorageKey from 'api/browser/localstorage/get';
-import { ENVIRONMENT } from 'constants/env';
-import { LOCALSTORAGE } from 'constants/localStorage';
-import { EventSourcePolyfill } from 'event-source-polyfill';
-
-// 10 min in ms
-const TIMEOUT_IN_MS = 10 * 60 * 1000;
-
-export const LiveTail = (queryParams: string): EventSourcePolyfill =>
-    new EventSourcePolyfill(
-        `${ENVIRONMENT.baseURL}${apiV1}logs/tail?${queryParams}`,
-        {
-            headers: {
-                Authorization: `Bearer ${getLocalStorageKey(LOCALSTORAGE.AUTH_TOKEN)}`,
-            },
-            heartbeatTimeout: TIMEOUT_IN_MS,
-        },
-    );
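The deleted LiveTail helper was an EventSource client for the logs/tail endpoint, sending a bearer token and using a 10-minute heartbeat timeout. For reference, a hedged sketch of the same consumption pattern in Go; the URL, placeholder token, and the coarse total-request timeout (the polyfill's timeout is per-heartbeat, not total) are assumptions, not part of this change:

```go
package main

import (
	"bufio"
	"fmt"
	"net/http"
	"strings"
	"time"
)

func main() {
	req, err := http.NewRequest(http.MethodGet, "http://localhost:8080/api/v1/logs/tail?q=", nil)
	if err != nil {
		panic(err)
	}
	req.Header.Set("Authorization", "Bearer "+"<AUTH_TOKEN>") // placeholder token

	// Coarse stand-in for the deleted client's 10-minute heartbeat timeout.
	client := &http.Client{Timeout: 10 * time.Minute}
	resp, err := client.Do(req)
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()

	// SSE frames arrive as "data: <json>" lines separated by blank lines.
	scanner := bufio.NewScanner(resp.Body)
	for scanner.Scan() {
		if line := scanner.Text(); strings.HasPrefix(line, "data: ") {
			fmt.Println(strings.TrimPrefix(line, "data: "))
		}
	}
}
```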
@@ -17,7 +17,6 @@ const ROUTES = {
     '/get-started/infrastructure-monitoring',
     GET_STARTED_AWS_MONITORING: '/get-started/aws-monitoring',
     GET_STARTED_AZURE_MONITORING: '/get-started/azure-monitoring',
-    USAGE_EXPLORER: '/usage-explorer',
     APPLICATION: '/services',
     ALL_DASHBOARD: '/dashboard',
     DASHBOARD: '/dashboard/:dashboardId',
@@ -133,231 +133,3 @@ const ServicesListTable = memo(
     ),
 );
 ServicesListTable.displayName = 'ServicesListTable';
-
-function ServiceMetrics({
-    onUpdateChecklistDoneItem,
-    loadingUserPreferences,
-}: {
-    onUpdateChecklistDoneItem: (itemKey: string) => void;
-    loadingUserPreferences: boolean;
-}): JSX.Element {
-    const { selectedTime: globalSelectedInterval } = useSelector<
-        AppState,
-        GlobalReducer
-    >((state) => state.globalTime);
-
-    const { user, activeLicenseV3 } = useAppContext();
-
-    const [timeRange, setTimeRange] = useState(() => {
-        const now = new Date().getTime();
-        return {
-            startTime: now - homeInterval,
-            endTime: now,
-            selectedInterval: homeInterval,
-        };
-    });
-
-    const { queries } = useResourceAttribute();
-    const { safeNavigate } = useSafeNavigate();
-
-    const selectedTags = useMemo(
-        () => (convertRawQueriesToTraceSelectedTags(queries) as Tags[]) || [],
-        [queries],
-    );
-
-    const [isError, setIsError] = useState(false);
-
-    const queryKey: QueryKey = useMemo(
-        () => [
-            timeRange.startTime,
-            timeRange.endTime,
-            selectedTags,
-            globalSelectedInterval,
-        ],
-        [
-            timeRange.startTime,
-            timeRange.endTime,
-            selectedTags,
-            globalSelectedInterval,
-        ],
-    );
-
-    const {
-        data,
-        isLoading: isLoadingTopLevelOperations,
-        isError: isErrorTopLevelOperations,
-    } = useGetTopLevelOperations(queryKey, {
-        start: timeRange.startTime * 1e6,
-        end: timeRange.endTime * 1e6,
-    });
-
-    const handleTimeIntervalChange = useCallback((value: number): void => {
-        const timeInterval = TIME_PICKER_OPTIONS.find(
-            (option) => option.value === value,
-        );
-
-        logEvent('Homepage: Services time interval updated', {
-            updatedTimeInterval: timeInterval?.label,
-        });
-
-        const now = new Date();
-        setTimeRange({
-            startTime: now.getTime() - value,
-            endTime: now.getTime(),
-            selectedInterval: value,
-        });
-    }, []);
-
-    const topLevelOperations = useMemo(() => Object.entries(data || {}), [data]);
-
-    const queryRangeRequestData = useMemo(
-        () =>
-            getQueryRangeRequestData({
-                topLevelOperations,
-                minTime: timeRange.startTime * 1e6,
-                maxTime: timeRange.endTime * 1e6,
-                globalSelectedInterval,
-            }),
-        [
-            globalSelectedInterval,
-            timeRange.endTime,
-            timeRange.startTime,
-            topLevelOperations,
-        ],
-    );
-
-    const dataQueries = useGetQueriesRange(
-        queryRangeRequestData,
-        ENTITY_VERSION_V4,
-        {
-            queryKey: useMemo(
-                () => [
-                    `GetMetricsQueryRange-home-${globalSelectedInterval}`,
-                    timeRange.endTime,
-                    timeRange.startTime,
-                    globalSelectedInterval,
-                ],
-                [globalSelectedInterval, timeRange.endTime, timeRange.startTime],
-            ),
-            keepPreviousData: true,
-            enabled: true,
-            refetchOnMount: false,
-            onError: () => {
-                setIsError(true);
-            },
-        },
-    );
-
-    const isLoading = useMemo(() => dataQueries.some((query) => query.isLoading), [
-        dataQueries,
-    ]);
-
-    const services: ServicesList[] = useMemo(
-        () =>
-            getServiceListFromQuery({
-                queries: dataQueries,
-                topLevelOperations,
-                isLoading,
-            }),
-        [dataQueries, topLevelOperations, isLoading],
-    );
-
-    const sortedServices = useMemo(
-        () =>
-            services?.sort((a, b) => {
-                const aUpdateAt = new Date(a.p99).getTime();
-                const bUpdateAt = new Date(b.p99).getTime();
-                return bUpdateAt - aUpdateAt;
-            }) || [],
-        [services],
-    );
-
-    const servicesExist = sortedServices.length > 0;
-    const top5Services = useMemo(() => sortedServices.slice(0, 5), [
-        sortedServices,
-    ]);
-
-    useEffect(() => {
-        if (!loadingUserPreferences && servicesExist) {
-            onUpdateChecklistDoneItem('SETUP_SERVICES');
-        }
-    }, [onUpdateChecklistDoneItem, loadingUserPreferences, servicesExist]);
-
-    const handleRowClick = useCallback(
-        (record: ServicesList) => {
-            logEvent('Homepage: Service clicked', {
-                serviceName: record.serviceName,
-            });
-            safeNavigate(`${ROUTES.APPLICATION}/${record.serviceName}`);
-        },
-        [safeNavigate],
-    );
-
-    if (isLoadingTopLevelOperations || isLoading) {
-        return (
-            <Card className="services-list-card home-data-card loading-card">
-                <Card.Content>
-                    <Skeleton active />
-                </Card.Content>
-            </Card>
-        );
-    }
-
-    if (isErrorTopLevelOperations || isError) {
-        return (
-            <Card className="services-list-card home-data-card error-card">
-                <Card.Content>
-                    <Skeleton active />
-                </Card.Content>
-            </Card>
-        );
-    }
-
-    return (
-        <Card className="services-list-card home-data-card">
-            {servicesExist && (
-                <Card.Header>
-                    <div className="services-header home-data-card-header">
-                        {' '}
-                        Services
-                        <div className="services-header-actions">
-                            <Select
-                                value={timeRange.selectedInterval}
-                                onChange={handleTimeIntervalChange}
-                                options={TIME_PICKER_OPTIONS}
-                                className="services-header-select"
-                            />
-                        </div>
-                    </div>
-                </Card.Header>
-            )}
-            <Card.Content>
-                {servicesExist ? (
-                    <ServicesListTable services={top5Services} onRowClick={handleRowClick} />
-                ) : (
-                    <EmptyState user={user} activeLicenseV3={activeLicenseV3} />
-                )}
-            </Card.Content>
-
-            {servicesExist && (
-                <Card.Footer>
-                    <div className="services-footer home-data-card-footer">
-                        <Link to="/services">
-                            <Button
-                                type="link"
-                                className="periscope-btn link learn-more-link"
-                                onClick={(): void => {
-                                    logEvent('Homepage: All Services clicked', {});
-                                }}
-                            >
-                                All Services <ArrowRight size={12} />
-                            </Button>
-                        </Link>
-                    </div>
-                </Card.Footer>
-            )}
-        </Card>
-    );
-}
-
-export default memo(ServiceMetrics);
@@ -21,17 +21,10 @@ function Services({
     return (
         <Sentry.ErrorBoundary fallback={<ErrorBoundaryFallback />}>
             <div className="home-services-container">
-                {isSpanMetricEnabled ? (
-                    <ServiceMetrics
+                <ServiceTraces
                     onUpdateChecklistDoneItem={onUpdateChecklistDoneItem}
                     loadingUserPreferences={loadingUserPreferences}
                 />
-                ) : (
-                    <ServiceTraces
-                        onUpdateChecklistDoneItem={onUpdateChecklistDoneItem}
-                        loadingUserPreferences={loadingUserPreferences}
-                    />
-                )}
             </div>
         </Sentry.ErrorBoundary>
     );
@@ -481,7 +481,6 @@ export const apDexMetricsQueryBuilderQueries = ({
 export const operationPerSec = ({
     servicename,
     tagFilterItems,
-    topLevelOperations,
 }: OperationPerSecProps): QueryBuilderData => {
     const autocompleteData: BaseAutocompleteData[] = [
         {
@@ -1,7 +1,4 @@
 import logEvent from 'api/common/logEvent';
-import getTopLevelOperations, {
-    ServiceDataProps,
-} from 'api/metrics/getTopLevelOperations';
 import { FeatureKeys } from 'constants/features';
 import { QueryParams } from 'constants/query';
 import { PANEL_TYPES } from 'constants/queryBuilder';
@@ -110,21 +107,6 @@ function Application(): JSX.Element {
     // eslint-disable-next-line react-hooks/exhaustive-deps
     }, []);
 
-    const {
-        data: topLevelOperations,
-        error: topLevelOperationsError,
-        isLoading: topLevelOperationsIsLoading,
-        isError: topLevelOperationsIsError,
-    } = useQuery<ServiceDataProps>({
-        queryKey: [servicename, minTime, maxTime],
-        queryFn: (): Promise<ServiceDataProps> =>
-            getTopLevelOperations({
-                service: servicename || '',
-                start: minTime,
-                end: maxTime,
-            }),
-    });
-
     const selectedTraceTags: string = JSON.stringify(
         convertRawQueriesToTraceSelectedTags(queries) || [],
     );
@@ -137,14 +119,6 @@ function Application(): JSX.Element {
     [queries],
 );
 
-    const topLevelOperationsRoute = useMemo(
-        () =>
-            topLevelOperations
-                ? defaultTo(topLevelOperations[servicename || ''], [])
-                : [],
-        [servicename, topLevelOperations],
-    );
-
     const operationPerSecWidget = useMemo(
         () =>
             getWidgetQueryBuilder({
@@ -1,224 +0,0 @@
-/* eslint-disable */
-//@ts-nocheck
-
-import { Select, Space, Typography } from 'antd';
-import Graph from 'components/Graph';
-import { useEffect, useState } from 'react';
-import { connect, useSelector } from 'react-redux';
-import { withRouter } from 'react-router-dom';
-import { GetService, getUsageData, UsageDataItem } from 'store/actions';
-import { AppState } from 'store/reducers';
-import { GlobalTime } from 'types/actions/globalTime';
-import { GlobalReducer } from 'types/reducer/globalTime';
-import MetricReducer from 'types/reducer/metrics';
-import { isOnboardingSkipped } from 'utils/app';
-
-import { Card } from './styles';
-
-const { Option } = Select;
-
-interface UsageExplorerProps {
-    usageData: UsageDataItem[];
-    getUsageData: (
-        minTime: number,
-        maxTime: number,
-        selectedInterval: number,
-        selectedService: string,
-    ) => void;
-    getServicesList: ({
-        selectedTimeInterval,
-    }: {
-        selectedTimeInterval: GlobalReducer['selectedTime'];
-    }) => void;
-    globalTime: GlobalTime;
-    servicesList: servicesListItem[];
-    totalCount: number;
-}
-const timeDaysOptions = [
-    { value: 30, label: 'Last 30 Days' },
-    { value: 7, label: 'Last week' },
-    { value: 1, label: 'Last day' },
-];
-
-const interval = [
-    {
-        value: 604800,
-        chartDivideMultiplier: 1,
-        label: 'Weekly',
-        applicableOn: [timeDaysOptions[0]],
-    },
-    {
-        value: 86400,
-        chartDivideMultiplier: 30,
-        label: 'Daily',
-        applicableOn: [timeDaysOptions[0], timeDaysOptions[1]],
-    },
-    {
-        value: 3600,
-        chartDivideMultiplier: 10,
-        label: 'Hours',
-        applicableOn: [timeDaysOptions[2], timeDaysOptions[1]],
-    },
-];
-
-function _UsageExplorer(props: UsageExplorerProps): JSX.Element {
-    const [selectedTime, setSelectedTime] = useState(timeDaysOptions[1]);
-    const [selectedInterval, setSelectedInterval] = useState(interval[2]);
-    const [selectedService, setSelectedService] = useState<string>('');
-    const { selectedTime: globalSelectedTime } = useSelector<
-        AppState,
-        GlobalReducer
-    >((state) => state.globalTime);
-    const {
-        getServicesList,
-        getUsageData,
-        globalTime,
-        totalCount,
-        usageData,
-    } = props;
-    const { services } = useSelector<AppState, MetricReducer>(
-        (state) => state.metrics,
-    );
-
-    useEffect(() => {
-        if (selectedTime && selectedInterval) {
-            const maxTime = new Date().getTime() * 1000000;
-            const minTime = maxTime - selectedTime.value * 24 * 3600000 * 1000000;
-
-            getUsageData(minTime, maxTime, selectedInterval.value, selectedService);
-        }
-    }, [selectedTime, selectedInterval, selectedService, getUsageData]);
-
-    useEffect(() => {
-        getServicesList({
-            selectedTimeInterval: globalSelectedTime,
-        });
-    }, [globalTime, getServicesList, globalSelectedTime]);
-
-    const data = {
-        labels: usageData.map((s) => new Date(s.timestamp / 1000000)),
-        datasets: [
-            {
-                label: 'Span Count',
-                data: usageData.map((s) => s.count),
-                backgroundColor: 'rgba(255, 99, 132, 0.2)',
-                borderColor: 'rgba(255, 99, 132, 1)',
-                borderWidth: 2,
-            },
-        ],
-    };
-
-    return (
-        <>
-            <Space style={{ marginTop: 40, marginLeft: 20 }}>
-                <Space>
-                    <Select
-                        onSelect={(value): void => {
-                            setSelectedTime(
-                                timeDaysOptions.filter((item) => item.value == parseInt(value))[0],
-                            );
-                        }}
-                        value={selectedTime.label}
-                    >
-                        {timeDaysOptions.map(({ value, label }) => (
-                            <Option key={value} value={value}>
-                                {label}
-                            </Option>
-                        ))}
-                    </Select>
-                </Space>
-                <Space>
-                    <Select
-                        onSelect={(value): void => {
-                            setSelectedInterval(
-                                interval.filter((item) => item.value === parseInt(value))[0],
-                            );
-                        }}
-                        value={selectedInterval.label}
-                    >
-                        {interval
-                            .filter((interval) => interval.applicableOn.includes(selectedTime))
-                            .map((item) => (
-                                <Option key={item.label} value={item.value}>
-                                    {item.label}
-                                </Option>
-                            ))}
-                    </Select>
-                </Space>
-
-                <Space>
-                    <Select
-                        onSelect={(value): void => {
-                            setSelectedService(value);
-                        }}
-                        value={selectedService || 'All Services'}
-                    >
-                        <Option value="">All Services</Option>
-                        {services?.map((service) => (
-                            <Option key={service.serviceName} value={service.serviceName}>
-                                {service.serviceName}
-                            </Option>
-                        ))}
-                    </Select>
-                </Space>
-
-                {isOnboardingSkipped() && totalCount === 0 ? (
-                    <Space
-                        style={{
-                            width: '100%',
-                            margin: '40px 0',
-                            marginLeft: 20,
-                            justifyContent: 'center',
-                        }}
-                    >
-                        <Typography>
-                            No spans found. Please add instrumentation (follow this
-                            <a
-                                href="https://signoz.io/docs/instrumentation/overview"
-                                target="_blank"
-                                style={{ marginLeft: 3 }}
-                                rel="noreferrer"
-                            >
-                                guide
-                            </a>
-                            )
-                        </Typography>
-                    </Space>
-                ) : (
-                    <Space style={{ display: 'block', marginLeft: 20, width: 200 }}>
-                        <Typography>{`Total count is ${totalCount}`}</Typography>
-                    </Space>
-                )}
-            </Space>
-
-            <Card>
-                <Graph name="usage" data={data} type="bar" />
-            </Card>
-        </>
-    );
-}
-
-const mapStateToProps = (
-    state: AppState,
-): {
-    totalCount: number;
-    globalTime: GlobalTime;
-    usageData: UsageDataItem[];
-} => {
-    let totalCount = 0;
-    for (const item of state.usageDate) {
-        totalCount += item.count;
-    }
-    return {
-        totalCount,
-        usageData: state.usageDate,
-        globalTime: state.globalTime,
-    };
-};
-
-export const UsageExplorer = withRouter(
-    connect(mapStateToProps, {
-        getUsageData,
-        getServicesList: GetService,
-    })(_UsageExplorer),
-);
@@ -1,7 +0,0 @@
-import { UsageExplorer } from './UsageExplorer';
-
-function UsageExplorerContainer(): JSX.Element {
-    return <UsageExplorer />;
-}
-
-export default UsageExplorerContainer;
@@ -1,13 +0,0 @@
-import { Card as CardComponent } from 'antd';
-import styled from 'styled-components';
-
-export const Card = styled(CardComponent)`
-    &&& {
-        width: 90%;
-        margin-top: 2rem;
-    }
-
-    .ant-card-body {
-        height: 70vh;
-    }
-`;
@@ -2,4 +2,3 @@ export * from './global';
 export * from './metrics';
 export * from './serviceMap';
 export * from './types';
-export * from './usage';
@@ -1,34 +0,0 @@
-import GetLogs from 'api/logs/GetLogs';
-import { Dispatch } from 'redux';
-import AppActions from 'types/actions';
-import { SET_LOADING, SET_LOGS } from 'types/actions/logs';
-import { Props } from 'types/api/logs/getLogs';
-
-export const getLogs = (
-    props: Props,
-): ((dispatch: Dispatch<AppActions>) => void) => async (
-    dispatch,
-): Promise<void> => {
-    dispatch({
-        type: SET_LOADING,
-        payload: true,
-    });
-
-    const response = await GetLogs(props);
-
-    if (response.payload)
-        dispatch({
-            type: SET_LOGS,
-            payload: response.payload,
-        });
-    else
-        dispatch({
-            type: SET_LOGS,
-            payload: [],
-        });
-
-    dispatch({
-        type: SET_LOADING,
-        payload: false,
-    });
-};
@@ -1,17 +1,14 @@
 import { ServiceMapItemAction, ServiceMapLoading } from './serviceMap';
-import { GetUsageDataAction } from './usage';
 
 export enum ActionTypes {
     updateTimeInterval = 'UPDATE_TIME_INTERVAL',
     getServiceMapItems = 'GET_SERVICE_MAP_ITEMS',
     getServices = 'GET_SERVICES',
-    getUsageData = 'GET_USAGE_DATE',
     fetchTraces = 'FETCH_TRACES',
     fetchTraceItem = 'FETCH_TRACE_ITEM',
     serviceMapLoading = 'UPDATE_SERVICE_MAP_LOADING',
 }
 
 export type Action =
-    | GetUsageDataAction
     | ServiceMapItemAction
     | ServiceMapLoading;
@@ -1,34 +0,0 @@
-import api from 'api';
-import { Dispatch } from 'redux';
-import { toUTCEpoch } from 'utils/timeUtils';
-
-import { ActionTypes } from './types';
-
-export interface UsageDataItem {
-    timestamp: number;
-    count: number;
-}
-
-export interface GetUsageDataAction {
-    type: ActionTypes.getUsageData;
-    payload: UsageDataItem[];
-}
-
-export const getUsageData = (
-    minTime: number,
-    maxTime: number,
-    step: number,
-    service: string,
-) => async (dispatch: Dispatch): Promise<void> => {
-    const requesString = `/usage?start=${toUTCEpoch(minTime)}&end=${toUTCEpoch(
-        maxTime,
-    )}&step=${step}&service=${service || ''}`;
-    // Step can only be multiple of 3600
-    const response = await api.get<UsageDataItem[]>(requesString);
-
-    dispatch<GetUsageDataAction>({
-        type: ActionTypes.getUsageData,
-        payload: response.data,
-        // PNOTE - response.data in the axios response has the actual API response
-    });
-};
@@ -6,11 +6,9 @@ import { LogsReducer } from './logs';
 import metricsReducers from './metric';
 import { ServiceMapReducer } from './serviceMap';
 import traceReducer from './trace';
-import { usageDataReducer } from './usage';
 
 const reducers = combineReducers({
     traces: traceReducer,
-    usageDate: usageDataReducer,
     globalTime: globalTimeReducer,
     serviceMap: ServiceMapReducer,
     app: appReducer,
@@ -1,14 +0,0 @@
-/* eslint-disable sonarjs/no-small-switch */
-import { Action, ActionTypes, UsageDataItem } from 'store/actions';
-
-export const usageDataReducer = (
-    state: UsageDataItem[] = [{ timestamp: 0, count: 0 }],
-    action: Action,
-): UsageDataItem[] => {
-    switch (action.type) {
-        case ActionTypes.getUsageData:
-            return action.payload;
-        default:
-            return state;
-    }
-};
@@ -17,8 +17,6 @@ const (
 
 const (
     defaultTraceDB string = "signoz_traces"
-    defaultOperationsTable string = "distributed_signoz_operations"
-    defaultIndexTable string = "distributed_signoz_index_v2"
     defaultLocalIndexTable string = "signoz_index_v2"
    defaultErrorTable string = "distributed_signoz_error_index_v2"
     defaultDurationTable string = "distributed_durationSort"
@@ -59,19 +57,10 @@ type namespaceConfig struct {
     Enabled bool
     Datasource string
     TraceDB string
-    OperationsTable string
-    IndexTable string
-    LocalIndexTable string
-    DurationTable string
-    UsageExplorerTable string
-    SpansTable string
     ErrorTable string
+    LocalIndexTable string
     SpanAttributeTableV2 string
     SpanAttributeKeysTable string
-    DependencyGraphTable string
-    TopLevelOperationsTable string
-    LogsDB string
-    LogsTable string
     LogsLocalTable string
     LogsAttributeKeysTable string
     LogsResourceKeysTable string
@@ -82,6 +71,7 @@ type namespaceConfig struct {
     Encoding Encoding
     Connector Connector
 
+    LogsDB string
     LogsLocalTableV2 string
     LogsTableV2 string
     LogsResourceLocalTableV2 string

(One file diff suppressed because it is too large.)
@@ -49,7 +49,6 @@ import (
     "github.com/SigNoz/signoz/pkg/query-service/app/querier"
     querierV2 "github.com/SigNoz/signoz/pkg/query-service/app/querier/v2"
     "github.com/SigNoz/signoz/pkg/query-service/app/queryBuilder"
-    tracesV3 "github.com/SigNoz/signoz/pkg/query-service/app/traces/v3"
     tracesV4 "github.com/SigNoz/signoz/pkg/query-service/app/traces/v4"
     "github.com/SigNoz/signoz/pkg/query-service/auth"
     "github.com/SigNoz/signoz/pkg/query-service/cache"
@@ -118,9 +117,6 @@ type APIHandler struct {
     // Websocket connection upgrader
     Upgrader *websocket.Upgrader
 
-    UseLogsNewSchema bool
-    UseTraceNewSchema bool
-
     hostsRepo *inframetrics.HostsRepo
     processesRepo *inframetrics.ProcessesRepo
     podsRepo *inframetrics.PodsRepo
@@ -177,11 +173,6 @@ type APIHandlerOpts struct {
     // Querier Influx Interval
     FluxInterval time.Duration
 
-    // Use Logs New schema
-    UseLogsNewSchema bool
-
-    UseTraceNewSchema bool
-
     JWT *authtypes.JWT
 
     AlertmanagerAPI *alertmanager.API
@@ -194,21 +185,17 @@ type APIHandlerOpts struct {
 // NewAPIHandler returns an APIHandler
 func NewAPIHandler(opts APIHandlerOpts) (*APIHandler, error) {
     querierOpts := querier.QuerierOptions{
         Reader: opts.Reader,
         Cache: opts.Cache,
         KeyGenerator: queryBuilder.NewKeyGenerator(),
         FluxInterval: opts.FluxInterval,
-        UseLogsNewSchema: opts.UseLogsNewSchema,
-        UseTraceNewSchema: opts.UseTraceNewSchema,
     }
 
     querierOptsV2 := querierV2.QuerierOptions{
         Reader: opts.Reader,
         Cache: opts.Cache,
         KeyGenerator: queryBuilder.NewKeyGenerator(),
         FluxInterval: opts.FluxInterval,
-        UseLogsNewSchema: opts.UseLogsNewSchema,
-        UseTraceNewSchema: opts.UseTraceNewSchema,
     }
 
     querier := querier.NewQuerier(querierOpts)
@@ -239,8 +226,6 @@ func NewAPIHandler(opts APIHandlerOpts) (*APIHandler, error) {
         LogsParsingPipelineController: opts.LogsParsingPipelineController,
         querier: querier,
         querierV2: querierv2,
-        UseLogsNewSchema: opts.UseLogsNewSchema,
-        UseTraceNewSchema: opts.UseTraceNewSchema,
         hostsRepo: hostsRepo,
         processesRepo: processesRepo,
         podsRepo: podsRepo,
@@ -259,15 +244,8 @@ func NewAPIHandler(opts APIHandlerOpts) (*APIHandler, error) {
         FieldsAPI: opts.FieldsAPI,
     }
 
-    logsQueryBuilder := logsv3.PrepareLogsQuery
-    if opts.UseLogsNewSchema {
-        logsQueryBuilder = logsv4.PrepareLogsQuery
-    }
-
-    tracesQueryBuilder := tracesV3.PrepareTracesQuery
-    if opts.UseTraceNewSchema {
-        tracesQueryBuilder = tracesV4.PrepareTracesQuery
-    }
+    logsQueryBuilder := logsv4.PrepareLogsQuery
+    tracesQueryBuilder := tracesV4.PrepareTracesQuery
 
     builderOpts := queryBuilder.QueryBuilderOptions{
         BuildMetricQuery: metricsv3.PrepareMetricQuery,
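The hunk above collapses a startup-time, flag-gated choice of query builders into unconditional v4 assignments. A self-contained sketch of the before/after shape, with stand-in types rather than the real signoz packages:

```go
package main

import "fmt"

// prepareQuery stands in for the PrepareLogsQuery/PrepareTracesQuery builders.
type prepareQuery func(query string) string

func v3Builder(q string) string { return "v3:" + q }
func v4Builder(q string) string { return "v4:" + q }

// Before: the builder was picked from a feature flag at construction time.
func pickBuilderOld(useNewSchema bool) prepareQuery {
	builder := prepareQuery(v3Builder)
	if useNewSchema {
		builder = v4Builder
	}
	return builder
}

// After: the flag is retired and v4 is the only path.
func pickBuilderNew() prepareQuery {
	return v4Builder
}

func main() {
	fmt.Println(pickBuilderOld(false)("select 1")) // v3:select 1
	fmt.Println(pickBuilderNew()("select 1"))      // v4:select 1
}
```

Because the selection happens once at construction time, retiring the flag is a one-line change at each call site rather than a scattered set of branches.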
@@ -550,12 +528,8 @@ func (aH *APIHandler) RegisterRoutes(router *mux.Router, am *middleware.AuthZ) {
 
     // router.HandleFunc("/api/v1/get_percentiles", aH.getApplicationPercentiles).Methods(http.MethodGet)
     router.HandleFunc("/api/v1/services", am.ViewAccess(aH.getServices)).Methods(http.MethodPost)
-    router.HandleFunc("/api/v1/services/list", am.ViewAccess(aH.getServicesList)).Methods(http.MethodGet)
     router.HandleFunc("/api/v1/service/top_operations", am.ViewAccess(aH.getTopOperations)).Methods(http.MethodPost)
-    router.HandleFunc("/api/v1/service/top_level_operations", am.ViewAccess(aH.getServicesTopLevelOps)).Methods(http.MethodPost)
     router.HandleFunc("/api/v1/traces/{traceId}", am.ViewAccess(aH.SearchTraces)).Methods(http.MethodGet)
-    router.HandleFunc("/api/v1/usage", am.ViewAccess(aH.getUsage)).Methods(http.MethodGet)
-    router.HandleFunc("/api/v1/dependency_graph", am.ViewAccess(aH.dependencyGraph)).Methods(http.MethodPost)
     router.HandleFunc("/api/v1/settings/ttl", am.AdminAccess(aH.setTTL)).Methods(http.MethodPost)
     router.HandleFunc("/api/v1/settings/ttl", am.ViewAccess(aH.getTTL)).Methods(http.MethodGet)
     router.HandleFunc("/api/v1/settings/apdex", am.AdminAccess(aH.setApdexSettings)).Methods(http.MethodPost)
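A quick way to confirm the retired endpoints really stopped resolving is an httptest probe against the router; gorilla/mux answers 404 for unregistered paths. The stub handler below is hypothetical; only the route paths come from the hunk above:

```go
package main

import (
	"fmt"
	"net/http"
	"net/http/httptest"

	"github.com/gorilla/mux"
)

func main() {
	router := mux.NewRouter()
	// Mirror of a surviving route from the hunk above, with a stub handler.
	router.HandleFunc("/api/v1/services", func(w http.ResponseWriter, r *http.Request) {
		w.WriteHeader(http.StatusOK)
	}).Methods(http.MethodPost)

	for _, path := range []string{"/api/v1/usage", "/api/v1/services/list", "/api/v1/dependency_graph"} {
		rec := httptest.NewRecorder()
		router.ServeHTTP(rec, httptest.NewRequest(http.MethodGet, path, nil))
		fmt.Println(path, "->", rec.Code) // expect 404 once the routes are gone
	}
}
```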
@@ -1624,122 +1598,13 @@ func (aH *APIHandler) getTopOperations(w http.ResponseWriter, r *http.Request) {
 
 }
 
-func (aH *APIHandler) getUsage(w http.ResponseWriter, r *http.Request) {
-
-    query, err := parseGetUsageRequest(r)
-    if aH.HandleError(w, err, http.StatusBadRequest) {
-        return
-    }
-
-    result, err := aH.reader.GetUsage(r.Context(), query)
-    if aH.HandleError(w, err, http.StatusBadRequest) {
-        return
-    }
-
-    aH.WriteJSON(w, r, result)
-
-}
-
-func (aH *APIHandler) getServicesTopLevelOps(w http.ResponseWriter, r *http.Request) {
-
-    var start, end time.Time
-    var services []string
-
-    type topLevelOpsParams struct {
-        Service string `json:"service"`
-        Start   string `json:"start"`
-        End     string `json:"end"`
-    }
-
-    var params topLevelOpsParams
-    err := json.NewDecoder(r.Body).Decode(&params)
-    if err != nil {
-        zap.L().Error("Error in getting req body for get top operations API", zap.Error(err))
-    }
-
-    if params.Service != "" {
-        services = []string{params.Service}
-    }
-
-    startEpoch := params.Start
-    if startEpoch != "" {
-        startEpochInt, err := strconv.ParseInt(startEpoch, 10, 64)
-        if err != nil {
-            RespondError(w, &model.ApiError{Typ: model.ErrorBadData, Err: err}, "Error reading start time")
-            return
-        }
-        start = time.Unix(0, startEpochInt)
-    }
-    endEpoch := params.End
-    if endEpoch != "" {
-        endEpochInt, err := strconv.ParseInt(endEpoch, 10, 64)
-        if err != nil {
-            RespondError(w, &model.ApiError{Typ: model.ErrorBadData, Err: err}, "Error reading end time")
-            return
-        }
-        end = time.Unix(0, endEpochInt)
-    }
-
-    result, apiErr := aH.reader.GetTopLevelOperations(r.Context(), start, end, services)
-    if apiErr != nil {
-        RespondError(w, apiErr, nil)
-        return
-    }
-
-    aH.WriteJSON(w, r, result)
-}
-
 func (aH *APIHandler) getServices(w http.ResponseWriter, r *http.Request) {
-
-    query, err := parseGetServicesRequest(r)
-    if aH.HandleError(w, err, http.StatusBadRequest) {
-        return
-    }
-
-    result, apiErr := aH.reader.GetServices(r.Context(), query)
-    if apiErr != nil && aH.HandleError(w, apiErr.Err, http.StatusInternalServerError) {
-        return
-    }
-
-    data := map[string]interface{}{
-        "number": len(*result),
-    }
-    claims, errv2 := authtypes.ClaimsFromContext(r.Context())
-    if errv2 != nil {
-        telemetry.GetInstance().SendEvent(telemetry.TELEMETRY_EVENT_NUMBER_OF_SERVICES, data, claims.Email, true, false)
-    }
-
-    if (data["number"] != 0) && (data["number"] != telemetry.DEFAULT_NUMBER_OF_SERVICES) {
-        telemetry.GetInstance().AddActiveTracesUser()
-    }
-
-    aH.WriteJSON(w, r, result)
-}
-
-func (aH *APIHandler) dependencyGraph(w http.ResponseWriter, r *http.Request) {
-
-    query, err := parseGetServicesRequest(r)
-    if aH.HandleError(w, err, http.StatusBadRequest) {
-        return
-    }
-
-    result, err := aH.reader.GetDependencyGraph(r.Context(), query)
-    if aH.HandleError(w, err, http.StatusBadRequest) {
-        return
-    }
-
-    aH.WriteJSON(w, r, result)
-}
-
-func (aH *APIHandler) getServicesList(w http.ResponseWriter, r *http.Request) {
-
     result, err := aH.reader.GetServicesList(r.Context())
     if aH.HandleError(w, err, http.StatusBadRequest) {
         return
     }
 
     aH.WriteJSON(w, r, result)
 
 }
 
 func (aH *APIHandler) SearchTraces(w http.ResponseWriter, r *http.Request) {
@@ -4239,11 +4104,8 @@ func (aH *APIHandler) CloudIntegrationsUpdateServiceConfig(
 // logs
 func (aH *APIHandler) RegisterLogsRoutes(router *mux.Router, am *middleware.AuthZ) {
     subRouter := router.PathPrefix("/api/v1/logs").Subrouter()
-    subRouter.HandleFunc("", am.ViewAccess(aH.getLogs)).Methods(http.MethodGet)
-    subRouter.HandleFunc("/tail", am.ViewAccess(aH.tailLogs)).Methods(http.MethodGet)
     subRouter.HandleFunc("/fields", am.ViewAccess(aH.logFields)).Methods(http.MethodGet)
     subRouter.HandleFunc("/fields", am.EditAccess(aH.logFieldUpdate)).Methods(http.MethodPost)
-    subRouter.HandleFunc("/aggregate", am.ViewAccess(aH.logAggregate)).Methods(http.MethodGet)
 
     // log pipelines
     subRouter.HandleFunc("/pipelines/preview", am.ViewAccess(aH.PreviewLogsPipelinesHandler)).Methods(http.MethodPost)
@@ -4283,81 +4145,6 @@ func (aH *APIHandler) logFieldUpdate(w http.ResponseWriter, r *http.Request) {
     aH.WriteJSON(w, r, field)
 }
 
-func (aH *APIHandler) getLogs(w http.ResponseWriter, r *http.Request) {
-    params, err := logs.ParseLogFilterParams(r)
-    if err != nil {
-        apiErr := &model.ApiError{Typ: model.ErrorBadData, Err: err}
-        RespondError(w, apiErr, "Incorrect params")
-        return
-    }
-    res, apiErr := aH.reader.GetLogs(r.Context(), params)
-    if apiErr != nil {
-        RespondError(w, apiErr, "Failed to fetch logs from the DB")
-        return
-    }
-    aH.WriteJSON(w, r, map[string]interface{}{"results": res})
-}
-
-func (aH *APIHandler) tailLogs(w http.ResponseWriter, r *http.Request) {
-    params, err := logs.ParseLogFilterParams(r)
-    if err != nil {
-        apiErr := &model.ApiError{Typ: model.ErrorBadData, Err: err}
-        RespondError(w, apiErr, "Incorrect params")
-        return
-    }
-
-    // create the client
-    client := &model.LogsTailClient{Name: r.RemoteAddr, Logs: make(chan *model.SignozLog, 1000), Done: make(chan *bool), Error: make(chan error), Filter: *params}
-    go aH.reader.TailLogs(r.Context(), client)
-
-    w.Header().Set("Connection", "keep-alive")
-    w.Header().Set("Content-Type", "text/event-stream")
-    w.Header().Set("Cache-Control", "no-cache")
-    w.Header().Set("Access-Control-Allow-Origin", "*")
-    w.WriteHeader(200)
-
-    flusher, ok := w.(http.Flusher)
-    if !ok {
-        err := model.ApiError{Typ: model.ErrorStreamingNotSupported, Err: nil}
-        RespondError(w, &err, "streaming is not supported")
-        return
-    }
-    // flush the headers
-    flusher.Flush()
-
-    for {
-        select {
-        case log := <-client.Logs:
-            var buf bytes.Buffer
-            enc := json.NewEncoder(&buf)
-            enc.Encode(log)
-            fmt.Fprintf(w, "data: %v\n\n", buf.String())
-            flusher.Flush()
-        case <-client.Done:
-            zap.L().Debug("done!")
-            return
-        case err := <-client.Error:
-            zap.L().Error("error occured", zap.Error(err))
-            return
-        }
-    }
-}
-
-func (aH *APIHandler) logAggregate(w http.ResponseWriter, r *http.Request) {
-    params, err := logs.ParseLogAggregateParams(r)
-    if err != nil {
-        apiErr := &model.ApiError{Typ: model.ErrorBadData, Err: err}
-        RespondError(w, apiErr, "Incorrect params")
-        return
-    }
-    res, apiErr := aH.reader.AggregateLogs(r.Context(), params)
-    if apiErr != nil {
-        RespondError(w, apiErr, "Failed to fetch logs aggregate from the DB")
-        return
-    }
-    aH.WriteJSON(w, r, res)
-}
-
 const logPipelines = "log_pipelines"
 
 func parseAgentConfigVersion(r *http.Request) (int, *model.ApiError) {
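The deleted tailLogs handler above is a textbook server-sent-events loop: set the stream headers, assert http.Flusher, then emit one data: frame per item read from a channel. A self-contained sketch of just that skeleton, with a plain map standing in for the SigNoz log model:

```go
package main

import (
	"encoding/json"
	"fmt"
	"net/http"
)

// tailSSE streams each value from events to the client as an SSE frame.
func tailSSE(events <-chan map[string]any) http.HandlerFunc {
	return func(w http.ResponseWriter, r *http.Request) {
		flusher, ok := w.(http.Flusher)
		if !ok {
			http.Error(w, "streaming is not supported", http.StatusInternalServerError)
			return
		}

		w.Header().Set("Connection", "keep-alive")
		w.Header().Set("Content-Type", "text/event-stream")
		w.Header().Set("Cache-Control", "no-cache")
		w.WriteHeader(http.StatusOK)
		flusher.Flush() // push headers out before any event arrives

		for {
			select {
			case ev := <-events:
				payload, _ := json.Marshal(ev)
				fmt.Fprintf(w, "data: %s\n\n", payload) // one frame per event
				flusher.Flush()
			case <-r.Context().Done(): // client disconnected
				return
			}
		}
	}
}

func main() {
	events := make(chan map[string]any)
	http.Handle("/tail", tailSSE(events))
	// http.ListenAndServe(":8080", nil) // wire up in a real server
}
```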
@@ -4839,30 +4626,10 @@ func (aH *APIHandler) queryRangeV3(ctx context.Context, queryRangeParams *v3.Que
             RespondError(w, apiErrObj, errQuriesByName)
             return
         }
-        if aH.UseTraceNewSchema {
-            tracesV4.Enrich(queryRangeParams, spanKeys)
-        } else {
-            tracesV3.Enrich(queryRangeParams, spanKeys)
-        }
-
+        tracesV4.Enrich(queryRangeParams, spanKeys)
     }
 
-    // WARN: Only works for AND operator in traces query
-    if queryRangeParams.CompositeQuery.QueryType == v3.QueryTypeBuilder {
-        // check if traceID is used as filter (with equal/similar operator) in traces query if yes add timestamp filter to queryRange params
-        isUsed, traceIDs := tracesV3.TraceIdFilterUsedWithEqual(queryRangeParams)
-        if isUsed && len(traceIDs) > 0 {
-            zap.L().Debug("traceID used as filter in traces query")
-            // query signoz_spans table with traceID to get min and max timestamp
-            min, max, err := aH.reader.GetMinAndMaxTimestampForTraceID(ctx, traceIDs)
-            if err == nil {
-                // add timestamp filter to queryRange params
-                tracesV3.AddTimestampFilters(min, max, queryRangeParams)
-                zap.L().Debug("post adding timestamp filter in traces query", zap.Any("queryRangeParams", queryRangeParams))
-            }
-        }
-    }
-
     // Hook up query progress tracking if requested
     queryIdHeader := r.Header.Get("X-SIGNOZ-QUERY-ID")
     if len(queryIdHeader) > 0 {
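The block deleted above implemented a scan-narrowing optimization: when a builder query filtered on explicit trace IDs, it first looked up the min and max span timestamps for those IDs (GetMinAndMaxTimestampForTraceID), then clamped the query window (AddTimestampFilters) so the main query read fewer rows. A simplified sketch of that idea; the helper below is hypothetical and returns fixed values:

```go
package main

import "fmt"

// queryWindow is a stand-in for the start/end fields of the range params.
type queryWindow struct {
	StartNano int64
	EndNano   int64
}

// minMaxForTraceIDs is a hypothetical lookup; in the deleted code this role
// was played by aH.reader.GetMinAndMaxTimestampForTraceID over the spans table.
func minMaxForTraceIDs(traceIDs []string) (min, max int64, err error) {
	return 1_700_000_000_000_000_000, 1_700_000_060_000_000_000, nil
}

// clampToTraceIDs narrows the window to the span timestamps of the given
// trace IDs, mirroring what tracesV3.AddTimestampFilters did.
func clampToTraceIDs(w *queryWindow, traceIDs []string) {
	if len(traceIDs) == 0 {
		return
	}
	min, max, err := minMaxForTraceIDs(traceIDs)
	if err != nil {
		return // fall back to the original, wider window
	}
	w.StartNano, w.EndNano = min, max
}

func main() {
	w := queryWindow{StartNano: 0, EndNano: 1_800_000_000_000_000_000}
	clampToTraceIDs(&w, []string{"abc123"})
	fmt.Println(w.StartNano, w.EndNano)
}
```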
@@ -5202,88 +4969,7 @@ func (aH *APIHandler) liveTailLogsV2(w http.ResponseWriter, r *http.Request) {
 }
 
 func (aH *APIHandler) liveTailLogs(w http.ResponseWriter, r *http.Request) {
-	if aH.UseLogsNewSchema {
-		aH.liveTailLogsV2(w, r)
-		return
-	}
-
-	// get the param from url and add it to body
-	stringReader := strings.NewReader(r.URL.Query().Get("q"))
-	r.Body = io.NopCloser(stringReader)
-
-	queryRangeParams, apiErrorObj := ParseQueryRangeParams(r)
-	if apiErrorObj != nil {
-		zap.L().Error(apiErrorObj.Err.Error())
-		RespondError(w, apiErrorObj, nil)
-		return
-	}
-
-	var err error
-	var queryString string
-	switch queryRangeParams.CompositeQuery.QueryType {
-	case v3.QueryTypeBuilder:
-		// check if any enrichment is required for logs if yes then enrich them
-		if logsv3.EnrichmentRequired(queryRangeParams) {
-			logsFields, err := aH.reader.GetLogFields(r.Context())
-			if err != nil {
-				apiErrObj := &model.ApiError{Typ: model.ErrorInternal, Err: err}
-				RespondError(w, apiErrObj, nil)
-				return
-			}
-			// get the fields if any logs query is present
-			fields := model.GetLogFieldsV3(r.Context(), queryRangeParams, logsFields)
-			logsv3.Enrich(queryRangeParams, fields)
-		}
-
-		queryString, err = aH.queryBuilder.PrepareLiveTailQuery(queryRangeParams)
-		if err != nil {
-			RespondError(w, &model.ApiError{Typ: model.ErrorBadData, Err: err}, nil)
-			return
-		}
-
-	default:
-		err = fmt.Errorf("invalid query type")
-		RespondError(w, &model.ApiError{Typ: model.ErrorBadData, Err: err}, nil)
-		return
-	}
-
-	// create the client
-	client := &model.LogsLiveTailClient{Name: r.RemoteAddr, Logs: make(chan *model.SignozLog, 1000), Done: make(chan *bool), Error: make(chan error)}
-	go aH.reader.LiveTailLogsV3(r.Context(), queryString, uint64(queryRangeParams.Start), "", client)
-
-	w.Header().Set("Connection", "keep-alive")
-	w.Header().Set("Content-Type", "text/event-stream")
-	w.Header().Set("Cache-Control", "no-cache")
-	w.Header().Set("Access-Control-Allow-Origin", "*")
-	w.WriteHeader(200)
-
-	flusher, ok := w.(http.Flusher)
-	if !ok {
-		err := model.ApiError{Typ: model.ErrorStreamingNotSupported, Err: nil}
-		RespondError(w, &err, "streaming is not supported")
-		return
-	}
-	// flush the headers
-	flusher.Flush()
-	for {
-		select {
-		case log := <-client.Logs:
-			var buf bytes.Buffer
-			enc := json.NewEncoder(&buf)
-			enc.Encode(log)
-			fmt.Fprintf(w, "data: %v\n\n", buf.String())
-			flusher.Flush()
-		case <-client.Done:
-			zap.L().Debug("done!")
-			return
-		case err := <-client.Error:
-			zap.L().Error("error occurred", zap.Error(err))
-			fmt.Fprintf(w, "event: error\ndata: %v\n\n", err.Error())
-			flusher.Flush()
-			return
-		}
-	}
-
+	aH.liveTailLogsV2(w, r)
 }
 
 func (aH *APIHandler) getMetricMetadata(w http.ResponseWriter, r *http.Request) {
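Net effect of the hunk above: the legacy in-handler SSE loop is gone and `liveTailLogs` simply delegates to the v2 implementation. Reconstructed from the context and added lines of this hunk, the handler reduces to:

```go
func (aH *APIHandler) liveTailLogs(w http.ResponseWriter, r *http.Request) {
	aH.liveTailLogsV2(w, r)
}
```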
@@ -5324,27 +5010,7 @@ func (aH *APIHandler) queryRangeV4(ctx context.Context, queryRangeParams *v3.Que
 			RespondError(w, apiErrObj, errQuriesByName)
 			return
 		}
-		if aH.UseTraceNewSchema {
-			tracesV4.Enrich(queryRangeParams, spanKeys)
-		} else {
-			tracesV3.Enrich(queryRangeParams, spanKeys)
-		}
-	}
-
-	// WARN: Only works for AND operator in traces query
-	if queryRangeParams.CompositeQuery.QueryType == v3.QueryTypeBuilder {
-		// check if traceID is used as filter (with equal/similar operator) in traces query if yes add timestamp filter to queryRange params
-		isUsed, traceIDs := tracesV3.TraceIdFilterUsedWithEqual(queryRangeParams)
-		if isUsed && len(traceIDs) > 0 {
-			zap.L().Debug("traceID used as filter in traces query")
-			// query signoz_spans table with traceID to get min and max timestamp
-			min, max, err := aH.reader.GetMinAndMaxTimestampForTraceID(ctx, traceIDs)
-			if err == nil {
-				// add timestamp filter to queryRange params
-				tracesV3.AddTimestampFilters(min, max, queryRangeParams)
-				zap.L().Debug("post adding timestamp filter in traces query", zap.Any("queryRangeParams", queryRangeParams))
-			}
-		}
+		tracesV4.Enrich(queryRangeParams, spanKeys)
 	}
 
 	result, errQuriesByName, err = aH.querierV2.QueryRange(ctx, queryRangeParams)
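Both `queryRangeV3` and `queryRangeV4` now run trace enrichment through the v4 package unconditionally; the v3 fallback and the trace-ID timestamp-narrowing block are dropped from the handlers. A minimal sketch of the surviving enrichment step, assuming `spanKeys` is built by surrounding handler code that this diff does not show:

```go
// queryRangeParams is the parsed *v3.QueryRangeParamsV3 for the request;
// spanKeys maps span attribute names to their metadata (built elsewhere).
if queryRangeParams.CompositeQuery.QueryType == v3.QueryTypeBuilder {
	tracesV4.Enrich(queryRangeParams, spanKeys)
}
```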
@@ -171,42 +171,6 @@ func parseQueryRangeRequest(r *http.Request) (*model.QueryRangeParams, *model.Ap
 	return &queryRangeParams, nil
 }
 
-func parseGetUsageRequest(r *http.Request) (*model.GetUsageParams, error) {
-	startTime, err := parseTime("start", r)
-	if err != nil {
-		return nil, err
-	}
-	endTime, err := parseTime("end", r)
-	if err != nil {
-		return nil, err
-	}
-
-	stepStr := r.URL.Query().Get("step")
-	if len(stepStr) == 0 {
-		return nil, errors.New("step param missing in query")
-	}
-	stepInt, err := strconv.Atoi(stepStr)
-	if err != nil {
-		return nil, errors.New("step param is not in correct format")
-	}
-
-	serviceName := r.URL.Query().Get("service")
-	stepHour := stepInt / 3600
-
-	getUsageParams := model.GetUsageParams{
-		StartTime:   startTime.Format(time.RFC3339Nano),
-		EndTime:     endTime.Format(time.RFC3339Nano),
-		Start:       startTime,
-		End:         endTime,
-		ServiceName: serviceName,
-		Period:      fmt.Sprintf("PT%dH", stepHour),
-		StepHour:    stepHour,
-	}
-
-	return &getUsageParams, nil
-
-}
-
 func parseGetServicesRequest(r *http.Request) (*model.GetServicesParams, error) {
 
 	var postData *model.GetServicesParams
@@ -6,10 +6,8 @@ import (
 	"strings"
 	"sync"
 
-	logsV3 "github.com/SigNoz/signoz/pkg/query-service/app/logs/v3"
 	logsV4 "github.com/SigNoz/signoz/pkg/query-service/app/logs/v4"
 	metricsV3 "github.com/SigNoz/signoz/pkg/query-service/app/metrics/v3"
-	tracesV3 "github.com/SigNoz/signoz/pkg/query-service/app/traces/v3"
 	tracesV4 "github.com/SigNoz/signoz/pkg/query-service/app/traces/v4"
 	"github.com/SigNoz/signoz/pkg/query-service/common"
 	"github.com/SigNoz/signoz/pkg/query-service/constants"
@@ -19,19 +17,15 @@ import (
 	"go.uber.org/zap"
 )
 
-func prepareLogsQuery(_ context.Context,
-	useLogsNewSchema bool,
+func prepareLogsQuery(
+	_ context.Context,
 	start,
 	end int64,
 	builderQuery *v3.BuilderQuery,
 	params *v3.QueryRangeParamsV3,
 ) (string, error) {
 	query := ""
-
-	logsQueryBuilder := logsV3.PrepareLogsQuery
-	if useLogsNewSchema {
-		logsQueryBuilder = logsV4.PrepareLogsQuery
-	}
+	logsQueryBuilder := logsV4.PrepareLogsQuery
 
 	if params == nil || builderQuery == nil {
 		return query, fmt.Errorf("params and builderQuery cannot be nil")
@@ -102,7 +96,7 @@ func (q *querier) runBuilderQuery(
 		var err error
 		if _, ok := cacheKeys[queryName]; !ok || params.NoCache {
 			zap.L().Info("skipping cache for logs query", zap.String("queryName", queryName), zap.Int64("start", start), zap.Int64("end", end), zap.Int64("step", builderQuery.StepInterval), zap.Bool("noCache", params.NoCache), zap.String("cacheKey", cacheKeys[queryName]))
-			query, err = prepareLogsQuery(ctx, q.UseLogsNewSchema, start, end, builderQuery, params)
+			query, err = prepareLogsQuery(ctx, start, end, builderQuery, params)
 			if err != nil {
 				ch <- channelResult{Err: err, Name: queryName, Query: query, Series: nil}
 				return
@@ -117,7 +111,7 @@ func (q *querier) runBuilderQuery(
 			missedSeries := make([]querycache.CachedSeriesData, 0)
 			filteredMissedSeries := make([]querycache.CachedSeriesData, 0)
 			for _, miss := range misses {
-				query, err = prepareLogsQuery(ctx, q.UseLogsNewSchema, miss.Start, miss.End, builderQuery, params)
+				query, err = prepareLogsQuery(ctx, miss.Start, miss.End, builderQuery, params)
 				if err != nil {
 					ch <- channelResult{Err: err, Name: queryName, Query: query, Series: nil}
 					return
@@ -169,11 +163,7 @@ func (q *querier) runBuilderQuery(
 	}
 
 	if builderQuery.DataSource == v3.DataSourceTraces {
-		tracesQueryBuilder := tracesV3.PrepareTracesQuery
-		if q.UseTraceNewSchema {
-			tracesQueryBuilder = tracesV4.PrepareTracesQuery
-		}
+		tracesQueryBuilder := tracesV4.PrepareTracesQuery
 
 		var query string
 		var err error
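With `useLogsNewSchema` removed from `prepareLogsQuery`, every call site loses one argument and the v4 log query builder is chosen unconditionally. A sketch of the new call shape, with names exactly as in the `runBuilderQuery` hunks above:

```go
// Sketch of a call site after this change.
query, err = prepareLogsQuery(ctx, start, end, builderQuery, params)
if err != nil {
	ch <- channelResult{Err: err, Name: queryName, Query: query, Series: nil}
	return
}
```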
@@ -6,11 +6,9 @@ import (
 	"sync"
 	"time"
 
-	logsV3 "github.com/SigNoz/signoz/pkg/query-service/app/logs/v3"
 	logsV4 "github.com/SigNoz/signoz/pkg/query-service/app/logs/v4"
 	metricsV3 "github.com/SigNoz/signoz/pkg/query-service/app/metrics/v3"
 	"github.com/SigNoz/signoz/pkg/query-service/app/queryBuilder"
-	tracesV3 "github.com/SigNoz/signoz/pkg/query-service/app/traces/v3"
 	tracesV4 "github.com/SigNoz/signoz/pkg/query-service/app/traces/v4"
 	"github.com/SigNoz/signoz/pkg/query-service/common"
 	"github.com/SigNoz/signoz/pkg/query-service/constants"
@@ -52,9 +50,6 @@ type querier struct {
 	timeRanges     [][]int
 	returnedSeries []*v3.Series
 	returnedErr    error
-
-	UseLogsNewSchema  bool
-	UseTraceNewSchema bool
 }
 
 type QuerierOptions struct {
@@ -64,22 +59,14 @@ type QuerierOptions struct {
 	FluxInterval time.Duration
 
 	// used for testing
 	TestingMode    bool
 	ReturnedSeries []*v3.Series
 	ReturnedErr    error
-	UseLogsNewSchema  bool
-	UseTraceNewSchema bool
 }
 
 func NewQuerier(opts QuerierOptions) interfaces.Querier {
-	logsQueryBuilder := logsV3.PrepareLogsQuery
-	if opts.UseLogsNewSchema {
-		logsQueryBuilder = logsV4.PrepareLogsQuery
-	}
-	tracesQueryBuilder := tracesV3.PrepareTracesQuery
-	if opts.UseTraceNewSchema {
-		tracesQueryBuilder = tracesV4.PrepareTracesQuery
-	}
+	logsQueryBuilder := logsV4.PrepareLogsQuery
+	tracesQueryBuilder := tracesV4.PrepareTracesQuery
 
 	qc := querycache.NewQueryCache(querycache.WithCache(opts.Cache), querycache.WithFluxInterval(opts.FluxInterval))
 
@@ -96,11 +83,9 @@ func NewQuerier(opts QuerierOptions) interfaces.Querier {
 			BuildMetricQuery: metricsV3.PrepareMetricQuery,
 		}),
 
 		testingMode:    opts.TestingMode,
 		returnedSeries: opts.ReturnedSeries,
 		returnedErr:    opts.ReturnedErr,
-		UseLogsNewSchema:  opts.UseLogsNewSchema,
-		UseTraceNewSchema: opts.UseTraceNewSchema,
 	}
 }
 
@@ -445,11 +430,6 @@ func (q *querier) runBuilderListQueries(ctx context.Context, params *v3.QueryRan
 		len(params.CompositeQuery.BuilderQueries) == 1 &&
 		params.CompositeQuery.PanelType != v3.PanelTypeTrace {
 		for _, v := range params.CompositeQuery.BuilderQueries {
-			if (v.DataSource == v3.DataSourceLogs && !q.UseLogsNewSchema) ||
-				(v.DataSource == v3.DataSourceTraces && !q.UseTraceNewSchema) {
-				break
-			}
-
 			// only allow of logs queries with timestamp ordering desc
 			// TODO(nitya): allow for timestamp asc
 			if (v.DataSource == v3.DataSourceLogs || v.DataSource == v3.DataSourceTraces) &&
@@ -1370,8 +1370,6 @@ func Test_querier_runWindowBasedListQuery(t *testing.T) {
 		telemetryStore,
 		prometheustest.New(instrumentationtest.New().Logger(), prometheus.Config{}),
 		"",
-		true,
-		true,
 		time.Duration(time.Second),
 		nil,
 	)
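Callers of `NewQuerier` no longer set the schema flags; the rest of `QuerierOptions` is unchanged. A minimal construction sketch, assuming `reader` and `fluxInterval` are already in scope (the `Reader`/`Cache`/`KeyGenerator` field set is shown the same way in the `ThresholdRule` hunks later in this diff):

```go
q := querier.NewQuerier(querier.QuerierOptions{
	Reader:       reader,
	Cache:        nil,
	KeyGenerator: queryBuilder.NewKeyGenerator(),
	FluxInterval: fluxInterval,
})
```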
@@ -6,11 +6,9 @@ import (
 	"strings"
 	"sync"
 
-	logsV3 "github.com/SigNoz/signoz/pkg/query-service/app/logs/v3"
 	logsV4 "github.com/SigNoz/signoz/pkg/query-service/app/logs/v4"
 	metricsV3 "github.com/SigNoz/signoz/pkg/query-service/app/metrics/v3"
 	metricsV4 "github.com/SigNoz/signoz/pkg/query-service/app/metrics/v4"
-	tracesV3 "github.com/SigNoz/signoz/pkg/query-service/app/traces/v3"
 	tracesV4 "github.com/SigNoz/signoz/pkg/query-service/app/traces/v4"
 	"github.com/SigNoz/signoz/pkg/query-service/common"
 	"github.com/SigNoz/signoz/pkg/query-service/constants"
@@ -19,17 +17,14 @@ import (
 	"go.uber.org/zap"
 )
 
-func prepareLogsQuery(_ context.Context,
-	useLogsNewSchema bool,
+func prepareLogsQuery(
+	_ context.Context,
 	start,
 	end int64,
 	builderQuery *v3.BuilderQuery,
 	params *v3.QueryRangeParamsV3,
 ) (string, error) {
-	logsQueryBuilder := logsV3.PrepareLogsQuery
-	if useLogsNewSchema {
-		logsQueryBuilder = logsV4.PrepareLogsQuery
-	}
+	logsQueryBuilder := logsV4.PrepareLogsQuery
 	query := ""
 
 	if params == nil || builderQuery == nil {
@@ -102,7 +97,7 @@ func (q *querier) runBuilderQuery(
 		var err error
 		if _, ok := cacheKeys[queryName]; !ok || params.NoCache {
 			zap.L().Info("skipping cache for logs query", zap.String("queryName", queryName), zap.Int64("start", params.Start), zap.Int64("end", params.End), zap.Int64("step", params.Step), zap.Bool("noCache", params.NoCache), zap.String("cacheKey", cacheKeys[queryName]))
-			query, err = prepareLogsQuery(ctx, q.UseLogsNewSchema, start, end, builderQuery, params)
+			query, err = prepareLogsQuery(ctx, start, end, builderQuery, params)
 			if err != nil {
 				ch <- channelResult{Err: err, Name: queryName, Query: query, Series: nil}
 				return
@@ -116,7 +111,7 @@ func (q *querier) runBuilderQuery(
 			missedSeries := make([]querycache.CachedSeriesData, 0)
 			filteredMissedSeries := make([]querycache.CachedSeriesData, 0)
 			for _, miss := range misses {
-				query, err = prepareLogsQuery(ctx, q.UseLogsNewSchema, miss.Start, miss.End, builderQuery, params)
+				query, err = prepareLogsQuery(ctx, miss.Start, miss.End, builderQuery, params)
 				if err != nil {
 					ch <- channelResult{Err: err, Name: queryName, Query: query, Series: nil}
 					return
@@ -169,11 +164,7 @@ func (q *querier) runBuilderQuery(
 	}
 
 	if builderQuery.DataSource == v3.DataSourceTraces {
-		tracesQueryBuilder := tracesV3.PrepareTracesQuery
-		if q.UseTraceNewSchema {
-			tracesQueryBuilder = tracesV4.PrepareTracesQuery
-		}
+		tracesQueryBuilder := tracesV4.PrepareTracesQuery
 
 		var query string
 		var err error
@@ -6,11 +6,9 @@ import (
 	"sync"
 	"time"
 
-	logsV3 "github.com/SigNoz/signoz/pkg/query-service/app/logs/v3"
 	logsV4 "github.com/SigNoz/signoz/pkg/query-service/app/logs/v4"
 	metricsV4 "github.com/SigNoz/signoz/pkg/query-service/app/metrics/v4"
 	"github.com/SigNoz/signoz/pkg/query-service/app/queryBuilder"
-	tracesV3 "github.com/SigNoz/signoz/pkg/query-service/app/traces/v3"
 	tracesV4 "github.com/SigNoz/signoz/pkg/query-service/app/traces/v4"
 	"github.com/SigNoz/signoz/pkg/query-service/common"
 	"github.com/SigNoz/signoz/pkg/query-service/constants"
@@ -49,11 +47,9 @@ type querier struct {
 	testingMode     bool
 	queriesExecuted []string
 	// tuple of start and end time in milliseconds
 	timeRanges     [][]int
 	returnedSeries []*v3.Series
 	returnedErr    error
-	UseLogsNewSchema  bool
-	UseTraceNewSchema bool
 }
 
 type QuerierOptions struct {
@@ -63,23 +59,14 @@ type QuerierOptions struct {
 	FluxInterval time.Duration
 
 	// used for testing
 	TestingMode    bool
 	ReturnedSeries []*v3.Series
 	ReturnedErr    error
-	UseLogsNewSchema  bool
-	UseTraceNewSchema bool
 }
 
 func NewQuerier(opts QuerierOptions) interfaces.Querier {
-	logsQueryBuilder := logsV3.PrepareLogsQuery
-	if opts.UseLogsNewSchema {
-		logsQueryBuilder = logsV4.PrepareLogsQuery
-	}
-
-	tracesQueryBuilder := tracesV3.PrepareTracesQuery
-	if opts.UseTraceNewSchema {
-		tracesQueryBuilder = tracesV4.PrepareTracesQuery
-	}
+	logsQueryBuilder := logsV4.PrepareLogsQuery
+	tracesQueryBuilder := tracesV4.PrepareTracesQuery
 
 	qc := querycache.NewQueryCache(querycache.WithCache(opts.Cache), querycache.WithFluxInterval(opts.FluxInterval))
 
@@ -96,11 +83,9 @@ func NewQuerier(opts QuerierOptions) interfaces.Querier {
 			BuildMetricQuery: metricsV4.PrepareMetricQuery,
 		}),
 
 		testingMode:    opts.TestingMode,
 		returnedSeries: opts.ReturnedSeries,
 		returnedErr:    opts.ReturnedErr,
-		UseLogsNewSchema:  opts.UseLogsNewSchema,
-		UseTraceNewSchema: opts.UseTraceNewSchema,
 	}
 }
 
@@ -446,11 +431,6 @@ func (q *querier) runBuilderListQueries(ctx context.Context, params *v3.QueryRan
 		len(params.CompositeQuery.BuilderQueries) == 1 &&
 		params.CompositeQuery.PanelType != v3.PanelTypeTrace {
 		for _, v := range params.CompositeQuery.BuilderQueries {
-			if (v.DataSource == v3.DataSourceLogs && !q.UseLogsNewSchema) ||
-				(v.DataSource == v3.DataSourceTraces && !q.UseTraceNewSchema) {
-				break
-			}
-
 			// only allow of logs queries with timestamp ordering desc
 			// TODO(nitya): allow for timestamp asc
 			if (v.DataSource == v3.DataSourceLogs || v.DataSource == v3.DataSourceTraces) &&
@@ -1424,8 +1424,6 @@ func Test_querier_runWindowBasedListQuery(t *testing.T) {
 		telemetryStore,
 		prometheustest.New(instrumentationtest.New().Logger(), prometheus.Config{}),
 		"",
-		true,
-		true,
 		time.Duration(time.Second),
 		nil,
 	)
@@ -53,8 +53,6 @@ type ServerOptions struct {
 	FluxInterval               string
 	FluxIntervalForTraceDetail string
 	Cluster                    string
-	UseLogsNewSchema           bool
-	UseTraceNewSchema          bool
 	SigNoz                     *signoz.SigNoz
 	Jwt                        *authtypes.JWT
 }
@@ -110,8 +108,6 @@ func NewServer(serverOptions *ServerOptions) (*Server, error) {
 		serverOptions.SigNoz.TelemetryStore,
 		serverOptions.SigNoz.Prometheus,
 		serverOptions.Cluster,
-		serverOptions.UseLogsNewSchema,
-		serverOptions.UseTraceNewSchema,
 		fluxIntervalForTraceDetail,
 		serverOptions.SigNoz.Cache,
 	)
@@ -129,8 +125,6 @@ func NewServer(serverOptions *ServerOptions) (*Server, error) {
 		serverOptions.SigNoz.SQLStore.SQLxDB(),
 		reader,
 		c,
-		serverOptions.UseLogsNewSchema,
-		serverOptions.UseTraceNewSchema,
 		serverOptions.SigNoz.SQLStore,
 		serverOptions.SigNoz.TelemetryStore,
 		serverOptions.SigNoz.Prometheus,
@@ -173,8 +167,6 @@ func NewServer(serverOptions *ServerOptions) (*Server, error) {
 		LogsParsingPipelineController: logParsingPipelineController,
 		Cache:                         c,
 		FluxInterval:                  fluxInterval,
-		UseLogsNewSchema:              serverOptions.UseLogsNewSchema,
-		UseTraceNewSchema:             serverOptions.UseTraceNewSchema,
 		JWT:                           serverOptions.Jwt,
 		AlertmanagerAPI:               alertmanager.NewAPI(serverOptions.SigNoz.Alertmanager),
 		FieldsAPI:                     fields.NewAPI(serverOptions.SigNoz.TelemetryStore),
@@ -435,25 +427,21 @@ func makeRulesManager(
 	db *sqlx.DB,
 	ch interfaces.Reader,
 	cache cache.Cache,
-	useLogsNewSchema bool,
-	useTraceNewSchema bool,
 	sqlstore sqlstore.SQLStore,
 	telemetryStore telemetrystore.TelemetryStore,
 	prometheus prometheus.Prometheus,
 ) (*rules.Manager, error) {
 	// create manager opts
 	managerOpts := &rules.ManagerOptions{
 		TelemetryStore: telemetryStore,
 		Prometheus:     prometheus,
 		DBConn:         db,
 		Context:        context.Background(),
 		Logger:         zap.L(),
 		Reader:         ch,
 		Cache:          cache,
 		EvalDelay:      constants.GetEvalDelay(),
-		UseLogsNewSchema:  useLogsNewSchema,
-		UseTraceNewSchema: useTraceNewSchema,
-		SQLStore:          sqlstore,
+		SQLStore:       sqlstore,
 	}
 
 	// create Manager
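`makeRulesManager` shrinks from eight parameters to six. A sketch of the updated call, using the same arguments the `NewServer` hunk above passes (the `rm` binding name is assumed, not taken from this diff):

```go
// Sketch only: everything on the right-hand side is in scope inside NewServer.
rm, err := makeRulesManager(
	serverOptions.SigNoz.SQLStore.SQLxDB(),
	reader,
	c,
	serverOptions.SigNoz.SQLStore,
	serverOptions.SigNoz.TelemetryStore,
	serverOptions.SigNoz.Prometheus,
)
if err != nil {
	return nil, err
}
```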
@@ -15,12 +15,8 @@ import (
 type Reader interface {
 	GetInstantQueryMetricsResult(ctx context.Context, query *model.InstantQueryMetricsParams) (*promql.Result, *stats.QueryStats, *model.ApiError)
 	GetQueryRangeResult(ctx context.Context, query *model.QueryRangeParams) (*promql.Result, *stats.QueryStats, *model.ApiError)
-	GetTopLevelOperations(ctx context.Context, start, end time.Time, services []string) (*map[string][]string, *model.ApiError)
-	GetServices(ctx context.Context, query *model.GetServicesParams) (*[]model.ServiceItem, *model.ApiError)
 	GetTopOperations(ctx context.Context, query *model.GetTopOperationsParams) (*[]model.TopOperationsItem, *model.ApiError)
-	GetUsage(ctx context.Context, query *model.GetUsageParams) (*[]model.UsageItem, error)
 	GetServicesList(ctx context.Context) (*[]string, error)
-	GetDependencyGraph(ctx context.Context, query *model.GetServicesParams) (*[]model.ServiceMapDependencyResponseItem, error)
 
 	GetTTL(ctx context.Context, orgID string, ttlParams *model.GetTTLParams) (*model.GetTTLResponseItem, *model.ApiError)
 
@@ -74,9 +70,6 @@ type Reader interface {
 	// Logs
 	GetLogFields(ctx context.Context) (*model.GetFieldsResponse, *model.ApiError)
 	UpdateLogField(ctx context.Context, field *model.UpdateField) *model.ApiError
-	GetLogs(ctx context.Context, params *model.LogsFilterParams) (*[]model.SignozLog, *model.ApiError)
-	TailLogs(ctx context.Context, client *model.LogsTailClient)
-	AggregateLogs(ctx context.Context, params *model.LogsAggregateParams) (*model.GetLogsAggregatesResponse, *model.ApiError)
 	GetLogAttributeKeys(ctx context.Context, req *v3.FilterAttributeKeyRequest) (*v3.FilterAttributeKeyResponse, error)
 	GetLogAttributeValues(ctx context.Context, req *v3.FilterAttributeValueRequest) (*v3.FilterAttributeValueResponse, error)
 	GetLogAggregateAttributes(ctx context.Context, req *v3.AggregateAttributeRequest) (*v3.AggregateAttributeResponse, error)
@@ -100,8 +93,6 @@ type Reader interface {
 	ReadRuleStateHistoryTopContributorsByRuleID(ctx context.Context, ruleID string, params *model.QueryRuleStateHistory) ([]model.RuleStateHistoryContributor, error)
 	GetLastSavedRuleStateHistory(ctx context.Context, ruleID string) ([]model.RuleStateHistory, error)
 
-	GetMinAndMaxTimestampForTraceID(ctx context.Context, traceID []string) (int64, int64, error)
-
 	// Query Progress tracking helpers.
 	ReportQueryStartForProgressTracking(queryId string) (reportQueryFinished func(), err *model.ApiError)
 	SubscribeToQueryProgress(queryId string) (<-chan model.QueryProgress, func(), *model.ApiError)
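The `Reader` interface loses the dead service-map, usage, and legacy log-tail methods along with `GetMinAndMaxTimestampForTraceID`, so implementations shrink rather than break; only leftover callers of the removed methods would fail to compile. A compile-time assertion is a cheap guard for that — `concreteReader` below is a hypothetical implementation type, not a name taken from this diff:

```go
// Fails to compile if concreteReader stops satisfying the slimmed-down interface.
var _ interfaces.Reader = (*concreteReader)(nil)
```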
@@ -45,7 +45,9 @@ func main() {
 	var maxOpenConns int
 	var dialTimeout time.Duration
 
+	// Deprecated
 	flag.BoolVar(&useLogsNewSchema, "use-logs-new-schema", false, "use logs_v2 schema for logs")
+	// Deprecated
 	flag.BoolVar(&useTraceNewSchema, "use-trace-new-schema", false, "use new schema for traces")
 	// Deprecated
 	flag.StringVar(&promConfigPath, "config", "./config/prometheus.yml", "(prometheus config to read metrics)")
@@ -126,8 +128,6 @@ func main() {
 		FluxInterval:               fluxInterval,
 		FluxIntervalForTraceDetail: fluxIntervalForTraceDetail,
 		Cluster:                    cluster,
-		UseLogsNewSchema:           useLogsNewSchema,
-		UseTraceNewSchema:          useTraceNewSchema,
 		SigNoz:                     signoz,
 		Jwt:                        jwt,
 	}
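The two schema flags stay registered so existing start-up commands keep parsing, but their values no longer flow into `ServerOptions`. What `main.go` is left with, per the hunks above:

```go
// Both flags are now parse-only; nothing reads the bound variables any more.
// Deprecated
flag.BoolVar(&useLogsNewSchema, "use-logs-new-schema", false, "use logs_v2 schema for logs")
// Deprecated
flag.BoolVar(&useTraceNewSchema, "use-trace-new-schema", false, "use new schema for traces")
```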
@@ -70,16 +70,6 @@ type RegisterEventParams struct {
 	RateLimited bool `json:"rateLimited"`
 }
 
-type GetUsageParams struct {
-	StartTime   string
-	EndTime     string
-	ServiceName string
-	Period      string
-	StepHour    int
-	Start       *time.Time
-	End         *time.Time
-}
-
 type GetServicesParams struct {
 	StartTime string `json:"start"`
 	EndTime   string `json:"end"`
@@ -34,33 +34,29 @@ import (
 )
 
 type PrepareTaskOptions struct {
 	Rule             *ruletypes.PostableRule
 	TaskName         string
 	RuleStore        ruletypes.RuleStore
 	MaintenanceStore ruletypes.MaintenanceStore
 	Logger           *zap.Logger
 	Reader           interfaces.Reader
 	Cache            cache.Cache
 	ManagerOpts      *ManagerOptions
 	NotifyFunc       NotifyFunc
 	SQLStore         sqlstore.SQLStore
-	UseLogsNewSchema  bool
-	UseTraceNewSchema bool
-	OrgID             string
+	OrgID            string
 }
 
 type PrepareTestRuleOptions struct {
 	Rule             *ruletypes.PostableRule
 	RuleStore        ruletypes.RuleStore
 	MaintenanceStore ruletypes.MaintenanceStore
 	Logger           *zap.Logger
 	Reader           interfaces.Reader
 	Cache            cache.Cache
 	ManagerOpts      *ManagerOptions
 	NotifyFunc       NotifyFunc
 	SQLStore         sqlstore.SQLStore
-	UseLogsNewSchema  bool
-	UseTraceNewSchema bool
 }
 
 const taskNamesuffix = "webAppEditor"
@@ -95,10 +91,7 @@ type ManagerOptions struct {
 
 	EvalDelay time.Duration
 
 	PrepareTaskFunc func(opts PrepareTaskOptions) (Task, error)
-
-	UseLogsNewSchema  bool
-	UseTraceNewSchema bool
 	PrepareTestRuleFunc func(opts PrepareTestRuleOptions) (int, *model.ApiError)
 	Alertmanager        alertmanager.Alertmanager
 	SQLStore            sqlstore.SQLStore
@@ -121,9 +114,6 @@ type Manager struct {
 	prepareTaskFunc     func(opts PrepareTaskOptions) (Task, error)
 	prepareTestRuleFunc func(opts PrepareTestRuleOptions) (int, *model.ApiError)
 
-	UseLogsNewSchema  bool
-	UseTraceNewSchema bool
-
 	alertmanager alertmanager.Alertmanager
 	sqlstore     sqlstore.SQLStore
 }
@@ -156,8 +146,6 @@ func defaultPrepareTaskFunc(opts PrepareTaskOptions) (Task, error) {
 			ruleId,
 			opts.Rule,
 			opts.Reader,
-			opts.UseLogsNewSchema,
-			opts.UseTraceNewSchema,
 			WithEvalDelay(opts.ManagerOpts.EvalDelay),
 			WithSQLStore(opts.SQLStore),
 		)
@@ -407,19 +395,17 @@ func (m *Manager) editTask(_ context.Context, orgID string, rule *ruletypes.Post
 	zap.L().Debug("editing a rule task", zap.String("name", taskName))
 
 	newTask, err := m.prepareTaskFunc(PrepareTaskOptions{
 		Rule:             rule,
 		TaskName:         taskName,
 		RuleStore:        m.ruleStore,
 		MaintenanceStore: m.maintenanceStore,
 		Logger:           m.logger,
 		Reader:           m.reader,
 		Cache:            m.cache,
 		ManagerOpts:      m.opts,
 		NotifyFunc:       m.prepareNotifyFunc(),
 		SQLStore:         m.sqlstore,
-		UseLogsNewSchema:  m.opts.UseLogsNewSchema,
-		UseTraceNewSchema: m.opts.UseTraceNewSchema,
-		OrgID:             orgID,
+		OrgID:            orgID,
 	})
 
 	if err != nil {
@@ -595,19 +581,17 @@ func (m *Manager) addTask(_ context.Context, orgID string, rule *ruletypes.Posta
 
 	zap.L().Debug("adding a new rule task", zap.String("name", taskName))
 	newTask, err := m.prepareTaskFunc(PrepareTaskOptions{
 		Rule:             rule,
 		TaskName:         taskName,
 		RuleStore:        m.ruleStore,
 		MaintenanceStore: m.maintenanceStore,
 		Logger:           m.logger,
 		Reader:           m.reader,
 		Cache:            m.cache,
 		ManagerOpts:      m.opts,
 		NotifyFunc:       m.prepareNotifyFunc(),
 		SQLStore:         m.sqlstore,
-		UseLogsNewSchema:  m.opts.UseLogsNewSchema,
-		UseTraceNewSchema: m.opts.UseTraceNewSchema,
-		OrgID:             orgID,
+		OrgID:            orgID,
 	})
 
 	if err != nil {
@@ -987,17 +971,15 @@ func (m *Manager) TestNotification(ctx context.Context, ruleStr string) (int, *m
 	}
 
 	alertCount, apiErr := m.prepareTestRuleFunc(PrepareTestRuleOptions{
 		Rule:             parsedRule,
 		RuleStore:        m.ruleStore,
 		MaintenanceStore: m.maintenanceStore,
 		Logger:           m.logger,
 		Reader:           m.reader,
 		Cache:            m.cache,
 		ManagerOpts:      m.opts,
 		NotifyFunc:       m.prepareTestNotifyFunc(),
 		SQLStore:         m.sqlstore,
-		UseLogsNewSchema:  m.opts.UseLogsNewSchema,
-		UseTraceNewSchema: m.opts.UseTraceNewSchema,
 	})
 
 	return alertCount, apiErr
@@ -15,7 +15,6 @@ import (
 // TestNotification prepares a dummy rule for given rule parameters and
 // sends a test notification. returns alert count and error (if any)
 func defaultTestNotification(opts PrepareTestRuleOptions) (int, *model.ApiError) {
-
 	ctx := context.Background()
 
 	if opts.Rule == nil {
@@ -48,8 +47,6 @@ func defaultTestNotification(opts PrepareTestRuleOptions) (int, *model.ApiError)
 		alertname,
 		parsedRule,
 		opts.Reader,
-		opts.UseLogsNewSchema,
-		opts.UseTraceNewSchema,
 		WithSendAlways(),
 		WithSendUnmatched(),
 		WithSQLStore(opts.SQLStore),
@@ -29,7 +29,6 @@ import (
 	"github.com/SigNoz/signoz/pkg/query-service/utils/timestamp"
 
 	logsv3 "github.com/SigNoz/signoz/pkg/query-service/app/logs/v3"
-	tracesV3 "github.com/SigNoz/signoz/pkg/query-service/app/traces/v3"
 	tracesV4 "github.com/SigNoz/signoz/pkg/query-service/app/traces/v4"
 	"github.com/SigNoz/signoz/pkg/query-service/formatter"
 
@@ -52,16 +51,12 @@ type ThresholdRule struct {
 	// used for attribute metadata enrichment for logs and traces
 	logsKeys  map[string]v3.AttributeKey
 	spansKeys map[string]v3.AttributeKey
-
-	useTraceNewSchema bool
 }
 
 func NewThresholdRule(
 	id string,
 	p *ruletypes.PostableRule,
 	reader interfaces.Reader,
-	useLogsNewSchema bool,
-	useTraceNewSchema bool,
 	opts ...RuleOption,
 ) (*ThresholdRule, error) {
 
@@ -73,25 +68,20 @@ func NewThresholdRule(
 	}
 
 	t := ThresholdRule{
 		BaseRule: baseRule,
 		version:  p.Version,
-		useTraceNewSchema: useTraceNewSchema,
 	}
 
 	querierOption := querier.QuerierOptions{
 		Reader:       reader,
 		Cache:        nil,
 		KeyGenerator: queryBuilder.NewKeyGenerator(),
-		UseLogsNewSchema:  useLogsNewSchema,
-		UseTraceNewSchema: useTraceNewSchema,
 	}
 
 	querierOptsV2 := querierV2.QuerierOptions{
 		Reader:       reader,
 		Cache:        nil,
 		KeyGenerator: queryBuilder.NewKeyGenerator(),
-		UseLogsNewSchema:  useLogsNewSchema,
-		UseTraceNewSchema: useTraceNewSchema,
 	}
 
 	t.querier = querier.NewQuerier(querierOption)
@@ -301,11 +291,7 @@ func (r *ThresholdRule) buildAndRunQuery(ctx context.Context, ts time.Time) (rul
 				return nil, err
 			}
 			r.spansKeys = spanKeys
-			if r.useTraceNewSchema {
-				tracesV4.Enrich(params, spanKeys)
-			} else {
-				tracesV3.Enrich(params, spanKeys)
-			}
+			tracesV4.Enrich(params, spanKeys)
 		}
 	}
 
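`NewThresholdRule` drops the two booleans, so the variadic `RuleOption` values now follow the reader directly. Every test update below follows this pattern; a representative call, names as in the test hunks:

```go
rule, err := NewThresholdRule("69", &postableRule, reader, WithEvalDelay(2*time.Minute))
if err != nil {
	assert.NoError(t, err)
}
```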
@@ -801,7 +801,7 @@ func TestThresholdRuleShouldAlert(t *testing.T) {
 		postableRule.RuleCondition.MatchType = ruletypes.MatchType(c.matchType)
 		postableRule.RuleCondition.Target = &c.target
 
-		rule, err := NewThresholdRule("69", &postableRule, nil, true, true, WithEvalDelay(2*time.Minute))
+		rule, err := NewThresholdRule("69", &postableRule, nil, WithEvalDelay(2*time.Minute))
 		if err != nil {
 			assert.NoError(t, err)
 		}
@@ -889,7 +889,7 @@ func TestPrepareLinksToLogs(t *testing.T) {
 		},
 	}
 
-	rule, err := NewThresholdRule("69", &postableRule, nil, true, true, WithEvalDelay(2*time.Minute))
+	rule, err := NewThresholdRule("69", &postableRule, nil, WithEvalDelay(2*time.Minute))
 	if err != nil {
 		assert.NoError(t, err)
 	}
@@ -930,7 +930,7 @@ func TestPrepareLinksToTraces(t *testing.T) {
 		},
 	}
 
-	rule, err := NewThresholdRule("69", &postableRule, nil, true, true, WithEvalDelay(2*time.Minute))
+	rule, err := NewThresholdRule("69", &postableRule, nil, WithEvalDelay(2*time.Minute))
 	if err != nil {
 		assert.NoError(t, err)
 	}
@@ -1005,7 +1005,7 @@ func TestThresholdRuleLabelNormalization(t *testing.T) {
 		postableRule.RuleCondition.MatchType = ruletypes.MatchType(c.matchType)
 		postableRule.RuleCondition.Target = &c.target
 
-		rule, err := NewThresholdRule("69", &postableRule, nil, true, true, WithEvalDelay(2*time.Minute))
+		rule, err := NewThresholdRule("69", &postableRule, nil, WithEvalDelay(2*time.Minute))
 		if err != nil {
 			assert.NoError(t, err)
 		}
@@ -1057,7 +1057,7 @@ func TestThresholdRuleEvalDelay(t *testing.T) {
 	}
 
 	for idx, c := range cases {
-		rule, err := NewThresholdRule("69", &postableRule, nil, true, true) // no eval delay
+		rule, err := NewThresholdRule("69", &postableRule, nil) // no eval delay
 		if err != nil {
 			assert.NoError(t, err)
 		}
@@ -1105,7 +1105,7 @@ func TestThresholdRuleClickHouseTmpl(t *testing.T) {
 	}
 
 	for idx, c := range cases {
-		rule, err := NewThresholdRule("69", &postableRule, nil, true, true, WithEvalDelay(2*time.Minute))
+		rule, err := NewThresholdRule("69", &postableRule, nil, WithEvalDelay(2*time.Minute))
 		if err != nil {
 			assert.NoError(t, err)
 		}
@@ -1244,8 +1244,8 @@ func TestThresholdRuleUnitCombinations(t *testing.T) {
 	options := clickhouseReader.NewOptions("", "", "archiveNamespace")
 	readerCache, err := memorycache.New(context.Background(), factorytest.NewSettings(), cache.Config{Provider: "memory", Memory: cache.Memory{TTL: DefaultFrequency}})
 	require.NoError(t, err)
-	reader := clickhouseReader.NewReaderFromClickhouseConnection(options, nil, telemetryStore, prometheustest.New(instrumentationtest.New().Logger(), prometheus.Config{}), "", true, true, time.Duration(time.Second), readerCache)
-	rule, err := NewThresholdRule("69", &postableRule, reader, true, true)
+	reader := clickhouseReader.NewReaderFromClickhouseConnection(options, nil, telemetryStore, prometheustest.New(instrumentationtest.New().Logger(), prometheus.Config{}), "", time.Duration(time.Second), readerCache)
+	rule, err := NewThresholdRule("69", &postableRule, reader)
 	rule.TemporalityMap = map[string]map[v3.Temporality]bool{
 		"signoz_calls_total": {
 			v3.Delta: true,
@@ -1340,9 +1340,9 @@ func TestThresholdRuleNoData(t *testing.T) {
 	}
 	readerCache, err := memorycache.New(context.Background(), factorytest.NewSettings(), cache.Config{Provider: "memory", Memory: cache.Memory{TTL: DefaultFrequency}})
 	options := clickhouseReader.NewOptions("", "", "archiveNamespace")
-	reader := clickhouseReader.NewReaderFromClickhouseConnection(options, nil, telemetryStore, prometheustest.New(instrumentationtest.New().Logger(), prometheus.Config{}), "", true, true, time.Duration(time.Second), readerCache)
+	reader := clickhouseReader.NewReaderFromClickhouseConnection(options, nil, telemetryStore, prometheustest.New(instrumentationtest.New().Logger(), prometheus.Config{}), "", time.Duration(time.Second), readerCache)
 
-	rule, err := NewThresholdRule("69", &postableRule, reader, true, true)
+	rule, err := NewThresholdRule("69", &postableRule, reader)
 	rule.TemporalityMap = map[string]map[v3.Temporality]bool{
 		"signoz_calls_total": {
 			v3.Delta: true,
@@ -1444,9 +1444,9 @@ func TestThresholdRuleTracesLink(t *testing.T) {
 	}
 
 	options := clickhouseReader.NewOptions("", "", "archiveNamespace")
-	reader := clickhouseReader.NewReaderFromClickhouseConnection(options, nil, telemetryStore, prometheustest.New(instrumentationtest.New().Logger(), prometheus.Config{}), "", true, true, time.Duration(time.Second), nil)
+	reader := clickhouseReader.NewReaderFromClickhouseConnection(options, nil, telemetryStore, prometheustest.New(instrumentationtest.New().Logger(), prometheus.Config{}), "", time.Duration(time.Second), nil)
 
-	rule, err := NewThresholdRule("69", &postableRule, reader, true, true)
+	rule, err := NewThresholdRule("69", &postableRule, reader)
 	rule.TemporalityMap = map[string]map[v3.Temporality]bool{
 		"signoz_calls_total": {
 			v3.Delta: true,
@@ -1565,9 +1565,9 @@ func TestThresholdRuleLogsLink(t *testing.T) {
 	}
 
 	options := clickhouseReader.NewOptions("", "", "archiveNamespace")
-	reader := clickhouseReader.NewReaderFromClickhouseConnection(options, nil, telemetryStore, prometheustest.New(instrumentationtest.New().Logger(), prometheus.Config{}), "", true, true, time.Duration(time.Second), nil)
+	reader := clickhouseReader.NewReaderFromClickhouseConnection(options, nil, telemetryStore, prometheustest.New(instrumentationtest.New().Logger(), prometheus.Config{}), "", time.Duration(time.Second), nil)
 
-	rule, err := NewThresholdRule("69", &postableRule, reader, true, true)
+	rule, err := NewThresholdRule("69", &postableRule, reader)
 	rule.TemporalityMap = map[string]map[v3.Temporality]bool{
 		"signoz_calls_total": {
 			v3.Delta: true,
@@ -1643,7 +1643,7 @@ func TestThresholdRuleShiftBy(t *testing.T) {
 		},
 	}
 
-	rule, err := NewThresholdRule("69", &postableRule, nil, true, true)
+	rule, err := NewThresholdRule("69", &postableRule, nil)
 	if err != nil {
 		assert.NoError(t, err)
 	}
@@ -46,8 +46,6 @@ func NewMockClickhouseReader(t *testing.T, testDB sqlstore.SQLStore) (*clickhous
 		telemetryStore,
 		prometheustest.New(instrumentationtest.New().Logger(), prometheus.Config{}),
 		"",
-		true,
-		true,
 		time.Duration(time.Second),
 		nil,
 	)
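`NewReaderFromClickhouseConnection` likewise loses the two booleans between the cluster string and the flux interval, as every call site in these hunks shows. One small cleanup the diff leaves untouched: `time.Duration(time.Second)` is a no-op conversion, since `time.Second` is already a `time.Duration`. A sketch of the trimmed call, reformatted for readability (the trailing-argument roles are my reading of the call sites, not stated in this diff):

```go
reader := clickhouseReader.NewReaderFromClickhouseConnection(
	options,
	nil,
	telemetryStore,
	prometheustest.New(instrumentationtest.New().Logger(), prometheus.Config{}),
	"",          // cluster (assumed)
	time.Second, // flux interval for trace detail (assumed)
	nil,         // cache
)
```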