Compare commits: main ... v0.57.0-tr
39 Commits
| SHA1 |
|---|
| d817df23f8 |
| 50bc68a4dc |
| b4a46800d5 |
| a7fa0bb4e4 |
| 252e0b698e |
| f64285b89d |
| 93849ea850 |
| 471bd684c8 |
| 16d538e1ba |
| 549485bbe9 |
| b843661097 |
| 80eda3c805 |
| bb6f027b21 |
| 0418bfff0e |
| aee3ca4fb1 |
| 09ff359610 |
| c5c648748e |
| f410355088 |
| 4bd531ce08 |
| 895856fa04 |
| 753eb0847e |
| 25020edfb6 |
| 6335d5eb22 |
| e5d425f06e |
| aeeb77bbc1 |
| fa6fda0497 |
| bb41435a20 |
| dd23e4ebf7 |
| 16a7717598 |
| 4749ec18bc |
| 1487820750 |
| fd09f57f76 |
| 9bc7c8708a |
| 2115093876 |
| 33f4d8306d |
| dbf5f8b77a |
| bfc46790bb |
| 7a011f3460 |
| 2c30e1493f |
@@ -38,9 +38,10 @@ type APIHandlerOptions struct {
Cache cache.Cache
Gateway *httputil.ReverseProxy
// Querier Influx Interval
FluxInterval time.Duration
UseLogsNewSchema bool
UseLicensesV3 bool
FluxInterval time.Duration
UseLogsNewSchema bool
UseTraceNewSchema bool
UseLicensesV3 bool
}

type APIHandler struct {

@@ -66,6 +67,7 @@ func NewAPIHandler(opts APIHandlerOptions) (*APIHandler, error) {
Cache: opts.Cache,
FluxInterval: opts.FluxInterval,
UseLogsNewSchema: opts.UseLogsNewSchema,
UseTraceNewSchema: opts.UseTraceNewSchema,
UseLicensesV3: opts.UseLicensesV3,
})

@@ -2,32 +2,31 @@ package api

import (
"net/http"

"go.signoz.io/signoz/ee/query-service/app/db"
"go.signoz.io/signoz/ee/query-service/model"
baseapp "go.signoz.io/signoz/pkg/query-service/app"
basemodel "go.signoz.io/signoz/pkg/query-service/model"
"go.uber.org/zap"
)

func (ah *APIHandler) searchTraces(w http.ResponseWriter, r *http.Request) {

if !ah.CheckFeature(basemodel.SmartTraceDetail) {
zap.L().Info("SmartTraceDetail feature is not enabled in this plan")
ah.APIHandler.SearchTraces(w, r)
return
}
searchTracesParams, err := baseapp.ParseSearchTracesParams(r)
if err != nil {
RespondError(w, &model.ApiError{Typ: model.ErrorBadData, Err: err}, "Error reading params")
return
}
ah.APIHandler.SearchTraces(w, r)
return

result, err := ah.opts.DataConnector.SearchTraces(r.Context(), searchTracesParams, db.SmartTraceAlgorithm)
if ah.HandleError(w, err, http.StatusBadRequest) {
return
}
// This is commented since this will be taken care by new trace API

ah.WriteJSON(w, r, result)
// if !ah.CheckFeature(basemodel.SmartTraceDetail) {
// zap.L().Info("SmartTraceDetail feature is not enabled in this plan")
// ah.APIHandler.SearchTraces(w, r)
// return
// }
// searchTracesParams, err := baseapp.ParseSearchTracesParams(r)
// if err != nil {
// RespondError(w, &model.ApiError{Typ: model.ErrorBadData, Err: err}, "Error reading params")
// return
// }

// result, err := ah.opts.DataConnector.SearchTraces(r.Context(), searchTracesParams, db.SmartTraceAlgorithm)
// if ah.HandleError(w, err, http.StatusBadRequest) {
// return
// }

// ah.WriteJSON(w, r, result)

}
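The net effect of the hunk above is easier to see in isolation: the enterprise searchTraces handler now simply delegates to the embedded base handler, and the SmartTraceDetail path is kept only as comments pending the new trace API. A minimal, self-contained sketch of that shape (stand-in types; not the actual file contents):

```go
package main

import (
	"fmt"
	"net/http"
)

// baseHandler stands in for the shared query-service APIHandler.
type baseHandler struct{}

func (b *baseHandler) SearchTraces(w http.ResponseWriter, r *http.Request) {
	fmt.Fprintln(w, "base SearchTraces response")
}

// apiHandler stands in for the EE handler that embeds the base handler.
type apiHandler struct {
	*baseHandler
}

// searchTraces delegates straight to the base SearchTraces; the
// SmartTraceDetail-specific branch from the old code is no longer taken.
func (ah *apiHandler) searchTraces(w http.ResponseWriter, r *http.Request) {
	ah.baseHandler.SearchTraces(w, r)
}

func main() {
	http.HandleFunc("/api/traces", (&apiHandler{&baseHandler{}}).searchTraces)
	_ = http.ListenAndServe(":8080", nil)
}
```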
@@ -26,8 +26,9 @@ func NewDataConnector(
dialTimeout time.Duration,
cluster string,
useLogsNewSchema bool,
useTraceNewSchema bool,
) *ClickhouseReader {
ch := basechr.NewReader(localDB, promConfigPath, lm, maxIdleConns, maxOpenConns, dialTimeout, cluster, useLogsNewSchema)
ch := basechr.NewReader(localDB, promConfigPath, lm, maxIdleConns, maxOpenConns, dialTimeout, cluster, useLogsNewSchema, useTraceNewSchema)
return &ClickhouseReader{
conn: ch.GetConn(),
appdb: localDB,

@@ -77,6 +77,7 @@ type ServerOptions struct {
Cluster string
GatewayUrl string
UseLogsNewSchema bool
UseTraceNewSchema bool
UseLicensesV3 bool
}

@@ -156,6 +157,7 @@ func NewServer(serverOptions *ServerOptions) (*Server, error) {
serverOptions.DialTimeout,
serverOptions.Cluster,
serverOptions.UseLogsNewSchema,
serverOptions.UseTraceNewSchema,
)
go qb.Start(readerReady)
reader = qb
@@ -189,6 +191,7 @@ func NewServer(serverOptions *ServerOptions) (*Server, error) {
serverOptions.DisableRules,
lm,
serverOptions.UseLogsNewSchema,
serverOptions.UseTraceNewSchema,
)

if err != nil {
@@ -270,6 +273,7 @@ func NewServer(serverOptions *ServerOptions) (*Server, error) {
FluxInterval: fluxInterval,
Gateway: gatewayProxy,
UseLogsNewSchema: serverOptions.UseLogsNewSchema,
UseTraceNewSchema: serverOptions.UseTraceNewSchema,
UseLicensesV3: serverOptions.UseLicensesV3,
}

@@ -737,7 +741,8 @@ func makeRulesManager(
cache cache.Cache,
disableRules bool,
fm baseint.FeatureLookup,
useLogsNewSchema bool) (*baserules.Manager, error) {
useLogsNewSchema bool,
useTraceNewSchema bool) (*baserules.Manager, error) {

// create engine
pqle, err := pqle.FromConfigPath(promConfigPath)
@@ -767,8 +772,9 @@ func makeRulesManager(
EvalDelay: baseconst.GetEvalDelay(),

PrepareTaskFunc: rules.PrepareTaskFunc,
PrepareTestRuleFunc: rules.TestNotification,
UseLogsNewSchema: useLogsNewSchema,
UseTraceNewSchema: useTraceNewSchema,
PrepareTestRuleFunc: rules.TestNotification,
}

// create Manager
@@ -94,6 +94,7 @@ func main() {
var cluster string

var useLogsNewSchema bool
var useTraceNewSchema bool
var useLicensesV3 bool
var cacheConfigPath, fluxInterval string
var enableQueryServiceLogOTLPExport bool
@@ -105,6 +106,7 @@ func main() {
var gatewayUrl string

flag.BoolVar(&useLogsNewSchema, "use-logs-new-schema", false, "use logs_v2 schema for logs")
flag.BoolVar(&useTraceNewSchema, "use-trace-new-schema", false, "use new schema for traces")
flag.BoolVar(&useLicensesV3, "use-licenses-v3", false, "use licenses_v3 schema for licenses")
flag.StringVar(&promConfigPath, "config", "./config/prometheus.yml", "(prometheus config to read metrics)")
flag.StringVar(&skipTopLvlOpsPath, "skip-top-level-ops", "", "(config file to skip top level operations)")
@@ -145,6 +147,7 @@ func main() {
Cluster: cluster,
GatewayUrl: gatewayUrl,
UseLogsNewSchema: useLogsNewSchema,
UseTraceNewSchema: useTraceNewSchema,
UseLicensesV3: useLicensesV3,
}
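For context, the new trace-schema flag follows the same pattern as the existing logs flag: declare a local, register it with the flag package, and copy it into the server options. A minimal, self-contained sketch of that pattern (illustrative only; the real main.go carries many more options):

```go
package main

import (
	"flag"
	"fmt"
)

// ServerOptions mirrors just the two fields touched by this change.
type ServerOptions struct {
	UseLogsNewSchema  bool
	UseTraceNewSchema bool
}

func main() {
	var useLogsNewSchema, useTraceNewSchema bool
	// Flag names and help text taken from the diff.
	flag.BoolVar(&useLogsNewSchema, "use-logs-new-schema", false, "use logs_v2 schema for logs")
	flag.BoolVar(&useTraceNewSchema, "use-trace-new-schema", false, "use new schema for traces")
	flag.Parse()

	opts := ServerOptions{
		UseLogsNewSchema:  useLogsNewSchema,
		UseTraceNewSchema: useTraceNewSchema,
	}
	fmt.Printf("%+v\n", opts)
}
```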
@@ -26,6 +26,7 @@ func PrepareTaskFunc(opts baserules.PrepareTaskOptions) (baserules.Task, error)
opts.FF,
opts.Reader,
opts.UseLogsNewSchema,
opts.UseTraceNewSchema,
baserules.WithEvalDelay(opts.ManagerOpts.EvalDelay),
)

@@ -122,6 +123,7 @@ func TestNotification(opts baserules.PrepareTestRuleOptions) (int, *basemodel.Ap
opts.FF,
opts.Reader,
opts.UseLogsNewSchema,
opts.UseTraceNewSchema,
baserules.WithSendAlways(),
baserules.WithSendUnmatched(),
)
@@ -22,6 +22,7 @@ const (
defaultTraceDB string = "signoz_traces"
defaultOperationsTable string = "distributed_signoz_operations"
defaultIndexTable string = "distributed_signoz_index_v2"
defaultLocalIndexTable string = "signoz_index_v2"
defaultErrorTable string = "distributed_signoz_error_index_v2"
defaultDurationTable string = "distributed_durationSort"
defaultUsageExplorerTable string = "distributed_usage_explorer"
@@ -45,6 +46,11 @@ const (
defaultLogsTableV2 string = "distributed_logs_v2"
defaultLogsResourceLocalTableV2 string = "logs_v2_resource"
defaultLogsResourceTableV2 string = "distributed_logs_v2_resource"

defaultTraceIndexTableV3 string = "distributed_signoz_index_v3"
defaultTraceLocalTableName string = "signoz_index_v3"
defaultTraceResourceTableV3 string = "distributed_traces_v3_resource"
defaultTraceSummaryTable string = "distributed_trace_summary"
)

// NamespaceConfig is Clickhouse's internal configuration data
@@ -58,6 +64,7 @@ type namespaceConfig struct {
TraceDB string
OperationsTable string
IndexTable string
LocalIndexTable string
DurationTable string
UsageExplorerTable string
SpansTable string
@@ -82,6 +89,11 @@ type namespaceConfig struct {
LogsTableV2 string
LogsResourceLocalTableV2 string
LogsResourceTableV2 string

TraceIndexTableV3 string
TraceLocalTableNameV3 string
TraceResourceTableV3 string
TraceSummaryTable string
}

// Connecto defines how to connect to the database
@@ -150,6 +162,7 @@ func NewOptions(
TraceDB: defaultTraceDB,
OperationsTable: defaultOperationsTable,
IndexTable: defaultIndexTable,
LocalIndexTable: defaultLocalIndexTable,
ErrorTable: defaultErrorTable,
DurationTable: defaultDurationTable,
UsageExplorerTable: defaultUsageExplorerTable,
@@ -174,6 +187,11 @@ func NewOptions(
LogsLocalTableV2: defaultLogsLocalTableV2,
LogsResourceTableV2: defaultLogsResourceTableV2,
LogsResourceLocalTableV2: defaultLogsResourceLocalTableV2,

TraceIndexTableV3: defaultTraceIndexTableV3,
TraceLocalTableNameV3: defaultTraceLocalTableName,
TraceResourceTableV3: defaultTraceResourceTableV3,
TraceSummaryTable: defaultTraceSummaryTable,
},
others: make(map[string]*namespaceConfig, len(otherNamespaces)),
}

(File diff suppressed because it is too large)
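The new defaults above point readers at the v3 trace tables. As an illustration of how such names are typically combined into fully qualified ClickHouse identifiers, here is a small sketch; the qualifiedTable helper is hypothetical and only the constant values come from the diff:

```go
package main

import "fmt"

// Values copied from the new defaults in the diff.
const (
	defaultTraceDB           = "signoz_traces"
	defaultTraceIndexTableV3 = "distributed_signoz_index_v3"
	defaultTraceSummaryTable = "distributed_trace_summary"
)

// qualifiedTable joins a database and table name the way ClickHouse expects
// them in a FROM clause, e.g. signoz_traces.distributed_signoz_index_v3.
func qualifiedTable(db, table string) string {
	return fmt.Sprintf("%s.%s", db, table)
}

func main() {
	fmt.Println(qualifiedTable(defaultTraceDB, defaultTraceIndexTableV3))
	fmt.Println(qualifiedTable(defaultTraceDB, defaultTraceSummaryTable))
}
```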
@@ -39,6 +39,7 @@ import (
querierV2 "go.signoz.io/signoz/pkg/query-service/app/querier/v2"
"go.signoz.io/signoz/pkg/query-service/app/queryBuilder"
tracesV3 "go.signoz.io/signoz/pkg/query-service/app/traces/v3"
tracesV4 "go.signoz.io/signoz/pkg/query-service/app/traces/v4"
"go.signoz.io/signoz/pkg/query-service/auth"
"go.signoz.io/signoz/pkg/query-service/cache"
"go.signoz.io/signoz/pkg/query-service/common"
@@ -110,8 +111,9 @@ type APIHandler struct {
// Websocket connection upgrader
Upgrader *websocket.Upgrader

UseLogsNewSchema bool
UseLicensesV3 bool
UseLogsNewSchema bool
UseTraceNewSchema bool
UseLicensesV3 bool

hostsRepo *inframetrics.HostsRepo
processesRepo *inframetrics.ProcessesRepo
@@ -163,6 +165,7 @@ type APIHandlerOpts struct {
// Use Logs New schema
UseLogsNewSchema bool

UseTraceNewSchema bool
// Use Licenses V3 structure
UseLicensesV3 bool
}
@@ -176,21 +179,23 @@ func NewAPIHandler(opts APIHandlerOpts) (*APIHandler, error) {
}

querierOpts := querier.QuerierOptions{
Reader: opts.Reader,
Cache: opts.Cache,
KeyGenerator: queryBuilder.NewKeyGenerator(),
FluxInterval: opts.FluxInterval,
FeatureLookup: opts.FeatureFlags,
UseLogsNewSchema: opts.UseLogsNewSchema,
Reader: opts.Reader,
Cache: opts.Cache,
KeyGenerator: queryBuilder.NewKeyGenerator(),
FluxInterval: opts.FluxInterval,
FeatureLookup: opts.FeatureFlags,
UseLogsNewSchema: opts.UseLogsNewSchema,
UseTraceNewSchema: opts.UseTraceNewSchema,
}

querierOptsV2 := querierV2.QuerierOptions{
Reader: opts.Reader,
Cache: opts.Cache,
KeyGenerator: queryBuilder.NewKeyGenerator(),
FluxInterval: opts.FluxInterval,
FeatureLookup: opts.FeatureFlags,
UseLogsNewSchema: opts.UseLogsNewSchema,
Reader: opts.Reader,
Cache: opts.Cache,
KeyGenerator: queryBuilder.NewKeyGenerator(),
FluxInterval: opts.FluxInterval,
FeatureLookup: opts.FeatureFlags,
UseLogsNewSchema: opts.UseLogsNewSchema,
UseTraceNewSchema: opts.UseTraceNewSchema,
}

querier := querier.NewQuerier(querierOpts)
@@ -224,6 +229,7 @@ func NewAPIHandler(opts APIHandlerOpts) (*APIHandler, error) {
querier: querier,
querierV2: querierv2,
UseLogsNewSchema: opts.UseLogsNewSchema,
UseTraceNewSchema: opts.UseTraceNewSchema,
UseLicensesV3: opts.UseLicensesV3,
hostsRepo: hostsRepo,
processesRepo: processesRepo,
@@ -242,9 +248,14 @@ func NewAPIHandler(opts APIHandlerOpts) (*APIHandler, error) {
logsQueryBuilder = logsv4.PrepareLogsQuery
}

tracesQueryBuilder := tracesV3.PrepareTracesQuery
if opts.UseTraceNewSchema {
tracesQueryBuilder = tracesV4.PrepareTracesQuery
}

builderOpts := queryBuilder.QueryBuilderOptions{
BuildMetricQuery: metricsv3.PrepareMetricQuery,
BuildTraceQuery: tracesV3.PrepareTracesQuery,
BuildTraceQuery: tracesQueryBuilder,
BuildLogQuery: logsQueryBuilder,
}
aH.queryBuilder = queryBuilder.NewQueryBuilder(builderOpts, aH.featureFlags)
@@ -526,12 +537,6 @@ func (aH *APIHandler) RegisterRoutes(router *mux.Router, am *AuthMiddleware) {
router.HandleFunc("/api/v1/configs", am.OpenAccess(aH.getConfigs)).Methods(http.MethodGet)
router.HandleFunc("/api/v1/health", am.OpenAccess(aH.getHealth)).Methods(http.MethodGet)

router.HandleFunc("/api/v1/getSpanFilters", am.ViewAccess(aH.getSpanFilters)).Methods(http.MethodPost)
router.HandleFunc("/api/v1/getTagFilters", am.ViewAccess(aH.getTagFilters)).Methods(http.MethodPost)
router.HandleFunc("/api/v1/getFilteredSpans", am.ViewAccess(aH.getFilteredSpans)).Methods(http.MethodPost)
router.HandleFunc("/api/v1/getFilteredSpans/aggregates", am.ViewAccess(aH.getFilteredSpanAggregates)).Methods(http.MethodPost)
router.HandleFunc("/api/v1/getTagValues", am.ViewAccess(aH.getTagValues)).Methods(http.MethodPost)

router.HandleFunc("/api/v1/listErrors", am.ViewAccess(aH.listErrors)).Methods(http.MethodPost)
router.HandleFunc("/api/v1/countErrors", am.ViewAccess(aH.countErrors)).Methods(http.MethodPost)
router.HandleFunc("/api/v1/errorFromErrorID", am.ViewAccess(aH.getErrorFromErrorID)).Methods(http.MethodGet)
@@ -1847,86 +1852,6 @@ func (aH *APIHandler) getErrorFromGroupID(w http.ResponseWriter, r *http.Request
aH.WriteJSON(w, r, result)
}

func (aH *APIHandler) getSpanFilters(w http.ResponseWriter, r *http.Request) {

query, err := parseSpanFilterRequestBody(r)
if aH.HandleError(w, err, http.StatusBadRequest) {
return
}

result, apiErr := aH.reader.GetSpanFilters(r.Context(), query)

if apiErr != nil && aH.HandleError(w, apiErr.Err, http.StatusInternalServerError) {
return
}

aH.WriteJSON(w, r, result)
}

func (aH *APIHandler) getFilteredSpans(w http.ResponseWriter, r *http.Request) {

query, err := parseFilteredSpansRequest(r, aH)
if aH.HandleError(w, err, http.StatusBadRequest) {
return
}

result, apiErr := aH.reader.GetFilteredSpans(r.Context(), query)

if apiErr != nil && aH.HandleError(w, apiErr.Err, http.StatusInternalServerError) {
return
}

aH.WriteJSON(w, r, result)
}

func (aH *APIHandler) getFilteredSpanAggregates(w http.ResponseWriter, r *http.Request) {

query, err := parseFilteredSpanAggregatesRequest(r)
if aH.HandleError(w, err, http.StatusBadRequest) {
return
}

result, apiErr := aH.reader.GetFilteredSpansAggregates(r.Context(), query)

if apiErr != nil && aH.HandleError(w, apiErr.Err, http.StatusInternalServerError) {
return
}

aH.WriteJSON(w, r, result)
}

func (aH *APIHandler) getTagFilters(w http.ResponseWriter, r *http.Request) {

query, err := parseTagFilterRequest(r)
if aH.HandleError(w, err, http.StatusBadRequest) {
return
}

result, apiErr := aH.reader.GetTagFilters(r.Context(), query)

if apiErr != nil && aH.HandleError(w, apiErr.Err, http.StatusInternalServerError) {
return
}

aH.WriteJSON(w, r, result)
}

func (aH *APIHandler) getTagValues(w http.ResponseWriter, r *http.Request) {

query, err := parseTagValueRequest(r)
if aH.HandleError(w, err, http.StatusBadRequest) {
return
}

result, apiErr := aH.reader.GetTagValues(r.Context(), query)

if apiErr != nil && aH.HandleError(w, apiErr.Err, http.StatusInternalServerError) {
return
}

aH.WriteJSON(w, r, result)
}

func (aH *APIHandler) setTTL(w http.ResponseWriter, r *http.Request) {
ttlParams, err := parseTTLParams(r)
if aH.HandleError(w, err, http.StatusBadRequest) {
@@ -4433,7 +4358,12 @@ func (aH *APIHandler) queryRangeV3(ctx context.Context, queryRangeParams *v3.Que
RespondError(w, apiErrObj, errQuriesByName)
return
}
tracesV3.Enrich(queryRangeParams, spanKeys)
if aH.UseTraceNewSchema {
tracesV4.Enrich(queryRangeParams, spanKeys)
} else {
tracesV3.Enrich(queryRangeParams, spanKeys)
}

}

// WARN: Only works for AND operator in traces query
@@ -10,6 +10,7 @@ import (
logsV4 "go.signoz.io/signoz/pkg/query-service/app/logs/v4"
metricsV3 "go.signoz.io/signoz/pkg/query-service/app/metrics/v3"
tracesV3 "go.signoz.io/signoz/pkg/query-service/app/traces/v3"
tracesV4 "go.signoz.io/signoz/pkg/query-service/app/traces/v4"
"go.signoz.io/signoz/pkg/query-service/common"
"go.signoz.io/signoz/pkg/query-service/constants"
v3 "go.signoz.io/signoz/pkg/query-service/model/v3"
@@ -158,11 +159,16 @@ func (q *querier) runBuilderQuery(

if builderQuery.DataSource == v3.DataSourceTraces {

tracesQueryBuilder := tracesV3.PrepareTracesQuery
if q.UseTraceNewSchema {
tracesQueryBuilder = tracesV4.PrepareTracesQuery
}

var query string
var err error
// for ts query with group by and limit form two queries
if params.CompositeQuery.PanelType == v3.PanelTypeGraph && builderQuery.Limit > 0 && len(builderQuery.GroupBy) > 0 {
limitQuery, err := tracesV3.PrepareTracesQuery(
limitQuery, err := tracesQueryBuilder(
start,
end,
params.CompositeQuery.PanelType,
@@ -173,7 +179,7 @@ func (q *querier) runBuilderQuery(
ch <- channelResult{Err: err, Name: queryName, Query: limitQuery, Series: nil}
return
}
placeholderQuery, err := tracesV3.PrepareTracesQuery(
placeholderQuery, err := tracesQueryBuilder(
start,
end,
params.CompositeQuery.PanelType,
@@ -186,7 +192,7 @@ func (q *querier) runBuilderQuery(
}
query = fmt.Sprintf(placeholderQuery, limitQuery)
} else {
query, err = tracesV3.PrepareTracesQuery(
query, err = tracesQueryBuilder(
start,
end,
params.CompositeQuery.PanelType,
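The comment "for ts query with group by and limit form two queries" describes a pattern worth spelling out: a first query computes the limited set of group values, and a second, outer query carries a placeholder that is filled with the first query via fmt.Sprintf, exactly as in the line query = fmt.Sprintf(placeholderQuery, limitQuery) above. A stripped-down sketch of that composition under assumed query shapes (the real PrepareTracesQuery takes many more parameters):

```go
package main

import "fmt"

// buildLimitQuery stands in for the query that selects the top-N group values.
func buildLimitQuery() string {
	return "SELECT serviceName FROM traces GROUP BY serviceName ORDER BY count() DESC LIMIT 10"
}

// buildPlaceholderQuery returns the outer query with a %s slot for the inner query.
func buildPlaceholderQuery() string {
	return "SELECT serviceName, count() FROM traces WHERE serviceName IN (%s) GROUP BY serviceName"
}

func main() {
	limitQuery := buildLimitQuery()
	placeholderQuery := buildPlaceholderQuery()
	// Same composition step as in the diff.
	query := fmt.Sprintf(placeholderQuery, limitQuery)
	fmt.Println(query)
}
```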
@@ -11,6 +11,7 @@ import (
metricsV3 "go.signoz.io/signoz/pkg/query-service/app/metrics/v3"
"go.signoz.io/signoz/pkg/query-service/app/queryBuilder"
tracesV3 "go.signoz.io/signoz/pkg/query-service/app/traces/v3"
tracesV4 "go.signoz.io/signoz/pkg/query-service/app/traces/v4"
"go.signoz.io/signoz/pkg/query-service/common"
"go.signoz.io/signoz/pkg/query-service/constants"
chErrors "go.signoz.io/signoz/pkg/query-service/errors"
@@ -65,10 +66,11 @@ type QuerierOptions struct {
FeatureLookup interfaces.FeatureLookup

// used for testing
TestingMode bool
ReturnedSeries []*v3.Series
ReturnedErr error
UseLogsNewSchema bool
TestingMode bool
ReturnedSeries []*v3.Series
ReturnedErr error
UseLogsNewSchema bool
UseTraceNewSchema bool
}

func NewQuerier(opts QuerierOptions) interfaces.Querier {
@@ -76,6 +78,10 @@ func NewQuerier(opts QuerierOptions) interfaces.Querier {
if opts.UseLogsNewSchema {
logsQueryBuilder = logsV4.PrepareLogsQuery
}
tracesQueryBuilder := tracesV3.PrepareTracesQuery
if opts.UseTraceNewSchema {
tracesQueryBuilder = tracesV4.PrepareTracesQuery
}

qc := querycache.NewQueryCache(querycache.WithCache(opts.Cache), querycache.WithFluxInterval(opts.FluxInterval))

@@ -87,16 +93,17 @@ func NewQuerier(opts QuerierOptions) interfaces.Querier {
fluxInterval: opts.FluxInterval,

builder: queryBuilder.NewQueryBuilder(queryBuilder.QueryBuilderOptions{
BuildTraceQuery: tracesV3.PrepareTracesQuery,
BuildTraceQuery: tracesQueryBuilder,
BuildLogQuery: logsQueryBuilder,
BuildMetricQuery: metricsV3.PrepareMetricQuery,
}, opts.FeatureLookup),
featureLookUp: opts.FeatureLookup,

testingMode: opts.TestingMode,
returnedSeries: opts.ReturnedSeries,
returnedErr: opts.ReturnedErr,
UseLogsNewSchema: opts.UseLogsNewSchema,
testingMode: opts.TestingMode,
returnedSeries: opts.ReturnedSeries,
returnedErr: opts.ReturnedErr,
UseLogsNewSchema: opts.UseLogsNewSchema,
UseTraceNewSchema: opts.UseTraceNewSchema,
}
}
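The HTTP handler and both queriers pick the trace query builder with the same idiom: default to the v3 builder and swap in the v4 builder once, at construction time, when the flag is set. Because PrepareTracesQuery is just a function value, nothing else downstream has to branch on the flag. A minimal sketch of the idiom with a deliberately simplified function signature (the real builders take far more arguments):

```go
package main

import "fmt"

// prepareTracesQueryFunc stands in for the shared signature of
// tracesV3.PrepareTracesQuery and tracesV4.PrepareTracesQuery.
type prepareTracesQueryFunc func(start, end int64) (string, error)

func prepareTracesQueryV3(start, end int64) (string, error) {
	return fmt.Sprintf("/* v3 */ SELECT ... WHERE ts BETWEEN %d AND %d", start, end), nil
}

func prepareTracesQueryV4(start, end int64) (string, error) {
	return fmt.Sprintf("/* v4 */ SELECT ... WHERE ts BETWEEN %d AND %d", start, end), nil
}

// newTracesQueryBuilder selects the builder once; callers never check the flag again.
func newTracesQueryBuilder(useTraceNewSchema bool) prepareTracesQueryFunc {
	builder := prepareTracesQueryV3
	if useTraceNewSchema {
		builder = prepareTracesQueryV4
	}
	return builder
}

func main() {
	q, _ := newTracesQueryBuilder(true)(0, 100)
	fmt.Println(q)
}
```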
@@ -1383,6 +1383,7 @@ func Test_querier_runWindowBasedListQuery(t *testing.T) {
featureManager.StartManager(),
"",
true,
true,
)

q := &querier{
@@ -11,6 +11,7 @@ import (
metricsV3 "go.signoz.io/signoz/pkg/query-service/app/metrics/v3"
metricsV4 "go.signoz.io/signoz/pkg/query-service/app/metrics/v4"
tracesV3 "go.signoz.io/signoz/pkg/query-service/app/traces/v3"
tracesV4 "go.signoz.io/signoz/pkg/query-service/app/traces/v4"
"go.signoz.io/signoz/pkg/query-service/common"
"go.signoz.io/signoz/pkg/query-service/constants"
v3 "go.signoz.io/signoz/pkg/query-service/model/v3"
@@ -158,11 +159,16 @@ func (q *querier) runBuilderQuery(

if builderQuery.DataSource == v3.DataSourceTraces {

tracesQueryBuilder := tracesV3.PrepareTracesQuery
if q.UseTraceNewSchema {
tracesQueryBuilder = tracesV4.PrepareTracesQuery
}

var query string
var err error
// for ts query with group by and limit form two queries
if params.CompositeQuery.PanelType == v3.PanelTypeGraph && builderQuery.Limit > 0 && len(builderQuery.GroupBy) > 0 {
limitQuery, err := tracesV3.PrepareTracesQuery(
limitQuery, err := tracesQueryBuilder(
start,
end,
params.CompositeQuery.PanelType,
@@ -173,7 +179,7 @@ func (q *querier) runBuilderQuery(
ch <- channelResult{Err: err, Name: queryName, Query: limitQuery, Series: nil}
return
}
placeholderQuery, err := tracesV3.PrepareTracesQuery(
placeholderQuery, err := tracesQueryBuilder(
start,
end,
params.CompositeQuery.PanelType,
@@ -186,7 +192,7 @@ func (q *querier) runBuilderQuery(
}
query = fmt.Sprintf(placeholderQuery, limitQuery)
} else {
query, err = tracesV3.PrepareTracesQuery(
query, err = tracesQueryBuilder(
start,
end,
params.CompositeQuery.PanelType,
@@ -11,6 +11,7 @@ import (
metricsV4 "go.signoz.io/signoz/pkg/query-service/app/metrics/v4"
"go.signoz.io/signoz/pkg/query-service/app/queryBuilder"
tracesV3 "go.signoz.io/signoz/pkg/query-service/app/traces/v3"
tracesV4 "go.signoz.io/signoz/pkg/query-service/app/traces/v4"
"go.signoz.io/signoz/pkg/query-service/common"
"go.signoz.io/signoz/pkg/query-service/constants"
chErrors "go.signoz.io/signoz/pkg/query-service/errors"
@@ -64,10 +65,11 @@ type QuerierOptions struct {
FeatureLookup interfaces.FeatureLookup

// used for testing
TestingMode bool
ReturnedSeries []*v3.Series
ReturnedErr error
UseLogsNewSchema bool
TestingMode bool
ReturnedSeries []*v3.Series
ReturnedErr error
UseLogsNewSchema bool
UseTraceNewSchema bool
}

func NewQuerier(opts QuerierOptions) interfaces.Querier {
@@ -76,6 +78,11 @@ func NewQuerier(opts QuerierOptions) interfaces.Querier {
logsQueryBuilder = logsV4.PrepareLogsQuery
}

tracesQueryBuilder := tracesV3.PrepareTracesQuery
if opts.UseTraceNewSchema {
tracesQueryBuilder = tracesV4.PrepareTracesQuery
}

qc := querycache.NewQueryCache(querycache.WithCache(opts.Cache), querycache.WithFluxInterval(opts.FluxInterval))

return &querier{
@@ -86,16 +93,17 @@ func NewQuerier(opts QuerierOptions) interfaces.Querier {
fluxInterval: opts.FluxInterval,

builder: queryBuilder.NewQueryBuilder(queryBuilder.QueryBuilderOptions{
BuildTraceQuery: tracesV3.PrepareTracesQuery,
BuildTraceQuery: tracesQueryBuilder,
BuildLogQuery: logsQueryBuilder,
BuildMetricQuery: metricsV4.PrepareMetricQuery,
}, opts.FeatureLookup),
featureLookUp: opts.FeatureLookup,

testingMode: opts.TestingMode,
returnedSeries: opts.ReturnedSeries,
returnedErr: opts.ReturnedErr,
UseLogsNewSchema: opts.UseLogsNewSchema,
testingMode: opts.TestingMode,
returnedSeries: opts.ReturnedSeries,
returnedErr: opts.ReturnedErr,
UseLogsNewSchema: opts.UseLogsNewSchema,
UseTraceNewSchema: opts.UseTraceNewSchema,
}
}
@@ -1437,6 +1437,7 @@ func Test_querier_runWindowBasedListQuery(t *testing.T) {
featureManager.StartManager(),
"",
true,
true,
)

q := &querier{
@@ -67,6 +67,7 @@ type ServerOptions struct {
FluxInterval string
Cluster string
UseLogsNewSchema bool
UseTraceNewSchema bool
}

// Server runs HTTP, Mux and a grpc server
@@ -130,6 +131,7 @@ func NewServer(serverOptions *ServerOptions) (*Server, error) {
serverOptions.DialTimeout,
serverOptions.Cluster,
serverOptions.UseLogsNewSchema,
serverOptions.UseTraceNewSchema,
)
go clickhouseReader.Start(readerReady)
reader = clickhouseReader
@@ -157,7 +159,7 @@ func NewServer(serverOptions *ServerOptions) (*Server, error) {
rm, err := makeRulesManager(
serverOptions.PromConfigPath,
constants.GetAlertManagerApiPrefix(),
serverOptions.RuleRepoURL, localDB, reader, c, serverOptions.DisableRules, fm, serverOptions.UseLogsNewSchema)
serverOptions.RuleRepoURL, localDB, reader, c, serverOptions.DisableRules, fm, serverOptions.UseLogsNewSchema, serverOptions.UseTraceNewSchema)
if err != nil {
return nil, err
}
@@ -202,6 +204,7 @@ func NewServer(serverOptions *ServerOptions) (*Server, error) {
Cache: c,
FluxInterval: fluxInterval,
UseLogsNewSchema: serverOptions.UseLogsNewSchema,
UseTraceNewSchema: serverOptions.UseTraceNewSchema,
})
if err != nil {
return nil, err
@@ -727,7 +730,8 @@ func makeRulesManager(
cache cache.Cache,
disableRules bool,
fm interfaces.FeatureLookup,
useLogsNewSchema bool) (*rules.Manager, error) {
useLogsNewSchema bool,
useTraceNewSchema bool) (*rules.Manager, error) {

// create engine
pqle, err := pqle.FromReader(ch)
@@ -744,18 +748,19 @@ func makeRulesManager(

// create manager opts
managerOpts := &rules.ManagerOptions{
NotifierOpts: notifierOpts,
PqlEngine: pqle,
RepoURL: ruleRepoURL,
DBConn: db,
Context: context.Background(),
Logger: zap.L(),
DisableRules: disableRules,
FeatureFlags: fm,
Reader: ch,
Cache: cache,
EvalDelay: constants.GetEvalDelay(),
UseLogsNewSchema: useLogsNewSchema,
NotifierOpts: notifierOpts,
PqlEngine: pqle,
RepoURL: ruleRepoURL,
DBConn: db,
Context: context.Background(),
Logger: zap.L(),
DisableRules: disableRules,
FeatureFlags: fm,
Reader: ch,
Cache: cache,
EvalDelay: constants.GetEvalDelay(),
UseLogsNewSchema: useLogsNewSchema,
UseTraceNewSchema: useTraceNewSchema,
}

// create Manager
@@ -238,8 +238,8 @@ const (
SIGNOZ_EXP_HISTOGRAM_TABLENAME = "distributed_exp_hist"
SIGNOZ_TRACE_DBNAME = "signoz_traces"
SIGNOZ_SPAN_INDEX_TABLENAME = "distributed_signoz_index_v2"
SIGNOZ_SPAN_INDEX_LOCAL_TABLENAME = "signoz_index_v2"
SIGNOZ_SPAN_INDEX_V3 = "distributed_signoz_index_v3"
SIGNOZ_SPAN_INDEX_LOCAL_TABLENAME = "signoz_index_v2"
SIGNOZ_SPAN_INDEX_V3_LOCAL_TABLENAME = "signoz_index_v3"
SIGNOZ_TIMESERIES_v4_LOCAL_TABLENAME = "time_series_v4"
SIGNOZ_TIMESERIES_v4_6HRS_LOCAL_TABLENAME = "time_series_v4_6hrs"
@@ -29,15 +29,10 @@ type Reader interface {
// GetDisks returns a list of disks configured in the underlying DB. It is supported by
// clickhouse only.
GetDisks(ctx context.Context) (*[]model.DiskItem, *model.ApiError)
GetSpanFilters(ctx context.Context, query *model.SpanFilterParams) (*model.SpanFiltersResponse, *model.ApiError)
GetTraceAggregateAttributes(ctx context.Context, req *v3.AggregateAttributeRequest) (*v3.AggregateAttributeResponse, error)
GetTraceAttributeKeys(ctx context.Context, req *v3.FilterAttributeKeyRequest) (*v3.FilterAttributeKeyResponse, error)
GetTraceAttributeValues(ctx context.Context, req *v3.FilterAttributeValueRequest) (*v3.FilterAttributeValueResponse, error)
GetSpanAttributeKeys(ctx context.Context) (map[string]v3.AttributeKey, error)
GetTagFilters(ctx context.Context, query *model.TagFilterParams) (*model.TagFilters, *model.ApiError)
GetTagValues(ctx context.Context, query *model.TagFilterParams) (*model.TagValues, *model.ApiError)
GetFilteredSpans(ctx context.Context, query *model.GetFilteredSpansParams) (*model.GetFilterSpansResponse, *model.ApiError)
GetFilteredSpansAggregates(ctx context.Context, query *model.GetFilteredSpanAggregatesParams) (*model.GetFilteredSpansAggregatesResponse, *model.ApiError)

ListErrors(ctx context.Context, params *model.ListErrorsParams) (*[]model.Error, *model.ApiError)
CountErrors(ctx context.Context, params *model.CountErrorsParams) (uint64, *model.ApiError)
@@ -39,6 +39,7 @@ func main() {
var disableRules bool

var useLogsNewSchema bool
var useTraceNewSchema bool
// the url used to build link in the alert messages in slack and other systems
var ruleRepoURL, cacheConfigPath, fluxInterval string
var cluster string
@@ -50,6 +51,7 @@ func main() {
var dialTimeout time.Duration

flag.BoolVar(&useLogsNewSchema, "use-logs-new-schema", false, "use logs_v2 schema for logs")
flag.BoolVar(&useTraceNewSchema, "use-trace-new-schema", false, "use new schema for traces")
flag.StringVar(&promConfigPath, "config", "./config/prometheus.yml", "(prometheus config to read metrics)")
flag.StringVar(&skipTopLvlOpsPath, "skip-top-level-ops", "", "(config file to skip top level operations)")
flag.BoolVar(&disableRules, "rules.disable", false, "(disable rule evaluation)")
@@ -87,6 +89,7 @@ func main() {
FluxInterval: fluxInterval,
Cluster: cluster,
UseLogsNewSchema: useLogsNewSchema,
UseTraceNewSchema: useTraceNewSchema,
}

// Read the jwt secret key
@@ -269,6 +269,32 @@ type SearchSpanResponseItem struct {
SpanKind string `json:"spanKind"`
}

type SearchSpanResponseItemV2 struct {
TimeUnixNano time.Time `json:"timestamp" ch:"timestamp"`
DurationNano uint64 `json:"durationNano" ch:"durationNano"`
SpanID string `json:"spanId" ch:"spanID"`
TraceID string `json:"traceId" ch:"traceID"`
HasError bool `json:"hasError" ch:"hasError"`
Kind int8 `json:"kind" ch:"kind"`
ServiceName string `json:"serviceName" ch:"serviceName"`
Name string `json:"name" ch:"name"`
References string `json:"references,omitempty" ch:"references"`
Attributes_string map[string]string `json:"attributes_string" ch:"attributes_string"`
Attributes_number map[string]float64 `json:"attributes_number" ch:"attributes_number"`
Attributes_bool map[string]bool `json:"attributes_bool" ch:"attributes_bool"`
Events []string `json:"event" ch:"events"`
StatusMessage string `json:"statusMessage" ch:"statusMessage"`
StatusCodeString string `json:"statusCodeString" ch:"statusCodeString"`
SpanKind string `json:"spanKind" ch:"spanKind"`
}

type TraceSummary struct {
TraceID string `json:"traceId" ch:"trace_id"`
Start time.Time `json:"start" ch:"start"`
End time.Time `json:"end" ch:"end"`
NumSpans uint64 `json:"numSpans" ch:"num_spans"`
}

type OtelSpanRef struct {
TraceId string `json:"traceId,omitempty"`
SpanId string `json:"spanId,omitempty"`
|
||||
ManagerOpts *ManagerOptions
|
||||
NotifyFunc NotifyFunc
|
||||
|
||||
UseLogsNewSchema bool
|
||||
UseLogsNewSchema bool
|
||||
UseTraceNewSchema bool
|
||||
}
|
||||
|
||||
type PrepareTestRuleOptions struct {
|
||||
@@ -48,7 +49,8 @@ type PrepareTestRuleOptions struct {
|
||||
ManagerOpts *ManagerOptions
|
||||
NotifyFunc NotifyFunc
|
||||
|
||||
UseLogsNewSchema bool
|
||||
UseLogsNewSchema bool
|
||||
UseTraceNewSchema bool
|
||||
}
|
||||
|
||||
const taskNamesuffix = "webAppEditor"
|
||||
@@ -91,9 +93,9 @@ type ManagerOptions struct {
|
||||
|
||||
PrepareTaskFunc func(opts PrepareTaskOptions) (Task, error)
|
||||
|
||||
UseLogsNewSchema bool
|
||||
UseTraceNewSchema bool
|
||||
PrepareTestRuleFunc func(opts PrepareTestRuleOptions) (int, *model.ApiError)
|
||||
|
||||
UseLogsNewSchema bool
|
||||
}
|
||||
|
||||
// The Manager manages recording and alerting rules.
|
||||
@@ -117,7 +119,8 @@ type Manager struct {
|
||||
prepareTaskFunc func(opts PrepareTaskOptions) (Task, error)
|
||||
prepareTestRuleFunc func(opts PrepareTestRuleOptions) (int, *model.ApiError)
|
||||
|
||||
UseLogsNewSchema bool
|
||||
UseLogsNewSchema bool
|
||||
UseTraceNewSchema bool
|
||||
}
|
||||
|
||||
func defaultOptions(o *ManagerOptions) *ManagerOptions {
|
||||
@@ -156,6 +159,7 @@ func defaultPrepareTaskFunc(opts PrepareTaskOptions) (Task, error) {
|
||||
opts.FF,
|
||||
opts.Reader,
|
||||
opts.UseLogsNewSchema,
|
||||
opts.UseTraceNewSchema,
|
||||
WithEvalDelay(opts.ManagerOpts.EvalDelay),
|
||||
)
|
||||
|
||||
@@ -368,7 +372,8 @@ func (m *Manager) editTask(rule *PostableRule, taskName string) error {
|
||||
ManagerOpts: m.opts,
|
||||
NotifyFunc: m.prepareNotifyFunc(),
|
||||
|
||||
UseLogsNewSchema: m.opts.UseLogsNewSchema,
|
||||
UseLogsNewSchema: m.opts.UseLogsNewSchema,
|
||||
UseTraceNewSchema: m.opts.UseTraceNewSchema,
|
||||
})
|
||||
|
||||
if err != nil {
|
||||
@@ -490,7 +495,8 @@ func (m *Manager) addTask(rule *PostableRule, taskName string) error {
|
||||
ManagerOpts: m.opts,
|
||||
NotifyFunc: m.prepareNotifyFunc(),
|
||||
|
||||
UseLogsNewSchema: m.opts.UseLogsNewSchema,
|
||||
UseLogsNewSchema: m.opts.UseLogsNewSchema,
|
||||
UseTraceNewSchema: m.opts.UseTraceNewSchema,
|
||||
})
|
||||
|
||||
if err != nil {
|
||||
@@ -809,15 +815,16 @@ func (m *Manager) TestNotification(ctx context.Context, ruleStr string) (int, *m
|
||||
}
|
||||
|
||||
alertCount, apiErr := m.prepareTestRuleFunc(PrepareTestRuleOptions{
|
||||
Rule: parsedRule,
|
||||
RuleDB: m.ruleDB,
|
||||
Logger: m.logger,
|
||||
Reader: m.reader,
|
||||
Cache: m.cache,
|
||||
FF: m.featureFlags,
|
||||
ManagerOpts: m.opts,
|
||||
NotifyFunc: m.prepareNotifyFunc(),
|
||||
UseLogsNewSchema: m.opts.UseLogsNewSchema,
|
||||
Rule: parsedRule,
|
||||
RuleDB: m.ruleDB,
|
||||
Logger: m.logger,
|
||||
Reader: m.reader,
|
||||
Cache: m.cache,
|
||||
FF: m.featureFlags,
|
||||
ManagerOpts: m.opts,
|
||||
NotifyFunc: m.prepareNotifyFunc(),
|
||||
UseLogsNewSchema: m.opts.UseLogsNewSchema,
|
||||
UseTraceNewSchema: m.opts.UseTraceNewSchema,
|
||||
})
|
||||
|
||||
return alertCount, apiErr
|
||||
|
||||
@@ -49,6 +49,7 @@ func defaultTestNotification(opts PrepareTestRuleOptions) (int, *model.ApiError)
opts.FF,
opts.Reader,
opts.UseLogsNewSchema,
opts.UseTraceNewSchema,
WithSendAlways(),
WithSendUnmatched(),
)
@@ -58,6 +58,7 @@ func NewThresholdRule(
featureFlags interfaces.FeatureLookup,
reader interfaces.Reader,
useLogsNewSchema bool,
useTraceNewSchema bool,
opts ...RuleOption,
) (*ThresholdRule, error) {

@@ -74,19 +75,21 @@ func NewThresholdRule(
}

querierOption := querier.QuerierOptions{
Reader: reader,
Cache: nil,
KeyGenerator: queryBuilder.NewKeyGenerator(),
FeatureLookup: featureFlags,
UseLogsNewSchema: useLogsNewSchema,
Reader: reader,
Cache: nil,
KeyGenerator: queryBuilder.NewKeyGenerator(),
FeatureLookup: featureFlags,
UseLogsNewSchema: useLogsNewSchema,
UseTraceNewSchema: useTraceNewSchema,
}

querierOptsV2 := querierV2.QuerierOptions{
Reader: reader,
Cache: nil,
KeyGenerator: queryBuilder.NewKeyGenerator(),
FeatureLookup: featureFlags,
UseLogsNewSchema: useLogsNewSchema,
Reader: reader,
Cache: nil,
KeyGenerator: queryBuilder.NewKeyGenerator(),
FeatureLookup: featureFlags,
UseLogsNewSchema: useLogsNewSchema,
UseTraceNewSchema: useTraceNewSchema,
}

t.querier = querier.NewQuerier(querierOption)
@@ -791,7 +791,7 @@ func TestThresholdRuleShouldAlert(t *testing.T) {
postableRule.RuleCondition.MatchType = MatchType(c.matchType)
postableRule.RuleCondition.Target = &c.target

rule, err := NewThresholdRule("69", &postableRule, fm, nil, true, WithEvalDelay(2*time.Minute))
rule, err := NewThresholdRule("69", &postableRule, fm, nil, true, true, WithEvalDelay(2*time.Minute))
if err != nil {
assert.NoError(t, err)
}
@@ -880,7 +880,7 @@ func TestPrepareLinksToLogs(t *testing.T) {
}
fm := featureManager.StartManager()

rule, err := NewThresholdRule("69", &postableRule, fm, nil, true, WithEvalDelay(2*time.Minute))
rule, err := NewThresholdRule("69", &postableRule, fm, nil, true, true, WithEvalDelay(2*time.Minute))
if err != nil {
assert.NoError(t, err)
}
@@ -922,7 +922,7 @@ func TestPrepareLinksToTraces(t *testing.T) {
}
fm := featureManager.StartManager()

rule, err := NewThresholdRule("69", &postableRule, fm, nil, true, WithEvalDelay(2*time.Minute))
rule, err := NewThresholdRule("69", &postableRule, fm, nil, true, true, WithEvalDelay(2*time.Minute))
if err != nil {
assert.NoError(t, err)
}
@@ -998,7 +998,7 @@ func TestThresholdRuleLabelNormalization(t *testing.T) {
postableRule.RuleCondition.MatchType = MatchType(c.matchType)
postableRule.RuleCondition.Target = &c.target

rule, err := NewThresholdRule("69", &postableRule, fm, nil, true, WithEvalDelay(2*time.Minute))
rule, err := NewThresholdRule("69", &postableRule, fm, nil, true, true, WithEvalDelay(2*time.Minute))
if err != nil {
assert.NoError(t, err)
}
@@ -1051,7 +1051,7 @@ func TestThresholdRuleEvalDelay(t *testing.T) {

fm := featureManager.StartManager()
for idx, c := range cases {
rule, err := NewThresholdRule("69", &postableRule, fm, nil, true) // no eval delay
rule, err := NewThresholdRule("69", &postableRule, fm, nil, true, true) // no eval delay
if err != nil {
assert.NoError(t, err)
}
@@ -1100,7 +1100,7 @@ func TestThresholdRuleClickHouseTmpl(t *testing.T) {

fm := featureManager.StartManager()
for idx, c := range cases {
rule, err := NewThresholdRule("69", &postableRule, fm, nil, true, WithEvalDelay(2*time.Minute))
rule, err := NewThresholdRule("69", &postableRule, fm, nil, true, true, WithEvalDelay(2*time.Minute))
if err != nil {
assert.NoError(t, err)
}
@@ -1241,9 +1241,9 @@ func TestThresholdRuleUnitCombinations(t *testing.T) {
}

options := clickhouseReader.NewOptions("", 0, 0, 0, "", "archiveNamespace")
reader := clickhouseReader.NewReaderFromClickhouseConnection(mock, options, nil, "", fm, "", true)
reader := clickhouseReader.NewReaderFromClickhouseConnection(mock, options, nil, "", fm, "", true, true)

rule, err := NewThresholdRule("69", &postableRule, fm, reader, true)
rule, err := NewThresholdRule("69", &postableRule, fm, reader, true, true)
rule.TemporalityMap = map[string]map[v3.Temporality]bool{
"signoz_calls_total": {
v3.Delta: true,
@@ -1340,9 +1340,9 @@ func TestThresholdRuleNoData(t *testing.T) {
}

options := clickhouseReader.NewOptions("", 0, 0, 0, "", "archiveNamespace")
reader := clickhouseReader.NewReaderFromClickhouseConnection(mock, options, nil, "", fm, "", true)
reader := clickhouseReader.NewReaderFromClickhouseConnection(mock, options, nil, "", fm, "", true, true)

rule, err := NewThresholdRule("69", &postableRule, fm, reader, true)
rule, err := NewThresholdRule("69", &postableRule, fm, reader, true, true)
rule.TemporalityMap = map[string]map[v3.Temporality]bool{
"signoz_calls_total": {
v3.Delta: true,
@@ -1445,9 +1445,9 @@ func TestThresholdRuleTracesLink(t *testing.T) {
}

options := clickhouseReader.NewOptions("", 0, 0, 0, "", "archiveNamespace")
reader := clickhouseReader.NewReaderFromClickhouseConnection(mock, options, nil, "", fm, "", true)
reader := clickhouseReader.NewReaderFromClickhouseConnection(mock, options, nil, "", fm, "", true, true)

rule, err := NewThresholdRule("69", &postableRule, fm, reader, true)
rule, err := NewThresholdRule("69", &postableRule, fm, reader, true, true)
rule.TemporalityMap = map[string]map[v3.Temporality]bool{
"signoz_calls_total": {
v3.Delta: true,
@@ -1570,9 +1570,9 @@ func TestThresholdRuleLogsLink(t *testing.T) {
}

options := clickhouseReader.NewOptions("", 0, 0, 0, "", "archiveNamespace")
reader := clickhouseReader.NewReaderFromClickhouseConnection(mock, options, nil, "", fm, "", true)
reader := clickhouseReader.NewReaderFromClickhouseConnection(mock, options, nil, "", fm, "", true, true)

rule, err := NewThresholdRule("69", &postableRule, fm, reader, true)
rule, err := NewThresholdRule("69", &postableRule, fm, reader, true, true)
rule.TemporalityMap = map[string]map[v3.Temporality]bool{
"signoz_calls_total": {
v3.Delta: true,
@@ -1648,7 +1648,7 @@ func TestThresholdRuleShiftBy(t *testing.T) {
},
}

rule, err := NewThresholdRule("69", &postableRule, nil, nil, true)
rule, err := NewThresholdRule("69", &postableRule, nil, nil, true, true)
if err != nil {
assert.NoError(t, err)
}
@@ -46,6 +46,7 @@ func NewMockClickhouseReader(
featureFlags,
"",
true,
true,
)

return reader, mockDB
@@ -35,6 +35,8 @@ func GetListTsRanges(start, end int64) []LogsListTsRange {
tStartNano = startNano
}
}
} else {
result = append(result, LogsListTsRange{Start: startNano, End: endNano})
}
return result
}
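The test change in the next hunk makes the intent of this hunk concrete: for a window of one hour or less, GetListTsRanges now returns the single full [start, end] range instead of an empty slice. A self-contained sketch of that windowing behavior; the longer-than-one-hour branch here is a simplification with fixed one-hour steps, not the actual implementation:

```go
package main

import (
	"fmt"
	"time"
)

type LogsListTsRange struct {
	Start int64
	End   int64
}

// getListTsRanges sketches the behavior the test exercises: a sub-hour window
// comes back as one range covering the whole interval.
func getListTsRanges(start, end int64) []LogsListTsRange {
	hour := time.Hour.Nanoseconds()
	if end-start <= hour {
		return []LogsListTsRange{{Start: start, End: end}}
	}
	var result []LogsListTsRange
	for cur := end; cur > start; cur -= hour {
		lo := cur - hour
		if lo < start {
			lo = start
		}
		result = append(result, LogsListTsRange{Start: lo, End: cur})
	}
	return result
}

func main() {
	// Same inputs as the "less than one hour" test case in the diff.
	fmt.Println(getListTsRanges(1722262800000000000, 1722263800000000000))
}
```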
@@ -18,7 +18,9 @@ func TestListTsRange(t *testing.T) {
name: "testing for less then one hour",
start: 1722262800000000000, // July 29, 2024 7:50:00 PM
end: 1722263800000000000, // July 29, 2024 8:06:40 PM
res: []LogsListTsRange{},
res: []LogsListTsRange{
{1722262800000000000, 1722263800000000000},
},
},
{
name: "testing for more than one hour",