fix: nil err check (#9662)

This pull request refactors error-variable naming throughout the codebase for improved clarity and consistency. The main change is replacing the generic variable name `err` with `apiErr` wherever the error being handled is of type `*model.ApiError`, so that `*model.ApiError` values are visually distinct from plain `error` values and cannot be confused in shadowed scopes. Related function signatures and comments were updated to match, and the misleadingly named `setTTLQueryStatus` was renamed to `getTTLQueryStatus` to reflect that it reads rather than writes state. This is primarily a code-quality and maintainability improvement; aside from the renames and minor error-propagation cleanups (e.g. `queryRangeV3` now responds with the original `*model.ApiError` instead of re-wrapping it as an internal error), no business logic is intended to change.
This commit is contained in:
Tushar Vats
2025-12-01 09:38:17 +05:30
committed by GitHub
parent 8d61ee338b
commit af57d11b6a
8 changed files with 111 additions and 107 deletions

View File

@@ -19,7 +19,7 @@ type inMemoryQueryProgressTracker struct {
func (tracker *inMemoryQueryProgressTracker) ReportQueryStarted(
queryId string,
) (postQueryCleanup func(), err *model.ApiError) {
) (postQueryCleanup func(), apiErr *model.ApiError) {
tracker.lock.Lock()
defer tracker.lock.Unlock()

View File

@@ -9,7 +9,7 @@ type QueryProgressTracker interface {
// Tells the tracker that query with id `queryId` has started.
// Progress can only be reported for and tracked for a query that is in progress.
// Returns a cleanup function that must be called after the query finishes.
ReportQueryStarted(queryId string) (postQueryCleanup func(), err *model.ApiError)
ReportQueryStarted(queryId string) (postQueryCleanup func(), apiErr *model.ApiError)
// Report progress stats received from clickhouse for `queryId`
ReportQueryProgress(queryId string, chProgress *clickhouse.Progress) *model.ApiError
@@ -18,7 +18,7 @@ type QueryProgressTracker interface {
// The returned channel will produce `QueryProgress` instances representing
// the latest state of query progress stats. Also returns a function that
// can be called to unsubscribe before the query finishes, if needed.
SubscribeToQueryProgress(queryId string) (ch <-chan model.QueryProgress, unsubscribe func(), err *model.ApiError)
SubscribeToQueryProgress(queryId string) (ch <-chan model.QueryProgress, unsubscribe func(), apiErr *model.ApiError)
}
func NewQueryProgressTracker() QueryProgressTracker {

View File

@@ -1297,8 +1297,8 @@ func (r *ClickHouseReader) setTTLLogs(ctx context.Context, orgID string, params
// check if there is existing things to be done
for _, tableName := range tableNameArray {
statusItem, err := r.checkTTLStatusItem(ctx, orgID, tableName)
if err != nil {
statusItem, apiErr := r.checkTTLStatusItem(ctx, orgID, tableName)
if apiErr != nil {
return nil, &model.ApiError{Typ: model.ErrorExec, Err: fmt.Errorf("error in processing ttl_status check sql query")}
}
if statusItem.Status == constants.StatusPending {
@@ -1378,8 +1378,8 @@ func (r *ClickHouseReader) setTTLLogs(ctx context.Context, orgID string, params
err := r.setColdStorage(context.Background(), tableName, params.ColdStorageVolume)
if err != nil {
zap.L().Error("error in setting cold storage", zap.Error(err))
statusItem, err := r.checkTTLStatusItem(ctx, orgID, tableName)
if err == nil {
statusItem, apiErr := r.checkTTLStatusItem(ctx, orgID, tableName)
if apiErr == nil {
_, dbErr := r.
sqlDB.
BunDB().
@@ -1455,8 +1455,8 @@ func (r *ClickHouseReader) setTTLTraces(ctx context.Context, orgID string, param
// check if there is existing things to be done
for _, tableName := range tableNames {
statusItem, err := r.checkTTLStatusItem(ctx, orgID, tableName)
if err != nil {
statusItem, apiErr := r.checkTTLStatusItem(ctx, orgID, tableName)
if apiErr != nil {
return nil, &model.ApiError{Typ: model.ErrorExec, Err: fmt.Errorf("error in processing ttl_status check sql query")}
}
if statusItem.Status == constants.StatusPending {
@@ -1523,8 +1523,8 @@ func (r *ClickHouseReader) setTTLTraces(ctx context.Context, orgID string, param
err := r.setColdStorage(context.Background(), tableName, params.ColdStorageVolume)
if err != nil {
zap.L().Error("Error in setting cold storage", zap.Error(err))
statusItem, err := r.checkTTLStatusItem(ctx, orgID, tableName)
if err == nil {
statusItem, apiErr := r.checkTTLStatusItem(ctx, orgID, tableName)
if apiErr == nil {
_, dbErr := r.
sqlDB.
BunDB().
@@ -1669,8 +1669,8 @@ func (r *ClickHouseReader) SetTTLV2(ctx context.Context, orgID string, params *m
}
for _, tableName := range tableNames {
statusItem, err := r.checkCustomRetentionTTLStatusItem(ctx, orgID, tableName)
if err != nil {
statusItem, apiErr := r.checkCustomRetentionTTLStatusItem(ctx, orgID, tableName)
if apiErr != nil {
return nil, errorsV2.Newf(errorsV2.TypeInternal, errorsV2.CodeInternal, "error in processing custom_retention_ttl_status check sql query")
}
if statusItem.Status == constants.StatusPending {
@@ -1974,8 +1974,8 @@ func (r *ClickHouseReader) checkCustomRetentionTTLStatusItem(ctx context.Context
}
func (r *ClickHouseReader) updateCustomRetentionTTLStatus(ctx context.Context, orgID, tableName, status string) {
statusItem, err := r.checkCustomRetentionTTLStatusItem(ctx, orgID, tableName)
if err == nil && statusItem != nil {
statusItem, apiErr := r.checkCustomRetentionTTLStatusItem(ctx, orgID, tableName)
if apiErr == nil && statusItem != nil {
_, dbErr := r.sqlDB.BunDB().NewUpdate().
Model(new(types.TTLSetting)).
Set("updated_at = ?", time.Now()).
@@ -2126,8 +2126,8 @@ func (r *ClickHouseReader) setTTLMetrics(ctx context.Context, orgID string, para
signozMetricDBName + "." + signozTSLocalTableNameV41Week,
}
for _, tableName := range tableNames {
statusItem, err := r.checkTTLStatusItem(ctx, orgID, tableName)
if err != nil {
statusItem, apiErr := r.checkTTLStatusItem(ctx, orgID, tableName)
if apiErr != nil {
return nil, &model.ApiError{Typ: model.ErrorExec, Err: fmt.Errorf("error in processing ttl_status check sql query")}
}
if statusItem.Status == constants.StatusPending {
@@ -2176,8 +2176,8 @@ func (r *ClickHouseReader) setTTLMetrics(ctx context.Context, orgID string, para
err := r.setColdStorage(context.Background(), tableName, params.ColdStorageVolume)
if err != nil {
zap.L().Error("Error in setting cold storage", zap.Error(err))
statusItem, err := r.checkTTLStatusItem(ctx, orgID, tableName)
if err == nil {
statusItem, apiErr := r.checkTTLStatusItem(ctx, orgID, tableName)
if apiErr == nil {
_, dbErr := r.
sqlDB.
BunDB().
@@ -2285,17 +2285,17 @@ func (r *ClickHouseReader) checkTTLStatusItem(ctx context.Context, orgID string,
return ttl, nil
}
// setTTLQueryStatus fetches ttl_status table status from DB
func (r *ClickHouseReader) setTTLQueryStatus(ctx context.Context, orgID string, tableNameArray []string) (string, *model.ApiError) {
// getTTLQueryStatus fetches ttl_status table status from DB
func (r *ClickHouseReader) getTTLQueryStatus(ctx context.Context, orgID string, tableNameArray []string) (string, *model.ApiError) {
failFlag := false
status := constants.StatusSuccess
for _, tableName := range tableNameArray {
statusItem, err := r.checkTTLStatusItem(ctx, orgID, tableName)
statusItem, apiErr := r.checkTTLStatusItem(ctx, orgID, tableName)
emptyStatusStruct := new(types.TTLSetting)
if statusItem == emptyStatusStruct {
return "", nil
}
if err != nil {
if apiErr != nil {
return "", &model.ApiError{Typ: model.ErrorExec, Err: fmt.Errorf("error in processing ttl_status check sql query")}
}
if statusItem.Status == constants.StatusPending && statusItem.UpdatedAt.Unix()-time.Now().Unix() < 3600 {
@@ -2439,20 +2439,26 @@ func (r *ClickHouseReader) GetTTL(ctx context.Context, orgID string, ttlParams *
switch ttlParams.Type {
case constants.TraceTTL:
tableNameArray := []string{signozTraceDBName + "." + signozTraceTableName, signozTraceDBName + "." + signozDurationMVTable, signozTraceDBName + "." + signozSpansTable, signozTraceDBName + "." + signozErrorIndexTable, signozTraceDBName + "." + signozUsageExplorerTable, signozTraceDBName + "." + defaultDependencyGraphTable}
tableNameArray := []string{
r.TraceDB + "." + r.traceTableName,
r.TraceDB + "." + r.traceResourceTableV3,
r.TraceDB + "." + signozErrorIndexTable,
r.TraceDB + "." + signozUsageExplorerTable,
r.TraceDB + "." + defaultDependencyGraphTable,
r.TraceDB + "." + r.traceSummaryTable,
}
tableNameArray = getLocalTableNameArray(tableNameArray)
status, err := r.setTTLQueryStatus(ctx, orgID, tableNameArray)
if err != nil {
return nil, err
status, apiErr := r.getTTLQueryStatus(ctx, orgID, tableNameArray)
if apiErr != nil {
return nil, apiErr
}
dbResp, err := getTracesTTL()
if err != nil {
return nil, err
dbResp, apiErr := getTracesTTL()
if apiErr != nil {
return nil, apiErr
}
ttlQuery, err := r.checkTTLStatusItem(ctx, orgID, tableNameArray[0])
if err != nil {
return nil, err
ttlQuery, apiErr := r.checkTTLStatusItem(ctx, orgID, tableNameArray[0])
if apiErr != nil {
return nil, apiErr
}
ttlQuery.TTL = ttlQuery.TTL / 3600 // convert to hours
if ttlQuery.ColdStorageTTL != -1 {
@@ -2465,17 +2471,17 @@ func (r *ClickHouseReader) GetTTL(ctx context.Context, orgID string, ttlParams *
case constants.MetricsTTL:
tableNameArray := []string{signozMetricDBName + "." + signozSampleTableName}
tableNameArray = getLocalTableNameArray(tableNameArray)
status, err := r.setTTLQueryStatus(ctx, orgID, tableNameArray)
if err != nil {
return nil, err
status, apiErr := r.getTTLQueryStatus(ctx, orgID, tableNameArray)
if apiErr != nil {
return nil, apiErr
}
dbResp, err := getMetricsTTL()
if err != nil {
return nil, err
dbResp, apiErr := getMetricsTTL()
if apiErr != nil {
return nil, apiErr
}
ttlQuery, err := r.checkTTLStatusItem(ctx, orgID, tableNameArray[0])
if err != nil {
return nil, err
ttlQuery, apiErr := r.checkTTLStatusItem(ctx, orgID, tableNameArray[0])
if apiErr != nil {
return nil, apiErr
}
ttlQuery.TTL = ttlQuery.TTL / 3600 // convert to hours
if ttlQuery.ColdStorageTTL != -1 {
@@ -2488,17 +2494,17 @@ func (r *ClickHouseReader) GetTTL(ctx context.Context, orgID string, ttlParams *
case constants.LogsTTL:
tableNameArray := []string{r.logsDB + "." + r.logsTableName}
tableNameArray = getLocalTableNameArray(tableNameArray)
status, err := r.setTTLQueryStatus(ctx, orgID, tableNameArray)
if err != nil {
return nil, err
status, apiErr := r.getTTLQueryStatus(ctx, orgID, tableNameArray)
if apiErr != nil {
return nil, apiErr
}
dbResp, err := getLogsTTL()
if err != nil {
return nil, err
dbResp, apiErr := getLogsTTL()
if apiErr != nil {
return nil, apiErr
}
ttlQuery, err := r.checkTTLStatusItem(ctx, orgID, tableNameArray[0])
if err != nil {
return nil, err
ttlQuery, apiErr := r.checkTTLStatusItem(ctx, orgID, tableNameArray[0])
if apiErr != nil {
return nil, apiErr
}
ttlQuery.TTL = ttlQuery.TTL / 3600 // convert to hours
if ttlQuery.ColdStorageTTL != -1 {
@@ -2681,19 +2687,19 @@ func (r *ClickHouseReader) GetNextPrevErrorIDs(ctx context.Context, queryParams
zap.L().Error("errorId missing from params")
return nil, &model.ApiError{Typ: model.ErrorBadData, Err: fmt.Errorf("ErrorID missing from params")}
}
var err *model.ApiError
var apiErr *model.ApiError
getNextPrevErrorIDsResponse := model.NextPrevErrorIDs{
GroupID: queryParams.GroupID,
}
getNextPrevErrorIDsResponse.NextErrorID, getNextPrevErrorIDsResponse.NextTimestamp, err = r.getNextErrorID(ctx, queryParams)
if err != nil {
zap.L().Error("Unable to get next error ID due to err: ", zap.Error(err))
return nil, err
getNextPrevErrorIDsResponse.NextErrorID, getNextPrevErrorIDsResponse.NextTimestamp, apiErr = r.getNextErrorID(ctx, queryParams)
if apiErr != nil {
zap.L().Error("Unable to get next error ID due to err: ", zap.Error(apiErr))
return nil, apiErr
}
getNextPrevErrorIDsResponse.PrevErrorID, getNextPrevErrorIDsResponse.PrevTimestamp, err = r.getPrevErrorID(ctx, queryParams)
if err != nil {
zap.L().Error("Unable to get prev error ID due to err: ", zap.Error(err))
return nil, err
getNextPrevErrorIDsResponse.PrevErrorID, getNextPrevErrorIDsResponse.PrevTimestamp, apiErr = r.getPrevErrorID(ctx, queryParams)
if apiErr != nil {
zap.L().Error("Unable to get prev error ID due to err: ", zap.Error(apiErr))
return nil, apiErr
}
return &getNextPrevErrorIDsResponse, nil

View File

@@ -985,14 +985,14 @@ func (aH *APIHandler) metaForLinks(ctx context.Context, rule *ruletypes.Gettable
keys := make(map[string]v3.AttributeKey)
if rule.AlertType == ruletypes.AlertTypeLogs {
logFields, err := aH.reader.GetLogFieldsFromNames(ctx, logsv3.GetFieldNames(rule.PostableRule.RuleCondition.CompositeQuery))
if err == nil {
logFields, apiErr := aH.reader.GetLogFieldsFromNames(ctx, logsv3.GetFieldNames(rule.PostableRule.RuleCondition.CompositeQuery))
if apiErr == nil {
params := &v3.QueryRangeParamsV3{
CompositeQuery: rule.RuleCondition.CompositeQuery,
}
keys = model.GetLogFieldsV3(ctx, params, logFields)
} else {
zap.L().Error("failed to get log fields using empty keys; the link might not work as expected", zap.Error(err))
zap.L().Error("failed to get log fields using empty keys; the link might not work as expected", zap.Error(apiErr))
}
} else if rule.AlertType == ruletypes.AlertTypeTraces {
traceFields, err := aH.reader.GetSpanAttributeKeysByNames(ctx, logsv3.GetFieldNames(rule.PostableRule.RuleCondition.CompositeQuery))
@@ -4295,9 +4295,9 @@ func (aH *APIHandler) getQueryBuilderSuggestions(w http.ResponseWriter, r *http.
return
}
response, err := aH.reader.GetQBFilterSuggestionsForLogs(r.Context(), req)
if err != nil {
RespondError(w, err, nil)
response, apiErr := aH.reader.GetQBFilterSuggestionsForLogs(r.Context(), req)
if apiErr != nil {
RespondError(w, apiErr, nil)
return
}
@@ -4453,10 +4453,9 @@ func (aH *APIHandler) queryRangeV3(ctx context.Context, queryRangeParams *v3.Que
}
// check if any enrichment is required for logs if yes then enrich them
if logsv3.EnrichmentRequired(queryRangeParams) && hasLogsQuery {
logsFields, err := aH.reader.GetLogFieldsFromNames(ctx, logsv3.GetFieldNames(queryRangeParams.CompositeQuery))
if err != nil {
apiErrObj := &model.ApiError{Typ: model.ErrorInternal, Err: err}
RespondError(w, apiErrObj, errQuriesByName)
logsFields, apiErr := aH.reader.GetLogFieldsFromNames(ctx, logsv3.GetFieldNames(queryRangeParams.CompositeQuery))
if apiErr != nil {
RespondError(w, apiErr, errQuriesByName)
return
}
// get the fields if any logs query is present
@@ -4493,12 +4492,12 @@ func (aH *APIHandler) queryRangeV3(ctx context.Context, queryRangeParams *v3.Que
// Hook up query progress tracking if requested
queryIdHeader := r.Header.Get("X-SIGNOZ-QUERY-ID")
if len(queryIdHeader) > 0 {
onQueryFinished, err := aH.reader.ReportQueryStartForProgressTracking(queryIdHeader)
onQueryFinished, apiErr := aH.reader.ReportQueryStartForProgressTracking(queryIdHeader)
if err != nil {
if apiErr != nil {
zap.L().Error(
"couldn't report query start for progress tracking",
zap.String("queryId", queryIdHeader), zap.Error(err),
zap.String("queryId", queryIdHeader), zap.Error(apiErr),
)
} else {
@@ -4809,10 +4808,9 @@ func (aH *APIHandler) queryRangeV4(ctx context.Context, queryRangeParams *v3.Que
// check if any enrichment is required for logs if yes then enrich them
if logsv3.EnrichmentRequired(queryRangeParams) && hasLogsQuery {
// get the fields if any logs query is present
logsFields, err := aH.reader.GetLogFieldsFromNames(r.Context(), logsv3.GetFieldNames(queryRangeParams.CompositeQuery))
if err != nil {
apiErrObj := &model.ApiError{Typ: model.ErrorInternal, Err: err}
RespondError(w, apiErrObj, nil)
logsFields, apiErr := aH.reader.GetLogFieldsFromNames(r.Context(), logsv3.GetFieldNames(queryRangeParams.CompositeQuery))
if apiErr != nil {
RespondError(w, apiErr, nil)
return
}
fields := model.GetLogFieldsV3(r.Context(), queryRangeParams, logsFields)

View File

@@ -293,7 +293,7 @@ func (m *Manager) dashboardUuid(integrationId string, dashboardId string) string
}
func (m *Manager) parseDashboardUuid(dashboardUuid string) (
integrationId string, dashboardId string, err *model.ApiError,
integrationId string, dashboardId string, apiErr *model.ApiError,
) {
parts := strings.SplitN(dashboardUuid, "--", 3)
if len(parts) != 3 || parts[0] != "integration" {

View File

@@ -62,23 +62,23 @@ func (receiver *SummaryService) FilterValues(ctx context.Context, orgID valuer.U
response.FilterValues = filterValues
return &response, nil
case "metric_unit":
attributes, err := receiver.reader.GetAllMetricFilterUnits(ctx, params)
if err != nil {
return nil, err
attributes, apiErr := receiver.reader.GetAllMetricFilterUnits(ctx, params)
if apiErr != nil {
return nil, apiErr
}
response.FilterValues = attributes
return &response, nil
case "metric_type":
attributes, err := receiver.reader.GetAllMetricFilterTypes(ctx, params)
if err != nil {
return nil, err
attributes, apiErr := receiver.reader.GetAllMetricFilterTypes(ctx, params)
if apiErr != nil {
return nil, apiErr
}
response.FilterValues = attributes
return &response, nil
default:
attributes, err := receiver.reader.GetAllMetricFilterAttributeValues(ctx, params)
if err != nil {
return nil, err
attributes, apiErr := receiver.reader.GetAllMetricFilterAttributeValues(ctx, params)
if apiErr != nil {
return nil, apiErr
}
response.FilterValues = attributes
return &response, nil
@@ -108,45 +108,45 @@ func (receiver *SummaryService) GetMetricsSummary(ctx context.Context, orgID val
})
g.Go(func() error {
dataPoints, err := receiver.reader.GetMetricsDataPoints(ctx, metricName)
if err != nil {
return err
dataPoints, apiErr := receiver.reader.GetMetricsDataPoints(ctx, metricName)
if apiErr != nil {
return apiErr.ToError()
}
metricDetailsDTO.Samples = dataPoints
return nil
})
g.Go(func() error {
lastReceived, err := receiver.reader.GetMetricsLastReceived(ctx, metricName)
if err != nil {
return err
lastReceived, apiErr := receiver.reader.GetMetricsLastReceived(ctx, metricName)
if apiErr != nil {
return apiErr.ToError()
}
metricDetailsDTO.LastReceived = lastReceived
return nil
})
g.Go(func() error {
totalSeries, err := receiver.reader.GetTotalTimeSeriesForMetricName(ctx, metricName)
if err != nil {
return err
totalSeries, apiErr := receiver.reader.GetTotalTimeSeriesForMetricName(ctx, metricName)
if apiErr != nil {
return apiErr.ToError()
}
metricDetailsDTO.TimeSeriesTotal = totalSeries
return nil
})
g.Go(func() error {
activeSeries, err := receiver.reader.GetActiveTimeSeriesForMetricName(ctx, metricName, 120*time.Minute)
if err != nil {
return err
activeSeries, apiErr := receiver.reader.GetActiveTimeSeriesForMetricName(ctx, metricName, 120*time.Minute)
if apiErr != nil {
return apiErr.ToError()
}
metricDetailsDTO.TimeSeriesActive = activeSeries
return nil
})
g.Go(func() error {
attributes, err := receiver.reader.GetAttributesForMetricName(ctx, metricName, nil, nil, nil)
if err != nil {
return err
attributes, apiErr := receiver.reader.GetAttributesForMetricName(ctx, metricName, nil, nil, nil)
if apiErr != nil {
return apiErr.ToError()
}
if attributes != nil {
metricDetailsDTO.Attributes = *attributes

View File

@@ -95,7 +95,7 @@ type Reader interface {
GetMinAndMaxTimestampForTraceID(ctx context.Context, traceID []string) (int64, int64, error)
// Query Progress tracking helpers.
ReportQueryStartForProgressTracking(queryId string) (reportQueryFinished func(), err *model.ApiError)
ReportQueryStartForProgressTracking(queryId string) (reportQueryFinished func(), apiErr *model.ApiError)
SubscribeToQueryProgress(queryId string) (<-chan model.QueryProgress, func(), *model.ApiError)
GetCountOfThings(ctx context.Context, query string) (uint64, error)

View File

@@ -404,9 +404,9 @@ func (r *ThresholdRule) buildAndRunQuery(ctx context.Context, orgID valuer.UUID,
if hasLogsQuery {
// check if any enrichment is required for logs if yes then enrich them
if logsv3.EnrichmentRequired(params) {
logsFields, err := r.reader.GetLogFieldsFromNames(ctx, logsv3.GetFieldNames(params.CompositeQuery))
if err != nil {
return nil, err
logsFields, apiErr := r.reader.GetLogFieldsFromNames(ctx, logsv3.GetFieldNames(params.CompositeQuery))
if apiErr != nil {
return nil, apiErr.ToError()
}
logsKeys := model.GetLogFieldsV3(ctx, params, logsFields)
r.logsKeys = logsKeys