mirror of https://github.com/SigNoz/signoz.git (synced 2025-12-27 18:54:27 +00:00)

Compare commits: v0.63.0-de...v0.64.0-74 (4 commits)

| Author | SHA1 | Date |
|---|---|---|
|  | 7405bfbbee |  |
|  | 67e822e23e |  |
|  | 60dc479a19 |  |
|  | 85cf4f4e2e |  |
@@ -24,13 +24,13 @@ const MQServiceDetailTypePerView = (
 	producerLatencyOption: ProducerLatencyOptions,
 ): Record<string, MessagingQueueServiceDetailType[]> => ({
 	[MessagingQueuesViewType.consumerLag.value]: [
-		MessagingQueueServiceDetailType.ConsumerDetails,
 		MessagingQueueServiceDetailType.ProducerDetails,
+		MessagingQueueServiceDetailType.ConsumerDetails,
 		MessagingQueueServiceDetailType.NetworkLatency,
 	],
 	[MessagingQueuesViewType.partitionLatency.value]: [
-		MessagingQueueServiceDetailType.ConsumerDetails,
 		MessagingQueueServiceDetailType.ProducerDetails,
+		MessagingQueueServiceDetailType.ConsumerDetails,
 	],
 	[MessagingQueuesViewType.producerLatency.value]: [
 		producerLatencyOption === ProducerLatencyOptions.Consumers
@@ -122,7 +122,7 @@ function MessagingQueuesDetails({
 	producerLatencyOption: ProducerLatencyOptions;
 }): JSX.Element {
 	const [currentTab, setCurrentTab] = useState<MessagingQueueServiceDetailType>(
-		MessagingQueueServiceDetailType.ConsumerDetails,
+		MessagingQueueServiceDetailType.ProducerDetails,
 	);

 	useEffect(() => {
@@ -179,10 +179,13 @@ export const convertToNanoseconds = (timestamp: number): bigint =>
 export const getStartAndEndTimesInMilliseconds = (
 	timestamp: number,
 ): { start: number; end: number } => {
-	const FIVE_MINUTES_IN_MILLISECONDS = 5 * 60 * 1000; // 5 minutes in milliseconds - check with Shivanshu once
+	const FIVE_MINUTES_IN_MILLISECONDS = 5 * 60 * 1000; // 300,000 milliseconds

-	const start = Math.floor(timestamp);
-	const end = Math.floor(start + FIVE_MINUTES_IN_MILLISECONDS);
+	// Convert timestamp to milliseconds and floor it
+	const pointInTime = Math.floor(timestamp * 1000);
+
+	const start = Math.floor(pointInTime - FIVE_MINUTES_IN_MILLISECONDS);
+	const end = Math.floor(pointInTime + FIVE_MINUTES_IN_MILLISECONDS);

 	return { start, end };
 };
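The fix above looks like a unit bug: the old helper used the incoming timestamp directly as if it were already in milliseconds and built a one-sided window, while the new code scales seconds to milliseconds first and centers a five-minute window on either side. A minimal Go sketch of the corrected arithmetic (function and variable names are illustrative, not from the codebase):

```go
package main

import "fmt"

const fiveMinutesMs = 5 * 60 * 1000 // 300,000 ms, mirrors FIVE_MINUTES_IN_MILLISECONDS

// windowAround converts a seconds-based epoch timestamp to milliseconds and
// returns a window extending five minutes on either side of that point.
func windowAround(tsSeconds float64) (start, end int64) {
	pointInTime := int64(tsSeconds * 1000) // seconds -> milliseconds
	return pointInTime - fiveMinutesMs, pointInTime + fiveMinutesMs
}

func main() {
	start, end := windowAround(1700000000)
	fmt.Println(start, end) // 1699999700000 1700000300000
}
```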
@@ -311,8 +314,8 @@ export const getMetaDataAndAPIPerView = (
 	return {
 		[MessagingQueuesViewType.consumerLag.value]: {
 			tableApiPayload: {
-				start: (selectedTimelineQuery?.start || 0) * 1e9,
-				end: (selectedTimelineQuery?.end || 0) * 1e9,
+				start: (selectedTimelineQuery?.start || 0) * 1e6,
+				end: (selectedTimelineQuery?.end || 0) * 1e6,
 				variables: {
 					partition: selectedTimelineQuery?.partition,
 					topic: selectedTimelineQuery?.topic,
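The payload change swaps the scale factor: given that the timeline query's start/end now arrive as millisecond epochs (per the helper above), reaching nanoseconds requires multiplying by 1e6, not the seconds-to-nanoseconds factor 1e9. A small standalone sketch of the two conversions (illustrative names):

```go
package main

import "fmt"

// The two scale factors in play: seconds need *1e9 to become nanoseconds,
// while milliseconds only need *1e6.
func nsFromSeconds(sec int64) int64 { return sec * 1e9 }
func nsFromMillis(ms int64) int64   { return ms * 1e6 }

func main() {
	// The same instant expressed in both units maps to the same nanosecond value.
	fmt.Println(nsFromSeconds(1700000000))   // 1700000000000000000
	fmt.Println(nsFromMillis(1700000000000)) // 1700000000000000000
}
```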
@@ -2694,8 +2694,8 @@ func (r *ClickHouseReader) GetTagsInfoInLastHeartBeatInterval(ctx context.Contex
 }

 // remove this after sometime
-func removeUnderscoreDuplicateFields(fields []model.LogField) []model.LogField {
-	lookup := map[string]model.LogField{}
+func removeUnderscoreDuplicateFields(fields []model.Field) []model.Field {
+	lookup := map[string]model.Field{}
 	for _, v := range fields {
 		lookup[v.Name+v.DataType] = v
 	}
@@ -2706,7 +2706,7 @@ func removeUnderscoreDuplicateFields(fields []model.LogField) []model.LogField {
 		}
 	}

-	updatedFields := []model.LogField{}
+	updatedFields := []model.Field{}
 	for _, v := range lookup {
 		updatedFields = append(updatedFields, v)
 	}
@@ -2717,11 +2717,11 @@ func (r *ClickHouseReader) GetLogFields(ctx context.Context) (*model.GetFieldsRe
 	// response will contain top level fields from the otel log model
 	response := model.GetFieldsResponse{
 		Selected:    constants.StaticSelectedLogFields,
-		Interesting: []model.LogField{},
+		Interesting: []model.Field{},
 	}

 	// get attribute keys
-	attributes := []model.LogField{}
+	attributes := []model.Field{}
 	query := fmt.Sprintf("SELECT DISTINCT name, datatype from %s.%s group by name, datatype", r.logsDB, r.logsAttributeKeys)
 	err := r.db.Select(ctx, &attributes, query)
 	if err != nil {
@@ -2729,7 +2729,7 @@ func (r *ClickHouseReader) GetLogFields(ctx context.Context) (*model.GetFieldsRe
 	}

 	// get resource keys
-	resources := []model.LogField{}
+	resources := []model.Field{}
 	query = fmt.Sprintf("SELECT DISTINCT name, datatype from %s.%s group by name, datatype", r.logsDB, r.logsResourceKeys)
 	err = r.db.Select(ctx, &resources, query)
 	if err != nil {
@@ -2753,9 +2753,11 @@ func (r *ClickHouseReader) GetLogFields(ctx context.Context) (*model.GetFieldsRe
 	return &response, nil
 }

-func (r *ClickHouseReader) extractSelectedAndInterestingFields(tableStatement string, fieldType string, fields *[]model.LogField, response *model.GetFieldsResponse) {
+func (r *ClickHouseReader) extractSelectedAndInterestingFields(tableStatement string, overrideFieldType string, fields *[]model.Field, response *model.GetFieldsResponse) {
 	for _, field := range *fields {
-		field.Type = fieldType
+		if overrideFieldType != "" {
+			field.Type = overrideFieldType
+		}
 		// all static fields are assumed to be selected as we don't allow changing them
 		if isColumn(r.useLogsNewSchema, tableStatement, field.Type, field.Name, field.DataType) {
 			response.Selected = append(response.Selected, field)
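The renamed parameter also changes semantics: logs callers keep passing an explicit type that overwrites field.Type, while the new traces caller (below) passes an empty string so each field keeps the tagType it was scanned with. A reduced sketch of that control flow (simplified types, illustrative names):

```go
package main

import "fmt"

type field struct{ Name, Type string }

// applyOverride mirrors the patched loop: only a non-empty override
// replaces the type each field already carries.
func applyOverride(fields []field, overrideFieldType string) []field {
	out := make([]field, 0, len(fields))
	for _, f := range fields {
		if overrideFieldType != "" {
			f.Type = overrideFieldType
		}
		out = append(out, f)
	}
	return out
}

func main() {
	fs := []field{{"http.method", "tag"}, {"host.name", "resource"}}
	fmt.Println(applyOverride(fs, "attributes")) // logs path: every field becomes "attributes"
	fmt.Println(applyOverride(fs, ""))           // traces path: original types preserved
}
```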
@@ -2945,6 +2947,165 @@ func (r *ClickHouseReader) UpdateLogField(ctx context.Context, field *model.Upda
 	return nil
 }

+func (r *ClickHouseReader) GetTraceFields(ctx context.Context) (*model.GetFieldsResponse, *model.ApiError) {
+	// response will contain top level fields from the otel trace model
+	response := model.GetFieldsResponse{
+		Selected:    []model.Field{},
+		Interesting: []model.Field{},
+	}
+
+	// get the top level selected fields
+	for _, field := range constants.NewStaticFieldsTraces {
+		if (v3.AttributeKey{} == field) {
+			continue
+		}
+		response.Selected = append(response.Selected, model.Field{
+			Name:     field.Key,
+			DataType: field.DataType.String(),
+			Type:     constants.Static,
+		})
+	}
+
+	// get attribute keys
+	attributes := []model.Field{}
+	query := fmt.Sprintf("SELECT tagKey, tagType, dataType from %s.%s group by tagKey, tagType, dataType", r.TraceDB, r.spanAttributesKeysTable)
+	rows, err := r.db.Query(ctx, query)
+	if err != nil {
+		return nil, &model.ApiError{Err: err, Typ: model.ErrorInternal}
+	}
+	defer rows.Close()
+
+	var tagKey string
+	var dataType string
+	var tagType string
+	for rows.Next() {
+		if err := rows.Scan(&tagKey, &tagType, &dataType); err != nil {
+			return nil, &model.ApiError{Err: err, Typ: model.ErrorInternal}
+		}
+		attributes = append(attributes, model.Field{
+			Name:     tagKey,
+			DataType: dataType,
+			Type:     tagType,
+		})
+	}
+
+	statements := []model.ShowCreateTableStatement{}
+	query = fmt.Sprintf("SHOW CREATE TABLE %s.%s", r.TraceDB, r.traceLocalTableName)
+	err = r.db.Select(ctx, &statements, query)
+	if err != nil {
+		return nil, &model.ApiError{Err: err, Typ: model.ErrorInternal}
+	}
+
+	r.extractSelectedAndInterestingFields(statements[0].Statement, "", &attributes, &response)
+
+	return &response, nil
+
+}
+
+func (r *ClickHouseReader) UpdateTraceField(ctx context.Context, field *model.UpdateField) *model.ApiError {
+	if !field.Selected {
+		return model.ForbiddenError(errors.New("removing a selected field is not allowed, please reach out to support."))
+	}
+
+	// name of the materialized column
+	colname := utils.GetClickhouseColumnNameV2(field.Type, field.DataType, field.Name)
+
+	field.DataType = strings.ToLower(field.DataType)
+
+	// dataType and chDataType of the materialized column
+	var dataTypeMap = map[string]string{
+		"string":  "string",
+		"bool":    "bool",
+		"int64":   "number",
+		"float64": "number",
+	}
+	var chDataTypeMap = map[string]string{
+		"string":  "String",
+		"bool":    "Bool",
+		"int64":   "Float64",
+		"float64": "Float64",
+	}
+	chDataType := chDataTypeMap[field.DataType]
+	dataType := dataTypeMap[field.DataType]
+
+	// typeName: tag => attributes, resource => resources
+	typeName := field.Type
+	if field.Type == string(v3.AttributeKeyTypeTag) {
+		typeName = constants.Attributes
+	} else if field.Type == string(v3.AttributeKeyTypeResource) {
+		typeName = constants.Resources
+	}
+
+	attrColName := fmt.Sprintf("%s_%s", typeName, dataType)
+	for _, table := range []string{r.traceLocalTableName, r.traceTableName} {
+		q := "ALTER TABLE %s.%s ON CLUSTER %s ADD COLUMN IF NOT EXISTS `%s` %s DEFAULT %s['%s'] CODEC(ZSTD(1))"
+		query := fmt.Sprintf(q,
+			r.TraceDB, table,
+			r.cluster,
+			colname, chDataType,
+			attrColName,
+			field.Name,
+		)
+		err := r.db.Exec(ctx, query)
+		if err != nil {
+			return &model.ApiError{Err: err, Typ: model.ErrorInternal}
+		}
+
+		query = fmt.Sprintf("ALTER TABLE %s.%s ON CLUSTER %s ADD COLUMN IF NOT EXISTS `%s_exists` bool DEFAULT if(mapContains(%s, '%s') != 0, true, false) CODEC(ZSTD(1))",
+			r.TraceDB, table,
+			r.cluster,
+			colname,
+			attrColName,
+			field.Name,
+		)
+		err = r.db.Exec(ctx, query)
+		if err != nil {
+			return &model.ApiError{Err: err, Typ: model.ErrorInternal}
+		}
+	}
+
+	// create the index
+	if strings.ToLower(field.DataType) == "bool" {
+		// there is no point in creating index for bool attributes as the cardinality is just 2
+		return nil
+	}
+
+	if field.IndexType == "" {
+		field.IndexType = constants.DefaultLogSkipIndexType
+	}
+	if field.IndexGranularity == 0 {
+		field.IndexGranularity = constants.DefaultLogSkipIndexGranularity
+	}
+	query := fmt.Sprintf("ALTER TABLE %s.%s ON CLUSTER %s ADD INDEX IF NOT EXISTS `%s_idx` (`%s`) TYPE %s GRANULARITY %d",
+		r.TraceDB, r.traceLocalTableName,
+		r.cluster,
+		colname,
+		colname,
+		field.IndexType,
+		field.IndexGranularity,
+	)
+	err := r.db.Exec(ctx, query)
+	if err != nil {
+		return &model.ApiError{Err: err, Typ: model.ErrorInternal}
+	}
+
+	// add a default minmax index for numbers
+	if dataType == "number" {
+		query = fmt.Sprintf("ALTER TABLE %s.%s ON CLUSTER %s ADD INDEX IF NOT EXISTS `%s_minmax_idx` (`%s`) TYPE minmax GRANULARITY 1",
+			r.TraceDB, r.traceLocalTableName,
+			r.cluster,
+			colname,
+			colname,
+		)
+		err = r.db.Exec(ctx, query)
+		if err != nil {
+			return &model.ApiError{Err: err, Typ: model.ErrorInternal}
+		}
+	}
+
+	return nil
+}
+
 func (r *ClickHouseReader) GetLogs(ctx context.Context, params *model.LogsFilterParams) (*[]model.SignozLog, *model.ApiError) {
 	response := []model.SignozLog{}
 	fields, apiErr := r.GetLogFields(ctx)
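To see what UpdateTraceField actually sends to ClickHouse, it helps to expand the first ALTER with concrete values. A hedged example, assuming a string tag http.method whose materialized column name comes out as attribute_string_http$$method (the real name is produced by utils.GetClickhouseColumnNameV2, so treat every input here as illustrative):

```go
package main

import "fmt"

func main() {
	// Illustrative inputs; the actual column name comes from
	// utils.GetClickhouseColumnNameV2 and may differ.
	db, table, cluster := "signoz_traces", "signoz_index_v3", "cluster"
	colname, chDataType := "attribute_string_http$$method", "String"
	attrColName, fieldName := "attributes_string", "http.method"

	q := "ALTER TABLE %s.%s ON CLUSTER %s ADD COLUMN IF NOT EXISTS `%s` %s DEFAULT %s['%s'] CODEC(ZSTD(1))"
	fmt.Printf(q+"\n", db, table, cluster, colname, chDataType, attrColName, fieldName)
	// ALTER TABLE signoz_traces.signoz_index_v3 ON CLUSTER cluster
	// ADD COLUMN IF NOT EXISTS `attribute_string_http$$method` String
	// DEFAULT attributes_string['http.method'] CODEC(ZSTD(1))
}
```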
@@ -527,6 +527,9 @@ func (aH *APIHandler) RegisterRoutes(router *mux.Router, am *AuthMiddleware) {
 	router.HandleFunc("/api/v1/settings/ingestion_key", am.AdminAccess(aH.insertIngestionKey)).Methods(http.MethodPost)
 	router.HandleFunc("/api/v1/settings/ingestion_key", am.ViewAccess(aH.getIngestionKeys)).Methods(http.MethodGet)

+	router.HandleFunc("/api/v2/traces/fields", am.ViewAccess(aH.traceFields)).Methods(http.MethodGet)
+	router.HandleFunc("/api/v2/traces/fields", am.EditAccess(aH.updateTraceField)).Methods(http.MethodPost)
+
 	router.HandleFunc("/api/v1/version", am.OpenAccess(aH.getVersion)).Methods(http.MethodGet)
 	router.HandleFunc("/api/v1/featureFlags", am.OpenAccess(aH.getFeatureFlags)).Methods(http.MethodGet)
 	router.HandleFunc("/api/v1/configs", am.OpenAccess(aH.getConfigs)).Methods(http.MethodGet)
@@ -4892,3 +4895,35 @@ func (aH *APIHandler) QueryRangeV4(w http.ResponseWriter, r *http.Request) {

 	aH.queryRangeV4(r.Context(), queryRangeParams, w, r)
 }
+
+func (aH *APIHandler) traceFields(w http.ResponseWriter, r *http.Request) {
+	fields, apiErr := aH.reader.GetTraceFields(r.Context())
+	if apiErr != nil {
+		RespondError(w, apiErr, "failed to fetch fields from the db")
+		return
+	}
+	aH.WriteJSON(w, r, fields)
+}
+
+func (aH *APIHandler) updateTraceField(w http.ResponseWriter, r *http.Request) {
+	field := model.UpdateField{}
+	if err := json.NewDecoder(r.Body).Decode(&field); err != nil {
+		apiErr := &model.ApiError{Typ: model.ErrorBadData, Err: err}
+		RespondError(w, apiErr, "failed to decode payload")
+		return
+	}
+
+	err := logs.ValidateUpdateFieldPayloadV2(&field)
+	if err != nil {
+		apiErr := &model.ApiError{Typ: model.ErrorBadData, Err: err}
+		RespondError(w, apiErr, "incorrect payload")
+		return
+	}
+
+	apiErr := aH.reader.UpdateTraceField(r.Context(), &field)
+	if apiErr != nil {
+		RespondError(w, apiErr, "failed to update field in the db")
+		return
+	}
+	aH.WriteJSON(w, r, field)
+}
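A call against the new route mirrors the logs field-update API. A sketch of a POST /api/v2/traces/fields body that should pass ValidateUpdateFieldPayloadV2 (the struct below mirrors the fields of model.UpdateField that this code path reads; the exact JSON tags are an assumption, and the field values are examples only):

```go
package main

import (
	"encoding/json"
	"fmt"
)

// updateField approximates the JSON shape of model.UpdateField as used here;
// the real struct lives in the query-service model package.
type updateField struct {
	Name             string `json:"name"`
	DataType         string `json:"dataType"`
	Type             string `json:"type"`
	Selected         bool   `json:"selected"`
	IndexType        string `json:"indexType,omitempty"`
	IndexGranularity int    `json:"indexGranularity,omitempty"`
}

func main() {
	body, _ := json.Marshal(updateField{
		Name:     "http.method",
		DataType: "string",
		Type:     "tag", // traces accept tag/resource, not attributes/resources
		Selected: true,  // UpdateTraceField rejects selected=false
	})
	fmt.Println(string(body)) // POST this to /api/v2/traces/fields
}
```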
@@ -6,6 +6,8 @@ import (

 func generateConsumerSQL(start, end int64, topic, partition, consumerGroup, queueType string) string {
 	timeRange := (end - start) / 1000000000
+	tsBucketStart := (start / 1000000000) - 1800
+	tsBucketEnd := end / 1000000000
 	query := fmt.Sprintf(`
 WITH consumer_query AS (
     SELECT
@@ -18,6 +20,8 @@ WITH consumer_query AS (
 WHERE
     timestamp >= '%d'
     AND timestamp <= '%d'
+    AND ts_bucket_start >= '%d'
+    AND ts_bucket_start <= '%d'
     AND kind = 5
     AND attribute_string_messaging$$system = '%s'
     AND attributes_string['messaging.destination.name'] = '%s'
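Every generator in this file gains the same pair of bounds: the trace tables are bucketed by ts_bucket_start in epoch seconds, and the lower bound is pulled back by 1800 seconds, which reads as one 30-minute bucket of slack so spans whose bucket opened just before the query window still get scanned. A standalone sketch of the computation (assuming nanosecond start/end, which the hardcoded test timestamps further down suggest):

```go
package main

import "fmt"

// bucketBounds mirrors the tsBucketStart/tsBucketEnd computation added to
// each query generator: convert nanoseconds to seconds and pad the lower
// bound by one 30-minute bucket (1800 s).
func bucketBounds(startNs, endNs int64) (tsBucketStart, tsBucketEnd int64) {
	tsBucketStart = (startNs / 1000000000) - 1800
	tsBucketEnd = endNs / 1000000000
	return
}

func main() {
	start, end := bucketBounds(1680066360726210000, 1680066458000000000)
	// 1680064560 1680066458: the same bounds that appear in the
	// ts_bucket_start filters of the trace query tests below.
	fmt.Println(start, end)
}
```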
@@ -36,13 +40,15 @@ FROM
     consumer_query
 ORDER BY
     resource_string_service$$name;
-`, start, end, queueType, topic, partition, consumerGroup, timeRange)
+`, start, end, tsBucketStart, tsBucketEnd, queueType, topic, partition, consumerGroup, timeRange)
 	return query
 }

 // S1 landing
 func generatePartitionLatencySQL(start, end int64, queueType string) string {
 	timeRange := (end - start) / 1000000000
+	tsBucketStart := (start / 1000000000) - 1800
+	tsBucketEnd := end / 1000000000
 	query := fmt.Sprintf(`
 WITH partition_query AS (
     SELECT
@@ -54,6 +60,8 @@ WITH partition_query AS (
 WHERE
     timestamp >= '%d'
     AND timestamp <= '%d'
+    AND ts_bucket_start >= '%d'
+    AND ts_bucket_start <= '%d'
     AND kind = 4
     AND attribute_string_messaging$$system = '%s'
     GROUP BY topic, partition
@@ -68,13 +76,15 @@ FROM
     partition_query
 ORDER BY
     topic;
-`, start, end, queueType, timeRange)
+`, start, end, tsBucketStart, tsBucketEnd, queueType, timeRange)
 	return query
 }

 // S1 consumer
 func generateConsumerPartitionLatencySQL(start, end int64, topic, partition, queueType string) string {
 	timeRange := (end - start) / 1000000000
+	tsBucketStart := (start / 1000000000) - 1800
+	tsBucketEnd := end / 1000000000
 	query := fmt.Sprintf(`
 WITH consumer_pl AS (
     SELECT
@@ -87,6 +97,8 @@ WITH consumer_pl AS (
 WHERE
     timestamp >= '%d'
     AND timestamp <= '%d'
+    AND ts_bucket_start >= '%d'
+    AND ts_bucket_start <= '%d'
     AND kind = 5
     AND attribute_string_messaging$$system = '%s'
     AND attributes_string['messaging.destination.name'] = '%s'
@@ -104,14 +116,15 @@ FROM
     consumer_pl
 ORDER BY
     consumer_group;
-`, start, end, queueType, topic, partition, timeRange)
+`, start, end, tsBucketStart, tsBucketEnd, queueType, topic, partition, timeRange)
 	return query
 }

 // S3, producer overview
 func generateProducerPartitionThroughputSQL(start, end int64, queueType string) string {
 	timeRange := (end - start) / 1000000000
-	// t, svc, rps, byte*, p99, err
+	tsBucketStart := (start / 1000000000) - 1800
+	tsBucketEnd := end / 1000000000 // t, svc, rps, byte*, p99, err
 	query := fmt.Sprintf(`
 WITH producer_latency AS (
     SELECT
@@ -124,6 +137,8 @@ WITH producer_latency AS (
 WHERE
     timestamp >= '%d'
     AND timestamp <= '%d'
+    AND ts_bucket_start >= '%d'
+    AND ts_bucket_start <= '%d'
     AND kind = 4
     AND attribute_string_messaging$$system = '%s'
     GROUP BY topic, resource_string_service$$name
@@ -137,13 +152,15 @@ SELECT
     COALESCE(total_requests / %d, 0) AS throughput
 FROM
     producer_latency
-`, start, end, queueType, timeRange)
+`, start, end, tsBucketStart, tsBucketEnd, queueType, timeRange)
 	return query
 }

 // S3, producer topic/service overview
 func generateProducerTopicLatencySQL(start, end int64, topic, service, queueType string) string {
 	timeRange := (end - start) / 1000000000
+	tsBucketStart := (start / 1000000000) - 1800
+	tsBucketEnd := end / 1000000000
 	query := fmt.Sprintf(`
 WITH consumer_latency AS (
     SELECT
@@ -155,6 +172,8 @@ WITH consumer_latency AS (
 WHERE
     timestamp >= '%d'
     AND timestamp <= '%d'
+    AND ts_bucket_start >= '%d'
+    AND ts_bucket_start <= '%d'
     AND kind = 4
     AND resource_string_service$$name = '%s'
     AND attribute_string_messaging$$system = '%s'
@@ -169,13 +188,15 @@ SELECT
     COALESCE(total_requests / %d, 0) AS throughput
 FROM
     consumer_latency
-`, start, end, service, queueType, topic, timeRange)
+`, start, end, tsBucketStart, tsBucketEnd, service, queueType, topic, timeRange)
 	return query
 }

 // S3 consumer overview
 func generateConsumerLatencySQL(start, end int64, queueType string) string {
 	timeRange := (end - start) / 1000000000
+	tsBucketStart := (start / 1000000000) - 1800
+	tsBucketEnd := end / 1000000000
 	query := fmt.Sprintf(`
 WITH consumer_latency AS (
     SELECT
@@ -189,6 +210,8 @@ WITH consumer_latency AS (
 WHERE
     timestamp >= '%d'
     AND timestamp <= '%d'
+    AND ts_bucket_start >= '%d'
+    AND ts_bucket_start <= '%d'
     AND kind = 5
     AND attribute_string_messaging$$system = '%s'
     GROUP BY topic, resource_string_service$$name
@@ -205,13 +228,15 @@ FROM
     consumer_latency
 ORDER BY
     topic;
-`, start, end, queueType, timeRange, timeRange)
+`, start, end, tsBucketStart, tsBucketEnd, queueType, timeRange, timeRange)
 	return query
 }

 // S3 consumer topic/service
 func generateConsumerServiceLatencySQL(start, end int64, topic, service, queueType string) string {
 	timeRange := (end - start) / 1000000000
+	tsBucketStart := (start / 1000000000) - 1800
+	tsBucketEnd := end / 1000000000
 	query := fmt.Sprintf(`
 WITH consumer_latency AS (
     SELECT
@@ -223,6 +248,8 @@ WITH consumer_latency AS (
 WHERE
     timestamp >= '%d'
     AND timestamp <= '%d'
+    AND ts_bucket_start >= '%d'
+    AND ts_bucket_start <= '%d'
     AND kind = 5
     AND resource_string_service$$name = '%s'
     AND attribute_string_messaging$$system = '%s'
@@ -237,7 +264,7 @@ SELECT
     COALESCE(total_requests / %d, 0) AS throughput
 FROM
     consumer_latency
-`, start, end, service, queueType, topic, timeRange)
+`, start, end, tsBucketStart, tsBucketEnd, service, queueType, topic, timeRange)
 	return query
 }

@@ -293,6 +320,8 @@ GROUP BY

 func generateProducerSQL(start, end int64, topic, partition, queueType string) string {
 	timeRange := (end - start) / 1000000000
+	tsBucketStart := (start / 1000000000) - 1800
+	tsBucketEnd := end / 1000000000
 	query := fmt.Sprintf(`
 WITH producer_query AS (
     SELECT
@@ -304,6 +333,8 @@ WITH producer_query AS (
 WHERE
     timestamp >= '%d'
     AND timestamp <= '%d'
+    AND ts_bucket_start >= '%d'
+    AND ts_bucket_start <= '%d'
     AND kind = 4
     AND attribute_string_messaging$$system = '%s'
     AND attributes_string['messaging.destination.name'] = '%s'
@@ -320,12 +351,14 @@ FROM
     producer_query
 ORDER BY
     resource_string_service$$name;
-`, start, end, queueType, topic, partition, timeRange)
+`, start, end, tsBucketStart, tsBucketEnd, queueType, topic, partition, timeRange)
 	return query
 }

 func generateNetworkLatencyThroughputSQL(start, end int64, consumerGroup, partitionID, queueType string) string {
 	timeRange := (end - start) / 1000000000
+	tsBucketStart := (start / 1000000000) - 1800
+	tsBucketEnd := end / 1000000000
 	query := fmt.Sprintf(`
 SELECT
     attributes_string['messaging.client_id'] AS client_id,
@@ -336,17 +369,21 @@ FROM signoz_traces.distributed_signoz_index_v3
 WHERE
     timestamp >= '%d'
     AND timestamp <= '%d'
+    AND ts_bucket_start >= '%d'
+    AND ts_bucket_start <= '%d'
     AND kind = 5
     AND attribute_string_messaging$$system = '%s'
     AND attributes_string['messaging.kafka.consumer.group'] = '%s'
     AND attributes_string['messaging.destination.partition.id'] = '%s'
 GROUP BY service_name, client_id, service_instance_id
 ORDER BY throughput DESC
-`, timeRange, start, end, queueType, consumerGroup, partitionID)
+`, timeRange, start, end, tsBucketStart, tsBucketEnd, queueType, consumerGroup, partitionID)
 	return query
 }

 func onboardProducersSQL(start, end int64, queueType string) string {
+	tsBucketStart := (start / 1000000000) - 1800
+	tsBucketEnd := end / 1000000000
 	query := fmt.Sprintf(`
 SELECT
     COUNT(*) = 0 AS entries,
@@ -358,11 +395,15 @@ FROM
     signoz_traces.distributed_signoz_index_v3
 WHERE
     timestamp >= '%d'
-    AND timestamp <= '%d';`, queueType, start, end)
+    AND timestamp <= '%d'
+    AND ts_bucket_start >= '%d'
+    AND ts_bucket_start <= '%d';`, queueType, start, end, tsBucketStart, tsBucketEnd)
 	return query
 }

 func onboardConsumerSQL(start, end int64, queueType string) string {
+	tsBucketStart := (start / 1000000000) - 1800
+	tsBucketEnd := end / 1000000000
 	query := fmt.Sprintf(`
 SELECT
     COUNT(*) = 0 AS entries,
@@ -378,6 +419,8 @@ SELECT
 FROM signoz_traces.distributed_signoz_index_v3
 WHERE
     timestamp >= '%d'
-    AND timestamp <= '%d';`, queueType, start, end)
+    AND timestamp <= '%d'
+    AND ts_bucket_start >= '%d'
+    AND ts_bucket_start <= '%d' ;`, queueType, start, end, tsBucketStart, tsBucketEnd)
 	return query
 }
@@ -228,8 +228,8 @@ func parseColumn(s string) (*string, error) {
 	return &colName, nil
 }

-func arrayToMap(fields []model.LogField) map[string]model.LogField {
-	res := map[string]model.LogField{}
+func arrayToMap(fields []model.Field) map[string]model.Field {
+	res := map[string]model.Field{}
 	for _, field := range fields {
 		res[field.Name] = field
 	}
@@ -251,7 +251,7 @@ func replaceInterestingFields(allFields *model.GetFieldsResponse, queryTokens []
 	return queryTokens, nil
 }

-func replaceFieldInToken(queryToken string, selectedFieldsLookup map[string]model.LogField, interestingFieldLookup map[string]model.LogField) (string, error) {
+func replaceFieldInToken(queryToken string, selectedFieldsLookup map[string]model.Field, interestingFieldLookup map[string]model.Field) (string, error) {
 	op := strings.TrimSpace(operatorRegex.FindString(queryToken))
 	opLower := strings.ToLower(op)

@@ -283,7 +283,7 @@ func replaceFieldInToken(queryToken string, selectedFieldsLookup map[string]mode
 		}
 	} else {
 		// creating the query token here as we have the metadata
-		field := model.LogField{}
+		field := model.Field{}

 		if sfield, ok := selectedFieldsLookup[sqlColName]; ok {
 			field = sfield
@@ -238,14 +238,14 @@ func TestParseColumn(t *testing.T) {
 func TestReplaceInterestingFields(t *testing.T) {
 	queryTokens := []string{"id.userid IN (100) ", "and id_key >= 50 ", `AND body ILIKE '%searchstring%'`}
 	allFields := model.GetFieldsResponse{
-		Selected: []model.LogField{
+		Selected: []model.Field{
 			{
 				Name:     "id_key",
 				DataType: "int64",
 				Type:     "attributes",
 			},
 		},
-		Interesting: []model.LogField{
+		Interesting: []model.Field{
 			{
 				Name:     "id.userid",
 				DataType: "int64",
@@ -326,7 +326,7 @@ func TestCheckIfPrevousPaginateAndModifyOrder(t *testing.T) {
 }

 var generateSQLQueryFields = model.GetFieldsResponse{
-	Selected: []model.LogField{
+	Selected: []model.Field{
 		{
 			Name:     "field1",
 			DataType: "int64",
@@ -348,7 +348,7 @@ var generateSQLQueryFields = model.GetFieldsResponse{
 			Type: "static",
 		},
 	},
-	Interesting: []model.LogField{
+	Interesting: []model.Field{
 		{
 			Name:     "FielD1",
 			DataType: "int64",
@@ -6,6 +6,7 @@ import (

 	"go.signoz.io/signoz/pkg/query-service/constants"
 	"go.signoz.io/signoz/pkg/query-service/model"
+	v3 "go.signoz.io/signoz/pkg/query-service/model/v3"
 )

 func ValidateUpdateFieldPayload(field *model.UpdateField) error {
@@ -38,3 +39,36 @@ func ValidateUpdateFieldPayload(field *model.UpdateField) error {
 	}
 	return nil
 }
+
+func ValidateUpdateFieldPayloadV2(field *model.UpdateField) error {
+	if field.Name == "" {
+		return fmt.Errorf("name cannot be empty")
+	}
+	if field.Type == "" {
+		return fmt.Errorf("type cannot be empty")
+	}
+	if field.DataType == "" {
+		return fmt.Errorf("dataType cannot be empty")
+	}
+
+	// the logs api uses the old names i.e attributes and resources while traces use tag and attribute.
+	// update log api to use tag and attribute.
+	matched, err := regexp.MatchString(fmt.Sprintf("^(%s|%s)$", v3.AttributeKeyTypeTag, v3.AttributeKeyTypeResource), field.Type)
+	if err != nil {
+		return err
+	}
+	if !matched {
+		return fmt.Errorf("type %s not supported", field.Type)
+	}
+
+	if field.IndexType != "" {
+		matched, err := regexp.MatchString(`^(minmax|set\([0-9]\)|bloom_filter\((0?.?[0-9]+|1)\)|tokenbf_v1\([0-9]+,[0-9]+,[0-9]+\)|ngrambf_v1\([0-9]+,[0-9]+,[0-9]+,[0-9]+\))$`, field.IndexType)
+		if err != nil {
+			return err
+		}
+		if !matched {
+			return fmt.Errorf("index type %s not supported", field.IndexType)
+		}
+	}
+	return nil
+}
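The IndexType pattern admits the common ClickHouse skip-index declarations. A quick standalone check of which strings the same regex accepts:

```go
package main

import (
	"fmt"
	"regexp"
)

func main() {
	// Same pattern ValidateUpdateFieldPayloadV2 applies to field.IndexType.
	pat := regexp.MustCompile(`^(minmax|set\([0-9]\)|bloom_filter\((0?.?[0-9]+|1)\)|tokenbf_v1\([0-9]+,[0-9]+,[0-9]+\)|ngrambf_v1\([0-9]+,[0-9]+,[0-9]+,[0-9]+\))$`)
	for _, s := range []string{
		"minmax",                 // accepted
		"bloom_filter(0.01)",     // accepted
		"tokenbf_v1(10240,3,0)",  // accepted
		"ngrambf_v1(4,1024,1,0)", // accepted
		"set(100)",               // rejected: the pattern allows only a single digit in set(...)
	} {
		fmt.Printf("%-24s %v\n", s, pat.MatchString(s))
	}
}
```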
@@ -190,7 +190,7 @@ func (q *querier) runBuilderQuery(
 				ch <- channelResult{Err: err, Name: queryName, Query: limitQuery, Series: nil}
 				return
 			}
-			query = fmt.Sprintf(placeholderQuery, limitQuery)
+			query = strings.Replace(placeholderQuery, "#LIMIT_PLACEHOLDER", limitQuery, 1)
 		} else {
 			query, err = tracesQueryBuilder(
 				start,

@@ -190,7 +190,7 @@ func (q *querier) runBuilderQuery(
 				ch <- channelResult{Err: err, Name: queryName, Query: limitQuery, Series: nil}
 				return
 			}
-			query = fmt.Sprintf(placeholderQuery, limitQuery)
+			query = strings.Replace(placeholderQuery, "#LIMIT_PLACEHOLDER", limitQuery, 1)
 		} else {
 			query, err = tracesQueryBuilder(
 				start,
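The motivation for the placeholder swap, applied identically in both querier hunks above: fmt.Sprintf treats every % in placeholderQuery as a verb, and trace queries can contain literal percent signs, for example the `labels like '%hostname%server1%'` fragment in the tests further down. strings.Replace touches only the explicit #LIMIT_PLACEHOLDER token. A condensed illustration:

```go
package main

import (
	"fmt"
	"strings"
)

func main() {
	limit := "SELECT `function` FROM t LIMIT 10"

	// Old approach: the LIKE pattern's % signs are consumed as format verbs,
	// producing %!h(...), %!'(MISSING), %!s(MISSING) noise instead of SQL.
	bad := fmt.Sprintf("... labels like '%hostname%' AND f IN (%s)", limit)
	fmt.Println(bad)

	// New approach: a plain token replace leaves other % characters alone.
	tmpl := "... labels like '%hostname%' AND f IN (#LIMIT_PLACEHOLDER)"
	fmt.Println(strings.Replace(tmpl, "#LIMIT_PLACEHOLDER", limit, 1))
}
```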
@@ -34,8 +34,8 @@ func enrichKeyWithMetadata(key v3.AttributeKey, keys map[string]v3.AttributeKey)
 		return v
 	}

-	for _, key := range utils.GenerateEnrichmentKeys(key) {
-		if val, ok := keys[key]; ok {
+	for _, tkey := range utils.GenerateEnrichmentKeys(key) {
+		if val, ok := keys[tkey]; ok {
 			return val
 		}
 	}
@@ -74,6 +74,19 @@ func getSelectLabels(groupBy []v3.AttributeKey) string {
 	return strings.Join(labels, ",")
 }

+// TODO(nitya): use the _exists columns as well in the future similar to logs
+func existsSubQueryForFixedColumn(key v3.AttributeKey, op v3.FilterOperator) (string, error) {
+	if key.DataType == v3.AttributeKeyDataTypeString {
+		if op == v3.FilterOperatorExists {
+			return fmt.Sprintf("%s %s ''", getColumnName(key), tracesOperatorMappingV3[v3.FilterOperatorNotEqual]), nil
+		} else {
+			return fmt.Sprintf("%s %s ''", getColumnName(key), tracesOperatorMappingV3[v3.FilterOperatorEqual]), nil
+		}
+	} else {
+		return "", fmt.Errorf("unsupported operation, exists and not exists can only be applied on custom attributes or string type columns")
+	}
+}
+
 func buildTracesFilterQuery(fs *v3.FilterSet) (string, error) {
 	var conditions []string
@@ -110,7 +123,7 @@ func buildTracesFilterQuery(fs *v3.FilterSet) (string, error) {
 			conditions = append(conditions, fmt.Sprintf(operator, columnName, fmtVal))
 		case v3.FilterOperatorExists, v3.FilterOperatorNotExists:
 			if item.Key.IsColumn {
-				subQuery, err := tracesV3.ExistsSubQueryForFixedColumn(item.Key, item.Operator)
+				subQuery, err := existsSubQueryForFixedColumn(item.Key, item.Operator)
 				if err != nil {
 					return "", err
 				}
@@ -312,7 +325,7 @@ func buildTracesQuery(start, end, step int64, mq *v3.BuilderQuery, panelType v3.
 	}

 	if options.GraphLimitQtype == constants.SecondQueryGraphLimit {
-		filterSubQuery = filterSubQuery + " AND " + fmt.Sprintf("(%s) GLOBAL IN (", tracesV3.GetSelectKeys(mq.AggregateOperator, mq.GroupBy)) + "%s)"
+		filterSubQuery = filterSubQuery + " AND " + fmt.Sprintf("(%s) GLOBAL IN (", tracesV3.GetSelectKeys(mq.AggregateOperator, mq.GroupBy)) + "#LIMIT_PLACEHOLDER)"
 	}

 	switch mq.AggregateOperator {
@@ -350,7 +363,7 @@ func buildTracesQuery(start, end, step int64, mq *v3.BuilderQuery, panelType v3.
 	case v3.AggregateOperatorCount:
 		if mq.AggregateAttribute.Key != "" {
 			if mq.AggregateAttribute.IsColumn {
-				subQuery, err := tracesV3.ExistsSubQueryForFixedColumn(mq.AggregateAttribute, v3.FilterOperatorExists)
+				subQuery, err := existsSubQueryForFixedColumn(mq.AggregateAttribute, v3.FilterOperatorExists)
 				if err == nil {
 					filterSubQuery = fmt.Sprintf("%s AND %s", filterSubQuery, subQuery)
 				}
@@ -265,9 +265,11 @@ func Test_buildTracesFilterQuery(t *testing.T) {
 				{Key: v3.AttributeKey{Key: "isDone", DataType: v3.AttributeKeyDataTypeBool, Type: v3.AttributeKeyTypeTag}, Operator: v3.FilterOperatorNotExists},
 				{Key: v3.AttributeKey{Key: "host1", DataType: v3.AttributeKeyDataTypeString, Type: v3.AttributeKeyTypeTag}, Operator: v3.FilterOperatorNotExists},
 				{Key: v3.AttributeKey{Key: "path", DataType: v3.AttributeKeyDataTypeString, Type: v3.AttributeKeyTypeTag, IsColumn: true}, Operator: v3.FilterOperatorNotExists},
+				{Key: v3.AttributeKey{Key: "http_url", DataType: v3.AttributeKeyDataTypeString, Type: v3.AttributeKeyTypeTag, IsColumn: true}, Operator: v3.FilterOperatorNotExists},
+				{Key: v3.AttributeKey{Key: "http.route", DataType: v3.AttributeKeyDataTypeString, Type: v3.AttributeKeyTypeTag, IsColumn: true}, Operator: v3.FilterOperatorNotExists},
 			}},
 		},
-		want: "mapContains(attributes_string, 'host') AND mapContains(attributes_number, 'duration') AND NOT mapContains(attributes_bool, 'isDone') AND NOT mapContains(attributes_string, 'host1') AND path = ''",
+		want: "mapContains(attributes_string, 'host') AND mapContains(attributes_number, 'duration') AND NOT mapContains(attributes_bool, 'isDone') AND NOT mapContains(attributes_string, 'host1') AND `attribute_string_path` = '' AND http_url = '' AND `attribute_string_http$$route` = ''",
 	},
 }
 for _, tt := range tests {
@@ -683,7 +685,7 @@ func TestPrepareTracesQuery(t *testing.T) {
 			},
 		},
 		want: "SELECT attributes_string['function'] as `function`, toFloat64(count(distinct(name))) as value from signoz_traces.distributed_signoz_index_v3 where " +
-			"(timestamp >= '1680066360726210000' AND timestamp <= '1680066458000000000') AND (ts_bucket_start >= 1680064560 AND ts_bucket_start <= 1680066458) AND mapContains(attributes_string, 'function') AND (`function`) GLOBAL IN (%s) group by `function` order by value DESC",
+			"(timestamp >= '1680066360726210000' AND timestamp <= '1680066458000000000') AND (ts_bucket_start >= 1680064560 AND ts_bucket_start <= 1680066458) AND mapContains(attributes_string, 'function') AND (`function`) GLOBAL IN (#LIMIT_PLACEHOLDER) group by `function` order by value DESC",
 	},
 	{
 		name: "test with limit with resources- first",
@@ -766,7 +768,7 @@ func TestPrepareTracesQuery(t *testing.T) {
 		want: "SELECT `attribute_string_function` as `function`, serviceName as `serviceName`, toFloat64(count(distinct(name))) as value from signoz_traces.distributed_signoz_index_v3 " +
 			"where (timestamp >= '1680066360726210000' AND timestamp <= '1680066458000000000') AND (ts_bucket_start >= 1680064560 AND ts_bucket_start <= 1680066458) AND attributes_number['line'] = 100 " +
 			"AND (resource_fingerprint GLOBAL IN (SELECT fingerprint FROM signoz_traces.distributed_traces_v3_resource WHERE (seen_at_ts_bucket_start >= 1680064560) AND (seen_at_ts_bucket_start <= 1680066458) " +
-			"AND simpleJSONExtractString(labels, 'hostname') = 'server1' AND labels like '%hostname%server1%')) AND (`function`,`serviceName`) GLOBAL IN (%s) group by `function`,`serviceName` order by value DESC",
+			"AND simpleJSONExtractString(labels, 'hostname') = 'server1' AND labels like '%hostname%server1%')) AND (`function`,`serviceName`) GLOBAL IN (#LIMIT_PLACEHOLDER) group by `function`,`serviceName` order by value DESC",
 	},
 }
@@ -290,7 +290,7 @@ const (
 	UINT8 = "Uint8"
 )

-var StaticSelectedLogFields = []model.LogField{
+var StaticSelectedLogFields = []model.Field{
 	{
 		Name:     "timestamp",
 		DataType: UINT32,
@@ -109,6 +109,10 @@ type Reader interface {
 	SubscribeToQueryProgress(queryId string) (<-chan model.QueryProgress, func(), *model.ApiError)

 	GetCountOfThings(ctx context.Context, query string) (uint64, error)
+
+	// trace
+	GetTraceFields(ctx context.Context) (*model.GetFieldsResponse, *model.ApiError)
+	UpdateTraceField(ctx context.Context, field *model.UpdateField) *model.ApiError
 }

 type Querier interface {
@@ -509,15 +509,15 @@ type ShowCreateTableStatement struct {
 	Statement string `json:"statement" ch:"statement"`
 }

-type LogField struct {
+type Field struct {
 	Name     string `json:"name" ch:"name"`
 	DataType string `json:"dataType" ch:"datatype"`
 	Type     string `json:"type"`
 }

 type GetFieldsResponse struct {
-	Selected    []LogField `json:"selected"`
-	Interesting []LogField `json:"interesting"`
+	Selected    []Field `json:"selected"`
+	Interesting []Field `json:"interesting"`
 }

 // Represents a log record in query service requests and responses.