Compare commits

...

32 Commits

Author SHA1 Message Date
grandwizard28
2b2910b7f7 fix: fix some import issues 2025-12-29 01:16:30 +05:30
grandwizard28
f73db6ac2f feat: add the metastore cmd 2025-12-29 01:16:30 +05:30
grandwizard28
d22aabaa42 fix: add indexes for auth_token tables 2025-12-29 01:16:30 +05:30
grandwizard28
fbbe0e19cb fix: fix typenotfound in postgres 2025-12-29 01:16:30 +05:30
grandwizard28
3366edb28a fix: fix typenotfound in postgres 2025-12-29 01:16:30 +05:30
grandwizard28
95376520c3 feat: add remaining migrations 2025-12-29 01:16:30 +05:30
grandwizard28
c01c39a792 feat: add remaining migrations 2025-12-29 01:16:30 +05:30
grandwizard28
4b50e5422b chore: run go mod tidy 2025-12-29 01:16:30 +05:30
grandwizard28
94cfb36767 chore: apply the latest changes 2025-12-29 01:16:30 +05:30
grandwizard28
67056434bb test(operator): write more unit tests 2025-12-29 01:16:30 +05:30
grandwizard28
798b70a6f7 test(operator): write more unit tests 2025-12-29 01:16:30 +05:30
grandwizard28
48234f82bf test(operator): write more unit tests 2025-12-29 01:16:30 +05:30
grandwizard28
dfa0dcbc3a test(operator): write more unit tests 2025-12-29 01:16:30 +05:30
grandwizard28
9d013e8269 test(operator): write more unit tests 2025-12-29 01:16:30 +05:30
grandwizard28
46b716d4e4 refactor: rename support 2025-12-29 01:16:30 +05:30
grandwizard28
5746bb3f93 feat(sqlschema): add diff index support 2025-12-29 01:16:30 +05:30
grandwizard28
de09181ade feat(cmd): add migrate command 2025-12-29 01:16:30 +05:30
grandwizard28
078a34ed68 feat(sqlmigration): change support names and add test cases 2025-12-29 01:16:30 +05:30
grandwizard28
bec7c00b01 fix(cloudintegration): remove provider and org_id 2025-12-29 01:16:30 +05:30
grandwizard28
0234881dd3 docs(contributing): modify sql docs 2025-12-29 01:16:30 +05:30
grandwizard28
17a64d94c3 feat(sqlmigration): remove old dialect 2025-12-29 01:16:30 +05:30
grandwizard28
6418688fb5 feat(sqlmigration): add indices 2025-12-29 01:16:30 +05:30
grandwizard28
cad47cbd39 feat(sqlmigration): squash all migrations into one 2025-12-29 01:16:30 +05:30
Niladri Adhikary
7c051601f2 fix: normalize context-prefixed field keys (#9089)
* feat: normalize context-prefixed field keys

Signed-off-by: “niladrix719” <niladrix719@gmail.com>

* test: added tests validation for context-prefixed field

Signed-off-by: “niladrix719” <niladrix719@gmail.com>

* refactor: moved logic to parse.go

Signed-off-by: “niladrix719” <niladrix719@gmail.com>

* fix: attribute key edge case

Signed-off-by: “niladrix719” <niladrix719@gmail.com>

* fix: corrupt field context

Signed-off-by: “niladrix719” <niladrix719@gmail.com>

* fix: corrupt field context

Signed-off-by: “niladrix719” <niladrix719@gmail.com>

* refactor: parse and signal

Signed-off-by: “niladrix719” <niladrix719@gmail.com>

* refactor: mismatch for unknown signal

Signed-off-by: “niladrix719” <niladrix719@gmail.com>

---------

Signed-off-by: “niladrix719” <niladrix719@gmail.com>
Co-authored-by: Srikanth Chekuri <srikanth.chekuri92@gmail.com>
2025-12-28 23:17:44 +05:30
Niladri Adhikary
b9f9c00da5 feat: implement case-insensitive query name handling in formula evaluation (#9302)
* feat: implement case-insensitive query name handling in formula evaluation

Signed-off-by: “niladrix719” <niladrix719@gmail.com>

* feat: optimized lookups

Signed-off-by: “niladrix719” <niladrix719@gmail.com>

* feat: updated naming

Signed-off-by: “niladrix719” <niladrix719@gmail.com>

* fix: normalize keys in canDefaultZero for case insensitivity

Signed-off-by: “niladrix719” <niladrix719@gmail.com>

* fix: lookup

Signed-off-by: “niladrix719” <niladrix719@gmail.com>

* fix: canDefaultZero lookup

Signed-off-by: “niladrix719” <niladrix719@gmail.com>

---------

Signed-off-by: “niladrix719” <niladrix719@gmail.com>
Co-authored-by: Srikanth Chekuri <srikanth.chekuri92@gmail.com>
2025-12-28 22:29:37 +05:30
Asp-irin
49ff86e65a fix: correctly display OS type value for host detail
Co-authored-by: Srikanth Chekuri <srikanth.chekuri92@gmail.com>
2025-12-28 16:47:45 +05:30
Amlan Kumar Nandy
2dc6febb38 chore: warn users about incorrect usage with y axis unit (#9588) 2025-12-28 10:33:43 +05:30
lif
4ae268d867 fix: improve light mode text color for selected values in query builder (#9876)
In light mode, selected values in query builder Select components appeared
disabled due to inheriting light-colored text from dark mode styles.

This fix adds explicit text color (--text-ink-400) for .ant-select-selection-item
elements in light mode across QueryBuilder, QueryBuilderV2, and
MetricsAggregateSection styles.

Fixes #9801

Signed-off-by: majiayu000 <1835304752@qq.com>
Co-authored-by: Srikanth Chekuri <srikanth.chekuri92@gmail.com>
2025-12-26 17:38:25 +00:00
Amlan Kumar Nandy
9d78d67461 chore: y axis management in metrics explorer (#9587) 2025-12-26 17:14:15 +00:00
Abhi kumar
055d0ba90d fix: added fix for limit still getting sent in payload even after removing (#9877)
* fix: added fix for limit still getting sent in payload even after removing

* chore: removed console log
2025-12-26 17:35:08 +05:30
Abhi kumar
09dc95cfe9 fix: added fix for metric selection tooltip scroll issue (#9869) 2025-12-26 13:40:19 +05:30
Abhi kumar
d218cd5733 fix: added fix for reduceTo selection based on metric type + code cleanup (#9732)
* fix: added fixes for reduce-to, auto open + metric based default value

* fix: fixed raise condition

* chore: removed unnessasary useeffect from spaceaggregation

* test: added fix for failing test in usequerybuilderoperations

* fix: pr review comments

* fix: pr review changes
2025-12-25 22:54:25 +05:30
131 changed files with 4284 additions and 9032 deletions

View File

@@ -4,7 +4,11 @@ import (
"log/slog"
"github.com/SigNoz/signoz/cmd"
"github.com/SigNoz/signoz/pkg/factory"
"github.com/SigNoz/signoz/pkg/instrumentation"
"github.com/SigNoz/signoz/pkg/signoz"
"github.com/SigNoz/signoz/pkg/sqlschema"
"github.com/SigNoz/signoz/pkg/sqlstore"
)
func main() {
@@ -14,6 +18,16 @@ func main() {
// register a list of commands to the root command
registerServer(cmd.RootCmd, logger)
cmd.RegisterGenerate(cmd.RootCmd, logger)
cmd.RegisterMetastore(
cmd.RootCmd,
logger,
func() factory.NamedMap[factory.ProviderFactory[sqlstore.SQLStore, sqlstore.Config]] {
return signoz.NewSQLStoreProviderFactories()
},
func(sqlstore sqlstore.SQLStore) factory.NamedMap[factory.ProviderFactory[sqlschema.SQLSchema, sqlschema.Config]] {
return signoz.NewSQLSchemaProviderFactories(sqlstore)
},
)
cmd.Execute(logger)
}

View File

@@ -5,12 +5,11 @@ import (
"log/slog"
"github.com/SigNoz/signoz/cmd"
"github.com/SigNoz/signoz/ee/authz/openfgaauthz"
"github.com/SigNoz/signoz/ee/authz/openfgaschema"
"github.com/SigNoz/signoz/ee/sqlstore/postgressqlstore"
"github.com/SigNoz/signoz/pkg/analytics"
"github.com/SigNoz/signoz/pkg/authn"
"github.com/SigNoz/signoz/pkg/authz"
"github.com/SigNoz/signoz/pkg/authz/openfgaauthz"
"github.com/SigNoz/signoz/pkg/authz/openfgaschema"
"github.com/SigNoz/signoz/pkg/factory"
"github.com/SigNoz/signoz/pkg/licensing"
"github.com/SigNoz/signoz/pkg/licensing/nooplicensing"
@@ -19,7 +18,6 @@ import (
"github.com/SigNoz/signoz/pkg/signoz"
"github.com/SigNoz/signoz/pkg/sqlschema"
"github.com/SigNoz/signoz/pkg/sqlstore"
"github.com/SigNoz/signoz/pkg/sqlstore/sqlstorehook"
"github.com/SigNoz/signoz/pkg/types/authtypes"
"github.com/SigNoz/signoz/pkg/version"
"github.com/SigNoz/signoz/pkg/zeus"
@@ -52,13 +50,6 @@ func runServer(ctx context.Context, config signoz.Config, logger *slog.Logger) e
// print the version
version.Info.PrettyPrint(config.Version)
// add enterprise sqlstore factories to the community sqlstore factories
sqlstoreFactories := signoz.NewSQLStoreProviderFactories()
if err := sqlstoreFactories.Add(postgressqlstore.NewFactory(sqlstorehook.NewLoggingFactory())); err != nil {
logger.ErrorContext(ctx, "failed to add postgressqlstore factory", "error", err)
return err
}
signoz, err := signoz.New(
ctx,
config,

View File

@@ -4,7 +4,14 @@ import (
"log/slog"
"github.com/SigNoz/signoz/cmd"
"github.com/SigNoz/signoz/ee/sqlschema/postgressqlschema"
"github.com/SigNoz/signoz/ee/sqlstore/postgressqlstore"
"github.com/SigNoz/signoz/pkg/factory"
"github.com/SigNoz/signoz/pkg/instrumentation"
"github.com/SigNoz/signoz/pkg/signoz"
"github.com/SigNoz/signoz/pkg/sqlschema"
"github.com/SigNoz/signoz/pkg/sqlstore"
"github.com/SigNoz/signoz/pkg/sqlstore/sqlstorehook"
)
func main() {
@@ -14,6 +21,26 @@ func main() {
// register a list of commands to the root command
registerServer(cmd.RootCmd, logger)
cmd.RegisterGenerate(cmd.RootCmd, logger)
cmd.RegisterMetastore(
cmd.RootCmd,
logger,
func() factory.NamedMap[factory.ProviderFactory[sqlstore.SQLStore, sqlstore.Config]] {
factories := signoz.NewSQLStoreProviderFactories()
if err := factories.Add(postgressqlstore.NewFactory(sqlstorehook.NewLoggingFactory())); err != nil {
panic(err)
}
return factories
},
func(sqlstore sqlstore.SQLStore) factory.NamedMap[factory.ProviderFactory[sqlschema.SQLSchema, sqlschema.Config]] {
factories := signoz.NewSQLSchemaProviderFactories(sqlstore)
if err := factories.Add(postgressqlschema.NewFactory(sqlstore)); err != nil {
panic(err)
}
return factories
},
)
cmd.Execute(logger)
}

cmd/metastore.go (new file, 128 lines)
View File

@@ -0,0 +1,128 @@
package cmd
import (
"log/slog"
"github.com/SigNoz/signoz/pkg/factory"
"github.com/SigNoz/signoz/pkg/instrumentation"
"github.com/SigNoz/signoz/pkg/signoz"
"github.com/SigNoz/signoz/pkg/sqlmigration"
"github.com/SigNoz/signoz/pkg/sqlmigrator"
"github.com/SigNoz/signoz/pkg/sqlschema"
"github.com/SigNoz/signoz/pkg/sqlstore"
"github.com/SigNoz/signoz/pkg/version"
"github.com/spf13/cobra"
)
// TODO(grandwizard28): DRY this code
func RegisterMetastore(parentCmd *cobra.Command, logger *slog.Logger, sqlstoreProviderFactories func() factory.NamedMap[factory.ProviderFactory[sqlstore.SQLStore, sqlstore.Config]], sqlschemaProviderFactories func(sqlstore.SQLStore) factory.NamedMap[factory.ProviderFactory[sqlschema.SQLSchema, sqlschema.Config]]) {
metastoreCmd := &cobra.Command{
Use: "metastore",
Short: "Run commands to interact with the Metastore",
SilenceUsage: true,
SilenceErrors: true,
CompletionOptions: cobra.CompletionOptions{DisableDefaultCmd: true},
}
registerMigrate(metastoreCmd, logger, sqlstoreProviderFactories, sqlschemaProviderFactories)
parentCmd.AddCommand(metastoreCmd)
}
func registerMigrate(parentCmd *cobra.Command, logger *slog.Logger, sqlstoreProviderFactories func() factory.NamedMap[factory.ProviderFactory[sqlstore.SQLStore, sqlstore.Config]], sqlschemaProviderFactories func(sqlstore.SQLStore) factory.NamedMap[factory.ProviderFactory[sqlschema.SQLSchema, sqlschema.Config]]) {
migrateCmd := &cobra.Command{
Use: "migrate",
Short: "Run migrations for the Metastore",
SilenceUsage: true,
SilenceErrors: true,
CompletionOptions: cobra.CompletionOptions{DisableDefaultCmd: true},
}
registerSync(migrateCmd, logger, sqlstoreProviderFactories, sqlschemaProviderFactories)
parentCmd.AddCommand(migrateCmd)
}
func registerSync(parentCmd *cobra.Command, logger *slog.Logger, sqlstoreProviderFactories func() factory.NamedMap[factory.ProviderFactory[sqlstore.SQLStore, sqlstore.Config]], sqlschemaProviderFactories func(sqlstore.SQLStore) factory.NamedMap[factory.ProviderFactory[sqlschema.SQLSchema, sqlschema.Config]]) {
syncUpCmd := &cobra.Command{
Use: "sync",
Short: "Runs 'sync' migrations for the metastore. Sync migrations are used to mutate schemas of the metastore. These migrations need to be successfully applied before bringing up the application.",
SilenceUsage: true,
SilenceErrors: true,
CompletionOptions: cobra.CompletionOptions{DisableDefaultCmd: true},
}
registerSyncUp(syncUpCmd, logger, sqlstoreProviderFactories, sqlschemaProviderFactories)
parentCmd.AddCommand(syncUpCmd)
}
func registerSyncUp(parentCmd *cobra.Command, logger *slog.Logger, sqlstoreProviderFactories func() factory.NamedMap[factory.ProviderFactory[sqlstore.SQLStore, sqlstore.Config]], sqlschemaProviderFactories func(sqlstore.SQLStore) factory.NamedMap[factory.ProviderFactory[sqlschema.SQLSchema, sqlschema.Config]]) {
syncUpCmd := &cobra.Command{
Use: "up",
Short: "Runs 'up' migrations for the metastore. Up migrations are used to apply new migrations to the metastore",
SilenceUsage: true,
SilenceErrors: true,
CompletionOptions: cobra.CompletionOptions{DisableDefaultCmd: true},
RunE: func(cmd *cobra.Command, args []string) error {
ctx := cmd.Context()
config, err := NewSigNozConfig(ctx, logger, signoz.DeprecatedFlags{})
if err != nil {
return err
}
// Initialize instrumentation
instrumentation, err := instrumentation.New(ctx, config.Instrumentation, version.Info, "signoz")
if err != nil {
return err
}
providerSettings := instrumentation.ToProviderSettings()
// Initialize sqlstore from the available sqlstore provider factories
sqlstore, err := factory.NewProviderFromNamedMap(
ctx,
providerSettings,
config.SQLStore,
sqlstoreProviderFactories(),
config.SQLStore.Provider,
)
if err != nil {
return err
}
// Initialize sqlschema from the available sqlschema provider factories
sqlschema, err := factory.NewProviderFromNamedMap(
ctx,
providerSettings,
config.SQLSchema,
sqlschemaProviderFactories(sqlstore),
config.SQLStore.Provider,
)
if err != nil {
return err
}
// Run migrations on the sqlstore
sqlmigrations, err := sqlmigration.New(
ctx,
providerSettings,
config.SQLMigration,
signoz.NewSQLMigrationProviderFactories(sqlstore, sqlschema),
)
if err != nil {
return err
}
err = sqlmigrator.New(ctx, providerSettings, sqlstore, sqlmigrations, config.SQLMigrator).Migrate(ctx)
if err != nil {
return err
}
return nil
},
}
parentCmd.AddCommand(syncUpCmd)
}
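
The new `cmd/metastore.go` wires a subcommand chain of root > metastore > migrate > sync > up, so the sync migrations are invoked as `signoz metastore migrate sync up`. Below is a minimal sketch of the same cobra nesting pattern; the `RunE` body and the root command name are placeholders for illustration, not the actual SigNoz wiring:

```go
package main

import (
	"fmt"

	"github.com/spf13/cobra"
)

// Sketch of the command nesting used by RegisterMetastore:
// root > metastore > migrate > sync > up.
func main() {
	rootCmd := &cobra.Command{Use: "signoz"}
	metastoreCmd := &cobra.Command{Use: "metastore", Short: "Run commands to interact with the Metastore"}
	migrateCmd := &cobra.Command{Use: "migrate", Short: "Run migrations for the Metastore"}
	syncCmd := &cobra.Command{Use: "sync", Short: "Run 'sync' (schema-mutating) migrations"}
	upCmd := &cobra.Command{
		Use:   "up",
		Short: "Apply pending 'up' migrations",
		RunE: func(cmd *cobra.Command, args []string) error {
			// Placeholder: the real command builds the sqlstore, sqlschema,
			// and sqlmigrator providers before migrating.
			fmt.Println("would run sync/up migrations here")
			return nil
		},
	}

	syncCmd.AddCommand(upCmd)
	migrateCmd.AddCommand(syncCmd)
	metastoreCmd.AddCommand(migrateCmd)
	rootCmd.AddCommand(metastoreCmd)

	if err := rootCmd.Execute(); err != nil {
		fmt.Println(err)
	}
}
```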

View File

@@ -78,13 +78,12 @@ All tables follow a consistent primary key pattern using a `id` column (referenc
## How to write migrations?
For schema migrations, use the [SQLMigration](/pkg/sqlmigration/sqlmigration.go) interface and write the migration in the same package. When creating migrations, adhere to these guidelines:
For schema migrations, use the [SQLMigration](/pkg/sqlmigration/sqlmigration.go) interface. Migrations are split into packages by the starting number of their series: migrations starting at `100` live in the `s100sqlmigration` package (read as "series 100 SQL migrations"), migrations starting at `200` live in `s200sqlmigration`, and so on. When creating migrations, adhere to these guidelines:
- Do not implement **`ON CASCADE` foreign key constraints**. Deletion operations should be handled explicitly in application logic rather than delegated to the database.
- Use the [SQLSchema](/pkg/sqlschema/sqlschema.go) interface to write migrations. SQLSchema is responsible for generating idempotent SQL statements to alter the database schema. For instance, if you want to add a column to the `users` table, you can use the `AddColumn` method to add the column. If the column already exists, the method will return no SQL statements.
- Do not **import types from the types package** in the `sqlmigration` package. Instead, define the required types within the migration package itself. This practice ensures migration stability as the core types evolve over time.
- Do not implement **`Down` migrations**. As the codebase matures, we may introduce this capability, but for now, the `Down` function should remain empty.
- Always write **idempotent** migrations: running the same migration multiple times must not cause an error (a minimal sketch follows this list).
- A migration which is **dependent on the underlying dialect** (sqlite, postgres, etc) should be written as part of the [SQLDialect](/pkg/sqlstore/sqlstore.go) interface. The implementation needs to go in the dialect specific package of the respective database.
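To make the idempotency guideline concrete, here is a minimal sketch of an idempotent column addition written against plain `database/sql` and Postgres's `information_schema`. The table, column, and helper names are hypothetical; real migrations would go through the SQLSchema operator described above, which generates this kind of guarded statement for you:

```go
package migrationsketch

import (
	"context"
	"database/sql"
)

// addDisplayNameColumn adds users.display_name only if the column is missing,
// so running the migration repeatedly is a no-op. Hypothetical example names.
func addDisplayNameColumn(ctx context.Context, db *sql.DB) error {
	var count int
	err := db.QueryRowContext(ctx, `
		SELECT COUNT(*)
		FROM information_schema.columns
		WHERE table_name = 'users' AND column_name = 'display_name'`,
	).Scan(&count)
	if err != nil {
		return err
	}
	if count > 0 {
		return nil // column already exists; nothing to do
	}
	_, err = db.ExecContext(ctx, `ALTER TABLE users ADD COLUMN display_name TEXT`)
	return err
}
```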
## What should I remember?

View File

@@ -4,6 +4,7 @@ import (
"context"
"database/sql"
"github.com/SigNoz/signoz/pkg/errors"
"github.com/SigNoz/signoz/pkg/factory"
"github.com/SigNoz/signoz/pkg/sqlschema"
"github.com/SigNoz/signoz/pkg/sqlstore"
@@ -32,9 +33,9 @@ func New(ctx context.Context, providerSettings factory.ProviderSettings, config
fmter: fmter,
settings: settings,
operator: sqlschema.NewOperator(fmter, sqlschema.OperatorSupport{
DropConstraint: true,
ColumnIfNotExistsExists: true,
AlterColumnSetNotNull: true,
SCreateAndDropConstraint: true,
SAlterTableAddAndDropColumnIfNotExistsAndExists: true,
SAlterTableAlterColumnSetAndDrop: true,
}),
}, nil
}
@@ -72,8 +73,9 @@ WHERE
if err != nil {
return nil, nil, err
}
if len(columns) == 0 {
return nil, nil, sql.ErrNoRows
return nil, nil, provider.sqlstore.WrapNotFoundErrf(sql.ErrNoRows, errors.CodeNotFound, "table (%s) not found", tableName)
}
sqlschemaColumns := make([]*sqlschema.Column, 0)
@@ -220,7 +222,8 @@ SELECT
ci.relname AS index_name,
i.indisunique AS unique,
i.indisprimary AS primary,
a.attname AS column_name
a.attname AS column_name,
array_position(i.indkey, a.attnum) AS column_position
FROM
pg_index i
LEFT JOIN pg_class ct ON ct.oid = i.indrelid
@@ -231,9 +234,10 @@ WHERE
a.attnum = ANY(i.indkey)
AND con.oid IS NULL
AND ct.relkind = 'r'
AND ct.relname = ?`, string(name))
AND ct.relname = ?
ORDER BY index_name, column_position`, string(name))
if err != nil {
return nil, err
return nil, provider.sqlstore.WrapNotFoundErrf(err, errors.CodeNotFound, "no indices for table (%s) found", name)
}
defer func() {
@@ -250,9 +254,11 @@ WHERE
unique bool
primary bool
columnName string
// 0-based position of the column within the index; unused in this function,
// but selected so the ORDER BY above returns columns in index order
columnPosition int
)
if err := rows.Scan(&tableName, &indexName, &unique, &primary, &columnName); err != nil {
if err := rows.Scan(&tableName, &indexName, &unique, &primary, &columnName, &columnPosition); err != nil {
return nil, err
}
@@ -269,8 +275,12 @@ WHERE
}
indices := make([]sqlschema.Index, 0)
for _, index := range uniqueIndicesMap {
indices = append(indices, index)
for indexName, index := range uniqueIndicesMap {
if index.Name() == indexName {
indices = append(indices, index)
} else {
indices = append(indices, index.Named(indexName))
}
}
return indices, nil
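
The `column_position` addition above matters because the index-listing query returns one row per indexed column; without `ORDER BY index_name, column_position`, a multi-column index could be reassembled with its columns shuffled. A sketch of that reassembly step, under the assumption that rows arrive pre-sorted (the struct and function names here are illustrative, not SigNoz's API):

```go
package indexsketch

// indexRow mirrors one row of the query: one indexed column per row.
type indexRow struct {
	IndexName  string
	ColumnName string
	Position   int // array_position(i.indkey, a.attnum), 0-based
}

// rebuildIndexes groups rows into per-index column lists. It assumes rows are
// already sorted by (IndexName, Position), as the query's ORDER BY guarantees,
// so appending in scan order preserves each index's column order.
func rebuildIndexes(rows []indexRow) map[string][]string {
	byIndex := make(map[string][]string)
	for _, r := range rows {
		byIndex[r.IndexName] = append(byIndex[r.IndexName], r.ColumnName)
	}
	return byIndex
}
```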

View File

@@ -1,456 +0,0 @@
package postgressqlstore
import (
"context"
"fmt"
"reflect"
"slices"
"github.com/SigNoz/signoz/pkg/errors"
"github.com/uptrace/bun"
)
var (
Identity = "id"
Integer = "bigint"
Text = "text"
)
var (
Org = "org"
User = "user"
UserNoCascade = "user_no_cascade"
FactorPassword = "factor_password"
CloudIntegration = "cloud_integration"
AgentConfigVersion = "agent_config_version"
)
var (
OrgReference = `("org_id") REFERENCES "organizations" ("id")`
UserReference = `("user_id") REFERENCES "users" ("id") ON DELETE CASCADE ON UPDATE CASCADE`
UserReferenceNoCascade = `("user_id") REFERENCES "users" ("id")`
FactorPasswordReference = `("password_id") REFERENCES "factor_password" ("id")`
CloudIntegrationReference = `("cloud_integration_id") REFERENCES "cloud_integration" ("id") ON DELETE CASCADE`
AgentConfigVersionReference = `("version_id") REFERENCES "agent_config_version" ("id")`
)
type dialect struct{}
func (dialect *dialect) IntToTimestamp(ctx context.Context, bun bun.IDB, table string, column string) error {
columnType, err := dialect.GetColumnType(ctx, bun, table, column)
if err != nil {
return err
}
// bigint for postgres and INTEGER for sqlite
if columnType != "bigint" {
return nil
}
// if the columns is integer then do this
if _, err := bun.
ExecContext(ctx, `ALTER TABLE `+table+` RENAME COLUMN `+column+` TO `+column+`_old`); err != nil {
return err
}
// add new timestamp column
if _, err := bun.
NewAddColumn().
Table(table).
ColumnExpr(column + " TIMESTAMP").
Exec(ctx); err != nil {
return err
}
if _, err := bun.
NewUpdate().
Table(table).
Set(column + " = to_timestamp(cast(" + column + "_old as INTEGER))").
Where("1=1").
Exec(ctx); err != nil {
return err
}
// drop old column
if _, err := bun.
NewDropColumn().
Table(table).
Column(column + "_old").
Exec(ctx); err != nil {
return err
}
return nil
}
func (dialect *dialect) IntToBoolean(ctx context.Context, bun bun.IDB, table string, column string) error {
columnExists, err := dialect.ColumnExists(ctx, bun, table, column)
if err != nil {
return err
}
if !columnExists {
return nil
}
columnType, err := dialect.GetColumnType(ctx, bun, table, column)
if err != nil {
return err
}
if columnType != "bigint" {
return nil
}
if _, err := bun.
ExecContext(ctx, `ALTER TABLE `+table+` RENAME COLUMN `+column+` TO `+column+`_old`); err != nil {
return err
}
// add new boolean column
if _, err := bun.
NewAddColumn().
Table(table).
ColumnExpr(column + " BOOLEAN").
Exec(ctx); err != nil {
return err
}
// copy data from old column to new column, converting from int to boolean
if _, err := bun.NewUpdate().
Table(table).
Set(column + " = CASE WHEN " + column + "_old = 1 THEN true ELSE false END").
Where("1=1").
Exec(ctx); err != nil {
return err
}
// drop old column
if _, err := bun.NewDropColumn().Table(table).Column(column + "_old").Exec(ctx); err != nil {
return err
}
return nil
}
func (dialect *dialect) GetColumnType(ctx context.Context, bun bun.IDB, table string, column string) (string, error) {
var columnType string
err := bun.NewSelect().
ColumnExpr("data_type").
TableExpr("information_schema.columns").
Where("table_name = ?", table).
Where("column_name = ?", column).
Scan(ctx, &columnType)
if err != nil {
return "", err
}
return columnType, nil
}
func (dialect *dialect) ColumnExists(ctx context.Context, bun bun.IDB, table string, column string) (bool, error) {
var count int
err := bun.NewSelect().
ColumnExpr("COUNT(*)").
TableExpr("information_schema.columns").
Where("table_name = ?", table).
Where("column_name = ?", column).
Scan(ctx, &count)
if err != nil {
return false, err
}
return count > 0, nil
}
func (dialect *dialect) AddColumn(ctx context.Context, bun bun.IDB, table string, column string, columnExpr string) error {
exists, err := dialect.ColumnExists(ctx, bun, table, column)
if err != nil {
return err
}
if !exists {
_, err = bun.
NewAddColumn().
Table(table).
ColumnExpr(column + " " + columnExpr).
Exec(ctx)
if err != nil {
return err
}
}
return nil
}
func (dialect *dialect) RenameColumn(ctx context.Context, bun bun.IDB, table string, oldColumnName string, newColumnName string) (bool, error) {
oldColumnExists, err := dialect.ColumnExists(ctx, bun, table, oldColumnName)
if err != nil {
return false, err
}
newColumnExists, err := dialect.ColumnExists(ctx, bun, table, newColumnName)
if err != nil {
return false, err
}
if newColumnExists {
return true, nil
}
if !oldColumnExists {
return false, errors.Newf(errors.TypeInvalidInput, errors.CodeInvalidInput, "old column: %s doesn't exist", oldColumnName)
}
_, err = bun.
ExecContext(ctx, "ALTER TABLE "+table+" RENAME COLUMN "+oldColumnName+" TO "+newColumnName)
if err != nil {
return false, err
}
return true, nil
}
func (dialect *dialect) DropColumn(ctx context.Context, bun bun.IDB, table string, column string) error {
exists, err := dialect.ColumnExists(ctx, bun, table, column)
if err != nil {
return err
}
if exists {
_, err = bun.
NewDropColumn().
Table(table).
Column(column).
Exec(ctx)
if err != nil {
return err
}
}
return nil
}
func (dialect *dialect) TableExists(ctx context.Context, bun bun.IDB, table interface{}) (bool, error) {
count := 0
err := bun.
NewSelect().
ColumnExpr("count(*)").
Table("pg_catalog.pg_tables").
Where("tablename = ?", bun.Dialect().Tables().Get(reflect.TypeOf(table)).Name).
Scan(ctx, &count)
if err != nil {
return false, err
}
if count == 0 {
return false, nil
}
return true, nil
}
func (dialect *dialect) RenameTableAndModifyModel(ctx context.Context, bun bun.IDB, oldModel interface{}, newModel interface{}, references []string, cb func(context.Context) error) error {
if len(references) == 0 {
return errors.Newf(errors.TypeInvalidInput, errors.CodeInvalidInput, "cannot run migration without reference")
}
exists, err := dialect.TableExists(ctx, bun, newModel)
if err != nil {
return err
}
if exists {
return nil
}
var fkReferences []string
for _, reference := range references {
if reference == Org && !slices.Contains(fkReferences, OrgReference) {
fkReferences = append(fkReferences, OrgReference)
} else if reference == User && !slices.Contains(fkReferences, UserReference) {
fkReferences = append(fkReferences, UserReference)
} else if reference == UserNoCascade && !slices.Contains(fkReferences, UserReferenceNoCascade) {
fkReferences = append(fkReferences, UserReferenceNoCascade)
} else if reference == FactorPassword && !slices.Contains(fkReferences, FactorPasswordReference) {
fkReferences = append(fkReferences, FactorPasswordReference)
} else if reference == CloudIntegration && !slices.Contains(fkReferences, CloudIntegrationReference) {
fkReferences = append(fkReferences, CloudIntegrationReference)
} else if reference == AgentConfigVersion && !slices.Contains(fkReferences, AgentConfigVersionReference) {
fkReferences = append(fkReferences, AgentConfigVersionReference)
}
}
createTable := bun.
NewCreateTable().
IfNotExists().
Model(newModel)
for _, fk := range fkReferences {
createTable = createTable.ForeignKey(fk)
}
_, err = createTable.Exec(ctx)
if err != nil {
return err
}
err = cb(ctx)
if err != nil {
return err
}
_, err = bun.
NewDropTable().
IfExists().
Model(oldModel).
Exec(ctx)
if err != nil {
return err
}
return nil
}
func (dialect *dialect) AddNotNullDefaultToColumn(ctx context.Context, bun bun.IDB, table string, column, columnType, defaultValue string) error {
query := fmt.Sprintf("ALTER TABLE %s ALTER COLUMN %s SET DEFAULT %s, ALTER COLUMN %s SET NOT NULL", table, column, defaultValue, column)
if _, err := bun.ExecContext(ctx, query); err != nil {
return err
}
return nil
}
func (dialect *dialect) UpdatePrimaryKey(ctx context.Context, bun bun.IDB, oldModel interface{}, newModel interface{}, reference string, cb func(context.Context) error) error {
if reference == "" {
return errors.Newf(errors.TypeInvalidInput, errors.CodeInvalidInput, "cannot run migration without reference")
}
oldTableName := bun.Dialect().Tables().Get(reflect.TypeOf(oldModel)).Name
newTableName := bun.Dialect().Tables().Get(reflect.TypeOf(newModel)).Name
columnType, err := dialect.GetColumnType(ctx, bun, oldTableName, Identity)
if err != nil {
return err
}
if columnType == Text {
return nil
}
fkReference := ""
if reference == Org {
fkReference = OrgReference
} else if reference == User {
fkReference = UserReference
}
_, err = bun.
NewCreateTable().
IfNotExists().
Model(newModel).
ForeignKey(fkReference).
Exec(ctx)
if err != nil {
return err
}
err = cb(ctx)
if err != nil {
return err
}
_, err = bun.
NewDropTable().
IfExists().
Model(oldModel).
Exec(ctx)
if err != nil {
return err
}
_, err = bun.
ExecContext(ctx, fmt.Sprintf("ALTER TABLE %s RENAME TO %s", newTableName, oldTableName))
if err != nil {
return err
}
return nil
}
func (dialect *dialect) AddPrimaryKey(ctx context.Context, bun bun.IDB, oldModel interface{}, newModel interface{}, reference string, cb func(context.Context) error) error {
if reference == "" {
return errors.Newf(errors.TypeInvalidInput, errors.CodeInvalidInput, "cannot run migration without reference")
}
oldTableName := bun.Dialect().Tables().Get(reflect.TypeOf(oldModel)).Name
newTableName := bun.Dialect().Tables().Get(reflect.TypeOf(newModel)).Name
identityExists, err := dialect.ColumnExists(ctx, bun, oldTableName, Identity)
if err != nil {
return err
}
if identityExists {
return nil
}
fkReference := ""
if reference == Org {
fkReference = OrgReference
} else if reference == User {
fkReference = UserReference
}
_, err = bun.
NewCreateTable().
IfNotExists().
Model(newModel).
ForeignKey(fkReference).
Exec(ctx)
if err != nil {
return err
}
err = cb(ctx)
if err != nil {
return err
}
_, err = bun.
NewDropTable().
IfExists().
Model(oldModel).
Exec(ctx)
if err != nil {
return err
}
_, err = bun.
ExecContext(ctx, fmt.Sprintf("ALTER TABLE %s RENAME TO %s", newTableName, oldTableName))
if err != nil {
return err
}
return nil
}
func (dialect *dialect) DropColumnWithForeignKeyConstraint(ctx context.Context, bunIDB bun.IDB, model interface{}, column string) error {
existingTable := bunIDB.Dialect().Tables().Get(reflect.TypeOf(model))
columnExists, err := dialect.ColumnExists(ctx, bunIDB, existingTable.Name, column)
if err != nil {
return err
}
if !columnExists {
return nil
}
_, err = bunIDB.
NewDropColumn().
Model(model).
Column(column).
Exec(ctx)
if err != nil {
return err
}
return nil
}

View File

@@ -18,7 +18,6 @@ type provider struct {
settings factory.ScopedProviderSettings
sqldb *sql.DB
bundb *sqlstore.BunDB
dialect *dialect
formatter sqlstore.SQLFormatter
}
@@ -58,11 +57,11 @@ func New(ctx context.Context, providerSettings factory.ProviderSettings, config
pgDialect := pgdialect.New()
bunDB := sqlstore.NewBunDB(settings, sqldb, pgDialect, hooks)
return &provider{
settings: settings,
sqldb: sqldb,
bundb: bunDB,
dialect: new(dialect),
formatter: newFormatter(bunDB.Dialect()),
}, nil
}
@@ -75,10 +74,6 @@ func (provider *provider) SQLDB() *sql.DB {
return provider.sqldb
}
func (provider *provider) Dialect() sqlstore.SQLDialect {
return provider.dialect
}
func (provider *provider) Formatter() sqlstore.SQLFormatter {
return provider.formatter
}
@@ -107,7 +102,3 @@ func (provider *provider) WrapAlreadyExistsErrf(err error, code errors.Code, for
return err
}
func (dialect *dialect) ToggleForeignKeyConstraint(ctx context.Context, bun *bun.DB, enable bool) error {
return nil
}

View File

@@ -0,0 +1,29 @@
import { ApiV2Instance as axios } from 'api';
import { ErrorResponseHandlerV2 } from 'api/ErrorResponseHandlerV2';
import { AxiosError } from 'axios';
import { ErrorResponseV2, ErrorV2Resp, SuccessResponseV2 } from 'types/api';
import { MetricMetadataResponse } from 'types/api/metricsExplorer/v2/getMetricMetadata';
export const getMetricMetadata = async (
metricName: string,
signal?: AbortSignal,
headers?: Record<string, string>,
): Promise<SuccessResponseV2<MetricMetadataResponse> | ErrorResponseV2> => {
try {
const encodedMetricName = encodeURIComponent(metricName);
const response = await axios.get(
`/metrics/metadata?metricName=${encodedMetricName}`,
{
signal,
headers,
},
);
return {
httpStatusCode: response.status,
data: response.data,
};
} catch (error) {
return ErrorResponseHandlerV2(error as AxiosError<ErrorV2Resp>);
}
};

View File

@@ -560,6 +560,10 @@
border: 1px solid var(--bg-vanilla-300) !important;
background: var(--bg-vanilla-100) !important;
box-shadow: 0px 0px 8px 0px rgba(0, 0, 0, 0.1) !important;
.ant-select-selection-item {
color: var(--text-ink-400);
}
}
}
}
@@ -569,6 +573,10 @@
border: 1px solid var(--bg-vanilla-300) !important;
background: var(--bg-vanilla-100) !important;
box-shadow: 0px 0px 8px 0px rgba(0, 0, 0, 0.1) !important;
.ant-select-selection-item {
color: var(--text-ink-400);
}
}
.ant-select-arrow {

View File

@@ -169,6 +169,10 @@
.ant-select-selector {
border: 1px solid var(--bg-vanilla-300) !important;
background: var(--bg-vanilla-100) !important;
.ant-select-selection-item {
color: var(--text-ink-400);
}
}
}
}

View File

@@ -32,6 +32,7 @@ const ADD_ONS_KEYS = {
ORDER_BY: 'order_by',
LIMIT: 'limit',
LEGEND_FORMAT: 'legend_format',
REDUCE_TO: 'reduce_to',
};
const ADD_ONS_KEYS_TO_QUERY_PATH = {
@@ -40,13 +41,14 @@ const ADD_ONS_KEYS_TO_QUERY_PATH = {
[ADD_ONS_KEYS.ORDER_BY]: 'orderBy',
[ADD_ONS_KEYS.LIMIT]: 'limit',
[ADD_ONS_KEYS.LEGEND_FORMAT]: 'legend',
[ADD_ONS_KEYS.REDUCE_TO]: 'reduceTo',
};
const ADD_ONS = [
{
icon: <BarChart2 size={14} />,
label: 'Group By',
key: 'group_by',
key: ADD_ONS_KEYS.GROUP_BY,
description:
'Break down data by attributes like service name, endpoint, status code, or region. Essential for spotting patterns and comparing performance across different segments.',
docLink: 'https://signoz.io/docs/userguide/query-builder-v5/#grouping',
@@ -54,7 +56,7 @@ const ADD_ONS = [
{
icon: <ScrollText size={14} />,
label: 'Having',
key: 'having',
key: ADD_ONS_KEYS.HAVING,
description:
'Filter grouped results based on aggregate conditions. Show only groups meeting specific criteria, like error rates > 5% or p99 latency > 500',
docLink:
@@ -63,7 +65,7 @@ const ADD_ONS = [
{
icon: <ScrollText size={14} />,
label: 'Order By',
key: 'order_by',
key: ADD_ONS_KEYS.ORDER_BY,
description:
'Sort results to surface what matters most. Quickly identify slowest operations, most frequent errors, or highest resource consumers.',
docLink:
@@ -72,7 +74,7 @@ const ADD_ONS = [
{
icon: <ScrollText size={14} />,
label: 'Limit',
key: 'limit',
key: ADD_ONS_KEYS.LIMIT,
description:
'Show only the top/bottom N results. Perfect for focusing on outliers, reducing noise, and improving dashboard performance.',
docLink:
@@ -81,7 +83,7 @@ const ADD_ONS = [
{
icon: <ScrollText size={14} />,
label: 'Legend format',
key: 'legend_format',
key: ADD_ONS_KEYS.LEGEND_FORMAT,
description:
'Customize series labels using variables like {{service.name}}-{{endpoint}}. Makes charts readable at a glance during incident investigation.',
docLink:
@@ -92,7 +94,7 @@ const ADD_ONS = [
const REDUCE_TO = {
icon: <ScrollText size={14} />,
label: 'Reduce to',
key: 'reduce_to',
key: ADD_ONS_KEYS.REDUCE_TO,
description:
'Apply mathematical operations like sum, average, min, max, or percentiles to reduce multiple time series into a single value.',
docLink:
@@ -218,10 +220,9 @@ function QueryAddOns({
);
const availableAddOnKeys = new Set(filteredAddOns.map((addOn) => addOn.key));
// Filter and set selected views: add-ons that are both active and available
setSelectedViews(
ADD_ONS.filter(
filteredAddOns.filter(
(addOn) =>
activeAddOnKeys.has(addOn.key) && availableAddOnKeys.has(addOn.key),
),

View File

@@ -1,6 +1,12 @@
/* eslint-disable */
import { fireEvent, render, screen } from '@testing-library/react';
import React from 'react';
import {
fireEvent,
render,
screen,
userEvent,
waitFor,
within,
} from 'tests/test-utils';
import QueryAddOns from '../QueryV2/QueryAddOns/QueryAddOns';
import { PANEL_TYPES } from 'constants/queryBuilder';
@@ -55,16 +61,7 @@ jest.mock('../QueryV2/QueryAddOns/HavingFilter/HavingFilter', () => ({
),
}));
jest.mock(
'container/QueryBuilder/filters/ReduceToFilter/ReduceToFilter',
() => ({
ReduceToFilter: ({ onChange }: any) => (
<button data-testid="reduce-to" onClick={() => onChange('sum')}>
ReduceToFilter
</button>
),
}),
);
// ReduceToFilter is not mocked - we test the actual Ant Design Select component
function baseQuery(overrides: Partial<any> = {}): any {
return {
@@ -140,7 +137,7 @@ describe('QueryAddOns', () => {
expect(screen.getByTestId('order-by-content')).toBeInTheDocument();
});
it('limit input auto-opens when limit is set and changing it calls handler', () => {
it('limit input auto-opens when limit is set and changing it calls handler', async () => {
render(
<QueryAddOns
query={baseQuery({ limit: 5 })}
@@ -183,4 +180,88 @@ describe('QueryAddOns', () => {
expect(screen.getByTestId('limit-content')).toBeInTheDocument();
expect(limitInput.value).toBe('7');
});
it('shows reduce-to add-on when showReduceTo is true', () => {
render(
<QueryAddOns
query={baseQuery()}
version="v5"
isListViewPanel={false}
showReduceTo
panelType={PANEL_TYPES.TIME_SERIES}
index={0}
isForTraceOperator={false}
/>,
);
expect(screen.getByTestId('query-add-on-reduce_to')).toBeInTheDocument();
});
it('auto-opens reduce-to content when reduceTo is set', () => {
render(
<QueryAddOns
query={baseQuery({ reduceTo: 'sum' })}
version="v5"
isListViewPanel={false}
showReduceTo
panelType={PANEL_TYPES.TIME_SERIES}
index={0}
isForTraceOperator={false}
/>,
);
expect(screen.getByTestId('reduce-to-content')).toBeInTheDocument();
});
it('calls handleSetQueryData when reduce-to value changes', async () => {
const user = userEvent.setup({ pointerEventsCheck: 0 });
const query = baseQuery({
reduceTo: 'avg',
aggregations: [{ id: 'a', operator: 'count', reduceTo: 'avg' }],
});
render(
<QueryAddOns
query={query}
version="v5"
isListViewPanel={false}
showReduceTo
panelType={PANEL_TYPES.TIME_SERIES}
index={0}
isForTraceOperator={false}
/>,
);
// Wait for the reduce-to content section to be visible (it auto-opens when reduceTo is set)
await waitFor(() => {
expect(screen.getByTestId('reduce-to-content')).toBeInTheDocument();
});
// Get the Select component by its role (combobox)
// The Select is within the reduce-to-content section
const reduceToContent = screen.getByTestId('reduce-to-content');
const selectCombobox = within(reduceToContent).getByRole('combobox');
// Open the dropdown by clicking on the combobox
await user.click(selectCombobox);
// Wait for the dropdown listbox to appear
await screen.findByRole('listbox');
// Find and click the "Sum" option
const sumOption = await screen.findByText('Sum of values in timeframe');
await user.click(sumOption);
// Verify the handler was called with the correct value
await waitFor(() => {
expect(mockHandleSetQueryData).toHaveBeenCalledWith(0, {
...query,
aggregations: [
{
...(query.aggregations?.[0] as any),
reduceTo: 'sum',
},
],
});
});
});
});

View File

@@ -1,11 +1,18 @@
import './styles.scss';
import { Select } from 'antd';
import { WarningFilled } from '@ant-design/icons';
import { Select, Tooltip } from 'antd';
import { DefaultOptionType } from 'antd/es/select';
import classNames from 'classnames';
import { useMemo } from 'react';
import { UniversalYAxisUnitMappings } from './constants';
import { UniversalYAxisUnit, YAxisUnitSelectorProps } from './types';
import { getYAxisCategories, mapMetricUnitToUniversalUnit } from './utils';
import {
getUniversalNameFromMetricUnit,
getYAxisCategories,
mapMetricUnitToUniversalUnit,
} from './utils';
function YAxisUnitSelector({
value,
@@ -14,9 +21,24 @@ function YAxisUnitSelector({
loading = false,
'data-testid': dataTestId,
source,
initialValue,
}: YAxisUnitSelectorProps): JSX.Element {
const universalUnit = mapMetricUnitToUniversalUnit(value);
const incompatibleUnitMessage = useMemo(() => {
if (!initialValue || !value || loading) return '';
const initialUniversalUnit = mapMetricUnitToUniversalUnit(initialValue);
const currentUniversalUnit = mapMetricUnitToUniversalUnit(value);
if (initialUniversalUnit !== currentUniversalUnit) {
const initialUniversalUnitName = getUniversalNameFromMetricUnit(
initialValue,
);
const currentUniversalUnitName = getUniversalNameFromMetricUnit(value);
return `Unit mismatch. Saved unit is ${initialUniversalUnitName}, but ${currentUniversalUnitName} is selected.`;
}
return '';
}, [initialValue, value, loading]);
const handleSearch = (
searchTerm: string,
currentOption: DefaultOptionType | undefined,
@@ -49,6 +71,16 @@ function YAxisUnitSelector({
placeholder={placeholder}
filterOption={(input, option): boolean => handleSearch(input, option)}
loading={loading}
suffixIcon={
incompatibleUnitMessage ? (
<Tooltip title={incompatibleUnitMessage}>
<WarningFilled />
</Tooltip>
) : undefined
}
className={classNames({
'warning-state': incompatibleUnitMessage,
})}
data-testid={dataTestId}
>
{categories.map((category) => (

View File

@@ -91,4 +91,36 @@ describe('YAxisUnitSelector', () => {
expect(screen.getByText('Bytes (B)')).toBeInTheDocument();
expect(screen.getByText('Seconds (s)')).toBeInTheDocument();
});
it('shows warning message when incompatible unit is selected', () => {
render(
<YAxisUnitSelector
source={YAxisSource.ALERTS}
value="By"
onChange={mockOnChange}
initialValue="s"
/>,
);
const warningIcon = screen.getByLabelText('warning');
expect(warningIcon).toBeInTheDocument();
fireEvent.mouseOver(warningIcon);
return screen
.findByText(
'Unit mismatch. Saved unit is Seconds (s), but Bytes (B) is selected.',
)
.then((el) => expect(el).toBeInTheDocument());
});
it('does not show warning message when compatible unit is selected', () => {
render(
<YAxisUnitSelector
source={YAxisSource.ALERTS}
value="s"
onChange={mockOnChange}
initialValue="s"
/>,
);
const warningIcon = screen.queryByLabelText('warning');
expect(warningIcon).not.toBeInTheDocument();
});
});

View File

@@ -3,3 +3,13 @@
width: 220px;
}
}
.warning-state {
.ant-select-selector {
border-color: var(--bg-amber-400) !important;
}
.anticon {
color: var(--bg-amber-400) !important;
}
}

View File

@@ -6,6 +6,7 @@ export interface YAxisUnitSelectorProps {
disabled?: boolean;
'data-testid'?: string;
source: YAxisSource;
initialValue?: string;
}
export enum UniversalYAxisUnit {

View File

@@ -55,6 +55,7 @@ export const REACT_QUERY_KEY = {
GET_METRIC_DETAILS: 'GET_METRIC_DETAILS',
GET_RELATED_METRICS: 'GET_RELATED_METRICS',
GET_INSPECT_METRICS_DETAILS: 'GET_INSPECT_METRICS_DETAILS',
GET_METRIC_METADATA: 'GET_METRIC_METADATA',
// Traces Funnels Query Keys
GET_DOMAINS_LIST: 'GET_DOMAINS_LIST',

View File

@@ -5,9 +5,11 @@ import { useCreateAlertState } from 'container/CreateAlertV2/context';
import ChartPreviewComponent from 'container/FormAlertRules/ChartPreview';
import PlotTag from 'container/NewWidget/LeftContainer/WidgetGraph/PlotTag';
import { useQueryBuilder } from 'hooks/queryBuilder/useQueryBuilder';
import { useState } from 'react';
import useGetYAxisUnit from 'hooks/useGetYAxisUnit';
import { useEffect, useState } from 'react';
import { useSelector } from 'react-redux';
import { AppState } from 'store/reducers';
import { AlertTypes } from 'types/api/alerts/alertTypes';
import { AlertDef } from 'types/api/alerts/def';
import { EQueryType } from 'types/common/dashboard';
import { GlobalReducer } from 'types/reducer/globalTime';
@@ -18,7 +20,13 @@ export interface ChartPreviewProps {
function ChartPreview({ alertDef }: ChartPreviewProps): JSX.Element {
const { currentQuery, panelType, stagedQuery } = useQueryBuilder();
const { thresholdState, alertState, setAlertState } = useCreateAlertState();
const {
alertType,
thresholdState,
alertState,
setAlertState,
isEditMode,
} = useCreateAlertState();
const { selectedTime: globalSelectedInterval } = useSelector<
AppState,
GlobalReducer
@@ -27,6 +35,25 @@ function ChartPreview({ alertDef }: ChartPreviewProps): JSX.Element {
const yAxisUnit = alertState.yAxisUnit || '';
const fetchYAxisUnit =
!isEditMode && alertType === AlertTypes.METRICS_BASED_ALERT;
const selectedQueryName = thresholdState.selectedQuery;
const { yAxisUnit: initialYAxisUnit, isLoading } = useGetYAxisUnit(
selectedQueryName,
{
enabled: fetchYAxisUnit,
},
);
// Every time a new metric is selected, set the y-axis unit to its unit value if present
// Only for metrics-based alerts
useEffect(() => {
if (fetchYAxisUnit) {
setAlertState({ type: 'SET_Y_AXIS_UNIT', payload: initialYAxisUnit });
}
}, [initialYAxisUnit, setAlertState, fetchYAxisUnit]);
const headline = (
<div className="chart-preview-headline">
<PlotTag
@@ -34,11 +61,13 @@ function ChartPreview({ alertDef }: ChartPreviewProps): JSX.Element {
panelType={panelType || PANEL_TYPES.TIME_SERIES}
/>
<YAxisUnitSelector
value={alertState.yAxisUnit}
value={yAxisUnit}
initialValue={initialYAxisUnit}
onChange={(value): void => {
setAlertState({ type: 'SET_Y_AXIS_UNIT', payload: value });
}}
source={YAxisSource.ALERTS}
loading={isLoading}
/>
</div>
);

View File

@@ -120,7 +120,6 @@ function FullView({
originalGraphType: selectedPanelType,
};
}
updatedQuery.builder.queryData[0].pageSize = 10;
return {
query: updatedQuery,
graphType: PANEL_TYPES.LIST,

View File

@@ -137,7 +137,6 @@ function GridCardGraph({
originalGraphType: widget.panelTypes,
};
}
updatedQuery.builder.queryData[0].pageSize = 10;
const initialDataSource = updatedQuery.builder.queryData[0].dataSource;
return {
query: updatedQuery,

View File

@@ -58,6 +58,27 @@
.explore-content {
padding: 0 8px;
.y-axis-unit-selector-container {
display: flex;
align-items: center;
gap: 10px;
padding-top: 10px;
margin-bottom: 10px;
.save-unit-container {
display: flex;
align-items: center;
gap: 10px;
.ant-btn {
border-radius: 2px;
.ant-typography {
font-size: 12px;
}
}
}
}
.ant-space {
margin-top: 10px;
margin-bottom: 20px;
@@ -75,6 +96,14 @@
.time-series-view {
min-width: 100%;
width: 100%;
position: relative;
.no-unit-warning {
position: absolute;
top: 30px;
right: 40px;
z-index: 1000;
}
}
.time-series-container {

View File

@@ -1,7 +1,7 @@
import './Explorer.styles.scss';
import * as Sentry from '@sentry/react';
import { Switch } from 'antd';
import { Switch, Tooltip } from 'antd';
import logEvent from 'api/common/logEvent';
import { QueryBuilderV2 } from 'components/QueryBuilderV2/QueryBuilderV2';
import WarningPopover from 'components/WarningPopover/WarningPopover';
@@ -25,10 +25,14 @@ import { generateExportToDashboardLink } from 'utils/dashboard/generateExportToD
import { v4 as uuid } from 'uuid';
import { MetricsExplorerEventKeys, MetricsExplorerEvents } from '../events';
// import QuerySection from './QuerySection';
import MetricDetails from '../MetricDetails/MetricDetails';
import TimeSeries from './TimeSeries';
import { ExplorerTabs } from './types';
import { splitQueryIntoOneChartPerQuery } from './utils';
import {
getMetricUnits,
splitQueryIntoOneChartPerQuery,
useGetMetrics,
} from './utils';
const ONE_CHART_PER_QUERY_ENABLED_KEY = 'isOneChartPerQueryEnabled';
@@ -40,6 +44,34 @@ function Explorer(): JSX.Element {
currentQuery,
} = useQueryBuilder();
const { safeNavigate } = useSafeNavigate();
const [isMetricDetailsOpen, setIsMetricDetailsOpen] = useState(false);
const metricNames = useMemo(() => {
const currentMetricNames: string[] = [];
stagedQuery?.builder.queryData.forEach((query) => {
if (query.aggregateAttribute?.key) {
currentMetricNames.push(query.aggregateAttribute?.key);
}
});
return currentMetricNames;
}, [stagedQuery]);
const {
metrics,
isLoading: isMetricUnitsLoading,
isError: isMetricUnitsError,
} = useGetMetrics(metricNames);
const units = useMemo(() => getMetricUnits(metrics), [metrics]);
const areAllMetricUnitsSame = useMemo(
() =>
!isMetricUnitsLoading &&
!isMetricUnitsError &&
units.length > 0 &&
units.every((unit) => unit && unit === units[0]),
[units, isMetricUnitsLoading, isMetricUnitsError],
);
const [searchParams, setSearchParams] = useSearchParams();
const isOneChartPerQueryEnabled =
@@ -48,7 +80,66 @@ function Explorer(): JSX.Element {
const [showOneChartPerQuery, toggleShowOneChartPerQuery] = useState(
isOneChartPerQueryEnabled,
);
const [disableOneChartPerQuery, toggleDisableOneChartPerQuery] = useState(
false,
);
const [selectedTab] = useState<ExplorerTabs>(ExplorerTabs.TIME_SERIES);
const [yAxisUnit, setYAxisUnit] = useState<string | undefined>();
const unitsLength = useMemo(() => units.length, [units]);
const firstUnit = useMemo(() => units?.[0], [units]);
useEffect(() => {
// Set the y axis unit to the first metric unit if
// 1. There is one metric unit and it is not empty
// 2. All metric units are the same and not empty
// Else, set the y axis unit to empty if
// 1. There is more than one metric unit and they are not all the same
// 2. There are no metric units
// 3. There is exactly one metric unit but it is empty/undefined
if (unitsLength === 0) {
setYAxisUnit(undefined);
} else if (unitsLength === 1 && firstUnit) {
setYAxisUnit(firstUnit);
} else if (unitsLength === 1 && !firstUnit) {
setYAxisUnit(undefined);
} else if (areAllMetricUnitsSame) {
if (firstUnit) {
setYAxisUnit(firstUnit);
} else {
setYAxisUnit(undefined);
}
} else if (unitsLength > 1 && !areAllMetricUnitsSame) {
setYAxisUnit(undefined);
}
}, [unitsLength, firstUnit, areAllMetricUnitsSame]);
useEffect(() => {
// Don't apply logic during loading to avoid overwriting user preferences
if (isMetricUnitsLoading) {
return;
}
// Force one chart per query on (and disable the toggle) if -
// 1. There is more than one metric
// 2. The metric units are not the same
if (units.length > 1 && !areAllMetricUnitsSame) {
toggleShowOneChartPerQuery(true);
toggleDisableOneChartPerQuery(true);
} else if (units.length <= 1) {
toggleShowOneChartPerQuery(false);
toggleDisableOneChartPerQuery(true);
} else {
// When units are the same and loading is complete, restore URL-based preference
toggleShowOneChartPerQuery(isOneChartPerQueryEnabled);
toggleDisableOneChartPerQuery(false);
}
}, [
units,
areAllMetricUnitsSame,
isMetricUnitsLoading,
isOneChartPerQueryEnabled,
]);
const handleToggleShowOneChartPerQuery = (): void => {
toggleShowOneChartPerQuery(!showOneChartPerQuery);
@@ -68,15 +159,20 @@ function Explorer(): JSX.Element {
[updateAllQueriesOperators],
);
const exportDefaultQuery = useMemo(
() =>
updateAllQueriesOperators(
currentQuery || initialQueriesMap[DataSource.METRICS],
PANEL_TYPES.TIME_SERIES,
DataSource.METRICS,
),
[currentQuery, updateAllQueriesOperators],
);
const exportDefaultQuery = useMemo(() => {
const query = updateAllQueriesOperators(
currentQuery || initialQueriesMap[DataSource.METRICS],
PANEL_TYPES.TIME_SERIES,
DataSource.METRICS,
);
if (yAxisUnit && !query.unit) {
return {
...query,
unit: yAxisUnit,
};
}
return query;
}, [currentQuery, updateAllQueriesOperators, yAxisUnit]);
useShareBuilderUrl({ defaultValue: defaultQuery });
@@ -90,8 +186,16 @@ function Explorer(): JSX.Element {
const widgetId = uuid();
let query = queryToExport || exportDefaultQuery;
if (yAxisUnit && !query.unit) {
query = {
...query,
unit: yAxisUnit,
};
}
const dashboardEditView = generateExportToDashboardLink({
query: queryToExport || exportDefaultQuery,
query,
panelType: PANEL_TYPES.TIME_SERIES,
dashboardId: dashboard.id,
widgetId,
@@ -99,17 +203,33 @@ function Explorer(): JSX.Element {
safeNavigate(dashboardEditView);
},
[exportDefaultQuery, safeNavigate],
[exportDefaultQuery, safeNavigate, yAxisUnit],
);
const splitedQueries = useMemo(
() =>
splitQueryIntoOneChartPerQuery(
stagedQuery || initialQueriesMap[DataSource.METRICS],
metricNames,
units,
),
[stagedQuery],
[stagedQuery, metricNames, units],
);
const [selectedMetricName, setSelectedMetricName] = useState<string | null>(
null,
);
const handleOpenMetricDetails = (metricName: string): void => {
setIsMetricDetailsOpen(true);
setSelectedMetricName(metricName);
};
const handleCloseMetricDetails = (): void => {
setIsMetricDetailsOpen(false);
setSelectedMetricName(null);
};
useEffect(() => {
logEvent(MetricsExplorerEvents.TabChanged, {
[MetricsExplorerEventKeys.Tab]: 'explorer',
@@ -123,17 +243,44 @@ function Explorer(): JSX.Element {
const [warning, setWarning] = useState<Warning | undefined>(undefined);
const oneChartPerQueryDisabledTooltip = useMemo(() => {
if (splitedQueries.length <= 1) {
return 'One chart per query cannot be toggled for a single query.';
}
if (units.length <= 1) {
return 'One chart per query cannot be toggled when there is only one metric.';
}
if (disableOneChartPerQuery) {
return 'One chart per query cannot be disabled for multiple queries with different units.';
}
return undefined;
}, [disableOneChartPerQuery, splitedQueries.length, units.length]);
// Show the y axis unit selector if -
// 1. There is only one metric
// 2. The metric has no saved unit
const showYAxisUnitSelector = useMemo(
() => !isMetricUnitsLoading && units.length === 1 && !units[0],
[units, isMetricUnitsLoading],
);
return (
<Sentry.ErrorBoundary fallback={<ErrorBoundaryFallback />}>
<div className="metrics-explorer-explore-container">
<div className="explore-header">
<div className="explore-header-left-actions">
<span>1 chart/query</span>
<Switch
checked={showOneChartPerQuery}
onChange={handleToggleShowOneChartPerQuery}
size="small"
/>
<Tooltip
open={disableOneChartPerQuery ? undefined : false}
title={oneChartPerQueryDisabledTooltip}
>
<Switch
checked={showOneChartPerQuery}
onChange={handleToggleShowOneChartPerQuery}
disabled={disableOneChartPerQuery || splitedQueries.length <= 1}
size="small"
/>
</Tooltip>
</div>
<div className="explore-header-right-actions">
{!isEmpty(warning) && <WarningPopover warningData={warning} />}
@@ -174,6 +321,16 @@ function Explorer(): JSX.Element {
<TimeSeries
showOneChartPerQuery={showOneChartPerQuery}
setWarning={setWarning}
areAllMetricUnitsSame={areAllMetricUnitsSame}
isMetricUnitsLoading={isMetricUnitsLoading}
isMetricUnitsError={isMetricUnitsError}
metricUnits={units}
metricNames={metricNames}
metrics={metrics}
handleOpenMetricDetails={handleOpenMetricDetails}
yAxisUnit={yAxisUnit}
setYAxisUnit={setYAxisUnit}
showYAxisUnitSelector={showYAxisUnitSelector}
/>
)}
{/* TODO: Enable once we have resolved all related metrics issues */}
@@ -187,9 +344,17 @@ function Explorer(): JSX.Element {
query={exportDefaultQuery}
sourcepage={DataSource.METRICS}
onExport={handleExport}
isOneChartPerQuery={false}
isOneChartPerQuery={showOneChartPerQuery}
splitedQueries={splitedQueries}
/>
{isMetricDetailsOpen && (
<MetricDetails
metricName={selectedMetricName}
isOpen={isMetricDetailsOpen}
onClose={handleCloseMetricDetails}
isModalTimeSelection={false}
/>
)}
</Sentry.ErrorBoundary>
);
}

View File

@@ -1,14 +1,18 @@
import { Color } from '@signozhq/design-tokens';
import { Tooltip, Typography } from 'antd';
import { isAxiosError } from 'axios';
import classNames from 'classnames';
import YAxisUnitSelector from 'components/YAxisUnitSelector';
import { YAxisSource } from 'components/YAxisUnitSelector/types';
import { ENTITY_VERSION_V5 } from 'constants/app';
import { initialQueriesMap, PANEL_TYPES } from 'constants/queryBuilder';
import { REACT_QUERY_KEY } from 'constants/reactQueryKeys';
import { BuilderUnitsFilter } from 'container/QueryBuilder/filters/BuilderUnitsFilter/BuilderUnits';
import TimeSeriesView from 'container/TimeSeriesView/TimeSeriesView';
import { convertDataValueToMs } from 'container/TimeSeriesView/utils';
import { useQueryBuilder } from 'hooks/queryBuilder/useQueryBuilder';
import { GetMetricQueryRange } from 'lib/dashboard/getQueryResults';
import { useMemo, useState } from 'react';
import { AlertTriangle } from 'lucide-react';
import { useMemo } from 'react';
import { useQueries } from 'react-query';
import { useSelector } from 'react-redux';
import { AppState } from 'store/reducers';
@@ -24,6 +28,13 @@ import { splitQueryIntoOneChartPerQuery } from './utils';
function TimeSeries({
showOneChartPerQuery,
setWarning,
isMetricUnitsLoading,
metricUnits,
metricNames,
handleOpenMetricDetails,
yAxisUnit,
setYAxisUnit,
showYAxisUnitSelector,
}: TimeSeriesProps): JSX.Element {
const { stagedQuery, currentQuery } = useQueryBuilder();
@@ -56,13 +67,14 @@ function TimeSeries({
showOneChartPerQuery
? splitQueryIntoOneChartPerQuery(
stagedQuery || initialQueriesMap[DataSource.METRICS],
metricNames,
metricUnits,
)
: [stagedQuery || initialQueriesMap[DataSource.METRICS]],
[showOneChartPerQuery, stagedQuery],
// eslint-disable-next-line react-hooks/exhaustive-deps
[showOneChartPerQuery, stagedQuery, JSON.stringify(metricUnits)],
);
const [yAxisUnit, setYAxisUnit] = useState<string>('');
const queries = useQueries(
queryPayloads.map((payload, index) => ({
queryKey: [
@@ -126,32 +138,148 @@ function TimeSeries({
setYAxisUnit(value);
};
// TODO: Enable once we have resolved all related metrics v2 api issues
// Show the save unit button if
// 1. There is only one metric
// 2. The metric has no saved unit
// 3. The user has selected a unit
// const showSaveUnitButton = useMemo(
// () =>
// metricUnits.length === 1 &&
// Boolean(metrics?.[0]) &&
// !metricUnits[0] &&
// yAxisUnit,
// [metricUnits, metrics, yAxisUnit],
// );
// const {
// mutate: updateMetricMetadata,
// isLoading: isUpdatingMetricMetadata,
// } = useUpdateMetricMetadata();
// const handleSaveUnit = (): void => {
// updateMetricMetadata(
// {
// metricName: metricNames[0],
// payload: {
// unit: yAxisUnit,
// description: metrics[0]?.description ?? '',
// metricType: metrics[0]?.type as MetricType,
// temporality: metrics[0]?.temporality,
// },
// },
// {
// onSuccess: () => {
// notifications.success({
// message: 'Unit saved successfully',
// });
// queryClient.invalidateQueries([
// REACT_QUERY_KEY.GET_METRIC_DETAILS,
// metricNames[0],
// ]);
// },
// onError: () => {
// notifications.error({
// message: 'Failed to save unit',
// });
// },
// },
// );
// };
return (
<>
<BuilderUnitsFilter onChange={onUnitChangeHandler} yAxisUnit={yAxisUnit} />
<div className="y-axis-unit-selector-container">
{showYAxisUnitSelector && (
<>
<YAxisUnitSelector
onChange={onUnitChangeHandler}
value={yAxisUnit}
source={YAxisSource.EXPLORER}
data-testid="y-axis-unit-selector"
/>
{/* TODO: Enable once we have resolved all related metrics v2 api issues */}
{/* {showSaveUnitButton && (
<div className="save-unit-container">
<Typography.Text>
Save the selected unit for this metric?
</Typography.Text>
<Button
type="primary"
size="small"
disabled={isUpdatingMetricMetadata}
onClick={handleSaveUnit}
>
<Typography.Paragraph>Yes</Typography.Paragraph>
</Button>
</div>
)} */}
</>
)}
</div>
<div
className={classNames({
'time-series-container': changeLayoutForOneChartPerQuery,
})}
>
{responseData.map((datapoint, index) => (
<div
className="time-series-view"
// eslint-disable-next-line react/no-array-index-key
key={index}
>
<TimeSeriesView
isFilterApplied={false}
isError={queries[index].isError}
isLoading={queries[index].isLoading}
data={datapoint}
yAxisUnit={yAxisUnit}
dataSource={DataSource.METRICS}
error={queries[index].error as APIError}
setWarning={setWarning}
/>
</div>
))}
{responseData.map((datapoint, index) => {
const isQueryDataItem = index < metricNames.length;
const metricName = isQueryDataItem ? metricNames[index] : undefined;
const metricUnit = isQueryDataItem ? metricUnits[index] : undefined;
// Show the no unit warning if -
// 1. The metric query is not loading
// 2. The metric units are not loading
// 3. There is more than one metric
// 4. The current metric unit is empty
// 5. Is a queryData item
const isMetricUnitEmpty =
isQueryDataItem &&
!queries[index].isLoading &&
!isMetricUnitsLoading &&
metricUnits.length > 1 &&
!metricUnit &&
metricName;
const currentYAxisUnit = yAxisUnit || metricUnit;
return (
<div
className="time-series-view"
// eslint-disable-next-line react/no-array-index-key
key={index}
>
{isMetricUnitEmpty && metricName && (
<Tooltip
className="no-unit-warning"
title={
<Typography.Text>
This metric does not have a unit. Please set one for it in the{' '}
<Typography.Link
onClick={(): void => handleOpenMetricDetails(metricName)}
>
metric details
</Typography.Link>{' '}
page.
</Typography.Text>
}
>
<AlertTriangle size={16} color={Color.BG_AMBER_400} />
</Tooltip>
)}
<TimeSeriesView
isFilterApplied={false}
isError={queries[index].isError}
isLoading={queries[index].isLoading || isMetricUnitsLoading}
data={datapoint}
yAxisUnit={currentYAxisUnit}
dataSource={DataSource.METRICS}
error={queries[index].error as APIError}
setWarning={setWarning}
/>
</div>
);
})}
</div>
</>
);


@@ -1,4 +1,6 @@
import { render, screen } from '@testing-library/react';
import { Temporality } from 'api/metricsExplorer/getMetricDetails';
import { MetricType } from 'api/metricsExplorer/getMetricsList';
import { initialQueriesMap, PANEL_TYPES } from 'constants/queryBuilder';
import * as useOptionsMenuHooks from 'container/OptionsMenu';
import * as useUpdateDashboardHooks from 'hooks/dashboard/useUpdateDashboard';
@@ -12,13 +14,18 @@ import { MemoryRouter } from 'react-router-dom';
import { useSearchParams } from 'react-router-dom-v5-compat';
import store from 'store';
import { LicenseEvent } from 'types/api/licensesV3/getActive';
import { DataSource } from 'types/common/queryBuilder';
import { MetricMetadata } from 'types/api/metricsExplorer/v2/getMetricMetadata';
import { BaseAutocompleteData } from 'types/api/queryBuilder/queryAutocompleteResponse';
import { DataSource, QueryBuilderContextType } from 'types/common/queryBuilder';
import Explorer from '../Explorer';
import * as useGetMetricsHooks from '../utils';
const mockSetSearchParams = jest.fn();
const queryClient = new QueryClient();
const mockUpdateAllQueriesOperators = jest.fn();
const mockUpdateAllQueriesOperators = jest
.fn()
.mockReturnValue(initialQueriesMap[DataSource.METRICS]);
const mockUseQueryBuilderData = {
handleRunQuery: jest.fn(),
stagedQuery: initialQueriesMap[DataSource.METRICS],
@@ -126,6 +133,30 @@ jest.spyOn(useQueryBuilderHooks, 'useQueryBuilder').mockReturnValue({
...mockUseQueryBuilderData,
} as any);
const Y_AXIS_UNIT_SELECTOR_TEST_ID = 'y-axis-unit-selector';
const mockMetric: MetricMetadata = {
type: MetricType.SUM,
description: 'metric1 description',
unit: 'metric1 unit',
temporality: Temporality.CUMULATIVE,
isMonotonic: true,
};
function renderExplorer(): void {
render(
<QueryClientProvider client={queryClient}>
<MemoryRouter>
<Provider store={store}>
<ErrorModalProvider>
<Explorer />
</ErrorModalProvider>
</Provider>
</MemoryRouter>
</QueryClientProvider>,
);
}
describe('Explorer', () => {
beforeEach(() => {
jest.clearAllMocks();
@@ -142,17 +173,7 @@ describe('Explorer', () => {
mockSetSearchParams,
]);
render(
<QueryClientProvider client={queryClient}>
<MemoryRouter>
<Provider store={store}>
<ErrorModalProvider>
<Explorer />
</ErrorModalProvider>
</Provider>
</MemoryRouter>
</QueryClientProvider>,
);
renderExplorer();
expect(mockUpdateAllQueriesOperators).toHaveBeenCalledWith(
initialQueriesMap[DataSource.METRICS],
@@ -166,18 +187,13 @@ describe('Explorer', () => {
new URLSearchParams({ isOneChartPerQueryEnabled: 'true' }),
mockSetSearchParams,
]);
jest.spyOn(useGetMetricsHooks, 'useGetMetrics').mockReturnValue({
isLoading: false,
isError: false,
metrics: [mockMetric, mockMetric],
});
render(
<QueryClientProvider client={queryClient}>
<MemoryRouter>
<Provider store={store}>
<ErrorModalProvider>
<Explorer />
</ErrorModalProvider>
</Provider>
</MemoryRouter>
</QueryClientProvider>,
);
renderExplorer();
const toggle = screen.getByRole('switch');
expect(toggle).toBeChecked();
@@ -188,20 +204,132 @@ describe('Explorer', () => {
new URLSearchParams({ isOneChartPerQueryEnabled: 'false' }),
mockSetSearchParams,
]);
jest.spyOn(useGetMetricsHooks, 'useGetMetrics').mockReturnValue({
isLoading: false,
isError: false,
metrics: [mockMetric, mockMetric],
});
render(
<QueryClientProvider client={queryClient}>
<MemoryRouter>
<Provider store={store}>
<ErrorModalProvider>
<Explorer />
</ErrorModalProvider>
</Provider>
</MemoryRouter>
</QueryClientProvider>,
);
renderExplorer();
const toggle = screen.getByRole('switch');
expect(toggle).not.toBeChecked();
});
it('should not render y axis unit selector for a single metric which has a unit', () => {
jest.spyOn(useGetMetricsHooks, 'useGetMetrics').mockReturnValue({
isLoading: false,
isError: false,
metrics: [mockMetric],
});
renderExplorer();
const yAxisUnitSelector = screen.queryByTestId(Y_AXIS_UNIT_SELECTOR_TEST_ID);
expect(yAxisUnitSelector).not.toBeInTheDocument();
});
it('should not render y axis unit selector for multiple metrics with the same unit', () => {
(useSearchParams as jest.Mock).mockReturnValueOnce([
new URLSearchParams({ isOneChartPerQueryEnabled: 'true' }),
mockSetSearchParams,
]);
jest.spyOn(useGetMetricsHooks, 'useGetMetrics').mockReturnValue({
isLoading: false,
isError: false,
metrics: [mockMetric, mockMetric],
});
renderExplorer();
const yAxisUnitSelector = screen.queryByTestId(Y_AXIS_UNIT_SELECTOR_TEST_ID);
expect(yAxisUnitSelector).not.toBeInTheDocument();
});
it('should hide y axis unit selector for multiple metrics with different units', () => {
jest.spyOn(useGetMetricsHooks, 'useGetMetrics').mockReturnValue({
isLoading: false,
isError: false,
metrics: [mockMetric, mockMetric],
});
renderExplorer();
const yAxisUnitSelector = screen.queryByTestId(Y_AXIS_UNIT_SELECTOR_TEST_ID);
expect(yAxisUnitSelector).not.toBeInTheDocument();
// One chart per query toggle should be disabled
const oneChartPerQueryToggle = screen.getByRole('switch');
expect(oneChartPerQueryToggle).toBeDisabled();
});
it('should render empty y axis unit selector for a single metric with no unit', () => {
jest.spyOn(useGetMetricsHooks, 'useGetMetrics').mockReturnValue({
isLoading: false,
isError: false,
metrics: [
{
type: MetricType.SUM,
description: 'metric1 description',
unit: '',
temporality: Temporality.CUMULATIVE,
isMonotonic: true,
},
],
});
renderExplorer();
const yAxisUnitSelector = screen.queryByTestId(Y_AXIS_UNIT_SELECTOR_TEST_ID);
expect(yAxisUnitSelector).toBeInTheDocument();
expect(yAxisUnitSelector).toHaveTextContent('Please select a unit');
});
it('one chart per query should be off and disabled when there is only one query', () => {
jest.spyOn(useGetMetricsHooks, 'useGetMetrics').mockReturnValue({
isLoading: false,
isError: false,
metrics: [mockMetric],
});
renderExplorer();
const oneChartPerQueryToggle = screen.getByRole('switch');
expect(oneChartPerQueryToggle).not.toBeChecked();
expect(oneChartPerQueryToggle).toBeDisabled();
});
it('one chart per query should be enabled by default when there are multiple metrics with the same unit', () => {
const mockQueryData = {
...initialQueriesMap[DataSource.METRICS].builder.queryData[0],
aggregateAttribute: {
...(initialQueriesMap[DataSource.METRICS].builder.queryData[0]
.aggregateAttribute as BaseAutocompleteData),
key: 'metric1',
},
};
const mockStagedQueryWithMultipleQueries = {
...initialQueriesMap[DataSource.METRICS],
builder: {
...initialQueriesMap[DataSource.METRICS].builder,
queryData: [mockQueryData, mockQueryData],
},
};
jest.spyOn(useQueryBuilderHooks, 'useQueryBuilder').mockReturnValue(({
...mockUseQueryBuilderData,
stagedQuery: mockStagedQueryWithMultipleQueries,
} as Partial<QueryBuilderContextType>) as QueryBuilderContextType);
jest.spyOn(useGetMetricsHooks, 'useGetMetrics').mockReturnValue({
isLoading: false,
isError: false,
metrics: [mockMetric, mockMetric],
});
renderExplorer();
const oneChartPerQueryToggle = screen.getByRole('switch');
expect(oneChartPerQueryToggle).toBeEnabled();
});
});


@@ -0,0 +1,180 @@
import { render, RenderResult, screen, waitFor } from '@testing-library/react';
import userEvent from '@testing-library/user-event';
import { Temporality } from 'api/metricsExplorer/getMetricDetails';
import { MetricType } from 'api/metricsExplorer/getMetricsList';
import { UpdateMetricMetadataResponse } from 'api/metricsExplorer/updateMetricMetadata';
import * as useUpdateMetricMetadataHooks from 'hooks/metricsExplorer/useUpdateMetricMetadata';
import { UseUpdateMetricMetadataProps } from 'hooks/metricsExplorer/useUpdateMetricMetadata';
import { UseMutationResult } from 'react-query';
import { ErrorResponse, SuccessResponse } from 'types/api';
import { MetricMetadata } from 'types/api/metricsExplorer/v2/getMetricMetadata';
import TimeSeries from '../TimeSeries';
import { TimeSeriesProps } from '../types';
type MockUpdateMetricMetadata = UseMutationResult<
SuccessResponse<UpdateMetricMetadataResponse> | ErrorResponse,
Error,
UseUpdateMetricMetadataProps
>;
const mockUpdateMetricMetadata = jest.fn();
jest
.spyOn(useUpdateMetricMetadataHooks, 'useUpdateMetricMetadata')
.mockReturnValue(({
mutate: mockUpdateMetricMetadata,
isLoading: false,
} as Partial<MockUpdateMetricMetadata>) as MockUpdateMetricMetadata);
jest.mock('container/TimeSeriesView/TimeSeriesView', () => ({
__esModule: true,
default: jest.fn().mockReturnValue(
<div role="img" aria-label="warning">
TimeSeriesView
</div>,
),
}));
jest.mock('react-query', () => ({
...jest.requireActual('react-query'),
useQueryClient: jest.fn().mockReturnValue({
invalidateQueries: jest.fn(),
}),
useQueries: jest.fn().mockImplementation((queries: any[]) =>
queries.map(() => ({
data: undefined,
isLoading: false,
isError: false,
error: undefined,
})),
),
}));
jest.mock('react-redux', () => ({
...jest.requireActual('react-redux'),
useSelector: jest.fn().mockReturnValue({
globalTime: {
selectedTime: '5min',
maxTime: 1713738000000,
minTime: 1713734400000,
},
}),
}));
const mockMetric: MetricMetadata = {
type: MetricType.SUM,
description: 'metric1 description',
unit: 'metric1 unit',
temporality: Temporality.CUMULATIVE,
isMonotonic: true,
};
const mockSetWarning = jest.fn();
const mockSetIsMetricDetailsOpen = jest.fn();
const mockSetYAxisUnit = jest.fn();
function renderTimeSeries(
overrides: Partial<TimeSeriesProps> = {},
): RenderResult {
return render(
<TimeSeries
showOneChartPerQuery={false}
setWarning={mockSetWarning}
areAllMetricUnitsSame={false}
isMetricUnitsLoading={false}
metricUnits={[]}
metricNames={[]}
metrics={[]}
isMetricUnitsError={false}
handleOpenMetricDetails={mockSetIsMetricDetailsOpen}
yAxisUnit="count"
setYAxisUnit={mockSetYAxisUnit}
showYAxisUnitSelector={false}
// eslint-disable-next-line react/jsx-props-no-spreading
{...overrides}
/>,
);
}
describe('TimeSeries', () => {
it('should render a warning icon when a metric has no unit among multiple metrics', async () => {
const user = userEvent.setup();
const { container } = renderTimeSeries({
metricUnits: ['', 'count'],
metricNames: ['metric1', 'metric2'],
metrics: [undefined, undefined],
});
const alertIcon = container.querySelector('.no-unit-warning') as HTMLElement;
await user.hover(alertIcon);
await waitFor(() =>
expect(
screen.getByText(/This metric does not have a unit/),
).toBeInTheDocument(),
);
});
it('clicking on warning icon tooltip should open metric details modal', async () => {
const user = userEvent.setup();
const { container } = renderTimeSeries({
metricUnits: ['', 'count'],
metricNames: ['metric1', 'metric2'],
metrics: [mockMetric, mockMetric],
yAxisUnit: 'seconds',
});
const alertIcon = container.querySelector('.no-unit-warning') as HTMLElement;
await user.hover(alertIcon);
const metricDetailsLink = await screen.findByText('metric details');
await user.click(metricDetailsLink);
await waitFor(() =>
expect(mockSetIsMetricDetailsOpen).toHaveBeenCalledWith('metric1'),
);
});
// TODO: Unskip this test once the save unit button is implemented
// Tracking at - https://github.com/SigNoz/engineering-pod/issues/3495
it.skip('shows Save unit button when metric had no unit but one is selected', async () => {
const { findByText, getByRole } = renderTimeSeries({
metricUnits: [undefined],
metricNames: ['metric1'],
metrics: [mockMetric],
yAxisUnit: 'seconds',
});
expect(
await findByText('Save the selected unit for this metric?'),
).toBeInTheDocument();
const yesButton = getByRole('button', { name: 'Yes' });
expect(yesButton).toBeInTheDocument();
expect(yesButton).toBeEnabled();
});
// TODO: Unskip this test once the save unit button is implemented
// Tracking at - https://github.com/SigNoz/engineering-pod/issues/3495
it.skip('clicking on save unit button should update metric metadata', async () => {
const user = userEvent.setup();
const { getByRole } = renderTimeSeries({
metricUnits: [''],
metricNames: ['metric1'],
metrics: [mockMetric],
yAxisUnit: 'seconds',
});
const yesButton = getByRole('button', { name: /Yes/i });
await user.click(yesButton);
expect(mockUpdateMetricMetadata).toHaveBeenCalledWith(
{
metricName: 'metric1',
payload: expect.objectContaining({ unit: 'seconds' }),
},
expect.objectContaining({
onSuccess: expect.any(Function),
onError: expect.any(Function),
}),
);
});
});


@@ -0,0 +1,161 @@
import { renderHook } from '@testing-library/react';
import { Temporality } from 'api/metricsExplorer/getMetricDetails';
import { MetricType } from 'api/metricsExplorer/getMetricsList';
import { initialQueriesMap } from 'constants/queryBuilder';
import * as useGetMultipleMetricsHook from 'hooks/metricsExplorer/useGetMultipleMetrics';
import { UseQueryResult } from 'react-query';
import { SuccessResponseV2 } from 'types/api';
import {
MetricMetadata,
MetricMetadataResponse,
} from 'types/api/metricsExplorer/v2/getMetricMetadata';
import { BaseAutocompleteData } from 'types/api/queryBuilder/queryAutocompleteResponse';
import {
IBuilderFormula,
IBuilderQuery,
Query,
} from 'types/api/queryBuilder/queryBuilderData';
import { DataSource } from 'types/common/queryBuilder';
import {
getMetricUnits,
splitQueryIntoOneChartPerQuery,
useGetMetrics,
} from '../utils';
const MOCK_QUERY_DATA_1: IBuilderQuery = {
...initialQueriesMap[DataSource.METRICS].builder.queryData[0],
aggregateAttribute: {
...(initialQueriesMap[DataSource.METRICS].builder.queryData[0]
.aggregateAttribute as BaseAutocompleteData),
key: 'metric1',
},
};
const MOCK_QUERY_DATA_2: IBuilderQuery = {
...initialQueriesMap[DataSource.METRICS].builder.queryData[0],
aggregateAttribute: {
...(initialQueriesMap[DataSource.METRICS].builder.queryData[0]
.aggregateAttribute as BaseAutocompleteData),
key: 'metric2',
},
};
const MOCK_FORMULA_DATA: IBuilderFormula = {
expression: '1 + 1',
disabled: false,
queryName: 'Mock Formula',
legend: 'Mock Legend',
};
const MOCK_QUERY_WITH_MULTIPLE_QUERY_DATA: Query = {
...initialQueriesMap[DataSource.METRICS],
builder: {
...initialQueriesMap[DataSource.METRICS].builder,
queryData: [MOCK_QUERY_DATA_1, MOCK_QUERY_DATA_2],
queryFormulas: [MOCK_FORMULA_DATA, MOCK_FORMULA_DATA],
},
};
describe('splitQueryIntoOneChartPerQuery', () => {
it('should split a query with multiple queryData into multiple distinct queries, each with a single queryData', () => {
const result = splitQueryIntoOneChartPerQuery(
MOCK_QUERY_WITH_MULTIPLE_QUERY_DATA,
['metric1', 'metric2'],
[undefined, 'unit2'],
);
expect(result).toHaveLength(4);
// Verify query 1 has the correct data
expect(result[0].builder.queryData).toHaveLength(1);
expect(result[0].builder.queryData[0]).toEqual(MOCK_QUERY_DATA_1);
expect(result[0].builder.queryFormulas).toHaveLength(0);
expect(result[0].unit).toBeUndefined();
// Verify query 2 has the correct data
expect(result[1].builder.queryData).toHaveLength(1);
expect(result[1].builder.queryData[0]).toEqual(MOCK_QUERY_DATA_2);
expect(result[1].builder.queryFormulas).toHaveLength(0);
expect(result[1].unit).toBe('unit2');
// Verify query 3 has the correct data
expect(result[2].builder.queryFormulas).toHaveLength(1);
expect(result[2].builder.queryFormulas[0]).toEqual(MOCK_FORMULA_DATA);
expect(result[2].builder.queryData).toHaveLength(2); // 2 disabled queries
expect(result[2].builder.queryData[0].disabled).toBe(true);
expect(result[2].builder.queryData[1].disabled).toBe(true);
expect(result[2].unit).toBeUndefined();
// Verify query 4 has the correct data
expect(result[3].builder.queryFormulas).toHaveLength(1);
expect(result[3].builder.queryFormulas[0]).toEqual(MOCK_FORMULA_DATA);
expect(result[3].builder.queryData).toHaveLength(2); // 2 disabled queries
expect(result[3].builder.queryData[0].disabled).toBe(true);
expect(result[3].builder.queryData[1].disabled).toBe(true);
expect(result[3].unit).toBeUndefined();
});
});
const MOCK_METRIC_METADATA: MetricMetadata = {
description: 'Metric 1 description',
unit: 'unit1',
type: MetricType.GAUGE,
temporality: Temporality.DELTA,
isMonotonic: true,
};
describe('useGetMetrics', () => {
beforeEach(() => {
jest
.spyOn(useGetMultipleMetricsHook, 'useGetMultipleMetrics')
.mockReturnValue([
({
isLoading: false,
isError: false,
data: {
httpStatusCode: 200,
data: {
status: 'success',
data: MOCK_METRIC_METADATA,
},
},
} as Partial<
UseQueryResult<SuccessResponseV2<MetricMetadataResponse>, Error>
>) as UseQueryResult<SuccessResponseV2<MetricMetadataResponse>, Error>,
]);
});
it('should return the correct metrics data', () => {
const { result } = renderHook(() => useGetMetrics(['metric1']));
expect(result.current.metrics).toHaveLength(1);
expect(result.current.metrics[0]).toBeDefined();
expect(result.current.metrics[0]).toEqual(MOCK_METRIC_METADATA);
expect(result.current.isLoading).toBe(false);
expect(result.current.isError).toBe(false);
});
it('should return an array of undefined values of the correct length when metrics data is not yet loaded', () => {
jest
.spyOn(useGetMultipleMetricsHook, 'useGetMultipleMetrics')
.mockReturnValue([
({
isLoading: true,
isError: false,
} as Partial<
UseQueryResult<SuccessResponseV2<MetricMetadataResponse>, Error>
>) as UseQueryResult<SuccessResponseV2<MetricMetadataResponse>, Error>,
]);
const { result } = renderHook(() => useGetMetrics(['metric1']));
expect(result.current.metrics).toHaveLength(1);
expect(result.current.metrics[0]).toBeUndefined();
});
});
describe('getMetricUnits', () => {
it('should return the same unit for units that are not known to the universal unit mapper', () => {
const result = getMetricUnits([MOCK_METRIC_METADATA]);
expect(result).toHaveLength(1);
expect(result[0]).toEqual(MOCK_METRIC_METADATA.unit);
});
it('should return universal unit for units that are known to the universal unit mapper', () => {
const result = getMetricUnits([{ ...MOCK_METRIC_METADATA, unit: 'seconds' }]);
expect(result).toHaveLength(1);
expect(result[0]).toBe('s');
});
});


@@ -3,6 +3,7 @@ import { Dispatch, SetStateAction } from 'react';
import { UseQueryResult } from 'react-query';
import { SuccessResponse, Warning } from 'types/api';
import { MetricRangePayloadProps } from 'types/api/metrics/getQueryRange';
import { MetricMetadata } from 'types/api/metricsExplorer/v2/getMetricMetadata';
export enum ExplorerTabs {
TIME_SERIES = 'time-series',
@@ -12,6 +13,16 @@ export enum ExplorerTabs {
export interface TimeSeriesProps {
showOneChartPerQuery: boolean;
setWarning: Dispatch<SetStateAction<Warning | undefined>>;
areAllMetricUnitsSame: boolean;
isMetricUnitsLoading: boolean;
isMetricUnitsError: boolean;
metricUnits: (string | undefined)[];
metricNames: string[];
metrics: (MetricMetadata | undefined)[];
handleOpenMetricDetails: (metricName: string) => void;
yAxisUnit: string | undefined;
setYAxisUnit: (unit: string) => void;
showYAxisUnitSelector: boolean;
}
export interface RelatedMetricsProps {


@@ -1,20 +1,40 @@
import { mapMetricUnitToUniversalUnit } from 'components/YAxisUnitSelector/utils';
import { useGetMultipleMetrics } from 'hooks/metricsExplorer/useGetMultipleMetrics';
import { MetricMetadata } from 'types/api/metricsExplorer/v2/getMetricMetadata';
import { Query } from 'types/api/queryBuilder/queryBuilderData';
import { v4 as uuid } from 'uuid';
export const splitQueryIntoOneChartPerQuery = (query: Query): Query[] => {
/**
* Split a query with multiple queryData into multiple distinct queries, each with a single queryData.
* @param query - The query to split
* @param metricNames - The metric names, used to look up the unit for each split query
* @param units - The units of the metrics, can be undefined if the metric has no unit
* @returns The split queries
*/
export const splitQueryIntoOneChartPerQuery = (
query: Query,
metricNames: string[],
units: (string | undefined)[],
): Query[] => {
const queries: Query[] = [];
query.builder.queryData.forEach((currentQuery) => {
const newQuery = {
...query,
id: uuid(),
builder: {
...query.builder,
queryData: [currentQuery],
queryFormulas: [],
},
};
queries.push(newQuery);
if (currentQuery.aggregateAttribute?.key) {
const metricIndex = metricNames.indexOf(
currentQuery.aggregateAttribute?.key,
);
const unit = metricIndex >= 0 ? units[metricIndex] : undefined;
const newQuery = {
...query,
id: uuid(),
builder: {
...query.builder,
queryData: [currentQuery],
queryFormulas: [],
},
unit,
};
queries.push(newQuery);
}
});
query.builder.queryFormulas.forEach((currentFormula) => {
@@ -35,3 +55,43 @@ export const splitQueryIntoOneChartPerQuery = (query: Query): Query[] => {
return queries;
};
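
A minimal usage sketch of the new signature, reusing the MOCK_QUERY_WITH_MULTIPLE_QUERY_DATA fixture from the test file above (the metric names and units are test fixtures, not real metrics):

// Sketch: split a builder query that has two metric queries and two formulas.
const split = splitQueryIntoOneChartPerQuery(
  MOCK_QUERY_WITH_MULTIPLE_QUERY_DATA,
  ['metric1', 'metric2'],
  [undefined, 'unit2'],
);
// => 4 queries: one per metric (unit resolved by matching the metric name),
// one per formula (formulas keep their source queries, disabled, with no unit).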
/**
* Hook to get data for multiple metrics with a combined loading and error state
* @param metricNames - The names of the metrics to get
* @param isEnabled - Whether the hook is enabled
* @returns The loading state, the metrics data, and the error state
*/
export function useGetMetrics(
metricNames: string[],
isEnabled = true,
): {
isLoading: boolean;
isError: boolean;
metrics: (MetricMetadata | undefined)[];
} {
const metricsData = useGetMultipleMetrics(metricNames, {
enabled: metricNames.length > 0 && isEnabled,
});
return {
isLoading: metricsData.some((metric) => metric.isLoading),
metrics: metricsData
.map((metric) => metric.data?.data)
.map((data) => data?.data),
isError: metricsData.some((metric) => metric.isError),
};
}
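
As a hedged usage sketch (the component name here is hypothetical), the hook collapses the per-metric query states into a single loading/error pair:

function MetricUnitsBadge(): JSX.Element {
  // Hypothetical consumer: fetch metadata for two metrics at once.
  const { metrics, isLoading, isError } = useGetMetrics(['metric1', 'metric2']);
  if (isLoading) return <span>Loading metric metadata...</span>;
  if (isError) return <span>Failed to load metric metadata</span>;
  // Entries stay undefined until their individual query resolves.
  return <span>{metrics.map((m) => m?.unit || 'no unit').join(', ')}</span>;
}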
/**
* To get the units of the metrics in the universal unit standard.
* If the unit is not known to the universal unit mapper, it will return the unit as is.
* @param metrics - The metrics to get the units for
* @returns The units of the metrics, can be undefined if the metric has no unit
*/
export function getMetricUnits(
metrics: (MetricMetadata | undefined)[],
): (string | undefined)[] {
return metrics
.map((metric) => metric?.unit)
.map((unit) => mapMetricUnitToUniversalUnit(unit) || undefined);
}
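
A quick illustration of getMetricUnits, matching the behavior pinned by the tests above ('seconds' is known to the universal unit mapper, 'unit1' is not; MOCK_METRIC_METADATA is the test fixture shown earlier):

getMetricUnits([
  { ...MOCK_METRIC_METADATA, unit: 'seconds' }, // known unit, maps to 's'
  { ...MOCK_METRIC_METADATA, unit: 'unit1' }, // unknown unit, passed through
  undefined, // metric not loaded yet, yields undefined
]);
// => ['s', 'unit1', undefined]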


@@ -131,8 +131,8 @@ function MetricDetails({
>
Open in Explorer
</Button>
{/* Show this based on the feature flag. Will remove before releasing the feature */}
{showInspectFeature && (
{/* Show the inspect button if the metric type is GAUGE */}
{showInspectFeature && openInspectModal && (
<Button
className="inspect-metrics-button"
aria-label="Inspect Metric"


@@ -11,7 +11,7 @@ export interface MetricDetailsProps {
isOpen: boolean;
metricName: string | null;
isModalTimeSelection: boolean;
openInspectModal: (metricName: string) => void;
openInspectModal?: (metricName: string) => void;
}
export interface DashboardsAndAlertsPopoverProps {


@@ -370,10 +370,6 @@ function NewWidget({
// this has been moved here from the left container
const [requestData, setRequestData] = useState<GetQueryResultsProps>(() => {
const updatedQuery = cloneDeep(stagedQuery || initialQueriesMap.metrics);
if (updatedQuery?.builder?.queryData?.[0]) {
updatedQuery.builder.queryData[0].pageSize = 10;
}
if (selectedWidget) {
if (selectedGraph === PANEL_TYPES.LIST) {
return {
@@ -419,16 +415,12 @@ function NewWidget({
useEffect(() => {
if (stagedQuery) {
setIsLoadingPanelData(false);
const updatedStagedQuery = cloneDeep(stagedQuery);
if (updatedStagedQuery?.builder?.queryData?.[0]) {
updatedStagedQuery.builder.queryData[0].pageSize = 10;
}
setRequestData((prev) => ({
...prev,
selectedTime: selectedTime.enum || prev.selectedTime,
globalSelectedInterval: customGlobalSelectedInterval,
graphType: getGraphType(selectedGraph || selectedWidget.panelTypes),
query: updatedStagedQuery,
query: stagedQuery,
fillGaps: selectedWidget.fillSpans || false,
isLogScale: selectedWidget.isLogScale || false,
formatForWeb:


@@ -206,6 +206,10 @@
.ant-select-selector {
border-color: var(--bg-vanilla-300);
background: var(--bg-vanilla-300);
.ant-select-selection-item {
color: var(--text-ink-400);
}
}
.ant-input-number {


@@ -1,7 +1,5 @@
import { Select } from 'antd';
import { ATTRIBUTE_TYPES, PANEL_TYPES } from 'constants/queryBuilder';
import { useEffect, useState } from 'react';
import { MetricAggregateOperator } from 'types/common/queryBuilder';
interface SpaceAggregationOptionsProps {
panelType: PANEL_TYPES | null;
@@ -22,39 +20,13 @@ export default function SpaceAggregationOptions({
operators,
qbVersion,
}: SpaceAggregationOptionsProps): JSX.Element {
const placeHolderText =
panelType === PANEL_TYPES.VALUE || qbVersion === 'v3' ? 'Sum' : 'Sum By';
const [defaultValue, setDefaultValue] = useState(
selectedValue || placeHolderText,
);
useEffect(() => {
if (!selectedValue) {
if (
aggregatorAttributeType === ATTRIBUTE_TYPES.HISTOGRAM ||
aggregatorAttributeType === ATTRIBUTE_TYPES.EXPONENTIAL_HISTOGRAM
) {
setDefaultValue(MetricAggregateOperator.P90);
onSelect(MetricAggregateOperator.P90);
} else if (aggregatorAttributeType === ATTRIBUTE_TYPES.SUM) {
setDefaultValue(MetricAggregateOperator.SUM);
onSelect(MetricAggregateOperator.SUM);
} else if (aggregatorAttributeType === ATTRIBUTE_TYPES.GAUGE) {
setDefaultValue(MetricAggregateOperator.AVG);
onSelect(MetricAggregateOperator.AVG);
}
}
// eslint-disable-next-line react-hooks/exhaustive-deps
}, [aggregatorAttributeType]);
return (
<div
className="spaceAggregationOptionsContainer"
key={aggregatorAttributeType}
>
<Select
defaultValue={defaultValue}
defaultValue={selectedValue}
style={{ minWidth: '5.625rem' }}
disabled={disabled}
onChange={onSelect}


@@ -0,0 +1,16 @@
.selectOptionContainer {
display: flex;
gap: 8px;
justify-content: space-between;
align-items: center;
overflow-x: auto;
&::-webkit-scrollbar {
width: 0.2rem;
height: 0.2rem;
}
}
.option-renderer-tooltip {
pointer-events: none;
}


@@ -1,4 +1,4 @@
import './QueryBuilderSearch.styles.scss';
import './OptionRenderer.styles.scss';
import { Tooltip } from 'antd';
@@ -13,7 +13,11 @@ function OptionRenderer({
return (
<span className="option">
{type ? (
<Tooltip title={`${value}`} placement="topLeft">
<Tooltip
title={`${value}`}
placement="topLeft"
rootClassName="option-renderer-tooltip"
>
<div className="selectOptionContainer">
<div className="option-value">{value}</div>
<div className="option-meta-data-container">
@@ -29,7 +33,11 @@ function OptionRenderer({
</div>
</Tooltip>
) : (
<Tooltip title={label} placement="topLeft">
<Tooltip
title={label}
placement="topLeft"
rootClassName="option-renderer-tooltip"
>
<span>{label}</span>
</Tooltip>
)}


@@ -5,19 +5,6 @@
gap: 12px;
}
.selectOptionContainer {
display: flex;
gap: 8px;
justify-content: space-between;
align-items: center;
overflow-x: auto;
&::-webkit-scrollbar {
width: 0.2rem;
height: 0.2rem;
}
}
.logs-popup {
&.hide-scroll {
.rc-virtual-list-holder {


@@ -0,0 +1,88 @@
import { render, screen } from '@testing-library/react';
import { MetricType } from 'api/metricsExplorer/getMetricsList';
import { IBuilderQuery } from 'types/api/queryBuilder/queryBuilderData';
import { ReduceToFilter } from './ReduceToFilter';
const mockOnChange = jest.fn();
function baseQuery(overrides: Partial<IBuilderQuery> = {}): IBuilderQuery {
return {
dataSource: 'traces',
aggregations: [],
groupBy: [],
orderBy: [],
legend: '',
limit: null,
having: { expression: '' },
...overrides,
} as IBuilderQuery;
}
describe('ReduceToFilter', () => {
beforeEach(() => {
jest.clearAllMocks();
});
it('initializes with default avg when no reduceTo is set', () => {
render(<ReduceToFilter query={baseQuery()} onChange={mockOnChange} />);
expect(screen.getByTestId('reduce-to')).toBeInTheDocument();
expect(
screen.getByText('Average of values in timeframe'),
).toBeInTheDocument();
});
it('initializes from query.aggregations[0].reduceTo', () => {
render(
<ReduceToFilter
query={baseQuery({
aggregations: [{ reduceTo: 'sum' } as any],
aggregateAttribute: { key: 'test', type: MetricType.SUM },
})}
onChange={mockOnChange}
/>,
);
expect(screen.getByText('Sum of values in timeframe')).toBeInTheDocument();
});
it('initializes from query.reduceTo when aggregations[0].reduceTo is not set', () => {
render(
<ReduceToFilter
query={baseQuery({
reduceTo: 'max',
aggregateAttribute: { key: 'test', type: MetricType.GAUGE },
})}
onChange={mockOnChange}
/>,
);
expect(screen.getByText('Max of values in timeframe')).toBeInTheDocument();
});
it('updates to sum when aggregateAttribute.type is SUM', async () => {
const { rerender } = render(
<ReduceToFilter
query={baseQuery({
aggregateAttribute: { key: 'test', type: MetricType.GAUGE },
})}
onChange={mockOnChange}
/>,
);
rerender(
<ReduceToFilter
query={baseQuery({
aggregateAttribute: { key: 'test2', type: MetricType.SUM },
})}
onChange={mockOnChange}
/>,
);
const reduceToFilterText = (await screen.findByText(
'Sum of values in timeframe',
)) as HTMLElement;
expect(reduceToFilterText).toBeInTheDocument();
});
});


@@ -1,6 +1,7 @@
import { Select } from 'antd';
import { MetricType } from 'api/metricsExplorer/getMetricsList';
import { REDUCE_TO_VALUES } from 'constants/queryBuilder';
import { memo } from 'react';
import { memo, useEffect, useRef, useState } from 'react';
import { MetricAggregation } from 'types/api/v5/queryRange';
// ** Types
import { ReduceOperators } from 'types/common/queryBuilder';
@@ -12,19 +13,46 @@ export const ReduceToFilter = memo(function ReduceToFilter({
query,
onChange,
}: ReduceToFilterProps): JSX.Element {
const reduceToValue =
(query.aggregations?.[0] as MetricAggregation)?.reduceTo || query.reduceTo;
const currentValue =
REDUCE_TO_VALUES.find((option) => option.value === reduceToValue) ||
REDUCE_TO_VALUES[0];
const isMounted = useRef<boolean>(false);
const [currentValue, setCurrentValue] = useState<
SelectOption<ReduceOperators, string>
>(REDUCE_TO_VALUES[2]); // default to avg
const handleChange = (
newValue: SelectOption<ReduceOperators, string>,
): void => {
setCurrentValue(newValue);
onChange(newValue.value);
};
useEffect(
() => {
if (!isMounted.current) {
const reduceToValue =
(query.aggregations?.[0] as MetricAggregation)?.reduceTo || query.reduceTo;
setCurrentValue(
REDUCE_TO_VALUES.find((option) => option.value === reduceToValue) ||
REDUCE_TO_VALUES[2],
);
isMounted.current = true;
return;
}
const aggregationAttributeType = query.aggregateAttribute?.type as
| MetricType
| undefined;
if (aggregationAttributeType === MetricType.SUM) {
handleChange(REDUCE_TO_VALUES[1]);
} else {
handleChange(REDUCE_TO_VALUES[2]);
}
},
// eslint-disable-next-line react-hooks/exhaustive-deps
[query.aggregateAttribute?.key],
);
return (
<Select
placeholder="Reduce to"


@@ -0,0 +1,32 @@
import { getMetricMetadata } from 'api/metricsExplorer/v2/getMetricMetadata';
import { REACT_QUERY_KEY } from 'constants/reactQueryKeys';
import { useQueries, UseQueryOptions, UseQueryResult } from 'react-query';
import { SuccessResponseV2 } from 'types/api';
import { MetricMetadataResponse } from 'types/api/metricsExplorer/v2/getMetricMetadata';
type QueryResult = UseQueryResult<
SuccessResponseV2<MetricMetadataResponse>,
Error
>;
type UseGetMultipleMetrics = (
metricNames: string[],
options?: UseQueryOptions<SuccessResponseV2<MetricMetadataResponse>, Error>,
headers?: Record<string, string>,
) => QueryResult[];
export const useGetMultipleMetrics: UseGetMultipleMetrics = (
metricNames,
options,
headers,
) =>
useQueries(
metricNames.map(
(metricName) =>
({
queryKey: [REACT_QUERY_KEY.GET_METRIC_METADATA, metricName],
queryFn: ({ signal }) => getMetricMetadata(metricName, signal, headers),
...options,
} as UseQueryOptions<SuccessResponseV2<MetricMetadataResponse>, Error>),
),
);
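
A minimal sketch of calling the hook directly; enabled is a standard react-query option and the result shape follows from the QueryResult type above:

const results = useGetMultipleMetrics(['metric1', 'metric2'], {
  enabled: true,
});
// One UseQueryResult per metric name, cached under
// [REACT_QUERY_KEY.GET_METRIC_METADATA, metricName].
const allSettled = results.every((result) => !result.isLoading);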


@@ -5,7 +5,7 @@ import updateMetricMetadata, {
import { useMutation, UseMutationResult } from 'react-query';
import { ErrorResponse, SuccessResponse } from 'types/api';
interface UseUpdateMetricMetadataProps {
export interface UseUpdateMetricMetadataProps {
metricName: string;
payload: UpdateMetricMetadataProps;
}


@@ -188,7 +188,7 @@ describe('useQueryBuilderOperations - Empty Aggregate Attribute Type', () => {
timeAggregation: MetricAggregateOperator.RATE,
metricName: 'new_sum_metric',
temporality: '',
spaceAggregation: '',
spaceAggregation: MetricAggregateOperator.SUM,
},
],
}),
@@ -239,7 +239,7 @@ describe('useQueryBuilderOperations - Empty Aggregate Attribute Type', () => {
timeAggregation: MetricAggregateOperator.RATE,
metricName: 'new_sum_metric',
temporality: '',
spaceAggregation: '',
spaceAggregation: MetricAggregateOperator.SUM,
},
],
}),
@@ -315,7 +315,7 @@ describe('useQueryBuilderOperations - Empty Aggregate Attribute Type', () => {
timeAggregation: MetricAggregateOperator.AVG,
metricName: 'new_gauge',
temporality: '',
spaceAggregation: '',
spaceAggregation: MetricAggregateOperator.AVG,
},
],
}),


@@ -317,7 +317,7 @@ export const useQueryOperations: UseQueryOperations = ({
timeAggregation: MetricAggregateOperator.RATE,
metricName: newQuery.aggregateAttribute?.key || '',
temporality: '',
spaceAggregation: '',
spaceAggregation: MetricAggregateOperator.SUM,
},
];
} else if (newQuery.aggregateAttribute?.type === ATTRIBUTE_TYPES.GAUGE) {
@@ -326,7 +326,20 @@ export const useQueryOperations: UseQueryOperations = ({
timeAggregation: MetricAggregateOperator.AVG,
metricName: newQuery.aggregateAttribute?.key || '',
temporality: '',
spaceAggregation: '',
spaceAggregation: MetricAggregateOperator.AVG,
},
];
} else if (
newQuery.aggregateAttribute?.type === ATTRIBUTE_TYPES.HISTOGRAM ||
newQuery.aggregateAttribute?.type ===
ATTRIBUTE_TYPES.EXPONENTIAL_HISTOGRAM
) {
newQuery.aggregations = [
{
timeAggregation: '',
metricName: newQuery.aggregateAttribute?.key || '',
temporality: '',
spaceAggregation: MetricAggregateOperator.P90,
},
];
} else {


@@ -0,0 +1,238 @@
import { renderHook } from '@testing-library/react';
import { UniversalYAxisUnit } from 'components/YAxisUnitSelector/types';
import { useGetMetrics } from 'container/MetricsExplorer/Explorer/utils';
import { MetricMetadata } from 'types/api/metricsExplorer/v2/getMetricMetadata';
import { Query } from 'types/api/queryBuilder/queryBuilderData';
import { EQueryType } from 'types/common/dashboard';
import { DataSource, QueryBuilderContextType } from 'types/common/queryBuilder';
import { useQueryBuilder } from './queryBuilder/useQueryBuilder';
import useGetYAxisUnit from './useGetYAxisUnit';
jest.mock('./queryBuilder/useQueryBuilder');
jest.mock('container/MetricsExplorer/Explorer/utils', () => ({
...jest.requireActual('container/MetricsExplorer/Explorer/utils'),
useGetMetrics: jest.fn(),
}));
const mockUseQueryBuilder = useQueryBuilder as jest.MockedFunction<
typeof useQueryBuilder
>;
const mockUseGetMetrics = useGetMetrics as jest.MockedFunction<
typeof useGetMetrics
>;
const MOCK_METRIC_1 = {
unit: UniversalYAxisUnit.BYTES,
} as MetricMetadata;
const MOCK_METRIC_2 = {
unit: UniversalYAxisUnit.SECONDS,
} as MetricMetadata;
const MOCK_METRIC_3 = {
unit: '',
} as MetricMetadata;
function createMockCurrentQuery(
queryType: EQueryType,
queryData: Query['builder']['queryData'] = [],
): Query {
return {
queryType,
promql: [],
builder: {
queryData,
queryFormulas: [],
queryTraceOperator: [],
},
clickhouse_sql: [],
id: 'test-id',
};
}
describe('useGetYAxisUnit', () => {
beforeEach(() => {
jest.clearAllMocks();
mockUseGetMetrics.mockReturnValue({
isLoading: false,
isError: false,
metrics: [],
});
mockUseQueryBuilder.mockReturnValue(({
currentQuery: undefined,
} as Partial<QueryBuilderContextType>) as QueryBuilderContextType);
});
it('should return undefined yAxisUnit and disable useGetMetrics when currentQuery is null', async () => {
const { result } = renderHook(() => useGetYAxisUnit());
expect(result.current.yAxisUnit).toBeUndefined();
expect(result.current.isLoading).toBe(false);
expect(result.current.isError).toBe(false);
expect(mockUseGetMetrics).toHaveBeenCalledWith([], false);
});
it('should return undefined yAxisUnit when queryType is PROM', async () => {
const mockCurrentQuery = createMockCurrentQuery(EQueryType.PROM);
mockUseQueryBuilder.mockReturnValueOnce(({
currentQuery: mockCurrentQuery,
} as Partial<QueryBuilderContextType>) as QueryBuilderContextType);
const { result } = renderHook(() => useGetYAxisUnit());
expect(result.current.yAxisUnit).toBeUndefined();
expect(mockUseGetMetrics).toHaveBeenCalledWith([], false);
});
it('should return undefined yAxisUnit when queryType is CLICKHOUSE', async () => {
const mockCurrentQuery = createMockCurrentQuery(EQueryType.CLICKHOUSE);
mockUseQueryBuilder.mockReturnValueOnce(({
currentQuery: mockCurrentQuery,
} as Partial<QueryBuilderContextType>) as QueryBuilderContextType);
const { result } = renderHook(() => useGetYAxisUnit());
expect(result.current.yAxisUnit).toBeUndefined();
expect(result.current.isLoading).toBe(false);
expect(result.current.isError).toBe(false);
expect(mockUseGetMetrics).toHaveBeenCalledWith([], false);
});
it('should return undefined yAxisUnit when dataSource is TRACES', async () => {
const mockCurrentQuery = createMockCurrentQuery(EQueryType.QUERY_BUILDER, [
{
dataSource: DataSource.TRACES,
aggregateAttribute: { key: 'trace_metric' },
} as Query['builder']['queryData'][0],
]);
mockUseQueryBuilder.mockReturnValueOnce(({
currentQuery: mockCurrentQuery,
} as Partial<QueryBuilderContextType>) as QueryBuilderContextType);
const { result } = renderHook(() => useGetYAxisUnit());
expect(result.current.yAxisUnit).toBeUndefined();
expect(result.current.isLoading).toBe(false);
expect(result.current.isError).toBe(false);
expect(mockUseGetMetrics).toHaveBeenCalledWith([], false);
});
it('should return undefined yAxisUnit when dataSource is LOGS', async () => {
const mockCurrentQuery = createMockCurrentQuery(EQueryType.QUERY_BUILDER, [
{
dataSource: DataSource.LOGS,
aggregateAttribute: { key: 'log_metric' },
} as Query['builder']['queryData'][number],
]);
mockUseQueryBuilder.mockReturnValueOnce(({
currentQuery: mockCurrentQuery,
} as Partial<QueryBuilderContextType>) as QueryBuilderContextType);
const { result } = renderHook(() => useGetYAxisUnit());
expect(result.current.yAxisUnit).toBeUndefined();
expect(result.current.isLoading).toBe(false);
expect(result.current.isError).toBe(false);
expect(mockUseGetMetrics).toHaveBeenCalledWith([], false);
});
it('should extract all metric names from queryData when no selected query name is provided', () => {
const mockCurrentQuery = createMockCurrentQuery(EQueryType.QUERY_BUILDER, [
{
dataSource: DataSource.METRICS,
aggregateAttribute: { key: 'metric1' },
queryName: 'query1',
} as Query['builder']['queryData'][number],
{
dataSource: DataSource.METRICS,
aggregateAttribute: { key: 'metric2' },
queryName: 'query2',
} as Query['builder']['queryData'][number],
]);
mockUseQueryBuilder.mockReturnValueOnce(({
stagedQuery: mockCurrentQuery,
} as Partial<QueryBuilderContextType>) as QueryBuilderContextType);
renderHook(() => useGetYAxisUnit());
expect(mockUseGetMetrics).toHaveBeenCalledWith(['metric1', 'metric2'], true);
});
it('should extract metric name for the selected query only when one is provided', () => {
const mockCurrentQuery = createMockCurrentQuery(EQueryType.QUERY_BUILDER, [
{
dataSource: DataSource.METRICS,
aggregateAttribute: { key: 'metric1' },
queryName: 'query1',
} as Query['builder']['queryData'][number],
{
dataSource: DataSource.METRICS,
aggregateAttribute: { key: 'metric2' },
queryName: 'query2',
} as Query['builder']['queryData'][number],
]);
mockUseQueryBuilder.mockReturnValueOnce(({
stagedQuery: mockCurrentQuery,
} as Partial<QueryBuilderContextType>) as QueryBuilderContextType);
renderHook(() => useGetYAxisUnit('query2'));
expect(mockUseGetMetrics).toHaveBeenCalledWith(['metric2'], true);
});
it('should return the unit when there is a single metric with a non-empty unit', async () => {
mockUseGetMetrics.mockReturnValue({
isLoading: false,
isError: false,
metrics: [MOCK_METRIC_1],
});
const { result } = renderHook(() => useGetYAxisUnit());
expect(result.current.yAxisUnit).toBe(UniversalYAxisUnit.BYTES);
expect(result.current.isLoading).toBe(false);
expect(result.current.isError).toBe(false);
});
it('should return undefined when there is a single metric with no unit', async () => {
mockUseGetMetrics.mockReturnValue({
isLoading: false,
isError: false,
metrics: [MOCK_METRIC_3],
});
const { result } = renderHook(() => useGetYAxisUnit());
expect(result.current.yAxisUnit).toBeUndefined();
expect(result.current.isLoading).toBe(false);
expect(result.current.isError).toBe(false);
});
it('should return the unit when all metrics have the same non-empty unit', async () => {
mockUseGetMetrics.mockReturnValue({
isLoading: false,
isError: false,
metrics: [MOCK_METRIC_1, MOCK_METRIC_1],
});
const { result } = renderHook(() => useGetYAxisUnit());
expect(result.current.yAxisUnit).toBe(UniversalYAxisUnit.BYTES);
expect(result.current.isLoading).toBe(false);
expect(result.current.isError).toBe(false);
});
it('should return undefined when metrics have different units', async () => {
mockUseGetMetrics.mockReturnValueOnce({
isLoading: false,
isError: false,
metrics: [MOCK_METRIC_1, MOCK_METRIC_2],
});
const { result } = renderHook(() => useGetYAxisUnit());
expect(result.current.yAxisUnit).toBeUndefined();
expect(result.current.isLoading).toBe(false);
expect(result.current.isError).toBe(false);
});
});


@@ -0,0 +1,108 @@
import {
getMetricUnits,
useGetMetrics,
} from 'container/MetricsExplorer/Explorer/utils';
import { useEffect, useMemo, useState } from 'react';
import { EQueryType } from 'types/common/dashboard';
import { DataSource } from 'types/common/queryBuilder';
import { useQueryBuilder } from './queryBuilder/useQueryBuilder';
interface UseGetYAxisUnitResult {
yAxisUnit: string | undefined;
isLoading: boolean;
isError: boolean;
}
/**
* Hook to get the y-axis unit for a given metrics-based query.
* @param selectedQueryName - The name of the query to get the y-axis unit for.
* @param params.enabled - Whether the hook is enabled.
* @returns `{ yAxisUnit, isLoading, isError }` The y-axis unit, loading state, and error state
*/
function useGetYAxisUnit(
selectedQueryName?: string,
params: {
enabled?: boolean;
} = {
enabled: true,
},
): UseGetYAxisUnitResult {
const { stagedQuery } = useQueryBuilder();
const [yAxisUnit, setYAxisUnit] = useState<string | undefined>();
const metricNames: string[] | null = useMemo(() => {
// If the query type is not QUERY_BUILDER, return null
if (stagedQuery?.queryType !== EQueryType.QUERY_BUILDER) {
return null;
}
// If the data source is not METRICS, return null
const dataSource = stagedQuery?.builder?.queryData?.[0]?.dataSource;
if (dataSource !== DataSource.METRICS) {
return null;
}
const currentMetricNames: string[] = [];
// If a selected query name is provided, return the metric name for that query only
if (selectedQueryName) {
stagedQuery?.builder?.queryData?.forEach((query) => {
if (
query.queryName === selectedQueryName &&
query.aggregateAttribute?.key
) {
currentMetricNames.push(query.aggregateAttribute?.key);
}
});
return currentMetricNames.length ? currentMetricNames : null;
}
// Else, return all metric names
stagedQuery?.builder?.queryData?.forEach((query) => {
if (query.aggregateAttribute?.key) {
currentMetricNames.push(query.aggregateAttribute?.key);
}
});
return currentMetricNames.length ? currentMetricNames : null;
}, [
selectedQueryName,
stagedQuery?.builder?.queryData,
stagedQuery?.queryType,
]);
const { metrics, isLoading, isError } = useGetMetrics(
metricNames ?? [],
!!metricNames && params?.enabled,
);
const units = useMemo(() => getMetricUnits(metrics), [metrics]);
const areAllMetricUnitsSame = useMemo(
() => units.every((unit) => unit === units[0]),
[units],
);
useEffect(() => {
// If there are no metrics, set the y-axis unit to undefined
if (units.length === 0) {
setYAxisUnit(undefined);
// If there is one metric and it has a non-empty unit, set the y-axis unit to it
} else if (units.length === 1 && units[0] !== '') {
setYAxisUnit(units[0]);
// If all metrics have the same non-empty unit, set the y-axis unit to it
} else if (areAllMetricUnitsSame) {
if (units[0] !== '') {
setYAxisUnit(units[0]);
} else {
setYAxisUnit(undefined);
}
// If there is more than one metric and they have different units, set the y-axis unit to undefined
} else if (units.length > 1 && !areAllMetricUnitsSame) {
setYAxisUnit(undefined);
// If there is one metric and it has an empty unit, set the y-axis unit to undefined
} else if (units.length === 1 && units[0] === '') {
setYAxisUnit(undefined);
}
}, [units, areAllMetricUnitsSame]);
return { yAxisUnit, isLoading, isError };
}
export default useGetYAxisUnit;
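
A consumer can then resolve a chart's unit in one call; a sketch assuming a staged metrics query containing a query named 'A' (the component name is hypothetical):

function ChartUnitLabel(): JSX.Element {
  // Resolves to the shared universal unit, or undefined when units differ.
  const { yAxisUnit, isLoading } = useGetYAxisUnit('A');
  if (isLoading) return <span>Resolving unit...</span>;
  return <span>{yAxisUnit ?? 'No unit'}</span>;
}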


@@ -0,0 +1,15 @@
import { Temporality } from 'api/metricsExplorer/getMetricDetails';
import { MetricType } from 'api/metricsExplorer/getMetricsList';
export interface MetricMetadata {
description: string;
type: MetricType;
unit: string;
temporality: Temporality;
isMonotonic: boolean;
}
export interface MetricMetadataResponse {
status: string;
data: MetricMetadata;
}


@@ -80,6 +80,17 @@ func parseFieldKeyRequest(r *http.Request) (*telemetrytypes.FieldKeySelector, er
name := r.URL.Query().Get("searchText")
if name != "" && fieldContext == telemetrytypes.FieldContextUnspecified {
parsedFieldKey := telemetrytypes.GetFieldKeyFromKeyText(name)
if parsedFieldKey.FieldContext != telemetrytypes.FieldContextUnspecified {
// Only apply inferred context if it is valid for the current signal
if isContextValidForSignal(parsedFieldKey.FieldContext, signal) {
name = parsedFieldKey.Name
fieldContext = parsedFieldKey.FieldContext
}
}
}
req = telemetrytypes.FieldKeySelector{
StartUnixMilli: startUnixMilli,
EndUnixMilli: endUnixMilli,
@@ -102,6 +113,16 @@ func parseFieldValueRequest(r *http.Request) (*telemetrytypes.FieldValueSelector
}
name := r.URL.Query().Get("name")
if name != "" && keySelector.FieldContext == telemetrytypes.FieldContextUnspecified {
parsedFieldKey := telemetrytypes.GetFieldKeyFromKeyText(name)
if parsedFieldKey.FieldContext != telemetrytypes.FieldContextUnspecified {
// Only apply inferred context if it is valid for the current signal
if isContextValidForSignal(parsedFieldKey.FieldContext, keySelector.Signal) {
name = parsedFieldKey.Name
keySelector.FieldContext = parsedFieldKey.FieldContext
}
}
}
keySelector.Name = name
existingQuery := r.URL.Query().Get("existingQuery")
value := r.URL.Query().Get("searchText")
@@ -121,3 +142,21 @@ func parseFieldValueRequest(r *http.Request) (*telemetrytypes.FieldValueSelector
return &req, nil
}
func isContextValidForSignal(ctx telemetrytypes.FieldContext, signal telemetrytypes.Signal) bool {
if ctx == telemetrytypes.FieldContextResource ||
ctx == telemetrytypes.FieldContextAttribute ||
ctx == telemetrytypes.FieldContextScope {
return true
}
switch signal.StringValue() {
case telemetrytypes.SignalLogs.StringValue():
return ctx == telemetrytypes.FieldContextLog || ctx == telemetrytypes.FieldContextBody
case telemetrytypes.SignalTraces.StringValue():
return ctx == telemetrytypes.FieldContextSpan || ctx == telemetrytypes.FieldContextEvent || ctx == telemetrytypes.FieldContextTrace
case telemetrytypes.SignalMetrics.StringValue():
return ctx == telemetrytypes.FieldContextMetric
}
return true
}


@@ -14,7 +14,6 @@ import (
"github.com/SigNoz/signoz/pkg/licensing"
"github.com/SigNoz/signoz/pkg/modules/dashboard"
"github.com/SigNoz/signoz/pkg/querier"
"github.com/SigNoz/signoz/pkg/transition"
"github.com/SigNoz/signoz/pkg/types"
"github.com/SigNoz/signoz/pkg/types/authtypes"
"github.com/SigNoz/signoz/pkg/types/ctxtypes"
@@ -57,11 +56,6 @@ func (handler *handler) Create(rw http.ResponseWriter, r *http.Request) {
return
}
dashboardMigrator := transition.NewDashboardMigrateV5(handler.providerSettings.Logger, nil, nil)
if req["version"] != "v5" {
dashboardMigrator.Migrate(ctx, req)
}
dashboard, err := handler.module.Create(ctx, orgID, claims.Email, valuer.MustNewUUID(claims.UserID), req)
if err != nil {
render.Error(rw, err)


@@ -176,7 +176,7 @@ func (r *cloudProviderAccountsSQLRepository) upsert(
onConflictClause := ""
if len(onConflictSetStmts) > 0 {
onConflictClause = fmt.Sprintf(
"conflict(id, provider, org_id) do update SET\n%s",
"conflict(id) do update SET\n%s",
strings.Join(onConflictSetStmts, ",\n"),
)
}


@@ -520,7 +520,7 @@ func (h *HostsRepo) GetHostList(ctx context.Context, orgID valuer.UUID, req mode
if _, ok := hostAttrs[record.HostName]; ok {
record.Meta = hostAttrs[record.HostName]
}
if osType, ok := record.Meta["os_type"]; ok {
if osType, ok := record.Meta[GetDotMetrics("os_type")]; ok {
record.OS = osType
}
record.Active = activeHosts[record.HostName]


@@ -38,6 +38,7 @@ import (
"github.com/SigNoz/signoz/pkg/sharder/noopsharder"
"github.com/SigNoz/signoz/pkg/sharder/singlesharder"
"github.com/SigNoz/signoz/pkg/sqlmigration"
"github.com/SigNoz/signoz/pkg/sqlmigration/s100sqlmigration"
"github.com/SigNoz/signoz/pkg/sqlschema"
"github.com/SigNoz/signoz/pkg/sqlschema/sqlitesqlschema"
"github.com/SigNoz/signoz/pkg/sqlstore"
@@ -96,64 +97,9 @@ func NewSQLSchemaProviderFactories(sqlstore sqlstore.SQLStore) factory.NamedMap[
func NewSQLMigrationProviderFactories(
sqlstore sqlstore.SQLStore,
sqlschema sqlschema.SQLSchema,
telemetryStore telemetrystore.TelemetryStore,
providerSettings factory.ProviderSettings,
) factory.NamedMap[factory.ProviderFactory[sqlmigration.SQLMigration, sqlmigration.Config]] {
return factory.MustNewNamedMap(
sqlmigration.NewAddDataMigrationsFactory(),
sqlmigration.NewAddOrganizationFactory(),
sqlmigration.NewAddPreferencesFactory(),
sqlmigration.NewAddDashboardsFactory(),
sqlmigration.NewAddSavedViewsFactory(),
sqlmigration.NewAddAgentsFactory(),
sqlmigration.NewAddPipelinesFactory(),
sqlmigration.NewAddIntegrationsFactory(),
sqlmigration.NewAddLicensesFactory(),
sqlmigration.NewAddPatsFactory(),
sqlmigration.NewModifyDatetimeFactory(),
sqlmigration.NewModifyOrgDomainFactory(),
sqlmigration.NewUpdateOrganizationFactory(sqlstore),
sqlmigration.NewAddAlertmanagerFactory(sqlstore),
sqlmigration.NewUpdateDashboardAndSavedViewsFactory(sqlstore),
sqlmigration.NewUpdatePatAndOrgDomainsFactory(sqlstore),
sqlmigration.NewUpdatePipelines(sqlstore),
sqlmigration.NewDropLicensesSitesFactory(sqlstore),
sqlmigration.NewUpdateInvitesFactory(sqlstore),
sqlmigration.NewUpdatePatFactory(sqlstore),
sqlmigration.NewUpdateAlertmanagerFactory(sqlstore),
sqlmigration.NewUpdatePreferencesFactory(sqlstore),
sqlmigration.NewUpdateApdexTtlFactory(sqlstore),
sqlmigration.NewUpdateResetPasswordFactory(sqlstore),
sqlmigration.NewUpdateRulesFactory(sqlstore),
sqlmigration.NewAddVirtualFieldsFactory(),
sqlmigration.NewUpdateIntegrationsFactory(sqlstore),
sqlmigration.NewUpdateOrganizationsFactory(sqlstore),
sqlmigration.NewDropGroupsFactory(sqlstore),
sqlmigration.NewCreateQuickFiltersFactory(sqlstore),
sqlmigration.NewUpdateQuickFiltersFactory(sqlstore),
sqlmigration.NewAuthRefactorFactory(sqlstore),
sqlmigration.NewUpdateLicenseFactory(sqlstore),
sqlmigration.NewMigratePATToFactorAPIKey(sqlstore),
sqlmigration.NewUpdateApiMonitoringFiltersFactory(sqlstore),
sqlmigration.NewAddKeyOrganizationFactory(sqlstore),
sqlmigration.NewAddTraceFunnelsFactory(sqlstore),
sqlmigration.NewUpdateDashboardFactory(sqlstore),
sqlmigration.NewDropFeatureSetFactory(),
sqlmigration.NewDropDeprecatedTablesFactory(),
sqlmigration.NewUpdateAgentsFactory(sqlstore),
sqlmigration.NewUpdateUsersFactory(sqlstore, sqlschema),
sqlmigration.NewUpdateUserInviteFactory(sqlstore, sqlschema),
sqlmigration.NewUpdateOrgDomainFactory(sqlstore, sqlschema),
sqlmigration.NewAddFactorIndexesFactory(sqlstore, sqlschema),
sqlmigration.NewQueryBuilderV5MigrationFactory(sqlstore, telemetryStore),
sqlmigration.NewAddMeterQuickFiltersFactory(sqlstore, sqlschema),
sqlmigration.NewUpdateTTLSettingForCustomRetentionFactory(sqlstore, sqlschema),
sqlmigration.NewAddRoutePolicyFactory(sqlstore, sqlschema),
sqlmigration.NewAddAuthTokenFactory(sqlstore, sqlschema),
sqlmigration.NewAddAuthzFactory(sqlstore, sqlschema),
sqlmigration.NewAddPublicDashboardsFactory(sqlstore, sqlschema),
sqlmigration.NewAddRoleFactory(sqlstore, sqlschema),
sqlmigration.NewUpdateAuthzFactory(sqlstore, sqlschema),
s100sqlmigration.NewV100Factory(sqlstore, sqlschema),
)
}

View File

@@ -45,8 +45,6 @@ func TestNewProviderFactories(t *testing.T) {
NewSQLMigrationProviderFactories(
sqlstoretest.New(sqlstore.Config{Provider: "sqlite"}, sqlmock.QueryMatcherEqual),
sqlschematest.New(map[string]*sqlschema.Table{}, map[string][]*sqlschema.UniqueConstraint{}, map[string]sqlschema.Index{}),
telemetrystoretest.New(telemetrystore.Config{Provider: "clickhouse"}, sqlmock.QueryMatcherEqual),
instrumentationtest.New().ToProviderSettings(),
)
})


@@ -219,7 +219,7 @@ func New(
ctx,
providerSettings,
config.SQLMigration,
NewSQLMigrationProviderFactories(sqlstore, sqlschema, telemetrystore, providerSettings),
NewSQLMigrationProviderFactories(sqlstore, sqlschema),
)
if err != nil {
return nil, err


@@ -1,49 +0,0 @@
package sqlmigration
import (
"context"
"time"
"github.com/SigNoz/signoz/pkg/factory"
"github.com/uptrace/bun"
"github.com/uptrace/bun/migrate"
)
type addDataMigrations struct{}
func NewAddDataMigrationsFactory() factory.ProviderFactory[SQLMigration, Config] {
return factory.NewProviderFactory(factory.MustNewName("add_data_migrations"), newAddDataMigrations)
}
func newAddDataMigrations(_ context.Context, _ factory.ProviderSettings, _ Config) (SQLMigration, error) {
return &addDataMigrations{}, nil
}
func (migration *addDataMigrations) Register(migrations *migrate.Migrations) error {
if err := migrations.Register(migration.Up, migration.Down); err != nil {
return err
}
return nil
}
func (migration *addDataMigrations) Up(ctx context.Context, db *bun.DB) error {
// table:data_migrations
if _, err := db.NewCreateTable().
Model(&struct {
bun.BaseModel `bun:"table:data_migrations"`
ID int `bun:"id,pk,autoincrement"`
Version string `bun:"version,unique,notnull,type:VARCHAR(255)"`
CreatedAt time.Time `bun:"created_at,notnull,default:current_timestamp"`
Succeeded bool `bun:"succeeded,notnull,default:false"`
}{}).
IfNotExists().
Exec(ctx); err != nil {
return err
}
return nil
}
func (migration *addDataMigrations) Down(ctx context.Context, db *bun.DB) error {
return nil
}


@@ -1,158 +0,0 @@
package sqlmigration
import (
"context"
"time"
"github.com/SigNoz/signoz/pkg/factory"
"github.com/uptrace/bun"
"github.com/uptrace/bun/migrate"
)
type addOrganization struct{}
func NewAddOrganizationFactory() factory.ProviderFactory[SQLMigration, Config] {
return factory.NewProviderFactory(factory.MustNewName("add_organization"), newAddOrganization)
}
func newAddOrganization(_ context.Context, _ factory.ProviderSettings, _ Config) (SQLMigration, error) {
return &addOrganization{}, nil
}
func (migration *addOrganization) Register(migrations *migrate.Migrations) error {
if err := migrations.Register(migration.Up, migration.Down); err != nil {
return err
}
return nil
}
func (migration *addOrganization) Up(ctx context.Context, db *bun.DB) error {
// table:organizations
if _, err := db.NewCreateTable().
Model(&struct {
bun.BaseModel `bun:"table:organizations"`
ID string `bun:"id,pk,type:text"`
Name string `bun:"name,type:text,notnull"`
CreatedAt int `bun:"created_at,notnull"`
IsAnonymous int `bun:"is_anonymous,notnull,default:0"`
HasOptedUpdates int `bun:"has_opted_updates,notnull,default:1"`
}{}).
IfNotExists().
Exec(ctx); err != nil {
return err
}
// table:groups
if _, err := db.NewCreateTable().
Model(&struct {
bun.BaseModel `bun:"table:groups"`
ID string `bun:"id,pk,type:text"`
Name string `bun:"name,type:text,notnull,unique"`
}{}).
IfNotExists().
Exec(ctx); err != nil {
return err
}
// table:users
if _, err := db.NewCreateTable().
Model(&struct {
bun.BaseModel `bun:"table:users"`
ID string `bun:"id,pk,type:text"`
Name string `bun:"name,type:text,notnull"`
Email string `bun:"email,type:text,notnull,unique"`
Password string `bun:"password,type:text,notnull"`
CreatedAt int `bun:"created_at,notnull"`
ProfilePictureURL string `bun:"profile_picture_url,type:text"`
GroupID string `bun:"group_id,type:text,notnull"`
OrgID string `bun:"org_id,type:text,notnull"`
}{}).
ForeignKey(`("org_id") REFERENCES "organizations" ("id")`).
ForeignKey(`("group_id") REFERENCES "groups" ("id")`).
IfNotExists().
Exec(ctx); err != nil {
return err
}
// table:invites
if _, err := db.NewCreateTable().
Model(&struct {
bun.BaseModel `bun:"table:invites"`
ID int `bun:"id,pk,autoincrement"`
Name string `bun:"name,type:text,notnull"`
Email string `bun:"email,type:text,notnull,unique"`
Token string `bun:"token,type:text,notnull"`
CreatedAt int `bun:"created_at,notnull"`
Role string `bun:"role,type:text,notnull"`
OrgID string `bun:"org_id,type:text,notnull"`
}{}).
ForeignKey(`("org_id") REFERENCES "organizations" ("id")`).
IfNotExists().
Exec(ctx); err != nil {
return err
}
// table:reset_password_request
if _, err := db.NewCreateTable().
Model(&struct {
bun.BaseModel `bun:"table:reset_password_request"`
ID int `bun:"id,pk,autoincrement"`
Token string `bun:"token,type:text,notnull"`
UserID string `bun:"user_id,type:text,notnull"`
}{}).
ForeignKey(`("user_id") REFERENCES "users" ("id")`).
IfNotExists().
Exec(ctx); err != nil {
return err
}
// table:user_flags
if _, err := db.NewCreateTable().
Model(&struct {
bun.BaseModel `bun:"table:user_flags"`
UserID string `bun:"user_id,pk,type:text,notnull"`
Flags string `bun:"flags,type:text"`
}{}).
ForeignKey(`("user_id") REFERENCES "users" ("id")`).
IfNotExists().
Exec(ctx); err != nil {
return err
}
// table:apdex_settings
if _, err := db.NewCreateTable().
Model(&struct {
bun.BaseModel `bun:"table:apdex_settings"`
ServiceName string `bun:"service_name,pk,type:text"`
Threshold float64 `bun:"threshold,type:float,notnull"`
ExcludeStatusCodes string `bun:"exclude_status_codes,type:text,notnull"`
}{}).
IfNotExists().
Exec(ctx); err != nil {
return err
}
// table:ingestion_keys
if _, err := db.NewCreateTable().
Model(&struct {
bun.BaseModel `bun:"table:ingestion_keys"`
KeyId string `bun:"key_id,pk,type:text"`
Name string `bun:"name,type:text"`
CreatedAt time.Time `bun:"created_at,default:current_timestamp"`
IngestionKey string `bun:"ingestion_key,type:text,notnull"`
IngestionURL string `bun:"ingestion_url,type:text,notnull"`
DataRegion string `bun:"data_region,type:text,notnull"`
}{}).
IfNotExists().
Exec(ctx); err != nil {
return err
}
return nil
}
func (migration *addOrganization) Down(ctx context.Context, db *bun.DB) error {
return nil
}

View File

@@ -1,63 +0,0 @@
package sqlmigration
import (
"context"
"github.com/SigNoz/signoz/pkg/factory"
"github.com/uptrace/bun"
"github.com/uptrace/bun/migrate"
)
type addPreferences struct{}
func NewAddPreferencesFactory() factory.ProviderFactory[SQLMigration, Config] {
return factory.NewProviderFactory(factory.MustNewName("add_preferences"), newAddPreferences)
}
func newAddPreferences(_ context.Context, _ factory.ProviderSettings, _ Config) (SQLMigration, error) {
return &addPreferences{}, nil
}
func (migration *addPreferences) Register(migrations *migrate.Migrations) error {
if err := migrations.Register(migration.Up, migration.Down); err != nil {
return err
}
return nil
}
func (migration *addPreferences) Up(ctx context.Context, db *bun.DB) error {
// table:user_preference
if _, err := db.NewCreateTable().
Model(&struct {
bun.BaseModel `bun:"table:user_preference"`
PreferenceID string `bun:"preference_id,type:text,pk"`
PreferenceValue string `bun:"preference_value,type:text"`
UserID string `bun:"user_id,type:text,pk"`
}{}).
IfNotExists().
ForeignKey(`("user_id") REFERENCES "users" ("id") ON DELETE CASCADE ON UPDATE CASCADE`).
Exec(ctx); err != nil {
return err
}
// table:org_preference
if _, err := db.NewCreateTable().
Model(&struct {
bun.BaseModel `bun:"table:org_preference"`
PreferenceID string `bun:"preference_id,pk,type:text,notnull"`
PreferenceValue string `bun:"preference_value,type:text,notnull"`
OrgID string `bun:"org_id,pk,type:text,notnull"`
}{}).
ForeignKey(`("org_id") REFERENCES "organizations" ("id") ON DELETE CASCADE ON UPDATE CASCADE`).
IfNotExists().
Exec(ctx); err != nil {
return err
}
return nil
}
func (migration *addPreferences) Down(ctx context.Context, db *bun.DB) error {
return nil
}

View File

@@ -1,125 +0,0 @@
package sqlmigration
import (
"context"
"time"
"github.com/SigNoz/signoz/pkg/factory"
"github.com/uptrace/bun"
"github.com/uptrace/bun/migrate"
)
type addDashboards struct{}
func NewAddDashboardsFactory() factory.ProviderFactory[SQLMigration, Config] {
return factory.NewProviderFactory(factory.MustNewName("add_dashboards"), newAddDashboards)
}
func newAddDashboards(_ context.Context, _ factory.ProviderSettings, _ Config) (SQLMigration, error) {
return &addDashboards{}, nil
}
func (migration *addDashboards) Register(migrations *migrate.Migrations) error {
if err := migrations.Register(migration.Up, migration.Down); err != nil {
return err
}
return nil
}
func (migration *addDashboards) Up(ctx context.Context, db *bun.DB) error {
// table:dashboards
if _, err := db.NewCreateTable().
Model(&struct {
bun.BaseModel `bun:"table:dashboards"`
ID int `bun:"id,pk,autoincrement"`
UUID string `bun:"uuid,type:text,notnull,unique"`
CreatedAt time.Time `bun:"created_at,notnull"`
CreatedBy string `bun:"created_by,type:text,notnull"`
UpdatedAt time.Time `bun:"updated_at,notnull"`
UpdatedBy string `bun:"updated_by,type:text,notnull"`
Data string `bun:"data,type:text,notnull"`
Locked int `bun:"locked,notnull,default:0"`
}{}).
IfNotExists().
Exec(ctx); err != nil {
return err
}
// table:rules
if _, err := db.NewCreateTable().
Model(&struct {
bun.BaseModel `bun:"table:rules"`
ID int `bun:"id,pk,autoincrement"`
CreatedAt time.Time `bun:"created_at,notnull"`
CreatedBy string `bun:"created_by,type:text,notnull"`
UpdatedAt time.Time `bun:"updated_at,notnull"`
UpdatedBy string `bun:"updated_by,type:text,notnull"`
Deleted int `bun:"deleted,notnull,default:0"`
Data string `bun:"data,type:text,notnull"`
}{}).
IfNotExists().
Exec(ctx); err != nil {
return err
}
// table:notification_channels
if _, err := db.NewCreateTable().
Model(&struct {
bun.BaseModel `bun:"table:notification_channels"`
ID int `bun:"id,pk,autoincrement"`
CreatedAt time.Time `bun:"created_at,notnull"`
UpdatedAt time.Time `bun:"updated_at,notnull"`
Name string `bun:"name,type:text,notnull,unique"`
Type string `bun:"type,type:text,notnull"`
Deleted int `bun:"deleted,notnull,default:0"`
Data string `bun:"data,type:text,notnull"`
}{}).
IfNotExists().
Exec(ctx); err != nil {
return err
}
// table:planned_maintenance
if _, err := db.NewCreateTable().
Model(&struct {
bun.BaseModel `bun:"table:planned_maintenance"`
ID int `bun:"id,pk,autoincrement"`
Name string `bun:"name,type:text,notnull"`
Description string `bun:"description,type:text"`
AlertIDs string `bun:"alert_ids,type:text"`
Schedule string `bun:"schedule,type:text,notnull"`
CreatedAt time.Time `bun:"created_at,notnull"`
CreatedBy string `bun:"created_by,type:text,notnull"`
UpdatedAt time.Time `bun:"updated_at,notnull"`
UpdatedBy string `bun:"updated_by,type:text,notnull"`
}{}).
IfNotExists().
Exec(ctx); err != nil {
return err
}
// table:ttl_status
if _, err := db.NewCreateTable().
Model(&struct {
bun.BaseModel `bun:"table:ttl_status"`
ID int `bun:"id,pk,autoincrement"`
TransactionID string `bun:"transaction_id,type:text,notnull"`
CreatedAt time.Time `bun:"created_at,notnull"`
UpdatedAt time.Time `bun:"updated_at,notnull"`
TableName string `bun:"table_name,type:text,notnull"`
TTL int `bun:"ttl,notnull,default:0"`
ColdStorageTTL int `bun:"cold_storage_ttl,notnull,default:0"`
Status string `bun:"status,type:text,notnull"`
}{}).
IfNotExists().
Exec(ctx); err != nil {
return err
}
return nil
}
func (migration *addDashboards) Down(ctx context.Context, db *bun.DB) error {
return nil
}

View File

@@ -1,57 +0,0 @@
package sqlmigration
import (
"context"
"time"
"github.com/SigNoz/signoz/pkg/factory"
"github.com/uptrace/bun"
"github.com/uptrace/bun/migrate"
)
type addSavedViews struct{}
func NewAddSavedViewsFactory() factory.ProviderFactory[SQLMigration, Config] {
return factory.NewProviderFactory(factory.MustNewName("add_saved_views"), newAddSavedViews)
}
func newAddSavedViews(_ context.Context, _ factory.ProviderSettings, _ Config) (SQLMigration, error) {
return &addSavedViews{}, nil
}
func (migration *addSavedViews) Register(migrations *migrate.Migrations) error {
if err := migrations.Register(migration.Up, migration.Down); err != nil {
return err
}
return nil
}
func (migration *addSavedViews) Up(ctx context.Context, db *bun.DB) error {
// table:saved_views op:create
if _, err := db.NewCreateTable().
Model(&struct {
bun.BaseModel `bun:"table:saved_views"`
UUID string `bun:"uuid,pk,type:text"`
Name string `bun:"name,type:text,notnull"`
Category string `bun:"category,type:text,notnull"`
CreatedAt time.Time `bun:"created_at,notnull"`
CreatedBy string `bun:"created_by,type:text"`
UpdatedAt time.Time `bun:"updated_at,notnull"`
UpdatedBy string `bun:"updated_by,type:text"`
SourcePage string `bun:"source_page,type:text,notnull"`
Tags string `bun:"tags,type:text"`
Data string `bun:"data,type:text,notnull"`
ExtraData string `bun:"extra_data,type:text"`
}{}).
IfNotExists().
Exec(ctx); err != nil {
return err
}
return nil
}
func (migration *addSavedViews) Down(ctx context.Context, db *bun.DB) error {
return nil
}

View File

@@ -1,102 +0,0 @@
package sqlmigration
import (
"context"
"time"
"github.com/SigNoz/signoz/pkg/factory"
"github.com/uptrace/bun"
"github.com/uptrace/bun/migrate"
)
type addAgents struct{}
func NewAddAgentsFactory() factory.ProviderFactory[SQLMigration, Config] {
return factory.NewProviderFactory(factory.MustNewName("add_agents"), newAddAgents)
}
func newAddAgents(_ context.Context, _ factory.ProviderSettings, _ Config) (SQLMigration, error) {
return &addAgents{}, nil
}
func (migration *addAgents) Register(migrations *migrate.Migrations) error {
if err := migrations.Register(migration.Up, migration.Down); err != nil {
return err
}
return nil
}
func (migration *addAgents) Up(ctx context.Context, db *bun.DB) error {
if _, err := db.NewCreateTable().
Model(&struct {
bun.BaseModel `bun:"table:agents"`
AgentID string `bun:"agent_id,pk,type:text,unique"`
StartedAt time.Time `bun:"started_at,notnull"`
TerminatedAt time.Time `bun:"terminated_at"`
CurrentStatus string `bun:"current_status,type:text,notnull"`
EffectiveConfig string `bun:"effective_config,type:text,notnull"`
}{}).
IfNotExists().
Exec(ctx); err != nil {
return err
}
if _, err := db.NewCreateTable().
Model(&struct {
bun.BaseModel `bun:"table:agent_config_versions"`
ID string `bun:"id,pk,type:text"`
CreatedBy string `bun:"created_by,type:text"`
CreatedAt time.Time `bun:"created_at,default:CURRENT_TIMESTAMP"`
UpdatedBy string `bun:"updated_by,type:text"`
UpdatedAt time.Time `bun:"updated_at,default:CURRENT_TIMESTAMP"`
Version int `bun:"version,default:1,unique:element_version_idx"`
Active int `bun:"active"`
IsValid int `bun:"is_valid"`
Disabled int `bun:"disabled"`
ElementType string `bun:"element_type,notnull,type:varchar(120),unique:element_version_idx"`
DeployStatus string `bun:"deploy_status,notnull,type:varchar(80),default:'DIRTY'"`
DeploySequence int `bun:"deploy_sequence"`
DeployResult string `bun:"deploy_result,type:text"`
LastHash string `bun:"last_hash,type:text"`
LastConfig string `bun:"last_config,type:text"`
}{}).
IfNotExists().
Exec(ctx); err != nil {
return err
}
// add an index on the last_hash column
if _, err := db.NewCreateIndex().
Table("agent_config_versions").
Column("last_hash").
Index("idx_agent_config_versions_last_hash").
IfNotExists().
Exec(ctx); err != nil {
return err
}
if _, err := db.NewCreateTable().
Model(&struct {
bun.BaseModel `bun:"table:agent_config_elements"`
ID string `bun:"id,pk,type:text"`
CreatedBy string `bun:"created_by,type:text"`
CreatedAt time.Time `bun:"created_at,default:CURRENT_TIMESTAMP"`
UpdatedBy string `bun:"updated_by,type:text"`
UpdatedAt time.Time `bun:"updated_at,default:CURRENT_TIMESTAMP"`
ElementID string `bun:"element_id,type:text,notnull,unique:agent_config_elements_u1"`
ElementType string `bun:"element_type,type:varchar(120),notnull,unique:agent_config_elements_u1"`
VersionID string `bun:"version_id,type:text,notnull,unique:agent_config_elements_u1"`
}{}).
IfNotExists().
Exec(ctx); err != nil {
return err
}
return nil
}
func (migration *addAgents) Down(ctx context.Context, db *bun.DB) error {
return nil
}
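A detail worth noting in the models above: bun columns that share the same unique:NAME tag suffix (element_version_idx here, agent_config_elements_u1 below it) are collected into a single composite UNIQUE constraint, not independent single-column ones. A sketch for inspecting the generated DDL (the demo model and helper name are illustrative):
func printCompositeUniqueDDL(db *bun.DB) error {
	q := db.NewCreateTable().
		Model(&struct {
			bun.BaseModel `bun:"table:demo"`
			Version       int    `bun:"version,unique:demo_idx"`
			ElementType   string `bun:"element_type,type:varchar(120),unique:demo_idx"`
		}{})
	ddl, err := q.AppendQuery(db.Formatter(), nil)
	if err != nil {
		return err
	}
	// Both tagged columns land in one UNIQUE ("version", "element_type") constraint.
	fmt.Println(string(ddl))
	return nil
}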

View File

@@ -1,55 +0,0 @@
package sqlmigration
import (
"context"
"time"
"github.com/SigNoz/signoz/pkg/factory"
"github.com/uptrace/bun"
"github.com/uptrace/bun/migrate"
)
type addPipelines struct{}
func NewAddPipelinesFactory() factory.ProviderFactory[SQLMigration, Config] {
return factory.NewProviderFactory(factory.MustNewName("add_pipelines"), newAddPipelines)
}
func newAddPipelines(_ context.Context, _ factory.ProviderSettings, _ Config) (SQLMigration, error) {
return &addPipelines{}, nil
}
func (migration *addPipelines) Register(migrations *migrate.Migrations) error {
if err := migrations.Register(migration.Up, migration.Down); err != nil {
return err
}
return nil
}
func (migration *addPipelines) Up(ctx context.Context, db *bun.DB) error {
if _, err := db.NewCreateTable().
Model(&struct {
bun.BaseModel `bun:"table:pipelines"`
ID string `bun:"id,pk,type:text"`
OrderID int `bun:"order_id"`
Enabled bool `bun:"enabled"`
CreatedBy string `bun:"created_by,type:text"`
CreatedAt time.Time `bun:"created_at,default:current_timestamp"`
Name string `bun:"name,type:varchar(400),notnull"`
Alias string `bun:"alias,type:varchar(20),notnull"`
Description string `bun:"description,type:text"`
Filter string `bun:"filter,type:text,notnull"`
ConfigJSON string `bun:"config_json,type:text"`
}{}).
IfNotExists().
Exec(ctx); err != nil {
return err
}
return nil
}
func (migration *addPipelines) Down(ctx context.Context, db *bun.DB) error {
return nil
}

View File

@@ -1,79 +0,0 @@
package sqlmigration
import (
"context"
"time"
"github.com/SigNoz/signoz/pkg/factory"
"github.com/uptrace/bun"
"github.com/uptrace/bun/migrate"
)
type addIntegrations struct{}
func NewAddIntegrationsFactory() factory.ProviderFactory[SQLMigration, Config] {
return factory.NewProviderFactory(factory.MustNewName("add_integrations"), newAddIntegrations)
}
func newAddIntegrations(_ context.Context, _ factory.ProviderSettings, _ Config) (SQLMigration, error) {
return &addIntegrations{}, nil
}
func (migration *addIntegrations) Register(migrations *migrate.Migrations) error {
if err := migrations.Register(migration.Up, migration.Down); err != nil {
return err
}
return nil
}
func (migration *addIntegrations) Up(ctx context.Context, db *bun.DB) error {
if _, err := db.NewCreateTable().
Model(&struct {
bun.BaseModel `bun:"table:integrations_installed"`
IntegrationID string `bun:"integration_id,pk,type:text"`
ConfigJSON string `bun:"config_json,type:text"`
InstalledAt time.Time `bun:"installed_at,default:current_timestamp"`
}{}).
IfNotExists().
Exec(ctx); err != nil {
return err
}
if _, err := db.NewCreateTable().
Model(&struct {
bun.BaseModel `bun:"table:cloud_integrations_accounts"`
CloudProvider string `bun:"cloud_provider,type:text,unique:cloud_provider_id"`
ID string `bun:"id,type:text,notnull,unique:cloud_provider_id"`
ConfigJSON string `bun:"config_json,type:text"`
CloudAccountID string `bun:"cloud_account_id,type:text"`
LastAgentReportJSON string `bun:"last_agent_report_json,type:text"`
CreatedAt time.Time `bun:"created_at,notnull,default:current_timestamp"`
RemovedAt time.Time `bun:"removed_at,type:timestamp"`
}{}).
IfNotExists().
Exec(ctx); err != nil {
return err
}
if _, err := db.NewCreateTable().
Model(&struct {
bun.BaseModel `bun:"table:cloud_integrations_service_configs"`
CloudProvider string `bun:"cloud_provider,type:text,notnull,unique:service_cloud_provider_account"`
CloudAccountID string `bun:"cloud_account_id,type:text,notnull,unique:service_cloud_provider_account"`
ServiceID string `bun:"service_id,type:text,notnull,unique:service_cloud_provider_account"`
ConfigJSON string `bun:"config_json,type:text"`
CreatedAt time.Time `bun:"created_at,default:current_timestamp"`
}{}).
IfNotExists().
Exec(ctx); err != nil {
return err
}
return nil
}
func (migration *addIntegrations) Down(ctx context.Context, db *bun.DB) error {
return nil
}

View File

@@ -1,92 +0,0 @@
package sqlmigration
import (
"context"
"time"
"github.com/SigNoz/signoz/pkg/factory"
"github.com/uptrace/bun"
"github.com/uptrace/bun/migrate"
)
type addLicenses struct{}
func NewAddLicensesFactory() factory.ProviderFactory[SQLMigration, Config] {
return factory.NewProviderFactory(factory.MustNewName("add_licenses"), newAddLicenses)
}
func newAddLicenses(_ context.Context, _ factory.ProviderSettings, _ Config) (SQLMigration, error) {
return &addLicenses{}, nil
}
func (migration *addLicenses) Register(migrations *migrate.Migrations) error {
if err := migrations.Register(migration.Up, migration.Down); err != nil {
return err
}
return nil
}
func (migration *addLicenses) Up(ctx context.Context, db *bun.DB) error {
if _, err := db.NewCreateTable().
Model(&struct {
bun.BaseModel `bun:"table:licenses"`
Key string `bun:"key,pk,type:text"`
CreatedAt time.Time `bun:"createdAt,default:current_timestamp"`
UpdatedAt time.Time `bun:"updatedAt,default:current_timestamp"`
PlanDetails string `bun:"planDetails,type:text"`
ActivationID string `bun:"activationId,type:text"`
ValidationMessage string `bun:"validationMessage,type:text"`
LastValidated time.Time `bun:"lastValidated,default:current_timestamp"`
}{}).
IfNotExists().
Exec(ctx); err != nil {
return err
}
if _, err := db.NewCreateTable().
Model(&struct {
bun.BaseModel `bun:"table:sites"`
UUID string `bun:"uuid,pk,type:text"`
Alias string `bun:"alias,type:varchar(180),default:'PROD'"`
URL string `bun:"url,type:varchar(300)"`
CreatedAt time.Time `bun:"createdAt,default:current_timestamp"`
}{}).
IfNotExists().
Exec(ctx); err != nil {
return err
}
if _, err := db.NewCreateTable().
Model(&struct {
bun.BaseModel `bun:"table:feature_status"`
Name string `bun:"name,pk,type:text"`
Active bool `bun:"active"`
Usage int `bun:"usage,default:0"`
UsageLimit int `bun:"usage_limit,default:0"`
Route string `bun:"route,type:text"`
}{}).
IfNotExists().
Exec(ctx); err != nil {
return err
}
if _, err := db.NewCreateTable().
Model(&struct {
bun.BaseModel `bun:"table:licenses_v3"`
ID string `bun:"id,pk,type:text"`
Key string `bun:"key,type:text,notnull,unique"`
Data string `bun:"data,type:text"`
}{}).
IfNotExists().
Exec(ctx); err != nil {
return err
}
return nil
}
func (migration *addLicenses) Down(ctx context.Context, db *bun.DB) error {
return nil
}

View File

@@ -1,74 +0,0 @@
package sqlmigration
import (
"context"
"github.com/SigNoz/signoz/pkg/factory"
"github.com/uptrace/bun"
"github.com/uptrace/bun/migrate"
)
type addPats struct{}
func NewAddPatsFactory() factory.ProviderFactory[SQLMigration, Config] {
return factory.NewProviderFactory(factory.MustNewName("add_pats"), newAddPats)
}
func newAddPats(_ context.Context, _ factory.ProviderSettings, _ Config) (SQLMigration, error) {
return &addPats{}, nil
}
func (migration *addPats) Register(migrations *migrate.Migrations) error {
if err := migrations.Register(migration.Up, migration.Down); err != nil {
return err
}
return nil
}
func (migration *addPats) Up(ctx context.Context, db *bun.DB) error {
if _, err := db.NewCreateTable().
Model(&struct {
bun.BaseModel `bun:"table:org_domains"`
ID string `bun:"id,pk,type:text"`
OrgID string `bun:"org_id,type:text,notnull"`
Name string `bun:"name,type:varchar(50),notnull,unique"`
CreatedAt int `bun:"created_at,notnull"`
UpdatedAt int `bun:"updated_at"`
Data string `bun:"data,type:text,notnull"`
}{}).
ForeignKey(`("org_id") REFERENCES "organizations" ("id")`).
IfNotExists().
Exec(ctx); err != nil {
return err
}
if _, err := db.NewCreateTable().
Model(&struct {
bun.BaseModel `bun:"table:personal_access_tokens"`
ID int `bun:"id,pk,autoincrement"`
Role string `bun:"role,type:text,notnull,default:'ADMIN'"`
UserID string `bun:"user_id,type:text,notnull"`
Token string `bun:"token,type:text,notnull,unique"`
Name string `bun:"name,type:text,notnull"`
CreatedAt int `bun:"created_at,notnull,default:0"`
ExpiresAt int `bun:"expires_at,notnull,default:0"`
UpdatedAt int `bun:"updated_at,notnull,default:0"`
LastUsed int `bun:"last_used,notnull,default:0"`
Revoked bool `bun:"revoked,notnull,default:false"`
UpdatedByUserID string `bun:"updated_by_user_id,type:text,notnull,default:''"`
}{}).
ForeignKey(`("user_id") REFERENCES "users" ("id")`).
IfNotExists().
Exec(ctx); err != nil {
return err
}
return nil
}
func (migration *addPats) Down(ctx context.Context, db *bun.DB) error {
return nil
}

View File

@@ -1,93 +0,0 @@
package sqlmigration
import (
"context"
"github.com/SigNoz/signoz/pkg/factory"
"github.com/uptrace/bun"
"github.com/uptrace/bun/dialect"
"github.com/uptrace/bun/migrate"
)
type modifyDatetime struct{}
func NewModifyDatetimeFactory() factory.ProviderFactory[SQLMigration, Config] {
return factory.NewProviderFactory(factory.MustNewName("modify_datetime"), newModifyDatetime)
}
func newModifyDatetime(_ context.Context, _ factory.ProviderSettings, _ Config) (SQLMigration, error) {
return &modifyDatetime{}, nil
}
func (migration *modifyDatetime) Register(migrations *migrate.Migrations) error {
if err := migrations.Register(migration.Up, migration.Down); err != nil {
return err
}
return nil
}
func (migration *modifyDatetime) Up(ctx context.Context, db *bun.DB) error {
// only run this for legacy SQLite databases
if db.Dialect().Name() != dialect.SQLite {
return nil
}
// begin transaction
tx, err := db.BeginTx(ctx, nil)
if err != nil {
return err
}
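// roll back on any early return; after a successful Commit this returns sql.ErrTxDone, which is discarded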
defer func() {
_ = tx.Rollback()
}()
tables := []string{"dashboards", "rules", "planned_maintenance", "ttl_status", "saved_views"}
columns := []string{"created_at", "updated_at"}
for _, table := range tables {
for _, column := range columns {
if err := modifyColumn(ctx, tx, table, column); err != nil {
return err
}
}
}
for _, column := range []string{"started_at", "terminated_at"} {
if err := modifyColumn(ctx, tx, "agents", column); err != nil {
return err
}
}
if err := tx.Commit(); err != nil {
return err
}
return nil
}
func modifyColumn(ctx context.Context, tx bun.Tx, table string, column string) error {
// rename old column
if _, err := tx.ExecContext(ctx, `ALTER TABLE `+table+` RENAME COLUMN `+column+` TO `+column+`_old`); err != nil {
return err
}
// SQLite cannot add a NOT NULL column without a default, so add the replacement as nullable
if _, err := tx.ExecContext(ctx, `ALTER TABLE `+table+` ADD COLUMN `+column+` TIMESTAMP`); err != nil {
return err
}
// update the new column with the value of the old column
if _, err := tx.ExecContext(ctx, `UPDATE `+table+` SET `+column+` = `+column+`_old`); err != nil {
return err
}
// drop the old column
if _, err := tx.ExecContext(ctx, `ALTER TABLE `+table+` DROP COLUMN `+column+`_old`); err != nil {
return err
}
return nil
}
func (migration *modifyDatetime) Down(ctx context.Context, db *bun.DB) error {
return nil
}
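SQLite cannot change a column's type in place, which is why modifyColumn simulates it with the rename/add/copy/drop sequence above. A minimal standalone sketch of driving the helper for one column (the convertOneColumn name, table, and column are illustrative):
func convertOneColumn(ctx context.Context, db *bun.DB) error {
	tx, err := db.BeginTx(ctx, nil)
	if err != nil {
		return err
	}
	defer func() {
		// discard sql.ErrTxDone if Commit already succeeded
		_ = tx.Rollback()
	}()
	if err := modifyColumn(ctx, tx, "dashboards", "created_at"); err != nil {
		return err
	}
	return tx.Commit()
}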

View File

@@ -1,73 +0,0 @@
package sqlmigration
import (
"context"
"github.com/SigNoz/signoz/pkg/factory"
"github.com/uptrace/bun"
"github.com/uptrace/bun/dialect"
"github.com/uptrace/bun/migrate"
)
type modifyOrgDomain struct{}
func NewModifyOrgDomainFactory() factory.ProviderFactory[SQLMigration, Config] {
return factory.NewProviderFactory(factory.MustNewName("modify_org_domain"), newModifyOrgDomain)
}
func newModifyOrgDomain(_ context.Context, _ factory.ProviderSettings, _ Config) (SQLMigration, error) {
return &modifyOrgDomain{}, nil
}
func (migration *modifyOrgDomain) Register(migrations *migrate.Migrations) error {
if err := migrations.Register(migration.Up, migration.Down); err != nil {
return err
}
return nil
}
func (migration *modifyOrgDomain) Up(ctx context.Context, db *bun.DB) error {
// only run this for legacy SQLite databases
if db.Dialect().Name() != dialect.SQLite {
return nil
}
// begin transaction
tx, err := db.BeginTx(ctx, nil)
if err != nil {
return err
}
defer func() {
_ = tx.Rollback()
}()
// rename old column
if _, err := tx.ExecContext(ctx, `ALTER TABLE org_domains RENAME COLUMN updated_at TO updated_at_old`); err != nil {
return err
}
if _, err := tx.ExecContext(ctx, `ALTER TABLE org_domains ADD COLUMN updated_at INTEGER`); err != nil {
return err
}
if _, err := tx.ExecContext(ctx, `UPDATE org_domains SET updated_at = CAST(updated_at_old AS INTEGER)`); err != nil {
return err
}
// drop the old column
if _, err := tx.ExecContext(ctx, `ALTER TABLE org_domains DROP COLUMN updated_at_old`); err != nil {
return err
}
if err := tx.Commit(); err != nil {
return err
}
return nil
}
func (migration *modifyOrgDomain) Down(ctx context.Context, db *bun.DB) error {
return nil
}

View File

@@ -1,154 +0,0 @@
package sqlmigration
import (
"context"
"database/sql"
"github.com/SigNoz/signoz/pkg/errors"
"github.com/SigNoz/signoz/pkg/factory"
"github.com/SigNoz/signoz/pkg/sqlstore"
"github.com/uptrace/bun"
"github.com/uptrace/bun/migrate"
)
type updateOrganization struct {
store sqlstore.SQLStore
}
func NewUpdateOrganizationFactory(sqlstore sqlstore.SQLStore) factory.ProviderFactory[SQLMigration, Config] {
return factory.NewProviderFactory(factory.MustNewName("update_organization"), func(ctx context.Context, ps factory.ProviderSettings, c Config) (SQLMigration, error) {
return newUpdateOrganization(ctx, ps, c, sqlstore)
})
}
func newUpdateOrganization(_ context.Context, _ factory.ProviderSettings, _ Config, store sqlstore.SQLStore) (SQLMigration, error) {
return &updateOrganization{
store: store,
}, nil
}
func (migration *updateOrganization) Register(migrations *migrate.Migrations) error {
if err := migrations.Register(migration.Up, migration.Down); err != nil {
return err
}
return nil
}
func (migration *updateOrganization) Up(ctx context.Context, db *bun.DB) error {
tx, err := db.BeginTx(ctx, nil)
if err != nil {
return err
}
defer func() {
_ = tx.Rollback()
}()
// update apdex settings table
if err := updateApdexSettings(ctx, tx); err != nil {
return err
}
// drop user_flags table
if _, err := tx.NewDropTable().IfExists().Table("user_flags").Exec(ctx); err != nil {
return err
}
// add org id to groups table
if exists, err := migration.store.Dialect().ColumnExists(ctx, tx, "groups", "org_id"); err != nil {
return err
} else if !exists {
if _, err := tx.NewAddColumn().Table("groups").ColumnExpr("org_id TEXT").Exec(ctx); err != nil {
return err
}
}
// add created_at to groups table
for _, table := range []string{"groups"} {
if exists, err := migration.store.Dialect().ColumnExists(ctx, tx, table, "created_at"); err != nil {
return err
} else if !exists {
if _, err := tx.NewAddColumn().Table(table).ColumnExpr("created_at TIMESTAMP").Exec(ctx); err != nil {
return err
}
}
}
// add updated_at to organizations, users, groups table
for _, table := range []string{"organizations", "users", "groups"} {
if exists, err := migration.store.Dialect().ColumnExists(ctx, tx, table, "updated_at"); err != nil {
return err
} else if !exists {
if _, err := tx.NewAddColumn().Table(table).ColumnExpr("updated_at TIMESTAMP").Exec(ctx); err != nil {
return err
}
}
}
// organizations, users, and invites store created_at as an integer instead of a timestamp, so convert it
for _, table := range []string{"organizations", "users", "invites"} {
if err := migration.store.Dialect().IntToTimestamp(ctx, tx, table, "created_at"); err != nil {
return err
}
}
// migrate is_anonymous and has_opted_updates to boolean from int
for _, column := range []string{"is_anonymous", "has_opted_updates"} {
if err := migration.store.Dialect().IntToBoolean(ctx, tx, "organizations", column); err != nil {
return err
}
}
if err := tx.Commit(); err != nil {
return err
}
return nil
}
func (migration *updateOrganization) Down(ctx context.Context, db *bun.DB) error {
return nil
}
func updateApdexSettings(ctx context.Context, tx bun.Tx) error {
if _, err := tx.NewCreateTable().
Model(&struct {
bun.BaseModel `bun:"table:apdex_settings_new"`
OrgID string `bun:"org_id,pk,type:text"`
ServiceName string `bun:"service_name,pk,type:text"`
Threshold float64 `bun:"threshold,type:float,notnull"`
ExcludeStatusCodes string `bun:"exclude_status_codes,type:text,notnull"`
}{}).
ForeignKey(`("org_id") REFERENCES "organizations" ("id")`).
IfNotExists().
Exec(ctx); err != nil {
return err
}
// get org id from organizations table
var orgID string
if err := tx.QueryRowContext(ctx, `SELECT id FROM organizations LIMIT 1`).Scan(&orgID); err != nil && !errors.Is(err, sql.ErrNoRows) {
return err
}
if orgID != "" {
// copy old data
if _, err := tx.ExecContext(ctx, `INSERT INTO apdex_settings_new (org_id, service_name, threshold, exclude_status_codes) SELECT ?, service_name, threshold, exclude_status_codes FROM apdex_settings`, orgID); err != nil {
return err
}
}
// drop old table
if _, err := tx.NewDropTable().IfExists().Table("apdex_settings").Exec(ctx); err != nil {
return err
}
// rename new table to old table
if _, err := tx.ExecContext(ctx, `ALTER TABLE apdex_settings_new RENAME TO apdex_settings`); err != nil {
return err
}
return nil
}

View File

@@ -1,293 +0,0 @@
package sqlmigration
import (
"context"
"database/sql"
"encoding/json"
"strconv"
"time"
"github.com/SigNoz/signoz/pkg/alertmanager/alertmanagerserver"
"github.com/SigNoz/signoz/pkg/factory"
"github.com/SigNoz/signoz/pkg/sqlstore"
"github.com/SigNoz/signoz/pkg/types/alertmanagertypes"
"github.com/prometheus/alertmanager/config"
"github.com/tidwall/gjson"
"github.com/uptrace/bun"
"github.com/uptrace/bun/migrate"
)
type addAlertmanager struct {
store sqlstore.SQLStore
}
func NewAddAlertmanagerFactory(store sqlstore.SQLStore) factory.ProviderFactory[SQLMigration, Config] {
return factory.NewProviderFactory(factory.MustNewName("add_alertmanager"), func(ctx context.Context, ps factory.ProviderSettings, c Config) (SQLMigration, error) {
return newAddAlertmanager(ctx, ps, c, store)
})
}
func newAddAlertmanager(_ context.Context, _ factory.ProviderSettings, _ Config, store sqlstore.SQLStore) (SQLMigration, error) {
return &addAlertmanager{
store: store,
}, nil
}
func (migration *addAlertmanager) Register(migrations *migrate.Migrations) error {
if err := migrations.Register(migration.Up, migration.Down); err != nil {
return err
}
return nil
}
func (migration *addAlertmanager) Up(ctx context.Context, db *bun.DB) error {
tx, err := db.BeginTx(ctx, nil)
if err != nil {
return err
}
defer func() {
_ = tx.Rollback()
}()
if exists, err := migration.store.Dialect().ColumnExists(ctx, tx, "notification_channels", "deleted"); err != nil {
return err
} else if exists {
if _, err := tx.
NewDropColumn().
Table("notification_channels").
ColumnExpr("deleted").
Exec(ctx); err != nil {
return err
}
}
if exists, err := migration.store.Dialect().ColumnExists(ctx, tx, "notification_channels", "org_id"); err != nil {
return err
} else if !exists {
if _, err := tx.
NewAddColumn().
Table("notification_channels").
ColumnExpr("org_id TEXT REFERENCES organizations(id) ON DELETE CASCADE").
Exec(ctx); err != nil {
return err
}
}
if _, err := tx.
NewCreateTable().
Model(&struct {
bun.BaseModel `bun:"table:alertmanager_config"`
ID uint64 `bun:"id,pk,autoincrement"`
Config string `bun:"config,notnull,type:text"`
Hash string `bun:"hash,notnull,type:text"`
CreatedAt time.Time `bun:"created_at,notnull"`
UpdatedAt time.Time `bun:"updated_at,notnull"`
OrgID string `bun:"org_id,notnull,unique"`
}{}).
ForeignKey(`("org_id") REFERENCES "organizations" ("id")`).
IfNotExists().
Exec(ctx); err != nil {
return err
}
if _, err := tx.
NewCreateTable().
Model(&struct {
bun.BaseModel `bun:"table:alertmanager_state"`
ID uint64 `bun:"id,pk,autoincrement"`
Silences string `bun:"silences,nullzero,type:text"`
NFLog string `bun:"nflog,nullzero,type:text"`
CreatedAt time.Time `bun:"created_at,notnull"`
UpdatedAt time.Time `bun:"updated_at,notnull"`
OrgID string `bun:"org_id,notnull,unique"`
}{}).
ForeignKey(`("org_id") REFERENCES "organizations" ("id")`).
IfNotExists().
Exec(ctx); err != nil {
return err
}
var orgID string
err = tx.
NewSelect().
ColumnExpr("id").
Table("organizations").
Limit(1).
Scan(ctx, &orgID)
if err != nil {
if err != sql.ErrNoRows {
return err
}
}
if err == nil {
if err := migration.populateOrgIDInChannels(ctx, tx, orgID); err != nil {
return err
}
if err := migration.populateAlertmanagerConfig(ctx, tx, orgID); err != nil {
return err
}
}
if err := tx.Commit(); err != nil {
return err
}
return nil
}
func (migration *addAlertmanager) populateOrgIDInChannels(ctx context.Context, tx bun.Tx, orgID string) error {
if _, err := tx.
NewUpdate().
Table("notification_channels").
Set("org_id = ?", orgID).
Where("org_id IS NULL").
Exec(ctx); err != nil {
return err
}
return nil
}
func (migration *addAlertmanager) populateAlertmanagerConfig(ctx context.Context, tx bun.Tx, orgID string) error {
var channels []*alertmanagertypes.Channel
err := tx.
NewSelect().
Model(&channels).
Where("org_id = ?", orgID).
Scan(ctx)
if err != nil {
return err
}
var receiversFromChannels []string
for _, channel := range channels {
receiversFromChannels = append(receiversFromChannels, channel.Name)
}
type matcher struct {
bun.BaseModel `bun:"table:rules"`
ID int `bun:"id,pk"`
Data string `bun:"data"`
}
matchers := []matcher{}
err = tx.
NewSelect().
Column("id", "data").
Model(&matchers).
Scan(ctx)
if err != nil {
return err
}
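// map each rule ID to the receivers it routes to; rules without preferredChannels fan out to every channel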
matchersMap := make(map[string][]string)
for _, matcher := range matchers {
receivers := gjson.Get(matcher.Data, "preferredChannels").Array()
for _, receiver := range receivers {
matchersMap[strconv.Itoa(matcher.ID)] = append(matchersMap[strconv.Itoa(matcher.ID)], receiver.String())
}
if len(receivers) == 0 {
matchersMap[strconv.Itoa(matcher.ID)] = append(matchersMap[strconv.Itoa(matcher.ID)], receiversFromChannels...)
}
}
for _, channel := range channels {
if err := migration.msTeamsChannelToMSTeamsV2Channel(channel); err != nil {
return err
}
}
config, err := alertmanagertypes.NewConfigFromChannels(alertmanagerserver.NewConfig().Global, alertmanagerserver.NewConfig().Route, channels, orgID)
if err != nil {
return err
}
for ruleID, receivers := range matchersMap {
err = config.CreateRuleIDMatcher(ruleID, receivers)
if err != nil {
return err
}
}
if _, err := tx.
NewInsert().
Model(config.StoreableConfig()).
On("CONFLICT (org_id) DO UPDATE").
Set("config = ?", config.StoreableConfig().Config).
Set("hash = ?", config.StoreableConfig().Hash).
Set("updated_at = ?", config.StoreableConfig().UpdatedAt).
Exec(ctx); err != nil {
return err
}
for _, channel := range channels {
if channel.Type == "msteamsv2" {
if _, err := tx.
NewUpdate().
Model(channel).
WherePK().
Exec(ctx); err != nil {
return err
}
}
}
return nil
}
func (migration *addAlertmanager) Down(ctx context.Context, db *bun.DB) error {
return nil
}
func (migration *addAlertmanager) msTeamsChannelToMSTeamsV2Channel(c *alertmanagertypes.Channel) error {
if c.Type != "msteams" {
return nil
}
receiver, err := alertmanagertypes.NewReceiver(c.Data)
if err != nil {
return err
}
receiver = migration.msTeamsReceiverToMSTeamsV2Receiver(receiver)
data, err := json.Marshal(receiver)
if err != nil {
return err
}
c.Type = "msteamsv2"
c.Data = string(data)
c.UpdatedAt = time.Now()
return nil
}
func (migration *addAlertmanager) msTeamsReceiverToMSTeamsV2Receiver(receiver alertmanagertypes.Receiver) alertmanagertypes.Receiver {
if receiver.MSTeamsConfigs == nil {
return receiver
}
var msTeamsV2Configs []*config.MSTeamsV2Config
for _, cfg := range receiver.MSTeamsConfigs {
msTeamsV2Configs = append(msTeamsV2Configs, &config.MSTeamsV2Config{
NotifierConfig: cfg.NotifierConfig,
HTTPConfig: cfg.HTTPConfig,
WebhookURL: cfg.WebhookURL,
WebhookURLFile: cfg.WebhookURLFile,
Title: cfg.Title,
Text: cfg.Text,
})
}
receiver.MSTeamsConfigs = nil
receiver.MSTeamsV2Configs = msTeamsV2Configs
return receiver
}
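Net effect of the msteams handling above: every legacy msteams channel is re-marshalled as an msteamsv2 receiver with its notifier, HTTP, and webhook settings carried over field by field, folded into the per-org Alertmanager config, and then written back so the channel row and the stored config agree.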

View File

@@ -1,81 +0,0 @@
package sqlmigration
import (
"context"
"github.com/SigNoz/signoz/pkg/factory"
"github.com/SigNoz/signoz/pkg/sqlstore"
"github.com/SigNoz/signoz/pkg/types"
"github.com/uptrace/bun"
"github.com/uptrace/bun/migrate"
)
type updateDashboardAndSavedViews struct {
store sqlstore.SQLStore
}
func NewUpdateDashboardAndSavedViewsFactory(sqlstore sqlstore.SQLStore) factory.ProviderFactory[SQLMigration, Config] {
return factory.NewProviderFactory(factory.MustNewName("update_group"), func(ctx context.Context, ps factory.ProviderSettings, c Config) (SQLMigration, error) {
return newUpdateDashboardAndSavedViews(ctx, ps, c, sqlstore)
})
}
func newUpdateDashboardAndSavedViews(_ context.Context, _ factory.ProviderSettings, _ Config, store sqlstore.SQLStore) (SQLMigration, error) {
return &updateDashboardAndSavedViews{
store: store,
}, nil
}
func (migration *updateDashboardAndSavedViews) Register(migrations *migrate.Migrations) error {
if err := migrations.Register(migration.Up, migration.Down); err != nil {
return err
}
return nil
}
func (migration *updateDashboardAndSavedViews) Up(ctx context.Context, db *bun.DB) error {
tx, err := db.BeginTx(ctx, nil)
if err != nil {
return err
}
defer func() {
_ = tx.Rollback()
}()
// get all org ids
var orgIDs []string
if err := migration.store.BunDB().NewSelect().Model((*types.Organization)(nil)).Column("id").Scan(ctx, &orgIDs); err != nil {
return err
}
// add org_id to the dashboards and saved_views tables
for _, table := range []string{"dashboards", "saved_views"} {
if exists, err := migration.store.Dialect().ColumnExists(ctx, tx, table, "org_id"); err != nil {
return err
} else if !exists {
if _, err := tx.NewAddColumn().Table(table).ColumnExpr("org_id TEXT REFERENCES organizations(id) ON DELETE CASCADE").Exec(ctx); err != nil {
return err
}
// if there is exactly one org, backfill its ID on all existing rows
if len(orgIDs) == 1 {
orgID := orgIDs[0]
if _, err := tx.NewUpdate().Table(table).Set("org_id = ?", orgID).Where("org_id IS NULL").Exec(ctx); err != nil {
return err
}
}
}
}
if err := tx.Commit(); err != nil {
return err
}
return nil
}
func (migration *updateDashboardAndSavedViews) Down(ctx context.Context, db *bun.DB) error {
return nil
}

View File

@@ -1,133 +0,0 @@
package sqlmigration
import (
"context"
"github.com/SigNoz/signoz/pkg/factory"
"github.com/SigNoz/signoz/pkg/sqlstore"
"github.com/SigNoz/signoz/pkg/types"
"github.com/uptrace/bun"
"github.com/uptrace/bun/migrate"
)
type updatePatAndOrgDomains struct {
store sqlstore.SQLStore
}
func NewUpdatePatAndOrgDomainsFactory(sqlstore sqlstore.SQLStore) factory.ProviderFactory[SQLMigration, Config] {
return factory.NewProviderFactory(factory.MustNewName("update_pat_and_org_domains"), func(ctx context.Context, ps factory.ProviderSettings, c Config) (SQLMigration, error) {
return newUpdatePatAndOrgDomains(ctx, ps, c, sqlstore)
})
}
func newUpdatePatAndOrgDomains(_ context.Context, _ factory.ProviderSettings, _ Config, store sqlstore.SQLStore) (SQLMigration, error) {
return &updatePatAndOrgDomains{
store: store,
}, nil
}
func (migration *updatePatAndOrgDomains) Register(migrations *migrate.Migrations) error {
if err := migrations.Register(migration.Up, migration.Down); err != nil {
return err
}
return nil
}
func (migration *updatePatAndOrgDomains) Up(ctx context.Context, db *bun.DB) error {
// begin transaction
tx, err := db.BeginTx(ctx, nil)
if err != nil {
return err
}
defer func() {
_ = tx.Rollback()
}()
// get all org ids
var orgIDs []string
if err := tx.NewSelect().Model((*types.Organization)(nil)).Column("id").Scan(ctx, &orgIDs); err != nil {
return err
}
// add org_id to the personal_access_tokens table; org_domains already has it and is rebuilt below
if exists, err := migration.store.Dialect().ColumnExists(ctx, tx, "personal_access_tokens", "org_id"); err != nil {
return err
} else if !exists {
if _, err := tx.NewAddColumn().Table("personal_access_tokens").ColumnExpr("org_id TEXT REFERENCES organizations(id) ON DELETE CASCADE").Exec(ctx); err != nil {
return err
}
// if there is exactly one org, backfill its ID on all existing personal_access_tokens rows
if len(orgIDs) == 1 {
orgID := orgIDs[0]
if _, err := tx.NewUpdate().Table("personal_access_tokens").Set("org_id = ?", orgID).Where("org_id IS NULL").Exec(ctx); err != nil {
return err
}
}
}
if err := updateOrgId(ctx, tx); err != nil {
return err
}
// change created_at and updated_at from integer to timestamp
for _, table := range []string{"personal_access_tokens", "org_domains"} {
if err := migration.store.Dialect().IntToTimestamp(ctx, tx, table, "created_at"); err != nil {
return err
}
if err := migration.store.Dialect().IntToTimestamp(ctx, tx, table, "updated_at"); err != nil {
return err
}
}
// drop table if exists ingestion_keys
if _, err := tx.NewDropTable().IfExists().Table("ingestion_keys").Exec(ctx); err != nil {
return err
}
if err := tx.Commit(); err != nil {
return err
}
return nil
}
func (migration *updatePatAndOrgDomains) Down(ctx context.Context, db *bun.DB) error {
return nil
}
func updateOrgId(ctx context.Context, tx bun.Tx) error {
if _, err := tx.NewCreateTable().
Model(&struct {
bun.BaseModel `bun:"table:org_domains_new"`
ID string `bun:"id,pk,type:text"`
OrgID string `bun:"org_id,type:text,notnull"`
Name string `bun:"name,type:varchar(50),notnull,unique"`
CreatedAt int `bun:"created_at,notnull"`
UpdatedAt int `bun:"updated_at"`
Data string `bun:"data,type:text,notnull"`
}{}).
ForeignKey(`("org_id") REFERENCES "organizations" ("id") ON DELETE CASCADE`).
IfNotExists().
Exec(ctx); err != nil {
return err
}
// copy data from org_domains to org_domains_new
if _, err := tx.ExecContext(ctx, `INSERT INTO org_domains_new (id, org_id, name, created_at, updated_at, data) SELECT id, org_id, name, created_at, updated_at, data FROM org_domains`); err != nil {
return err
}
// delete old table
if _, err := tx.NewDropTable().IfExists().Table("org_domains").Exec(ctx); err != nil {
return err
}
// rename new table to org_domains
if _, err := tx.ExecContext(ctx, `ALTER TABLE org_domains_new RENAME TO org_domains`); err != nil {
return err
}
return nil
}
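The rebuild above, instead of a plain ALTER, is forced by SQLite: it cannot modify a foreign-key constraint on an existing table (here, to add ON DELETE CASCADE), so the helper creates org_domains_new with the desired constraint, copies the rows, drops the old table, and renames the new one into place.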

View File

@@ -1,97 +0,0 @@
package sqlmigration
import (
"context"
"github.com/SigNoz/signoz/pkg/factory"
"github.com/SigNoz/signoz/pkg/sqlstore"
"github.com/SigNoz/signoz/pkg/types"
"github.com/uptrace/bun"
"github.com/uptrace/bun/migrate"
)
type updatePipelines struct {
store sqlstore.SQLStore
}
func NewUpdatePipelines(sqlstore sqlstore.SQLStore) factory.ProviderFactory[SQLMigration, Config] {
return factory.NewProviderFactory(factory.MustNewName("update_pipelines"), func(ctx context.Context, ps factory.ProviderSettings, c Config) (SQLMigration, error) {
return newUpdatePipelines(ctx, ps, c, sqlstore)
})
}
func newUpdatePipelines(_ context.Context, _ factory.ProviderSettings, _ Config, store sqlstore.SQLStore) (SQLMigration, error) {
return &updatePipelines{
store: store,
}, nil
}
func (migration *updatePipelines) Register(migrations *migrate.Migrations) error {
if err := migrations.Register(migration.Up, migration.Down); err != nil {
return err
}
return nil
}
func (migration *updatePipelines) Up(ctx context.Context, db *bun.DB) error {
tx, err := db.BeginTx(ctx, nil)
if err != nil {
return err
}
defer func() {
_ = tx.Rollback()
}()
// get all org ids
var orgIDs []string
if err := migration.store.BunDB().NewSelect().Model((*types.Organization)(nil)).Column("id").Scan(ctx, &orgIDs); err != nil {
return err
}
// add org id to pipelines table
if exists, err := migration.store.Dialect().ColumnExists(ctx, tx, "pipelines", "org_id"); err != nil {
return err
} else if !exists {
if _, err := tx.NewAddColumn().Table("pipelines").ColumnExpr("org_id TEXT REFERENCES organizations(id) ON DELETE CASCADE").Exec(ctx); err != nil {
return err
}
// if there is exactly one org, backfill its ID on all existing pipelines rows
if len(orgIDs) == 1 {
orgID := orgIDs[0]
if _, err := tx.NewUpdate().Table("pipelines").Set("org_id = ?", orgID).Where("org_id IS NULL").Exec(ctx); err != nil {
return err
}
}
}
// add updated_by to pipelines table
if exists, err := migration.store.Dialect().ColumnExists(ctx, tx, "pipelines", "updated_by"); err != nil {
return err
} else if !exists {
if _, err := tx.NewAddColumn().Table("pipelines").ColumnExpr("updated_by TEXT").Exec(ctx); err != nil {
return err
}
}
// add updated_at to pipelines table
if exists, err := migration.store.Dialect().ColumnExists(ctx, tx, "pipelines", "updated_at"); err != nil {
return err
} else if !exists {
if _, err := tx.NewAddColumn().Table("pipelines").ColumnExpr("updated_at TIMESTAMP").Exec(ctx); err != nil {
return err
}
}
if err := tx.Commit(); err != nil {
return err
}
return nil
}
func (migration *updatePipelines) Down(ctx context.Context, db *bun.DB) error {
return nil
}

View File

@@ -1,77 +0,0 @@
package sqlmigration
import (
"context"
"github.com/SigNoz/signoz/pkg/factory"
"github.com/SigNoz/signoz/pkg/sqlstore"
"github.com/uptrace/bun"
"github.com/uptrace/bun/migrate"
)
type dropLicensesSites struct {
store sqlstore.SQLStore
}
func NewDropLicensesSitesFactory(sqlstore sqlstore.SQLStore) factory.ProviderFactory[SQLMigration, Config] {
return factory.NewProviderFactory(factory.MustNewName("drop_licenses_sites"), func(ctx context.Context, ps factory.ProviderSettings, c Config) (SQLMigration, error) {
return newDropLicensesSites(ctx, ps, c, sqlstore)
})
}
func newDropLicensesSites(_ context.Context, _ factory.ProviderSettings, _ Config, store sqlstore.SQLStore) (SQLMigration, error) {
return &dropLicensesSites{store: store}, nil
}
func (migration *dropLicensesSites) Register(migrations *migrate.Migrations) error {
if err := migrations.Register(migration.Up, migration.Down); err != nil {
return err
}
return nil
}
func (migration *dropLicensesSites) Up(ctx context.Context, db *bun.DB) error {
tx, err := db.BeginTx(ctx, nil)
if err != nil {
return err
}
defer func() {
_ = tx.Rollback()
}()
if _, err := tx.
NewDropTable().
IfExists().
Table("sites").
Exec(ctx); err != nil {
return err
}
if _, err := tx.
NewDropTable().
IfExists().
Table("licenses").
Exec(ctx); err != nil {
return err
}
_, err = migration.
store.
Dialect().
RenameColumn(ctx, tx, "saved_views", "uuid", "id")
if err != nil {
return err
}
if err := tx.Commit(); err != nil {
return err
}
return nil
}
func (migration *dropLicensesSites) Down(context.Context, *bun.DB) error {
return nil
}

View File

@@ -1,135 +0,0 @@
package sqlmigration
import (
"context"
"database/sql"
"time"
"github.com/SigNoz/signoz/pkg/factory"
"github.com/SigNoz/signoz/pkg/sqlstore"
"github.com/SigNoz/signoz/pkg/types"
"github.com/SigNoz/signoz/pkg/valuer"
"github.com/uptrace/bun"
"github.com/uptrace/bun/migrate"
)
type updateInvites struct {
store sqlstore.SQLStore
}
type existingInvite struct {
bun.BaseModel `bun:"table:invites"`
OrgID string `bun:"org_id,type:text,notnull" json:"orgId"`
ID int `bun:"id,pk,autoincrement" json:"id"`
Name string `bun:"name,type:text,notnull" json:"name"`
Email string `bun:"email,type:text,notnull,unique" json:"email"`
Token string `bun:"token,type:text,notnull" json:"token"`
CreatedAt time.Time `bun:"created_at,notnull" json:"createdAt"`
Role string `bun:"role,type:text,notnull" json:"role"`
}
type newInvite struct {
bun.BaseModel `bun:"table:user_invite"`
types.Identifiable
types.TimeAuditable
Name string `bun:"name,type:text,notnull" json:"name"`
Email string `bun:"email,type:text,notnull,unique" json:"email"`
Token string `bun:"token,type:text,notnull" json:"token"`
Role string `bun:"role,type:text,notnull" json:"role"`
OrgID string `bun:"org_id,type:text,notnull" json:"orgId"`
}
func NewUpdateInvitesFactory(sqlstore sqlstore.SQLStore) factory.ProviderFactory[SQLMigration, Config] {
return factory.NewProviderFactory(factory.MustNewName("update_invites"), func(ctx context.Context, ps factory.ProviderSettings, c Config) (SQLMigration, error) {
return newUpdateInvites(ctx, ps, c, sqlstore)
})
}
func newUpdateInvites(_ context.Context, _ factory.ProviderSettings, _ Config, store sqlstore.SQLStore) (SQLMigration, error) {
return &updateInvites{store: store}, nil
}
func (migration *updateInvites) Register(migrations *migrate.Migrations) error {
if err := migrations.Register(migration.Up, migration.Down); err != nil {
return err
}
return nil
}
func (migration *updateInvites) Up(ctx context.Context, db *bun.DB) error {
tx, err := db.BeginTx(ctx, nil)
if err != nil {
return err
}
defer func() {
_ = tx.Rollback()
}()
err = migration.
store.
Dialect().
RenameTableAndModifyModel(ctx, tx, new(existingInvite), new(newInvite), []string{OrgReference}, func(ctx context.Context) error {
existingInvites := make([]*existingInvite, 0)
err = tx.
NewSelect().
Model(&existingInvites).
Scan(ctx)
if err != nil {
if err != sql.ErrNoRows {
return err
}
}
if err == nil && len(existingInvites) > 0 {
newInvites := migration.CopyOldInvitesToNewInvites(existingInvites)
_, err = tx.
NewInsert().
Model(&newInvites).
Exec(ctx)
if err != nil {
return err
}
}
return nil
})
if err != nil {
return err
}
err = tx.Commit()
if err != nil {
return err
}
return nil
}
func (migration *updateInvites) Down(context.Context, *bun.DB) error {
return nil
}
func (migration *updateInvites) CopyOldInvitesToNewInvites(existingInvites []*existingInvite) []*newInvite {
newInvites := make([]*newInvite, 0)
for _, invite := range existingInvites {
newInvites = append(newInvites, &newInvite{
Identifiable: types.Identifiable{
ID: valuer.GenerateUUID(),
},
TimeAuditable: types.TimeAuditable{
CreatedAt: invite.CreatedAt,
UpdatedAt: time.Now(),
},
Name: invite.Name,
Email: invite.Email,
Token: invite.Token,
Role: invite.Role,
OrgID: invite.OrgID,
})
}
return newInvites
}

View File

@@ -1,76 +0,0 @@
package sqlmigration
import (
"context"
"github.com/SigNoz/signoz/pkg/factory"
"github.com/SigNoz/signoz/pkg/sqlstore"
"github.com/uptrace/bun"
"github.com/uptrace/bun/migrate"
)
type updatePat struct {
store sqlstore.SQLStore
}
func NewUpdatePatFactory(sqlstore sqlstore.SQLStore) factory.ProviderFactory[SQLMigration, Config] {
return factory.NewProviderFactory(factory.MustNewName("update_pat"), func(ctx context.Context, ps factory.ProviderSettings, c Config) (SQLMigration, error) {
return newUpdatePat(ctx, ps, c, sqlstore)
})
}
func newUpdatePat(_ context.Context, _ factory.ProviderSettings, _ Config, store sqlstore.SQLStore) (SQLMigration, error) {
return &updatePat{store: store}, nil
}
func (migration *updatePat) Register(migrations *migrate.Migrations) error {
if err := migrations.Register(migration.Up, migration.Down); err != nil {
return err
}
return nil
}
func (migration *updatePat) Up(ctx context.Context, db *bun.DB) error {
tx, err := db.BeginTx(ctx, nil)
if err != nil {
return err
}
defer func() {
_ = tx.Rollback()
}()
for _, column := range []string{"last_used", "expires_at"} {
if err := migration.
store.
Dialect().
AddNotNullDefaultToColumn(ctx, tx, "personal_access_tokens", column, "INTEGER", "0"); err != nil {
return err
}
}
if err := migration.
store.
Dialect().
AddNotNullDefaultToColumn(ctx, tx, "personal_access_tokens", "revoked", "BOOLEAN", "false"); err != nil {
return err
}
if err := migration.
store.
Dialect().
AddNotNullDefaultToColumn(ctx, tx, "personal_access_tokens", "updated_by_user_id", "TEXT", "''"); err != nil {
return err
}
if err := tx.Commit(); err != nil {
return err
}
return nil
}
func (migration *updatePat) Down(ctx context.Context, db *bun.DB) error {
return nil
}

View File

@@ -1,274 +0,0 @@
package sqlmigration
import (
"context"
"database/sql"
"time"
"github.com/SigNoz/signoz/pkg/factory"
"github.com/SigNoz/signoz/pkg/sqlstore"
"github.com/SigNoz/signoz/pkg/types"
"github.com/SigNoz/signoz/pkg/valuer"
"github.com/uptrace/bun"
"github.com/uptrace/bun/migrate"
)
type updateAlertmanager struct {
store sqlstore.SQLStore
}
type existingChannel struct {
bun.BaseModel `bun:"table:notification_channels"`
ID int `json:"id" bun:"id,pk,autoincrement"`
Name string `json:"name" bun:"name"`
Type string `json:"type" bun:"type"`
Data string `json:"data" bun:"data"`
CreatedAt time.Time `json:"created_at" bun:"created_at"`
UpdatedAt time.Time `json:"updated_at" bun:"updated_at"`
OrgID string `json:"org_id" bun:"org_id"`
}
type newChannel struct {
bun.BaseModel `bun:"table:notification_channel"`
types.Identifiable
types.TimeAuditable
Name string `json:"name" bun:"name"`
Type string `json:"type" bun:"type"`
Data string `json:"data" bun:"data"`
OrgID string `json:"org_id" bun:"org_id"`
}
type existingAlertmanagerConfig struct {
bun.BaseModel `bun:"table:alertmanager_config"`
ID uint64 `bun:"id,pk,autoincrement"`
Config string `bun:"config,notnull,type:text"`
Hash string `bun:"hash,notnull,type:text"`
CreatedAt time.Time `bun:"created_at,notnull"`
UpdatedAt time.Time `bun:"updated_at,notnull"`
OrgID string `bun:"org_id,notnull,unique"`
}
type newAlertmanagerConfig struct {
bun.BaseModel `bun:"table:alertmanager_config_new"`
types.Identifiable
types.TimeAuditable
Config string `bun:"config,notnull,type:text"`
Hash string `bun:"hash,notnull,type:text"`
OrgID string `bun:"org_id,notnull,unique"`
}
type existingAlertmanagerState struct {
bun.BaseModel `bun:"table:alertmanager_state"`
ID uint64 `bun:"id,pk,autoincrement"`
Silences string `bun:"silences,nullzero,type:text"`
NFLog string `bun:"nflog,nullzero,type:text"`
CreatedAt time.Time `bun:"created_at,notnull"`
UpdatedAt time.Time `bun:"updated_at,notnull"`
OrgID string `bun:"org_id,notnull,unique"`
}
type newAlertmanagerState struct {
bun.BaseModel `bun:"table:alertmanager_state_new"`
types.Identifiable
types.TimeAuditable
Silences string `bun:"silences,nullzero,type:text"`
NFLog string `bun:"nflog,nullzero,type:text"`
OrgID string `bun:"org_id,notnull,unique"`
}
func NewUpdateAlertmanagerFactory(sqlstore sqlstore.SQLStore) factory.ProviderFactory[SQLMigration, Config] {
return factory.NewProviderFactory(factory.MustNewName("update_alertmanager"), func(ctx context.Context, ps factory.ProviderSettings, c Config) (SQLMigration, error) {
return newUpdateAlertmanager(ctx, ps, c, sqlstore)
})
}
func newUpdateAlertmanager(_ context.Context, _ factory.ProviderSettings, _ Config, store sqlstore.SQLStore) (SQLMigration, error) {
return &updateAlertmanager{store: store}, nil
}
func (migration *updateAlertmanager) Register(migrations *migrate.Migrations) error {
if err := migrations.Register(migration.Up, migration.Down); err != nil {
return err
}
return nil
}
func (migration *updateAlertmanager) Up(ctx context.Context, db *bun.DB) error {
tx, err := db.BeginTx(ctx, nil)
if err != nil {
return err
}
defer func() {
_ = tx.Rollback()
}()
err = migration.
store.
Dialect().
RenameTableAndModifyModel(ctx, tx, new(existingChannel), new(newChannel), []string{OrgReference}, func(ctx context.Context) error {
existingChannels := make([]*existingChannel, 0)
err = tx.
NewSelect().
Model(&existingChannels).
Scan(ctx)
if err != nil {
if err != sql.ErrNoRows {
return err
}
}
if err == nil && len(existingChannels) > 0 {
newChannels := migration.
CopyOldChannelToNewChannel(existingChannels)
_, err = tx.
NewInsert().
Model(&newChannels).
Exec(ctx)
if err != nil {
return err
}
}
return nil
})
if err != nil {
return err
}
err = migration.
store.
Dialect().
UpdatePrimaryKey(ctx, tx, new(existingAlertmanagerConfig), new(newAlertmanagerConfig), OrgReference, func(ctx context.Context) error {
existingAlertmanagerConfigs := make([]*existingAlertmanagerConfig, 0)
err = tx.
NewSelect().
Model(&existingAlertmanagerConfigs).
Scan(ctx)
if err != nil {
if err != sql.ErrNoRows {
return err
}
}
if err == nil && len(existingAlertmanagerConfigs) > 0 {
newAlertmanagerConfigs := migration.
CopyOldConfigToNewConfig(existingAlertmanagerConfigs)
_, err = tx.
NewInsert().
Model(&newAlertmanagerConfigs).
Exec(ctx)
if err != nil {
return err
}
}
return nil
})
if err != nil {
return err
}
err = migration.
store.
Dialect().
UpdatePrimaryKey(ctx, tx, new(existingAlertmanagerState), new(newAlertmanagerState), OrgReference, func(ctx context.Context) error {
existingAlertmanagerStates := make([]*existingAlertmanagerState, 0)
err = tx.
NewSelect().
Model(&existingAlertmanagerStates).
Scan(ctx)
if err != nil {
if err != sql.ErrNoRows {
return err
}
}
if err == nil && len(existingAlertmanagerStates) > 0 {
newAlertmanagerStates := migration.
CopyOldStateToNewState(existingAlertmanagerStates)
_, err = tx.
NewInsert().
Model(&newAlertmanagerStates).
Exec(ctx)
if err != nil {
return err
}
}
return nil
})
if err != nil {
return err
}
err = tx.Commit()
if err != nil {
return err
}
return nil
}
func (migration *updateAlertmanager) Down(context.Context, *bun.DB) error {
return nil
}
func (migration *updateAlertmanager) CopyOldChannelToNewChannel(existingChannels []*existingChannel) []*newChannel {
newChannels := make([]*newChannel, 0)
for _, channel := range existingChannels {
newChannels = append(newChannels, &newChannel{
Identifiable: types.Identifiable{
ID: valuer.GenerateUUID(),
},
TimeAuditable: types.TimeAuditable{
CreatedAt: channel.CreatedAt,
UpdatedAt: channel.UpdatedAt,
},
Name: channel.Name,
Type: channel.Type,
Data: channel.Data,
OrgID: channel.OrgID,
})
}
return newChannels
}
func (migration *updateAlertmanager) CopyOldConfigToNewConfig(existingAlertmanagerConfigs []*existingAlertmanagerConfig) []*newAlertmanagerConfig {
newAlertmanagerConfigs := make([]*newAlertmanagerConfig, 0)
for _, config := range existingAlertmanagerConfigs {
newAlertmanagerConfigs = append(newAlertmanagerConfigs, &newAlertmanagerConfig{
Identifiable: types.Identifiable{
ID: valuer.GenerateUUID(),
},
TimeAuditable: types.TimeAuditable{
CreatedAt: config.CreatedAt,
UpdatedAt: config.UpdatedAt,
},
Config: config.Config,
Hash: config.Hash,
OrgID: config.OrgID,
})
}
return newAlertmanagerConfigs
}
func (migration *updateAlertmanager) CopyOldStateToNewState(existingAlertmanagerStates []*existingAlertmanagerState) []*newAlertmanagerState {
newAlertmanagerStates := make([]*newAlertmanagerState, 0)
for _, state := range existingAlertmanagerStates {
newAlertmanagerStates = append(newAlertmanagerStates, &newAlertmanagerState{
Identifiable: types.Identifiable{
ID: valuer.GenerateUUID(),
},
TimeAuditable: types.TimeAuditable{
CreatedAt: state.CreatedAt,
UpdatedAt: state.UpdatedAt,
},
Silences: state.Silences,
NFLog: state.NFLog,
OrgID: state.OrgID,
})
}
return newAlertmanagerStates
}
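
All the migrations in this changeset share one contract: a factory constructs the migration, Register wires Up/Down into bun's migrator, and Up does its work inside a single transaction whose deferred Rollback becomes a no-op once Commit succeeds. A minimal sketch of how such a migration is driven with bun's migrate package (the in-memory SQLite DSN and the no-op Up/Down stand-ins are illustrative; in SigNoz the factory/provider machinery does this wiring):

package main

import (
	"context"
	"database/sql"
	"log"

	"github.com/uptrace/bun"
	"github.com/uptrace/bun/dialect/sqlitedialect"
	"github.com/uptrace/bun/driver/sqliteshim"
	"github.com/uptrace/bun/migrate"
)

func main() {
	ctx := context.Background()

	// Illustrative in-memory database; SigNoz wires this through sqlstore.
	sqldb, err := sql.Open(sqliteshim.ShimName, "file::memory:?cache=shared")
	if err != nil {
		log.Fatal(err)
	}
	db := bun.NewDB(sqldb, sqlitedialect.New())

	// Each SQLMigration's Register does exactly this with its Up/Down pair.
	migrations := migrate.NewMigrations()
	if err := migrations.Register(
		func(ctx context.Context, db *bun.DB) error { return nil }, // stand-in for Up
		func(ctx context.Context, db *bun.DB) error { return nil }, // stand-in for Down
	); err != nil {
		log.Fatal(err)
	}

	migrator := migrate.NewMigrator(db, migrations)
	if err := migrator.Init(ctx); err != nil { // creates bun's bookkeeping tables
		log.Fatal(err)
	}
	group, err := migrator.Migrate(ctx) // applies all unapplied migrations in order
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("applied %s", group)
}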


@@ -1,198 +0,0 @@
package sqlmigration
import (
"context"
"database/sql"
"fmt"
"reflect"
"github.com/SigNoz/signoz/pkg/factory"
"github.com/SigNoz/signoz/pkg/sqlstore"
"github.com/SigNoz/signoz/pkg/types"
"github.com/SigNoz/signoz/pkg/valuer"
"github.com/uptrace/bun"
"github.com/uptrace/bun/migrate"
)
type updatePreferences struct {
store sqlstore.SQLStore
}
type existingOrgPreference struct {
bun.BaseModel `bun:"table:org_preference"`
PreferenceID string `bun:"preference_id,pk,type:text,notnull"`
PreferenceValue string `bun:"preference_value,type:text,notnull"`
OrgID string `bun:"org_id,pk,type:text,notnull"`
}
type newOrgPreference struct {
bun.BaseModel `bun:"table:org_preference_new"`
types.Identifiable
PreferenceID string `bun:"preference_id,type:text,notnull"`
PreferenceValue string `bun:"preference_value,type:text,notnull"`
OrgID string `bun:"org_id,type:text,notnull"`
}
type existingUserPreference struct {
bun.BaseModel `bun:"table:user_preference"`
PreferenceID string `bun:"preference_id,type:text,pk"`
PreferenceValue string `bun:"preference_value,type:text"`
UserID string `bun:"user_id,type:text,pk"`
}
type newUserPreference struct {
bun.BaseModel `bun:"table:user_preference_new"`
types.Identifiable
PreferenceID string `bun:"preference_id,type:text,notnull"`
PreferenceValue string `bun:"preference_value,type:text,notnull"`
UserID string `bun:"user_id,type:text,notnull"`
}
func NewUpdatePreferencesFactory(sqlstore sqlstore.SQLStore) factory.ProviderFactory[SQLMigration, Config] {
return factory.NewProviderFactory(factory.MustNewName("update_preferences"), func(ctx context.Context, ps factory.ProviderSettings, c Config) (SQLMigration, error) {
return newUpdatePreferences(ctx, ps, c, sqlstore)
})
}
func newUpdatePreferences(_ context.Context, _ factory.ProviderSettings, _ Config, store sqlstore.SQLStore) (SQLMigration, error) {
return &updatePreferences{store: store}, nil
}
func (migration *updatePreferences) Register(migrations *migrate.Migrations) error {
if err := migrations.Register(migration.Up, migration.Down); err != nil {
return err
}
return nil
}
func (migration *updatePreferences) Up(ctx context.Context, db *bun.DB) error {
tx, err := db.BeginTx(ctx, nil)
if err != nil {
return err
}
defer func() {
_ = tx.Rollback()
}()
err = migration.
store.
Dialect().
AddPrimaryKey(ctx, tx, new(existingOrgPreference), new(newOrgPreference), OrgReference, func(ctx context.Context) error {
existingOrgPreferences := make([]*existingOrgPreference, 0)
err = tx.
NewSelect().
Model(&existingOrgPreferences).
Scan(ctx)
if err != nil {
if err != sql.ErrNoRows {
return err
}
}
if err == nil && len(existingOrgPreferences) > 0 {
newOrgPreferences := migration.
CopyOldOrgPreferencesToNewOrgPreferences(existingOrgPreferences)
_, err = tx.
NewInsert().
Model(&newOrgPreferences).
Exec(ctx)
if err != nil {
return err
}
}
tableName := tx.Dialect().Tables().Get(reflect.TypeOf(new(existingOrgPreference))).Name
_, err = tx.
ExecContext(ctx, fmt.Sprintf("CREATE UNIQUE INDEX IF NOT EXISTS %s_unique_idx ON %s (preference_id, org_id)", tableName, fmt.Sprintf("%s_new", tableName)))
if err != nil {
return err
}
return nil
})
if err != nil {
return err
}
err = migration.
store.
Dialect().
AddPrimaryKey(ctx, tx, new(existingUserPreference), new(newUserPreference), UserReference, func(ctx context.Context) error {
existingUserPreferences := make([]*existingUserPreference, 0)
err = tx.
NewSelect().
Model(&existingUserPreferences).
Scan(ctx)
if err != nil {
if err != sql.ErrNoRows {
return err
}
}
if err == nil && len(existingUserPreferences) > 0 {
newUserPreferences := migration.CopyOldUserPreferencesToNewUserPreferences(existingUserPreferences)
_, err = tx.
NewInsert().
Model(&newUserPreferences).
Exec(ctx)
if err != nil {
return err
}
}
tableName := tx.Dialect().Tables().Get(reflect.TypeOf(new(existingUserPreference))).Name
_, err = tx.
ExecContext(ctx, fmt.Sprintf("CREATE UNIQUE INDEX IF NOT EXISTS %s_unique_idx ON %s (preference_id, user_id)", tableName, fmt.Sprintf("%s_new", tableName)))
if err != nil {
return err
}
return nil
})
if err != nil {
return err
}
err = tx.Commit()
if err != nil {
return err
}
return nil
}
func (migration *updatePreferences) Down(context.Context, *bun.DB) error {
return nil
}
func (migration *updatePreferences) CopyOldOrgPreferencesToNewOrgPreferences(existingOrgPreferences []*existingOrgPreference) []*newOrgPreference {
newOrgPreferences := make([]*newOrgPreference, 0)
for _, preference := range existingOrgPreferences {
newOrgPreferences = append(newOrgPreferences, &newOrgPreference{
Identifiable: types.Identifiable{
ID: valuer.GenerateUUID(),
},
PreferenceID: preference.PreferenceID,
PreferenceValue: preference.PreferenceValue,
OrgID: preference.OrgID,
})
}
return newOrgPreferences
}
func (migration *updatePreferences) CopyOldUserPreferencesToNewUserPreferences(existingUserPreferences []*existingUserPreference) []*newUserPreference {
newUserPreferences := make([]*newUserPreference, 0)
for _, preference := range existingUserPreferences {
newUserPreferences = append(newUserPreferences, &newUserPreference{
Identifiable: types.Identifiable{
ID: valuer.GenerateUUID(),
},
PreferenceID: preference.PreferenceID,
PreferenceValue: preference.PreferenceValue,
UserID: preference.UserID,
})
}
return newUserPreferences
}
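
The fmt.Sprintf-built CREATE UNIQUE INDEX statements above have a builder equivalent in bun that quotes identifiers per dialect; the same builder appears later in this changeset in the organizations migration. A hypothetical helper, sketched for comparison only:

package sqlmigration

import (
	"context"

	"github.com/uptrace/bun"
)

// createUniqueIndex is a hypothetical helper showing the builder form of
// the raw statements above; behaviour is identical, but identifiers are
// quoted by the dialect instead of interpolated into a string.
func createUniqueIndex(ctx context.Context, tx bun.IDB, name, table string, columns ...string) error {
	_, err := tx.NewCreateIndex().
		Unique().
		IfNotExists().
		Index(name).
		Table(table).
		Column(columns...).
		Exec(ctx)
	return err
}

Inside the org-preference callback this would read createUniqueIndex(ctx, tx, tableName+"_unique_idx", tableName+"_new", "preference_id", "org_id").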


@@ -1,229 +0,0 @@
package sqlmigration
import (
"context"
"database/sql"
"fmt"
"reflect"
"time"
"github.com/SigNoz/signoz/pkg/factory"
"github.com/SigNoz/signoz/pkg/sqlstore"
"github.com/SigNoz/signoz/pkg/types"
"github.com/SigNoz/signoz/pkg/valuer"
"github.com/uptrace/bun"
"github.com/uptrace/bun/migrate"
)
type updateApdexTtl struct {
store sqlstore.SQLStore
}
type existingApdexSettings struct {
bun.BaseModel `bun:"table:apdex_settings"`
OrgID string `bun:"org_id,pk,type:text" json:"orgId"`
ServiceName string `bun:"service_name,pk,type:text" json:"serviceName"`
Threshold float64 `bun:"threshold,type:float,notnull" json:"threshold"`
ExcludeStatusCodes string `bun:"exclude_status_codes,type:text,notnull" json:"excludeStatusCodes"`
}
type newApdexSettings struct {
bun.BaseModel `bun:"table:apdex_setting"`
types.Identifiable
OrgID string `bun:"org_id,type:text" json:"orgId"`
ServiceName string `bun:"service_name,type:text" json:"serviceName"`
Threshold float64 `bun:"threshold,type:float,notnull" json:"threshold"`
ExcludeStatusCodes string `bun:"exclude_status_codes,type:text,notnull" json:"excludeStatusCodes"`
}
type existingTTLStatus struct {
bun.BaseModel `bun:"table:ttl_status"`
ID int `bun:"id,pk,autoincrement"`
TransactionID string `bun:"transaction_id,type:text,notnull"`
CreatedAt time.Time `bun:"created_at,type:datetime,notnull"`
UpdatedAt time.Time `bun:"updated_at,type:datetime,notnull"`
TableName string `bun:"table_name,type:text,notnull"`
TTL int `bun:"ttl,notnull,default:0"`
ColdStorageTTL int `bun:"cold_storage_ttl,notnull,default:0"`
Status string `bun:"status,type:text,notnull"`
}
type newTTLStatus struct {
bun.BaseModel `bun:"table:ttl_setting"`
types.Identifiable
types.TimeAuditable
TransactionID string `bun:"transaction_id,type:text,notnull"`
TableName string `bun:"table_name,type:text,notnull"`
TTL int `bun:"ttl,notnull,default:0"`
ColdStorageTTL int `bun:"cold_storage_ttl,notnull,default:0"`
Status string `bun:"status,type:text,notnull"`
OrgID string `json:"-" bun:"org_id,notnull"`
}
func NewUpdateApdexTtlFactory(sqlstore sqlstore.SQLStore) factory.ProviderFactory[SQLMigration, Config] {
return factory.NewProviderFactory(factory.MustNewName("update_apdex_ttl"), func(ctx context.Context, ps factory.ProviderSettings, c Config) (SQLMigration, error) {
return newUpdateApdexTtl(ctx, ps, c, sqlstore)
})
}
func newUpdateApdexTtl(_ context.Context, _ factory.ProviderSettings, _ Config, store sqlstore.SQLStore) (SQLMigration, error) {
return &updateApdexTtl{store: store}, nil
}
func (migration *updateApdexTtl) Register(migrations *migrate.Migrations) error {
if err := migrations.Register(migration.Up, migration.Down); err != nil {
return err
}
return nil
}
func (migration *updateApdexTtl) Up(ctx context.Context, db *bun.DB) error {
tx, err := db.BeginTx(ctx, nil)
if err != nil {
return err
}
defer func() {
_ = tx.Rollback()
}()
err = migration.
store.
Dialect().
RenameTableAndModifyModel(ctx, tx, new(existingApdexSettings), new(newApdexSettings), []string{OrgReference}, func(ctx context.Context) error {
existingApdexSettings := make([]*existingApdexSettings, 0)
err = tx.
NewSelect().
Model(&existingApdexSettings).
Scan(ctx)
if err != nil {
if err != sql.ErrNoRows {
return err
}
}
if err == nil && len(existingApdexSettings) > 0 {
newSettings := migration.
CopyExistingApdexSettingsToNewApdexSettings(existingApdexSettings)
_, err = tx.
NewInsert().
Model(&newSettings).
Exec(ctx)
if err != nil {
return err
}
}
tableName := tx.Dialect().Tables().Get(reflect.TypeOf(new(newApdexSettings))).Name
_, err = tx.
ExecContext(ctx, fmt.Sprintf("CREATE UNIQUE INDEX IF NOT EXISTS %s_unique_idx ON %s (service_name, org_id)", tableName, tableName))
if err != nil {
return err
}
return nil
})
if err != nil {
return err
}
err = migration.
store.
Dialect().
RenameTableAndModifyModel(ctx, tx, new(existingTTLStatus), new(newTTLStatus), []string{OrgReference}, func(ctx context.Context) error {
existingTTLStatus := make([]*existingTTLStatus, 0)
err = tx.
NewSelect().
Model(&existingTTLStatus).
Scan(ctx)
if err != nil {
if err != sql.ErrNoRows {
return err
}
}
if err == nil && len(existingTTLStatus) > 0 {
var orgID string
err := migration.
store.
BunDB().
NewSelect().
Model((*types.Organization)(nil)).
Column("id").
Scan(ctx, &orgID)
if err != nil {
if err != sql.ErrNoRows {
return err
}
}
if err == nil {
newTTLStatus := migration.CopyExistingTTLStatusToNewTTLStatus(existingTTLStatus, orgID)
_, err = tx.
NewInsert().
Model(&newTTLStatus).
Exec(ctx)
if err != nil {
return err
}
}
}
return nil
})
if err != nil {
return err
}
err = tx.Commit()
if err != nil {
return err
}
return nil
}
func (migration *updateApdexTtl) Down(context.Context, *bun.DB) error {
return nil
}
func (migration *updateApdexTtl) CopyExistingApdexSettingsToNewApdexSettings(existingApdexSettings []*existingApdexSettings) []*newApdexSettings {
newSettings := make([]*newApdexSettings, 0)
for _, apdexSetting := range existingApdexSettings {
newSettings = append(newSettings, &newApdexSettings{
Identifiable: types.Identifiable{
ID: valuer.GenerateUUID(),
},
ServiceName: apdexSetting.ServiceName,
Threshold: apdexSetting.Threshold,
ExcludeStatusCodes: apdexSetting.ExcludeStatusCodes,
OrgID: apdexSetting.OrgID,
})
}
return newSettings
}
func (migration *updateApdexTtl) CopyExistingTTLStatusToNewTTLStatus(existingTTLStatus []*existingTTLStatus, orgID string) []*newTTLStatus {
newTTLStatuses := make([]*newTTLStatus, 0)
for _, ttl := range existingTTLStatus {
newTTLStatuses = append(newTTLStatuses, &newTTLStatus{
Identifiable: types.Identifiable{
ID: valuer.GenerateUUID(),
},
TimeAuditable: types.TimeAuditable{
CreatedAt: ttl.CreatedAt,
UpdatedAt: ttl.UpdatedAt,
},
TransactionID: ttl.TransactionID,
TTL: ttl.TTL,
TableName: ttl.TableName,
ColdStorageTTL: ttl.ColdStorageTTL,
Status: ttl.Status,
OrgID: orgID,
})
}
return newTTLStatuses
}
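
Every Up in this set repeats the same select-and-ignore-empty dance: scan all rows, treat sql.ErrNoRows as an empty table, and copy only when rows exist. The sentinel comparisons (err != sql.ErrNoRows) work because database/sql returns the sentinel unwrapped, but errors.Is is the defensive form. A generic sketch of the pattern (hypothetical helper, not part of the migrations):

package sqlmigration

import (
	"context"
	"database/sql"
	"errors"

	"github.com/uptrace/bun"
)

// scanAll loads every row of a model table, reporting (false, nil) when
// the table is empty so callers can skip the copy step. T must be a bun
// model struct such as existingApdexSettings.
func scanAll[T any](ctx context.Context, tx bun.IDB, dest *[]*T) (bool, error) {
	if err := tx.NewSelect().Model(dest).Scan(ctx); err != nil {
		if errors.Is(err, sql.ErrNoRows) {
			return false, nil
		}
		return false, err
	}
	return len(*dest) > 0, nil
}

With it, the apdex branch above reduces to ok, err := scanAll(ctx, tx, &existingApdexSettings) followed by the copy-and-insert step when ok is true.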


@@ -1,195 +0,0 @@
package sqlmigration
import (
"context"
"database/sql"
"github.com/SigNoz/signoz/pkg/factory"
"github.com/SigNoz/signoz/pkg/sqlstore"
"github.com/SigNoz/signoz/pkg/types"
"github.com/SigNoz/signoz/pkg/valuer"
"github.com/uptrace/bun"
"github.com/uptrace/bun/migrate"
)
type updateResetPassword struct {
store sqlstore.SQLStore
}
type existingResetPasswordRequest struct {
bun.BaseModel `bun:"table:reset_password_request"`
ID int `bun:"id,pk,autoincrement" json:"id"`
Token string `bun:"token,type:text,notnull" json:"token"`
UserID string `bun:"user_id,type:text,notnull" json:"userId"`
}
type newResetPasswordRequest struct {
bun.BaseModel `bun:"table:reset_password_request_new"`
types.Identifiable
Token string `bun:"token,type:text,notnull" json:"token"`
UserID string `bun:"user_id,type:text,notnull" json:"userId"`
}
type existingPersonalAccessToken struct {
bun.BaseModel `bun:"table:personal_access_tokens"`
types.TimeAuditable
OrgID string `json:"orgId" bun:"org_id,type:text,notnull"`
ID int `json:"id" bun:"id,pk,autoincrement"`
Role string `json:"role" bun:"role,type:text,notnull,default:'ADMIN'"`
UserID string `json:"userId" bun:"user_id,type:text,notnull"`
Token string `json:"token" bun:"token,type:text,notnull,unique"`
Name string `json:"name" bun:"name,type:text,notnull"`
ExpiresAt int64 `json:"expiresAt" bun:"expires_at,notnull,default:0"`
LastUsed int64 `json:"lastUsed" bun:"last_used,notnull,default:0"`
Revoked bool `json:"revoked" bun:"revoked,notnull,default:false"`
UpdatedByUserID string `json:"updatedByUserId" bun:"updated_by_user_id,type:text,notnull,default:''"`
}
type newPersonalAccessToken struct {
bun.BaseModel `bun:"table:personal_access_token"`
types.Identifiable
types.TimeAuditable
OrgID string `json:"orgId" bun:"org_id,type:text,notnull"`
Role string `json:"role" bun:"role,type:text,notnull,default:'ADMIN'"`
UserID string `json:"userId" bun:"user_id,type:text,notnull"`
Token string `json:"token" bun:"token,type:text,notnull,unique"`
Name string `json:"name" bun:"name,type:text,notnull"`
ExpiresAt int64 `json:"expiresAt" bun:"expires_at,notnull,default:0"`
LastUsed int64 `json:"lastUsed" bun:"last_used,notnull,default:0"`
Revoked bool `json:"revoked" bun:"revoked,notnull,default:false"`
UpdatedByUserID string `json:"updatedByUserId" bun:"updated_by_user_id,type:text,notnull,default:''"`
}
func NewUpdateResetPasswordFactory(sqlstore sqlstore.SQLStore) factory.ProviderFactory[SQLMigration, Config] {
return factory.NewProviderFactory(factory.MustNewName("update_reset_password"), func(ctx context.Context, ps factory.ProviderSettings, c Config) (SQLMigration, error) {
return newUpdateResetPassword(ctx, ps, c, sqlstore)
})
}
func newUpdateResetPassword(_ context.Context, _ factory.ProviderSettings, _ Config, store sqlstore.SQLStore) (SQLMigration, error) {
return &updateResetPassword{store: store}, nil
}
func (migration *updateResetPassword) Register(migrations *migrate.Migrations) error {
if err := migrations.Register(migration.Up, migration.Down); err != nil {
return err
}
return nil
}
func (migration *updateResetPassword) Up(ctx context.Context, db *bun.DB) error {
tx, err := db.BeginTx(ctx, nil)
if err != nil {
return err
}
defer func() {
_ = tx.Rollback()
}()
err = migration.store.Dialect().UpdatePrimaryKey(ctx, tx, new(existingResetPasswordRequest), new(newResetPasswordRequest), UserReference, func(ctx context.Context) error {
existingResetPasswordRequests := make([]*existingResetPasswordRequest, 0)
err = tx.
NewSelect().
Model(&existingResetPasswordRequests).
Scan(ctx)
if err != nil {
if err != sql.ErrNoRows {
return err
}
}
if err == nil && len(existingResetPasswordRequests) > 0 {
newResetPasswordRequests := migration.CopyExistingResetPasswordRequestsToNewResetPasswordRequests(existingResetPasswordRequests)
_, err = tx.
NewInsert().
Model(&newResetPasswordRequests).
Exec(ctx)
if err != nil {
return err
}
}
return nil
})
if err != nil {
return err
}
err = migration.store.Dialect().RenameTableAndModifyModel(ctx, tx, new(existingPersonalAccessToken), new(newPersonalAccessToken), []string{OrgReference, UserReference}, func(ctx context.Context) error {
existingPersonalAccessTokens := make([]*existingPersonalAccessToken, 0)
err = tx.
NewSelect().
Model(&existingPersonalAccessTokens).
Scan(ctx)
if err != nil {
if err != sql.ErrNoRows {
return err
}
}
if err == nil && len(existingPersonalAccessTokens) > 0 {
newPersonalAccessTokens := migration.CopyExistingPATsToNewPATs(existingPersonalAccessTokens)
_, err = tx.NewInsert().Model(&newPersonalAccessTokens).Exec(ctx)
if err != nil {
return err
}
}
return nil
})
if err != nil {
return err
}
err = tx.Commit()
if err != nil {
return err
}
return nil
}
func (migration *updateResetPassword) Down(context.Context, *bun.DB) error {
return nil
}
func (migration *updateResetPassword) CopyExistingResetPasswordRequestsToNewResetPasswordRequests(existingPasswordRequests []*existingResetPasswordRequest) []*newResetPasswordRequest {
newResetPasswordRequests := make([]*newResetPasswordRequest, 0)
for _, request := range existingPasswordRequests {
newResetPasswordRequests = append(newResetPasswordRequests, &newResetPasswordRequest{
Identifiable: types.Identifiable{
ID: valuer.GenerateUUID(),
},
Token: request.Token,
UserID: request.UserID,
})
}
return newResetPasswordRequests
}
func (migration *updateResetPassword) CopyExistingPATsToNewPATs(existingPATs []*existingPersonalAccessToken) []*newPersonalAccessToken {
newPATs := make([]*newPersonalAccessToken, 0)
for _, pat := range existingPATs {
newPATs = append(newPATs, &newPersonalAccessToken{
Identifiable: types.Identifiable{
ID: valuer.GenerateUUID(),
},
TimeAuditable: types.TimeAuditable{
CreatedAt: pat.CreatedAt,
UpdatedAt: pat.UpdatedAt,
},
Role: pat.Role,
Name: pat.Name,
ExpiresAt: pat.ExpiresAt,
LastUsed: pat.LastUsed,
UserID: pat.UserID,
Token: pat.Token,
Revoked: pat.Revoked,
UpdatedByUserID: pat.UpdatedByUserID,
OrgID: pat.OrgID,
})
}
return newPATs
}


@@ -1,58 +0,0 @@
package sqlmigration
import (
"context"
"github.com/SigNoz/signoz/pkg/factory"
"github.com/SigNoz/signoz/pkg/types"
"github.com/SigNoz/signoz/pkg/types/telemetrytypes"
"github.com/uptrace/bun"
"github.com/uptrace/bun/migrate"
)
type addVirtualFields struct{}
func NewAddVirtualFieldsFactory() factory.ProviderFactory[SQLMigration, Config] {
return factory.NewProviderFactory(factory.MustNewName("add_virtual_fields"), newAddVirtualFields)
}
func newAddVirtualFields(_ context.Context, _ factory.ProviderSettings, _ Config) (SQLMigration, error) {
return &addVirtualFields{}, nil
}
func (migration *addVirtualFields) Register(migrations *migrate.Migrations) error {
if err := migrations.Register(migration.Up, migration.Down); err != nil {
return err
}
return nil
}
func (migration *addVirtualFields) Up(ctx context.Context, db *bun.DB) error {
// table:virtual_field op:create
if _, err := db.NewCreateTable().
Model(&struct {
bun.BaseModel `bun:"table:virtual_field"`
types.Identifiable
types.TimeAuditable
types.UserAuditable
Name string `bun:"name,type:text,notnull"`
Expression string `bun:"expression,type:text,notnull"`
Description string `bun:"description,type:text"`
Signal telemetrytypes.Signal `bun:"signal,type:text,notnull"`
OrgID string `bun:"org_id,type:text,notnull"`
}{}).
ForeignKey(`("org_id") REFERENCES "organizations" ("id") ON DELETE CASCADE`).
IfNotExists().
Exec(ctx); err != nil {
return err
}
return nil
}
func (migration *addVirtualFields) Down(ctx context.Context, db *bun.DB) error {
return nil
}
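
Down is a deliberate no-op here, as in every migration in this squashed set: the tree is forward-only. For reference, the reversible counterpart to the Up above would be a single drop (hypothetical sketch, not shipped):

package sqlmigration

import (
	"context"

	"github.com/uptrace/bun"
)

// dropVirtualFields is what a reversible Down for add_virtual_fields
// would look like; the shipped Down intentionally does nothing.
func dropVirtualFields(ctx context.Context, db *bun.DB) error {
	_, err := db.NewDropTable().
		Table("virtual_field").
		IfExists().
		Exec(ctx)
	return err
}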


@@ -1,446 +0,0 @@
package sqlmigration
import (
"context"
"database/sql"
"time"
"github.com/SigNoz/signoz/pkg/factory"
"github.com/SigNoz/signoz/pkg/sqlstore"
"github.com/SigNoz/signoz/pkg/types"
"github.com/SigNoz/signoz/pkg/valuer"
"github.com/google/uuid"
"github.com/uptrace/bun"
"github.com/uptrace/bun/migrate"
)
type updateIntegrations struct {
store sqlstore.SQLStore
}
func NewUpdateIntegrationsFactory(sqlstore sqlstore.SQLStore) factory.ProviderFactory[SQLMigration, Config] {
return factory.NewProviderFactory(factory.MustNewName("update_integrations"), func(ctx context.Context, ps factory.ProviderSettings, c Config) (SQLMigration, error) {
return newUpdateIntegrations(ctx, ps, c, sqlstore)
})
}
func newUpdateIntegrations(_ context.Context, _ factory.ProviderSettings, _ Config, store sqlstore.SQLStore) (SQLMigration, error) {
return &updateIntegrations{store: store}, nil
}
func (migration *updateIntegrations) Register(migrations *migrate.Migrations) error {
if err := migrations.Register(migration.Up, migration.Down); err != nil {
return err
}
return nil
}
type existingInstalledIntegration struct {
bun.BaseModel `bun:"table:integrations_installed"`
IntegrationID string `bun:"integration_id,pk,type:text"`
ConfigJSON string `bun:"config_json,type:text"`
InstalledAt time.Time `bun:"installed_at,default:current_timestamp"`
}
type newInstalledIntegration struct {
bun.BaseModel `bun:"table:installed_integration"`
types.Identifiable
Type string `json:"type" bun:"type,type:text,unique:org_id_type"`
Config string `json:"config" bun:"config,type:text"`
InstalledAt time.Time `json:"installed_at" bun:"installed_at,default:current_timestamp"`
OrgID string `json:"org_id" bun:"org_id,type:text,unique:org_id_type"`
}
type existingCloudIntegration struct {
bun.BaseModel `bun:"table:cloud_integrations_accounts"`
CloudProvider string `bun:"cloud_provider,type:text,unique:cloud_provider_id"`
ID string `bun:"id,type:text,notnull,unique:cloud_provider_id"`
ConfigJSON string `bun:"config_json,type:text"`
CloudAccountID string `bun:"cloud_account_id,type:text"`
LastAgentReportJSON string `bun:"last_agent_report_json,type:text"`
CreatedAt time.Time `bun:"created_at,notnull,default:current_timestamp"`
RemovedAt *time.Time `bun:"removed_at,type:timestamp"`
}
type newCloudIntegration struct {
bun.BaseModel `bun:"table:cloud_integration"`
types.Identifiable
types.TimeAuditable
Provider string `json:"provider" bun:"provider,type:text"`
Config string `json:"config" bun:"config,type:text"`
AccountID string `json:"account_id" bun:"account_id,type:text"`
LastAgentReport string `json:"last_agent_report" bun:"last_agent_report,type:text"`
RemovedAt *time.Time `json:"removed_at" bun:"removed_at,type:timestamp"`
OrgID string `json:"org_id" bun:"org_id,type:text"`
}
type existingCloudIntegrationService struct {
bun.BaseModel `bun:"table:cloud_integrations_service_configs,alias:c1"`
CloudProvider string `bun:"cloud_provider,type:text,notnull,unique:service_cloud_provider_account"`
CloudAccountID string `bun:"cloud_account_id,type:text,notnull,unique:service_cloud_provider_account"`
ServiceID string `bun:"service_id,type:text,notnull,unique:service_cloud_provider_account"`
ConfigJSON string `bun:"config_json,type:text"`
CreatedAt time.Time `bun:"created_at,default:current_timestamp"`
}
type newCloudIntegrationService struct {
bun.BaseModel `bun:"table:cloud_integration_service,alias:cis"`
types.Identifiable
types.TimeAuditable
Type string `bun:"type,type:text,notnull,unique:cloud_integration_id_type"`
Config string `bun:"config,type:text"`
CloudIntegrationID string `bun:"cloud_integration_id,type:text,notnull,unique:cloud_integration_id_type"`
}
type StorablePersonalAccessToken struct {
bun.BaseModel `bun:"table:personal_access_token"`
types.Identifiable
types.TimeAuditable
OrgID string `json:"orgId" bun:"org_id,type:text,notnull"`
Role string `json:"role" bun:"role,type:text,notnull,default:'ADMIN'"`
UserID string `json:"userId" bun:"user_id,type:text,notnull"`
Token string `json:"token" bun:"token,type:text,notnull,unique"`
Name string `json:"name" bun:"name,type:text,notnull"`
ExpiresAt int64 `json:"expiresAt" bun:"expires_at,notnull,default:0"`
LastUsed int64 `json:"lastUsed" bun:"last_used,notnull,default:0"`
Revoked bool `json:"revoked" bun:"revoked,notnull,default:false"`
UpdatedByUserID string `json:"updatedByUserId" bun:"updated_by_user_id,type:text,notnull,default:''"`
}
func (migration *updateIntegrations) Up(ctx context.Context, db *bun.DB) error {
// begin transaction
tx, err := db.BeginTx(ctx, nil)
if err != nil {
return err
}
defer func() {
_ = tx.Rollback()
}()
// don't run the migration if there are multiple org ids
orgIDs := make([]string, 0)
err = migration.store.BunDB().NewSelect().Model((*types.Organization)(nil)).Column("id").Scan(ctx, &orgIDs)
if err != nil {
return err
}
if len(orgIDs) > 1 {
return nil
}
// installed integrations
err = migration.
store.
Dialect().
RenameTableAndModifyModel(ctx, tx, new(existingInstalledIntegration), new(newInstalledIntegration), []string{OrgReference}, func(ctx context.Context) error {
existingIntegrations := make([]*existingInstalledIntegration, 0)
err = tx.
NewSelect().
Model(&existingIntegrations).
Scan(ctx)
if err != nil {
if err != sql.ErrNoRows {
return err
}
}
if err == nil && len(existingIntegrations) > 0 {
newIntegrations := migration.
CopyOldIntegrationsToNewIntegrations(tx, orgIDs[0], existingIntegrations)
_, err = tx.
NewInsert().
Model(&newIntegrations).
Exec(ctx)
if err != nil {
return err
}
}
return nil
})
if err != nil {
return err
}
// cloud integrations
err = migration.
store.
Dialect().
RenameTableAndModifyModel(ctx, tx, new(existingCloudIntegration), new(newCloudIntegration), []string{OrgReference}, func(ctx context.Context) error {
existingIntegrations := make([]*existingCloudIntegration, 0)
err = tx.
NewSelect().
Model(&existingIntegrations).
Where("removed_at IS NULL"). // we will only copy the accounts that are not removed
Scan(ctx)
if err != nil {
if err != sql.ErrNoRows {
return err
}
}
if err == nil && len(existingIntegrations) > 0 {
newIntegrations := migration.
CopyOldCloudIntegrationsToNewCloudIntegrations(tx, orgIDs[0], existingIntegrations)
_, err = tx.
NewInsert().
Model(&newIntegrations).
Exec(ctx)
if err != nil {
return err
}
}
return nil
})
if err != nil {
return err
}
// add unique constraint to cloud_integration table
_, err = tx.ExecContext(ctx, `CREATE UNIQUE INDEX IF NOT EXISTS unique_cloud_integration ON cloud_integration (id, provider, org_id)`)
if err != nil {
return err
}
// cloud integration service
err = migration.
store.
Dialect().
RenameTableAndModifyModel(ctx, tx, new(existingCloudIntegrationService), new(newCloudIntegrationService), []string{CloudIntegrationReference}, func(ctx context.Context) error {
existingServices := make([]*existingCloudIntegrationService, 0)
// there is only one service per provider, account id and type,
// so there won't be any duplicates; they are simply enabled as
// soon as the integration for the account is enabled
err = tx.
NewSelect().
Model(&existingServices).
Scan(ctx)
if err != nil {
if err != sql.ErrNoRows {
return err
}
}
if err == nil && len(existingServices) > 0 {
newServices := migration.
CopyOldCloudIntegrationServicesToNewCloudIntegrationServices(tx, orgIDs[0], existingServices)
if len(newServices) > 0 {
_, err = tx.
NewInsert().
Model(&newServices).
Exec(ctx)
if err != nil {
return err
}
}
}
return nil
})
if err != nil {
return err
}
if len(orgIDs) == 0 {
err = tx.Commit()
if err != nil {
return err
}
return nil
}
// copy the old aws integration user to the new user
err = migration.copyOldAwsIntegrationUser(tx, orgIDs[0])
if err != nil {
return err
}
err = tx.Commit()
if err != nil {
return err
}
return nil
}
func (migration *updateIntegrations) Down(ctx context.Context, db *bun.DB) error {
return nil
}
func (migration *updateIntegrations) CopyOldIntegrationsToNewIntegrations(tx bun.IDB, orgID string, existingIntegrations []*existingInstalledIntegration) []*newInstalledIntegration {
newIntegrations := make([]*newInstalledIntegration, 0)
for _, integration := range existingIntegrations {
newIntegrations = append(newIntegrations, &newInstalledIntegration{
Identifiable: types.Identifiable{
ID: valuer.GenerateUUID(),
},
Type: integration.IntegrationID,
Config: integration.ConfigJSON,
InstalledAt: integration.InstalledAt,
OrgID: orgID,
})
}
return newIntegrations
}
func (migration *updateIntegrations) CopyOldCloudIntegrationsToNewCloudIntegrations(tx bun.IDB, orgID string, existingIntegrations []*existingCloudIntegration) []*newCloudIntegration {
newIntegrations := make([]*newCloudIntegration, 0)
for _, integration := range existingIntegrations {
newIntegrations = append(newIntegrations, &newCloudIntegration{
Identifiable: types.Identifiable{
ID: valuer.GenerateUUID(),
},
TimeAuditable: types.TimeAuditable{
CreatedAt: integration.CreatedAt,
UpdatedAt: integration.CreatedAt,
},
Provider: integration.CloudProvider,
AccountID: integration.CloudAccountID,
Config: integration.ConfigJSON,
LastAgentReport: integration.LastAgentReportJSON,
RemovedAt: integration.RemovedAt,
OrgID: orgID,
})
}
return newIntegrations
}
func (migration *updateIntegrations) CopyOldCloudIntegrationServicesToNewCloudIntegrationServices(tx bun.IDB, orgID string, existingServices []*existingCloudIntegrationService) []*newCloudIntegrationService {
newServices := make([]*newCloudIntegrationService, 0)
for _, service := range existingServices {
var cloudIntegrationID string
err := tx.NewSelect().
Model((*newCloudIntegration)(nil)).
Column("id").
Where("account_id = ?", service.CloudAccountID).
Where("provider = ?", service.CloudProvider).
Where("org_id = ?", orgID).
Scan(context.Background(), &cloudIntegrationID)
if err != nil {
if err == sql.ErrNoRows {
continue
}
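// NOTE: a lookup failure other than sql.ErrNoRows aborts the copy by
// returning an empty slice; the error itself is not propagated.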
return nil
}
newServices = append(newServices, &newCloudIntegrationService{
Identifiable: types.Identifiable{
ID: valuer.GenerateUUID(),
},
TimeAuditable: types.TimeAuditable{
CreatedAt: service.CreatedAt,
UpdatedAt: service.CreatedAt,
},
Type: service.ServiceID,
Config: service.ConfigJSON,
CloudIntegrationID: cloudIntegrationID,
})
}
return newServices
}
func (migration *updateIntegrations) copyOldAwsIntegrationUser(tx bun.IDB, orgID string) error {
type oldUser struct {
bun.BaseModel `bun:"table:users"`
types.TimeAuditable
ID string `bun:"id,pk,type:text" json:"id"`
Name string `bun:"name,type:text,notnull" json:"name"`
Email string `bun:"email,type:text,notnull,unique" json:"email"`
Password string `bun:"password,type:text,notnull" json:"-"`
ProfilePictureURL string `bun:"profile_picture_url,type:text" json:"profilePictureURL"`
GroupID string `bun:"group_id,type:text,notnull" json:"groupId"`
OrgID string `bun:"org_id,type:text,notnull" json:"orgId"`
}
user := &oldUser{}
err := tx.NewSelect().Model(user).Where("email = ?", "aws-integration@signoz.io").Scan(context.Background())
if err != nil {
if err == sql.ErrNoRows {
return nil
}
return err
}
// check if the id is already a UUID
if _, err := uuid.Parse(user.ID); err == nil {
return nil
}
// new user
newUser := &oldUser{
ID: uuid.New().String(),
TimeAuditable: types.TimeAuditable{
CreatedAt: time.Now(),
UpdatedAt: time.Now(),
},
OrgID: orgID,
Name: user.Name,
Email: user.Email,
GroupID: user.GroupID,
Password: user.Password,
}
// get the pat for old user
pat := &StorablePersonalAccessToken{}
err = tx.NewSelect().Model(pat).Where("user_id = ? and revoked = false", "aws-integration").Scan(context.Background())
if err != nil {
if err == sql.ErrNoRows {
// delete the old user
_, err = tx.ExecContext(context.Background(), `DELETE FROM users WHERE id = ?`, user.ID)
if err != nil {
return err
}
return nil
}
return err
}
// new pat
newPAT := &StorablePersonalAccessToken{
Identifiable: types.Identifiable{ID: valuer.GenerateUUID()},
TimeAuditable: types.TimeAuditable{
CreatedAt: time.Now(),
UpdatedAt: time.Now(),
},
OrgID: orgID,
UserID: newUser.ID,
Token: pat.Token,
Name: pat.Name,
ExpiresAt: pat.ExpiresAt,
LastUsed: pat.LastUsed,
Revoked: pat.Revoked,
Role: pat.Role,
}
// delete old user
_, err = tx.ExecContext(context.Background(), `DELETE FROM users WHERE id = ?`, user.ID)
if err != nil {
return err
}
// insert the new user
_, err = tx.NewInsert().Model(newUser).Exec(context.Background())
if err != nil {
return err
}
// insert the new pat
_, err = tx.NewInsert().Model(newPAT).Exec(context.Background())
if err != nil {
return err
}
return nil
}
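
The copy helpers above are pure functions over slices, which makes them easy to unit-test without a database. An illustrative test (same package; the tx argument is unused by this particular helper, so nil suffices):

package sqlmigration

import (
	"testing"
	"time"
)

// Checks that the copy helper preserves every field and stamps the
// supplied org id onto each row.
func TestCopyOldIntegrationsToNewIntegrations(t *testing.T) {
	migration := &updateIntegrations{}
	installedAt := time.Now()
	existing := []*existingInstalledIntegration{
		{IntegrationID: "aws", ConfigJSON: `{"enabled":true}`, InstalledAt: installedAt},
	}

	got := migration.CopyOldIntegrationsToNewIntegrations(nil, "org-1", existing)

	if len(got) != 1 {
		t.Fatalf("expected 1 integration, got %d", len(got))
	}
	if got[0].Type != "aws" || got[0].Config != `{"enabled":true}` || got[0].OrgID != "org-1" {
		t.Errorf("unexpected copy: %+v", got[0])
	}
	if !got[0].InstalledAt.Equal(installedAt) {
		t.Errorf("installed_at not preserved")
	}
}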


@@ -1,345 +0,0 @@
package sqlmigration
import (
"context"
"database/sql"
"database/sql/driver"
"encoding/json"
"strconv"
"time"
"github.com/SigNoz/signoz/pkg/factory"
"github.com/SigNoz/signoz/pkg/sqlstore"
"github.com/SigNoz/signoz/pkg/types"
ruletypes "github.com/SigNoz/signoz/pkg/types/ruletypes"
"github.com/SigNoz/signoz/pkg/valuer"
"github.com/uptrace/bun"
"github.com/uptrace/bun/migrate"
)
type updateRules struct {
store sqlstore.SQLStore
}
type AlertIds []string
func (a *AlertIds) Scan(src interface{}) error {
if data, ok := src.([]byte); ok {
return json.Unmarshal(data, a)
}
return nil
}
func (a *AlertIds) Value() (driver.Value, error) {
return json.Marshal(a)
}
type existingRule struct {
bun.BaseModel `bun:"table:rules"`
ID int `bun:"id,pk,autoincrement"`
CreatedAt time.Time `bun:"created_at,type:datetime,notnull"`
CreatedBy string `bun:"created_by,type:text,notnull"`
UpdatedAt time.Time `bun:"updated_at,type:datetime,notnull"`
UpdatedBy string `bun:"updated_by,type:text,notnull"`
Deleted int `bun:"deleted,notnull,default:0"`
Data string `bun:"data,type:text,notnull"`
}
type newRule struct {
bun.BaseModel `bun:"table:rule"`
types.Identifiable
types.TimeAuditable
types.UserAuditable
Deleted int `bun:"deleted,notnull,default:0"`
Data string `bun:"data,type:text,notnull"`
OrgID string `bun:"org_id,type:text"`
}
type existingMaintenance struct {
bun.BaseModel `bun:"table:planned_maintenance"`
ID int `bun:"id,pk,autoincrement"`
Name string `bun:"name,type:text,notnull"`
Description string `bun:"description,type:text"`
AlertIDs *AlertIds `bun:"alert_ids,type:text"`
Schedule *ruletypes.Schedule `bun:"schedule,type:text,notnull"`
CreatedAt time.Time `bun:"created_at,type:datetime,notnull"`
CreatedBy string `bun:"created_by,type:text,notnull"`
UpdatedAt time.Time `bun:"updated_at,type:datetime,notnull"`
UpdatedBy string `bun:"updated_by,type:text,notnull"`
}
type newMaintenance struct {
bun.BaseModel `bun:"table:planned_maintenance_new"`
types.Identifiable
types.TimeAuditable
types.UserAuditable
Name string `bun:"name,type:text,notnull"`
Description string `bun:"description,type:text"`
Schedule *ruletypes.Schedule `bun:"schedule,type:text,notnull"`
OrgID string `bun:"org_id,type:text"`
}
type storablePlannedMaintenanceRule struct {
bun.BaseModel `bun:"table:planned_maintenance_rule"`
types.Identifiable
PlannedMaintenanceID valuer.UUID `bun:"planned_maintenance_id,type:text"`
RuleID valuer.UUID `bun:"rule_id,type:text"`
}
type ruleHistory struct {
bun.BaseModel `bun:"table:rule_history"`
RuleID int `bun:"rule_id"`
RuleUUID valuer.UUID `bun:"rule_uuid"`
}
func NewUpdateRulesFactory(sqlstore sqlstore.SQLStore) factory.ProviderFactory[SQLMigration, Config] {
return factory.NewProviderFactory(factory.MustNewName("update_rules"), func(ctx context.Context, ps factory.ProviderSettings, c Config) (SQLMigration, error) {
return newUpdateRules(ctx, ps, c, sqlstore)
})
}
func newUpdateRules(_ context.Context, _ factory.ProviderSettings, _ Config, store sqlstore.SQLStore) (SQLMigration, error) {
return &updateRules{store: store}, nil
}
func (migration *updateRules) Register(migrations *migrate.Migrations) error {
if err := migrations.Register(migration.Up, migration.Down); err != nil {
return err
}
return nil
}
func (migration *updateRules) Up(ctx context.Context, db *bun.DB) error {
tx, err := db.BeginTx(ctx, nil)
if err != nil {
return err
}
defer func() {
_ = tx.Rollback()
}()
ruleIDToRuleUUIDMap := map[int]valuer.UUID{}
err = migration.
store.
Dialect().
RenameTableAndModifyModel(ctx, tx, new(existingRule), new(newRule), []string{OrgReference}, func(ctx context.Context) error {
existingRules := make([]*existingRule, 0)
err := tx.
NewSelect().
Model(&existingRules).
Scan(ctx)
if err != nil {
if err != sql.ErrNoRows {
return err
}
}
if err == nil && len(existingRules) > 0 {
var orgID string
err := migration.
store.
BunDB().
NewSelect().
Model((*types.Organization)(nil)).
Column("id").
Scan(ctx, &orgID)
if err != nil {
if err != sql.ErrNoRows {
return err
}
}
if err == nil {
newRules, idUUIDMap := migration.CopyExistingRulesToNewRules(existingRules, orgID)
ruleIDToRuleUUIDMap = idUUIDMap
_, err = tx.
NewInsert().
Model(&newRules).
Exec(ctx)
if err != nil {
return err
}
}
}
err = migration.store.Dialect().UpdatePrimaryKey(ctx, tx, new(existingMaintenance), new(newMaintenance), OrgReference, func(ctx context.Context) error {
_, err := tx.
NewCreateTable().
IfNotExists().
Model(new(storablePlannedMaintenanceRule)).
ForeignKey(`("planned_maintenance_id") REFERENCES "planned_maintenance_new" ("id") ON DELETE CASCADE ON UPDATE CASCADE`).
ForeignKey(`("rule_id") REFERENCES "rule" ("id")`).
Exec(ctx)
if err != nil {
return err
}
existingMaintenances := make([]*existingMaintenance, 0)
err = tx.
NewSelect().
Model(&existingMaintenances).
Scan(ctx)
if err != nil {
if err != sql.ErrNoRows {
return err
}
}
if err == nil && len(existingMaintenances) > 0 {
var orgID string
err := migration.
store.
BunDB().
NewSelect().
Model((*types.Organization)(nil)).
Column("id").
Scan(ctx, &orgID)
if err != nil {
if err != sql.ErrNoRows {
return err
}
}
if err == nil {
newMaintenances, newMaintenancesRules, err := migration.CopyExistingMaintenancesToNewMaintenancesAndRules(existingMaintenances, orgID, ruleIDToRuleUUIDMap)
if err != nil {
return err
}
_, err = tx.
NewInsert().
Model(&newMaintenances).
Exec(ctx)
if err != nil {
return err
}
if len(newMaintenancesRules) > 0 {
_, err = tx.
NewInsert().
Model(&newMaintenancesRules).
Exec(ctx)
if err != nil {
return err
}
}
}
}
return nil
})
if err != nil {
return err
}
ruleHistories := make([]*ruleHistory, 0)
for ruleID, ruleUUID := range ruleIDToRuleUUIDMap {
ruleHistories = append(ruleHistories, &ruleHistory{
RuleID: ruleID,
RuleUUID: ruleUUID,
})
}
_, err = tx.
NewCreateTable().
IfNotExists().
Model(&ruleHistories).
Exec(ctx)
if err != nil {
return err
}
if len(ruleHistories) > 0 {
_, err = tx.
NewInsert().
Model(&ruleHistories).
Exec(ctx)
if err != nil {
return err
}
}
return nil
})
if err != nil {
return err
}
err = tx.Commit()
if err != nil {
return err
}
return nil
}
func (migration *updateRules) Down(context.Context, *bun.DB) error {
return nil
}
func (migration *updateRules) CopyExistingRulesToNewRules(existingRules []*existingRule, orgID string) ([]*newRule, map[int]valuer.UUID) {
newRules := make([]*newRule, 0)
idUUIDMap := map[int]valuer.UUID{}
for _, rule := range existingRules {
uuid := valuer.GenerateUUID()
idUUIDMap[rule.ID] = uuid
newRules = append(newRules, &newRule{
Identifiable: types.Identifiable{
ID: uuid,
},
TimeAuditable: types.TimeAuditable{
CreatedAt: rule.CreatedAt,
UpdatedAt: rule.UpdatedAt,
},
UserAuditable: types.UserAuditable{
CreatedBy: rule.CreatedBy,
UpdatedBy: rule.UpdatedBy,
},
Deleted: rule.Deleted,
Data: rule.Data,
OrgID: orgID,
})
}
return newRules, idUUIDMap
}
func (migration *updateRules) CopyExistingMaintenancesToNewMaintenancesAndRules(existingMaintenances []*existingMaintenance, orgID string, ruleIDToRuleUUIDMap map[int]valuer.UUID) ([]*newMaintenance, []*storablePlannedMaintenanceRule, error) {
newMaintenances := make([]*newMaintenance, 0)
newMaintenanceRules := make([]*storablePlannedMaintenanceRule, 0)
for _, maintenance := range existingMaintenances {
ruleIDs := maintenance.AlertIDs
maintenanceUUID := valuer.GenerateUUID()
newMaintenance := newMaintenance{
Identifiable: types.Identifiable{
ID: maintenanceUUID,
},
TimeAuditable: types.TimeAuditable{
CreatedAt: maintenance.CreatedAt,
UpdatedAt: maintenance.UpdatedAt,
},
UserAuditable: types.UserAuditable{
CreatedBy: maintenance.CreatedBy,
UpdatedBy: maintenance.UpdatedBy,
},
Name: maintenance.Name,
Description: maintenance.Description,
Schedule: maintenance.Schedule,
OrgID: orgID,
}
newMaintenances = append(newMaintenances, &newMaintenance)
for _, ruleIDStr := range *ruleIDs {
ruleID, err := strconv.Atoi(ruleIDStr)
if err != nil {
return nil, nil, err
}
newMaintenanceRules = append(newMaintenanceRules, &storablePlannedMaintenanceRule{
Identifiable: types.Identifiable{
ID: valuer.GenerateUUID(),
},
PlannedMaintenanceID: maintenanceUUID,
RuleID: ruleIDToRuleUUIDMap[ruleID],
})
}
}
return newMaintenances, newMaintenanceRules, nil
}
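
AlertIds persists as a JSON-encoded text column through the sql.Scanner/driver.Valuer pair defined at the top of this file. A small round-trip test makes the codec contract explicit (illustrative, same package):

package sqlmigration

import (
	"reflect"
	"testing"
)

// Value must produce bytes that Scan reads back into an equal slice.
func TestAlertIdsRoundTrip(t *testing.T) {
	in := AlertIds{"1", "7", "42"}

	v, err := in.Value()
	if err != nil {
		t.Fatal(err)
	}
	var out AlertIds
	if err := out.Scan(v); err != nil {
		t.Fatal(err)
	}
	if !reflect.DeepEqual(in, out) {
		t.Fatalf("round trip mismatch: %v != %v", in, out)
	}
}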


@@ -1,117 +0,0 @@
package sqlmigration
import (
"context"
"github.com/SigNoz/signoz/pkg/factory"
"github.com/SigNoz/signoz/pkg/sqlstore"
"github.com/uptrace/bun"
"github.com/uptrace/bun/migrate"
)
type updateOrganizations struct {
store sqlstore.SQLStore
}
func NewUpdateOrganizationsFactory(sqlstore sqlstore.SQLStore) factory.ProviderFactory[SQLMigration, Config] {
return factory.NewProviderFactory(factory.MustNewName("update_organizations"), func(ctx context.Context, ps factory.ProviderSettings, c Config) (SQLMigration, error) {
return newUpdateOrganizations(ctx, ps, c, sqlstore)
})
}
func newUpdateOrganizations(_ context.Context, _ factory.ProviderSettings, _ Config, store sqlstore.SQLStore) (SQLMigration, error) {
return &updateOrganizations{store: store}, nil
}
func (migration *updateOrganizations) Register(migrations *migrate.Migrations) error {
if err := migrations.Register(migration.Up, migration.Down); err != nil {
return err
}
return nil
}
func (migration *updateOrganizations) Up(ctx context.Context, db *bun.DB) error {
tx, err := db.BeginTx(ctx, nil)
if err != nil {
return err
}
defer func() {
_ = tx.Rollback()
}()
err = migration.
store.
Dialect().
DropColumn(ctx, tx, "organizations", "is_anonymous")
if err != nil {
return err
}
err = migration.
store.
Dialect().
DropColumn(ctx, tx, "organizations", "has_opted_updates")
if err != nil {
return err
}
_, err = migration.
store.
Dialect().
RenameColumn(ctx, tx, "organizations", "name", "display_name")
if err != nil {
return err
}
err = migration.
store.
Dialect().
AddColumn(ctx, tx, "organizations", "name", "TEXT")
if err != nil {
return err
}
_, err = tx.
NewCreateIndex().
Unique().
IfNotExists().
Index("idx_unique_name").
Table("organizations").
Column("name").
Exec(ctx)
if err != nil {
return err
}
err = migration.
store.
Dialect().
AddColumn(ctx, tx, "organizations", "alias", "TEXT")
if err != nil {
return err
}
_, err = tx.
NewCreateIndex().
Unique().
IfNotExists().
Index("idx_unique_alias").
Table("organizations").
Column("alias").
Exec(ctx)
if err != nil {
return err
}
err = tx.Commit()
if err != nil {
return err
}
return nil
}
func (migration *updateOrganizations) Down(context.Context, *bun.DB) error {
return nil
}


@@ -1,162 +0,0 @@
package sqlmigration
import (
"context"
"github.com/SigNoz/signoz/pkg/factory"
"github.com/SigNoz/signoz/pkg/sqlstore"
"github.com/SigNoz/signoz/pkg/types"
"github.com/uptrace/bun"
"github.com/uptrace/bun/migrate"
)
type dropGroups struct {
sqlstore sqlstore.SQLStore
}
func NewDropGroupsFactory(sqlstore sqlstore.SQLStore) factory.ProviderFactory[SQLMigration, Config] {
return factory.NewProviderFactory(factory.MustNewName("drop_groups"), func(ctx context.Context, providerSettings factory.ProviderSettings, config Config) (SQLMigration, error) {
return newDropGroups(ctx, providerSettings, config, sqlstore)
})
}
func newDropGroups(_ context.Context, _ factory.ProviderSettings, _ Config, sqlstore sqlstore.SQLStore) (SQLMigration, error) {
return &dropGroups{sqlstore: sqlstore}, nil
}
func (migration *dropGroups) Register(migrations *migrate.Migrations) error {
if err := migrations.Register(migration.Up, migration.Down); err != nil {
return err
}
return nil
}
func (migration *dropGroups) Up(ctx context.Context, db *bun.DB) error {
type Group struct {
bun.BaseModel `bun:"table:groups"`
types.TimeAuditable
OrgID string `bun:"org_id,type:text"`
ID string `bun:"id,pk,type:text" json:"id"`
Name string `bun:"name,type:text,notnull,unique" json:"name"`
}
exists, err := migration.sqlstore.Dialect().TableExists(ctx, db, new(Group))
if err != nil {
return err
}
if !exists {
return nil
}
// Disable foreign keys temporarily
if err := migration.sqlstore.Dialect().ToggleForeignKeyConstraint(ctx, db, false); err != nil {
return err
}
tx, err := db.BeginTx(ctx, nil)
if err != nil {
return err
}
defer func() {
_ = tx.Rollback()
}()
type existingUser struct {
bun.BaseModel `bun:"table:users"`
types.TimeAuditable
ID string `bun:"id,pk,type:text" json:"id"`
Name string `bun:"name,type:text,notnull" json:"name"`
Email string `bun:"email,type:text,notnull,unique" json:"email"`
Password string `bun:"password,type:text,notnull" json:"-"`
ProfilePictureURL string `bun:"profile_picture_url,type:text" json:"profilePictureURL"`
GroupID string `bun:"group_id,type:text,notnull" json:"groupId"`
OrgID string `bun:"org_id,type:text,notnull" json:"orgId"`
}
var existingUsers []*existingUser
if err := tx.
NewSelect().
Model(&existingUsers).
Scan(ctx); err != nil {
return err
}
var groups []*Group
if err := tx.
NewSelect().
Model(&groups).
Scan(ctx); err != nil {
return err
}
groupIDToRoleMap := make(map[string]string)
for _, group := range groups {
groupIDToRoleMap[group.ID] = group.Name
}
roleToUserIDMap := make(map[string][]string)
for _, user := range existingUsers {
roleToUserIDMap[groupIDToRoleMap[user.GroupID]] = append(roleToUserIDMap[groupIDToRoleMap[user.GroupID]], user.ID)
}
if err := migration.sqlstore.Dialect().DropColumnWithForeignKeyConstraint(ctx, tx, new(struct {
bun.BaseModel `bun:"table:users"`
types.TimeAuditable
ID string `bun:"id,pk,type:text"`
Name string `bun:"name,type:text,notnull"`
Email string `bun:"email,type:text,notnull,unique"`
Password string `bun:"password,type:text,notnull"`
ProfilePictureURL string `bun:"profile_picture_url,type:text"`
OrgID string `bun:"org_id,type:text,notnull"`
}), "group_id"); err != nil {
return err
}
if err := migration.sqlstore.Dialect().AddColumn(ctx, tx, "users", "role", "TEXT"); err != nil {
return err
}
for role, userIDs := range roleToUserIDMap {
if _, err := tx.
NewUpdate().
Table("users").
Set("role = ?", role).
Where("id IN (?)", bun.In(userIDs)).
Exec(ctx); err != nil {
return err
}
}
if err := migration.sqlstore.Dialect().AddNotNullDefaultToColumn(ctx, tx, "users", "role", "TEXT", "'VIEWER'"); err != nil {
return err
}
if _, err := tx.
NewDropTable().
Table("groups").
IfExists().
Exec(ctx); err != nil {
return err
}
if err := tx.Commit(); err != nil {
return err
}
// Re-enable foreign keys
if err := migration.sqlstore.Dialect().ToggleForeignKeyConstraint(ctx, db, true); err != nil {
return err
}
return nil
}
func (migration *dropGroups) Down(ctx context.Context, db *bun.DB) error {
return nil
}
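
ToggleForeignKeyConstraint is invoked on db rather than tx because SQLite ignores changes to the foreign_keys pragma while a transaction is open. The dialect implementation is not part of this diff; on SQLite it presumably amounts to something like this sketch:

package sqlmigration

import (
	"context"

	"github.com/uptrace/bun"
)

// Hypothetical SQLite-side implementation of ToggleForeignKeyConstraint.
// The pragma must run outside any transaction to take effect.
func toggleForeignKeyConstraint(ctx context.Context, db *bun.DB, on bool) error {
	pragma := "OFF"
	if on {
		pragma = "ON"
	}
	_, err := db.ExecContext(ctx, "PRAGMA foreign_keys = "+pragma)
	return err
}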


@@ -1,103 +0,0 @@
package sqlmigration
import (
"context"
"database/sql"
"github.com/SigNoz/signoz/pkg/errors"
"github.com/SigNoz/signoz/pkg/factory"
"github.com/SigNoz/signoz/pkg/sqlstore"
"github.com/SigNoz/signoz/pkg/types"
"github.com/SigNoz/signoz/pkg/types/quickfiltertypes"
"github.com/SigNoz/signoz/pkg/valuer"
"github.com/uptrace/bun"
"github.com/uptrace/bun/migrate"
)
type createQuickFilters struct {
store sqlstore.SQLStore
}
type quickFilter struct {
bun.BaseModel `bun:"table:quick_filter"`
types.Identifiable
OrgID string `bun:"org_id,notnull,unique:org_id_signal,type:text"`
Filter string `bun:"filter,notnull,type:text"`
Signal string `bun:"signal,notnull,unique:org_id_signal,type:text"`
types.TimeAuditable
types.UserAuditable
}
func NewCreateQuickFiltersFactory(store sqlstore.SQLStore) factory.ProviderFactory[SQLMigration, Config] {
return factory.NewProviderFactory(factory.MustNewName("create_quick_filters"), func(ctx context.Context, ps factory.ProviderSettings, c Config) (SQLMigration, error) {
return &createQuickFilters{store: store}, nil
})
}
func (m *createQuickFilters) Register(migrations *migrate.Migrations) error {
return migrations.Register(m.Up, m.Down)
}
func (m *createQuickFilters) Up(ctx context.Context, db *bun.DB) error {
tx, err := db.BeginTx(ctx, nil)
if err != nil {
return err
}
defer func() {
_ = tx.Rollback()
}()
// Create table if not exists
_, err = tx.NewCreateTable().
Model((*quickFilter)(nil)).
IfNotExists().
ForeignKey(`("org_id") REFERENCES "organizations" ("id") ON DELETE CASCADE ON UPDATE CASCADE`).
Exec(ctx)
if err != nil {
return err
}
// Get default organization ID
var defaultOrg valuer.UUID
err = tx.NewSelect().Table("organizations").Column("id").Limit(1).Scan(ctx, &defaultOrg)
if err != nil {
if err == sql.ErrNoRows {
// No organizations found, nothing to insert, commit and return
err := tx.Commit()
if err != nil {
return err
}
return nil
}
return err
}
// Get the default quick filters
storableQuickFilters, err := quickfiltertypes.NewDefaultQuickFilter(defaultOrg)
if err != nil {
return err
}
// Insert all filters at once
_, err = tx.NewInsert().
Model(&storableQuickFilters).
Exec(ctx)
if err != nil {
if errors.Ast(m.store.WrapAlreadyExistsErrf(err, errors.CodeAlreadyExists, "Quick Filter already exists"), errors.TypeAlreadyExists) {
err := tx.Commit()
if err != nil {
return err
}
return nil
}
return err
}
// Commit the transaction
return tx.Commit()
}
func (m *createQuickFilters) Down(ctx context.Context, db *bun.DB) error {
return nil
}


@@ -1,106 +0,0 @@
package sqlmigration
import (
"context"
"database/sql"
"github.com/SigNoz/signoz/pkg/errors"
"github.com/SigNoz/signoz/pkg/factory"
"github.com/SigNoz/signoz/pkg/sqlstore"
"github.com/SigNoz/signoz/pkg/types/quickfiltertypes"
"github.com/SigNoz/signoz/pkg/valuer"
"github.com/uptrace/bun"
"github.com/uptrace/bun/migrate"
)
type updateQuickFilters struct {
store sqlstore.SQLStore
}
func NewUpdateQuickFiltersFactory(store sqlstore.SQLStore) factory.ProviderFactory[SQLMigration, Config] {
return factory.NewProviderFactory(factory.MustNewName("update_quick_filters"), func(ctx context.Context, ps factory.ProviderSettings, c Config) (SQLMigration, error) {
return newUpdateQuickFilters(ctx, ps, c, store)
})
}
func newUpdateQuickFilters(_ context.Context, _ factory.ProviderSettings, _ Config, store sqlstore.SQLStore) (SQLMigration, error) {
return &updateQuickFilters{
store: store,
}, nil
}
func (migration *updateQuickFilters) Register(migrations *migrate.Migrations) error {
if err := migrations.Register(migration.Up, migration.Down); err != nil {
return err
}
return nil
}
func (migration *updateQuickFilters) Up(ctx context.Context, db *bun.DB) error {
tx, err := db.BeginTx(ctx, nil)
if err != nil {
return err
}
defer func() {
_ = tx.Rollback()
}()
// Delete all existing quick filters
_, err = tx.NewDelete().
Table("quick_filter").
Where("1=1"). // Delete all rows
Exec(ctx)
if err != nil {
return err
}
// Get all organization IDs as strings
var orgIDs []string
err = tx.NewSelect().
Table("organizations").
Column("id").
Scan(ctx, &orgIDs)
if err != nil {
if err == sql.ErrNoRows {
// No organizations found, commit the transaction (deletion is done) and return
if err := tx.Commit(); err != nil {
return err
}
return nil
}
return err
}
// For each organization, create new quick filters with the updated NewDefaultQuickFilter function
for _, orgID := range orgIDs {
// Get the updated default quick filters
storableQuickFilters, err := quickfiltertypes.NewDefaultQuickFilter(valuer.MustNewUUID(orgID))
if err != nil {
return err
}
// Insert all filters for this organization
_, err = tx.NewInsert().
Model(&storableQuickFilters).
Exec(ctx)
if err != nil {
if errors.Ast(migration.store.WrapAlreadyExistsErrf(err, errors.CodeAlreadyExists, "Quick Filter already exists"), errors.TypeAlreadyExists) {
// Skip if filters already exist for this org
continue
}
return err
}
}
if err := tx.Commit(); err != nil {
return err
}
return nil
}
func (migration *updateQuickFilters) Down(ctx context.Context, db *bun.DB) error {
return nil
}


@@ -1,235 +0,0 @@
package sqlmigration
import (
"context"
"database/sql"
"github.com/SigNoz/signoz/pkg/factory"
"github.com/SigNoz/signoz/pkg/sqlstore"
"github.com/SigNoz/signoz/pkg/types"
"github.com/SigNoz/signoz/pkg/valuer"
"github.com/uptrace/bun"
"github.com/uptrace/bun/migrate"
)
type authRefactor struct {
store sqlstore.SQLStore
}
func NewAuthRefactorFactory(sqlstore sqlstore.SQLStore) factory.ProviderFactory[SQLMigration, Config] {
return factory.NewProviderFactory(factory.MustNewName("auth_refactor"), func(ctx context.Context, ps factory.ProviderSettings, c Config) (SQLMigration, error) {
return newAuthRefactor(ctx, ps, c, sqlstore)
})
}
func newAuthRefactor(_ context.Context, _ factory.ProviderSettings, _ Config, store sqlstore.SQLStore) (SQLMigration, error) {
return &authRefactor{store: store}, nil
}
func (migration *authRefactor) Register(migrations *migrate.Migrations) error {
if err := migrations.Register(migration.Up, migration.Down); err != nil {
return err
}
return nil
}
type existingUser32 struct {
bun.BaseModel `bun:"table:users"`
types.TimeAuditable
ID string `bun:"id,pk,type:text" json:"id"`
Name string `bun:"name,type:text,notnull" json:"name"`
Email string `bun:"email,type:text,notnull,unique" json:"email"`
Password string `bun:"password,type:text,notnull" json:"-"`
ProfilePictureURL string `bun:"profile_picture_url,type:text" json:"profilePictureURL"`
Role string `bun:"role,type:text,notnull" json:"role"`
OrgID string `bun:"org_id,type:text,notnull" json:"orgId"`
}
type factorPassword32 struct {
bun.BaseModel `bun:"table:factor_password"`
types.Identifiable
types.TimeAuditable
Password string `bun:"password,type:text,notnull" json:"password"`
Temporary bool `bun:"temporary,type:boolean,notnull" json:"temporary"`
UserID string `bun:"user_id,type:text,notnull" json:"userID"`
}
type existingResetPasswordRequest32 struct {
bun.BaseModel `bun:"table:reset_password_request"`
types.Identifiable
Token string `bun:"token,type:text,notnull" json:"token"`
UserID string `bun:"user_id,type:text,notnull,unique" json:"userId"`
}
type newResetPasswordRequest32 struct {
bun.BaseModel `bun:"table:reset_password_token"`
types.Identifiable
Token string `bun:"token,type:text,notnull" json:"token"`
PasswordID string `bun:"password_id,type:text,notnull" json:"passwordID"`
}
func (migration *authRefactor) Up(ctx context.Context, db *bun.DB) error {
tx, err := db.BeginTx(ctx, nil)
if err != nil {
return err
}
defer func() {
_ = tx.Rollback()
}()
if _, err := tx.NewCreateTable().
Model(new(factorPassword32)).
ForeignKey(`("user_id") REFERENCES "users" ("id")`).
IfNotExists().
Exec(ctx); err != nil {
return err
}
// copy passwords from users table to factor_password table
err = migration.CopyOldPasswordToNewPassword(ctx, tx)
if err != nil {
return err
}
// delete profile picture url
err = migration.store.Dialect().DropColumn(ctx, tx, "users", "profile_picture_url")
if err != nil {
return err
}
// delete password
err = migration.store.Dialect().DropColumn(ctx, tx, "users", "password")
if err != nil {
return err
}
// rename name to display_name
_, err = migration.store.Dialect().RenameColumn(ctx, tx, "users", "name", "display_name")
if err != nil {
return err
}
err = migration.
store.
Dialect().
RenameTableAndModifyModel(ctx, tx, new(existingResetPasswordRequest32), new(newResetPasswordRequest32), []string{FactorPasswordReference}, func(ctx context.Context) error {
existingRequests := make([]*existingResetPasswordRequest32, 0)
err = tx.
NewSelect().
Model(&existingRequests).
Scan(ctx)
if err != nil {
if err != sql.ErrNoRows {
return err
}
}
if err == nil && len(existingRequests) > 0 {
// copy existing reset password requests to the new table
newRequests, err := migration.
CopyOldResetPasswordToNewResetPassword(ctx, tx, existingRequests)
if err != nil {
return err
}
_, err = tx.
NewInsert().
Model(&newRequests).
Exec(ctx)
if err != nil {
return err
}
}
return nil
})
if err != nil {
return err
}
err = tx.Commit()
if err != nil {
return err
}
return nil
}
func (migration *authRefactor) Down(context.Context, *bun.DB) error {
return nil
}
func (migration *authRefactor) CopyOldPasswordToNewPassword(ctx context.Context, tx bun.IDB) error {
// check if data already exists in the factor_password table
var count int64
err := tx.NewSelect().Model(new(factorPassword32)).ColumnExpr("COUNT(*)").Scan(ctx, &count)
if err != nil {
return err
}
if count > 0 {
return nil
}
// check if the password column exists in the users table.
exists, err := migration.store.Dialect().ColumnExists(ctx, tx, "users", "password")
if err != nil {
return err
}
if !exists {
return nil
}
// get all users from users table
existingUsers := make([]*existingUser32, 0)
err = tx.NewSelect().Model(&existingUsers).Scan(ctx)
if err != nil {
return err
}
newPasswords := make([]*factorPassword32, 0)
for _, user := range existingUsers {
newPasswords = append(newPasswords, &factorPassword32{
Identifiable: types.Identifiable{
ID: valuer.GenerateUUID(),
},
Password: user.Password,
Temporary: false,
UserID: user.ID,
})
}
// insert
if len(newPasswords) > 0 {
_, err = tx.NewInsert().Model(&newPasswords).Exec(ctx)
if err != nil {
return err
}
}
return nil
}
func (migration *authRefactor) CopyOldResetPasswordToNewResetPassword(ctx context.Context, tx bun.IDB, existingRequests []*existingResetPasswordRequest32) ([]*newResetPasswordRequest32, error) {
newRequests := make([]*newResetPasswordRequest32, 0)
for _, request := range existingRequests {
// get password id from user id
var passwordID string
err := tx.NewSelect().Table("factor_password").Column("id").Where("user_id = ?", request.UserID).Scan(ctx, &passwordID)
if err != nil {
return nil, err
}
newRequests = append(newRequests, &newResetPasswordRequest32{
Identifiable: types.Identifiable{
ID: valuer.GenerateUUID(),
},
Token: request.Token,
PasswordID: passwordID,
})
}
return newRequests, nil
}
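Every migration in this diff follows the same lifecycle: a provider factory constructs it, Register wires its Up/Down pair into a *migrate.Migrations set, and Up runs its statements inside a transaction that is rolled back on any error. For orientation, here is a minimal, self-contained sketch of how such an Up/Down pair is driven by bun's migrate package directly; the sqlite driver, table name, and migration body are hypothetical stand-ins, not SigNoz's actual wiring:

package main

import (
	"context"
	"database/sql"

	"github.com/uptrace/bun"
	"github.com/uptrace/bun/dialect/sqlitedialect"
	"github.com/uptrace/bun/driver/sqliteshim"
	"github.com/uptrace/bun/migrate"
)

func main() {
	ctx := context.Background()

	sqldb, err := sql.Open(sqliteshim.ShimName, "file::memory:?cache=shared")
	if err != nil {
		panic(err)
	}
	db := bun.NewDB(sqldb, sqlitedialect.New())

	migrations := migrate.NewMigrations()
	// Register an Up/Down pair, mirroring each Register method in this package.
	if err := migrations.Register(
		func(ctx context.Context, db *bun.DB) error {
			_, err := db.ExecContext(ctx, `CREATE TABLE IF NOT EXISTS example (id TEXT PRIMARY KEY)`)
			return err
		},
		func(ctx context.Context, db *bun.DB) error {
			return nil // like the migrations above, Down is a no-op
		},
	); err != nil {
		panic(err)
	}

	migrator := migrate.NewMigrator(db, migrations)
	if err := migrator.Init(ctx); err != nil { // creates bun's migration bookkeeping tables
		panic(err)
	}
	if _, err := migrator.Migrate(ctx); err != nil {
		panic(err)
	}
}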


@@ -1,164 +0,0 @@
package sqlmigration
import (
"context"
"database/sql"
"time"
"github.com/SigNoz/signoz/pkg/factory"
"github.com/SigNoz/signoz/pkg/sqlstore"
"github.com/SigNoz/signoz/pkg/types"
"github.com/uptrace/bun"
"github.com/uptrace/bun/migrate"
)
type migratePATToFactorAPIKey struct {
store sqlstore.SQLStore
}
func NewMigratePATToFactorAPIKey(sqlstore sqlstore.SQLStore) factory.ProviderFactory[SQLMigration, Config] {
return factory.NewProviderFactory(factory.MustNewName("migrate_pat_to_factor_api_key"), func(ctx context.Context, ps factory.ProviderSettings, c Config) (SQLMigration, error) {
return newMigratePATToFactorAPIKey(ctx, ps, c, sqlstore)
})
}
func newMigratePATToFactorAPIKey(_ context.Context, _ factory.ProviderSettings, _ Config, store sqlstore.SQLStore) (SQLMigration, error) {
return &migratePATToFactorAPIKey{store: store}, nil
}
func (migration *migratePATToFactorAPIKey) Register(migrations *migrate.Migrations) error {
if err := migrations.Register(migration.Up, migration.Down); err != nil {
return err
}
return nil
}
type existingPersonalAccessToken33 struct {
bun.BaseModel `bun:"table:personal_access_token"`
types.Identifiable
types.TimeAuditable
OrgID string `json:"orgId" bun:"org_id,type:text,notnull"`
Role string `json:"role" bun:"role,type:text,notnull,default:'ADMIN'"`
UserID string `json:"userId" bun:"user_id,type:text,notnull"`
Token string `json:"token" bun:"token,type:text,notnull,unique"`
Name string `json:"name" bun:"name,type:text,notnull"`
ExpiresAt int64 `json:"expiresAt" bun:"expires_at,notnull,default:0"`
LastUsed int64 `json:"lastUsed" bun:"last_used,notnull,default:0"`
Revoked bool `json:"revoked" bun:"revoked,notnull,default:false"`
UpdatedByUserID string `json:"updatedByUserId" bun:"updated_by_user_id,type:text,notnull,default:''"`
}
// We are removing the connection with the org. API keys should have
// exactly one foreign key; we don't want a dangling state where an API
// key belongs to one org but to a user who doesn't belong to that org.
// Attaching the key directly to the user avoids such dangling states.
type newFactorAPIKey33 struct {
bun.BaseModel `bun:"table:factor_api_key"`
types.Identifiable
CreatedAt time.Time `bun:"created_at,notnull,nullzero,type:timestamptz" json:"createdAt"`
UpdatedAt time.Time `bun:"updated_at,notnull,nullzero,type:timestamptz" json:"updatedAt"`
CreatedBy string `bun:"created_by,notnull" json:"createdBy"`
UpdatedBy string `bun:"updated_by,notnull" json:"updatedBy"`
Token string `json:"token" bun:"token,type:text,notnull,unique"`
Role string `json:"role" bun:"role,type:text,notnull"`
Name string `json:"name" bun:"name,type:text,notnull"`
ExpiresAt time.Time `json:"expiresAt" bun:"expires_at,notnull,nullzero,type:timestamptz"`
LastUsed time.Time `json:"lastUsed" bun:"last_used,notnull,nullzero,type:timestamptz"`
Revoked bool `json:"revoked" bun:"revoked,notnull,default:false"`
UserID string `json:"userId" bun:"user_id,type:text,notnull"`
}
func (migration *migratePATToFactorAPIKey) Up(ctx context.Context, db *bun.DB) error {
tx, err := db.BeginTx(ctx, nil)
if err != nil {
return err
}
defer func() {
_ = tx.Rollback()
}()
err = migration.
store.
Dialect().
RenameTableAndModifyModel(ctx, tx, new(existingPersonalAccessToken33), new(newFactorAPIKey33), []string{UserReferenceNoCascade}, func(ctx context.Context) error {
existingAPIKeys := make([]*existingPersonalAccessToken33, 0)
err = tx.
NewSelect().
Model(&existingAPIKeys).
Scan(ctx)
if err != nil && err != sql.ErrNoRows {
return err
}
if err == nil && len(existingAPIKeys) > 0 {
newAPIKeys, err := migration.
CopyOldPatToFactorAPIKey(ctx, tx, existingAPIKeys)
if err != nil {
return err
}
_, err = tx.
NewInsert().
Model(&newAPIKeys).
Exec(ctx)
if err != nil {
return err
}
}
return nil
})
if err != nil {
return err
}
err = tx.Commit()
if err != nil {
return err
}
return nil
}
func (migration *migratePATToFactorAPIKey) Down(ctx context.Context, db *bun.DB) error {
return nil
}
func (migration *migratePATToFactorAPIKey) CopyOldPatToFactorAPIKey(ctx context.Context, tx bun.IDB, existingAPIKeys []*existingPersonalAccessToken33) ([]*newFactorAPIKey33, error) {
newAPIKeys := make([]*newFactorAPIKey33, 0)
for _, apiKey := range existingAPIKeys {
if apiKey.CreatedAt.IsZero() {
apiKey.CreatedAt = time.Now()
}
if apiKey.UpdatedAt.IsZero() {
apiKey.UpdatedAt = time.Now()
}
// convert expiresAt and lastUsed to time.Time
expiresAt := time.Unix(apiKey.ExpiresAt, 0)
lastUsed := time.Unix(apiKey.LastUsed, 0)
if apiKey.LastUsed == 0 {
lastUsed = apiKey.CreatedAt
}
newAPIKeys = append(newAPIKeys, &newFactorAPIKey33{
Identifiable: apiKey.Identifiable,
CreatedAt: apiKey.CreatedAt,
UpdatedAt: apiKey.UpdatedAt,
CreatedBy: apiKey.UserID,
UpdatedBy: apiKey.UpdatedByUserID,
Token: apiKey.Token,
Role: apiKey.Role,
Name: apiKey.Name,
ExpiresAt: expiresAt,
LastUsed: lastUsed,
Revoked: apiKey.Revoked,
UserID: apiKey.UserID,
})
}
return newAPIKeys, nil
}
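The PAT conversion above stores expiry and last-use as int64 epoch seconds and converts them with time.Unix, so a stored zero becomes the 1970 Unix epoch rather than "never used"; that is why LastUsed == 0 is remapped to CreatedAt (ExpiresAt is left as the epoch). A minimal sketch of that behavior:

package main

import (
	"fmt"
	"time"
)

func main() {
	// A stored epoch of 0 converts to the Unix epoch, not "never used":
	fmt.Println(time.Unix(0, 0).UTC()) // 1970-01-01 00:00:00 +0000 UTC

	// which is why the migration remaps LastUsed == 0 to CreatedAt instead:
	createdAt := time.Now()
	var lastUsedEpoch int64
	lastUsed := time.Unix(lastUsedEpoch, 0)
	if lastUsedEpoch == 0 {
		lastUsed = createdAt
	}
	fmt.Println(lastUsed.Equal(createdAt)) // true
}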


@@ -1,149 +0,0 @@
package sqlmigration
import (
"context"
"database/sql"
"encoding/json"
"time"
"github.com/SigNoz/signoz/pkg/errors"
"github.com/SigNoz/signoz/pkg/factory"
"github.com/SigNoz/signoz/pkg/sqlstore"
"github.com/SigNoz/signoz/pkg/types"
"github.com/SigNoz/signoz/pkg/valuer"
"github.com/uptrace/bun"
"github.com/uptrace/bun/migrate"
)
type updateLicense struct {
store sqlstore.SQLStore
}
type existingLicense34 struct {
bun.BaseModel `bun:"table:licenses_v3"`
ID string `bun:"id,pk,type:text"`
Key string `bun:"key,type:text,notnull,unique"`
Data string `bun:"data,type:text"`
}
type newLicense34 struct {
bun.BaseModel `bun:"table:license"`
types.Identifiable
types.TimeAuditable
Key string `bun:"key,type:text,notnull,unique"`
Data map[string]any `bun:"data,type:text"`
LastValidatedAt time.Time `bun:"last_validated_at,notnull"`
OrgID string `bun:"org_id,type:text,notnull" json:"orgID"`
}
func NewUpdateLicenseFactory(store sqlstore.SQLStore) factory.ProviderFactory[SQLMigration, Config] {
return factory.NewProviderFactory(factory.MustNewName("update_license"), func(ctx context.Context, ps factory.ProviderSettings, c Config) (SQLMigration, error) {
return newUpdateLicense(ctx, ps, c, store)
})
}
func newUpdateLicense(_ context.Context, _ factory.ProviderSettings, _ Config, store sqlstore.SQLStore) (SQLMigration, error) {
return &updateLicense{store: store}, nil
}
func (migration *updateLicense) Register(migrations *migrate.Migrations) error {
if err := migrations.Register(migration.Up, migration.Down); err != nil {
return err
}
return nil
}
func (migration *updateLicense) Up(ctx context.Context, db *bun.DB) error {
tx, err := db.BeginTx(ctx, nil)
if err != nil {
return err
}
defer func() {
_ = tx.Rollback()
}()
err = migration.store.Dialect().RenameTableAndModifyModel(ctx, tx, new(existingLicense34), new(newLicense34), []string{OrgReference}, func(ctx context.Context) error {
existingLicenses := make([]*existingLicense34, 0)
err = tx.NewSelect().Model(&existingLicenses).Scan(ctx)
if err != nil && err != sql.ErrNoRows {
return err
}
if err == nil && len(existingLicenses) > 0 {
var orgID string
err := migration.
store.
BunDB().
NewSelect().
Model((*types.Organization)(nil)).
Column("id").
Scan(ctx, &orgID)
if err != nil && err != sql.ErrNoRows {
return err
}
if err == nil {
newLicenses, err := migration.CopyExistingLicensesToNewLicenses(existingLicenses, orgID)
if err != nil {
return err
}
_, err = tx.
NewInsert().
Model(&newLicenses).
Exec(ctx)
if err != nil {
return err
}
}
return nil
}
return nil
})
if err != nil {
return err
}
err = tx.Commit()
if err != nil {
return err
}
return nil
}
func (migration *updateLicense) Down(context.Context, *bun.DB) error {
return nil
}
func (migration *updateLicense) CopyExistingLicensesToNewLicenses(existingLicenses []*existingLicense34, orgID string) ([]*newLicense34, error) {
newLicenses := make([]*newLicense34, len(existingLicenses))
for idx, existingLicense := range existingLicenses {
licenseID, err := valuer.NewUUID(existingLicense.ID)
if err != nil {
return nil, errors.Wrapf(err, errors.TypeInvalidInput, errors.CodeInvalidInput, "license id is not a valid UUID: %s", existingLicense.ID)
}
licenseData := map[string]any{}
err = json.Unmarshal([]byte(existingLicense.Data), &licenseData)
if err != nil {
return nil, errors.Wrapf(err, errors.TypeInvalidInput, errors.CodeInvalidInput, "unable to unmarshal license data into map[string]any")
}
newLicenses[idx] = &newLicense34{
Identifiable: types.Identifiable{
ID: licenseID,
},
TimeAuditable: types.TimeAuditable{
CreatedAt: time.Now(),
UpdatedAt: time.Now(),
},
Key: existingLicense.Key,
Data: licenseData,
LastValidatedAt: time.Now(),
OrgID: orgID,
}
}
return newLicenses, nil
}


@@ -1,103 +0,0 @@
package sqlmigration
import (
"context"
"database/sql"
"time"
"github.com/SigNoz/signoz/pkg/factory"
"github.com/SigNoz/signoz/pkg/sqlstore"
"github.com/SigNoz/signoz/pkg/types/quickfiltertypes"
"github.com/SigNoz/signoz/pkg/valuer"
"github.com/uptrace/bun"
"github.com/uptrace/bun/migrate"
)
type updateApiMonitoringFilters struct {
store sqlstore.SQLStore
}
func NewUpdateApiMonitoringFiltersFactory(store sqlstore.SQLStore) factory.ProviderFactory[SQLMigration, Config] {
return factory.NewProviderFactory(factory.MustNewName("update_api_monitoring_filters"), func(ctx context.Context, ps factory.ProviderSettings, c Config) (SQLMigration, error) {
return newUpdateApiMonitoringFilters(ctx, ps, c, store)
})
}
func newUpdateApiMonitoringFilters(_ context.Context, _ factory.ProviderSettings, _ Config, store sqlstore.SQLStore) (SQLMigration, error) {
return &updateApiMonitoringFilters{
store: store,
}, nil
}
func (migration *updateApiMonitoringFilters) Register(migrations *migrate.Migrations) error {
if err := migrations.Register(migration.Up, migration.Down); err != nil {
return err
}
return nil
}
func (migration *updateApiMonitoringFilters) Up(ctx context.Context, db *bun.DB) error {
tx, err := db.BeginTx(ctx, nil)
if err != nil {
return err
}
defer func() {
_ = tx.Rollback()
}()
// Get all organization IDs as strings
var orgIDs []string
err = tx.NewSelect().
Table("organizations").
Column("id").
Scan(ctx, &orgIDs)
if err != nil {
if err == sql.ErrNoRows {
if err := tx.Commit(); err != nil {
return err
}
return nil
}
return err
}
for _, orgID := range orgIDs {
// Get the updated default quick filters, which include the new API monitoring filters
storableQuickFilters, err := quickfiltertypes.NewDefaultQuickFilter(valuer.MustNewUUID(orgID))
if err != nil {
return err
}
// Find the API monitoring filter from the storable quick filters
var apiMonitoringFilterJSON string
for _, filter := range storableQuickFilters {
if filter.Signal == quickfiltertypes.SignalApiMonitoring {
apiMonitoringFilterJSON = filter.Filter
break
}
}
if apiMonitoringFilterJSON != "" {
_, err = tx.NewUpdate().
Table("quick_filter").
Set("filter = ?, updated_at = ?", apiMonitoringFilterJSON, time.Now()).
Where("signal = ? AND org_id = ?", quickfiltertypes.SignalApiMonitoring, orgID).
Exec(ctx)
if err != nil {
return err
}
}
}
if err := tx.Commit(); err != nil {
return err
}
return nil
}
func (migration *updateApiMonitoringFilters) Down(ctx context.Context, db *bun.DB) error {
return nil
}


@@ -1,112 +0,0 @@
package sqlmigration
import (
"context"
"hash/fnv"
"github.com/SigNoz/signoz/pkg/factory"
"github.com/SigNoz/signoz/pkg/sqlstore"
"github.com/uptrace/bun"
"github.com/uptrace/bun/migrate"
)
type addKeyOrganization struct {
sqlstore sqlstore.SQLStore
}
func NewAddKeyOrganizationFactory(sqlstore sqlstore.SQLStore) factory.ProviderFactory[SQLMigration, Config] {
return factory.NewProviderFactory(factory.MustNewName("add_key_organization"), func(ctx context.Context, providerSettings factory.ProviderSettings, config Config) (SQLMigration, error) {
return newAddKeyOrganization(ctx, providerSettings, config, sqlstore)
})
}
func newAddKeyOrganization(_ context.Context, _ factory.ProviderSettings, _ Config, sqlstore sqlstore.SQLStore) (SQLMigration, error) {
return &addKeyOrganization{
sqlstore: sqlstore,
}, nil
}
func (migration *addKeyOrganization) Register(migrations *migrate.Migrations) error {
if err := migrations.Register(migration.Up, migration.Down); err != nil {
return err
}
return nil
}
func (migration *addKeyOrganization) Up(ctx context.Context, db *bun.DB) error {
ok, err := migration.sqlstore.Dialect().ColumnExists(ctx, db, "organizations", "key")
if err != nil {
return err
}
if ok {
return nil
}
tx, err := db.BeginTx(ctx, nil)
if err != nil {
return err
}
defer func() {
_ = tx.Rollback()
}()
if _, err := tx.
NewAddColumn().
Table("organizations").
ColumnExpr("key BIGINT").
Exec(ctx); err != nil {
return err
}
var existingOrgIDs []string
if err := tx.NewSelect().
Table("organizations").
Column("id").
Scan(ctx, &existingOrgIDs); err != nil {
return err
}
for _, orgID := range existingOrgIDs {
key := migration.getHash(ctx, orgID)
if _, err := tx.
NewUpdate().
Table("organizations").
Set("key = ?", key).
Where("id = ?", orgID).
Exec(ctx); err != nil {
return err
}
}
if _, err := tx.
NewCreateIndex().
Unique().
IfNotExists().
Index("idx_unique_key").
Table("organizations").
Column("key").
Exec(ctx); err != nil {
return err
}
if err := tx.Commit(); err != nil {
return err
}
return nil
}
func (migration *addKeyOrganization) Down(ctx context.Context, db *bun.DB) error {
return nil
}
func (migration *addKeyOrganization) getHash(_ context.Context, orgID string) uint32 {
hasher := fnv.New32a()
// Write on an fnv hasher never returns an error.
_, _ = hasher.Write([]byte(orgID))
return hasher.Sum32()
}
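getHash derives the organization's key column by running FNV-1a over the org ID bytes, so the key is deterministic: re-running the migration for the same org always produces the same value, and the uint32 result fits comfortably in the BIGINT column. A minimal sketch (the org ID below is a hypothetical placeholder):

package main

import (
	"fmt"
	"hash/fnv"
)

// orgKey mirrors addKeyOrganization.getHash: FNV-1a over the org ID bytes.
func orgKey(orgID string) uint32 {
	h := fnv.New32a()
	_, _ = h.Write([]byte(orgID)) // Write on an fnv hasher never returns an error
	return h.Sum32()
}

func main() {
	id := "018f3a2e-0000-0000-0000-000000000000" // hypothetical org UUID
	fmt.Println(orgKey(id) == orgKey(id))        // true: the key is deterministic
}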


@@ -1,89 +0,0 @@
package sqlmigration
import (
"context"
"github.com/SigNoz/signoz/pkg/factory"
"github.com/SigNoz/signoz/pkg/sqlstore"
"github.com/SigNoz/signoz/pkg/types"
"github.com/SigNoz/signoz/pkg/valuer"
"github.com/uptrace/bun"
"github.com/uptrace/bun/migrate"
)
// Funnel core data structures (funnel and funnelStep)
type funnel struct {
bun.BaseModel `bun:"table:trace_funnel"`
types.Identifiable // funnel id
types.TimeAuditable
types.UserAuditable
Name string `json:"funnel_name" bun:"name,type:text,notnull"` // funnel name
Description string `json:"description" bun:"description,type:text"` // funnel description
OrgID valuer.UUID `json:"org_id" bun:"org_id,type:varchar,notnull"`
Steps []funnelStep `json:"steps" bun:"steps,type:text,notnull"`
Tags string `json:"tags" bun:"tags,type:text"`
CreatedByUser *types.User `json:"user" bun:"rel:belongs-to,join:created_by=id"`
}
type funnelStep struct {
types.Identifiable
Name string `json:"name,omitempty"` // step name
Description string `json:"description,omitempty"` // step description
Order int64 `json:"step_order"`
ServiceName string `json:"service_name"`
SpanName string `json:"span_name"`
Filters string `json:"filters,omitempty"`
LatencyPointer string `json:"latency_pointer,omitempty"`
LatencyType string `json:"latency_type,omitempty"`
HasErrors bool `json:"has_errors"`
}
type addTraceFunnels struct {
sqlstore sqlstore.SQLStore
}
func NewAddTraceFunnelsFactory(sqlstore sqlstore.SQLStore) factory.ProviderFactory[SQLMigration, Config] {
return factory.NewProviderFactory(factory.MustNewName("add_trace_funnels"), func(ctx context.Context, providerSettings factory.ProviderSettings, config Config) (SQLMigration, error) {
return newAddTraceFunnels(ctx, providerSettings, config, sqlstore)
})
}
func newAddTraceFunnels(_ context.Context, _ factory.ProviderSettings, _ Config, sqlstore sqlstore.SQLStore) (SQLMigration, error) {
return &addTraceFunnels{sqlstore: sqlstore}, nil
}
func (migration *addTraceFunnels) Register(migrations *migrate.Migrations) error {
if err := migrations.Register(migration.Up, migration.Down); err != nil {
return err
}
return nil
}
func (migration *addTraceFunnels) Up(ctx context.Context, db *bun.DB) error {
tx, err := db.BeginTx(ctx, nil)
if err != nil {
return err
}
defer func() {
_ = tx.Rollback()
}()
_, err = tx.NewCreateTable().
Model(new(funnel)).
ForeignKey(`("org_id") REFERENCES "organizations" ("id") ON DELETE CASCADE`).
IfNotExists().
Exec(ctx)
if err != nil {
return err
}
err = tx.Commit()
if err != nil {
return err
}
return nil
}
func (migration *addTraceFunnels) Down(ctx context.Context, db *bun.DB) error {
return nil
}


@@ -1,141 +0,0 @@
package sqlmigration
import (
"context"
"database/sql"
"github.com/SigNoz/signoz/pkg/factory"
"github.com/SigNoz/signoz/pkg/sqlstore"
"github.com/SigNoz/signoz/pkg/types"
"github.com/SigNoz/signoz/pkg/valuer"
"github.com/uptrace/bun"
"github.com/uptrace/bun/migrate"
)
type updateDashboard struct {
store sqlstore.SQLStore
}
type existingDashboard36 struct {
bun.BaseModel `bun:"table:dashboards"`
types.TimeAuditable
types.UserAuditable
OrgID string `json:"-" bun:"org_id,notnull"`
ID int `json:"id" bun:"id,pk,autoincrement"`
UUID string `json:"uuid" bun:"uuid,type:text,notnull,unique"`
Data map[string]interface{} `json:"data" bun:"data,type:text,notnull"`
Locked *int `json:"isLocked" bun:"locked,notnull,default:0"`
}
type newDashboard36 struct {
bun.BaseModel `bun:"table:dashboard"`
types.Identifiable
types.TimeAuditable
types.UserAuditable
Data map[string]interface{} `bun:"data,type:text,notnull"`
Locked bool `bun:"locked,notnull,default:false"`
OrgID valuer.UUID `bun:"org_id,type:text,notnull"`
}
func NewUpdateDashboardFactory(store sqlstore.SQLStore) factory.ProviderFactory[SQLMigration, Config] {
return factory.NewProviderFactory(factory.MustNewName("update_dashboards"), func(ctx context.Context, ps factory.ProviderSettings, c Config) (SQLMigration, error) {
return newUpdateDashboard(ctx, ps, c, store)
})
}
func newUpdateDashboard(_ context.Context, _ factory.ProviderSettings, _ Config, store sqlstore.SQLStore) (SQLMigration, error) {
return &updateDashboard{store: store}, nil
}
func (migration *updateDashboard) Register(migrations *migrate.Migrations) error {
if err := migrations.Register(migration.Up, migration.Down); err != nil {
return err
}
return nil
}
func (migration *updateDashboard) Up(ctx context.Context, db *bun.DB) error {
tx, err := db.BeginTx(ctx, nil)
if err != nil {
return err
}
defer func() {
_ = tx.Rollback()
}()
err = migration.store.Dialect().RenameTableAndModifyModel(ctx, tx, new(existingDashboard36), new(newDashboard36), []string{OrgReference}, func(ctx context.Context) error {
existingDashboards := make([]*existingDashboard36, 0)
err = tx.NewSelect().Model(&existingDashboards).Scan(ctx)
if err != nil && err != sql.ErrNoRows {
return err
}
if err == nil && len(existingDashboards) > 0 {
newDashboards, err := migration.CopyExistingDashboardsToNewDashboards(existingDashboards)
if err != nil {
return err
}
_, err = tx.
NewInsert().
Model(&newDashboards).
Exec(ctx)
if err != nil {
return err
}
}
return nil
})
if err != nil {
return err
}
err = tx.Commit()
if err != nil {
return err
}
return nil
}
func (migration *updateDashboard) Down(context.Context, *bun.DB) error {
return nil
}
func (migration *updateDashboard) CopyExistingDashboardsToNewDashboards(existingDashboards []*existingDashboard36) ([]*newDashboard36, error) {
newDashboards := make([]*newDashboard36, len(existingDashboards))
for idx, existingDashboard := range existingDashboards {
dashboardID, err := valuer.NewUUID(existingDashboard.UUID)
if err != nil {
return nil, err
}
orgID, err := valuer.NewUUID(existingDashboard.OrgID)
if err != nil {
return nil, err
}
locked := false
if existingDashboard.Locked != nil && *existingDashboard.Locked == 1 {
locked = true
}
newDashboards[idx] = &newDashboard36{
Identifiable: types.Identifiable{
ID: dashboardID,
},
TimeAuditable: existingDashboard.TimeAuditable,
UserAuditable: existingDashboard.UserAuditable,
Data: existingDashboard.Data,
Locked: locked,
OrgID: orgID,
}
}
return newDashboards, nil
}


@@ -1,58 +0,0 @@
package sqlmigration
import (
"context"
"github.com/SigNoz/signoz/pkg/factory"
"github.com/uptrace/bun"
"github.com/uptrace/bun/migrate"
)
type dropFeatureSet struct{}
func NewDropFeatureSetFactory() factory.ProviderFactory[SQLMigration, Config] {
return factory.NewProviderFactory(factory.MustNewName("drop_feature_set"), func(ctx context.Context, ps factory.ProviderSettings, c Config) (SQLMigration, error) {
return newDropFeatureSet(ctx, ps, c)
})
}
func newDropFeatureSet(_ context.Context, _ factory.ProviderSettings, _ Config) (SQLMigration, error) {
return &dropFeatureSet{}, nil
}
func (migration *dropFeatureSet) Register(migrations *migrate.Migrations) error {
if err := migrations.Register(migration.Up, migration.Down); err != nil {
return err
}
return nil
}
func (migration *dropFeatureSet) Up(ctx context.Context, db *bun.DB) error {
tx, err := db.BeginTx(ctx, nil)
if err != nil {
return err
}
defer func() {
_ = tx.Rollback()
}()
if _, err := tx.
NewDropTable().
IfExists().
Table("feature_status").
Exec(ctx); err != nil {
return err
}
if err := tx.Commit(); err != nil {
return err
}
return nil
}
func (migration *dropFeatureSet) Down(context.Context, *bun.DB) error {
return nil
}


@@ -1,66 +0,0 @@
package sqlmigration
import (
"context"
"github.com/SigNoz/signoz/pkg/factory"
"github.com/uptrace/bun"
"github.com/uptrace/bun/migrate"
)
type dropDeprecatedTables struct{}
func NewDropDeprecatedTablesFactory() factory.ProviderFactory[SQLMigration, Config] {
return factory.NewProviderFactory(factory.MustNewName("drop_deprecated_tables"), func(ctx context.Context, ps factory.ProviderSettings, c Config) (SQLMigration, error) {
return newDropDeprecatedTables(ctx, ps, c)
})
}
func newDropDeprecatedTables(_ context.Context, _ factory.ProviderSettings, _ Config) (SQLMigration, error) {
return &dropDeprecatedTables{}, nil
}
func (migration *dropDeprecatedTables) Register(migrations *migrate.Migrations) error {
if err := migrations.Register(migration.Up, migration.Down); err != nil {
return err
}
return nil
}
func (migration *dropDeprecatedTables) Up(ctx context.Context, db *bun.DB) error {
tx, err := db.BeginTx(ctx, nil)
if err != nil {
return err
}
defer func() {
_ = tx.Rollback()
}()
if _, err := tx.
NewDropTable().
IfExists().
Table("rule_history").
Exec(ctx); err != nil {
return err
}
if _, err := tx.
NewDropTable().
IfExists().
Table("data_migrations").
Exec(ctx); err != nil {
return err
}
if err := tx.Commit(); err != nil {
return err
}
return nil
}
func (migration *dropDeprecatedTables) Down(context.Context, *bun.DB) error {
return nil
}


@@ -1,278 +0,0 @@
package sqlmigration
import (
"context"
"database/sql"
"time"
"github.com/SigNoz/signoz/pkg/factory"
"github.com/SigNoz/signoz/pkg/sqlstore"
"github.com/SigNoz/signoz/pkg/types"
"github.com/SigNoz/signoz/pkg/types/opamptypes"
"github.com/SigNoz/signoz/pkg/valuer"
"github.com/uptrace/bun"
"github.com/uptrace/bun/migrate"
)
type updateAgents struct {
store sqlstore.SQLStore
}
type newAgent41 struct {
bun.BaseModel `bun:"table:agent"`
types.Identifiable
types.TimeAuditable
// AgentID is stored separately because the ID from the opamp client is a ULID, not a UUID
AgentID string `json:"agentId" yaml:"agentId" bun:"agent_id,type:text,notnull,unique"`
OrgID string `json:"orgId" yaml:"orgId" bun:"org_id,type:text,notnull"`
TerminatedAt time.Time `json:"terminatedAt" yaml:"terminatedAt" bun:"terminated_at"`
Status opamptypes.AgentStatus `json:"currentStatus" yaml:"currentStatus" bun:"status,type:text,notnull"`
Config string `bun:"config,type:text,notnull"`
}
type existingAgentConfigVersions41 struct {
bun.BaseModel `bun:"table:agent_config_versions"`
ID string `bun:"id,pk,type:text"`
CreatedBy string `bun:"created_by,type:text"`
CreatedAt time.Time `bun:"created_at,default:CURRENT_TIMESTAMP"`
UpdatedBy string `bun:"updated_by,type:text"`
UpdatedAt time.Time `bun:"updated_at,default:CURRENT_TIMESTAMP"`
Version int `bun:"version,default:1,unique:element_version_idx"`
Active int `bun:"active"`
IsValid int `bun:"is_valid"`
Disabled int `bun:"disabled"`
ElementType opamptypes.ElementType `bun:"element_type,notnull,type:varchar(120),unique:element_version_idx"`
DeployStatus opamptypes.DeployStatus `bun:"deploy_status,notnull,type:varchar(80),default:'DIRTY'"`
DeploySequence int `bun:"deploy_sequence"`
DeployResult string `bun:"deploy_result,type:text"`
LastHash string `bun:"last_hash,type:text"`
LastConfig string `bun:"last_config,type:text"`
}
type newAgentConfigVersion41 struct {
bun.BaseModel `bun:"table:agent_config_version"`
types.Identifiable
types.TimeAuditable
types.UserAuditable
OrgID string `json:"orgId" bun:"org_id,type:text,notnull,unique:element_version_org_idx"`
Version int `json:"version" bun:"version,unique:element_version_org_idx"`
ElementType opamptypes.ElementType `json:"elementType" bun:"element_type,type:text,notnull,unique:element_version_org_idx"`
DeployStatus opamptypes.DeployStatus `json:"deployStatus" bun:"deploy_status,type:text,notnull,default:'dirty'"`
DeploySequence int `json:"deploySequence" bun:"deploy_sequence"`
DeployResult string `json:"deployResult" bun:"deploy_result,type:text"`
Hash string `json:"lastHash" bun:"hash,type:text"`
Config string `json:"config" bun:"config,type:text"`
}
type existingAgentConfigElement41 struct {
bun.BaseModel `bun:"table:agent_config_elements"`
ID string `bun:"id,pk,type:text"`
CreatedBy string `bun:"created_by,type:text"`
CreatedAt time.Time `bun:"created_at,default:CURRENT_TIMESTAMP"`
UpdatedBy string `bun:"updated_by,type:text"`
UpdatedAt time.Time `bun:"updated_at,default:CURRENT_TIMESTAMP"`
ElementID string `bun:"element_id,type:text,notnull,unique:agent_config_elements_u1"`
ElementType string `bun:"element_type,type:varchar(120),notnull,unique:agent_config_elements_u1"`
VersionID string `bun:"version_id,type:text,notnull,unique:agent_config_elements_u1"`
}
type newAgentConfigElement41 struct {
bun.BaseModel `bun:"table:agent_config_element"`
types.Identifiable
types.TimeAuditable
ElementID string `bun:"element_id,type:text,notnull,unique:element_type_version_idx"`
ElementType string `bun:"element_type,type:text,notnull,unique:element_type_version_idx"`
VersionID string `bun:"version_id,type:text,notnull,unique:element_type_version_idx"`
}
func NewUpdateAgentsFactory(sqlstore sqlstore.SQLStore) factory.ProviderFactory[SQLMigration, Config] {
return factory.NewProviderFactory(factory.MustNewName("update_agents"), func(ctx context.Context, ps factory.ProviderSettings, c Config) (SQLMigration, error) {
return newUpdateAgents(ctx, ps, c, sqlstore)
})
}
func newUpdateAgents(_ context.Context, _ factory.ProviderSettings, _ Config, store sqlstore.SQLStore) (SQLMigration, error) {
return &updateAgents{
store: store,
}, nil
}
func (migration *updateAgents) Register(migrations *migrate.Migrations) error {
if err := migrations.Register(migration.Up, migration.Down); err != nil {
return err
}
return nil
}
func (migration *updateAgents) Up(ctx context.Context, db *bun.DB) error {
// begin transaction
tx, err := db.BeginTx(ctx, nil)
if err != nil {
return err
}
defer func() {
_ = tx.Rollback()
}()
var orgID string
err = tx.
NewSelect().
ColumnExpr("id").
Table("organizations").
Limit(1).
Scan(ctx, &orgID)
if err != nil && err != sql.ErrNoRows {
return err
}
if _, err := tx.
NewDropTable().
IfExists().
Table("agents").
Exec(ctx); err != nil {
return err
}
if _, err := tx.
NewCreateTable().
IfNotExists().
Model(new(newAgent41)).
Exec(ctx); err != nil {
return err
}
err = migration.
store.
Dialect().
RenameTableAndModifyModel(ctx, tx, new(existingAgentConfigVersions41), new(newAgentConfigVersion41), []string{OrgReference}, func(ctx context.Context) error {
existingAgentConfigVersions := make([]*existingAgentConfigVersions41, 0)
err = tx.
NewSelect().
Model(&existingAgentConfigVersions).
Scan(ctx)
if err != nil && err != sql.ErrNoRows {
return err
}
if err == nil && len(existingAgentConfigVersions) > 0 {
newAgentConfigVersions, err := migration.
CopyOldAgentConfigVersionToNewAgentConfigVersion(ctx, tx, existingAgentConfigVersions, orgID)
if err != nil {
return err
}
_, err = tx.
NewInsert().
Model(&newAgentConfigVersions).
Exec(ctx)
if err != nil {
return err
}
}
return nil
})
if err != nil {
return err
}
err = migration.
store.
Dialect().
RenameTableAndModifyModel(ctx, tx, new(existingAgentConfigElement41), new(newAgentConfigElement41), []string{AgentConfigVersionReference}, func(ctx context.Context) error {
existingAgentConfigElements := make([]*existingAgentConfigElement41, 0)
err = tx.
NewSelect().
Model(&existingAgentConfigElements).
Scan(ctx)
if err != nil && err != sql.ErrNoRows {
return err
}
if err == nil && len(existingAgentConfigElements) > 0 {
newAgentConfigElements, err := migration.
CopyOldAgentConfigElementToNewAgentConfigElement(ctx, tx, existingAgentConfigElements, orgID)
if err != nil {
return err
}
_, err = tx.
NewInsert().
Model(&newAgentConfigElements).
Exec(ctx)
if err != nil {
return err
}
}
return nil
})
if err != nil {
return err
}
if err := tx.Commit(); err != nil {
return err
}
return nil
}
func (migration *updateAgents) Down(ctx context.Context, db *bun.DB) error {
return nil
}
func (migration *updateAgents) CopyOldAgentConfigVersionToNewAgentConfigVersion(ctx context.Context, tx bun.IDB, existingAgentConfigVersions []*existingAgentConfigVersions41, orgID string) ([]*newAgentConfigVersion41, error) {
newAgentConfigVersions := make([]*newAgentConfigVersion41, 0)
for _, existingAgentConfigVersion := range existingAgentConfigVersions {
versionID, err := valuer.NewUUID(existingAgentConfigVersion.ID)
if err != nil {
return nil, err
}
newAgentConfigVersions = append(newAgentConfigVersions, &newAgentConfigVersion41{
Identifiable: types.Identifiable{ID: versionID},
TimeAuditable: types.TimeAuditable{
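// time.Unix(t.Unix(), 0) round-trips the timestamps, dropping any sub-second precision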
CreatedAt: time.Unix(existingAgentConfigVersion.CreatedAt.Unix(), 0),
UpdatedAt: time.Unix(existingAgentConfigVersion.UpdatedAt.Unix(), 0),
},
UserAuditable: types.UserAuditable{
CreatedBy: existingAgentConfigVersion.CreatedBy,
UpdatedBy: existingAgentConfigVersion.UpdatedBy,
},
OrgID: orgID,
Version: existingAgentConfigVersion.Version,
ElementType: existingAgentConfigVersion.ElementType,
DeployStatus: existingAgentConfigVersion.DeployStatus,
DeploySequence: existingAgentConfigVersion.DeploySequence,
DeployResult: existingAgentConfigVersion.DeployResult,
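// the org ID is prefixed to the old hash, presumably to keep hashes distinct per org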
Hash: orgID + existingAgentConfigVersion.LastHash,
Config: existingAgentConfigVersion.LastConfig,
})
}
return newAgentConfigVersions, nil
}
func (migration *updateAgents) CopyOldAgentConfigElementToNewAgentConfigElement(ctx context.Context, tx bun.IDB, existingAgentConfigElements []*existingAgentConfigElement41, orgID string) ([]*newAgentConfigElement41, error) {
newAgentConfigElements := make([]*newAgentConfigElement41, 0)
for _, existingAgentConfigElement := range existingAgentConfigElements {
elementID, err := valuer.NewUUID(existingAgentConfigElement.ElementID)
if err != nil {
return nil, err
}
newAgentConfigElements = append(newAgentConfigElements, &newAgentConfigElement41{
Identifiable: types.Identifiable{ID: elementID},
TimeAuditable: types.TimeAuditable{
CreatedAt: time.Unix(existingAgentConfigElement.CreatedAt.Unix(), 0),
UpdatedAt: time.Unix(existingAgentConfigElement.UpdatedAt.Unix(), 0),
},
VersionID: existingAgentConfigElement.VersionID,
ElementID: existingAgentConfigElement.ElementID,
ElementType: existingAgentConfigElement.ElementType,
})
}
return newAgentConfigElements, nil
}


@@ -1,85 +0,0 @@
package sqlmigration
import (
"context"
"github.com/SigNoz/signoz/pkg/factory"
"github.com/SigNoz/signoz/pkg/sqlschema"
"github.com/SigNoz/signoz/pkg/sqlstore"
"github.com/uptrace/bun"
"github.com/uptrace/bun/migrate"
)
type updateUsers struct {
sqlstore sqlstore.SQLStore
sqlschema sqlschema.SQLSchema
}
func NewUpdateUsersFactory(sqlstore sqlstore.SQLStore, sqlschema sqlschema.SQLSchema) factory.ProviderFactory[SQLMigration, Config] {
return factory.NewProviderFactory(factory.MustNewName("update_users"), func(ctx context.Context, providerSettings factory.ProviderSettings, config Config) (SQLMigration, error) {
return newUpdateUsers(ctx, providerSettings, config, sqlstore, sqlschema)
})
}
func newUpdateUsers(_ context.Context, _ factory.ProviderSettings, _ Config, sqlstore sqlstore.SQLStore, sqlschema sqlschema.SQLSchema) (SQLMigration, error) {
return &updateUsers{
sqlstore: sqlstore,
sqlschema: sqlschema,
}, nil
}
func (migration *updateUsers) Register(migrations *migrate.Migrations) error {
if err := migrations.Register(migration.Up, migration.Down); err != nil {
return err
}
return nil
}
func (migration *updateUsers) Up(ctx context.Context, db *bun.DB) error {
if err := migration.sqlschema.ToggleFKEnforcement(ctx, db, false); err != nil {
return err
}
tx, err := db.BeginTx(ctx, nil)
if err != nil {
return err
}
defer func() {
_ = tx.Rollback()
}()
table, uniqueConstraints, err := migration.sqlschema.GetTable(ctx, sqlschema.TableName("users"))
if err != nil {
return err
}
sqls := [][]byte{}
dropSQLs := migration.sqlschema.Operator().DropConstraint(table, uniqueConstraints, &sqlschema.UniqueConstraint{ColumnNames: []sqlschema.ColumnName{"email"}})
sqls = append(sqls, dropSQLs...)
indexSQLs := migration.sqlschema.Operator().CreateIndex(&sqlschema.UniqueIndex{TableName: "users", ColumnNames: []sqlschema.ColumnName{"email", "org_id"}})
sqls = append(sqls, indexSQLs...)
for _, sql := range sqls {
if _, err := tx.ExecContext(ctx, string(sql)); err != nil {
return err
}
}
if err := tx.Commit(); err != nil {
return err
}
if err := migration.sqlschema.ToggleFKEnforcement(ctx, db, true); err != nil {
return err
}
return nil
}
func (migration *updateUsers) Down(ctx context.Context, db *bun.DB) error {
return nil
}
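This migration (and the two that follow for user_invite and org_domains) swaps a single-column unique constraint for a composite unique index, so uniqueness is scoped per org. The exact statements come from the sqlschema operator and are dialect-specific, but on Postgres they amount to roughly the following sketch; the constraint and index names here are hypothetical, since the operator derives the real ones from the live schema:

package example

import (
	"context"

	"github.com/uptrace/bun"
)

// replaceEmailUnique sketches, as plain SQL, what the DropConstraint and
// CreateIndex calls above amount to. Constraint and index names are
// hypothetical; sqlschema derives the real ones from the live schema.
func replaceEmailUnique(ctx context.Context, tx bun.Tx) error {
	stmts := []string{
		// drop the old single-column unique constraint on email
		`ALTER TABLE "users" DROP CONSTRAINT IF EXISTS "users_email_key"`,
		// enforce uniqueness per org instead
		`CREATE UNIQUE INDEX IF NOT EXISTS "uq_users_email_org_id" ON "users" ("email", "org_id")`,
	}
	for _, stmt := range stmts {
		if _, err := tx.ExecContext(ctx, stmt); err != nil {
			return err
		}
	}
	return nil
}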


@@ -1,88 +0,0 @@
package sqlmigration
import (
"context"
"github.com/SigNoz/signoz/pkg/factory"
"github.com/SigNoz/signoz/pkg/sqlschema"
"github.com/SigNoz/signoz/pkg/sqlstore"
"github.com/uptrace/bun"
"github.com/uptrace/bun/migrate"
)
type updateUserInvite struct {
sqlstore sqlstore.SQLStore
sqlschema sqlschema.SQLSchema
}
func NewUpdateUserInviteFactory(sqlstore sqlstore.SQLStore, sqlschema sqlschema.SQLSchema) factory.ProviderFactory[SQLMigration, Config] {
return factory.NewProviderFactory(factory.MustNewName("update_user_invite"), func(ctx context.Context, providerSettings factory.ProviderSettings, config Config) (SQLMigration, error) {
return newUpdateUserInvite(ctx, providerSettings, config, sqlstore, sqlschema)
})
}
func newUpdateUserInvite(_ context.Context, _ factory.ProviderSettings, _ Config, sqlstore sqlstore.SQLStore, sqlschema sqlschema.SQLSchema) (SQLMigration, error) {
return &updateUserInvite{
sqlstore: sqlstore,
sqlschema: sqlschema,
}, nil
}
func (migration *updateUserInvite) Register(migrations *migrate.Migrations) error {
if err := migrations.Register(migration.Up, migration.Down); err != nil {
return err
}
return nil
}
func (migration *updateUserInvite) Up(ctx context.Context, db *bun.DB) error {
if err := migration.sqlschema.ToggleFKEnforcement(ctx, db, false); err != nil {
return err
}
tx, err := db.BeginTx(ctx, nil)
if err != nil {
return err
}
defer func() {
_ = tx.Rollback()
}()
table, uniqueConstraints, err := migration.sqlschema.GetTable(ctx, sqlschema.TableName("user_invite"))
if err != nil {
return err
}
sqls := [][]byte{}
dropSQLs := migration.sqlschema.Operator().DropConstraint(table, uniqueConstraints, &sqlschema.UniqueConstraint{ColumnNames: []sqlschema.ColumnName{"email"}})
sqls = append(sqls, dropSQLs...)
indexSQLs := migration.sqlschema.Operator().CreateIndex(&sqlschema.UniqueIndex{TableName: "user_invite", ColumnNames: []sqlschema.ColumnName{"email", "org_id"}})
sqls = append(sqls, indexSQLs...)
indexSQLs = migration.sqlschema.Operator().CreateIndex(&sqlschema.UniqueIndex{TableName: "user_invite", ColumnNames: []sqlschema.ColumnName{"token"}})
sqls = append(sqls, indexSQLs...)
for _, sql := range sqls {
if _, err := tx.ExecContext(ctx, string(sql)); err != nil {
return err
}
}
if err := tx.Commit(); err != nil {
return err
}
if err := migration.sqlschema.ToggleFKEnforcement(ctx, db, true); err != nil {
return err
}
return nil
}
func (migration *updateUserInvite) Down(ctx context.Context, db *bun.DB) error {
return nil
}


@@ -1,85 +0,0 @@
package sqlmigration
import (
"context"
"github.com/SigNoz/signoz/pkg/factory"
"github.com/SigNoz/signoz/pkg/sqlschema"
"github.com/SigNoz/signoz/pkg/sqlstore"
"github.com/uptrace/bun"
"github.com/uptrace/bun/migrate"
)
type updateOrgDomain struct {
sqlstore sqlstore.SQLStore
sqlschema sqlschema.SQLSchema
}
func NewUpdateOrgDomainFactory(sqlstore sqlstore.SQLStore, sqlschema sqlschema.SQLSchema) factory.ProviderFactory[SQLMigration, Config] {
return factory.NewProviderFactory(factory.MustNewName("update_org_domain"), func(ctx context.Context, providerSettings factory.ProviderSettings, config Config) (SQLMigration, error) {
return newUpdateOrgDomain(ctx, providerSettings, config, sqlstore, sqlschema)
})
}
func newUpdateOrgDomain(_ context.Context, _ factory.ProviderSettings, _ Config, sqlstore sqlstore.SQLStore, sqlschema sqlschema.SQLSchema) (SQLMigration, error) {
return &updateOrgDomain{
sqlstore: sqlstore,
sqlschema: sqlschema,
}, nil
}
func (migration *updateOrgDomain) Register(migrations *migrate.Migrations) error {
if err := migrations.Register(migration.Up, migration.Down); err != nil {
return err
}
return nil
}
func (migration *updateOrgDomain) Up(ctx context.Context, db *bun.DB) error {
if err := migration.sqlschema.ToggleFKEnforcement(ctx, db, false); err != nil {
return err
}
tx, err := db.BeginTx(ctx, nil)
if err != nil {
return err
}
defer func() {
_ = tx.Rollback()
}()
table, uniqueConstraints, err := migration.sqlschema.GetTable(ctx, sqlschema.TableName("org_domains"))
if err != nil {
return err
}
sqls := [][]byte{}
dropSQLs := migration.sqlschema.Operator().DropConstraint(table, uniqueConstraints, &sqlschema.UniqueConstraint{ColumnNames: []sqlschema.ColumnName{"name"}})
sqls = append(sqls, dropSQLs...)
indexSQLs := migration.sqlschema.Operator().CreateIndex(&sqlschema.UniqueIndex{TableName: "org_domains", ColumnNames: []sqlschema.ColumnName{"name", "org_id"}})
sqls = append(sqls, indexSQLs...)
for _, sql := range sqls {
if _, err := tx.ExecContext(ctx, string(sql)); err != nil {
return err
}
}
if err := tx.Commit(); err != nil {
return err
}
if err := migration.sqlschema.ToggleFKEnforcement(ctx, db, true); err != nil {
return err
}
return nil
}
func (migration *updateOrgDomain) Down(ctx context.Context, db *bun.DB) error {
return nil
}


@@ -1,75 +0,0 @@
package sqlmigration
import (
"context"
"github.com/SigNoz/signoz/pkg/factory"
"github.com/SigNoz/signoz/pkg/sqlschema"
"github.com/SigNoz/signoz/pkg/sqlstore"
"github.com/uptrace/bun"
"github.com/uptrace/bun/migrate"
)
type addFactorIndexes struct {
sqlstore sqlstore.SQLStore
sqlschema sqlschema.SQLSchema
}
func NewAddFactorIndexesFactory(sqlstore sqlstore.SQLStore, sqlschema sqlschema.SQLSchema) factory.ProviderFactory[SQLMigration, Config] {
return factory.NewProviderFactory(factory.MustNewName("add_factor_indexes"), func(ctx context.Context, providerSettings factory.ProviderSettings, config Config) (SQLMigration, error) {
return newAddFactorIndexes(ctx, providerSettings, config, sqlstore, sqlschema)
})
}
func newAddFactorIndexes(_ context.Context, _ factory.ProviderSettings, _ Config, sqlstore sqlstore.SQLStore, sqlschema sqlschema.SQLSchema) (SQLMigration, error) {
return &addFactorIndexes{
sqlstore: sqlstore,
sqlschema: sqlschema,
}, nil
}
func (migration *addFactorIndexes) Register(migrations *migrate.Migrations) error {
if err := migrations.Register(migration.Up, migration.Down); err != nil {
return err
}
return nil
}
func (migration *addFactorIndexes) Up(ctx context.Context, db *bun.DB) error {
tx, err := db.BeginTx(ctx, nil)
if err != nil {
return err
}
defer func() {
_ = tx.Rollback()
}()
sqls := [][]byte{}
indexSQLs := migration.sqlschema.Operator().CreateIndex(&sqlschema.UniqueIndex{TableName: "factor_password", ColumnNames: []sqlschema.ColumnName{"user_id"}})
sqls = append(sqls, indexSQLs...)
indexSQLs = migration.sqlschema.Operator().CreateIndex(&sqlschema.UniqueIndex{TableName: "reset_password_token", ColumnNames: []sqlschema.ColumnName{"password_id"}})
sqls = append(sqls, indexSQLs...)
indexSQLs = migration.sqlschema.Operator().CreateIndex(&sqlschema.UniqueIndex{TableName: "reset_password_token", ColumnNames: []sqlschema.ColumnName{"token"}})
sqls = append(sqls, indexSQLs...)
for _, sql := range sqls {
if _, err := tx.ExecContext(ctx, string(sql)); err != nil {
return err
}
}
if err := tx.Commit(); err != nil {
return err
}
return nil
}
func (migration *addFactorIndexes) Down(ctx context.Context, db *bun.DB) error {
return nil
}


@@ -1,302 +0,0 @@
package sqlmigration
import (
"context"
"database/sql"
"encoding/json"
"log/slog"
"github.com/SigNoz/signoz/pkg/factory"
"github.com/SigNoz/signoz/pkg/sqlstore"
"github.com/SigNoz/signoz/pkg/telemetrystore"
"github.com/SigNoz/signoz/pkg/transition"
"github.com/uptrace/bun"
"github.com/uptrace/bun/migrate"
)
type queryBuilderV5Migration struct {
store sqlstore.SQLStore
telemetryStore telemetrystore.TelemetryStore
logger *slog.Logger
}
func NewQueryBuilderV5MigrationFactory(
store sqlstore.SQLStore,
telemetryStore telemetrystore.TelemetryStore,
) factory.ProviderFactory[SQLMigration, Config] {
return factory.NewProviderFactory(
factory.MustNewName("query_builder_v5_migration"),
func(ctx context.Context, ps factory.ProviderSettings, c Config) (SQLMigration, error) {
return newQueryBuilderV5Migration(ctx, c, store, telemetryStore, ps.Logger)
})
}
func newQueryBuilderV5Migration(
_ context.Context,
_ Config, store sqlstore.SQLStore,
telemetryStore telemetrystore.TelemetryStore,
logger *slog.Logger,
) (SQLMigration, error) {
return &queryBuilderV5Migration{store: store, telemetryStore: telemetryStore, logger: logger}, nil
}
func (migration *queryBuilderV5Migration) Register(migrations *migrate.Migrations) error {
if err := migrations.Register(migration.Up, migration.Down); err != nil {
return err
}
return nil
}
func (migration *queryBuilderV5Migration) getTraceDuplicateKeys(ctx context.Context) ([]string, error) {
query := `
SELECT tagKey
FROM signoz_traces.distributed_span_attributes_keys
WHERE tagType IN ('tag', 'resource')
GROUP BY tagKey
HAVING COUNT(DISTINCT tagType) > 1
ORDER BY tagKey
`
rows, err := migration.telemetryStore.ClickhouseDB().Query(ctx, query)
if err != nil {
migration.logger.WarnContext(ctx, "failed to query trace duplicate keys", "error", err)
return nil, nil
}
defer rows.Close()
var keys []string
for rows.Next() {
var key string
if err := rows.Scan(&key); err != nil {
migration.logger.WarnContext(ctx, "failed to scan trace duplicate key", "error", err)
continue
}
keys = append(keys, key)
}
return keys, nil
}
func (migration *queryBuilderV5Migration) getLogDuplicateKeys(ctx context.Context) ([]string, error) {
query := `
SELECT name
FROM (
SELECT DISTINCT name FROM signoz_logs.distributed_logs_attribute_keys
INTERSECT
SELECT DISTINCT name FROM signoz_logs.distributed_logs_resource_keys
)
ORDER BY name
`
rows, err := migration.telemetryStore.ClickhouseDB().Query(ctx, query)
if err != nil {
migration.logger.WarnContext(ctx, "failed to query log duplicate keys", "error", err)
return nil, nil
}
defer rows.Close()
var keys []string
for rows.Next() {
var key string
if err := rows.Scan(&key); err != nil {
migration.logger.WarnContext(ctx, "failed to scan log duplicate key", "error", err)
continue
}
keys = append(keys, key)
}
return keys, nil
}
func (migration *queryBuilderV5Migration) Up(ctx context.Context, db *bun.DB) error {
// fetch keys that have both attribute and resource attribute types
logsKeys, err := migration.getLogDuplicateKeys(ctx)
if err != nil {
return err
}
tracesKeys, err := migration.getTraceDuplicateKeys(ctx)
if err != nil {
return err
}
tx, err := db.BeginTx(ctx, nil)
if err != nil {
return err
}
defer func() {
_ = tx.Rollback()
}()
if err := migration.migrateDashboards(ctx, tx, logsKeys, tracesKeys); err != nil {
return err
}
if err := migration.migrateSavedViews(ctx, tx, logsKeys, tracesKeys); err != nil {
return err
}
if err := migration.migrateRules(ctx, tx, logsKeys, tracesKeys); err != nil {
return err
}
return tx.Commit()
}
func (migration *queryBuilderV5Migration) Down(ctx context.Context, db *bun.DB) error {
// this migration is not reversible as we're transforming the structure
return nil
}
func (migration *queryBuilderV5Migration) migrateDashboards(
ctx context.Context,
tx bun.Tx,
logsKeys []string,
tracesKeys []string,
) error {
var dashboards []struct {
ID string `bun:"id"`
Data map[string]any `bun:"data"`
}
err := tx.NewSelect().
Table("dashboard").
Column("id", "data").
Scan(ctx, &dashboards)
if err != nil {
if err == sql.ErrNoRows {
return nil
}
return err
}
dashboardMigrator := transition.NewDashboardMigrateV5(migration.logger, logsKeys, tracesKeys)
for _, dashboard := range dashboards {
updated := dashboardMigrator.Migrate(ctx, dashboard.Data)
if updated {
dataJSON, err := json.Marshal(dashboard.Data)
if err != nil {
return err
}
_, err = tx.NewUpdate().
Table("dashboard").
Set("data = ?", string(dataJSON)).
Where("id = ?", dashboard.ID).
Exec(ctx)
if err != nil {
return err
}
}
}
return nil
}
func (migration *queryBuilderV5Migration) migrateSavedViews(
ctx context.Context,
tx bun.Tx,
logsKeys []string,
tracesKeys []string,
) error {
var savedViews []struct {
ID string `bun:"id"`
Data string `bun:"data"`
}
err := tx.NewSelect().
Table("saved_views").
Column("id", "data").
Scan(ctx, &savedViews)
if err != nil {
if err == sql.ErrNoRows {
return nil
}
return err
}
savedViewsMigrator := transition.NewSavedViewMigrateV5(migration.logger, logsKeys, tracesKeys)
for _, savedView := range savedViews {
var data map[string]any
if err := json.Unmarshal([]byte(savedView.Data), &data); err != nil {
continue // invalid JSON
}
updated := savedViewsMigrator.Migrate(ctx, data)
if updated {
dataJSON, err := json.Marshal(data)
if err != nil {
return err
}
_, err = tx.NewUpdate().
Table("saved_views").
Set("data = ?", string(dataJSON)).
Where("id = ?", savedView.ID).
Exec(ctx)
if err != nil {
return err
}
}
}
return nil
}
func (migration *queryBuilderV5Migration) migrateRules(
ctx context.Context,
tx bun.Tx,
logsKeys []string,
tracesKeys []string,
) error {
// Fetch all rules
var rules []struct {
ID string `bun:"id"`
Data map[string]any `bun:"data"`
}
err := tx.NewSelect().
Table("rule").
Column("id", "data").
Scan(ctx, &rules)
if err != nil {
if err == sql.ErrNoRows {
return nil
}
return err
}
alertsMigrator := transition.NewAlertMigrateV5(migration.logger, logsKeys, tracesKeys)
for _, rule := range rules {
migration.logger.InfoContext(ctx, "migrating rule", "rule_id", rule.ID)
updated := alertsMigrator.Migrate(ctx, rule.Data)
if updated {
dataJSON, err := json.Marshal(rule.Data)
if err != nil {
return err
}
_, err = tx.NewUpdate().
Table("rule").
Set("data = ?", string(dataJSON)).
Where("id = ?", rule.ID).
Exec(ctx)
if err != nil {
return err
}
}
}
return nil
}


@@ -1,137 +0,0 @@
package sqlmigration
import (
"context"
"database/sql"
"encoding/json"
"time"
"github.com/SigNoz/signoz/pkg/errors"
"github.com/SigNoz/signoz/pkg/factory"
"github.com/SigNoz/signoz/pkg/sqlschema"
"github.com/SigNoz/signoz/pkg/sqlstore"
"github.com/SigNoz/signoz/pkg/valuer"
"github.com/uptrace/bun"
"github.com/uptrace/bun/migrate"
)
type addMeterQuickFilters struct {
sqlstore sqlstore.SQLStore
sqlschema sqlschema.SQLSchema
}
func NewAddMeterQuickFiltersFactory(sqlstore sqlstore.SQLStore, sqlschema sqlschema.SQLSchema) factory.ProviderFactory[SQLMigration, Config] {
return factory.NewProviderFactory(factory.MustNewName("add_meter_quick_filters"), func(ctx context.Context, providerSettings factory.ProviderSettings, config Config) (SQLMigration, error) {
return newAddMeterQuickFilters(ctx, providerSettings, config, sqlstore, sqlschema)
})
}
func newAddMeterQuickFilters(_ context.Context, _ factory.ProviderSettings, _ Config, sqlstore sqlstore.SQLStore, sqlschema sqlschema.SQLSchema) (SQLMigration, error) {
return &addMeterQuickFilters{
sqlstore: sqlstore,
sqlschema: sqlschema,
}, nil
}
func (migration *addMeterQuickFilters) Register(migrations *migrate.Migrations) error {
if err := migrations.Register(migration.Up, migration.Down); err != nil {
return err
}
return nil
}
func (migration *addMeterQuickFilters) Up(ctx context.Context, db *bun.DB) error {
meterFilters := []map[string]interface{}{
{"key": "deployment.environment", "dataType": "float64", "type": "Sum"},
{"key": "service.name", "dataType": "float64", "type": "Sum"},
{"key": "host.name", "dataType": "float64", "type": "Sum"},
}
meterJSON, err := json.Marshal(meterFilters)
if err != nil {
return errors.Wrapf(err, errors.TypeInternal, errors.CodeInternal, "failed to marshal meter filters")
}
type signal struct {
valuer.String
}
type identifiable struct {
ID valuer.UUID `json:"id" bun:"id,pk,type:text"`
}
type timeAuditable struct {
CreatedAt time.Time `bun:"created_at" json:"createdAt"`
UpdatedAt time.Time `bun:"updated_at" json:"updatedAt"`
}
type quickFilterType struct {
bun.BaseModel `bun:"table:quick_filter"`
identifiable
OrgID valuer.UUID `bun:"org_id,type:text,notnull"`
Filter string `bun:"filter,type:text,notnull"`
Signal signal `bun:"signal,type:text,notnull"`
timeAuditable
}
tx, err := db.BeginTx(ctx, nil)
if err != nil {
return err
}
defer func() {
_ = tx.Rollback()
}()
var orgIDs []string
err = tx.NewSelect().
Table("organizations").
Column("id").
Scan(ctx, &orgIDs)
if err != nil && err != sql.ErrNoRows {
return err
}
var meterFiltersToInsert []quickFilterType
for _, orgIDStr := range orgIDs {
orgID, err := valuer.NewUUID(orgIDStr)
if err != nil {
return err
}
meterFiltersToInsert = append(meterFiltersToInsert, quickFilterType{
identifiable: identifiable{
ID: valuer.GenerateUUID(),
},
OrgID: orgID,
Filter: string(meterJSON),
Signal: signal{valuer.NewString("meter")},
timeAuditable: timeAuditable{
CreatedAt: time.Now(),
UpdatedAt: time.Now(),
},
})
}
if len(meterFiltersToInsert) > 0 {
_, err = tx.NewInsert().
Model(&meterFiltersToInsert).
On("CONFLICT (org_id, signal) DO UPDATE").
Set("filter = EXCLUDED.filter, updated_at = EXCLUDED.updated_at").
Exec(ctx)
if err != nil {
return err
}
}
if err := tx.Commit(); err != nil {
return err
}
return nil
}
func (migration *addMeterQuickFilters) Down(ctx context.Context, db *bun.DB) error {
return nil
}
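The insert above relies on bun's ON CONFLICT support, so re-running the migration updates the existing meter filter for each (org_id, signal) pair instead of failing on the unique key. A minimal sketch of the same upsert pattern against a hypothetical table:

package example

import (
	"context"

	"github.com/uptrace/bun"
)

// QuickFilterRow is a hypothetical stand-in with a unique key on ID.
type QuickFilterRow struct {
	bun.BaseModel `bun:"table:quick_filter_example"`
	ID            int64  `bun:"id,pk"`
	Filter        string `bun:"filter,notnull"`
}

// upsertFilter inserts a row, or updates the existing one when the unique
// key already exists, matching the meter-filter insert above.
func upsertFilter(ctx context.Context, db *bun.DB, row *QuickFilterRow) error {
	_, err := db.NewInsert().
		Model(row).
		On("CONFLICT (id) DO UPDATE").
		Set("filter = EXCLUDED.filter").
		Exec(ctx)
	return err
}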

Some files were not shown because too many files have changed in this diff.