diff --git a/scnserver/TODO.md b/scnserver/TODO.md index 129cc43..c21348b 100644 --- a/scnserver/TODO.md +++ b/scnserver/TODO.md @@ -12,11 +12,6 @@ - exerr.New | exerr.Wrap - - Properly handle UNREGISTERED firebase error (remove token from client?) - WRN logic/application.go:284 > FCM Delivery failed error="FCM-Request returned 404: - { \"error\": {\n \"code\": 404,\n \"message\": \"Requested entity was not found.\",\n \"status\": \"NOT_FOUND\",\n \"details\": [\n {\n \"@type\": \"type.googleapis.com/google.firebase.fcm.v1.FcmError\",\n \"errorCode\": \"UNREGISTERED\"\n }\n ]\n }\n}\n" - ClientID=CLNGOSVIaCnm5cQmCI0pC5kR MessageID=MSG8w7NvVRm0OtJERnJlEe3C - #### UNSURE - (?) default-priority for channels diff --git a/scnserver/api/handler/apiUser.go b/scnserver/api/handler/apiUser.go index 76a30fa..0538093 100644 --- a/scnserver/api/handler/apiUser.go +++ b/scnserver/api/handler/apiUser.go @@ -80,7 +80,7 @@ func (h APIHandler) CreateUser(pctx ginext.PreContext) ginext.HTTPResponse { sendKey := h.app.GenerateRandomAuthKey() adminKey := h.app.GenerateRandomAuthKey() - err := h.database.ClearFCMTokens(ctx, b.FCMToken) + err := h.database.DeleteClientsByFCM(ctx, b.FCMToken) if err != nil { return ginresp.APIError(g, 500, apierr.DATABASE_ERROR, "Failed to clear existing fcm tokens", err) } diff --git a/scnserver/api/handler/compat.go b/scnserver/api/handler/compat.go index 8d4de49..24a7c46 100644 --- a/scnserver/api/handler/compat.go +++ b/scnserver/api/handler/compat.go @@ -189,7 +189,7 @@ func (h CompatHandler) Register(pctx ginext.PreContext) ginext.HTTPResponse { adminKey := h.app.GenerateRandomAuthKey() - err := h.database.ClearFCMTokens(ctx, *data.FCMToken) + err := h.database.DeleteClientsByFCM(ctx, *data.FCMToken) if err != nil { return ginresp.CompatAPIError(0, "Failed to clear existing fcm tokens") } diff --git a/scnserver/db/impl/logs/database.go b/scnserver/db/impl/logs/database.go index 56254fe..c9f1aa1 100644 --- a/scnserver/db/impl/logs/database.go +++ 
b/scnserver/db/impl/logs/database.go @@ -13,15 +13,21 @@ import ( "github.com/glebarez/go-sqlite" "github.com/jmoiron/sqlx" "github.com/rs/zerolog/log" + "gogs.mikescher.com/BlackForestBytes/goext/exerr" "gogs.mikescher.com/BlackForestBytes/goext/langext" "gogs.mikescher.com/BlackForestBytes/goext/sq" + "os" + "path/filepath" "time" ) type Database struct { - db sq.DB - pp *dbtools.DBPreprocessor - wal bool + db sq.DB + pp *dbtools.DBPreprocessor + wal bool + name string + schemaVersion int + schema map[int]schema.Def } func NewLogsDatabase(cfg server.Config) (*Database, error) { @@ -66,7 +72,14 @@ func NewLogsDatabase(cfg server.Config) (*Database, error) { qqdb.AddListener(pp) - scndb := &Database{db: qqdb, pp: pp, wal: conf.Journal == "WAL"} + scndb := &Database{ + db: qqdb, + pp: pp, + wal: conf.Journal == "WAL", + schemaVersion: schema.LogsSchemaVersion, + schema: schema.LogsSchema, + name: "logs", + } return scndb, nil } @@ -99,52 +112,49 @@ func (db *Database) Migrate(outerctx context.Context) error { return err } - if currschema == 0 { - schemastr := schema.LogsSchema[schema.LogsSchemaVersion].SQL - schemahash := schema.LogsSchema[schema.LogsSchemaVersion].Hash - - _, err = tx.Exec(tctx, schemastr, sq.PP{}) - if err != nil { - return err - } - - err = db.WriteMetaInt(tctx, "schema", int64(schema.LogsSchemaVersion)) - if err != nil { - return err - } - - err = db.WriteMetaString(tctx, "schema_hash", schemahash) - if err != nil { - return err - } - - ppReInit = true - - currschema = schema.LogsSchemaVersion + if currschema == db.schemaVersion { + log.Info().Msgf("Database [%s] is up-to-date (%d == %d)", db.name, currschema, db.schemaVersion) } - if currschema == 1 { - schemHashDB, err := sq.HashSqliteDatabase(tctx, tx) - if err != nil { - return err - } + for currschema < db.schemaVersion { - schemaHashMeta, err := db.ReadMetaString(tctx, "schema_hash") - if err != nil { - return err - } + if currschema == 0 { + log.Info().Msgf("Migrate database (initialize) 
[%s] %d -> %d", db.name, currschema, db.schemaVersion) - if schemHashDB != langext.Coalesce(schemaHashMeta, "") || langext.Coalesce(schemaHashMeta, "") != schema.LogsSchema[currschema].Hash { - log.Debug().Str("schemHashDB", schemHashDB).Msg("Schema (logs db)") - log.Debug().Str("schemaHashMeta", langext.Coalesce(schemaHashMeta, "")).Msg("Schema (logs db)") - log.Debug().Str("schemaHashAsset", schema.LogsSchema[currschema].Hash).Msg("Schema (logs db)") - return errors.New("database schema does not match (logs db)") + schemastr := db.schema[1].SQL + schemahash := db.schema[1].Hash + + _, err = tx.Exec(tctx, schemastr, sq.PP{}) + if err != nil { + return err + } + + err = db.WriteMetaInt(tctx, "schema", int64(db.schemaVersion)) + if err != nil { + return err + } + + err = db.WriteMetaString(tctx, "schema_hash", schemahash) + if err != nil { + return err + } + + ppReInit = true + + currschema = db.schemaVersion } else { - log.Debug().Str("schemHash", schemHashDB).Msg("Verified Schema consistency (logs db)") + log.Info().Msgf("Migrate database [%s] %d -> %d", db.name, currschema, currschema+1) + + err = db.migrateSingle(tctx, tx, currschema, currschema+1) + if err != nil { + return err + } + + currschema = currschema + 1 } } - if currschema != schema.LogsSchemaVersion { + if currschema != db.schemaVersion { return errors.New(fmt.Sprintf("Unknown DB schema: %d", currschema)) } @@ -164,6 +174,100 @@ func (db *Database) Migrate(outerctx context.Context) error { return nil } +//goland:noinspection SqlConstantCondition,SqlWithoutWhere +func (db *Database) migrateSingle(tctx *simplectx.SimpleContext, tx sq.Tx, schemaFrom int, schemaTo int) error { + + // ADD MIGRATIONS HERE ... 
+ + return exerr.New(exerr.TypeInternal, fmt.Sprintf("missing %s migration from %d to %d", db.name, schemaFrom, schemaTo)).Build() +} + +func (db *Database) migrateBySQL(tctx *simplectx.SimpleContext, tx sq.Tx, stmts string, currSchemaVers int, resultSchemVers int, resultHash string, post func(tctx *simplectx.SimpleContext, tx sq.Tx) error) error { + + schemaHashMeta, err := db.ReadMetaString(tctx, "schema_hash") + if err != nil { + return err + } + + schemHashDBBefore, err := sq.HashSqliteDatabase(tctx, tx) + if err != nil { + return err + } + + if schemHashDBBefore != langext.Coalesce(schemaHashMeta, "") || langext.Coalesce(schemaHashMeta, "") != db.schema[currSchemaVers].Hash { + log.Debug().Str("schemHashDB", schemHashDBBefore).Msg("Schema (primary db)") + log.Debug().Str("schemaHashMeta", langext.Coalesce(schemaHashMeta, "")).Msg("Schema (primary db)") + log.Debug().Str("schemaHashAsset", db.schema[currSchemaVers].Hash).Msg("Schema (primary db)") + return errors.New("database schema does not match (primary db)") + } else { + log.Debug().Str("schemHash", schemHashDBBefore).Msg("Verified Schema consistency (primary db)") + } + + log.Info().Msgf("Upgrade schema from %d -> %d", currSchemaVers, resultSchemVers) + + _, err = tx.Exec(tctx, stmts, sq.PP{}) + if err != nil { + return err + } + + schemHashDBAfter, err := sq.HashSqliteDatabase(tctx, tx) + if err != nil { + return err + } + + if schemHashDBAfter != resultHash { + + schemaDBStr := langext.Must(createSqliteDatabaseSchemaStringFromSQL(tctx, db.schema[resultSchemVers].SQL)) + resultDBStr := langext.Must(sq.CreateSqliteDatabaseSchemaString(tctx, tx)) + + fmt.Printf("========================================= SQL SCHEMA-DUMP STR (CORRECT | FROM COMPILED SCHEMA):%s\n=========================================\n\n", schemaDBStr) + fmt.Printf("========================================= SQL SCHEMA-DUMP STR (CURRENT | AFTER MIGRATION):%s\n=========================================\n\n", resultDBStr) + + return
fmt.Errorf("database [%s] schema does not match after [%d -> %d] migration (expected: %s | actual: %s)", db.name, currSchemaVers, resultSchemVers, resultHash, schemHashDBAfter) + } + + err = db.WriteMetaInt(tctx, "schema", int64(resultSchemVers)) + if err != nil { + return err + } + + err = db.WriteMetaString(tctx, "schema_hash", resultHash) + if err != nil { + return err + } + + log.Info().Msgf("Upgrade schema from %d -> %d successfully", currSchemaVers, resultSchemVers) + + return nil +} + +func createSqliteDatabaseSchemaStringFromSQL(ctx context.Context, schemaStr string) (string, error) { + dbdir := os.TempDir() + dbfile1 := filepath.Join(dbdir, langext.MustHexUUID()+".sqlite3") + defer func() { _ = os.Remove(dbfile1) }() + + err := os.MkdirAll(dbdir, os.ModePerm) + if err != nil { + return "", err + } + + url := fmt.Sprintf("file:%s?_pragma=journal_mode(%s)&_pragma=timeout(%d)&_pragma=foreign_keys(%s)&_pragma=busy_timeout(%d)", dbfile1, "DELETE", 1000, "true", 1000) + + xdb, err := sqlx.Open("sqlite", url) + if err != nil { + return "", err + } + + db := sq.NewDB(xdb, sq.DBOptions{}) + + _, err = db.Exec(ctx, schemaStr, sq.PP{}) + if err != nil { + return "", err + } + + return sq.CreateSqliteDatabaseSchemaString(ctx, db) +} + func (db *Database) Ping(ctx context.Context) error { return db.db.Ping(ctx) } diff --git a/scnserver/db/impl/primary/clients.go b/scnserver/db/impl/primary/clients.go index 13ee113..468cb84 100644 --- a/scnserver/db/impl/primary/clients.go +++ b/scnserver/db/impl/primary/clients.go @@ -31,27 +31,13 @@ func (db *Database) CreateClient(ctx db.TxContext, userid models.UserID, ctype m return entity, nil } -func (db *Database) ClearFCMTokens(ctx db.TxContext, fcmtoken string) error { - tx, err := ctx.GetOrCreateTransaction(db) - if err != nil { - return err - } - - _, err = tx.Exec(ctx, "DELETE FROM clients WHERE fcm_token = :fcm", sq.PP{"fcm": fcmtoken}) - if err != nil { - return err - } - - return nil -} - func (db *Database)
ListClients(ctx db.TxContext, userid models.UserID) ([]models.Client, error) { tx, err := ctx.GetOrCreateTransaction(db) if err != nil { return nil, err } - return sq.QueryAll[models.Client](ctx, tx, "SELECT * FROM clients WHERE user_id = :uid ORDER BY clients.timestamp_created DESC, clients.client_id ASC", sq.PP{"uid": userid}, sq.SModeExtended, sq.Safe) + return sq.QueryAll[models.Client](ctx, tx, "SELECT * FROM clients WHERE deleted=0 AND user_id = :uid ORDER BY clients.timestamp_created DESC, clients.client_id ASC", sq.PP{"uid": userid}, sq.SModeExtended, sq.Safe) } func (db *Database) GetClient(ctx db.TxContext, userid models.UserID, clientid models.ClientID) (models.Client, error) { @@ -60,7 +46,7 @@ func (db *Database) GetClient(ctx db.TxContext, userid models.UserID, clientid m return models.Client{}, err } - return sq.QuerySingle[models.Client](ctx, tx, "SELECT * FROM clients WHERE user_id = :uid AND client_id = :cid LIMIT 1", sq.PP{ + return sq.QuerySingle[models.Client](ctx, tx, "SELECT * FROM clients WHERE deleted=0 AND user_id = :uid AND client_id = :cid LIMIT 1", sq.PP{ "uid": userid, "cid": clientid, }, sq.SModeExtended, sq.Safe) @@ -72,7 +58,7 @@ func (db *Database) DeleteClient(ctx db.TxContext, clientid models.ClientID) err return err } - _, err = tx.Exec(ctx, "DELETE FROM clients WHERE client_id = :cid", sq.PP{"cid": clientid}) + _, err = tx.Exec(ctx, "UPDATE clients SET deleted=1 WHERE deleted=0 AND client_id = :cid", sq.PP{"cid": clientid}) if err != nil { return err } @@ -86,7 +72,7 @@ func (db *Database) DeleteClientsByFCM(ctx db.TxContext, fcmtoken string) error return err } - _, err = tx.Exec(ctx, "DELETE FROM clients WHERE fcm_token = :fcm", sq.PP{"fcm": fcmtoken}) + _, err = tx.Exec(ctx, "UPDATE clients SET deleted=1 WHERE deleted=0 AND fcm_token = :fcm", sq.PP{"fcm": fcmtoken}) if err != nil { return err } @@ -100,7 +86,7 @@ func (db *Database) UpdateClientFCMToken(ctx db.TxContext, clientid models.Clien return err } - _, err = 
tx.Exec(ctx, "UPDATE clients SET fcm_token = :vvv WHERE client_id = :cid", sq.PP{ + _, err = tx.Exec(ctx, "UPDATE clients SET fcm_token = :vvv WHERE deleted=0 AND client_id = :cid", sq.PP{ "vvv": fcmtoken, "cid": clientid, }) @@ -117,7 +103,7 @@ func (db *Database) UpdateClientAgentModel(ctx db.TxContext, clientid models.Cli return err } - _, err = tx.Exec(ctx, "UPDATE clients SET agent_model = :vvv WHERE client_id = :cid", sq.PP{ + _, err = tx.Exec(ctx, "UPDATE clients SET agent_model = :vvv WHERE deleted=0 AND client_id = :cid", sq.PP{ "vvv": agentModel, "cid": clientid, }) @@ -134,7 +120,7 @@ func (db *Database) UpdateClientAgentVersion(ctx db.TxContext, clientid models.C return err } - _, err = tx.Exec(ctx, "UPDATE clients SET agent_version = :vvv WHERE client_id = :cid", sq.PP{ + _, err = tx.Exec(ctx, "UPDATE clients SET agent_version = :vvv WHERE deleted=0 AND client_id = :cid", sq.PP{ "vvv": agentVersion, "cid": clientid, }) @@ -151,7 +137,7 @@ func (db *Database) UpdateClientDescriptionName(ctx db.TxContext, clientid model return err } - _, err = tx.Exec(ctx, "UPDATE clients SET name = :vvv WHERE client_id = :cid", sq.PP{ + _, err = tx.Exec(ctx, "UPDATE clients SET name = :vvv WHERE deleted=0 AND client_id = :cid", sq.PP{ "vvv": descriptionName, "cid": clientid, }) diff --git a/scnserver/db/impl/primary/database.go b/scnserver/db/impl/primary/database.go index a03a96a..a928e72 100644 --- a/scnserver/db/impl/primary/database.go +++ b/scnserver/db/impl/primary/database.go @@ -13,15 +13,21 @@ import ( "github.com/glebarez/go-sqlite" "github.com/jmoiron/sqlx" "github.com/rs/zerolog/log" + "gogs.mikescher.com/BlackForestBytes/goext/exerr" "gogs.mikescher.com/BlackForestBytes/goext/langext" "gogs.mikescher.com/BlackForestBytes/goext/sq" + "os" + "path/filepath" "time" ) type Database struct { - db sq.DB - pp *dbtools.DBPreprocessor - wal bool + db sq.DB + pp *dbtools.DBPreprocessor + wal bool + name string + schemaVersion int + schema map[int]schema.Def } func 
NewPrimaryDatabase(cfg server.Config) (*Database, error) { @@ -66,7 +72,14 @@ func NewPrimaryDatabase(cfg server.Config) (*Database, error) { qqdb.AddListener(pp) - scndb := &Database{db: qqdb, pp: pp, wal: conf.Journal == "WAL"} + scndb := &Database{ + db: qqdb, + pp: pp, + wal: conf.Journal == "WAL", + schemaVersion: schema.PrimarySchemaVersion, + schema: schema.PrimarySchema, + name: "primary", + } return scndb, nil } @@ -99,196 +112,49 @@ func (db *Database) Migrate(outerctx context.Context) error { return err } - if currschema == 0 { - schemastr := schema.PrimarySchema[schema.PrimarySchemaVersion].SQL - schemahash := schema.PrimarySchema[schema.PrimarySchemaVersion].Hash - - _, err = tx.Exec(tctx, schemastr, sq.PP{}) - if err != nil { - return err - } - - err = db.WriteMetaInt(tctx, "schema", int64(schema.PrimarySchemaVersion)) - if err != nil { - return err - } - - err = db.WriteMetaString(tctx, "schema_hash", schemahash) - if err != nil { - return err - } - - ppReInit = true - - currschema = schema.PrimarySchemaVersion + if currschema == db.schemaVersion { + log.Info().Msgf("Database [%s] is up-to-date (%d == %d)", db.name, currschema, db.schemaVersion) } - if currschema == 1 { - return errors.New("cannot autom. upgrade schema 1") - } + for currschema < db.schemaVersion { - if currschema == 2 { - return errors.New("cannot autom. 
upgrade schema 2") - } + if currschema == 0 { + log.Info().Msgf("Migrate database (initialize) [%s] %d -> %d", db.name, currschema, db.schemaVersion) - if currschema == 3 { + schemastr := db.schema[1].SQL + schemahash := db.schema[1].Hash - schemaHashMeta, err := db.ReadMetaString(tctx, "schema_hash") - if err != nil { - return err - } + _, err = tx.Exec(tctx, schemastr, sq.PP{}) + if err != nil { + return err + } - schemHashDB, err := sq.HashSqliteDatabase(tctx, tx) - if err != nil { - return err - } + err = db.WriteMetaInt(tctx, "schema", int64(db.schemaVersion)) + if err != nil { + return err + } - if schemHashDB != langext.Coalesce(schemaHashMeta, "") || langext.Coalesce(schemaHashMeta, "") != schema.PrimarySchema[currschema].Hash { - log.Debug().Str("schemHashDB", schemHashDB).Msg("Schema (primary db)") - log.Debug().Str("schemaHashMeta", langext.Coalesce(schemaHashMeta, "")).Msg("Schema (primary db)") - log.Debug().Str("schemaHashAsset", schema.PrimarySchema[currschema].Hash).Msg("Schema (primary db)") - return errors.New("database schema does not match (primary db)") + err = db.WriteMetaString(tctx, "schema_hash", schemahash) + if err != nil { + return err + } + + ppReInit = true + + currschema = db.schemaVersion } else { - log.Debug().Str("schemHash", schemHashDB).Msg("Verified Schema consistency (primary db)") - } + log.Info().Msgf("Migrate database [%s] %d -> %d", db.name, currschema, currschema+1) - log.Info().Int("currschema", currschema).Msg("Upgrade schema from 3 -> 4") + err = db.migrateSingle(tctx, tx, currschema, currschema+1) + if err != nil { + return err + } - _, err = tx.Exec(tctx, schema.PrimaryMigration_3_4, sq.PP{}) - if err != nil { - return err - } - - currschema = 4 - - err = db.WriteMetaInt(tctx, "schema", int64(currschema)) - if err != nil { - return err - } - - err = db.WriteMetaString(tctx, "schema_hash", schema.PrimarySchema[currschema].Hash) - if err != nil { - return err - } - - log.Info().Int("currschema", currschema).Msg("Upgrade 
schema from 3 -> 4 succesfully") - - ppReInit = true - } - - if currschema == 4 { - - schemaHashMeta, err := db.ReadMetaString(tctx, "schema_hash") - if err != nil { - return err - } - - schemHashDB, err := sq.HashSqliteDatabase(tctx, tx) - if err != nil { - return err - } - - if schemHashDB != langext.Coalesce(schemaHashMeta, "") || langext.Coalesce(schemaHashMeta, "") != schema.PrimarySchema[currschema].Hash { - log.Debug().Str("schemHashDB", schemHashDB).Msg("Schema (primary db)") - log.Debug().Str("schemaHashMeta", langext.Coalesce(schemaHashMeta, "")).Msg("Schema (primary db)") - log.Debug().Str("schemaHashAsset", schema.PrimarySchema[currschema].Hash).Msg("Schema (primary db)") - return errors.New("database schema does not match (primary db)") - } else { - log.Debug().Str("schemHash", schemHashDB).Msg("Verified Schema consistency (primary db)") - } - - log.Info().Int("currschema", currschema).Msg("Upgrade schema from 4 -> 5") - - _, err = tx.Exec(tctx, schema.PrimaryMigration_4_5, sq.PP{}) - if err != nil { - return err - } - - currschema = 5 - - err = db.WriteMetaInt(tctx, "schema", int64(currschema)) - if err != nil { - return err - } - - err = db.WriteMetaString(tctx, "schema_hash", schema.PrimarySchema[currschema].Hash) - if err != nil { - return err - } - - log.Info().Int("currschema", currschema).Msg("Upgrade schema from 4 -> 5 succesfully") - - ppReInit = true - } - - if currschema == 5 { - - schemaHashMeta, err := db.ReadMetaString(tctx, "schema_hash") - if err != nil { - return err - } - - schemHashDB, err := sq.HashSqliteDatabase(tctx, tx) - if err != nil { - return err - } - - if schemHashDB != langext.Coalesce(schemaHashMeta, "") || langext.Coalesce(schemaHashMeta, "") != schema.PrimarySchema[currschema].Hash { - log.Debug().Str("schemHashDB", schemHashDB).Msg("Schema (primary db)") - log.Debug().Str("schemaHashMeta", langext.Coalesce(schemaHashMeta, "")).Msg("Schema (primary db)") - log.Debug().Str("schemaHashAsset", 
schema.PrimarySchema[currschema].Hash).Msg("Schema (primary db)") - return errors.New("database schema does not match (primary db)") - } else { - log.Debug().Str("schemHash", schemHashDB).Msg("Verified Schema consistency (primary db)") - } - - log.Info().Int("currschema", currschema).Msg("Upgrade schema from 5 -> 6") - - _, err = tx.Exec(tctx, schema.PrimaryMigration_5_6, sq.PP{}) - if err != nil { - return err - } - - currschema = 6 - - err = db.WriteMetaInt(tctx, "schema", int64(currschema)) - if err != nil { - return err - } - - err = db.WriteMetaString(tctx, "schema_hash", schema.PrimarySchema[currschema].Hash) - if err != nil { - return err - } - - log.Info().Int("currschema", currschema).Msg("Upgrade schema from 5 -> 6 succesfully") - - ppReInit = true - } - - if currschema == 6 { - - schemaHashMeta, err := db.ReadMetaString(tctx, "schema_hash") - if err != nil { - return err - } - - schemHashDB, err := sq.HashSqliteDatabase(tctx, tx) - if err != nil { - return err - } - - if schemHashDB != langext.Coalesce(schemaHashMeta, "") || langext.Coalesce(schemaHashMeta, "") != schema.PrimarySchema[currschema].Hash { - log.Debug().Str("schemHashDB", schemHashDB).Msg("Schema (primary db)") - log.Debug().Str("schemaHashMeta", langext.Coalesce(schemaHashMeta, "")).Msg("Schema (primary db)") - log.Debug().Str("schemaHashAsset", schema.PrimarySchema[currschema].Hash).Msg("Schema (primary db)") - return errors.New("database schema does not match (primary db)") - } else { - log.Debug().Str("schemHash", schemHashDB).Msg("Verified Schema consistency (primary db)") + currschema = currschema + 1 } } - if currschema != schema.PrimarySchemaVersion { + if currschema != db.schemaVersion { return errors.New(fmt.Sprintf("Unknown DB schema: %d", currschema)) } @@ -308,6 +174,114 @@ func (db *Database) Migrate(outerctx context.Context) error { return nil } +//goland:noinspection SqlConstantCondition,SqlWithoutWhere +func (db *Database) migrateSingle(tctx *simplectx.SimpleContext, tx 
sq.Tx, schemaFrom int, schemaTo int) error { + + if schemaFrom == 3 && schemaTo == 4 { + return db.migrateBySQL(tctx, tx, schema.PrimaryMigration_3_4, schemaFrom, schemaTo, db.schema[schemaTo].Hash, nil) + } + + if schemaFrom == 4 && schemaTo == 5 { + return db.migrateBySQL(tctx, tx, schema.PrimaryMigration_4_5, schemaFrom, schemaTo, db.schema[schemaTo].Hash, nil) + } + + if schemaFrom == 5 && schemaTo == 6 { + return db.migrateBySQL(tctx, tx, schema.PrimaryMigration_5_6, schemaFrom, schemaTo, db.schema[schemaTo].Hash, nil) + } + + if schemaFrom == 6 && schemaTo == 7 { + return db.migrateBySQL(tctx, tx, schema.PrimaryMigration_6_7, schemaFrom, schemaTo, db.schema[schemaTo].Hash, nil) + } + + return exerr.New(exerr.TypeInternal, fmt.Sprintf("missing %s migration from %d to %d", db.name, schemaFrom, schemaTo)).Build() +} + +func (db *Database) migrateBySQL(tctx *simplectx.SimpleContext, tx sq.Tx, stmts string, currSchemaVers int, resultSchemVers int, resultHash string, post func(tctx *simplectx.SimpleContext, tx sq.Tx) error) error { + + schemaHashMeta, err := db.ReadMetaString(tctx, "schema_hash") + if err != nil { + return err + } + + schemHashDBBefore, err := sq.HashSqliteDatabase(tctx, tx) + if err != nil { + return err + } + + if schemHashDBBefore != langext.Coalesce(schemaHashMeta, "") || langext.Coalesce(schemaHashMeta, "") != db.schema[currSchemaVers].Hash { + log.Debug().Str("schemHashDB", schemHashDBBefore).Msg("Schema (primary db)") + log.Debug().Str("schemaHashMeta", langext.Coalesce(schemaHashMeta, "")).Msg("Schema (primary db)") + log.Debug().Str("schemaHashAsset", db.schema[currSchemaVers].Hash).Msg("Schema (primary db)") + return errors.New("database schema does not match (primary db)") + } else { + log.Debug().Str("schemHash", schemHashDBBefore).Msg("Verified Schema consistency (primary db)") + } + + log.Info().Msgf("Upgrade schema from %d -> %d", currSchemaVers, resultSchemVers) + + _, err = tx.Exec(tctx, stmts, sq.PP{}) + if err != nil { + return 
err + } + + schemHashDBAfter, err := sq.HashSqliteDatabase(tctx, tx) + if err != nil { + return err + } + + if schemHashDBAfter != resultHash { + + schemaDBStr := langext.Must(createSqliteDatabaseSchemaStringFromSQL(tctx, db.schema[resultSchemVers].SQL)) + resultDBStr := langext.Must(sq.CreateSqliteDatabaseSchemaString(tctx, tx)) + + fmt.Printf("========================================= SQL SCHEMA-DUMP STR (CORRECT | FROM COMPILED SCHEMA):%s\n=========================================\n\n", schemaDBStr) + fmt.Printf("========================================= SQL SCHEMA-DUMP STR (CURRENT | AFTER MIGRATION):%s\n=========================================\n\n", resultDBStr) + + return fmt.Errorf("database [%s] schema does not match after [%d -> %d] migration (expected: %s | actual: %s)", db.name, currSchemaVers, resultSchemVers, resultHash, schemHashDBAfter) + } + + err = db.WriteMetaInt(tctx, "schema", int64(resultSchemVers)) + if err != nil { + return err + } + + err = db.WriteMetaString(tctx, "schema_hash", resultHash) + if err != nil { + return err + } + + log.Info().Msgf("Upgrade schema from %d -> %d successfully", currSchemaVers, resultSchemVers) + + return nil +} + +func createSqliteDatabaseSchemaStringFromSQL(ctx context.Context, schemaStr string) (string, error) { + dbdir := os.TempDir() + dbfile1 := filepath.Join(dbdir, langext.MustHexUUID()+".sqlite3") + defer func() { _ = os.Remove(dbfile1) }() + + err := os.MkdirAll(dbdir, os.ModePerm) + if err != nil { + return "", err + } + + url := fmt.Sprintf("file:%s?_pragma=journal_mode(%s)&_pragma=timeout(%d)&_pragma=foreign_keys(%s)&_pragma=busy_timeout(%d)", dbfile1, "DELETE", 1000, "true", 1000) + + xdb, err := sqlx.Open("sqlite", url) + if err != nil { + return "", err + } + + db := sq.NewDB(xdb, sq.DBOptions{}) + + _, err = db.Exec(ctx, schemaStr, sq.PP{}) + if err != nil { + return "", err + } + + return sq.CreateSqliteDatabaseSchemaString(ctx, db) +} + func (db *Database) Ping(ctx context.Context) error {
return db.db.Ping(ctx) } diff --git a/scnserver/db/impl/requests/database.go b/scnserver/db/impl/requests/database.go index f554744..98e54e7 100644 --- a/scnserver/db/impl/requests/database.go +++ b/scnserver/db/impl/requests/database.go @@ -13,15 +13,21 @@ import ( "github.com/glebarez/go-sqlite" "github.com/jmoiron/sqlx" "github.com/rs/zerolog/log" + "gogs.mikescher.com/BlackForestBytes/goext/exerr" "gogs.mikescher.com/BlackForestBytes/goext/langext" "gogs.mikescher.com/BlackForestBytes/goext/sq" + "os" + "path/filepath" "time" ) type Database struct { - db sq.DB - pp *dbtools.DBPreprocessor - wal bool + db sq.DB + pp *dbtools.DBPreprocessor + wal bool + name string + schemaVersion int + schema map[int]schema.Def } func NewRequestsDatabase(cfg server.Config) (*Database, error) { @@ -66,7 +72,14 @@ func NewRequestsDatabase(cfg server.Config) (*Database, error) { qqdb.AddListener(pp) - scndb := &Database{db: qqdb, pp: pp, wal: conf.Journal == "WAL"} + scndb := &Database{ + db: qqdb, + pp: pp, + wal: conf.Journal == "WAL", + schemaVersion: schema.RequestsSchemaVersion, + schema: schema.RequestsSchema, + name: "requests", + } return scndb, nil } @@ -99,57 +112,49 @@ func (db *Database) Migrate(outerctx context.Context) error { return err } - if currschema == 0 { - schemastr := schema.RequestsSchema[schema.RequestsSchemaVersion].SQL - schemahash := schema.RequestsSchema[schema.RequestsSchemaVersion].Hash - - schemahash, err := sq.HashGoSqliteSchema(tctx, schemastr) - if err != nil { - return err - } - - _, err = tx.Exec(tctx, schemastr, sq.PP{}) - if err != nil { - return err - } - - err = db.WriteMetaInt(tctx, "schema", int64(schema.RequestsSchemaVersion)) - if err != nil { - return err - } - - err = db.WriteMetaString(tctx, "schema_hash", schemahash) - if err != nil { - return err - } - - ppReInit = true - - currschema = schema.LogsSchemaVersion + if currschema == db.schemaVersion { + log.Info().Msgf("Database [%s] is up-to-date (%d == %d)", db.name, currschema, 
db.schemaVersion) } - if currschema == 1 { - schemHashDB, err := sq.HashSqliteDatabase(tctx, tx) - if err != nil { - return err - } + for currschema < db.schemaVersion { - schemaHashMeta, err := db.ReadMetaString(tctx, "schema_hash") - if err != nil { - return err - } + if currschema == 0 { + log.Info().Msgf("Migrate database (initialize) [%s] %d -> %d", db.name, currschema, db.schemaVersion) - if schemHashDB != langext.Coalesce(schemaHashMeta, "") || langext.Coalesce(schemaHashMeta, "") != schema.RequestsSchema[currschema].Hash { - log.Debug().Str("schemHashDB", schemHashDB).Msg("Schema (requests db)") - log.Debug().Str("schemaHashMeta", langext.Coalesce(schemaHashMeta, "")).Msg("Schema (requests db)") - log.Debug().Str("schemaHashAsset", schema.RequestsSchema[currschema].Hash).Msg("Schema (requests db)") - return errors.New("database schema does not match (requests db)") + schemastr := db.schema[1].SQL + schemahash := db.schema[1].Hash + + _, err = tx.Exec(tctx, schemastr, sq.PP{}) + if err != nil { + return err + } + + err = db.WriteMetaInt(tctx, "schema", int64(db.schemaVersion)) + if err != nil { + return err + } + + err = db.WriteMetaString(tctx, "schema_hash", schemahash) + if err != nil { + return err + } + + ppReInit = true + + currschema = db.schemaVersion } else { - log.Debug().Str("schemHash", schemHashDB).Msg("Verified Schema consistency (requests db)") + log.Info().Msgf("Migrate database [%s] %d -> %d", db.name, currschema, currschema+1) + + err = db.migrateSingle(tctx, tx, currschema, currschema+1) + if err != nil { + return err + } + + currschema = currschema + 1 } } - if currschema != schema.RequestsSchemaVersion { + if currschema != db.schemaVersion { return errors.New(fmt.Sprintf("Unknown DB schema: %d", currschema)) } @@ -169,6 +174,100 @@ func (db *Database) Migrate(outerctx context.Context) error { return nil } +//goland:noinspection SqlConstantCondition,SqlWithoutWhere +func (db *Database) migrateSingle(tctx *simplectx.SimpleContext, tx 
sq.Tx, schemaFrom int, schemaTo int) error { + + // ADD MIGRATIONS HERE ... + + return exerr.New(exerr.TypeInternal, fmt.Sprintf("missing %s migration from %d to %d", db.name, schemaFrom, schemaTo)).Build() +} + +func (db *Database) migrateBySQL(tctx *simplectx.SimpleContext, tx sq.Tx, stmts string, currSchemaVers int, resultSchemVers int, resultHash string, post func(tctx *simplectx.SimpleContext, tx sq.Tx) error) error { + + schemaHashMeta, err := db.ReadMetaString(tctx, "schema_hash") + if err != nil { + return err + } + + schemHashDBBefore, err := sq.HashSqliteDatabase(tctx, tx) + if err != nil { + return err + } + + if schemHashDBBefore != langext.Coalesce(schemaHashMeta, "") || langext.Coalesce(schemaHashMeta, "") != db.schema[currSchemaVers].Hash { + log.Debug().Str("schemHashDB", schemHashDBBefore).Msg("Schema (primary db)") + log.Debug().Str("schemaHashMeta", langext.Coalesce(schemaHashMeta, "")).Msg("Schema (primary db)") + log.Debug().Str("schemaHashAsset", db.schema[currSchemaVers].Hash).Msg("Schema (primary db)") + return errors.New("database schema does not match (primary db)") + } else { + log.Debug().Str("schemHash", schemHashDBBefore).Msg("Verified Schema consistency (primary db)") + } + + log.Info().Msgf("Upgrade schema from %d -> %d", currSchemaVers, resultSchemVers) + + _, err = tx.Exec(tctx, stmts, sq.PP{}) + if err != nil { + return err + } + + schemHashDBAfter, err := sq.HashSqliteDatabase(tctx, tx) + if err != nil { + return err + } + + if schemHashDBAfter != resultHash { + + schemaDBStr := langext.Must(createSqliteDatabaseSchemaStringFromSQL(tctx, db.schema[resultSchemVers].SQL)) + resultDBStr := langext.Must(sq.CreateSqliteDatabaseSchemaString(tctx, tx)) + + fmt.Printf("========================================= SQL SCHEMA-DUMP STR (CORRECT | FROM COMPILED SCHEMA):%s\n=========================================\n\n", schemaDBStr) + fmt.Printf("========================================= SQL SCHEMA-DUMP STR (CURRENT | AFTER
MIGRATION):%s\n=========================================\n\n", resultDBStr) + + return fmt.Errorf("database [%s] schema does not match after [%d -> %d] migration (expected: %s | actual: %s)", db.name, currSchemaVers, resultSchemVers, resultHash, schemHashDBAfter) + } + + err = db.WriteMetaInt(tctx, "schema", int64(resultSchemVers)) + if err != nil { + return err + } + + err = db.WriteMetaString(tctx, "schema_hash", resultHash) + if err != nil { + return err + } + + log.Info().Msgf("Upgrade schema from %d -> %d successfully", currSchemaVers, resultSchemVers) + + return nil +} + +func createSqliteDatabaseSchemaStringFromSQL(ctx context.Context, schemaStr string) (string, error) { + dbdir := os.TempDir() + dbfile1 := filepath.Join(dbdir, langext.MustHexUUID()+".sqlite3") + defer func() { _ = os.Remove(dbfile1) }() + + err := os.MkdirAll(dbdir, os.ModePerm) + if err != nil { + return "", err + } + + url := fmt.Sprintf("file:%s?_pragma=journal_mode(%s)&_pragma=timeout(%d)&_pragma=foreign_keys(%s)&_pragma=busy_timeout(%d)", dbfile1, "DELETE", 1000, "true", 1000) + + xdb, err := sqlx.Open("sqlite", url) + if err != nil { + return "", err + } + + db := sq.NewDB(xdb, sq.DBOptions{}) + + _, err = db.Exec(ctx, schemaStr, sq.PP{}) + if err != nil { + return "", err + } + + return sq.CreateSqliteDatabaseSchemaString(ctx, db) +} + func (db *Database) Ping(ctx context.Context) error { return db.db.Ping(ctx) } diff --git a/scnserver/db/schema/assets.go b/scnserver/db/schema/assets.go index 7c23909..4572ad4 100644 --- a/scnserver/db/schema/assets.go +++ b/scnserver/db/schema/assets.go @@ -25,6 +25,9 @@ var primarySchema5 string //go:embed primary_6.ddl var primarySchema6 string +//go:embed primary_7.ddl +var primarySchema7 string + //go:embed primary_migration_3_4.ddl var PrimaryMigration_3_4 string @@ -34,6 +37,9 @@ var PrimaryMigration_4_5 string //go:embed primary_migration_5_6.ddl var PrimaryMigration_5_6 string +//go:embed primary_migration_6_7.ddl +var PrimaryMigration_6_7
string + //go:embed requests_1.ddl var requestsSchema1 string @@ -48,9 +54,10 @@ var PrimarySchema = map[int]Def{ 4: {primarySchema4, "cb022156ab0e7aea39dd0c985428c43cae7d60e41ca8e9e5a84c774b3019d2ca"}, 5: {primarySchema5, "9d6217ba4a3503cfe090f72569367f95a413bb14e9effe49ffeabbf255bce8dd"}, 6: {primarySchema6, "8e83d20bcd008082713f248ae8cd558335a37a37ce90bd8c86e782da640ee160"}, + 7: {primarySchema7, "90d8dbc460afe025f9b74cda5c16bb8e58b178df275223bd2531907a8d8c36c3"}, } -var PrimarySchemaVersion = 6 +var PrimarySchemaVersion = 7 var RequestsSchema = map[int]Def{ 0: {"", ""}, diff --git a/scnserver/db/schema/primary_7.ddl b/scnserver/db/schema/primary_7.ddl new file mode 100644 index 0000000..4ca237a --- /dev/null +++ b/scnserver/db/schema/primary_7.ddl @@ -0,0 +1,238 @@ +CREATE TABLE users +( + user_id TEXT NOT NULL, + + username TEXT NULL DEFAULT NULL, + + timestamp_created INTEGER NOT NULL, + timestamp_lastread INTEGER NULL DEFAULT NULL, + timestamp_lastsent INTEGER NULL DEFAULT NULL, + + messages_sent INTEGER NOT NULL DEFAULT '0', + + quota_used INTEGER NOT NULL DEFAULT '0', + quota_used_day TEXT NULL DEFAULT NULL, + + is_pro INTEGER CHECK(is_pro IN (0, 1)) NOT NULL DEFAULT 0, + pro_token TEXT NULL DEFAULT NULL, + + PRIMARY KEY (user_id) +) STRICT; +CREATE UNIQUE INDEX "idx_users_protoken" ON users (pro_token) WHERE pro_token IS NOT NULL; + + +CREATE TABLE keytokens +( + keytoken_id TEXT NOT NULL, + + timestamp_created INTEGER NOT NULL, + timestamp_lastused INTEGER NULL DEFAULT NULL, + + name TEXT NOT NULL, + + owner_user_id TEXT NOT NULL, + + all_channels INTEGER CHECK(all_channels IN (0, 1)) NOT NULL, + channels TEXT NOT NULL, + token TEXT NOT NULL, + permissions TEXT NOT NULL, + + messages_sent INTEGER NOT NULL DEFAULT '0', + + PRIMARY KEY (keytoken_id) +) STRICT; +CREATE UNIQUE INDEX "idx_keytokens_token" ON keytokens (token); + + +CREATE TABLE clients +( + client_id TEXT NOT NULL, + + user_id TEXT NOT NULL, + type TEXT CHECK(type IN 
('ANDROID','IOS','LINUX','MACOS','WINDOWS')) NOT NULL, + fcm_token TEXT NOT NULL, + name TEXT NULL, + + timestamp_created INTEGER NOT NULL, + + agent_model TEXT NOT NULL, + agent_version TEXT NOT NULL, + + + deleted INTEGER CHECK(deleted IN (0, 1)) NOT NULL DEFAULT '0', + + PRIMARY KEY (client_id) +) STRICT; +CREATE INDEX "idx_clients_userid" ON clients (user_id); +CREATE INDEX "idx_clients_deleted" ON clients (deleted); +CREATE UNIQUE INDEX "idx_clients_fcmtoken" ON clients (fcm_token); + + +CREATE TABLE channels +( + channel_id TEXT NOT NULL, + + owner_user_id TEXT NOT NULL, + + internal_name TEXT NOT NULL, + display_name TEXT NOT NULL, + description_name TEXT NULL, + + subscribe_key TEXT NOT NULL, + + timestamp_created INTEGER NOT NULL, + timestamp_lastsent INTEGER NULL DEFAULT NULL, + + messages_sent INTEGER NOT NULL DEFAULT '0', + + PRIMARY KEY (channel_id) +) STRICT; +CREATE UNIQUE INDEX "idx_channels_identity" ON channels (owner_user_id, internal_name); + +CREATE TABLE subscriptions +( + subscription_id TEXT NOT NULL, + + subscriber_user_id TEXT NOT NULL, + channel_owner_user_id TEXT NOT NULL, + channel_internal_name TEXT NOT NULL, + channel_id TEXT NOT NULL, + + timestamp_created INTEGER NOT NULL, + + confirmed INTEGER CHECK(confirmed IN (0, 1)) NOT NULL, + + PRIMARY KEY (subscription_id) +) STRICT; +CREATE UNIQUE INDEX "idx_subscriptions_ref" ON subscriptions (subscriber_user_id, channel_owner_user_id, channel_internal_name); +CREATE INDEX "idx_subscriptions_chan" ON subscriptions (channel_id); +CREATE INDEX "idx_subscriptions_subuser" ON subscriptions (subscriber_user_id); +CREATE INDEX "idx_subscriptions_ownuser" ON subscriptions (channel_owner_user_id); +CREATE INDEX "idx_subscriptions_tsc" ON subscriptions (timestamp_created); +CREATE INDEX "idx_subscriptions_conf" ON subscriptions (confirmed); + + +CREATE TABLE messages +( + message_id TEXT NOT NULL, + sender_user_id TEXT NOT NULL, + channel_internal_name TEXT NOT NULL, + channel_id TEXT NOT NULL, + 
sender_ip TEXT NOT NULL, + sender_name TEXT NULL, + + timestamp_real INTEGER NOT NULL, + timestamp_client INTEGER NULL, + + title TEXT NOT NULL, + content TEXT NULL, + priority INTEGER CHECK(priority IN (0, 1, 2)) NOT NULL, + usr_message_id TEXT NULL, + + used_key_id TEXT NOT NULL, + + deleted INTEGER CHECK(deleted IN (0, 1)) NOT NULL DEFAULT '0', + + PRIMARY KEY (message_id) +) STRICT; +CREATE INDEX "idx_messages_channel" ON messages (channel_internal_name COLLATE BINARY); +CREATE INDEX "idx_messages_channel_nc" ON messages (channel_internal_name COLLATE NOCASE); +CREATE UNIQUE INDEX "idx_messages_idempotency" ON messages (sender_user_id, usr_message_id COLLATE BINARY); +CREATE INDEX "idx_messages_senderip" ON messages (sender_ip COLLATE BINARY); +CREATE INDEX "idx_messages_sendername" ON messages (sender_name COLLATE BINARY); +CREATE INDEX "idx_messages_sendername_nc" ON messages (sender_name COLLATE NOCASE); +CREATE INDEX "idx_messages_title" ON messages (title COLLATE BINARY); +CREATE INDEX "idx_messages_title_nc" ON messages (title COLLATE NOCASE); +CREATE INDEX "idx_messages_usedkey" ON messages (sender_user_id, used_key_id); +CREATE INDEX "idx_messages_deleted" ON messages (deleted); + + +CREATE VIRTUAL TABLE messages_fts USING fts5 +( + channel_internal_name, + sender_name, + title, + content, + + tokenize = unicode61, + content = 'messages', + content_rowid = 'rowid' +); + +CREATE TRIGGER fts_insert AFTER INSERT ON messages BEGIN + INSERT INTO messages_fts (rowid, channel_internal_name, sender_name, title, content) VALUES (new.rowid, new.channel_internal_name, new.sender_name, new.title, new.content); +END; + +CREATE TRIGGER fts_update AFTER UPDATE ON messages BEGIN + INSERT INTO messages_fts (messages_fts, rowid, channel_internal_name, sender_name, title, content) VALUES ('delete', old.rowid, old.channel_internal_name, old.sender_name, old.title, old.content); + INSERT INTO messages_fts ( rowid, channel_internal_name, sender_name, title, content) VALUES ( 
new.rowid, new.channel_internal_name, new.sender_name, new.title, new.content); +END; + +CREATE TRIGGER fts_delete AFTER DELETE ON messages BEGIN + INSERT INTO messages_fts (messages_fts, rowid, channel_internal_name, sender_name, title, content) VALUES ('delete', old.rowid, old.channel_internal_name, old.sender_name, old.title, old.content); +END; + + +CREATE TABLE deliveries +( + delivery_id TEXT NOT NULL, + + message_id TEXT NOT NULL, + receiver_user_id TEXT NOT NULL, + receiver_client_id TEXT NOT NULL, + + timestamp_created INTEGER NOT NULL, + timestamp_finalized INTEGER NULL, + + + status TEXT CHECK(status IN ('RETRY','SUCCESS','FAILED')) NOT NULL, + retry_count INTEGER NOT NULL DEFAULT 0, + next_delivery INTEGER NULL DEFAULT NULL, + + fcm_message_id TEXT NULL, + + PRIMARY KEY (delivery_id) +) STRICT; +CREATE INDEX "idx_deliveries_receiver" ON deliveries (message_id, receiver_client_id); + + +CREATE TABLE compat_ids +( + old INTEGER NOT NULL, + new TEXT NOT NULL, + type TEXT NOT NULL +) STRICT; +CREATE UNIQUE INDEX "idx_compatids_new" ON compat_ids (new); +CREATE UNIQUE INDEX "idx_compatids_old" ON compat_ids (old, type); + + +CREATE TABLE compat_acks +( + user_id TEXT NOT NULL, + message_id TEXT NOT NULL +) STRICT; +CREATE INDEX "idx_compatacks_userid" ON compat_acks (user_id); +CREATE UNIQUE INDEX "idx_compatacks_messageid" ON compat_acks (message_id); +CREATE UNIQUE INDEX "idx_compatacks_userid_messageid" ON compat_acks (user_id, message_id); + + +CREATE TABLE compat_clients +( + client_id TEXT NOT NULL +) STRICT; +CREATE UNIQUE INDEX "idx_compatclient_clientid" ON compat_clients (client_id); + + +CREATE TABLE `meta` +( + meta_key TEXT NOT NULL, + value_int INTEGER NULL, + value_txt TEXT NULL, + value_real REAL NULL, + value_blob BLOB NULL, + + PRIMARY KEY (meta_key) +) STRICT; + + +INSERT INTO meta (meta_key, value_int) VALUES ('schema', 3) \ No newline at end of file diff --git a/scnserver/db/schema/primary_migration_6_7.ddl 
b/scnserver/db/schema/primary_migration_6_7.ddl new file mode 100644 index 0000000..a7bb0dc --- /dev/null +++ b/scnserver/db/schema/primary_migration_6_7.ddl @@ -0,0 +1,52 @@ + + + +DROP INDEX "idx_clients_userid"; +DROP INDEX "idx_clients_fcmtoken"; + + +CREATE TABLE clients_new +( + client_id TEXT NOT NULL, + + user_id TEXT NOT NULL, + type TEXT CHECK(type IN ('ANDROID','IOS','LINUX','MACOS','WINDOWS')) NOT NULL, + fcm_token TEXT NOT NULL, + name TEXT NULL, + + timestamp_created INTEGER NOT NULL, + + agent_model TEXT NOT NULL, + agent_version TEXT NOT NULL, + + + deleted INTEGER CHECK(deleted IN (0, 1)) NOT NULL DEFAULT '0', + + PRIMARY KEY (client_id) +) STRICT; + + +INSERT INTO clients_new +SELECT + client_id, + user_id, + type, + fcm_token, + name, + timestamp_created, + agent_model, + agent_version, + 0 AS deleted +FROM clients; + + +DROP TABLE clients; +ALTER TABLE clients_new RENAME TO clients; + + +CREATE INDEX "idx_clients_userid" ON clients (user_id); +CREATE INDEX "idx_clients_deleted" ON clients (deleted); +CREATE UNIQUE INDEX "idx_clients_fcmtoken" ON clients (fcm_token); + + + diff --git a/scnserver/db/simplectx/simplecontext.go b/scnserver/db/simplectx/simplecontext.go index 05d2492..54e3d17 100644 --- a/scnserver/db/simplectx/simplecontext.go +++ b/scnserver/db/simplectx/simplecontext.go @@ -95,3 +95,20 @@ func (sc *SimpleContext) RollbackTransaction() { sc.transaction = nil return } + +func Run[TResp any](outctx context.Context, f func(ctx db.TxContext) (TResp, error)) (TResp, error) { + sctx := CreateSimpleContext(outctx, nil) + defer sctx.Cancel() + + res, err := f(sctx) + if err != nil { + return *new(TResp), err + } + + err = sctx.CommitTransaction() + if err != nil { + return *new(TResp), err + } + + return res, nil +} diff --git a/scnserver/jobs/DeliveryRetryJob.go b/scnserver/jobs/deliveryRetryJob.go similarity index 100% rename from scnserver/jobs/DeliveryRetryJob.go rename to scnserver/jobs/deliveryRetryJob.go diff --git 
a/scnserver/jobs/RequestLogCleanupJob.go b/scnserver/jobs/requestLogCleanupJob.go similarity index 100% rename from scnserver/jobs/RequestLogCleanupJob.go rename to scnserver/jobs/requestLogCleanupJob.go diff --git a/scnserver/jobs/RequestLogCollectorJob.go b/scnserver/jobs/requestLogCollectorJob.go similarity index 100% rename from scnserver/jobs/RequestLogCollectorJob.go rename to scnserver/jobs/requestLogCollectorJob.go diff --git a/scnserver/logic/application.go b/scnserver/logic/application.go index a240928..968de55 100644 --- a/scnserver/logic/application.go +++ b/scnserver/logic/application.go @@ -9,25 +9,19 @@ import ( "blackforestbytes.com/simplecloudnotifier/push" "context" "errors" + "fmt" "github.com/rs/zerolog/log" golock "github.com/viney-shih/go-lock" "gogs.mikescher.com/BlackForestBytes/goext/ginext" - "gogs.mikescher.com/BlackForestBytes/goext/rext" "gogs.mikescher.com/BlackForestBytes/goext/syncext" "net" "os" "os/signal" - "regexp" "strings" "syscall" "time" ) -var rexWhitespaceStart = rext.W(regexp.MustCompile("^\\s+")) -var rexWhitespaceEnd = rext.W(regexp.MustCompile("\\s+$")) -var rexNormalizeUsername = rext.W(regexp.MustCompile("[^[:alnum:]\\-_ ]")) -var rexCompatTitleChannel = rext.W(regexp.MustCompile("^\\[(?P[A-Za-z\\-0-9_ ]+)] (?P(.|\\r|\\n)+)$")) - type Application struct { Config scn.Config Gin *ginext.GinWrapper @@ -279,9 +273,24 @@ func (app *Application) NormalizeUsername(v string) string { } func (app *Application) DeliverMessage(ctx context.Context, user models.User, client models.Client, channel models.Channel, msg models.Message) (string, error) { - fcmDelivID, err := app.Pusher.SendNotification(ctx, user, client, channel, msg) + fcmDelivID, errCode, err := app.Pusher.SendNotification(ctx, user, client, channel, msg) if err != nil { log.Warn().Str("MessageID", msg.MessageID.String()).Str("ClientID", client.ClientID.String()).Err(err).Msg("FCM Delivery failed") + + if errCode == "UNREGISTERED" { + + 
log.Warn().Msg(fmt.Sprintf("Auto-Delete client %s of user %s (FCM is UNREGISTERED)", client.ClientID, user.UserID)) + + _, _ = simplectx.Run(ctx, func(ctx db.TxContext) (any, error) { + err = app.Database.Primary.DeleteClient(ctx, client.ClientID) + if err != nil { + log.Err(err).Str("ClientID", client.ClientID.String()).Msg("Failed to delete client") + } + return nil, nil + }) + + } + return "", err } return fcmDelivID, nil diff --git a/scnserver/push/dummy.go b/scnserver/push/dummy.go index 66fe308..42dae51 100644 --- a/scnserver/push/dummy.go +++ b/scnserver/push/dummy.go @@ -12,6 +12,6 @@ func NewDummy() NotificationClient { return &DummyConnector{} } -func (d DummyConnector) SendNotification(ctx context.Context, user models.User, client models.Client, channel models.Channel, msg models.Message) (string, error) { - return "%DUMMY%", nil +func (d DummyConnector) SendNotification(ctx context.Context, user models.User, client models.Client, channel models.Channel, msg models.Message) (string, string, error) { + return "%DUMMY%", "", nil } diff --git a/scnserver/push/firebase.go b/scnserver/push/firebase.go index 08db204..3d82fd0 100644 --- a/scnserver/push/firebase.go +++ b/scnserver/push/firebase.go @@ -53,7 +53,7 @@ type Notification struct { Priority int } -func (fb FirebaseConnector) SendNotification(ctx context.Context, user models.User, client models.Client, channel models.Channel, msg models.Message) (string, error) { +func (fb FirebaseConnector) SendNotification(ctx context.Context, user models.User, client models.Client, channel models.Channel, msg models.Message) (string, string, error) { uri := "https://fcm.googleapis.com/v1/projects/" + fb.fbProject + "/messages:send" @@ -100,18 +100,18 @@ func (fb FirebaseConnector) SendNotification(ctx context.Context, user models.Us bytesBody, err := json.Marshal(gin.H{"message": jsonBody}) if err != nil { - return "", err + return "", "", err } request, err := http.NewRequestWithContext(ctx, "POST", uri, 
bytes.NewBuffer(bytesBody)) if err != nil { - return "", err + return "", "", err } tok, err := fb.auth.Token(ctx) if err != nil { log.Err(err).Msg("Refreshing FB token failed") - return "", err + return "", "", err } request.Header.Set("Authorization", "Bearer "+tok) @@ -120,31 +120,53 @@ func (fb FirebaseConnector) SendNotification(ctx context.Context, user models.Us response, err := fb.client.Do(request) if err != nil { - return "", err + return "", "", err } defer func() { _ = response.Body.Close() }() if response.StatusCode < 200 || response.StatusCode >= 300 { if bstr, err := io.ReadAll(response.Body); err == nil { - return "", errors.New(fmt.Sprintf("FCM-Request returned %d: %s", response.StatusCode, string(bstr))) + + var errRespBody struct { + Error struct { + Code int `json:"code"` + Message string `json:"message"` + Status string `json:"status"` + Details []struct { + AtType string `json:"@type"` + ECode string `json:"errorCode"` + } `json:"details"` + } `json:"error"` + } + + if err := json.Unmarshal(bstr, &errRespBody); err == nil { + for _, v := range errRespBody.Error.Details { + return "", v.ECode, errors.New(fmt.Sprintf("FCM-Request returned %d [%s]: %s", response.StatusCode, v.ECode, string(bstr))) + } + } + + return "", "", errors.New(fmt.Sprintf("FCM-Request returned %d: %s", response.StatusCode, string(bstr))) + } else { - return "", errors.New(fmt.Sprintf("FCM-Request returned %d", response.StatusCode)) + + return "", "", errors.New(fmt.Sprintf("FCM-Request returned %d", response.StatusCode)) + } } respBodyBin, err := io.ReadAll(response.Body) if err != nil { - return "", err + return "", "", err } var respBody struct { Name string `json:"name"` } if err := json.Unmarshal(respBodyBin, &respBody); err != nil { - return "", err + return "", "", err } log.Info().Msg(fmt.Sprintf("Successfully pushed notification %s", msg.MessageID)) - return respBody.Name, nil + return respBody.Name, "", nil } diff --git a/scnserver/push/notificationClient.go 
b/scnserver/push/notificationClient.go index c6c86a8..95c6b2e 100644 --- a/scnserver/push/notificationClient.go +++ b/scnserver/push/notificationClient.go @@ -6,5 +6,5 @@ import ( ) type NotificationClient interface { - SendNotification(ctx context.Context, user models.User, client models.Client, channel models.Channel, msg models.Message) (string, error) + SendNotification(ctx context.Context, user models.User, client models.Client, channel models.Channel, msg models.Message) (string, string, error) } diff --git a/scnserver/push/testSink.go b/scnserver/push/testSink.go index 901ab5c..1a20de9 100644 --- a/scnserver/push/testSink.go +++ b/scnserver/push/testSink.go @@ -24,10 +24,10 @@ func (d *TestSink) Last() SinkData { return d.Data[len(d.Data)-1] } -func (d *TestSink) SendNotification(ctx context.Context, user models.User, client models.Client, channel models.Channel, msg models.Message) (string, error) { +func (d *TestSink) SendNotification(ctx context.Context, user models.User, client models.Client, channel models.Channel, msg models.Message) (string, string, error) { id, err := langext.NewHexUUID() if err != nil { - return "", err + return "", "", err } key := "TestSink[" + id + "]" @@ -37,5 +37,5 @@ func (d *TestSink) SendNotification(ctx context.Context, user models.User, clien Client: client, }) - return key, nil + return key, "", nil }