migrate to multiple sqlite db files (primary + requests + logs)
This commit is contained in:
parent 679277d59e
commit 14bba38324
4
scnserver/.idea/sqldialects.xml
generated
@ -1,11 +1,11 @@
|
||||
<?xml version="1.0" encoding="UTF-8"?>
|
||||
<project version="4">
|
||||
<component name="SqlDialectMappings">
|
||||
<file url="file://$PROJECT_DIR$/db/schema/schema_3.ddl" dialect="SQLite" />
|
||||
<file url="file://$PROJECT_DIR$/db/impl/primary/schema/schema_3.ddl" dialect="SQLite" />
|
||||
<file url="PROJECT" dialect="SQLite" />
|
||||
</component>
|
||||
<component name="SqlResolveMappings">
|
||||
<file url="file://$PROJECT_DIR$/db/database.go" scope="{"node":{ "@negative":"1", "group":{ "@kind":"root", "node":{ "name":{ "@qname":"b3228d61-4c36-41ce-803f-63bd80e198b3" }, "group":{ "@kind":"schema", "node":{ "name":{ "@qname":"schema_3.0.ddl" } } } } } }}" />
|
||||
<file url="file://$PROJECT_DIR$/db/impl/primary/database.go" scope="{"node":{ "@negative":"1", "group":{ "@kind":"root", "node":{ "name":{ "@qname":"b3228d61-4c36-41ce-803f-63bd80e198b3" }, "group":{ "@kind":"schema", "node":{ "name":{ "@qname":"schema_3.0.ddl" } } } } } }}" />
|
||||
<file url="PROJECT" scope="{"node":{ "@negative":"1", "group":{ "@kind":"root", "node":{ "name":{ "@qname":"b3228d61-4c36-41ce-803f-63bd80e198b3" }, "group":{ "@kind":"schema", "node":{ "name":{ "@qname":"schema_3.0.ddl" } } } } } }}" />
|
||||
</component>
|
||||
</project>
|
@ -28,6 +28,12 @@
(then all errors end up in the _second_ sqlite table)
due to the message channel etc. everything is non-blocking and can't fail in main (see the sketch below)

- request logging (log all requests with body, response, exit-code, headers, uri, route, userid, ..., tx-retries, etc.), (trim body/response if too big?)

- jobs that clear the requests-db and logs-db so that only the last X entries are kept...

-> logs and request-logging go into their own sqlite files (the sqlite files are prepped)

-------------------------------------------------------------------------------------------------------------------------------

- in my script: use (backupname || hostname) for sendername
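The "message channel" note above could look roughly like the following sketch: handlers push an entry into a buffered channel and a background goroutine drains it into the requests/logs database, so a slow or failing insert never blocks (or fails) the main request path. Everything here is hypothetical — `RequestLogEntry`, the collector, and the insert callback do not exist in this commit, which only preps the sqlite files.

```go
// Hypothetical sketch of non-blocking request logging via a buffered channel.
// None of these types or methods exist in this commit; they only illustrate the idea.
package logic

import (
	"context"
	"time"

	"github.com/rs/zerolog/log"
)

// RequestLogEntry is an assumed shape for one logged request.
type RequestLogEntry struct {
	Method    string
	URI       string
	Status    int
	UserID    *int64
	Body      string // trimmed if too big
	Response  string // trimmed if too big
	TxRetries int
	Timestamp time.Time
}

type RequestLogCollector struct {
	queue chan RequestLogEntry // buffered, so Enqueue never blocks the handler
}

func NewRequestLogCollector() *RequestLogCollector {
	return &RequestLogCollector{queue: make(chan RequestLogEntry, 1024)}
}

// Enqueue is called from the request path; it must never block or fail.
func (c *RequestLogCollector) Enqueue(e RequestLogEntry) {
	select {
	case c.queue <- e:
	default:
		// queue full: drop the entry instead of blocking the handler
		log.Warn().Msg("request-log queue full, dropping entry")
	}
}

// Run drains the queue in the background and writes into the requests-db
// via the supplied insert function; errors end up in the log, never in main.
func (c *RequestLogCollector) Run(ctx context.Context, insert func(context.Context, RequestLogEntry) error) {
	for {
		select {
		case <-ctx.Done():
			return
		case e := <-c.queue:
			if err := insert(ctx, e); err != nil {
				log.Err(err).Msg("failed to write request-log entry")
			}
		}
	}
}
```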
@ -3,8 +3,8 @@ package handler
|
||||
import (
|
||||
"blackforestbytes.com/simplecloudnotifier/api/apierr"
|
||||
"blackforestbytes.com/simplecloudnotifier/api/ginresp"
|
||||
"blackforestbytes.com/simplecloudnotifier/db"
|
||||
"blackforestbytes.com/simplecloudnotifier/db/cursortoken"
|
||||
primarydb "blackforestbytes.com/simplecloudnotifier/db/impl/primary"
|
||||
"blackforestbytes.com/simplecloudnotifier/logic"
|
||||
"blackforestbytes.com/simplecloudnotifier/models"
|
||||
"database/sql"
|
||||
@ -18,13 +18,13 @@ import (
|
||||
|
||||
type APIHandler struct {
|
||||
app *logic.Application
|
||||
database *db.Database
|
||||
database *primarydb.Database
|
||||
}
|
||||
|
||||
func NewAPIHandler(app *logic.Application) APIHandler {
|
||||
return APIHandler{
|
||||
app: app,
|
||||
database: app.Database,
|
||||
database: app.Database.Primary,
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -132,15 +132,17 @@ func (h CommonHandler) Health(g *gin.Context) ginresp.HTTPResponse {
|
||||
return ginresp.InternalError(err)
|
||||
}
|
||||
|
||||
for _, subdb := range h.app.Database.List() {
|
||||
|
||||
uuidKey, _ := langext.NewHexUUID()
|
||||
uuidWrite, _ := langext.NewHexUUID()
|
||||
|
||||
err = h.app.Database.WriteMetaString(ctx, uuidKey, uuidWrite)
|
||||
err = subdb.WriteMetaString(ctx, uuidKey, uuidWrite)
|
||||
if err != nil {
|
||||
return ginresp.InternalError(err)
|
||||
}
|
||||
|
||||
uuidRead, err := h.app.Database.ReadMetaString(ctx, uuidKey)
|
||||
uuidRead, err := subdb.ReadMetaString(ctx, uuidKey)
|
||||
if err != nil {
|
||||
return ginresp.InternalError(err)
|
||||
}
|
||||
@ -149,11 +151,13 @@ func (h CommonHandler) Health(g *gin.Context) ginresp.HTTPResponse {
|
||||
return ginresp.InternalError(errors.New("writing into DB was not consistent"))
|
||||
}
|
||||
|
||||
err = h.app.Database.DeleteMeta(ctx, uuidKey)
|
||||
err = subdb.DeleteMeta(ctx, uuidKey)
|
||||
if err != nil {
|
||||
return ginresp.InternalError(err)
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
return ginresp.JSON(http.StatusOK, response{Status: "ok"})
|
||||
}
|
||||
|
||||
|
@ -2,7 +2,7 @@ package handler
|
||||
|
||||
import (
|
||||
"blackforestbytes.com/simplecloudnotifier/api/ginresp"
|
||||
"blackforestbytes.com/simplecloudnotifier/db"
|
||||
primarydb "blackforestbytes.com/simplecloudnotifier/db/impl/primary"
|
||||
"blackforestbytes.com/simplecloudnotifier/logic"
|
||||
"blackforestbytes.com/simplecloudnotifier/models"
|
||||
"database/sql"
|
||||
@ -14,13 +14,13 @@ import (
|
||||
|
||||
type CompatHandler struct {
|
||||
app *logic.Application
|
||||
database *db.Database
|
||||
database *primarydb.Database
|
||||
}
|
||||
|
||||
func NewCompatHandler(app *logic.Application) CompatHandler {
|
||||
return CompatHandler{
|
||||
app: app,
|
||||
database: app.Database,
|
||||
database: app.Database.Primary,
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -4,7 +4,7 @@ import (
|
||||
"blackforestbytes.com/simplecloudnotifier/api/apierr"
|
||||
hl "blackforestbytes.com/simplecloudnotifier/api/apihighlight"
|
||||
"blackforestbytes.com/simplecloudnotifier/api/ginresp"
|
||||
"blackforestbytes.com/simplecloudnotifier/db"
|
||||
primarydb "blackforestbytes.com/simplecloudnotifier/db/impl/primary"
|
||||
"blackforestbytes.com/simplecloudnotifier/logic"
|
||||
"blackforestbytes.com/simplecloudnotifier/models"
|
||||
"database/sql"
|
||||
@ -21,13 +21,13 @@ import (
|
||||
|
||||
type MessageHandler struct {
|
||||
app *logic.Application
|
||||
database *db.Database
|
||||
database *primarydb.Database
|
||||
}
|
||||
|
||||
func NewMessageHandler(app *logic.Application) MessageHandler {
|
||||
return MessageHandler{
|
||||
app: app,
|
||||
database: app.Database,
|
||||
database: app.Database.Primary,
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -4,7 +4,6 @@ import (
|
||||
scn "blackforestbytes.com/simplecloudnotifier"
|
||||
"blackforestbytes.com/simplecloudnotifier/api"
|
||||
"blackforestbytes.com/simplecloudnotifier/api/ginext"
|
||||
"blackforestbytes.com/simplecloudnotifier/db"
|
||||
"blackforestbytes.com/simplecloudnotifier/google"
|
||||
"blackforestbytes.com/simplecloudnotifier/jobs"
|
||||
"blackforestbytes.com/simplecloudnotifier/logic"
|
||||
@ -20,7 +19,7 @@ func main() {
|
||||
|
||||
log.Info().Msg(fmt.Sprintf("Starting with config-namespace <%s>", conf.Namespace))
|
||||
|
||||
sqlite, err := db.NewDatabase(conf)
|
||||
sqlite, err := logic.NewDBPool(conf)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
|
@ -16,18 +16,13 @@ type Config struct {
|
||||
LogLevel zerolog.Level `env:"SCN_LOGLEVEL"`
|
||||
ServerIP string `env:"SCN_IP"`
|
||||
ServerPort string `env:"SCN_PORT"`
|
||||
DBFile string `env:"SCN_DB_FILE"`
|
||||
DBJournal string `env:"SCN_DB_JOURNAL"`
|
||||
DBTimeout time.Duration `env:"SCN_DB_TIMEOUT"`
|
||||
DBMaxOpenConns int `env:"SCN_DB_MAXOPENCONNECTIONS"`
|
||||
DBMaxIdleConns int `env:"SCN_DB_MAXIDLECONNECTIONS"`
|
||||
DBConnMaxLifetime time.Duration `env:"SCN_DB_CONNEXTIONMAXLIFETIME"`
|
||||
DBConnMaxIdleTime time.Duration `env:"SCN_DB_CONNEXTIONMAXIDLETIME"`
|
||||
DBCheckForeignKeys bool `env:"SCN_DB_CHECKFOREIGNKEYS"`
|
||||
DBSingleConn bool `env:"SCN_DB_SINGLECONNECTION"`
|
||||
DBMain DBConfig `envprefix:"SCN_DB_MAIN_"`
|
||||
DBRequests DBConfig `envprefix:"SCN_DB_REQUESTS_"`
|
||||
DBLogs DBConfig `envprefix:"SCN_DB_LOGS_"`
|
||||
RequestTimeout time.Duration `env:"SCN_REQUEST_TIMEOUT"`
|
||||
RequestMaxRetry int `env:"SCN_REQUEST_MAXRETRY"`
|
||||
RequestRetrySleep time.Duration `env:"SCN_REQUEST_RETRYSLEEP"`
|
||||
Cors bool `env:"SCN_CORS"`
|
||||
ReturnRawErrors bool `env:"SCN_ERROR_RETURN"`
|
||||
DummyFirebase bool `env:"SCN_DUMMY_FB"`
|
||||
DummyGoogleAPI bool `env:"SCN_DUMMY_GOOG"`
|
||||
@ -42,7 +37,18 @@ type Config struct {
|
||||
GoogleAPIPrivateKey string `env:"SCN_GOOG_PRIVATEKEY"`
|
||||
GooglePackageName string `env:"SCN_GOOG_PACKAGENAME"`
|
||||
GoogleProProductID string `env:"SCN_GOOG_PROPRODUCTID"`
|
||||
Cors bool `env:"SCN_CORS"`
|
||||
}
|
||||
|
||||
type DBConfig struct {
|
||||
File string `env:"FILE"`
|
||||
Journal string `env:"JOURNAL"`
|
||||
Timeout time.Duration `env:"TIMEOUT"`
|
||||
MaxOpenConns int `env:"MAXOPENCONNECTIONS"`
|
||||
MaxIdleConns int `env:"MAXIDLECONNECTIONS"`
|
||||
ConnMaxLifetime time.Duration `env:"CONNEXTIONMAXLIFETIME"`
|
||||
ConnMaxIdleTime time.Duration `env:"CONNEXTIONMAXIDLETIME"`
|
||||
CheckForeignKeys bool `env:"CHECKFOREIGNKEYS"`
|
||||
SingleConn bool `env:"SINGLECONNECTION"`
|
||||
}
|
||||
|
||||
var Conf Config
|
||||
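With the nested DBConfig, the per-database settings are presumably resolved by concatenating the `envprefix` with the field-level `env` tags (an assumption about the env parsing in this repo — the parser itself is not part of this diff). A hypothetical check of the resulting variable names:

```go
// Assumed env-variable naming from envprefix ("SCN_DB_MAIN_", ...) + env tag ("FILE", "JOURNAL", ...).
// Verify against the actual env parsing (confEnv) before relying on these names.
package main

import (
	"fmt"
	"os"
)

func main() {
	for _, key := range []string{
		"SCN_DB_MAIN_FILE",     // DBMain.File
		"SCN_DB_MAIN_JOURNAL",  // DBMain.Journal
		"SCN_DB_REQUESTS_FILE", // DBRequests.File
		"SCN_DB_LOGS_FILE",     // DBLogs.File
	} {
		fmt.Printf("%s=%q\n", key, os.Getenv(key))
	}
}
```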
@ -55,15 +61,39 @@ var configLocHost = func() Config {
|
||||
LogLevel: zerolog.DebugLevel,
|
||||
ServerIP: "0.0.0.0",
|
||||
ServerPort: "8080",
|
||||
DBFile: ".run-data/db.sqlite3",
|
||||
DBJournal: "WAL",
|
||||
DBTimeout: 5 * time.Second,
|
||||
DBCheckForeignKeys: false,
|
||||
DBSingleConn: false,
|
||||
DBMaxOpenConns: 5,
|
||||
DBMaxIdleConns: 5,
|
||||
DBConnMaxLifetime: 60 * time.Minute,
|
||||
DBConnMaxIdleTime: 60 * time.Minute,
|
||||
DBMain: DBConfig{
|
||||
File: ".run-data/loc_main.sqlite3",
|
||||
Journal: "WAL",
|
||||
Timeout: 5 * time.Second,
|
||||
CheckForeignKeys: false,
|
||||
SingleConn: false,
|
||||
MaxOpenConns: 5,
|
||||
MaxIdleConns: 5,
|
||||
ConnMaxLifetime: 60 * time.Minute,
|
||||
ConnMaxIdleTime: 60 * time.Minute,
|
||||
},
|
||||
DBRequests: DBConfig{
|
||||
File: ".run-data/loc_requests.sqlite3",
|
||||
Journal: "DELETE",
|
||||
Timeout: 5 * time.Second,
|
||||
CheckForeignKeys: false,
|
||||
SingleConn: false,
|
||||
MaxOpenConns: 5,
|
||||
MaxIdleConns: 5,
|
||||
ConnMaxLifetime: 60 * time.Minute,
|
||||
ConnMaxIdleTime: 60 * time.Minute,
|
||||
},
|
||||
DBLogs: DBConfig{
|
||||
File: ".run-data/loc_logs.sqlite3",
|
||||
Journal: "DELETE",
|
||||
Timeout: 5 * time.Second,
|
||||
CheckForeignKeys: false,
|
||||
SingleConn: false,
|
||||
MaxOpenConns: 5,
|
||||
MaxIdleConns: 5,
|
||||
ConnMaxLifetime: 60 * time.Minute,
|
||||
ConnMaxIdleTime: 60 * time.Minute,
|
||||
},
|
||||
RequestTimeout: 16 * time.Second,
|
||||
RequestMaxRetry: 8,
|
||||
RequestRetrySleep: 100 * time.Millisecond,
|
||||
@ -93,15 +123,39 @@ var configLocDocker = func() Config {
|
||||
LogLevel: zerolog.DebugLevel,
|
||||
ServerIP: "0.0.0.0",
|
||||
ServerPort: "80",
|
||||
DBFile: "/data/scn_docker.sqlite3",
|
||||
DBJournal: "WAL",
|
||||
DBTimeout: 5 * time.Second,
|
||||
DBCheckForeignKeys: false,
|
||||
DBSingleConn: false,
|
||||
DBMaxOpenConns: 5,
|
||||
DBMaxIdleConns: 5,
|
||||
DBConnMaxLifetime: 60 * time.Minute,
|
||||
DBConnMaxIdleTime: 60 * time.Minute,
|
||||
DBMain: DBConfig{
|
||||
File: "/data/docker_scn_main.sqlite3",
|
||||
Journal: "WAL",
|
||||
Timeout: 5 * time.Second,
|
||||
CheckForeignKeys: false,
|
||||
SingleConn: false,
|
||||
MaxOpenConns: 5,
|
||||
MaxIdleConns: 5,
|
||||
ConnMaxLifetime: 60 * time.Minute,
|
||||
ConnMaxIdleTime: 60 * time.Minute,
|
||||
},
|
||||
DBRequests: DBConfig{
|
||||
File: "/data/docker_scn_requests.sqlite3",
|
||||
Journal: "DELETE",
|
||||
Timeout: 5 * time.Second,
|
||||
CheckForeignKeys: false,
|
||||
SingleConn: false,
|
||||
MaxOpenConns: 5,
|
||||
MaxIdleConns: 5,
|
||||
ConnMaxLifetime: 60 * time.Minute,
|
||||
ConnMaxIdleTime: 60 * time.Minute,
|
||||
},
|
||||
DBLogs: DBConfig{
|
||||
File: "/data/docker_scn_logs.sqlite3",
|
||||
Journal: "DELETE",
|
||||
Timeout: 5 * time.Second,
|
||||
CheckForeignKeys: false,
|
||||
SingleConn: false,
|
||||
MaxOpenConns: 5,
|
||||
MaxIdleConns: 5,
|
||||
ConnMaxLifetime: 60 * time.Minute,
|
||||
ConnMaxIdleTime: 60 * time.Minute,
|
||||
},
|
||||
RequestTimeout: 16 * time.Second,
|
||||
RequestMaxRetry: 8,
|
||||
RequestRetrySleep: 100 * time.Millisecond,
|
||||
@ -131,15 +185,39 @@ var configDev = func() Config {
|
||||
LogLevel: zerolog.DebugLevel,
|
||||
ServerIP: "0.0.0.0",
|
||||
ServerPort: "80",
|
||||
DBFile: "/data/scn.sqlite3",
|
||||
DBJournal: "WAL",
|
||||
DBTimeout: 5 * time.Second,
|
||||
DBCheckForeignKeys: false,
|
||||
DBSingleConn: false,
|
||||
DBMaxOpenConns: 5,
|
||||
DBMaxIdleConns: 5,
|
||||
DBConnMaxLifetime: 60 * time.Minute,
|
||||
DBConnMaxIdleTime: 60 * time.Minute,
|
||||
DBMain: DBConfig{
|
||||
File: "/data/scn_main.sqlite3",
|
||||
Journal: "WAL",
|
||||
Timeout: 5 * time.Second,
|
||||
CheckForeignKeys: false,
|
||||
SingleConn: false,
|
||||
MaxOpenConns: 5,
|
||||
MaxIdleConns: 5,
|
||||
ConnMaxLifetime: 60 * time.Minute,
|
||||
ConnMaxIdleTime: 60 * time.Minute,
|
||||
},
|
||||
DBRequests: DBConfig{
|
||||
File: "/data/scn_requests.sqlite3",
|
||||
Journal: "DELETE",
|
||||
Timeout: 5 * time.Second,
|
||||
CheckForeignKeys: false,
|
||||
SingleConn: false,
|
||||
MaxOpenConns: 5,
|
||||
MaxIdleConns: 5,
|
||||
ConnMaxLifetime: 60 * time.Minute,
|
||||
ConnMaxIdleTime: 60 * time.Minute,
|
||||
},
|
||||
DBLogs: DBConfig{
|
||||
File: "/data/scn_logs.sqlite3",
|
||||
Journal: "DELETE",
|
||||
Timeout: 5 * time.Second,
|
||||
CheckForeignKeys: false,
|
||||
SingleConn: false,
|
||||
MaxOpenConns: 5,
|
||||
MaxIdleConns: 5,
|
||||
ConnMaxLifetime: 60 * time.Minute,
|
||||
ConnMaxIdleTime: 60 * time.Minute,
|
||||
},
|
||||
RequestTimeout: 16 * time.Second,
|
||||
RequestMaxRetry: 8,
|
||||
RequestRetrySleep: 100 * time.Millisecond,
|
||||
@ -169,15 +247,39 @@ var configStag = func() Config {
|
||||
LogLevel: zerolog.DebugLevel,
|
||||
ServerIP: "0.0.0.0",
|
||||
ServerPort: "80",
|
||||
DBFile: "/data/scn.sqlite3",
|
||||
DBJournal: "WAL",
|
||||
DBTimeout: 5 * time.Second,
|
||||
DBCheckForeignKeys: false,
|
||||
DBSingleConn: false,
|
||||
DBMaxOpenConns: 5,
|
||||
DBMaxIdleConns: 5,
|
||||
DBConnMaxLifetime: 60 * time.Minute,
|
||||
DBConnMaxIdleTime: 60 * time.Minute,
|
||||
DBMain: DBConfig{
|
||||
File: "/data/scn_main.sqlite3",
|
||||
Journal: "WAL",
|
||||
Timeout: 5 * time.Second,
|
||||
CheckForeignKeys: false,
|
||||
SingleConn: false,
|
||||
MaxOpenConns: 5,
|
||||
MaxIdleConns: 5,
|
||||
ConnMaxLifetime: 60 * time.Minute,
|
||||
ConnMaxIdleTime: 60 * time.Minute,
|
||||
},
|
||||
DBRequests: DBConfig{
|
||||
File: "/data/scn_requests.sqlite3",
|
||||
Journal: "DELETE",
|
||||
Timeout: 5 * time.Second,
|
||||
CheckForeignKeys: false,
|
||||
SingleConn: false,
|
||||
MaxOpenConns: 5,
|
||||
MaxIdleConns: 5,
|
||||
ConnMaxLifetime: 60 * time.Minute,
|
||||
ConnMaxIdleTime: 60 * time.Minute,
|
||||
},
|
||||
DBLogs: DBConfig{
|
||||
File: "/data/scn_logs.sqlite3",
|
||||
Journal: "DELETE",
|
||||
Timeout: 5 * time.Second,
|
||||
CheckForeignKeys: false,
|
||||
SingleConn: false,
|
||||
MaxOpenConns: 5,
|
||||
MaxIdleConns: 5,
|
||||
ConnMaxLifetime: 60 * time.Minute,
|
||||
ConnMaxIdleTime: 60 * time.Minute,
|
||||
},
|
||||
RequestTimeout: 16 * time.Second,
|
||||
RequestMaxRetry: 8,
|
||||
RequestRetrySleep: 100 * time.Millisecond,
|
||||
@ -207,15 +309,39 @@ var configProd = func() Config {
|
||||
LogLevel: zerolog.InfoLevel,
|
||||
ServerIP: "0.0.0.0",
|
||||
ServerPort: "80",
|
||||
DBFile: "/data/scn.sqlite3",
|
||||
DBJournal: "WAL",
|
||||
DBTimeout: 5 * time.Second,
|
||||
DBCheckForeignKeys: false,
|
||||
DBSingleConn: false,
|
||||
DBMaxOpenConns: 5,
|
||||
DBMaxIdleConns: 5,
|
||||
DBConnMaxLifetime: 60 * time.Minute,
|
||||
DBConnMaxIdleTime: 60 * time.Minute,
|
||||
DBMain: DBConfig{
|
||||
File: "/data/scn_main.sqlite3",
|
||||
Journal: "WAL",
|
||||
Timeout: 5 * time.Second,
|
||||
CheckForeignKeys: false,
|
||||
SingleConn: false,
|
||||
MaxOpenConns: 5,
|
||||
MaxIdleConns: 5,
|
||||
ConnMaxLifetime: 60 * time.Minute,
|
||||
ConnMaxIdleTime: 60 * time.Minute,
|
||||
},
|
||||
DBRequests: DBConfig{
|
||||
File: "/data/scn_requests.sqlite3",
|
||||
Journal: "DELETE",
|
||||
Timeout: 5 * time.Second,
|
||||
CheckForeignKeys: false,
|
||||
SingleConn: false,
|
||||
MaxOpenConns: 5,
|
||||
MaxIdleConns: 5,
|
||||
ConnMaxLifetime: 60 * time.Minute,
|
||||
ConnMaxIdleTime: 60 * time.Minute,
|
||||
},
|
||||
DBLogs: DBConfig{
|
||||
File: "/data/scn_logs.sqlite3",
|
||||
Journal: "DELETE",
|
||||
Timeout: 5 * time.Second,
|
||||
CheckForeignKeys: false,
|
||||
SingleConn: false,
|
||||
MaxOpenConns: 5,
|
||||
MaxIdleConns: 5,
|
||||
ConnMaxLifetime: 60 * time.Minute,
|
||||
ConnMaxIdleTime: 60 * time.Minute,
|
||||
},
|
||||
RequestTimeout: 16 * time.Second,
|
||||
RequestMaxRetry: 8,
|
||||
RequestRetrySleep: 100 * time.Millisecond,
|
||||
@ -245,7 +371,7 @@ var allConfig = map[string]func() Config{
|
||||
"production": configProd,
|
||||
}
|
||||
|
||||
func getConfig(ns string) (Config, bool) {
|
||||
func GetConfig(ns string) (Config, bool) {
|
||||
if ns == "" {
|
||||
ns = "local-host"
|
||||
}
|
||||
@ -272,7 +398,7 @@ func confEnv(key string) string {
|
||||
func init() {
|
||||
ns := os.Getenv("SCN_NAMESPACE")
|
||||
|
||||
cfg, ok := getConfig(ns)
|
||||
cfg, ok := GetConfig(ns)
|
||||
if !ok {
|
||||
log.Fatal().Str("ns", ns).Msg("Unknown config-namespace")
|
||||
}
|
||||
|
@ -1,110 +1,27 @@
|
||||
package db
|
||||
|
||||
import (
|
||||
server "blackforestbytes.com/simplecloudnotifier"
|
||||
"blackforestbytes.com/simplecloudnotifier/db/dbtools"
|
||||
"blackforestbytes.com/simplecloudnotifier/db/schema"
|
||||
"context"
|
||||
"database/sql"
|
||||
"errors"
|
||||
"fmt"
|
||||
"github.com/jmoiron/sqlx"
|
||||
_ "github.com/mattn/go-sqlite3"
|
||||
"gogs.mikescher.com/BlackForestBytes/goext/langext"
|
||||
"gogs.mikescher.com/BlackForestBytes/goext/sq"
|
||||
"time"
|
||||
)
|
||||
|
||||
type Database struct {
|
||||
db sq.DB
|
||||
pp *dbtools.DBPreprocessor
|
||||
}
|
||||
|
||||
func NewDatabase(conf server.Config) (*Database, error) {
|
||||
url := fmt.Sprintf("file:%s?_journal=%s&_timeout=%d&_fk=%s", conf.DBFile, conf.DBJournal, conf.DBTimeout.Milliseconds(), langext.FormatBool(conf.DBCheckForeignKeys, "true", "false"))
|
||||
|
||||
xdb, err := sqlx.Open("sqlite3", url)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if conf.DBSingleConn {
|
||||
xdb.SetMaxOpenConns(1)
|
||||
} else {
|
||||
xdb.SetMaxOpenConns(5)
|
||||
xdb.SetMaxIdleConns(5)
|
||||
xdb.SetConnMaxLifetime(60 * time.Minute)
|
||||
xdb.SetConnMaxIdleTime(60 * time.Minute)
|
||||
}
|
||||
|
||||
qqdb := sq.NewDB(xdb)
|
||||
|
||||
qqdb.AddListener(dbtools.DBLogger{})
|
||||
|
||||
pp, err := dbtools.NewDBPreprocessor(qqdb)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
qqdb.AddListener(pp)
|
||||
|
||||
scndb := &Database{db: qqdb, pp: pp}
|
||||
|
||||
return scndb, nil
|
||||
}
|
||||
|
||||
func (db *Database) Migrate(ctx context.Context) error {
|
||||
ctx, cancel := context.WithTimeout(context.Background(), 24*time.Second)
|
||||
defer cancel()
|
||||
|
||||
currschema, err := db.ReadSchema(ctx)
|
||||
if currschema == 0 {
|
||||
|
||||
_, err = db.db.Exec(ctx, schema.Schema3, sq.PP{})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
err = db.WriteMetaInt(ctx, "schema", 3)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
err = db.pp.Init(ctx)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return nil
|
||||
|
||||
} else if currschema == 1 {
|
||||
return errors.New("cannot autom. upgrade schema 1")
|
||||
} else if currschema == 2 {
|
||||
return errors.New("cannot autom. upgrade schema 2") //TODO
|
||||
} else if currschema == 3 {
|
||||
return nil // current
|
||||
} else {
|
||||
return errors.New(fmt.Sprintf("Unknown DB schema: %d", currschema))
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
func (db *Database) Ping(ctx context.Context) error {
|
||||
return db.db.Ping(ctx)
|
||||
}
|
||||
|
||||
func (db *Database) BeginTx(ctx context.Context) (sq.Tx, error) {
|
||||
return db.db.BeginTransaction(ctx, sql.LevelDefault)
|
||||
}
|
||||
|
||||
func (db *Database) Stop(ctx context.Context) error {
|
||||
_, err := db.db.Exec(ctx, "PRAGMA wal_checkpoint;", sq.PP{})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
err = db.db.Exit()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
type DatabaseImpl interface {
|
||||
Migrate(ctx context.Context) error
|
||||
Ping(ctx context.Context) error
|
||||
BeginTx(ctx context.Context) (sq.Tx, error)
|
||||
Stop(ctx context.Context) error
|
||||
|
||||
ReadSchema(ctx context.Context) (int, error)
|
||||
|
||||
WriteMetaString(ctx context.Context, key string, value string) error
|
||||
WriteMetaInt(ctx context.Context, key string, value int64) error
|
||||
WriteMetaReal(ctx context.Context, key string, value float64) error
|
||||
WriteMetaBlob(ctx context.Context, key string, value []byte) error
|
||||
|
||||
ReadMetaString(ctx context.Context, key string) (*string, error)
|
||||
ReadMetaInt(ctx context.Context, key string) (*int64, error)
|
||||
ReadMetaReal(ctx context.Context, key string) (*float64, error)
|
||||
ReadMetaBlob(ctx context.Context, key string) (*[]byte, error)
|
||||
|
||||
DeleteMeta(ctx context.Context, key string) error
|
||||
}
|
||||
|
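The old concrete `*db.Database` is replaced here by the `DatabaseImpl` interface, so shared code (the health check, the transaction context, the `DBPool` further down) can treat the primary, requests, and logs databases uniformly. A minimal consumer under that assumption — the helper itself is hypothetical and simply mirrors what the Health handler does per sub-database:

```go
// Hypothetical helper that works on any of the three databases via db.DatabaseImpl.
// This function does not exist in the commit; it only illustrates the interface.
package example

import (
	"context"
	"fmt"

	"blackforestbytes.com/simplecloudnotifier/db"
)

func verifyMetaRoundtrip(ctx context.Context, d db.DatabaseImpl, key string, value string) error {
	// write a value into the meta table of this sub-database
	if err := d.WriteMetaString(ctx, key, value); err != nil {
		return err
	}

	// read it back and compare
	read, err := d.ReadMetaString(ctx, key)
	if err != nil {
		return err
	}
	if read == nil || *read != value {
		return fmt.Errorf("meta roundtrip mismatch for key %q", key)
	}

	// clean up the test entry
	return d.DeleteMeta(ctx, key)
}
```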
@ -8,7 +8,9 @@ import (
|
||||
"strings"
|
||||
)
|
||||
|
||||
type DBLogger struct{}
|
||||
type DBLogger struct {
|
||||
Ident string
|
||||
}
|
||||
|
||||
func (l DBLogger) PrePing(ctx context.Context) error {
|
||||
log.Debug().Msg("[SQL-PING]")
|
||||
@ -17,28 +19,28 @@ func (l DBLogger) PrePing(ctx context.Context) error {
|
||||
}
|
||||
|
||||
func (l DBLogger) PreTxBegin(ctx context.Context, txid uint16) error {
|
||||
log.Debug().Msg(fmt.Sprintf("[SQL-TX<%d>-START]", txid))
|
||||
log.Debug().Msg(fmt.Sprintf("[SQL-TX<%s|%d>-START]", l.Ident, txid))
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (l DBLogger) PreTxCommit(txid uint16) error {
|
||||
log.Debug().Msg(fmt.Sprintf("[SQL-TX<%d>-COMMIT]", txid))
|
||||
log.Debug().Msg(fmt.Sprintf("[SQL-TX<%s|%d>-COMMIT]", l.Ident, txid))
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (l DBLogger) PreTxRollback(txid uint16) error {
|
||||
log.Debug().Msg(fmt.Sprintf("[SQL-TX<%d>-ROLLBACK]", txid))
|
||||
log.Debug().Msg(fmt.Sprintf("[SQL-TX<%s|%d>-ROLLBACK]", l.Ident, txid))
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (l DBLogger) PreQuery(ctx context.Context, txID *uint16, sql *string, params *sq.PP) error {
|
||||
if txID == nil {
|
||||
log.Debug().Msg(fmt.Sprintf("[SQL-QUERY] %s", fmtSQLPrint(*sql)))
|
||||
log.Debug().Msg(fmt.Sprintf("[SQL<%s>-QUERY] %s", l.Ident, fmtSQLPrint(*sql)))
|
||||
} else {
|
||||
log.Debug().Msg(fmt.Sprintf("[SQL-TX<%d>-QUERY] %s", *txID, fmtSQLPrint(*sql)))
|
||||
log.Debug().Msg(fmt.Sprintf("[SQL-TX<%s|%d>-QUERY] %s", l.Ident, *txID, fmtSQLPrint(*sql)))
|
||||
}
|
||||
|
||||
return nil
|
||||
@ -46,9 +48,9 @@ func (l DBLogger) PreQuery(ctx context.Context, txID *uint16, sql *string, param
|
||||
|
||||
func (l DBLogger) PreExec(ctx context.Context, txID *uint16, sql *string, params *sq.PP) error {
|
||||
if txID == nil {
|
||||
log.Debug().Msg(fmt.Sprintf("[SQL-EXEC] %s", fmtSQLPrint(*sql)))
|
||||
log.Debug().Msg(fmt.Sprintf("[SQL-<%s>-EXEC] %s", l.Ident, fmtSQLPrint(*sql)))
|
||||
} else {
|
||||
log.Debug().Msg(fmt.Sprintf("[SQL-TX<%d>-EXEC] %s", *txID, fmtSQLPrint(*sql)))
|
||||
log.Debug().Msg(fmt.Sprintf("[SQL-TX<%s|%d>-EXEC] %s", l.Ident, *txID, fmtSQLPrint(*sql)))
|
||||
}
|
||||
|
||||
return nil
|
||||
|
111
scnserver/db/impl/logs/database.go
Normal file
@ -0,0 +1,111 @@
|
||||
package logs
|
||||
|
||||
import (
|
||||
server "blackforestbytes.com/simplecloudnotifier"
|
||||
"blackforestbytes.com/simplecloudnotifier/db/dbtools"
|
||||
"blackforestbytes.com/simplecloudnotifier/db/impl/logs/schema"
|
||||
"context"
|
||||
"database/sql"
|
||||
"errors"
|
||||
"fmt"
|
||||
"github.com/jmoiron/sqlx"
|
||||
_ "github.com/mattn/go-sqlite3"
|
||||
"gogs.mikescher.com/BlackForestBytes/goext/langext"
|
||||
"gogs.mikescher.com/BlackForestBytes/goext/sq"
|
||||
"time"
|
||||
)
|
||||
|
||||
type Database struct {
|
||||
db sq.DB
|
||||
pp *dbtools.DBPreprocessor
|
||||
wal bool
|
||||
}
|
||||
|
||||
func NewLogsDatabase(cfg server.Config) (*Database, error) {
|
||||
conf := cfg.DBLogs
|
||||
|
||||
url := fmt.Sprintf("file:%s?_journal=%s&_timeout=%d&_fk=%s", conf.File, conf.Journal, conf.Timeout.Milliseconds(), langext.FormatBool(conf.CheckForeignKeys, "true", "false"))
|
||||
|
||||
xdb, err := sqlx.Open("sqlite3", url)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if conf.SingleConn {
|
||||
xdb.SetMaxOpenConns(1)
|
||||
} else {
|
||||
xdb.SetMaxOpenConns(5)
|
||||
xdb.SetMaxIdleConns(5)
|
||||
xdb.SetConnMaxLifetime(60 * time.Minute)
|
||||
xdb.SetConnMaxIdleTime(60 * time.Minute)
|
||||
}
|
||||
|
||||
qqdb := sq.NewDB(xdb)
|
||||
|
||||
qqdb.AddListener(dbtools.DBLogger{})
|
||||
|
||||
pp, err := dbtools.NewDBPreprocessor(qqdb)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
qqdb.AddListener(pp)
|
||||
|
||||
scndb := &Database{db: qqdb, pp: pp, wal: conf.Journal == "WAL"}
|
||||
|
||||
return scndb, nil
|
||||
}
|
||||
|
||||
func (db *Database) Migrate(ctx context.Context) error {
|
||||
ctx, cancel := context.WithTimeout(context.Background(), 24*time.Second)
|
||||
defer cancel()
|
||||
|
||||
currschema, err := db.ReadSchema(ctx)
|
||||
if currschema == 0 {
|
||||
|
||||
_, err = db.db.Exec(ctx, schema.LogsSchema1, sq.PP{})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
err = db.WriteMetaInt(ctx, "schema", 1)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
err = db.pp.Init(ctx)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return nil
|
||||
|
||||
} else if currschema == 1 {
|
||||
return nil // current
|
||||
} else {
|
||||
return errors.New(fmt.Sprintf("Unknown DB schema: %d", currschema))
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
func (db *Database) Ping(ctx context.Context) error {
|
||||
return db.db.Ping(ctx)
|
||||
}
|
||||
|
||||
func (db *Database) BeginTx(ctx context.Context) (sq.Tx, error) {
|
||||
return db.db.BeginTransaction(ctx, sql.LevelDefault)
|
||||
}
|
||||
|
||||
func (db *Database) Stop(ctx context.Context) error {
|
||||
if db.wal {
|
||||
_, err := db.db.Exec(ctx, "PRAGMA wal_checkpoint;", sq.PP{})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
err := db.db.Exit()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
@ -1,4 +1,4 @@
|
||||
package db
|
||||
package logs
|
||||
|
||||
import (
|
||||
"context"
|
6
scnserver/db/impl/logs/schema/assets.go
Normal file
@ -0,0 +1,6 @@
|
||||
package schema
|
||||
|
||||
import _ "embed"
|
||||
|
||||
//go:embed schema_1.ddl
|
||||
var LogsSchema1 string
|
22
scnserver/db/impl/logs/schema/schema_1.ddl
Normal file
@ -0,0 +1,22 @@
|
||||
|
||||
CREATE TABLE `logs`
|
||||
(
|
||||
log_id INTEGER PRIMARY KEY,
|
||||
timestamp_created INTEGER NOT NULL
|
||||
|
||||
) STRICT;
|
||||
|
||||
|
||||
CREATE TABLE `meta`
|
||||
(
|
||||
meta_key TEXT NOT NULL,
|
||||
value_int INTEGER NULL,
|
||||
value_txt TEXT NULL,
|
||||
value_real REAL NULL,
|
||||
value_blob BLOB NULL,
|
||||
|
||||
PRIMARY KEY (meta_key)
|
||||
) STRICT;
|
||||
|
||||
|
||||
INSERT INTO meta (meta_key, value_int) VALUES ('schema', 1)
|
25
scnserver/db/impl/logs/utils.go
Normal file
@ -0,0 +1,25 @@
|
||||
package logs
|
||||
|
||||
import (
|
||||
"gogs.mikescher.com/BlackForestBytes/goext/langext"
|
||||
"time"
|
||||
)
|
||||
|
||||
func bool2DB(b bool) int {
|
||||
if b {
|
||||
return 1
|
||||
} else {
|
||||
return 0
|
||||
}
|
||||
}
|
||||
|
||||
func time2DB(t time.Time) int64 {
|
||||
return t.UnixMilli()
|
||||
}
|
||||
|
||||
func time2DBOpt(t *time.Time) *int64 {
|
||||
if t == nil {
|
||||
return nil
|
||||
}
|
||||
return langext.Ptr(t.UnixMilli())
|
||||
}
|
@ -1,4 +1,4 @@
|
||||
package db
|
||||
package primary
|
||||
|
||||
import (
|
||||
"blackforestbytes.com/simplecloudnotifier/models"
|
@ -1,4 +1,4 @@
|
||||
package db
|
||||
package primary
|
||||
|
||||
import (
|
||||
"blackforestbytes.com/simplecloudnotifier/models"
|
@ -1,6 +1,7 @@
|
||||
package db
|
||||
package primary
|
||||
|
||||
import (
|
||||
"blackforestbytes.com/simplecloudnotifier/db"
|
||||
"gogs.mikescher.com/BlackForestBytes/goext/sq"
|
||||
"time"
|
||||
)
|
||||
@ -11,5 +12,5 @@ type TxContext interface {
|
||||
Err() error
|
||||
Value(key any) any
|
||||
|
||||
GetOrCreateTransaction(db *Database) (sq.Tx, error)
|
||||
GetOrCreateTransaction(db db.DatabaseImpl) (sq.Tx, error)
|
||||
}
|
115
scnserver/db/impl/primary/database.go
Normal file
@ -0,0 +1,115 @@
|
||||
package primary
|
||||
|
||||
import (
|
||||
server "blackforestbytes.com/simplecloudnotifier"
|
||||
"blackforestbytes.com/simplecloudnotifier/db/dbtools"
|
||||
"blackforestbytes.com/simplecloudnotifier/db/impl/primary/schema"
|
||||
"context"
|
||||
"database/sql"
|
||||
"errors"
|
||||
"fmt"
|
||||
"github.com/jmoiron/sqlx"
|
||||
_ "github.com/mattn/go-sqlite3"
|
||||
"gogs.mikescher.com/BlackForestBytes/goext/langext"
|
||||
"gogs.mikescher.com/BlackForestBytes/goext/sq"
|
||||
"time"
|
||||
)
|
||||
|
||||
type Database struct {
|
||||
db sq.DB
|
||||
pp *dbtools.DBPreprocessor
|
||||
wal bool
|
||||
}
|
||||
|
||||
func NewPrimaryDatabase(cfg server.Config) (*Database, error) {
|
||||
conf := cfg.DBMain
|
||||
|
||||
url := fmt.Sprintf("file:%s?_journal=%s&_timeout=%d&_fk=%s", conf.File, conf.Journal, conf.Timeout.Milliseconds(), langext.FormatBool(conf.CheckForeignKeys, "true", "false"))
|
||||
|
||||
xdb, err := sqlx.Open("sqlite3", url)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if conf.SingleConn {
|
||||
xdb.SetMaxOpenConns(1)
|
||||
} else {
|
||||
xdb.SetMaxOpenConns(5)
|
||||
xdb.SetMaxIdleConns(5)
|
||||
xdb.SetConnMaxLifetime(60 * time.Minute)
|
||||
xdb.SetConnMaxIdleTime(60 * time.Minute)
|
||||
}
|
||||
|
||||
qqdb := sq.NewDB(xdb)
|
||||
|
||||
qqdb.AddListener(dbtools.DBLogger{})
|
||||
|
||||
pp, err := dbtools.NewDBPreprocessor(qqdb)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
qqdb.AddListener(pp)
|
||||
|
||||
scndb := &Database{db: qqdb, pp: pp, wal: conf.Journal == "WAL"}
|
||||
|
||||
return scndb, nil
|
||||
}
|
||||
|
||||
func (db *Database) Migrate(ctx context.Context) error {
|
||||
ctx, cancel := context.WithTimeout(context.Background(), 24*time.Second)
|
||||
defer cancel()
|
||||
|
||||
currschema, err := db.ReadSchema(ctx)
|
||||
if currschema == 0 {
|
||||
|
||||
_, err = db.db.Exec(ctx, schema.PrimarySchema3, sq.PP{})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
err = db.WriteMetaInt(ctx, "schema", 3)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
err = db.pp.Init(ctx)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return nil
|
||||
|
||||
} else if currschema == 1 {
|
||||
return errors.New("cannot autom. upgrade schema 1")
|
||||
} else if currschema == 2 {
|
||||
return errors.New("cannot autom. upgrade schema 2") //TODO
|
||||
} else if currschema == 3 {
|
||||
return nil // current
|
||||
} else {
|
||||
return errors.New(fmt.Sprintf("Unknown DB schema: %d", currschema))
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
func (db *Database) Ping(ctx context.Context) error {
|
||||
return db.db.Ping(ctx)
|
||||
}
|
||||
|
||||
func (db *Database) BeginTx(ctx context.Context) (sq.Tx, error) {
|
||||
return db.db.BeginTransaction(ctx, sql.LevelDefault)
|
||||
}
|
||||
|
||||
func (db *Database) Stop(ctx context.Context) error {
|
||||
if db.wal {
|
||||
_, err := db.db.Exec(ctx, "PRAGMA wal_checkpoint;", sq.PP{})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
err := db.db.Exit()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
@ -1,4 +1,4 @@
|
||||
package db
|
||||
package primary
|
||||
|
||||
import (
|
||||
scn "blackforestbytes.com/simplecloudnotifier"
|
@ -1,4 +1,4 @@
|
||||
package db
|
||||
package primary
|
||||
|
||||
import (
|
||||
"blackforestbytes.com/simplecloudnotifier/db/cursortoken"
|
242
scnserver/db/impl/primary/meta.go
Normal file
@ -0,0 +1,242 @@
|
||||
package primary
|
||||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"gogs.mikescher.com/BlackForestBytes/goext/langext"
|
||||
"gogs.mikescher.com/BlackForestBytes/goext/sq"
|
||||
)
|
||||
|
||||
func (db *Database) ReadSchema(ctx context.Context) (retval int, reterr error) {
|
||||
|
||||
r1, err := db.db.Query(ctx, "SELECT name FROM sqlite_master WHERE type = :typ AND name = :name", sq.PP{"typ": "table", "name": "meta"})
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
defer func() {
|
||||
err = r1.Close()
|
||||
if err != nil {
|
||||
// overwrite return values
|
||||
retval = 0
|
||||
reterr = err
|
||||
}
|
||||
}()
|
||||
|
||||
if !r1.Next() {
|
||||
return 0, nil
|
||||
}
|
||||
|
||||
err = r1.Close()
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
|
||||
r2, err := db.db.Query(ctx, "SELECT value_int FROM meta WHERE meta_key = :key", sq.PP{"key": "schema"})
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
defer func() {
|
||||
err = r2.Close()
|
||||
if err != nil {
|
||||
// overwrite return values
|
||||
retval = 0
|
||||
reterr = err
|
||||
}
|
||||
}()
|
||||
|
||||
if !r2.Next() {
|
||||
return 0, errors.New("no schema entry in meta table")
|
||||
}
|
||||
|
||||
var dbschema int
|
||||
err = r2.Scan(&dbschema)
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
|
||||
err = r2.Close()
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
|
||||
return dbschema, nil
|
||||
}
|
||||
|
||||
func (db *Database) WriteMetaString(ctx context.Context, key string, value string) error {
|
||||
_, err := db.db.Exec(ctx, "INSERT INTO meta (meta_key, value_txt) VALUES (:key, :val) ON CONFLICT(meta_key) DO UPDATE SET value_txt = :val", sq.PP{
|
||||
"key": key,
|
||||
"val": value,
|
||||
})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (db *Database) WriteMetaInt(ctx context.Context, key string, value int64) error {
|
||||
_, err := db.db.Exec(ctx, "INSERT INTO meta (meta_key, value_int) VALUES (:key, :val) ON CONFLICT(meta_key) DO UPDATE SET value_int = :val", sq.PP{
|
||||
"key": key,
|
||||
"val": value,
|
||||
})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (db *Database) WriteMetaReal(ctx context.Context, key string, value float64) error {
|
||||
_, err := db.db.Exec(ctx, "INSERT INTO meta (meta_key, value_real) VALUES (:key, :val) ON CONFLICT(meta_key) DO UPDATE SET value_real = :val", sq.PP{
|
||||
"key": key,
|
||||
"val": value,
|
||||
})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (db *Database) WriteMetaBlob(ctx context.Context, key string, value []byte) error {
|
||||
_, err := db.db.Exec(ctx, "INSERT INTO meta (meta_key, value_blob) VALUES (:key, :val) ON CONFLICT(meta_key) DO UPDATE SET value_blob = :val", sq.PP{
|
||||
"key": key,
|
||||
"val": value,
|
||||
})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (db *Database) ReadMetaString(ctx context.Context, key string) (retval *string, reterr error) {
|
||||
r2, err := db.db.Query(ctx, "SELECT value_txt FROM meta WHERE meta_key = :key", sq.PP{"key": key})
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
defer func() {
|
||||
err = r2.Close()
|
||||
if err != nil {
|
||||
// overwrite return values
|
||||
retval = nil
|
||||
reterr = err
|
||||
}
|
||||
}()
|
||||
if !r2.Next() {
|
||||
return nil, errors.New("no matching entry in meta table")
|
||||
}
|
||||
|
||||
var value string
|
||||
err = r2.Scan(&value)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
err = r2.Close()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return langext.Ptr(value), nil
|
||||
}
|
||||
|
||||
func (db *Database) ReadMetaInt(ctx context.Context, key string) (retval *int64, reterr error) {
|
||||
r2, err := db.db.Query(ctx, "SELECT value_int FROM meta WHERE meta_key = :key", sq.PP{"key": key})
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
defer func() {
|
||||
err = r2.Close()
|
||||
if err != nil {
|
||||
// overwrite return values
|
||||
retval = nil
|
||||
reterr = err
|
||||
}
|
||||
}()
|
||||
|
||||
if !r2.Next() {
|
||||
return nil, errors.New("no matching entry in meta table")
|
||||
}
|
||||
|
||||
var value int64
|
||||
err = r2.Scan(&value)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
err = r2.Close()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return langext.Ptr(value), nil
|
||||
}
|
||||
|
||||
func (db *Database) ReadMetaReal(ctx context.Context, key string) (retval *float64, reterr error) {
|
||||
r2, err := db.db.Query(ctx, "SELECT value_real FROM meta WHERE meta_key = :key", sq.PP{"key": key})
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
defer func() {
|
||||
err = r2.Close()
|
||||
if err != nil {
|
||||
// overwrite return values
|
||||
retval = nil
|
||||
reterr = err
|
||||
}
|
||||
}()
|
||||
|
||||
if !r2.Next() {
|
||||
return nil, errors.New("no matching entry in meta table")
|
||||
}
|
||||
|
||||
var value float64
|
||||
err = r2.Scan(&value)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
err = r2.Close()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return langext.Ptr(value), nil
|
||||
}
|
||||
|
||||
func (db *Database) ReadMetaBlob(ctx context.Context, key string) (retval *[]byte, reterr error) {
|
||||
r2, err := db.db.Query(ctx, "SELECT value_blob FROM meta WHERE meta_key = :key", sq.PP{"key": key})
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
defer func() {
|
||||
err = r2.Close()
|
||||
if err != nil {
|
||||
// overwrite return values
|
||||
retval = nil
|
||||
reterr = err
|
||||
}
|
||||
}()
|
||||
|
||||
if !r2.Next() {
|
||||
return nil, errors.New("no matching entry in meta table")
|
||||
}
|
||||
|
||||
var value []byte
|
||||
err = r2.Scan(&value)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
err = r2.Close()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return langext.Ptr(value), nil
|
||||
}
|
||||
|
||||
func (db *Database) DeleteMeta(ctx context.Context, key string) error {
|
||||
_, err := db.db.Exec(ctx, "DELETE FROM meta WHERE meta_key = :key", sq.PP{"key": key})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
@ -3,10 +3,10 @@ package schema
|
||||
import _ "embed"
|
||||
|
||||
//go:embed schema_1.ddl
|
||||
var Schema1 string
|
||||
var PrimarySchema1 string
|
||||
|
||||
//go:embed schema_2.ddl
|
||||
var Schema2 string
|
||||
var PrimarySchema2 string
|
||||
|
||||
//go:embed schema_3.ddl
|
||||
var Schema3 string
|
||||
var PrimarySchema3 string
|
7
scnserver/db/impl/primary/schema/schema_sqlite.ddl
Normal file
@ -0,0 +1,7 @@
|
||||
CREATE TABLE sqlite_master (
|
||||
type text,
|
||||
name text,
|
||||
tbl_name text,
|
||||
rootpage integer,
|
||||
sql text
|
||||
);
|
@ -1,4 +1,4 @@
|
||||
package db
|
||||
package primary
|
||||
|
||||
import (
|
||||
"blackforestbytes.com/simplecloudnotifier/models"
|
@ -1,4 +1,4 @@
|
||||
package db
|
||||
package primary
|
||||
|
||||
import (
|
||||
scn "blackforestbytes.com/simplecloudnotifier"
|
25
scnserver/db/impl/primary/utils.go
Normal file
@ -0,0 +1,25 @@
|
||||
package primary
|
||||
|
||||
import (
|
||||
"gogs.mikescher.com/BlackForestBytes/goext/langext"
|
||||
"time"
|
||||
)
|
||||
|
||||
func bool2DB(b bool) int {
|
||||
if b {
|
||||
return 1
|
||||
} else {
|
||||
return 0
|
||||
}
|
||||
}
|
||||
|
||||
func time2DB(t time.Time) int64 {
|
||||
return t.UnixMilli()
|
||||
}
|
||||
|
||||
func time2DBOpt(t *time.Time) *int64 {
|
||||
if t == nil {
|
||||
return nil
|
||||
}
|
||||
return langext.Ptr(t.UnixMilli())
|
||||
}
|
111
scnserver/db/impl/requests/database.go
Normal file
@ -0,0 +1,111 @@
|
||||
package requests
|
||||
|
||||
import (
|
||||
server "blackforestbytes.com/simplecloudnotifier"
|
||||
"blackforestbytes.com/simplecloudnotifier/db/dbtools"
|
||||
"blackforestbytes.com/simplecloudnotifier/db/impl/requests/schema"
|
||||
"context"
|
||||
"database/sql"
|
||||
"errors"
|
||||
"fmt"
|
||||
"github.com/jmoiron/sqlx"
|
||||
_ "github.com/mattn/go-sqlite3"
|
||||
"gogs.mikescher.com/BlackForestBytes/goext/langext"
|
||||
"gogs.mikescher.com/BlackForestBytes/goext/sq"
|
||||
"time"
|
||||
)
|
||||
|
||||
type Database struct {
|
||||
db sq.DB
|
||||
pp *dbtools.DBPreprocessor
|
||||
wal bool
|
||||
}
|
||||
|
||||
func NewRequestsDatabase(cfg server.Config) (*Database, error) {
|
||||
conf := cfg.DBRequests
|
||||
|
||||
url := fmt.Sprintf("file:%s?_journal=%s&_timeout=%d&_fk=%s", conf.File, conf.Journal, conf.Timeout.Milliseconds(), langext.FormatBool(conf.CheckForeignKeys, "true", "false"))
|
||||
|
||||
xdb, err := sqlx.Open("sqlite3", url)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if conf.SingleConn {
|
||||
xdb.SetMaxOpenConns(1)
|
||||
} else {
|
||||
xdb.SetMaxOpenConns(5)
|
||||
xdb.SetMaxIdleConns(5)
|
||||
xdb.SetConnMaxLifetime(60 * time.Minute)
|
||||
xdb.SetConnMaxIdleTime(60 * time.Minute)
|
||||
}
|
||||
|
||||
qqdb := sq.NewDB(xdb)
|
||||
|
||||
qqdb.AddListener(dbtools.DBLogger{})
|
||||
|
||||
pp, err := dbtools.NewDBPreprocessor(qqdb)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
qqdb.AddListener(pp)
|
||||
|
||||
scndb := &Database{db: qqdb, pp: pp, wal: conf.Journal == "WAL"}
|
||||
|
||||
return scndb, nil
|
||||
}
|
||||
|
||||
func (db *Database) Migrate(ctx context.Context) error {
|
||||
ctx, cancel := context.WithTimeout(context.Background(), 24*time.Second)
|
||||
defer cancel()
|
||||
|
||||
currschema, err := db.ReadSchema(ctx)
|
||||
if currschema == 0 {
|
||||
|
||||
_, err = db.db.Exec(ctx, schema.RequestsSchema1, sq.PP{})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
err = db.WriteMetaInt(ctx, "schema", 1)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
err = db.pp.Init(ctx)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return nil
|
||||
|
||||
} else if currschema == 1 {
|
||||
return nil // current
|
||||
} else {
|
||||
return errors.New(fmt.Sprintf("Unknown DB schema: %d", currschema))
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
func (db *Database) Ping(ctx context.Context) error {
|
||||
return db.db.Ping(ctx)
|
||||
}
|
||||
|
||||
func (db *Database) BeginTx(ctx context.Context) (sq.Tx, error) {
|
||||
return db.db.BeginTransaction(ctx, sql.LevelDefault)
|
||||
}
|
||||
|
||||
func (db *Database) Stop(ctx context.Context) error {
|
||||
if db.wal {
|
||||
_, err := db.db.Exec(ctx, "PRAGMA wal_checkpoint;", sq.PP{})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
err := db.db.Exit()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
242
scnserver/db/impl/requests/meta.go
Normal file
@ -0,0 +1,242 @@
|
||||
package requests
|
||||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"gogs.mikescher.com/BlackForestBytes/goext/langext"
|
||||
"gogs.mikescher.com/BlackForestBytes/goext/sq"
|
||||
)
|
||||
|
||||
func (db *Database) ReadSchema(ctx context.Context) (retval int, reterr error) {
|
||||
|
||||
r1, err := db.db.Query(ctx, "SELECT name FROM sqlite_master WHERE type = :typ AND name = :name", sq.PP{"typ": "table", "name": "meta"})
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
defer func() {
|
||||
err = r1.Close()
|
||||
if err != nil {
|
||||
// overwrite return values
|
||||
retval = 0
|
||||
reterr = err
|
||||
}
|
||||
}()
|
||||
|
||||
if !r1.Next() {
|
||||
return 0, nil
|
||||
}
|
||||
|
||||
err = r1.Close()
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
|
||||
r2, err := db.db.Query(ctx, "SELECT value_int FROM meta WHERE meta_key = :key", sq.PP{"key": "schema"})
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
defer func() {
|
||||
err = r2.Close()
|
||||
if err != nil {
|
||||
// overwrite return values
|
||||
retval = 0
|
||||
reterr = err
|
||||
}
|
||||
}()
|
||||
|
||||
if !r2.Next() {
|
||||
return 0, errors.New("no schema entry in meta table")
|
||||
}
|
||||
|
||||
var dbschema int
|
||||
err = r2.Scan(&dbschema)
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
|
||||
err = r2.Close()
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
|
||||
return dbschema, nil
|
||||
}
|
||||
|
||||
func (db *Database) WriteMetaString(ctx context.Context, key string, value string) error {
|
||||
_, err := db.db.Exec(ctx, "INSERT INTO meta (meta_key, value_txt) VALUES (:key, :val) ON CONFLICT(meta_key) DO UPDATE SET value_txt = :val", sq.PP{
|
||||
"key": key,
|
||||
"val": value,
|
||||
})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (db *Database) WriteMetaInt(ctx context.Context, key string, value int64) error {
|
||||
_, err := db.db.Exec(ctx, "INSERT INTO meta (meta_key, value_int) VALUES (:key, :val) ON CONFLICT(meta_key) DO UPDATE SET value_int = :val", sq.PP{
|
||||
"key": key,
|
||||
"val": value,
|
||||
})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (db *Database) WriteMetaReal(ctx context.Context, key string, value float64) error {
|
||||
_, err := db.db.Exec(ctx, "INSERT INTO meta (meta_key, value_real) VALUES (:key, :val) ON CONFLICT(meta_key) DO UPDATE SET value_real = :val", sq.PP{
|
||||
"key": key,
|
||||
"val": value,
|
||||
})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (db *Database) WriteMetaBlob(ctx context.Context, key string, value []byte) error {
|
||||
_, err := db.db.Exec(ctx, "INSERT INTO meta (meta_key, value_blob) VALUES (:key, :val) ON CONFLICT(meta_key) DO UPDATE SET value_blob = :val", sq.PP{
|
||||
"key": key,
|
||||
"val": value,
|
||||
})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (db *Database) ReadMetaString(ctx context.Context, key string) (retval *string, reterr error) {
|
||||
r2, err := db.db.Query(ctx, "SELECT value_txt FROM meta WHERE meta_key = :key", sq.PP{"key": key})
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
defer func() {
|
||||
err = r2.Close()
|
||||
if err != nil {
|
||||
// overwrite return values
|
||||
retval = nil
|
||||
reterr = err
|
||||
}
|
||||
}()
|
||||
if !r2.Next() {
|
||||
return nil, errors.New("no matching entry in meta table")
|
||||
}
|
||||
|
||||
var value string
|
||||
err = r2.Scan(&value)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
err = r2.Close()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return langext.Ptr(value), nil
|
||||
}
|
||||
|
||||
func (db *Database) ReadMetaInt(ctx context.Context, key string) (retval *int64, reterr error) {
|
||||
r2, err := db.db.Query(ctx, "SELECT value_int FROM meta WHERE meta_key = :key", sq.PP{"key": key})
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
defer func() {
|
||||
err = r2.Close()
|
||||
if err != nil {
|
||||
// overwrite return values
|
||||
retval = nil
|
||||
reterr = err
|
||||
}
|
||||
}()
|
||||
|
||||
if !r2.Next() {
|
||||
return nil, errors.New("no matching entry in meta table")
|
||||
}
|
||||
|
||||
var value int64
|
||||
err = r2.Scan(&value)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
err = r2.Close()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return langext.Ptr(value), nil
|
||||
}
|
||||
|
||||
func (db *Database) ReadMetaReal(ctx context.Context, key string) (retval *float64, reterr error) {
|
||||
r2, err := db.db.Query(ctx, "SELECT value_real FROM meta WHERE meta_key = :key", sq.PP{"key": key})
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
defer func() {
|
||||
err = r2.Close()
|
||||
if err != nil {
|
||||
// overwrite return values
|
||||
retval = nil
|
||||
reterr = err
|
||||
}
|
||||
}()
|
||||
|
||||
if !r2.Next() {
|
||||
return nil, errors.New("no matching entry in meta table")
|
||||
}
|
||||
|
||||
var value float64
|
||||
err = r2.Scan(&value)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
err = r2.Close()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return langext.Ptr(value), nil
|
||||
}
|
||||
|
||||
func (db *Database) ReadMetaBlob(ctx context.Context, key string) (retval *[]byte, reterr error) {
|
||||
r2, err := db.db.Query(ctx, "SELECT value_blob FROM meta WHERE meta_key = :key", sq.PP{"key": key})
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
defer func() {
|
||||
err = r2.Close()
|
||||
if err != nil {
|
||||
// overwrite return values
|
||||
retval = nil
|
||||
reterr = err
|
||||
}
|
||||
}()
|
||||
|
||||
if !r2.Next() {
|
||||
return nil, errors.New("no matching entry in meta table")
|
||||
}
|
||||
|
||||
var value []byte
|
||||
err = r2.Scan(&value)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
err = r2.Close()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return langext.Ptr(value), nil
|
||||
}
|
||||
|
||||
func (db *Database) DeleteMeta(ctx context.Context, key string) error {
|
||||
_, err := db.db.Exec(ctx, "DELETE FROM meta WHERE meta_key = :key", sq.PP{"key": key})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
6
scnserver/db/impl/requests/schema/assets.go
Normal file
@ -0,0 +1,6 @@
|
||||
package schema
|
||||
|
||||
import _ "embed"
|
||||
|
||||
//go:embed schema_1.ddl
|
||||
var RequestsSchema1 string
|
22
scnserver/db/impl/requests/schema/schema_1.ddl
Normal file
@ -0,0 +1,22 @@
|
||||
|
||||
CREATE TABLE `requests`
|
||||
(
|
||||
request_id INTEGER PRIMARY KEY,
|
||||
timestamp_created INTEGER NOT NULL
|
||||
|
||||
) STRICT;
|
||||
|
||||
|
||||
CREATE TABLE `meta`
|
||||
(
|
||||
meta_key TEXT NOT NULL,
|
||||
value_int INTEGER NULL,
|
||||
value_txt TEXT NULL,
|
||||
value_real REAL NULL,
|
||||
value_blob BLOB NULL,
|
||||
|
||||
PRIMARY KEY (meta_key)
|
||||
) STRICT;
|
||||
|
||||
|
||||
INSERT INTO meta (meta_key, value_int) VALUES ('schema', 1)
|
7
scnserver/db/impl/requests/schema/schema_sqlite.ddl
Normal file
@ -0,0 +1,7 @@
|
||||
CREATE TABLE sqlite_master (
|
||||
type text,
|
||||
name text,
|
||||
tbl_name text,
|
||||
rootpage integer,
|
||||
sql text
|
||||
);
|
25
scnserver/db/impl/requests/utils.go
Normal file
@ -0,0 +1,25 @@
|
||||
package requests
|
||||
|
||||
import (
|
||||
"gogs.mikescher.com/BlackForestBytes/goext/langext"
|
||||
"time"
|
||||
)
|
||||
|
||||
func bool2DB(b bool) int {
|
||||
if b {
|
||||
return 1
|
||||
} else {
|
||||
return 0
|
||||
}
|
||||
}
|
||||
|
||||
func time2DB(t time.Time) int64 {
|
||||
return t.UnixMilli()
|
||||
}
|
||||
|
||||
func time2DBOpt(t *time.Time) *int64 {
|
||||
if t == nil {
|
||||
return nil
|
||||
}
|
||||
return langext.Ptr(t.UnixMilli())
|
||||
}
|
@ -61,7 +61,7 @@ func (j *DeliveryRetryJob) run() bool {
|
||||
ctx := j.app.NewSimpleTransactionContext(10 * time.Second)
|
||||
defer ctx.Cancel()
|
||||
|
||||
deliveries, err := j.app.Database.ListRetrieableDeliveries(ctx, 32)
|
||||
deliveries, err := j.app.Database.Primary.ListRetrieableDeliveries(ctx, 32)
|
||||
if err != nil {
|
||||
log.Err(err).Msg("Failed to query retrieable deliveries")
|
||||
return false
|
||||
@ -86,14 +86,14 @@ func (j *DeliveryRetryJob) run() bool {
|
||||
|
||||
func (j *DeliveryRetryJob) redeliver(ctx *logic.SimpleContext, delivery models.Delivery) {
|
||||
|
||||
client, err := j.app.Database.GetClient(ctx, delivery.ReceiverUserID, delivery.ReceiverClientID)
|
||||
client, err := j.app.Database.Primary.GetClient(ctx, delivery.ReceiverUserID, delivery.ReceiverClientID)
|
||||
if err != nil {
|
||||
log.Err(err).Int64("ReceiverUserID", delivery.ReceiverUserID.IntID()).Int64("ReceiverClientID", delivery.ReceiverClientID.IntID()).Msg("Failed to get client")
|
||||
ctx.RollbackTransaction()
|
||||
return
|
||||
}
|
||||
|
||||
msg, err := j.app.Database.GetMessage(ctx, delivery.SCNMessageID, true)
|
||||
msg, err := j.app.Database.Primary.GetMessage(ctx, delivery.SCNMessageID, true)
|
||||
if err != nil {
|
||||
log.Err(err).Int64("SCNMessageID", delivery.SCNMessageID.IntID()).Msg("Failed to get message")
|
||||
ctx.RollbackTransaction()
|
||||
@ -101,7 +101,7 @@ func (j *DeliveryRetryJob) redeliver(ctx *logic.SimpleContext, delivery models.D
|
||||
}
|
||||
|
||||
if msg.Deleted {
|
||||
err = j.app.Database.SetDeliveryFailed(ctx, delivery)
|
||||
err = j.app.Database.Primary.SetDeliveryFailed(ctx, delivery)
|
||||
if err != nil {
|
||||
log.Err(err).Int64("SCNMessageID", delivery.SCNMessageID.IntID()).Int64("DeliveryID", delivery.DeliveryID.IntID()).Msg("Failed to update delivery")
|
||||
ctx.RollbackTransaction()
|
||||
@ -111,14 +111,14 @@ func (j *DeliveryRetryJob) redeliver(ctx *logic.SimpleContext, delivery models.D
|
||||
|
||||
fcmDelivID, err := j.app.DeliverMessage(ctx, client, msg)
|
||||
if err == nil {
|
||||
err = j.app.Database.SetDeliverySuccess(ctx, delivery, *fcmDelivID)
|
||||
err = j.app.Database.Primary.SetDeliverySuccess(ctx, delivery, *fcmDelivID)
|
||||
if err != nil {
|
||||
log.Err(err).Int64("SCNMessageID", delivery.SCNMessageID.IntID()).Int64("DeliveryID", delivery.DeliveryID.IntID()).Msg("Failed to update delivery")
|
||||
ctx.RollbackTransaction()
|
||||
return
|
||||
}
|
||||
} else if delivery.RetryCount+1 > delivery.MaxRetryCount() {
|
||||
err = j.app.Database.SetDeliveryFailed(ctx, delivery)
|
||||
err = j.app.Database.Primary.SetDeliveryFailed(ctx, delivery)
|
||||
if err != nil {
|
||||
log.Err(err).Int64("SCNMessageID", delivery.SCNMessageID.IntID()).Int64("DeliveryID", delivery.DeliveryID.IntID()).Msg("Failed to update delivery")
|
||||
ctx.RollbackTransaction()
|
||||
@ -126,7 +126,7 @@ func (j *DeliveryRetryJob) redeliver(ctx *logic.SimpleContext, delivery models.D
|
||||
}
|
||||
log.Warn().Int64("SCNMessageID", delivery.SCNMessageID.IntID()).Int64("DeliveryID", delivery.DeliveryID.IntID()).Msg("Delivery failed after <max> retries (set to FAILURE)")
|
||||
} else {
|
||||
err = j.app.Database.SetDeliveryRetry(ctx, delivery)
|
||||
err = j.app.Database.Primary.SetDeliveryRetry(ctx, delivery)
|
||||
if err != nil {
|
||||
log.Err(err).Int64("SCNMessageID", delivery.SCNMessageID.IntID()).Int64("DeliveryID", delivery.DeliveryID.IntID()).Msg("Failed to update delivery")
|
||||
ctx.RollbackTransaction()
|
||||
|
@ -83,7 +83,7 @@ func (ac *AppContext) FinishSuccess(res ginresp.HTTPResponse) ginresp.HTTPRespon
|
||||
return res
|
||||
}
|
||||
|
||||
func (ac *AppContext) GetOrCreateTransaction(db *db.Database) (sq.Tx, error) {
|
||||
func (ac *AppContext) GetOrCreateTransaction(db db.DatabaseImpl) (sq.Tx, error) {
|
||||
if ac.cancelled {
|
||||
return nil, errors.New("context cancelled")
|
||||
}
|
||||
|
@ -4,7 +4,6 @@ import (
|
||||
scn "blackforestbytes.com/simplecloudnotifier"
|
||||
"blackforestbytes.com/simplecloudnotifier/api/apierr"
|
||||
"blackforestbytes.com/simplecloudnotifier/api/ginresp"
|
||||
"blackforestbytes.com/simplecloudnotifier/db"
|
||||
"blackforestbytes.com/simplecloudnotifier/google"
|
||||
"blackforestbytes.com/simplecloudnotifier/models"
|
||||
"blackforestbytes.com/simplecloudnotifier/push"
|
||||
@ -28,7 +27,7 @@ import (
|
||||
type Application struct {
|
||||
Config scn.Config
|
||||
Gin *gin.Engine
|
||||
Database *db.Database
|
||||
Database *DBPool
|
||||
Pusher push.NotificationClient
|
||||
AndroidPublisher google.AndroidPublisherClient
|
||||
Jobs []Job
|
||||
@ -37,7 +36,7 @@ type Application struct {
|
||||
IsRunning *syncext.AtomicBool
|
||||
}
|
||||
|
||||
func NewApp(db *db.Database) *Application {
|
||||
func NewApp(db *DBPool) *Application {
|
||||
return &Application{
|
||||
Database: db,
|
||||
stopChan: make(chan bool),
|
||||
@ -264,7 +263,7 @@ func (app *Application) getPermissions(ctx *AppContext, hdr string) (PermissionS

	key := strings.TrimSpace(hdr[4:])

	user, err := app.Database.GetUserByKey(ctx, key)
	user, err := app.Database.Primary.GetUserByKey(ctx, key)
	if err != nil {
		return PermissionSet{}, err
	}
@ -283,7 +282,7 @@ func (app *Application) getPermissions(ctx *AppContext, hdr string) (PermissionS
}

func (app *Application) GetOrCreateChannel(ctx *AppContext, userid models.UserID, displayChanName string, intChanName string) (models.Channel, error) {
	existingChan, err := app.Database.GetChannelByName(ctx, userid, intChanName)
	existingChan, err := app.Database.Primary.GetChannelByName(ctx, userid, intChanName)
	if err != nil {
		return models.Channel{}, err
	}
@ -295,12 +294,12 @@ func (app *Application) GetOrCreateChannel(ctx *AppContext, userid models.UserID
	subscribeKey := app.GenerateRandomAuthKey()
	sendKey := app.GenerateRandomAuthKey()

	newChan, err := app.Database.CreateChannel(ctx, userid, displayChanName, intChanName, subscribeKey, sendKey)
	newChan, err := app.Database.Primary.CreateChannel(ctx, userid, displayChanName, intChanName, subscribeKey, sendKey)
	if err != nil {
		return models.Channel{}, err
	}

	_, err = app.Database.CreateSubscription(ctx, userid, newChan, true)
	_, err = app.Database.Primary.CreateSubscription(ctx, userid, newChan, true)
	if err != nil {
		return models.Channel{}, err
	}
88
scnserver/logic/dbpool.go
Normal file
@ -0,0 +1,88 @@
package logic

import (
	scn "blackforestbytes.com/simplecloudnotifier"
	"blackforestbytes.com/simplecloudnotifier/db"
	logsdb "blackforestbytes.com/simplecloudnotifier/db/impl/logs"
	primarydb "blackforestbytes.com/simplecloudnotifier/db/impl/primary"
	requestsdb "blackforestbytes.com/simplecloudnotifier/db/impl/requests"
	"context"
)

type DBPool struct {
	Primary *primarydb.Database
	Requests *requestsdb.Database
	Logs *logsdb.Database
}

func NewDBPool(conf scn.Config) (*DBPool, error) {

	dbprimary, err := primarydb.NewPrimaryDatabase(conf)
	if err != nil {
		return nil, err
	}

	dbrequests, err := requestsdb.NewRequestsDatabase(conf)
	if err != nil {
		return nil, err
	}

	dblogs, err := logsdb.NewLogsDatabase(conf)
	if err != nil {
		return nil, err
	}

	return &DBPool{
		Primary: dbprimary,
		Requests: dbrequests,
		Logs: dblogs,
	}, nil
}

func (p DBPool) List() []db.DatabaseImpl {
	return []db.DatabaseImpl{
		p.Primary,
		p.Requests,
		p.Logs,
	}
}

func (p DBPool) Stop(ctx context.Context) error {

	var err error = nil

	for _, subdb := range p.List() {
		err2 := subdb.Stop(ctx)
		if err2 != nil && err == nil {
			err = err2
		}
	}

	if err != nil {
		return err
	}

	return nil
}

func (p DBPool) Migrate(ctx context.Context) error {
	for _, subdb := range p.List() {
		err := subdb.Migrate(ctx)
		if err != nil {
			return err
		}
	}

	return nil
}

func (p DBPool) Ping(ctx context.Context) error {
	for _, subdb := range p.List() {
		err := subdb.Ping(ctx)
		if err != nil {
			return err
		}
	}

	return nil
}
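Note: the pool only relies on the small shared surface that all three per-file databases expose through db.DatabaseImpl. The interface itself is not part of this diff; a minimal sketch of what it plausibly declares, inferred only from the calls made above (Migrate, Ping, Stop) and not from the real db package, would be:

package db

import "context"

// DatabaseImpl: assumed shape, reconstructed only from how DBPool uses it.
// The real interface very likely also exposes the transaction hooks that
// GetOrCreateTransaction(db db.DatabaseImpl) needs.
type DatabaseImpl interface {
	Migrate(ctx context.Context) error
	Ping(ctx context.Context) error
	Stop(ctx context.Context) error
}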
@ -54,7 +54,7 @@ func (sc *SimpleContext) Cancel() {
	sc.cancelFunc()
}

func (sc *SimpleContext) GetOrCreateTransaction(db *db.Database) (sq.Tx, error) {
func (sc *SimpleContext) GetOrCreateTransaction(db db.DatabaseImpl) (sq.Tx, error) {
	if sc.cancelled {
		return nil, errors.New("context cancelled")
	}
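With the parameter widened from *db.Database to db.DatabaseImpl, a call site hands in whichever file of the pool it wants a transaction on. A hedged fragment (the surrounding variable names are illustrative, not taken from this commit):

// somewhere with access to the Application and a SimpleContext:
tx, err := ctx.GetOrCreateTransaction(app.Database.Primary)
if err != nil {
	return err
}
_ = tx // run queries against the primary sqlite file inside this transaction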
@ -4,7 +4,6 @@ import (
	scn "blackforestbytes.com/simplecloudnotifier"
	"blackforestbytes.com/simplecloudnotifier/api"
	"blackforestbytes.com/simplecloudnotifier/api/ginext"
	"blackforestbytes.com/simplecloudnotifier/db"
	"blackforestbytes.com/simplecloudnotifier/google"
	"blackforestbytes.com/simplecloudnotifier/jobs"
	"blackforestbytes.com/simplecloudnotifier/logic"
@ -21,54 +20,87 @@ type Void = struct{}
func StartSimpleWebserver(t *testing.T) (*logic.Application, string, func()) {
	InitTests()

	uuid1, _ := langext.NewHexUUID()
	uuid2, _ := langext.NewHexUUID()
	uuid3, _ := langext.NewHexUUID()

	dbdir := t.TempDir()
	dbfile := filepath.Join(dbdir, uuid2+".sqlite3")
	dbfile1 := filepath.Join(dbdir, uuid1+".sqlite3")
	dbfile2 := filepath.Join(dbdir, uuid2+".sqlite3")
	dbfile3 := filepath.Join(dbdir, uuid3+".sqlite3")

	err := os.MkdirAll(dbdir, os.ModePerm)
	if err != nil {
		TestFailErr(t, err)
	}

	f, err := os.Create(dbfile)
	f1, err := os.Create(dbfile1)
	if err != nil {
		TestFailErr(t, err)
	}
	err = f.Close()
	err = f1.Close()
	if err != nil {
		TestFailErr(t, err)
	}
	err = os.Chmod(dbfile1, 0777)
	if err != nil {
		TestFailErr(t, err)
	}
	f2, err := os.Create(dbfile2)
	if err != nil {
		TestFailErr(t, err)
	}
	err = f2.Close()
	if err != nil {
		TestFailErr(t, err)
	}
	err = os.Chmod(dbfile2, 0777)
	if err != nil {
		TestFailErr(t, err)
	}
	f3, err := os.Create(dbfile3)
	if err != nil {
		TestFailErr(t, err)
	}
	err = f3.Close()
	if err != nil {
		TestFailErr(t, err)
	}
	err = os.Chmod(dbfile3, 0777)
	if err != nil {
		TestFailErr(t, err)
	}

	err = os.Chmod(dbfile, 0777)
	if err != nil {
		TestFailErr(t, err)
	TPrintln("DatabaseFile<main>: " + dbfile1)
	TPrintln("DatabaseFile<requests>: " + dbfile2)
	TPrintln("DatabaseFile<logs>: " + dbfile3)

	conf, ok := scn.GetConfig("local-host")
	if !ok {
		TestFail(t, "conf not found")
	}

	TPrintln("DatabaseFile: " + dbfile)

	conf := scn.Config{
		Namespace: "test",
		GinDebug: true,
		ServerIP: "0.0.0.0",
		ServerPort: "0", // simply choose a free port
		DBFile: dbfile,
		DBJournal: "WAL",
		DBTimeout: 500 * time.Millisecond,
		DBMaxOpenConns: 2,
		DBMaxIdleConns: 2,
		DBConnMaxLifetime: 1 * time.Second,
		DBConnMaxIdleTime: 1 * time.Second,
		RequestTimeout: 30 * time.Second,
		RequestMaxRetry: 32,
		RequestRetrySleep: 100 * time.Millisecond,
		ReturnRawErrors: true,
		DummyFirebase: true,
		DBSingleConn: false,
	}
	conf.ServerPort = "0" // simply choose a free port
	conf.DBMain.File = dbfile1
	conf.DBLogs.File = dbfile2
	conf.DBRequests.File = dbfile3
	conf.DBMain.Timeout = 500 * time.Millisecond
	conf.DBLogs.Timeout = 500 * time.Millisecond
	conf.DBRequests.Timeout = 500 * time.Millisecond
	conf.DBMain.ConnMaxLifetime = 1 * time.Second
	conf.DBLogs.ConnMaxLifetime = 1 * time.Second
	conf.DBRequests.ConnMaxLifetime = 1 * time.Second
	conf.DBMain.ConnMaxIdleTime = 1 * time.Second
	conf.DBLogs.ConnMaxIdleTime = 1 * time.Second
	conf.DBRequests.ConnMaxIdleTime = 1 * time.Second
	conf.RequestMaxRetry = 32
	conf.RequestRetrySleep = 100 * time.Millisecond
	conf.ReturnRawErrors = true
	conf.DummyFirebase = true

	scn.Conf = conf

	sqlite, err := db.NewDatabase(conf)
	sqlite, err := logic.NewDBPool(conf)
	if err != nil {
		TestFailErr(t, err)
	}
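The test helper above mirrors what a real startup path would do with the new per-file config. A rough, self-contained sketch, assuming hypothetical file paths and panic-style error handling that are not part of this commit:

package main

import (
	"context"

	scn "blackforestbytes.com/simplecloudnotifier"
	"blackforestbytes.com/simplecloudnotifier/logic"
)

func main() {
	conf, ok := scn.GetConfig("local-host")
	if !ok {
		panic("conf not found")
	}

	// assumed paths, one sqlite file per concern (primary / requests / logs)
	conf.DBMain.File = "/data/scn_primary.sqlite3"
	conf.DBRequests.File = "/data/scn_requests.sqlite3"
	conf.DBLogs.File = "/data/scn_logs.sqlite3"

	pool, err := logic.NewDBPool(conf) // opens all three databases
	if err != nil {
		panic(err)
	}

	if err := pool.Migrate(context.Background()); err != nil { // migrates every schema
		panic(err)
	}

	app := logic.NewApp(pool) // the Application now carries the whole pool
	_ = app
}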
@ -94,7 +126,9 @@ func StartSimpleWebserver(t *testing.T) (*logic.Application, string, func()) {

	stop := func() {
		app.Stop()
		_ = os.Remove(dbfile)
		_ = os.Remove(dbfile1)
		_ = os.Remove(dbfile2)
		_ = os.Remove(dbfile3)
		_ = app.IsRunning.WaitWithTimeout(400*time.Millisecond, false)
	}
