Mirror of https://github.com/ClusterCockpit/cc-backend (synced 2024-12-25 21:09:05 +01:00)
Introduce db migration support
commit 8ffb562d6b (parent 643bd3fb21)
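This commit replaces the ad-hoc CREATE TABLE statements scattered across auth.Init(), InitDB() and GetUserCfgRepo() with versioned schema migrations driven by github.com/golang-migrate/migrate/v4. The SQL files under internal/repository/migrations/ are embedded into the binary via go:embed; repository.Connect() now calls checkDBVersion(), which warns and exits if the on-disk schema is older than supportedVersion (currently 2); and a new -migrate-db flag runs repository.MigrateDB() to bring the database up to date and exit. The diff below shows the flag wiring in main(), the new migration package and SQL files, the removal of the old in-code schema, and the API test bootstrapping its temporary database through MigrateDB().

A minimal sketch (not part of the commit) of how these new entry points are driven, assuming the package layout introduced below; the -db flag and the ./var/job.db default are placeholders for this sketch only, since the real main() takes the database path from config.json (config.Keys.DB):

package main

import (
    "flag"
    "os"

    "github.com/ClusterCockpit/cc-backend/internal/repository"
)

func main() {
    // Placeholder flags for this sketch; the real main() wires -migrate-db into
    // its existing flag set and reads the database path from the config file.
    flagMigrateDB := flag.Bool("migrate-db", false, "Migrate database to supported version and exit")
    dbPath := flag.String("db", "./var/job.db", "SQLite database file (sketch-only flag)")
    flag.Parse()

    if *flagMigrateDB {
        // Applies all embedded SQL migrations (01_init-schema, 02_add-index) and exits.
        repository.MigrateDB(*dbPath)
        os.Exit(0)
    }

    // A normal start goes through Connect(), which now runs checkDBVersion()
    // and warns and exits when the schema is older than supportedVersion.
    repository.Connect("sqlite3", *dbPath)
    _ = repository.GetConnection()
}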
@@ -61,7 +61,7 @@ var (
 )
 
 func main() {
-    var flagReinitDB, flagServer, flagSyncLDAP, flagGops, flagDev, flagVersion, flagLogDateTime bool
+    var flagReinitDB, flagServer, flagSyncLDAP, flagGops, flagMigrateDB, flagDev, flagVersion, flagLogDateTime bool
     var flagNewUser, flagDelUser, flagGenJWT, flagConfigFile, flagImportJob, flagLogLevel string
     flag.BoolVar(&flagReinitDB, "init-db", false, "Go through job-archive and re-initialize the 'job', 'tag', and 'jobtag' tables (all running jobs will be lost!)")
     flag.BoolVar(&flagSyncLDAP, "sync-ldap", false, "Sync the 'user' table with ldap")
@@ -69,6 +69,7 @@ func main() {
     flag.BoolVar(&flagGops, "gops", false, "Listen via github.com/google/gops/agent (for debugging)")
     flag.BoolVar(&flagDev, "dev", false, "Enable development components: GraphQL Playground and Swagger UI")
     flag.BoolVar(&flagVersion, "version", false, "Show version information and exit")
+    flag.BoolVar(&flagMigrateDB, "migrate-db", false, "Migrate database to supported version and exit")
     flag.BoolVar(&flagLogDateTime, "logdate", false, "Set this flag to add date and time to log messages")
     flag.StringVar(&flagConfigFile, "config", "./config.json", "Specify alternative path to `config.json`")
     flag.StringVar(&flagNewUser, "add-user", "", "Add a new user. Argument format: `<username>:[admin,support,api,user]:<password>`")
@@ -112,6 +113,11 @@ func main() {
         config.Keys.DB = os.Getenv(envvar)
     }
 
+    if flagMigrateDB {
+        repository.MigrateDB(config.Keys.DB)
+        os.Exit(0)
+    }
+
     repository.Connect(config.Keys.DBDriver, config.Keys.DB)
     db := repository.GetConnection()
 
go.mod (8 changes)
@@ -37,10 +37,13 @@ require (
     github.com/go-openapi/jsonreference v0.20.0 // indirect
     github.com/go-openapi/spec v0.20.7 // indirect
     github.com/go-openapi/swag v0.22.3 // indirect
+    github.com/golang-migrate/migrate/v4 v4.15.2 // indirect
     github.com/golang/protobuf v1.5.2 // indirect
     github.com/google/uuid v1.3.0 // indirect
     github.com/gorilla/securecookie v1.1.1 // indirect
     github.com/gorilla/websocket v1.5.0 // indirect
+    github.com/hashicorp/errwrap v1.1.0 // indirect
+    github.com/hashicorp/go-multierror v1.1.1 // indirect
     github.com/hashicorp/golang-lru v0.5.4 // indirect
     github.com/influxdata/line-protocol v0.0.0-20210922203350-b1ad95c89adf // indirect
     github.com/josharian/intern v1.0.0 // indirect
@@ -49,7 +52,7 @@ require (
     github.com/lann/builder v0.0.0-20180802200727-47ae307949d0 // indirect
     github.com/lann/ps v0.0.0-20150810152359-62de8c46ede0 // indirect
     github.com/mailru/easyjson v0.7.7 // indirect
-    github.com/matttproud/golang_protobuf_extensions v1.0.1 // indirect
+    github.com/matttproud/golang_protobuf_extensions v1.0.2-0.20181231171920-c182affec369 // indirect
     github.com/mitchellh/mapstructure v1.5.0 // indirect
     github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect
     github.com/modern-go/reflect2 v1.0.2 // indirect
@@ -64,6 +67,7 @@ require (
     github.com/swaggo/files v0.0.0-20220728132757-551d4a08d97a // indirect
     github.com/urfave/cli/v2 v2.8.1 // indirect
     github.com/xrash/smetrics v0.0.0-20201216005158-039620a65673 // indirect
+    go.uber.org/atomic v1.7.0 // indirect
     golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4 // indirect
     golang.org/x/net v0.0.0-20220909164309-bea034e7d591 // indirect
     golang.org/x/oauth2 v0.0.0-20220223155221-ee480838109b // indirect
@@ -71,7 +75,7 @@ require (
     golang.org/x/text v0.3.7 // indirect
     golang.org/x/tools v0.1.12 // indirect
     golang.org/x/xerrors v0.0.0-20220411194840-2f41105eb62f // indirect
-    google.golang.org/appengine v1.6.6 // indirect
+    google.golang.org/appengine v1.6.7 // indirect
     google.golang.org/protobuf v1.28.1 // indirect
     gopkg.in/yaml.v2 v2.4.0 // indirect
     gopkg.in/yaml.v3 v3.0.1 // indirect
@@ -85,18 +85,6 @@ func Init(db *sqlx.DB,
     configs map[string]interface{}) (*Authentication, error) {
     auth := &Authentication{}
     auth.db = db
-    _, err := db.Exec(`
-    CREATE TABLE IF NOT EXISTS user (
-        username varchar(255) PRIMARY KEY NOT NULL,
-        password varchar(255) DEFAULT NULL,
-        ldap tinyint NOT NULL DEFAULT 0, /* col called "ldap" for historic reasons, fills the "AuthSource" */
-        name varchar(255) DEFAULT NULL,
-        roles varchar(255) NOT NULL DEFAULT "[]",
-        email varchar(255) DEFAULT NULL);`)
-    if err != nil {
-        log.Error("Error while initializing authentication -> create user table failed")
-        return nil, err
-    }
 
     sessKey := os.Getenv("SESSION_KEY")
     if sessKey == "" {
@@ -55,6 +55,7 @@ func Connect(driver string, db string) {
         }
 
         dbConnInstance = &DBConnection{DB: dbHandle}
+        checkDBVersion(dbHandle.DB)
     })
 }
 
@@ -19,60 +19,6 @@ import (
     "github.com/ClusterCockpit/cc-backend/pkg/schema"
 )
 
-// `AUTO_INCREMENT` is in a comment because of this hack:
-// https://stackoverflow.com/a/41028314 (sqlite creates unique ids automatically)
-const JobsDBSchema string = `
-    DROP TABLE IF EXISTS jobtag;
-    DROP TABLE IF EXISTS job;
-    DROP TABLE IF EXISTS tag;
-
-    CREATE TABLE job (
-        id INTEGER PRIMARY KEY /*!40101 AUTO_INCREMENT */,
-        job_id BIGINT NOT NULL,
-        cluster VARCHAR(255) NOT NULL,
-        subcluster VARCHAR(255) NOT NULL,
-        start_time BIGINT NOT NULL, -- Unix timestamp
-
-        user VARCHAR(255) NOT NULL,
-        project VARCHAR(255) NOT NULL,
-        ` + "`partition`" + ` VARCHAR(255) NOT NULL, -- partition is a keyword in mysql -.-
-        array_job_id BIGINT NOT NULL,
-        duration INT NOT NULL DEFAULT 0,
-        walltime INT NOT NULL DEFAULT 0,
-        job_state VARCHAR(255) NOT NULL CHECK(job_state IN ('running', 'completed', 'failed', 'cancelled', 'stopped', 'timeout', 'preempted', 'out_of_memory')),
-        meta_data TEXT, -- JSON
-        resources TEXT NOT NULL, -- JSON
-
-        num_nodes INT NOT NULL,
-        num_hwthreads INT NOT NULL,
-        num_acc INT NOT NULL,
-        smt TINYINT NOT NULL DEFAULT 1 CHECK(smt IN (0, 1 )),
-        exclusive TINYINT NOT NULL DEFAULT 1 CHECK(exclusive IN (0, 1, 2)),
-        monitoring_status TINYINT NOT NULL DEFAULT 1 CHECK(monitoring_status IN (0, 1, 2, 3)),
-
-        mem_used_max REAL NOT NULL DEFAULT 0.0,
-        flops_any_avg REAL NOT NULL DEFAULT 0.0,
-        mem_bw_avg REAL NOT NULL DEFAULT 0.0,
-        load_avg REAL NOT NULL DEFAULT 0.0,
-        net_bw_avg REAL NOT NULL DEFAULT 0.0,
-        net_data_vol_total REAL NOT NULL DEFAULT 0.0,
-        file_bw_avg REAL NOT NULL DEFAULT 0.0,
-        file_data_vol_total REAL NOT NULL DEFAULT 0.0);
-
-    CREATE TABLE tag (
-        id INTEGER PRIMARY KEY,
-        tag_type VARCHAR(255) NOT NULL,
-        tag_name VARCHAR(255) NOT NULL,
-        CONSTRAINT be_unique UNIQUE (tag_type, tag_name));
-
-    CREATE TABLE jobtag (
-        job_id INTEGER,
-        tag_id INTEGER,
-        PRIMARY KEY (job_id, tag_id),
-        FOREIGN KEY (job_id) REFERENCES job (id) ON DELETE CASCADE,
-        FOREIGN KEY (tag_id) REFERENCES tag (id) ON DELETE CASCADE);
-`
-
 // Indexes are created after the job-archive is traversed for faster inserts.
 const JobsDbIndexes string = `
     CREATE INDEX job_stats ON job (cluster,subcluster,user);
@@ -211,13 +157,6 @@ func InitDB() error {
     starttime := time.Now()
     log.Print("Building job table...")
 
-    // Basic database structure:
-    _, err := db.DB.Exec(JobsDBSchema)
-    if err != nil {
-        log.Error("Error while initializing basic DB structure")
-        return err
-    }
-
     // Inserts are bundled into transactions because in sqlite,
     // that speeds up inserts A LOT.
     tx, err := db.DB.Beginx()
internal/repository/migration.go (new file, 69 lines)
@@ -0,0 +1,69 @@
+// Copyright (C) 2022 NHR@FAU, University Erlangen-Nuremberg.
+// All rights reserved.
+// Use of this source code is governed by a MIT-style
+// license that can be found in the LICENSE file.
+package repository
+
+import (
+    "database/sql"
+    "embed"
+    "fmt"
+    "os"
+
+    "github.com/ClusterCockpit/cc-backend/pkg/log"
+    "github.com/golang-migrate/migrate/v4"
+    "github.com/golang-migrate/migrate/v4/database/sqlite3"
+    "github.com/golang-migrate/migrate/v4/source/iofs"
+)
+
+const supportedVersion uint = 2
+
+//go:embed migrations/*.sql
+var migrationFiles embed.FS
+
+func checkDBVersion(db *sql.DB) {
+    driver, err := sqlite3.WithInstance(db, &sqlite3.Config{})
+    if err != nil {
+        log.Fatal(err)
+    }
+    d, err := iofs.New(migrationFiles, "migrations")
+    if err != nil {
+        log.Fatal(err)
+    }
+
+    m, err := migrate.NewWithInstance("iofs", d, "sqlite3", driver)
+    if err != nil {
+        log.Fatal(err)
+    }
+
+    v, _, err := m.Version()
+    if err != nil {
+        if err == migrate.ErrNilVersion {
+            log.Warn("Legacy database without version or missing database file!")
+        } else {
+            log.Fatal(err)
+        }
+    }
+
+    if v < supportedVersion {
+        log.Warnf("Unsupported database version %d, need %d.\nPlease backup your database file and run cc-backend --migrate-db", v, supportedVersion)
+        os.Exit(0)
+    }
+}
+
+func MigrateDB(db string) {
+    d, err := iofs.New(migrationFiles, "migrations")
+    if err != nil {
+        log.Fatal(err)
+    }
+
+    m, err := migrate.NewWithSourceInstance("iofs", d, fmt.Sprintf("sqlite3://%s?_foreign_keys=on", db))
+    if err != nil {
+        log.Fatal(err)
+    }
+    if err := m.Up(); err != nil {
+        log.Fatal(err)
+    }
+
+    m.Close()
+}
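An aside on inspecting the result (not part of the diff): golang-migrate records the applied schema version inside the database itself, which is what checkDBVersion() reads back via m.Version(). A small sketch of checking an already-migrated file from a standalone program, assuming a database at test.db and the migrations directory added by this commit; it uses the file:// source instead of the embedded iofs source:

package main

import (
    "fmt"
    "log"

    "github.com/golang-migrate/migrate/v4"
    _ "github.com/golang-migrate/migrate/v4/database/sqlite3"
    _ "github.com/golang-migrate/migrate/v4/source/file"
)

func main() {
    // The sqlite3 URL format matches the one MigrateDB builds above.
    m, err := migrate.New(
        "file://internal/repository/migrations",
        "sqlite3://test.db?_foreign_keys=on")
    if err != nil {
        log.Fatal(err)
    }
    defer m.Close()

    // Version() reports the last applied migration (1 or 2 with the files in
    // this commit) and whether a migration was interrupted part-way ("dirty").
    v, dirty, err := m.Version()
    if err != nil {
        log.Fatal(err)
    }
    fmt.Printf("schema version %d (dirty=%v)\n", v, dirty)
}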
internal/repository/migrations/01_init-schema.down.sql (new file, 5 lines)
@@ -0,0 +1,5 @@
+DROP TABLE IF EXISTS job;
+DROP TABLE IF EXISTS tags;
+DROP TABLE IF EXISTS jobtag;
+DROP TABLE IF EXISTS configuration;
+DROP TABLE IF EXISTS user;
internal/repository/migrations/01_init-schema.up.sql (new file, 62 lines)
@@ -0,0 +1,62 @@
+CREATE TABLE IF NOT EXISTS job (
+    id INTEGER PRIMARY KEY,
+    job_id BIGINT NOT NULL,
+    cluster VARCHAR(255) NOT NULL,
+    subcluster VARCHAR(255) NOT NULL,
+    start_time BIGINT NOT NULL, -- Unix timestamp
+
+    user VARCHAR(255) NOT NULL,
+    project VARCHAR(255) NOT NULL,
+    partition VARCHAR(255) NOT NULL,
+    array_job_id BIGINT NOT NULL,
+    duration INT NOT NULL DEFAULT 0,
+    walltime INT NOT NULL DEFAULT 0,
+    job_state VARCHAR(255) NOT NULL
+        CHECK(job_state IN ('running', 'completed', 'failed', 'cancelled',
+            'stopped', 'timeout', 'preempted', 'out_of_memory')),
+    meta_data TEXT, -- JSON
+    resources TEXT NOT NULL, -- JSON
+
+    num_nodes INT NOT NULL,
+    num_hwthreads INT NOT NULL,
+    num_acc INT NOT NULL,
+    smt TINYINT NOT NULL DEFAULT 1 CHECK(smt IN (0, 1 )),
+    exclusive TINYINT NOT NULL DEFAULT 1 CHECK(exclusive IN (0, 1, 2)),
+    monitoring_status TINYINT NOT NULL DEFAULT 1 CHECK(monitoring_status IN (0, 1, 2, 3)),
+
+    mem_used_max REAL NOT NULL DEFAULT 0.0,
+    flops_any_avg REAL NOT NULL DEFAULT 0.0,
+    mem_bw_avg REAL NOT NULL DEFAULT 0.0,
+    load_avg REAL NOT NULL DEFAULT 0.0,
+    net_bw_avg REAL NOT NULL DEFAULT 0.0,
+    net_data_vol_total REAL NOT NULL DEFAULT 0.0,
+    file_bw_avg REAL NOT NULL DEFAULT 0.0,
+    file_data_vol_total REAL NOT NULL DEFAULT 0.0);
+
+CREATE TABLE IF NOT EXISTS tag (
+    id INTEGER PRIMARY KEY,
+    tag_type VARCHAR(255) NOT NULL,
+    tag_name VARCHAR(255) NOT NULL,
+    CONSTRAINT be_unique UNIQUE (tag_type, tag_name));
+
+CREATE TABLE IF NOT EXISTS jobtag (
+    job_id INTEGER,
+    tag_id INTEGER,
+    PRIMARY KEY (job_id, tag_id),
+    FOREIGN KEY (job_id) REFERENCES job (id) ON DELETE CASCADE,
+    FOREIGN KEY (tag_id) REFERENCES tag (id) ON DELETE CASCADE);
+
+CREATE TABLE IF NOT EXISTS configuration (
+    username varchar(255),
+    confkey varchar(255),
+    value varchar(255),
+    PRIMARY KEY (username, confkey),
+    FOREIGN KEY (username) REFERENCES user (username) ON DELETE CASCADE ON UPDATE NO ACTION);
+
+CREATE TABLE IF NOT EXISTS user (
+    username varchar(255) PRIMARY KEY NOT NULL,
+    password varchar(255) DEFAULT NULL,
+    ldap tinyint NOT NULL DEFAULT 0, /* col called "ldap" for historic reasons, fills the "AuthSource" */
+    name varchar(255) DEFAULT NULL,
+    roles varchar(255) NOT NULL DEFAULT "[]",
+    email varchar(255) DEFAULT NULL);
internal/repository/migrations/02_add-index.up.sql (new file, 5 lines)
@@ -0,0 +1,5 @@
+CREATE INDEX IF NOT EXISTS job_stats ON job (cluster,subcluster,user);
+CREATE INDEX IF NOT EXISTS job_by_user ON job (user);
+CREATE INDEX IF NOT EXISTS job_by_starttime ON job (start_time);
+CREATE INDEX IF NOT EXISTS job_by_job_id ON job (job_id);
+CREATE INDEX IF NOT EXISTS job_by_state ON job (job_state);
@@ -33,18 +33,6 @@ func GetUserCfgRepo() *UserCfgRepo {
     userCfgRepoOnce.Do(func() {
         db := GetConnection()
 
-        _, err := db.DB.Exec(`
-        CREATE TABLE IF NOT EXISTS configuration (
-            username varchar(255),
-            confkey varchar(255),
-            value varchar(255),
-            PRIMARY KEY (username, confkey),
-            FOREIGN KEY (username) REFERENCES user (username) ON DELETE CASCADE ON UPDATE NO ACTION);`)
-
-        if err != nil {
-            log.Fatalf("db.DB.exec() error: %v", err)
-        }
-
         lookupConfigStmt, err := db.DB.Preparex(`SELECT confkey, value FROM configuration WHERE configuration.username = ?`)
         if err != nil {
             log.Fatalf("db.DB.Preparex() error: %v", err)
@@ -269,11 +269,7 @@ func setup(t *testing.T) *api.RestApi {
         t.Fatal(err)
     }
     dbfilepath := filepath.Join(tmpdir, "test.db")
-    f, err := os.Create(dbfilepath)
-    if err != nil {
-        t.Fatal(err)
-    }
-    f.Close()
+    repository.MigrateDB(dbfilepath)
 
     cfgFilePath := filepath.Join(tmpdir, "config.json")
     if err := os.WriteFile(cfgFilePath, []byte(testconfig), 0666); err != nil {
@@ -294,10 +290,6 @@ func setup(t *testing.T) *api.RestApi {
         t.Fatal(err)
     }
 
-    if _, err := db.DB.Exec(repository.JobsDBSchema); err != nil {
-        t.Fatal(err)
-    }
-
     jobRepo := repository.GetJobRepository()
     resolver := &graph.Resolver{DB: db.DB, Repo: jobRepo}
 
test/test.db (BIN, binary file not shown)