Commit c2087b15d5: Merge branch 'dev' of github.com:ClusterCockpit/cc-backend into dev
Mirror of https://github.com/ClusterCockpit/cc-backend
@@ -12,7 +12,7 @@ var (
)

func cliInit() {
-flag.BoolVar(&flagInit, "init", false, "Setup var directory, initialize swlite database file, config.json and .env")
+flag.BoolVar(&flagInit, "init", false, "Setup var directory, initialize sqlite database file, config.json and .env")
flag.BoolVar(&flagReinitDB, "init-db", false, "Go through job-archive and re-initialize the 'job', 'tag', and 'jobtag' tables (all running jobs will be lost!)")
flag.BoolVar(&flagSyncLDAP, "sync-ldap", false, "Sync the 'hpc_user' table with ldap")
flag.BoolVar(&flagServer, "server", false, "Start a server, continues listening on port after initialization and argument handling")
@@ -24,10 +24,10 @@ func cliInit() {
flag.BoolVar(&flagForceDB, "force-db", false, "Force database version, clear dirty flag and exit")
flag.BoolVar(&flagLogDateTime, "logdate", false, "Set this flag to add date and time to log messages")
flag.StringVar(&flagConfigFile, "config", "./config.json", "Specify alternative path to `config.json`")
-flag.StringVar(&flagNewUser, "add-user", "", "Add a new user. Argument format: `<username>:[admin,support,manager,api,user]:<password>`")
+flag.StringVar(&flagNewUser, "add-user", "", "Add a new user. Argument format: <username>:[admin,support,manager,api,user]:<password>")
-flag.StringVar(&flagDelUser, "del-user", "", "Remove user by `username`")
+flag.StringVar(&flagDelUser, "del-user", "", "Remove a existing user. Argument format: <username>")
flag.StringVar(&flagGenJWT, "jwt", "", "Generate and print a JWT for the user specified by its `username`")
flag.StringVar(&flagImportJob, "import-job", "", "Import a job. Argument format: `<path-to-meta.json>:<path-to-data.json>,...`")
-flag.StringVar(&flagLogLevel, "loglevel", "warn", "Sets the logging level: `[debug,info,warn (default),err,fatal,crit]`")
+flag.StringVar(&flagLogLevel, "loglevel", "warn", "Sets the logging level: `[debug, info (default), warn, err, crit]`")
flag.Parse()
}
@@ -5,7 +5,6 @@
package main

import (
-"fmt"
"os"

"github.com/ClusterCockpit/cc-backend/internal/repository"
@@ -62,24 +61,23 @@ const configString = `

func initEnv() {
if util.CheckFileExists("var") {
-fmt.Print("Directory ./var already exists. Exiting!\n")
+log.Exit("Directory ./var already exists. Cautiously exiting application initialization.")
-os.Exit(0)
}

if err := os.WriteFile("config.json", []byte(configString), 0o666); err != nil {
-log.Fatalf("Writing config.json failed: %s", err.Error())
+log.Abortf("Could not write default ./config.json with permissions '0o666'. Application initialization failed, exited.\nError: %s\n", err.Error())
}

if err := os.WriteFile(".env", []byte(envString), 0o666); err != nil {
-log.Fatalf("Writing .env failed: %s", err.Error())
+log.Abortf("Could not write default ./.env file with permissions '0o666'. Application initialization failed, exited.\nError: %s\n", err.Error())
}

if err := os.Mkdir("var", 0o777); err != nil {
-log.Fatalf("Mkdir var failed: %s", err.Error())
+log.Abortf("Could not create default ./var folder with permissions '0o777'. Application initialization failed, exited.\nError: %s\n", err.Error())
}

err := repository.MigrateDB("sqlite3", "./var/job.db")
if err != nil {
-log.Fatalf("Initialize job.db failed: %s", err.Error())
+log.Abortf("Could not initialize default sqlite3 database as './var/job.db'. Application initialization failed, exited.\nError: %s\n", err.Error())
}
}
@@ -61,15 +61,23 @@ func main() {
// Apply config flags for pkg/log
log.Init(flagLogLevel, flagLogDateTime)

+// If init flag set, run tasks here before any file dependencies cause errors
+if flagInit {
+initEnv()
+log.Exit("Successfully setup environment!\n" +
+"Please review config.json and .env and adjust it to your needs.\n" +
+"Add your job-archive at ./var/job-archive.")
+}
+
// See https://github.com/google/gops (Runtime overhead is almost zero)
if flagGops {
if err := agent.Listen(agent.Options{}); err != nil {
-log.Fatalf("gops/agent.Listen failed: %s", err.Error())
+log.Abortf("Could not start gops agent with 'gops/agent.Listen(agent.Options{})'. Application startup failed, exited.\nError: %s\n", err.Error())
}
}

if err := runtimeEnv.LoadEnv("./.env"); err != nil && !os.IsNotExist(err) {
-log.Fatalf("parsing './.env' file failed: %s", err.Error())
+log.Abortf("Could not parse existing .env file at location './.env'. Application startup failed, exited.\nError: %s\n", err.Error())
}

// Initialize sub-modules and handle command line flags.
@@ -87,37 +95,29 @@ func main() {
if flagMigrateDB {
err := repository.MigrateDB(config.Keys.DBDriver, config.Keys.DB)
if err != nil {
-log.Fatal(err)
+log.Abortf("MigrateDB Failed: Could not migrate '%s' database at location '%s' to version %d.\nError: %s\n", config.Keys.DBDriver, config.Keys.DB, repository.Version, err.Error())
}
-os.Exit(0)
+log.Exitf("MigrateDB Success: Migrated '%s' database at location '%s' to version %d.\n", config.Keys.DBDriver, config.Keys.DB, repository.Version)
}

if flagRevertDB {
err := repository.RevertDB(config.Keys.DBDriver, config.Keys.DB)
if err != nil {
-log.Fatal(err)
+log.Abortf("RevertDB Failed: Could not revert '%s' database at location '%s' to version %d.\nError: %s\n", config.Keys.DBDriver, config.Keys.DB, (repository.Version - 1), err.Error())
}
-os.Exit(0)
+log.Exitf("RevertDB Success: Reverted '%s' database at location '%s' to version %d.\n", config.Keys.DBDriver, config.Keys.DB, (repository.Version - 1))
}

if flagForceDB {
err := repository.ForceDB(config.Keys.DBDriver, config.Keys.DB)
if err != nil {
-log.Fatal(err)
+log.Abortf("ForceDB Failed: Could not force '%s' database at location '%s' to version %d.\nError: %s\n", config.Keys.DBDriver, config.Keys.DB, repository.Version, err.Error())
}
-os.Exit(0)
+log.Exitf("ForceDB Success: Forced '%s' database at location '%s' to version %d.\n", config.Keys.DBDriver, config.Keys.DB, repository.Version)
}

repository.Connect(config.Keys.DBDriver, config.Keys.DB)

-if flagInit {
-initEnv()
-fmt.Print("Successfully setup environment!\n")
-fmt.Print("Please review config.json and .env and adjust it to your needs.\n")
-fmt.Print("Add your job-archive at ./var/job-archive.\n")
-os.Exit(0)
-}
-
if !config.Keys.DisableAuthentication {

auth.Init()
@@ -125,20 +125,27 @@ func main() {
if flagNewUser != "" {
parts := strings.SplitN(flagNewUser, ":", 3)
if len(parts) != 3 || len(parts[0]) == 0 {
-log.Fatal("invalid argument format for user creation")
+log.Abortf("Add User: Could not parse supplied argument format: No changes.\n"+
+"Want: <username>:[admin,support,manager,api,user]:<password>\n"+
+"Have: %s\n", flagNewUser)
}

ur := repository.GetUserRepository()
if err := ur.AddUser(&schema.User{
Username: parts[0], Projects: make([]string, 0), Password: parts[2], Roles: strings.Split(parts[1], ","),
}); err != nil {
-log.Fatalf("adding '%s' user authentication failed: %v", parts[0], err)
+log.Abortf("Add User: Could not add new user authentication for '%s' and roles '%s'.\nError: %s\n", parts[0], parts[1], err.Error())
+} else {
+log.Printf("Add User: Added new user '%s' with roles '%s'.\n", parts[0], parts[1])
}
}

if flagDelUser != "" {
ur := repository.GetUserRepository()
if err := ur.DelUser(flagDelUser); err != nil {
-log.Fatalf("deleting user failed: %v", err)
+log.Abortf("Delete User: Could not delete user '%s' from DB.\nError: %s\n", flagDelUser, err.Error())
+} else {
+log.Printf("Delete User: Deleted user '%s' from DB.\n", flagDelUser)
}
}

@@ -146,60 +153,64 @@ func main() {

if flagSyncLDAP {
if authHandle.LdapAuth == nil {
-log.Fatal("cannot sync: LDAP authentication is not configured")
+log.Abort("Sync LDAP: LDAP authentication is not configured, could not synchronize. No changes, exited.")
}

if err := authHandle.LdapAuth.Sync(); err != nil {
-log.Fatalf("LDAP sync failed: %v", err)
+log.Abortf("Sync LDAP: Could not synchronize, failed with error.\nError: %s\n", err.Error())
}
-log.Info("LDAP sync successfull")
+log.Print("Sync LDAP: LDAP synchronization successfull.")
}

if flagGenJWT != "" {
ur := repository.GetUserRepository()
user, err := ur.GetUser(flagGenJWT)
if err != nil {
-log.Fatalf("could not get user from JWT: %v", err)
+log.Abortf("JWT: Could not get supplied user '%s' from DB. No changes, exited.\nError: %s\n", flagGenJWT, err.Error())
}

if !user.HasRole(schema.RoleApi) {
-log.Warnf("user '%s' does not have the API role", user.Username)
+log.Warnf("JWT: User '%s' does not have the role 'api'. REST API endpoints will return error!\n", user.Username)
}

jwt, err := authHandle.JwtAuth.ProvideJWT(user)
if err != nil {
-log.Fatalf("failed to provide JWT to user '%s': %v", user.Username, err)
+log.Abortf("JWT: User '%s' found in DB, but failed to provide JWT.\nError: %s\n", user.Username, err.Error())
}

-fmt.Printf("MAIN > JWT for '%s': %s\n", user.Username, jwt)
+log.Printf("JWT: Successfully generated JWT for user '%s': %s\n", user.Username, jwt)
}

} else if flagNewUser != "" || flagDelUser != "" {
-log.Fatal("arguments --add-user and --del-user can only be used if authentication is enabled")
+log.Abort("Error: Arguments '--add-user' and '--del-user' can only be used if authentication is enabled. No changes, exited.")
}

if err := archive.Init(config.Keys.Archive, config.Keys.DisableArchive); err != nil {
-log.Fatalf("failed to initialize archive: %s", err.Error())
+log.Abortf("Init: Failed to initialize archive.\nError: %s\n", err.Error())
}

if err := metricdata.Init(); err != nil {
-log.Fatalf("failed to initialize metricdata repository: %s", err.Error())
+log.Abortf("Init: Failed to initialize metricdata repository.\nError %s\n", err.Error())
}

if flagReinitDB {
if err := importer.InitDB(); err != nil {
-log.Fatalf("failed to re-initialize repository DB: %s", err.Error())
+log.Abortf("Init DB: Failed to re-initialize repository DB.\nError: %s\n", err.Error())
+} else {
+log.Print("Init DB: Sucessfully re-initialized repository DB.")
}
}

if flagImportJob != "" {
if err := importer.HandleImportFlag(flagImportJob); err != nil {
-log.Fatalf("job import failed: %s", err.Error())
+log.Abortf("Import Job: Job import failed.\nError: %s\n", err.Error())
+} else {
+log.Printf("Import Job: Imported Job '%s' into DB.\n", flagImportJob)
}
}

if !flagServer {
-return
+log.Exit("No errors, server flag not set. Exiting cc-backend.")
}

archiver.Start(repository.GetJobRepository())
@@ -64,7 +64,7 @@ func serverInit() {
case string:
return fmt.Errorf("MAIN > Panic: %s", e)
case error:
-return fmt.Errorf("MAIN > Panic caused by: %w", e)
+return fmt.Errorf("MAIN > Panic caused by: %s", e.Error())
}

return errors.New("MAIN > Internal server error (panic)")
@@ -268,7 +268,7 @@ func serverStart() {
// Start http or https server
listener, err := net.Listen("tcp", config.Keys.Addr)
if err != nil {
-log.Fatalf("starting http listener failed: %v", err)
+log.Abortf("Server Start: Starting http listener on '%s' failed.\nError: %s\n", config.Keys.Addr, err.Error())
}

if !strings.HasSuffix(config.Keys.Addr, ":80") && config.Keys.RedirectHttpTo != "" {
@@ -281,7 +281,7 @@ func serverStart() {
cert, err := tls.LoadX509KeyPair(
config.Keys.HttpsCertFile, config.Keys.HttpsKeyFile)
if err != nil {
-log.Fatalf("loading X509 keypair failed: %v", err)
+log.Abortf("Server Start: Loading X509 keypair failed. Check options 'https-cert-file' and 'https-key-file' in 'config.json'.\nError: %s\n", err.Error())
}
listener = tls.NewListener(listener, &tls.Config{
Certificates: []tls.Certificate{cert},
@@ -292,20 +292,20 @@ func serverStart() {
MinVersion: tls.VersionTLS12,
PreferServerCipherSuites: true,
})
-fmt.Printf("HTTPS server listening at %s...", config.Keys.Addr)
+log.Printf("HTTPS server listening at %s...\n", config.Keys.Addr)
} else {
-fmt.Printf("HTTP server listening at %s...", config.Keys.Addr)
+log.Printf("HTTP server listening at %s...\n", config.Keys.Addr)
}
//
// Because this program will want to bind to a privileged port (like 80), the listener must
// be established first, then the user can be changed, and after that,
// the actual http server can be started.
if err := runtimeEnv.DropPrivileges(config.Keys.Group, config.Keys.User); err != nil {
-log.Fatalf("error while preparing server start: %s", err.Error())
+log.Abortf("Server Start: Error while preparing server start.\nError: %s\n", err.Error())
}

if err = server.Serve(listener); err != nil && err != http.ErrServerClosed {
-log.Fatalf("starting server failed: %v", err)
+log.Abortf("Server Start: Starting server failed.\nError: %s\n", err.Error())
}
}

go.mod (5 changed lines)
@@ -1,6 +1,7 @@
module github.com/ClusterCockpit/cc-backend

go 1.23.5
+toolchain go1.24.1

require (
github.com/99designs/gqlgen v0.17.66
@@ -10,7 +11,7 @@ require (
github.com/go-co-op/gocron/v2 v2.16.0
github.com/go-ldap/ldap/v3 v3.4.10
github.com/go-sql-driver/mysql v1.9.0
-github.com/golang-jwt/jwt/v5 v5.2.1
+github.com/golang-jwt/jwt/v5 v5.2.2
github.com/golang-migrate/migrate/v4 v4.18.2
github.com/google/gops v0.3.28
github.com/gorilla/handlers v1.5.2
@@ -78,7 +79,7 @@ require (
github.com/xrash/smetrics v0.0.0-20240521201337-686a1a2994c1 // indirect
go.uber.org/atomic v1.11.0 // indirect
golang.org/x/mod v0.23.0 // indirect
-golang.org/x/net v0.35.0 // indirect
+golang.org/x/net v0.36.0 // indirect
golang.org/x/sync v0.11.0 // indirect
golang.org/x/sys v0.30.0 // indirect
golang.org/x/text v0.22.0 // indirect
go.sum (8 changed lines)
@@ -83,8 +83,8 @@ github.com/go-viper/mapstructure/v2 v2.2.1 h1:ZAaOCxANMuZx5RCeg0mBdEZk7DZasvvZIx
github.com/go-viper/mapstructure/v2 v2.2.1/go.mod h1:oJDH3BJKyqBA2TXFhDsKDGDTlndYOZ6rGS0BRZIxGhM=
github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q=
github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q=
-github.com/golang-jwt/jwt/v5 v5.2.1 h1:OuVbFODueb089Lh128TAcimifWaLhJwVflnrgM17wHk=
+github.com/golang-jwt/jwt/v5 v5.2.2 h1:Rl4B7itRWVtYIHFrSNd7vhTiz9UpLdi6gZhZ3wEeDy8=
-github.com/golang-jwt/jwt/v5 v5.2.1/go.mod h1:pqrtFR0X4osieyHYxtmOUWsAWrfe1Q5UVIyoH402zdk=
+github.com/golang-jwt/jwt/v5 v5.2.2/go.mod h1:pqrtFR0X4osieyHYxtmOUWsAWrfe1Q5UVIyoH402zdk=
github.com/golang-migrate/migrate/v4 v4.18.2 h1:2VSCMz7x7mjyTXx3m2zPokOY82LTRgxK1yQYKo6wWQ8=
github.com/golang-migrate/migrate/v4 v4.18.2/go.mod h1:2CM6tJvn2kqPXwnXO/d3rAQYiyoIm180VsO8PRX6Rpk=
github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
@@ -279,8 +279,8 @@ golang.org/x/net v0.15.0/go.mod h1:idbUs1IY1+zTqbi8yxTbhexhEEk5ur9LInksu6HrEpk=
golang.org/x/net v0.21.0/go.mod h1:bIjVDfnllIU7BJ2DNgfnXvpSvtn8VRwhlsaeUTyUS44=
golang.org/x/net v0.25.0/go.mod h1:JkAGAh7GEvH74S6FOH42FLoXpXbE/aqXSrIQjXgsiwM=
golang.org/x/net v0.33.0/go.mod h1:HXLR5J+9DxmrqMwG9qjGCxZ+zKXxBru04zlTvWlWuN4=
-golang.org/x/net v0.35.0 h1:T5GQRQb2y08kTAByq9L4/bz8cipCdA8FbRTXewonqY8=
+golang.org/x/net v0.36.0 h1:vWF2fRbw4qslQsQzgFqZff+BItCvGFQqKzKIzx1rmoA=
-golang.org/x/net v0.35.0/go.mod h1:EglIi67kWsHKlRzzVMUD93VMSWGFOMSZgxFjparz1Qk=
+golang.org/x/net v0.36.0/go.mod h1:bFmbeoIPfrw4sMHNhb4J9f6+tPziuGjq7Jk/38fxi1I=
golang.org/x/oauth2 v0.27.0 h1:da9Vo7/tDv5RH/7nZDz1eMGS/q1Vv1N/7FCrBhI9I3M=
golang.org/x/oauth2 v0.27.0/go.mod h1:onh5ek6nERTohokkhCD/y2cV4Do3fxFHFuAejCkRWT8=
golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
@@ -1424,8 +1424,6 @@ func (api *RestApi) updateConfiguration(rw http.ResponseWriter, r *http.Request)
rw.Header().Set("Content-Type", "text/plain")
key, value := r.FormValue("key"), r.FormValue("value")

-// fmt.Printf("REST > KEY: %#v\nVALUE: %#v\n", key, value)
-
if err := repository.GetUserCfgRepo().UpdateConfig(key, value, repository.GetUserFromContext(r.Context())); err != nil {
http.Error(rw, err.Error(), http.StatusUnprocessableEntity)
return
@@ -7,9 +7,9 @@ package config
import (
"bytes"
"encoding/json"
-"log"
"os"

+"github.com/ClusterCockpit/cc-backend/pkg/log"
"github.com/ClusterCockpit/cc-backend/pkg/schema"
)

@@ -53,20 +53,20 @@ func Init(flagConfigFile string) {
raw, err := os.ReadFile(flagConfigFile)
if err != nil {
if !os.IsNotExist(err) {
-log.Fatalf("CONFIG ERROR: %v", err)
+log.Abortf("Config Init: Could not read config file '%s'.\nError: %s\n", flagConfigFile, err.Error())
}
} else {
if err := schema.Validate(schema.Config, bytes.NewReader(raw)); err != nil {
-log.Fatalf("Validate config: %v\n", err)
+log.Abortf("Config Init: Could not validate config file '%s'.\nError: %s\n", flagConfigFile, err.Error())
}
dec := json.NewDecoder(bytes.NewReader(raw))
dec.DisallowUnknownFields()
if err := dec.Decode(&Keys); err != nil {
-log.Fatalf("could not decode: %v", err)
+log.Abortf("Config Init: Could not decode config file '%s'.\nError: %s\n", flagConfigFile, err.Error())
}

if Keys.Clusters == nil || len(Keys.Clusters) < 1 {
-log.Fatal("At least one cluster required in config!")
+log.Abort("Config Init: At least one cluster required in config. Exited with error.")
}
}
}
@@ -59,17 +59,15 @@ func Connect(driver string, db string) {
} else {
dbHandle, err = sqlx.Open("sqlite3", opts.URL)
}
-if err != nil {
-log.Fatal(err)
-}
case "mysql":
opts.URL += "?multiStatements=true"
dbHandle, err = sqlx.Open("mysql", opts.URL)
-if err != nil {
-log.Fatalf("sqlx.Open() error: %v", err)
-}
default:
-log.Fatalf("unsupported database driver: %s", driver)
+log.Abortf("DB Connection: Unsupported database driver '%s'.\n", driver)
+}
+
+if err != nil {
+log.Abortf("DB Connection: Could not connect to '%s' database with sqlx.Open().\nError: %s\n", driver, err.Error())
}

dbHandle.SetMaxOpenConns(opts.MaxOpenConnections)
@@ -80,7 +78,7 @@ func Connect(driver string, db string) {
dbConnInstance = &DBConnection{DB: dbHandle, Driver: driver}
err = checkDBVersion(driver, dbHandle.DB)
if err != nil {
-log.Fatal(err)
+log.Abortf("DB Connection: Failed DB version check.\nError: %s\n", err.Error())
}
})
}
@@ -194,11 +194,13 @@ func (r *JobRepository) FindConcurrentJobs(

queryRunning := query.Where("job.job_state = ?").Where("(job.start_time BETWEEN ? AND ? OR job.start_time < ?)",
"running", startTimeTail, stopTimeTail, startTime)
-queryRunning = queryRunning.Where("job.resources LIKE ?", fmt.Sprint("%", hostname, "%"))
+// Get At Least One Exact Hostname Match from JSON Resources Array in Database
+queryRunning = queryRunning.Where("EXISTS (SELECT 1 FROM json_each(job.resources) WHERE json_extract(value, '$.hostname') = ?)", hostname)

query = query.Where("job.job_state != ?").Where("((job.start_time BETWEEN ? AND ?) OR (job.start_time + job.duration) BETWEEN ? AND ? OR (job.start_time < ?) AND (job.start_time + job.duration) > ?)",
"running", startTimeTail, stopTimeTail, startTimeFront, stopTimeTail, startTime, stopTime)
-query = query.Where("job.resources LIKE ?", fmt.Sprint("%", hostname, "%"))
+// Get At Least One Exact Hostname Match from JSON Resources Array in Database
+query = query.Where("EXISTS (SELECT 1 FROM json_each(job.resources) WHERE json_extract(value, '$.hostname') = ?)", hostname)

rows, err := query.RunWith(r.stmtCache).Query()
if err != nil {
@@ -67,7 +67,8 @@ func (r *JobRepository) QueryJobs(

rows, err := query.RunWith(r.stmtCache).Query()
if err != nil {
-log.Errorf("Error while running query: %v", err)
+queryString, queryVars, _ := query.ToSql()
+log.Errorf("Error while running query '%s' %v: %v", queryString, queryVars, err)
return nil, err
}

@@ -197,7 +198,7 @@ func BuildWhereClause(filter *model.JobFilter, query sq.SelectBuilder) sq.Select
query = buildIntCondition("job.num_hwthreads", filter.NumHWThreads, query)
}
if filter.Node != nil {
-query = buildStringCondition("job.resources", filter.Node, query)
+query = buildResourceJsonCondition("hostname", filter.Node, query)
}
if filter.Energy != nil {
query = buildFloatCondition("job.energy", filter.Energy, query)
@@ -299,6 +300,28 @@ func buildMetaJsonCondition(jsonField string, cond *model.StringInput, query sq.
return query
}

+func buildResourceJsonCondition(jsonField string, cond *model.StringInput, query sq.SelectBuilder) sq.SelectBuilder {
+// Verify and Search Only in Valid Jsons
+query = query.Where("JSON_VALID(resources)")
+// add "AND" Sql query Block for field match
+if cond.Eq != nil {
+return query.Where("EXISTS (SELECT 1 FROM json_each(job.resources) WHERE json_extract(value, \"$."+jsonField+"\") = ?)", *cond.Eq)
+}
+if cond.Neq != nil { // Currently Unused
+return query.Where("EXISTS (SELECT 1 FROM json_each(job.resources) WHERE json_extract(value, \"$."+jsonField+"\") != ?)", *cond.Neq)
+}
+if cond.StartsWith != nil { // Currently Unused
+return query.Where("EXISTS (SELECT 1 FROM json_each(job.resources) WHERE json_extract(value, \"$."+jsonField+"\")) LIKE ?)", fmt.Sprint(*cond.StartsWith, "%"))
+}
+if cond.EndsWith != nil { // Currently Unused
+return query.Where("EXISTS (SELECT 1 FROM json_each(job.resources) WHERE json_extract(value, \"$."+jsonField+"\") LIKE ?)", fmt.Sprint("%", *cond.EndsWith))
+}
+if cond.Contains != nil {
+return query.Where("EXISTS (SELECT 1 FROM json_each(job.resources) WHERE json_extract(value, \"$."+jsonField+"\") LIKE ?)", fmt.Sprint("%", *cond.Contains, "%"))
+}
+return query
+}

var (
matchFirstCap = regexp.MustCompile("(.)([A-Z][a-z]+)")
matchAllCap = regexp.MustCompile("([a-z0-9])([A-Z])")
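Note (editor's addition, not part of the commit): the new buildResourceJsonCondition helper and the hostname match in FindConcurrentJobs replace a substring LIKE over the serialized job.resources column with SQLite's json_each/json_extract functions, so only the named JSON field is matched. A minimal, hypothetical Go sketch of the kind of query such a predicate produces, using the Masterminds/squirrel builder under the same sq alias the repository uses; the selected column, table, and hostname value are assumed example data, not taken from the commit:

```go
package main

import (
	"fmt"

	sq "github.com/Masterminds/squirrel"
)

func main() {
	hostname := "node0042" // assumed example value

	// Build the same style of predicate the diff adds: an exact hostname match
	// inside the JSON array stored in job.resources.
	query := sq.Select("job.id").From("job").
		Where("JSON_VALID(resources)").
		Where("EXISTS (SELECT 1 FROM json_each(job.resources) WHERE json_extract(value, '$.hostname') = ?)", hostname)

	sqlStr, args, err := query.ToSql()
	if err != nil {
		panic(err)
	}
	fmt.Println(sqlStr) // SELECT job.id FROM job WHERE JSON_VALID(resources) AND EXISTS (...)
	fmt.Println(args)   // [node0042]
}
```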
@@ -54,7 +54,7 @@ func checkDBVersion(backend string, db *sql.DB) error {
return err
}
default:
-log.Fatalf("unsupported database backend: %s", backend)
+log.Abortf("Migration: Unsupported database backend '%s'.\n", backend)
}

v, dirty, err := m.Version()
@@ -102,7 +102,7 @@ func getMigrateInstance(backend string, db string) (m *migrate.Migrate, err erro
return m, err
}
default:
-log.Fatalf("unsupported database backend: %s", backend)
+log.Abortf("Migration: Unsupported database backend '%s'.\n", backend)
}

return m, nil
@@ -35,7 +35,7 @@ func GetUserCfgRepo() *UserCfgRepo {

lookupConfigStmt, err := db.DB.Preparex(`SELECT confkey, value FROM configuration WHERE configuration.username = ?`)
if err != nil {
-log.Fatalf("db.DB.Preparex() error: %v", err)
+log.Fatalf("User Config: Call 'db.DB.Preparex()' failed.\nError: %s\n", err.Error())
}

userCfgRepoInstance = &UserCfgRepo{
@@ -40,7 +40,7 @@ func Start() {
jobRepo = repository.GetJobRepository()
s, err = gocron.NewScheduler()
if err != nil {
-log.Fatalf("Error while creating gocron scheduler: %s", err.Error())
+log.Abortf("Taskmanager Start: Could not create gocron scheduler.\nError: %s\n", err.Error())
}

if config.Keys.StopJobsExceedingWalltime > 0 {
pkg/log/log.go (236 changed lines)
@@ -46,12 +46,12 @@ var loglevel string = "info"
/* CONFIG */

func Init(lvl string, logdate bool) {
+// Discard I/O for all writers below selected loglevel; <CRITICAL> is always written.
switch lvl {
case "crit":
ErrWriter = io.Discard
fallthrough
-case "err", "fatal":
+case "err":
WarnWriter = io.Discard
fallthrough
case "warn":
@@ -63,8 +63,7 @@ func Init(lvl string, logdate bool) {
// Nothing to do...
break
default:
-fmt.Printf("pkg/log: Flag 'loglevel' has invalid value %#v\npkg/log: Will use default loglevel 'debug'\n", lvl)
+fmt.Printf("pkg/log: Flag 'loglevel' has invalid value %#v\npkg/log: Will use default loglevel '%s'\n", lvl, loglevel)
-//SetLogLevel("debug")
}

if !logdate {
@@ -84,109 +83,138 @@ func Init(lvl string, logdate bool) {
loglevel = lvl
}

-/* PRINT */
+/* HELPER */

-// Private helper
-func printStr(v ...interface{}) string {
-return fmt.Sprint(v...)
-}
-
-// Uses Info() -> If errorpath required at some point:
-// Will need own writer with 'Output(2, out)' to correctly render path
-func Print(v ...interface{}) {
-Info(v...)
-}
-
-func Debug(v ...interface{}) {
-DebugLog.Output(2, printStr(v...))
-}
-
-func Info(v ...interface{}) {
-InfoLog.Output(2, printStr(v...))
-}
-
-func Warn(v ...interface{}) {
-WarnLog.Output(2, printStr(v...))
-}
-
-func Error(v ...interface{}) {
-ErrLog.Output(2, printStr(v...))
-}
-
-// Writes panic stacktrace, but keeps application alive
-func Panic(v ...interface{}) {
-ErrLog.Output(2, printStr(v...))
-panic("Panic triggered ...")
-}
-
-func Crit(v ...interface{}) {
-CritLog.Output(2, printStr(v...))
-}
-
-// Writes critical log, stops application
-func Fatal(v ...interface{}) {
-CritLog.Output(2, printStr(v...))
-os.Exit(1)
-}
-
-/* PRINT FORMAT*/
-
-// Private helper
-func printfStr(format string, v ...interface{}) string {
-return fmt.Sprintf(format, v...)
-}
-
-// Uses Infof() -> If errorpath required at some point:
-// Will need own writer with 'Output(2, out)' to correctly render path
-func Printf(format string, v ...interface{}) {
-Infof(format, v...)
-}
-
-func Debugf(format string, v ...interface{}) {
-DebugLog.Output(2, printfStr(format, v...))
-}
-
-func Infof(format string, v ...interface{}) {
-InfoLog.Output(2, printfStr(format, v...))
-}
-
-func Warnf(format string, v ...interface{}) {
-WarnLog.Output(2, printfStr(format, v...))
-}
-
-func Errorf(format string, v ...interface{}) {
-ErrLog.Output(2, printfStr(format, v...))
-}
-
-// Writes panic stacktrace, but keeps application alive
-func Panicf(format string, v ...interface{}) {
-ErrLog.Output(2, printfStr(format, v...))
-panic("Panic triggered ...")
-}
-
-func Critf(format string, v ...interface{}) {
-CritLog.Output(2, printfStr(format, v...))
-}
-
-// Writes crit log, stops application
-func Fatalf(format string, v ...interface{}) {
-CritLog.Output(2, printfStr(format, v...))
-os.Exit(1)
-}

func Loglevel() string {
return loglevel
}

-/* SPECIAL */
+/* PRIVATE HELPER */

-// func Finfof(w io.Writer, format string, v ...interface{}) {
-// if w != io.Discard {
-// if logDateTime {
-// currentTime := time.Now()
-// fmt.Fprintf(InfoWriter, currentTime.String()+InfoPrefix+format+"\n", v...)
-// } else {
-// fmt.Fprintf(InfoWriter, InfoPrefix+format+"\n", v...)
-// }
-// }
-// }
+// Return unformatted string
+func printStr(v ...interface{}) string {
+return fmt.Sprint(v...)
+}
+
+// Return formatted string
+func printfStr(format string, v ...interface{}) string {
+return fmt.Sprintf(format, v...)
+}
+
+/* PRINT */
+
+// Prints to STDOUT without string formatting; application continues.
+// Used for special cases not requiring log information like date or location.
+func Print(v ...interface{}) {
+fmt.Fprintln(os.Stdout, v...)
+}
+
+// Prints to STDOUT without string formatting; application exits with error code 0.
+// Used for exiting succesfully with message after expected outcome, e.g. successful single-call application runs.
+func Exit(v ...interface{}) {
+fmt.Fprintln(os.Stdout, v...)
+os.Exit(0)
+}
+
+// Prints to STDOUT without string formatting; application exits with error code 1.
+// Used for terminating with message after to be expected errors, e.g. wrong arguments or during init().
+func Abort(v ...interface{}) {
+fmt.Fprintln(os.Stdout, v...)
+os.Exit(1)
+}
+
+// Prints to DEBUG writer without string formatting; application continues.
+// Used for logging additional information, primarily for development.
+func Debug(v ...interface{}) {
+DebugLog.Output(2, printStr(v...))
+}
+
+// Prints to INFO writer without string formatting; application continues.
+// Used for logging additional information, e.g. notable returns or common fail-cases.
+func Info(v ...interface{}) {
+InfoLog.Output(2, printStr(v...))
+}
+
+// Prints to WARNING writer without string formatting; application continues.
+// Used for logging important information, e.g. uncommon edge-cases or administration related information.
+func Warn(v ...interface{}) {
+WarnLog.Output(2, printStr(v...))
+}
+
+// Prints to ERROR writer without string formatting; application continues.
+// Used for logging errors, but code still can return default(s) or nil.
+func Error(v ...interface{}) {
+ErrLog.Output(2, printStr(v...))
+}
+
+// Prints to CRITICAL writer without string formatting; application exits with error code 1.
+// Used for terminating on unexpected errors with date and code location.
+func Fatal(v ...interface{}) {
+CritLog.Output(2, printStr(v...))
+os.Exit(1)
+}
+
+// Prints to PANIC function without string formatting; application exits with panic.
+// Used for terminating on unexpected errors with stacktrace.
+func Panic(v ...interface{}) {
+panic(printStr(v...))
+}
+
+/* PRINT FORMAT*/
+
+// Prints to STDOUT with string formatting; application continues.
+// Used for special cases not requiring log information like date or location.
+func Printf(format string, v ...interface{}) {
+fmt.Fprintf(os.Stdout, format, v...)
+}
+
+// Prints to STDOUT with string formatting; application exits with error code 0.
+// Used for exiting succesfully with message after expected outcome, e.g. successful single-call application runs.
+func Exitf(format string, v ...interface{}) {
+fmt.Fprintf(os.Stdout, format, v...)
+os.Exit(0)
+}
+
+// Prints to STDOUT with string formatting; application exits with error code 1.
+// Used for terminating with message after to be expected errors, e.g. wrong arguments or during init().
+func Abortf(format string, v ...interface{}) {
+fmt.Fprintf(os.Stdout, format, v...)
+os.Exit(1)
+}
+
+// Prints to DEBUG writer with string formatting; application continues.
+// Used for logging additional information, primarily for development.
+func Debugf(format string, v ...interface{}) {
+DebugLog.Output(2, printfStr(format, v...))
+}
+
+// Prints to INFO writer with string formatting; application continues.
+// Used for logging additional information, e.g. notable returns or common fail-cases.
+func Infof(format string, v ...interface{}) {
+InfoLog.Output(2, printfStr(format, v...))
+}
+
+// Prints to WARNING writer with string formatting; application continues.
+// Used for logging important information, e.g. uncommon edge-cases or administration related information.
+func Warnf(format string, v ...interface{}) {
+WarnLog.Output(2, printfStr(format, v...))
+}
+
+// Prints to ERROR writer with string formatting; application continues.
+// Used for logging errors, but code still can return default(s) or nil.
+func Errorf(format string, v ...interface{}) {
+ErrLog.Output(2, printfStr(format, v...))
+}
+
+// Prints to CRITICAL writer with string formatting; application exits with error code 1.
+// Used for terminating on unexpected errors with date and code location.
+func Fatalf(format string, v ...interface{}) {
+CritLog.Output(2, printfStr(format, v...))
+os.Exit(1)
+}
+
+// Prints to PANIC function with string formatting; application exits with panic.
+// Used for terminating on unexpected errors with stacktrace.
+func Panicf(format string, v ...interface{}) {
+panic(printfStr(format, v...))
+}
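Note (editor's addition, not part of the commit): after this change pkg/log separates plain STDOUT helpers (Print/Exit/Abort and their *f variants) from the leveled writers (Debug/Info/Warn/Error/Fatal/Panic). A minimal, hypothetical usage sketch based only on the signatures shown in the diff above:

```go
package main

import "github.com/ClusterCockpit/cc-backend/pkg/log"

func main() {
	// Select loglevel "info" and prefix leveled output with date and time.
	log.Init("info", true)

	log.Print("plain STDOUT message, no log decoration, application continues")
	log.Infof("leveled INFO message for %s", "operators")
	log.Warn("leveled WARNING message")

	// Exit/Exitf print to STDOUT and end the process with code 0,
	// Abort/Abortf with code 1; Fatal/Fatalf write to the CRITICAL
	// writer before exiting with code 1.
	log.Exit("done, exiting successfully")
}
```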
@@ -22,8 +22,7 @@ func parseDate(in string) int64 {
if in != "" {
t, err := time.ParseInLocation(shortForm, in, loc)
if err != nil {
-fmt.Printf("date parse error %v", err)
+log.Abortf("Archive Manager Main: Date parse failed with input: '%s'\nError: %s\n", in, err.Error())
-os.Exit(0)
}
return t.Unix()
}
web/frontend/package-lock.json (generated, 6 changed lines)
@@ -59,9 +59,9 @@
}
},
"node_modules/@babel/runtime": {
-"version": "7.26.0",
+"version": "7.27.0",
-"resolved": "https://registry.npmjs.org/@babel/runtime/-/runtime-7.26.0.tgz",
+"resolved": "https://registry.npmjs.org/@babel/runtime/-/runtime-7.27.0.tgz",
-"integrity": "sha512-FDSOghenHTiToteC/QRlv2q3DhPZ/oOXTBoirfWNx1Cx3TMVcGWQtMMmQcSvb/JjpNeGzx8Pq/b4fKEJuWm1sw==",
+"integrity": "sha512-VtPOkrdPHZsKc/clNqyi9WUA8TINkZ4cGk63UUE3u4pmB2k+ZMQRDuIOagv8UVd6j7k0T3+RRIb7beKTebNbcw==",
"license": "MIT",
"dependencies": {
"regenerator-runtime": "^0.14.0"
@@ -53,10 +53,16 @@
{ range: "last30d", rangeLabel: "Last 30 days"}
];

+const nodeMatchLabels = {
+eq: "",
+contains: " Contains",
+}
+
let filters = {
projectMatch: filterPresets.projectMatch || "contains",
userMatch: filterPresets.userMatch || "contains",
jobIdMatch: filterPresets.jobIdMatch || "eq",
+nodeMatch: filterPresets.nodeMatch || "eq",

cluster: filterPresets.cluster || null,
partition: filterPresets.partition || null,
@@ -106,7 +112,7 @@

let items = [];
if (filters.cluster) items.push({ cluster: { eq: filters.cluster } });
-if (filters.node) items.push({ node: { contains: filters.node } });
+if (filters.node) items.push({ node: { [filters.nodeMatch]: filters.node } });
if (filters.partition) items.push({ partition: { eq: filters.partition } });
if (filters.states.length != allJobStates.length)
items.push({ state: filters.states });
@@ -178,6 +184,8 @@
let opts = [];
if (filters.cluster) opts.push(`cluster=${filters.cluster}`);
if (filters.node) opts.push(`node=${filters.node}`);
+if (filters.node && filters.nodeMatch != "eq") // "eq" is default-case
+opts.push(`nodeMatch=${filters.nodeMatch}`);
if (filters.partition) opts.push(`partition=${filters.partition}`);
if (filters.states.length != allJobStates.length)
for (let state of filters.states) opts.push(`state=${state}`);
@@ -196,7 +204,7 @@
opts.push(`jobId=${singleJobId}`);
}
if (filters.jobIdMatch != "eq")
-opts.push(`jobIdMatch=${filters.jobIdMatch}`);
+opts.push(`jobIdMatch=${filters.jobIdMatch}`); // "eq" is default-case
for (let tag of filters.tags) opts.push(`tag=${tag}`);
if (filters.duration.from && filters.duration.to)
opts.push(`duration=${filters.duration.from}-${filters.duration.to}`);
@@ -218,13 +226,13 @@
} else {
for (let singleUser of filters.user) opts.push(`user=${singleUser}`);
}
-if (filters.userMatch != "contains")
+if (filters.userMatch != "contains") // "contains" is default-case
opts.push(`userMatch=${filters.userMatch}`);
if (filters.project) opts.push(`project=${filters.project}`);
+if (filters.project && filters.projectMatch != "contains") // "contains" is default-case
+opts.push(`projectMatch=${filters.projectMatch}`);
if (filters.jobName) opts.push(`jobName=${filters.jobName}`);
if (filters.arrayJobId) opts.push(`arrayJobId=${filters.arrayJobId}`);
-if (filters.project && filters.projectMatch != "contains")
-opts.push(`projectMatch=${filters.projectMatch}`);
if (filters.stats.length != 0)
for (let stat of filters.stats) {
opts.push(`stat=${stat.field}-${stat.from}-${stat.to}`);
@@ -386,7 +394,7 @@

{#if filters.node != null}
<Info icon="hdd-stack" on:click={() => (isResourcesOpen = true)}>
-Node: {filters.node}
+Node{nodeMatchLabels[filters.nodeMatch]}: {filters.node}
</Info>
{/if}

@@ -449,6 +457,7 @@
bind:numHWThreads={filters.numHWThreads}
bind:numAccelerators={filters.numAccelerators}
bind:namedNode={filters.node}
+bind:nodeMatch={filters.nodeMatch}
bind:isNodesModified
bind:isHwthreadsModified
bind:isAccsModified
@@ -24,6 +24,7 @@
ModalBody,
ModalHeader,
ModalFooter,
+Input
} from "@sveltestrap/sveltestrap";
import DoubleRangeSlider from "../select/DoubleRangeSlider.svelte";

@@ -40,11 +41,18 @@
export let isHwthreadsModified = false;
export let isAccsModified = false;
export let namedNode = null;
+export let nodeMatch = "eq"

let pendingNumNodes = numNodes,
pendingNumHWThreads = numHWThreads,
pendingNumAccelerators = numAccelerators,
-pendingNamedNode = namedNode;
+pendingNamedNode = namedNode,
+pendingNodeMatch = nodeMatch;

+const nodeMatchLabels = {
+eq: "Equal To",
+contains: "Contains",
+}
+
const findMaxNumAccels = (clusters) =>
clusters.reduce(
@@ -145,7 +153,17 @@
<ModalHeader>Select number of utilized Resources</ModalHeader>
<ModalBody>
<h6>Named Node</h6>
-<input type="text" class="form-control" bind:value={pendingNamedNode} />
+<div class="d-flex">
+<Input type="text" class="w-75" bind:value={pendingNamedNode} />
+<div class="mx-1"></div>
+<Input type="select" class="w-25" bind:value={pendingNodeMatch}>
+{#each Object.entries(nodeMatchLabels) as [nodeMatchKey, nodeMatchLabel]}
+<option value={nodeMatchKey}>
+{nodeMatchLabel}
+</option>
+{/each}
+</Input>
+</div>
<h6 style="margin-top: 1rem;">Number of Nodes</h6>
<DoubleRangeSlider
on:change={({ detail }) => {
@@ -215,11 +233,13 @@
to: pendingNumAccelerators.to,
};
namedNode = pendingNamedNode;
+nodeMatch = pendingNodeMatch;
dispatch("set-filter", {
numNodes,
numHWThreads,
numAccelerators,
namedNode,
+nodeMatch
});
}}
>
@@ -233,6 +253,7 @@
pendingNumHWThreads = { from: null, to: null };
pendingNumAccelerators = { from: null, to: null };
pendingNamedNode = null;
+pendingNodeMatch = null;
numNodes = { from: pendingNumNodes.from, to: pendingNumNodes.to };
numHWThreads = {
from: pendingNumHWThreads.from,
@@ -246,11 +267,13 @@
isHwthreadsModified = false;
isAccsModified = false;
namedNode = pendingNamedNode;
+nodeMatch = pendingNodeMatch;
dispatch("set-filter", {
numNodes,
numHWThreads,
numAccelerators,
namedNode,
+nodeMatch
});
}}>Reset</Button
>
@@ -41,7 +41,9 @@
if (a == null || b == null) return -1;

if (field === "id") {
-return s.dir != "up" ? a[field].localeCompare(b[field]) : b[field].localeCompare(a[field])
+return s.dir != "up" ?
+a[field].localeCompare(b[field], undefined, {numeric: true, sensitivity: 'base'}) :
+b[field].localeCompare(a[field], undefined, {numeric: true, sensitivity: 'base'})
} else {
return s.dir != "up"
? a.data[field] - b.data[field]
@@ -205,7 +205,7 @@
</Col>
</Row>
{:else}
-{#each nodes as nodeData}
+{#each nodes as nodeData (nodeData.host)}
<NodeListRow {nodeData} {cluster} {selectedMetrics}/>
{:else}
<tr>
@@ -26,8 +26,7 @@ var frontendFiles embed.FS
func ServeFiles() http.Handler {
publicFiles, err := fs.Sub(frontendFiles, "frontend/public")
if err != nil {
-log.Fatalf("WEB/WEB > cannot find frontend public files")
+log.Abortf("Serve Files: Could not find 'frontend/public' file directory.\nError: %s\n", err.Error())
-panic(err)
}
return http.FileServer(http.FS(publicFiles))
}
@@ -75,8 +74,7 @@ func init() {
templates[strings.TrimPrefix(path, "templates/")] = template.Must(template.Must(base.Clone()).ParseFS(templateFiles, path))
return nil
}); err != nil {
-log.Fatalf("WEB/WEB > cannot find frontend template files")
+log.Abortf("Web init(): Could not find frontend template files.\nError: %s\n", err.Error())
-panic(err)
}

_ = base