mirror of https://github.com/ClusterCockpit/cc-backend, synced 2025-07-01 11:13:50 +02:00

Merge pull request #399 from ClusterCockpit/port-to-cclib: Port to cclib

This commit is contained in: commit c13f386e3b
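Summary: the port swaps cc-backend's in-tree helper packages for the shared cc-lib module. pkg/log becomes cc-lib/ccLogger (imported under the alias cclog, so call sites change only their prefix), pkg/schema becomes cc-lib/schema, and internal/util becomes cc-lib/util. A minimal sketch of the resulting import pattern, assembled from the hunks below (the surrounding file is illustrative, not part of the commit):

    package main

    import (
        // Removed by this PR:
        //   "github.com/ClusterCockpit/cc-backend/internal/util"
        //   "github.com/ClusterCockpit/cc-backend/pkg/log"
        //   "github.com/ClusterCockpit/cc-backend/pkg/schema"
        // Added by this PR:
        cclog "github.com/ClusterCockpit/cc-lib/ccLogger"
        "github.com/ClusterCockpit/cc-lib/schema"
        "github.com/ClusterCockpit/cc-lib/util"
    )

    func main() {
        if util.CheckFileExists("var") { // same helper, now from cc-lib
            cclog.Print("./var exists") // formerly log.Print
        }
        _ = schema.User{} // schema types now resolve to cc-lib
    }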
@@ -1,5 +1,5 @@
 // Copyright (C) NHR@FAU, University Erlangen-Nuremberg.
-// All rights reserved.
+// All rights reserved. This file is part of cc-backend.
 // Use of this source code is governed by a MIT-style
 // license that can be found in the LICENSE file.
 package main
@@ -1,5 +1,5 @@
 // Copyright (C) NHR@FAU, University Erlangen-Nuremberg.
-// All rights reserved.
+// All rights reserved. This file is part of cc-backend.
 // Use of this source code is governed by a MIT-style
 // license that can be found in the LICENSE file.
 package main
@@ -8,8 +8,8 @@ import (
     "os"

     "github.com/ClusterCockpit/cc-backend/internal/repository"
-    "github.com/ClusterCockpit/cc-backend/internal/util"
-    "github.com/ClusterCockpit/cc-backend/pkg/log"
+    cclog "github.com/ClusterCockpit/cc-lib/ccLogger"
+    "github.com/ClusterCockpit/cc-lib/util"
 )

 const envString = `
@@ -73,23 +73,23 @@ const configString = `

 func initEnv() {
     if util.CheckFileExists("var") {
-        log.Exit("Directory ./var already exists. Cautiously exiting application initialization.")
+        cclog.Exit("Directory ./var already exists. Cautiously exiting application initialization.")
     }

     if err := os.WriteFile("config.json", []byte(configString), 0o666); err != nil {
-        log.Abortf("Could not write default ./config.json with permissions '0o666'. Application initialization failed, exited.\nError: %s\n", err.Error())
+        cclog.Abortf("Could not write default ./config.json with permissions '0o666'. Application initialization failed, exited.\nError: %s\n", err.Error())
     }

     if err := os.WriteFile(".env", []byte(envString), 0o666); err != nil {
-        log.Abortf("Could not write default ./.env file with permissions '0o666'. Application initialization failed, exited.\nError: %s\n", err.Error())
+        cclog.Abortf("Could not write default ./.env file with permissions '0o666'. Application initialization failed, exited.\nError: %s\n", err.Error())
     }

     if err := os.Mkdir("var", 0o777); err != nil {
-        log.Abortf("Could not create default ./var folder with permissions '0o777'. Application initialization failed, exited.\nError: %s\n", err.Error())
+        cclog.Abortf("Could not create default ./var folder with permissions '0o777'. Application initialization failed, exited.\nError: %s\n", err.Error())
     }

     err := repository.MigrateDB("sqlite3", "./var/job.db")
     if err != nil {
-        log.Abortf("Could not initialize default sqlite3 database as './var/job.db'. Application initialization failed, exited.\nError: %s\n", err.Error())
+        cclog.Abortf("Could not initialize default sqlite3 database as './var/job.db'. Application initialization failed, exited.\nError: %s\n", err.Error())
     }
 }
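Note on the rename mechanics: because the new logger is imported under the alias cclog, every call keeps its function name and arguments; only the package prefix changes. Judging from their use in initEnv above, cclog.Exit logs a message and terminates, while cclog.Abortf logs a formatted message and aborts with an error status (assumed semantics, inferred from usage rather than documented in this diff):

    package main

    import (
        "os"

        cclog "github.com/ClusterCockpit/cc-lib/ccLogger"
    )

    func main() {
        if err := os.Mkdir("var", 0o777); err != nil {
            // Formerly log.Abortf(...): same format string, new prefix.
            cclog.Abortf("Could not create ./var folder.\nError: %s\n", err.Error())
        }
        cclog.Exit("Setup complete.")
    }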
@@ -1,5 +1,5 @@
 // Copyright (C) NHR@FAU, University Erlangen-Nuremberg.
-// All rights reserved.
+// All rights reserved. This file is part of cc-backend.
 // Use of this source code is governed by a MIT-style
 // license that can be found in the LICENSE file.
 package main
@@ -21,11 +21,11 @@ import (
     "github.com/ClusterCockpit/cc-backend/internal/repository"
     "github.com/ClusterCockpit/cc-backend/internal/tagger"
     "github.com/ClusterCockpit/cc-backend/internal/taskManager"
-    "github.com/ClusterCockpit/cc-backend/internal/util"
     "github.com/ClusterCockpit/cc-backend/pkg/archive"
-    "github.com/ClusterCockpit/cc-backend/pkg/log"
     "github.com/ClusterCockpit/cc-backend/pkg/runtimeEnv"
-    "github.com/ClusterCockpit/cc-backend/pkg/schema"
+    cclog "github.com/ClusterCockpit/cc-lib/ccLogger"
+    "github.com/ClusterCockpit/cc-lib/schema"
+    "github.com/ClusterCockpit/cc-lib/util"
     "github.com/google/gops/agent"
     "github.com/joho/godotenv"

@@ -61,13 +61,12 @@ func main() {
         os.Exit(0)
     }

-    // Apply config flags for pkg/log
-    log.Init(flagLogLevel, flagLogDateTime)
+    cclog.Init(flagLogLevel, flagLogDateTime)

     // If init flag set, run tasks here before any file dependencies cause errors
     if flagInit {
         initEnv()
-        log.Exit("Successfully setup environment!\n" +
+        cclog.Exit("Successfully setup environment!\n" +
             "Please review config.json and .env and adjust it to your needs.\n" +
             "Add your job-archive at ./var/job-archive.")
     }
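Aside: the two-line initialization (a comment plus log.Init) collapses into a single cclog.Init call. Its call shape — a level string and a date/time toggle — matches the test setup later in this diff (cclog.Init("info", true)); a minimal sketch under that assumption:

    package main

    import cclog "github.com/ClusterCockpit/cc-lib/ccLogger"

    func main() {
        cclog.Init("debug", true) // level string, include timestamps
        cclog.Debugf("%s %s (%d, %.02fkb, %dms)", "GET", "/api/jobs", 200, 1.2, 3)
    }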
@@ -75,13 +74,13 @@ func main() {
     // See https://github.com/google/gops (Runtime overhead is almost zero)
     if flagGops {
         if err := agent.Listen(agent.Options{}); err != nil {
-            log.Abortf("Could not start gops agent with 'gops/agent.Listen(agent.Options{})'. Application startup failed, exited.\nError: %s\n", err.Error())
+            cclog.Abortf("Could not start gops agent with 'gops/agent.Listen(agent.Options{})'. Application startup failed, exited.\nError: %s\n", err.Error())
         }
     }

     err := godotenv.Load()
     if err != nil {
-        log.Abortf("Could not parse existing .env file at location './.env'. Application startup failed, exited.\nError: %s\n", err.Error())
+        cclog.Abortf("Could not parse existing .env file at location './.env'. Application startup failed, exited.\nError: %s\n", err.Error())
     }

     // Initialize sub-modules and handle command line flags.
@@ -99,25 +98,25 @@ func main() {
     if flagMigrateDB {
         err := repository.MigrateDB(config.Keys.DBDriver, config.Keys.DB)
         if err != nil {
-            log.Abortf("MigrateDB Failed: Could not migrate '%s' database at location '%s' to version %d.\nError: %s\n", config.Keys.DBDriver, config.Keys.DB, repository.Version, err.Error())
+            cclog.Abortf("MigrateDB Failed: Could not migrate '%s' database at location '%s' to version %d.\nError: %s\n", config.Keys.DBDriver, config.Keys.DB, repository.Version, err.Error())
         }
-        log.Exitf("MigrateDB Success: Migrated '%s' database at location '%s' to version %d.\n", config.Keys.DBDriver, config.Keys.DB, repository.Version)
+        cclog.Exitf("MigrateDB Success: Migrated '%s' database at location '%s' to version %d.\n", config.Keys.DBDriver, config.Keys.DB, repository.Version)
     }

     if flagRevertDB {
         err := repository.RevertDB(config.Keys.DBDriver, config.Keys.DB)
         if err != nil {
-            log.Abortf("RevertDB Failed: Could not revert '%s' database at location '%s' to version %d.\nError: %s\n", config.Keys.DBDriver, config.Keys.DB, (repository.Version - 1), err.Error())
+            cclog.Abortf("RevertDB Failed: Could not revert '%s' database at location '%s' to version %d.\nError: %s\n", config.Keys.DBDriver, config.Keys.DB, (repository.Version - 1), err.Error())
         }
-        log.Exitf("RevertDB Success: Reverted '%s' database at location '%s' to version %d.\n", config.Keys.DBDriver, config.Keys.DB, (repository.Version - 1))
+        cclog.Exitf("RevertDB Success: Reverted '%s' database at location '%s' to version %d.\n", config.Keys.DBDriver, config.Keys.DB, (repository.Version - 1))
     }

     if flagForceDB {
         err := repository.ForceDB(config.Keys.DBDriver, config.Keys.DB)
         if err != nil {
-            log.Abortf("ForceDB Failed: Could not force '%s' database at location '%s' to version %d.\nError: %s\n", config.Keys.DBDriver, config.Keys.DB, repository.Version, err.Error())
+            cclog.Abortf("ForceDB Failed: Could not force '%s' database at location '%s' to version %d.\nError: %s\n", config.Keys.DBDriver, config.Keys.DB, repository.Version, err.Error())
         }
-        log.Exitf("ForceDB Success: Forced '%s' database at location '%s' to version %d.\n", config.Keys.DBDriver, config.Keys.DB, repository.Version)
+        cclog.Exitf("ForceDB Success: Forced '%s' database at location '%s' to version %d.\n", config.Keys.DBDriver, config.Keys.DB, repository.Version)
     }

     repository.Connect(config.Keys.DBDriver, config.Keys.DB)
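For reference, the migration entry points used here share the two-string call shape seen in initEnv (driver, database path) and return an error; a condensed sketch of the flag-driven migration path, with the driver and path hardcoded for illustration (the real code reads config.Keys.DBDriver and config.Keys.DB):

    package main

    import (
        "github.com/ClusterCockpit/cc-backend/internal/repository"
        cclog "github.com/ClusterCockpit/cc-lib/ccLogger"
    )

    func main() {
        if err := repository.MigrateDB("sqlite3", "./var/job.db"); err != nil {
            cclog.Abortf("MigrateDB Failed.\nError: %s\n", err.Error())
        }
        // cclog.Exitf terminates here, so a migration run never falls
        // through into normal server startup.
        cclog.Exitf("Migrated database to version %d.\n", repository.Version)
    }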
@@ -129,7 +128,7 @@ func main() {
     if flagNewUser != "" {
         parts := strings.SplitN(flagNewUser, ":", 3)
         if len(parts) != 3 || len(parts[0]) == 0 {
-            log.Abortf("Add User: Could not parse supplied argument format: No changes.\n"+
+            cclog.Abortf("Add User: Could not parse supplied argument format: No changes.\n"+
                 "Want: <username>:[admin,support,manager,api,user]:<password>\n"+
                 "Have: %s\n", flagNewUser)
         }
@@ -138,18 +137,18 @@ func main() {
         if err := ur.AddUser(&schema.User{
             Username: parts[0], Projects: make([]string, 0), Password: parts[2], Roles: strings.Split(parts[1], ","),
         }); err != nil {
-            log.Abortf("Add User: Could not add new user authentication for '%s' and roles '%s'.\nError: %s\n", parts[0], parts[1], err.Error())
+            cclog.Abortf("Add User: Could not add new user authentication for '%s' and roles '%s'.\nError: %s\n", parts[0], parts[1], err.Error())
         } else {
-            log.Printf("Add User: Added new user '%s' with roles '%s'.\n", parts[0], parts[1])
+            cclog.Printf("Add User: Added new user '%s' with roles '%s'.\n", parts[0], parts[1])
         }
     }

     if flagDelUser != "" {
         ur := repository.GetUserRepository()
         if err := ur.DelUser(flagDelUser); err != nil {
-            log.Abortf("Delete User: Could not delete user '%s' from DB.\nError: %s\n", flagDelUser, err.Error())
+            cclog.Abortf("Delete User: Could not delete user '%s' from DB.\nError: %s\n", flagDelUser, err.Error())
         } else {
-            log.Printf("Delete User: Deleted user '%s' from DB.\n", flagDelUser)
+            cclog.Printf("Delete User: Deleted user '%s' from DB.\n", flagDelUser)
         }
     }

@@ -157,59 +156,59 @@ func main() {

         if flagSyncLDAP {
             if authHandle.LdapAuth == nil {
-                log.Abort("Sync LDAP: LDAP authentication is not configured, could not synchronize. No changes, exited.")
+                cclog.Abort("Sync LDAP: LDAP authentication is not configured, could not synchronize. No changes, exited.")
             }

             if err := authHandle.LdapAuth.Sync(); err != nil {
-                log.Abortf("Sync LDAP: Could not synchronize, failed with error.\nError: %s\n", err.Error())
+                cclog.Abortf("Sync LDAP: Could not synchronize, failed with error.\nError: %s\n", err.Error())
             }
-            log.Print("Sync LDAP: LDAP synchronization successfull.")
+            cclog.Print("Sync LDAP: LDAP synchronization successfull.")
         }

         if flagGenJWT != "" {
             ur := repository.GetUserRepository()
             user, err := ur.GetUser(flagGenJWT)
             if err != nil {
-                log.Abortf("JWT: Could not get supplied user '%s' from DB. No changes, exited.\nError: %s\n", flagGenJWT, err.Error())
+                cclog.Abortf("JWT: Could not get supplied user '%s' from DB. No changes, exited.\nError: %s\n", flagGenJWT, err.Error())
             }

             if !user.HasRole(schema.RoleApi) {
-                log.Warnf("JWT: User '%s' does not have the role 'api'. REST API endpoints will return error!\n", user.Username)
+                cclog.Warnf("JWT: User '%s' does not have the role 'api'. REST API endpoints will return error!\n", user.Username)
             }

             jwt, err := authHandle.JwtAuth.ProvideJWT(user)
             if err != nil {
-                log.Abortf("JWT: User '%s' found in DB, but failed to provide JWT.\nError: %s\n", user.Username, err.Error())
+                cclog.Abortf("JWT: User '%s' found in DB, but failed to provide JWT.\nError: %s\n", user.Username, err.Error())
             }

-            log.Printf("JWT: Successfully generated JWT for user '%s': %s\n", user.Username, jwt)
+            cclog.Printf("JWT: Successfully generated JWT for user '%s': %s\n", user.Username, jwt)
         }

     } else if flagNewUser != "" || flagDelUser != "" {
-        log.Abort("Error: Arguments '--add-user' and '--del-user' can only be used if authentication is enabled. No changes, exited.")
+        cclog.Abort("Error: Arguments '--add-user' and '--del-user' can only be used if authentication is enabled. No changes, exited.")
     }

     if err := archive.Init(config.Keys.Archive, config.Keys.DisableArchive); err != nil {
-        log.Abortf("Init: Failed to initialize archive.\nError: %s\n", err.Error())
+        cclog.Abortf("Init: Failed to initialize archive.\nError: %s\n", err.Error())
     }

     if err := metricdata.Init(); err != nil {
-        log.Abortf("Init: Failed to initialize metricdata repository.\nError %s\n", err.Error())
+        cclog.Abortf("Init: Failed to initialize metricdata repository.\nError %s\n", err.Error())
     }

     if flagReinitDB {
         if err := importer.InitDB(); err != nil {
-            log.Abortf("Init DB: Failed to re-initialize repository DB.\nError: %s\n", err.Error())
+            cclog.Abortf("Init DB: Failed to re-initialize repository DB.\nError: %s\n", err.Error())
         } else {
-            log.Print("Init DB: Sucessfully re-initialized repository DB.")
+            cclog.Print("Init DB: Sucessfully re-initialized repository DB.")
         }
     }

     if flagImportJob != "" {
         if err := importer.HandleImportFlag(flagImportJob); err != nil {
-            log.Abortf("Import Job: Job import failed.\nError: %s\n", err.Error())
+            cclog.Abortf("Import Job: Job import failed.\nError: %s\n", err.Error())
         } else {
-            log.Printf("Import Job: Imported Job '%s' into DB.\n", flagImportJob)
+            cclog.Printf("Import Job: Imported Job '%s' into DB.\n", flagImportJob)
         }
     }

@@ -219,12 +218,12 @@ func main() {

     if flagApplyTags {
         if err := tagger.RunTaggers(); err != nil {
-            log.Abortf("Running job taggers.\nError: %s\n", err.Error())
+            cclog.Abortf("Running job taggers.\nError: %s\n", err.Error())
         }
     }

     if !flagServer {
-        log.Exit("No errors, server flag not set. Exiting cc-backend.")
+        cclog.Exit("No errors, server flag not set. Exiting cc-backend.")
     }

     archiver.Start(repository.GetJobRepository())
@@ -260,5 +259,5 @@ func main() {
     }
     runtimeEnv.SystemdNotifiy(true, "running")
     wg.Wait()
-    log.Print("Graceful shutdown completed!")
+    cclog.Print("Graceful shutdown completed!")
 }
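The tail of main shows the shutdown contract: presumably notify systemd, block on the WaitGroup that the server and task goroutines registered with, then log. A condensed sketch of that shape ("SystemdNotifiy" is the identifier exactly as it appears in pkg/runtimeEnv):

    package main

    import (
        "sync"

        "github.com/ClusterCockpit/cc-backend/pkg/runtimeEnv"
        cclog "github.com/ClusterCockpit/cc-lib/ccLogger"
    )

    func main() {
        var wg sync.WaitGroup
        // ... goroutines call wg.Add(1) and defer wg.Done() ...
        runtimeEnv.SystemdNotifiy(true, "running")
        wg.Wait()
        cclog.Print("Graceful shutdown completed!")
    }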
@@ -1,5 +1,5 @@
 // Copyright (C) NHR@FAU, University Erlangen-Nuremberg.
-// All rights reserved.
+// All rights reserved. This file is part of cc-backend.
 // Use of this source code is governed by a MIT-style
 // license that can be found in the LICENSE file.
 package main
@@ -27,9 +27,9 @@ import (
     "github.com/ClusterCockpit/cc-backend/internal/graph"
     "github.com/ClusterCockpit/cc-backend/internal/graph/generated"
     "github.com/ClusterCockpit/cc-backend/internal/routerConfig"
-    "github.com/ClusterCockpit/cc-backend/pkg/log"
     "github.com/ClusterCockpit/cc-backend/pkg/runtimeEnv"
     "github.com/ClusterCockpit/cc-backend/web"
+    cclog "github.com/ClusterCockpit/cc-lib/ccLogger"
     "github.com/gorilla/handlers"
     "github.com/gorilla/mux"
     httpSwagger "github.com/swaggo/http-swagger"
@@ -101,7 +101,7 @@ func serverInit() {

     router.HandleFunc("/login", func(rw http.ResponseWriter, r *http.Request) {
         rw.Header().Add("Content-Type", "text/html; charset=utf-8")
-        log.Debugf("##%v##", info)
+        cclog.Debugf("##%v##", info)
         web.RenderTemplate(rw, "login.tmpl", &web.Page{Title: "Login", Build: buildInfo, Infos: info})
     }).Methods(http.MethodGet)
     router.HandleFunc("/imprint", func(rw http.ResponseWriter, r *http.Request) {
@@ -237,7 +237,7 @@ func serverInit() {
     if config.Keys.EmbedStaticFiles {
         if i, err := os.Stat("./var/img"); err == nil {
             if i.IsDir() {
-                log.Info("Use local directory for static images")
+                cclog.Info("Use local directory for static images")
                 router.PathPrefix("/img/").Handler(http.StripPrefix("/img/", http.FileServer(http.Dir("./var/img"))))
             }
         }
@@ -258,12 +258,12 @@ func serverInit() {
 func serverStart() {
     handler := handlers.CustomLoggingHandler(io.Discard, router, func(_ io.Writer, params handlers.LogFormatterParams) {
         if strings.HasPrefix(params.Request.RequestURI, "/api/") {
-            log.Debugf("%s %s (%d, %.02fkb, %dms)",
+            cclog.Debugf("%s %s (%d, %.02fkb, %dms)",
                 params.Request.Method, params.URL.RequestURI(),
                 params.StatusCode, float32(params.Size)/1024,
                 time.Since(params.TimeStamp).Milliseconds())
         } else {
-            log.Debugf("%s %s (%d, %.02fkb, %dms)",
+            cclog.Debugf("%s %s (%d, %.02fkb, %dms)",
                 params.Request.Method, params.URL.RequestURI(),
                 params.StatusCode, float32(params.Size)/1024,
                 time.Since(params.TimeStamp).Milliseconds())
@@ -280,7 +280,7 @@ func serverStart() {
     // Start http or https server
     listener, err := net.Listen("tcp", config.Keys.Addr)
     if err != nil {
-        log.Abortf("Server Start: Starting http listener on '%s' failed.\nError: %s\n", config.Keys.Addr, err.Error())
+        cclog.Abortf("Server Start: Starting http listener on '%s' failed.\nError: %s\n", config.Keys.Addr, err.Error())
     }

     if !strings.HasSuffix(config.Keys.Addr, ":80") && config.Keys.RedirectHttpTo != "" {
@@ -293,7 +293,7 @@ func serverStart() {
         cert, err := tls.LoadX509KeyPair(
             config.Keys.HttpsCertFile, config.Keys.HttpsKeyFile)
         if err != nil {
-            log.Abortf("Server Start: Loading X509 keypair failed. Check options 'https-cert-file' and 'https-key-file' in 'config.json'.\nError: %s\n", err.Error())
+            cclog.Abortf("Server Start: Loading X509 keypair failed. Check options 'https-cert-file' and 'https-key-file' in 'config.json'.\nError: %s\n", err.Error())
         }
         listener = tls.NewListener(listener, &tls.Config{
             Certificates: []tls.Certificate{cert},
@@ -304,20 +304,20 @@ func serverStart() {
             MinVersion:               tls.VersionTLS12,
             PreferServerCipherSuites: true,
         })
-        log.Printf("HTTPS server listening at %s...\n", config.Keys.Addr)
+        cclog.Printf("HTTPS server listening at %s...\n", config.Keys.Addr)
     } else {
-        log.Printf("HTTP server listening at %s...\n", config.Keys.Addr)
+        cclog.Printf("HTTP server listening at %s...\n", config.Keys.Addr)
     }
     //
     // Because this program will want to bind to a privileged port (like 80), the listener must
     // be established first, then the user can be changed, and after that,
     // the actual http server can be started.
     if err := runtimeEnv.DropPrivileges(config.Keys.Group, config.Keys.User); err != nil {
-        log.Abortf("Server Start: Error while preparing server start.\nError: %s\n", err.Error())
+        cclog.Abortf("Server Start: Error while preparing server start.\nError: %s\n", err.Error())
     }

     if err = server.Serve(listener); err != nil && err != http.ErrServerClosed {
-        log.Abortf("Server Start: Starting server failed.\nError: %s\n", err.Error())
+        cclog.Abortf("Server Start: Starting server failed.\nError: %s\n", err.Error())
     }
 }

go.mod (30 lines changed)
@@ -6,11 +6,10 @@ toolchain go1.24.1

 require (
     github.com/99designs/gqlgen v0.17.66
-    github.com/ClusterCockpit/cc-units v0.4.0
+    github.com/ClusterCockpit/cc-lib v0.3.0
     github.com/Masterminds/squirrel v1.5.4
     github.com/coreos/go-oidc/v3 v3.12.0
     github.com/expr-lang/expr v1.17.3
-    github.com/fsnotify/fsnotify v1.9.0
     github.com/go-co-op/gocron/v2 v2.16.0
     github.com/go-ldap/ldap/v3 v3.4.10
     github.com/go-sql-driver/mysql v1.9.0
@@ -23,15 +22,14 @@ require (
     github.com/jmoiron/sqlx v1.4.0
     github.com/joho/godotenv v1.5.1
     github.com/mattn/go-sqlite3 v1.14.24
-    github.com/prometheus/client_golang v1.21.0
-    github.com/prometheus/common v0.62.0
+    github.com/prometheus/client_golang v1.22.0
+    github.com/prometheus/common v0.63.0
     github.com/qustavo/sqlhooks/v2 v2.1.0
     github.com/santhosh-tekuri/jsonschema/v5 v5.3.1
     github.com/swaggo/http-swagger v1.3.4
     github.com/swaggo/swag v1.16.4
     github.com/vektah/gqlparser/v2 v2.5.22
-    golang.org/x/crypto v0.35.0
-    golang.org/x/exp v0.0.0-20250218142911-aa4b98e5adaa
+    golang.org/x/crypto v0.37.0
     golang.org/x/oauth2 v0.27.0
     golang.org/x/time v0.5.0
 )
@@ -45,6 +43,7 @@ require (
     github.com/cespare/xxhash/v2 v2.3.0 // indirect
     github.com/cpuguy83/go-md2man/v2 v2.0.6 // indirect
     github.com/felixge/httpsnoop v1.0.4 // indirect
+    github.com/fsnotify/fsnotify v1.9.0 // indirect
     github.com/go-asn1-ber/asn1-ber v1.5.7 // indirect
     github.com/go-jose/go-jose/v4 v4.0.5 // indirect
     github.com/go-openapi/jsonpointer v0.21.0 // indirect
@@ -69,8 +68,8 @@ require (
     github.com/modern-go/reflect2 v1.0.2 // indirect
     github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect
     github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f // indirect
-    github.com/prometheus/client_model v0.6.1 // indirect
-    github.com/prometheus/procfs v0.15.1 // indirect
+    github.com/prometheus/client_model v0.6.2 // indirect
+    github.com/prometheus/procfs v0.16.1 // indirect
     github.com/robfig/cron/v3 v3.0.1 // indirect
     github.com/russross/blackfriday/v2 v2.1.0 // indirect
     github.com/sosodev/duration v1.3.1 // indirect
@@ -78,13 +77,14 @@ require (
     github.com/urfave/cli/v2 v2.27.5 // indirect
     github.com/xrash/smetrics v0.0.0-20240521201337-686a1a2994c1 // indirect
     go.uber.org/atomic v1.11.0 // indirect
-    golang.org/x/mod v0.23.0 // indirect
-    golang.org/x/net v0.36.0 // indirect
-    golang.org/x/sync v0.11.0 // indirect
-    golang.org/x/sys v0.30.0 // indirect
-    golang.org/x/text v0.22.0 // indirect
-    golang.org/x/tools v0.30.0 // indirect
-    google.golang.org/protobuf v1.36.5 // indirect
+    golang.org/x/exp v0.0.0-20250408133849-7e4ce0ab07d0 // indirect
+    golang.org/x/mod v0.24.0 // indirect
+    golang.org/x/net v0.39.0 // indirect
+    golang.org/x/sync v0.13.0 // indirect
+    golang.org/x/sys v0.32.0 // indirect
+    golang.org/x/text v0.24.0 // indirect
+    golang.org/x/tools v0.32.0 // indirect
+    google.golang.org/protobuf v1.36.6 // indirect
     gopkg.in/yaml.v2 v2.4.0 // indirect
     gopkg.in/yaml.v3 v3.0.1 // indirect
     sigs.k8s.io/yaml v1.4.0 // indirect
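Net effect on dependencies: github.com/ClusterCockpit/cc-units v0.4.0 is dropped in favor of github.com/ClusterCockpit/cc-lib v0.3.0; fsnotify and golang.org/x/exp move from direct to indirect requirements; and the prometheus, golang.org/x and protobuf modules receive routine version bumps. An update of this shape is typically produced by running 'go get github.com/ClusterCockpit/cc-lib@v0.3.0' followed by 'go mod tidy' (assumed workflow, not recorded in the commit itself).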
go.sum (61 lines changed)
@@ -6,8 +6,8 @@ github.com/Azure/go-ansiterm v0.0.0-20230124172434-306776ec8161 h1:L/gRVlceqvL25
 github.com/Azure/go-ansiterm v0.0.0-20230124172434-306776ec8161/go.mod h1:xomTg63KZ2rFqZQzSB4Vz2SUXa1BpHTVz9L5PTmPC4E=
 github.com/Azure/go-ntlmssp v0.0.0-20221128193559-754e69321358 h1:mFRzDkZVAjdal+s7s0MwaRv9igoPqLRdzOLzw/8Xvq8=
 github.com/Azure/go-ntlmssp v0.0.0-20221128193559-754e69321358/go.mod h1:chxPXzSsl7ZWRAuOIE23GDNzjWuZquvFlgA8xmpunjU=
-github.com/ClusterCockpit/cc-units v0.4.0 h1:zP5DOu99GmErW0tCDf0gcLrlWt42RQ9dpoONEOh4cI0=
-github.com/ClusterCockpit/cc-units v0.4.0/go.mod h1:3S3PAhAayS3pbgcT4q9Vn9VJw22Op51X0YimtG77zBw=
+github.com/ClusterCockpit/cc-lib v0.3.0 h1:HEWOgnzRM01U10ZFfpiUWMzkLHg5nPdXZqdsiI2q4x0=
+github.com/ClusterCockpit/cc-lib v0.3.0/go.mod h1:7CuXVNIJdynMZf6B9v4m54VCbbFg3ZD0tvLw2bVxN0A=
 github.com/KyleBanks/depth v1.2.1 h1:5h8fQADFrWtarTdtDudMmGsC7GPbOAu6RVB3ffsVFHc=
 github.com/KyleBanks/depth v1.2.1/go.mod h1:jzSb9d0L43HxTQfT+oSA1EEp2q+ne2uh6XgeJcm8brE=
 github.com/Masterminds/squirrel v1.5.4 h1:uUcX/aBc8O7Fg9kaISIUsHXdKuqehiXAMQTYX8afzqM=
@@ -88,8 +88,9 @@ github.com/golang-jwt/jwt/v5 v5.2.2/go.mod h1:pqrtFR0X4osieyHYxtmOUWsAWrfe1Q5UVI
 github.com/golang-migrate/migrate/v4 v4.18.2 h1:2VSCMz7x7mjyTXx3m2zPokOY82LTRgxK1yQYKo6wWQ8=
 github.com/golang-migrate/migrate/v4 v4.18.2/go.mod h1:2CM6tJvn2kqPXwnXO/d3rAQYiyoIm180VsO8PRX6Rpk=
 github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
-github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI=
 github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
+github.com/google/go-cmp v0.7.0 h1:wk8382ETsv4JYUZwIsn6YpYiWiBsYLSJiTsyBybVuN8=
+github.com/google/go-cmp v0.7.0/go.mod h1:pXiqmnSA92OHEEa9HXL2W4E7lf9JzCmGVUdgjX3N/iU=
 github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
 github.com/google/gofuzz v1.2.0 h1:xRy4A+RhZaiKjJ1bPfwQ8sedCA+YS2YcCHW6ec7JMi0=
 github.com/google/gofuzz v1.2.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
@@ -143,8 +144,6 @@ github.com/jpillora/backoff v1.0.0 h1:uvFg412JmmHBHw7iwprIxkPMI+sGQ4kzOWsMeHnm2E
 github.com/jpillora/backoff v1.0.0/go.mod h1:J/6gKK9jxlEcS3zixgDgUAsiuZ7yrSoa/FX5e0EB2j4=
 github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM=
 github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo=
-github.com/klauspost/compress v1.17.11 h1:In6xLpyWOi1+C7tXUUWv2ot1QvBjxevKAaI6IXrJmUc=
-github.com/klauspost/compress v1.17.11/go.mod h1:pMDklpSncoRMuLFrf1W9Ss9KT+0rH90U12bZKk7uwG0=
 github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE=
 github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk=
 github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
@@ -186,14 +185,14 @@ github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=
 github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
 github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
 github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
-github.com/prometheus/client_golang v1.21.0 h1:DIsaGmiaBkSangBgMtWdNfxbMNdku5IK6iNhrEqWvdA=
-github.com/prometheus/client_golang v1.21.0/go.mod h1:U9NM32ykUErtVBxdvD3zfi+EuFkkaBvMb09mIfe0Zgg=
-github.com/prometheus/client_model v0.6.1 h1:ZKSh/rekM+n3CeS952MLRAdFwIKqeY8b62p8ais2e9E=
-github.com/prometheus/client_model v0.6.1/go.mod h1:OrxVMOVHjw3lKMa8+x6HeMGkHMQyHDk9E3jmP2AmGiY=
-github.com/prometheus/common v0.62.0 h1:xasJaQlnWAeyHdUBeGjXmutelfJHWMRr+Fg4QszZ2Io=
-github.com/prometheus/common v0.62.0/go.mod h1:vyBcEuLSvWos9B1+CyL7JZ2up+uFzXhkqml0W5zIY1I=
-github.com/prometheus/procfs v0.15.1 h1:YagwOFzUgYfKKHX6Dr+sHT7km/hxC76UB0learggepc=
-github.com/prometheus/procfs v0.15.1/go.mod h1:fB45yRUv8NstnjriLhBQLuOUt+WW4BsoGhij/e3PBqk=
+github.com/prometheus/client_golang v1.22.0 h1:rb93p9lokFEsctTys46VnV1kLCDpVZ0a/Y92Vm0Zc6Q=
+github.com/prometheus/client_golang v1.22.0/go.mod h1:R7ljNsLXhuQXYZYtw6GAE9AZg8Y7vEW5scdCXrWRXC0=
+github.com/prometheus/client_model v0.6.2 h1:oBsgwpGs7iVziMvrGhE53c/GrLUsZdHnqNwqPLxwZyk=
+github.com/prometheus/client_model v0.6.2/go.mod h1:y3m2F6Gdpfy6Ut/GBsUqTWZqCUvMVzSfMLjcu6wAwpE=
+github.com/prometheus/common v0.63.0 h1:YR/EIY1o3mEFP/kZCD7iDMnLPlGyuU2Gb3HIcXnA98k=
+github.com/prometheus/common v0.63.0/go.mod h1:VVFF/fBIoToEnWRVkYoXEkq3R3paCoxG9PXP74SnV18=
+github.com/prometheus/procfs v0.16.1 h1:hZ15bTNuirocR6u0JZ6BAHHmwS1p8B4P6MRqxtzMyRg=
+github.com/prometheus/procfs v0.16.1/go.mod h1:teAbpZRB1iIAJYREa1LsoWUXykVXA1KlTmWl8x/U+Is=
 github.com/qustavo/sqlhooks/v2 v2.1.0 h1:54yBemHnGHp/7xgT+pxwmIlMSDNYKx5JW5dfRAiCZi0=
 github.com/qustavo/sqlhooks/v2 v2.1.0/go.mod h1:aMREyKo7fOKTwiLuWPsaHRXEmtqG4yREztO0idF83AU=
 github.com/robfig/cron/v3 v3.0.1 h1:WdRxkvbJztn8LMz/QEvLN5sBU+xKpSqwwUO1Pjr4qDs=
@@ -251,17 +250,17 @@ golang.org/x/crypto v0.13.0/go.mod h1:y6Z2r+Rw4iayiXXAIxJIDAJ1zMW4yaTpebo8fPOliY
 golang.org/x/crypto v0.19.0/go.mod h1:Iy9bg/ha4yyC70EfRS8jz+B6ybOBKMaSxLj6P6oBDfU=
 golang.org/x/crypto v0.23.0/go.mod h1:CKFgDieR+mRhux2Lsu27y0fO304Db0wZe70UKqHu0v8=
 golang.org/x/crypto v0.31.0/go.mod h1:kDsLvtWBEx7MV9tJOj9bnXsPbxwJQ6csT/x4KIN4Ssk=
-golang.org/x/crypto v0.35.0 h1:b15kiHdrGCHrP6LvwaQ3c03kgNhhiMgvlhxHQhmg2Xs=
-golang.org/x/crypto v0.35.0/go.mod h1:dy7dXNW32cAb/6/PRuTNsix8T+vJAqvuIy5Bli/x0YQ=
-golang.org/x/exp v0.0.0-20250218142911-aa4b98e5adaa h1:t2QcU6V556bFjYgu4L6C+6VrCPyJZ+eyRsABUPs1mz4=
-golang.org/x/exp v0.0.0-20250218142911-aa4b98e5adaa/go.mod h1:BHOTPb3L19zxehTsLoJXVaTktb06DFgmdW6Wb9s8jqk=
+golang.org/x/crypto v0.37.0 h1:kJNSjF/Xp7kU0iB2Z+9viTPMW4EqqsrywMXLJOOsXSE=
+golang.org/x/crypto v0.37.0/go.mod h1:vg+k43peMZ0pUMhYmVAWysMK35e6ioLh3wB8ZCAfbVc=
+golang.org/x/exp v0.0.0-20250408133849-7e4ce0ab07d0 h1:R84qjqJb5nVJMxqWYb3np9L5ZsaDtB+a39EqjV0JSUM=
+golang.org/x/exp v0.0.0-20250408133849-7e4ce0ab07d0/go.mod h1:S9Xr4PYopiDyqSyp5NjCrhFrqg6A5zA2E/iPHPhqnS8=
 golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4=
 golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs=
 golang.org/x/mod v0.12.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs=
 golang.org/x/mod v0.15.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c=
 golang.org/x/mod v0.17.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c=
-golang.org/x/mod v0.23.0 h1:Zb7khfcRGKk+kqfxFaP5tZqCnDZMjC5VtUBs87Hr6QM=
-golang.org/x/mod v0.23.0/go.mod h1:6SkKJ3Xj0I0BrPOZoBy3bdMptDDU9oJrpohJ3eWZ1fY=
+golang.org/x/mod v0.24.0 h1:ZfthKaKaT4NrhGVZHO1/WDTwGES4De8KtWO0SIbNJMU=
+golang.org/x/mod v0.24.0/go.mod h1:IXM97Txy2VM4PJ3gI61r1YEk/gAj6zAHN3AdZt6S9Ww=
 golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
 golang.org/x/net v0.0.0-20200114155413-6afb5195e5aa/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
 golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
@@ -273,8 +272,8 @@ golang.org/x/net v0.15.0/go.mod h1:idbUs1IY1+zTqbi8yxTbhexhEEk5ur9LInksu6HrEpk=
 golang.org/x/net v0.21.0/go.mod h1:bIjVDfnllIU7BJ2DNgfnXvpSvtn8VRwhlsaeUTyUS44=
 golang.org/x/net v0.25.0/go.mod h1:JkAGAh7GEvH74S6FOH42FLoXpXbE/aqXSrIQjXgsiwM=
 golang.org/x/net v0.33.0/go.mod h1:HXLR5J+9DxmrqMwG9qjGCxZ+zKXxBru04zlTvWlWuN4=
-golang.org/x/net v0.36.0 h1:vWF2fRbw4qslQsQzgFqZff+BItCvGFQqKzKIzx1rmoA=
-golang.org/x/net v0.36.0/go.mod h1:bFmbeoIPfrw4sMHNhb4J9f6+tPziuGjq7Jk/38fxi1I=
+golang.org/x/net v0.39.0 h1:ZCu7HMWDxpXpaiKdhzIfaltL9Lp31x/3fCP11bc6/fY=
+golang.org/x/net v0.39.0/go.mod h1:X7NRbYVEA+ewNkCNyJ513WmMdQ3BineSwVtN2zD/d+E=
 golang.org/x/oauth2 v0.27.0 h1:da9Vo7/tDv5RH/7nZDz1eMGS/q1Vv1N/7FCrBhI9I3M=
 golang.org/x/oauth2 v0.27.0/go.mod h1:onh5ek6nERTohokkhCD/y2cV4Do3fxFHFuAejCkRWT8=
 golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
@@ -284,8 +283,8 @@ golang.org/x/sync v0.3.0/go.mod h1:FU7BRWz2tNW+3quACPkgCx/L+uEAv1htQ0V83Z9Rj+Y=
 golang.org/x/sync v0.6.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
 golang.org/x/sync v0.7.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
 golang.org/x/sync v0.10.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
-golang.org/x/sync v0.11.0 h1:GGz8+XQP4FvTTrjZPzNKTMFtSXH80RAzG+5ghFPgK9w=
-golang.org/x/sync v0.11.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
+golang.org/x/sync v0.13.0 h1:AauUjRAJ9OSnvULf/ARrrVywoJDy0YS2AwQ98I37610=
+golang.org/x/sync v0.13.0/go.mod h1:1dzgHSNfp02xaA81J2MS99Qcpr2w7fw1gpm99rleRqA=
 golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
 golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
 golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
@@ -297,8 +296,8 @@ golang.org/x/sys v0.12.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
 golang.org/x/sys v0.17.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
 golang.org/x/sys v0.20.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
 golang.org/x/sys v0.28.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
-golang.org/x/sys v0.30.0 h1:QjkSwP/36a20jFYWkSue1YwXzLmsV5Gfq7Eiy72C1uc=
-golang.org/x/sys v0.30.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
+golang.org/x/sys v0.32.0 h1:s77OFDvIQeibCmezSnk/q6iAfkdiQaJi4VzroCFrN20=
+golang.org/x/sys v0.32.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k=
 golang.org/x/telemetry v0.0.0-20240228155512-f48c80bd79b2/go.mod h1:TeRTkGYfJXctD9OcfyVLyj2J3IxLnKwHJR8f4D8a3YE=
 golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
 golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
@@ -317,8 +316,8 @@ golang.org/x/text v0.13.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE=
 golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU=
 golang.org/x/text v0.15.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU=
 golang.org/x/text v0.21.0/go.mod h1:4IBbMaMmOPCJ8SecivzSH54+73PCFmPWxNTLm+vZkEQ=
-golang.org/x/text v0.22.0 h1:bofq7m3/HAFvbF51jz3Q9wLg3jkvSPuiZu/pD1XwgtM=
-golang.org/x/text v0.22.0/go.mod h1:YRoo4H8PVmsu+E3Ou7cqLVH8oXWIHVoX0jqUWALQhfY=
+golang.org/x/text v0.24.0 h1:dd5Bzh4yt5KYA8f9CJHCP4FB4D51c2c6JvN37xJJkJ0=
+golang.org/x/text v0.24.0/go.mod h1:L8rBsPeo2pSS+xqN0d5u2ikmjtmoJbDBT1b7nHvFCdU=
 golang.org/x/time v0.5.0 h1:o7cqy6amK/52YcAKIPlM3a+Fpj35zvRj2TP+e1xFSfk=
 golang.org/x/time v0.5.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM=
 golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
@@ -327,11 +326,11 @@ golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc
 golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU=
 golang.org/x/tools v0.13.0/go.mod h1:HvlwmtVNQAhOuCjW7xxvovg8wbNq7LwfXh/k7wXUl58=
 golang.org/x/tools v0.21.1-0.20240508182429-e35e4ccd0d2d/go.mod h1:aiJjzUbINMkxbQROHiO6hDPo2LHcIPhhQsa9DLh0yGk=
-golang.org/x/tools v0.30.0 h1:BgcpHewrV5AUp2G9MebG4XPFI1E2W41zU1SaqVA9vJY=
-golang.org/x/tools v0.30.0/go.mod h1:c347cR/OJfw5TI+GfX7RUPNMdDRRbjvYTS0jPyvsVtY=
+golang.org/x/tools v0.32.0 h1:Q7N1vhpkQv7ybVzLFtTjvQya2ewbwNDZzUgfXGqtMWU=
+golang.org/x/tools v0.32.0/go.mod h1:ZxrU41P/wAbZD8EDa6dDCa6XfpkhJ7HFMjHJXfBDu8s=
 golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
-google.golang.org/protobuf v1.36.5 h1:tPhr+woSbjfYvY6/GPufUoYizxw1cF/yFoxJ2fmpwlM=
-google.golang.org/protobuf v1.36.5/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE=
+google.golang.org/protobuf v1.36.6 h1:z1NpPI8ku2WgiWnf+t9wTPsn6eP1L7ksHUlkfLvd9xY=
+google.golang.org/protobuf v1.36.6/go.mod h1:jduwjTPXsFjZGTmRluh+L6NjiWu7pchiJ2/5YcXBHnY=
 gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
 gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk=
 gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q=
gqlgen.yml (72 lines changed)
@@ -51,70 +51,52 @@ models:
   - github.com/99designs/gqlgen/graphql.Int64
   - github.com/99designs/gqlgen/graphql.Int32
   Job:
-    model: "github.com/ClusterCockpit/cc-backend/pkg/schema.Job"
+    model: "github.com/ClusterCockpit/cc-lib/schema.Job"
     fields:
       tags:
         resolver: true
       metaData:
         resolver: true
   Cluster:
-    model: "github.com/ClusterCockpit/cc-backend/pkg/schema.Cluster"
+    model: "github.com/ClusterCockpit/cc-lib/schema.Cluster"
     fields:
       partitions:
         resolver: true
   Node:
-    model: "github.com/ClusterCockpit/cc-backend/pkg/schema.Node"
+    model: "github.com/ClusterCockpit/cc-lib/schema.Node"
     fields:
       metaData:
         resolver: true
-  NullableFloat:
-    { model: "github.com/ClusterCockpit/cc-backend/pkg/schema.Float" }
-  MetricScope:
-    { model: "github.com/ClusterCockpit/cc-backend/pkg/schema.MetricScope" }
-  MetricValue:
-    { model: "github.com/ClusterCockpit/cc-backend/pkg/schema.MetricValue" }
+  NullableFloat: { model: "github.com/ClusterCockpit/cc-lib/schema.Float" }
+  MetricScope: { model: "github.com/ClusterCockpit/cc-lib/schema.MetricScope" }
+  MetricValue: { model: "github.com/ClusterCockpit/cc-lib/schema.MetricValue" }
   JobStatistics:
-    { model: "github.com/ClusterCockpit/cc-backend/pkg/schema.JobStatistics" }
+    { model: "github.com/ClusterCockpit/cc-lib/schema.JobStatistics" }
   GlobalMetricListItem:
-    {
-      model: "github.com/ClusterCockpit/cc-backend/pkg/schema.GlobalMetricListItem",
-    }
+    { model: "github.com/ClusterCockpit/cc-lib/schema.GlobalMetricListItem" }
   ClusterSupport:
-    { model: "github.com/ClusterCockpit/cc-backend/pkg/schema.ClusterSupport" }
-  Tag: { model: "github.com/ClusterCockpit/cc-backend/pkg/schema.Tag" }
-  Resource:
-    { model: "github.com/ClusterCockpit/cc-backend/pkg/schema.Resource" }
-  JobState:
-    { model: "github.com/ClusterCockpit/cc-backend/pkg/schema.JobState" }
+    { model: "github.com/ClusterCockpit/cc-lib/schema.ClusterSupport" }
+  Tag: { model: "github.com/ClusterCockpit/cc-lib/schema.Tag" }
+  Resource: { model: "github.com/ClusterCockpit/cc-lib/schema.Resource" }
+  JobState: { model: "github.com/ClusterCockpit/cc-lib/schema.JobState" }
   MonitoringState:
-    { model: "github.com/ClusterCockpit/cc-backend/pkg/schema.NodeState" }
+    { model: "github.com/ClusterCockpit/cc-lib/schema.NodeState" }
   HealthState:
-    { model: "github.com/ClusterCockpit/cc-backend/pkg/schema.MonitoringState" }
-  TimeRange:
-    { model: "github.com/ClusterCockpit/cc-backend/pkg/schema.TimeRange" }
-  IntRange:
-    { model: "github.com/ClusterCockpit/cc-backend/pkg/schema.IntRange" }
-  JobMetric:
-    { model: "github.com/ClusterCockpit/cc-backend/pkg/schema.JobMetric" }
-  Series: { model: "github.com/ClusterCockpit/cc-backend/pkg/schema.Series" }
+    { model: "github.com/ClusterCockpit/cc-lib/schema.MonitoringState" }
+  TimeRange: { model: "github.com/ClusterCockpit/cc-lib/schema.TimeRange" }
+  IntRange: { model: "github.com/ClusterCockpit/cc-lib/schema.IntRange" }
+  JobMetric: { model: "github.com/ClusterCockpit/cc-lib/schema.JobMetric" }
+  Series: { model: "github.com/ClusterCockpit/cc-lib/schema.Series" }
   MetricStatistics:
-    {
-      model: "github.com/ClusterCockpit/cc-backend/pkg/schema.MetricStatistics",
-    }
+    { model: "github.com/ClusterCockpit/cc-lib/schema.MetricStatistics" }
   MetricConfig:
-    { model: "github.com/ClusterCockpit/cc-backend/pkg/schema.MetricConfig" }
+    { model: "github.com/ClusterCockpit/cc-lib/schema.MetricConfig" }
   SubClusterConfig:
-    {
-      model: "github.com/ClusterCockpit/cc-backend/pkg/schema.SubClusterConfig",
-    }
-  Accelerator:
-    { model: "github.com/ClusterCockpit/cc-backend/pkg/schema.Accelerator" }
-  Topology:
-    { model: "github.com/ClusterCockpit/cc-backend/pkg/schema.Topology" }
+    { model: "github.com/ClusterCockpit/cc-lib/schema.SubClusterConfig" }
+  Accelerator: { model: "github.com/ClusterCockpit/cc-lib/schema.Accelerator" }
+  Topology: { model: "github.com/ClusterCockpit/cc-lib/schema.Topology" }
   FilterRanges:
-    { model: "github.com/ClusterCockpit/cc-backend/pkg/schema.FilterRanges" }
-  SubCluster:
-    { model: "github.com/ClusterCockpit/cc-backend/pkg/schema.SubCluster" }
-  StatsSeries:
-    { model: "github.com/ClusterCockpit/cc-backend/pkg/schema.StatsSeries" }
-  Unit: { model: "github.com/ClusterCockpit/cc-backend/pkg/schema.Unit" }
+    { model: "github.com/ClusterCockpit/cc-lib/schema.FilterRanges" }
+  SubCluster: { model: "github.com/ClusterCockpit/cc-lib/schema.SubCluster" }
+  StatsSeries: { model: "github.com/ClusterCockpit/cc-lib/schema.StatsSeries" }
+  Unit: { model: "github.com/ClusterCockpit/cc-lib/schema.Unit" }
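The gqlgen.yml change is the same remap at the GraphQL layer: every model binding moves from cc-backend/pkg/schema to cc-lib/schema, and the multi-line map entries are collapsed to one line each; the resolver settings are untouched. After such a remap the generated resolver code would normally be refreshed (commonly via 'go run github.com/99designs/gqlgen generate', assuming the standard gqlgen workflow — gqlgen v0.17.66 is already a direct dependency in go.mod above).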
@@ -1,5 +1,5 @@
 // Copyright (C) NHR@FAU, University Erlangen-Nuremberg.
-// All rights reserved.
+// All rights reserved. This file is part of cc-backend.
 // Use of this source code is governed by a MIT-style
 // license that can be found in the LICENSE file.
 package api_test
@ -27,8 +27,8 @@ import (
|
|||||||
"github.com/ClusterCockpit/cc-backend/internal/metricdata"
|
"github.com/ClusterCockpit/cc-backend/internal/metricdata"
|
||||||
"github.com/ClusterCockpit/cc-backend/internal/repository"
|
"github.com/ClusterCockpit/cc-backend/internal/repository"
|
||||||
"github.com/ClusterCockpit/cc-backend/pkg/archive"
|
"github.com/ClusterCockpit/cc-backend/pkg/archive"
|
||||||
"github.com/ClusterCockpit/cc-backend/pkg/log"
|
cclog "github.com/ClusterCockpit/cc-lib/ccLogger"
|
||||||
"github.com/ClusterCockpit/cc-backend/pkg/schema"
|
"github.com/ClusterCockpit/cc-lib/schema"
|
||||||
"github.com/gorilla/mux"
|
"github.com/gorilla/mux"
|
||||||
|
|
||||||
_ "github.com/mattn/go-sqlite3"
|
_ "github.com/mattn/go-sqlite3"
|
||||||
@ -116,7 +116,7 @@ func setup(t *testing.T) *api.RestApi {
|
|||||||
]
|
]
|
||||||
}`
|
}`
|
||||||
|
|
||||||
log.Init("info", true)
|
cclog.Init("info", true)
|
||||||
tmpdir := t.TempDir()
|
tmpdir := t.TempDir()
|
||||||
jobarchive := filepath.Join(tmpdir, "job-archive")
|
jobarchive := filepath.Join(tmpdir, "job-archive")
|
||||||
if err := os.Mkdir(jobarchive, 0777); err != nil {
|
if err := os.Mkdir(jobarchive, 0777); err != nil {
|
||||||
|
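For reference, a minimal smoke test exercising the new logger initialization exactly as setup(t) does above. The two arguments mirror the call in the diff; my reading is that they are the log level and a date-stamping flag, but only the call shape is confirmed by this excerpt:

package api_test

import (
    "testing"

    cclog "github.com/ClusterCockpit/cc-lib/ccLogger"
)

// TestLoggerInit is a hypothetical test, not part of the PR; it only shows
// that tests now initialize the shared cc-lib logger instead of pkg/log.
func TestLoggerInit(t *testing.T) {
    // Same call as in setup(t): level "info", second flag enabled.
    cclog.Init("info", true)
}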
@@ -1,5 +1,5 @@
 // Copyright (C) NHR@FAU, University Erlangen-Nuremberg.
-// All rights reserved.
+// All rights reserved. This file is part of cc-backend.
 // Use of this source code is governed by a MIT-style
 // license that can be found in the LICENSE file.
 package api
@@ -12,7 +12,7 @@ import (

     "github.com/ClusterCockpit/cc-backend/internal/repository"
     "github.com/ClusterCockpit/cc-backend/pkg/archive"
-    "github.com/ClusterCockpit/cc-backend/pkg/schema"
+    "github.com/ClusterCockpit/cc-lib/schema"
 )

 // GetClustersApiResponse model
@@ -1,5 +1,5 @@
 // Copyright (C) NHR@FAU, University Erlangen-Nuremberg.
-// All rights reserved.
+// All rights reserved. This file is part of cc-backend.
 // Use of this source code is governed by a MIT-style
 // license that can be found in the LICENSE file.
 package api
@@ -23,8 +23,8 @@ import (
     "github.com/ClusterCockpit/cc-backend/internal/metricDataDispatcher"
     "github.com/ClusterCockpit/cc-backend/internal/repository"
     "github.com/ClusterCockpit/cc-backend/pkg/archive"
-    "github.com/ClusterCockpit/cc-backend/pkg/log"
-    "github.com/ClusterCockpit/cc-backend/pkg/schema"
+    cclog "github.com/ClusterCockpit/cc-lib/ccLogger"
+    "github.com/ClusterCockpit/cc-lib/schema"
     "github.com/gorilla/mux"
 )

@@ -198,7 +198,7 @@ func (api *RestApi) getJobs(rw http.ResponseWriter, r *http.Request) {
         results = append(results, job)
     }

-    log.Debugf("/api/jobs: %d jobs returned", len(results))
+    cclog.Debugf("/api/jobs: %d jobs returned", len(results))
     rw.Header().Add("Content-Type", "application/json")
     bw := bufio.NewWriter(rw)
     defer bw.Flush()
@@ -286,12 +286,12 @@ func (api *RestApi) getCompleteJobById(rw http.ResponseWriter, r *http.Request)
     if r.URL.Query().Get("all-metrics") == "true" {
         data, err = metricDataDispatcher.LoadData(job, nil, scopes, r.Context(), resolution)
         if err != nil {
-            log.Warnf("REST: error while loading all-metrics job data for JobID %d on %s", job.JobID, job.Cluster)
+            cclog.Warnf("REST: error while loading all-metrics job data for JobID %d on %s", job.JobID, job.Cluster)
             return
         }
     }

-    log.Debugf("/api/job/%s: get job %d", id, job.JobID)
+    cclog.Debugf("/api/job/%s: get job %d", id, job.JobID)
     rw.Header().Add("Content-Type", "application/json")
     bw := bufio.NewWriter(rw)
     defer bw.Flush()
@@ -382,7 +382,7 @@ func (api *RestApi) getJobById(rw http.ResponseWriter, r *http.Request) {

     data, err := metricDataDispatcher.LoadData(job, metrics, scopes, r.Context(), resolution)
     if err != nil {
-        log.Warnf("REST: error while loading job data for JobID %d on %s", job.JobID, job.Cluster)
+        cclog.Warnf("REST: error while loading job data for JobID %d on %s", job.JobID, job.Cluster)
         return
     }

@@ -397,7 +397,7 @@ func (api *RestApi) getJobById(rw http.ResponseWriter, r *http.Request) {
         }
     }

-    log.Debugf("/api/job/%s: get job %d", id, job.JobID)
+    cclog.Debugf("/api/job/%s: get job %d", id, job.JobID)
     rw.Header().Add("Content-Type", "application/json")
     bw := bufio.NewWriter(rw)
     defer bw.Flush()
@@ -565,7 +565,7 @@ func (api *RestApi) removeTagJob(rw http.ResponseWriter, r *http.Request) {
     for _, rtag := range req {
         // Only Global and Admin Tags
         if rtag.Scope != "global" && rtag.Scope != "admin" {
-            log.Warnf("Cannot delete private tag for job %d: Skip", job.JobID)
+            cclog.Warnf("Cannot delete private tag for job %d: Skip", job.JobID)
             continue
         }

@@ -611,7 +611,7 @@ func (api *RestApi) removeTags(rw http.ResponseWriter, r *http.Request) {
     for _, rtag := range req {
         // Only Global and Admin Tags
         if rtag.Scope != "global" && rtag.Scope != "admin" {
-            log.Warn("Cannot delete private tag: Skip")
+            cclog.Warn("Cannot delete private tag: Skip")
             continue
         }

@@ -654,7 +654,7 @@ func (api *RestApi) startJob(rw http.ResponseWriter, r *http.Request) {
         return
     }

-    log.Printf("REST: %s\n", req.GoString())
+    cclog.Printf("REST: %s\n", req.GoString())
     req.State = schema.JobStateRunning

     if err := importer.SanityChecks(&req); err != nil {
@@ -697,7 +697,7 @@ func (api *RestApi) startJob(rw http.ResponseWriter, r *http.Request) {
         }
     }

-    log.Printf("new job (id: %d): cluster=%s, jobId=%d, user=%s, startTime=%d", id, req.Cluster, req.JobID, req.User, req.StartTime)
+    cclog.Printf("new job (id: %d): cluster=%s, jobId=%d, user=%s, startTime=%d", id, req.Cluster, req.JobID, req.User, req.StartTime)
     rw.Header().Add("Content-Type", "application/json")
     rw.WriteHeader(http.StatusCreated)
     json.NewEncoder(rw).Encode(DefaultApiResponse{
@@ -737,7 +737,7 @@ func (api *RestApi) stopJobByRequest(rw http.ResponseWriter, r *http.Request) {
         return
     }

-    // log.Printf("loading db job for stopJobByRequest... : stopJobApiRequest=%v", req)
+    // cclog.Printf("loading db job for stopJobByRequest... : stopJobApiRequest=%v", req)
     job, err = api.JobRepository.Find(req.JobId, req.Cluster, req.StartTime)
     if err != nil {
         job, err = api.JobRepository.FindCached(req.JobId, req.Cluster, req.StartTime)
@@ -920,7 +920,7 @@ func (api *RestApi) checkAndHandleStopJob(rw http.ResponseWriter, job *schema.Jo
     }
     api.JobRepository.Mutex.Unlock()

-    log.Printf("archiving job... (dbid: %d): cluster=%s, jobId=%d, user=%s, startTime=%d, duration=%d, state=%s", job.ID, job.Cluster, job.JobID, job.User, job.StartTime, job.Duration, job.State)
+    cclog.Printf("archiving job... (dbid: %d): cluster=%s, jobId=%d, user=%s, startTime=%d, duration=%d, state=%s", job.ID, job.Cluster, job.JobID, job.User, job.StartTime, job.Duration, job.State)

     // Send a response (with status OK). This means that errors that happen from here on forward
     // can *NOT* be communicated to the client. If reading from a MetricDataRepository or
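The pattern repeated through this file is purely mechanical: the new dependency is imported under the alias cclog, so every call site swaps only the package identifier while format strings and arguments stay untouched. A minimal sketch of the renamed call site (the wrapper function is illustrative, not from the PR):

package api

import cclog "github.com/ClusterCockpit/cc-lib/ccLogger"

// logJobCount is a hypothetical helper showing the rename: before the port
// this line read log.Debugf(...) against cc-backend/pkg/log.
func logJobCount(results []string) {
    cclog.Debugf("/api/jobs: %d jobs returned", len(results))
}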
@@ -1,5 +1,5 @@
 // Copyright (C) NHR@FAU, University Erlangen-Nuremberg.
-// All rights reserved.
+// All rights reserved. This file is part of cc-backend.
 // Use of this source code is governed by a MIT-style
 // license that can be found in the LICENSE file.
 package api
@@ -10,7 +10,7 @@ import (
     "strings"

     "github.com/ClusterCockpit/cc-backend/internal/repository"
-    "github.com/ClusterCockpit/cc-backend/pkg/schema"
+    "github.com/ClusterCockpit/cc-lib/schema"
 )

 type Node struct {
@@ -1,5 +1,5 @@
 // Copyright (C) NHR@FAU, University Erlangen-Nuremberg.
-// All rights reserved.
+// All rights reserved. This file is part of cc-backend.
 // Use of this source code is governed by a MIT-style
 // license that can be found in the LICENSE file.
 package api
@@ -16,9 +16,9 @@ import (
     "github.com/ClusterCockpit/cc-backend/internal/auth"
     "github.com/ClusterCockpit/cc-backend/internal/config"
     "github.com/ClusterCockpit/cc-backend/internal/repository"
-    "github.com/ClusterCockpit/cc-backend/internal/util"
-    "github.com/ClusterCockpit/cc-backend/pkg/log"
-    "github.com/ClusterCockpit/cc-backend/pkg/schema"
+    cclog "github.com/ClusterCockpit/cc-lib/ccLogger"
+    "github.com/ClusterCockpit/cc-lib/schema"
+    "github.com/ClusterCockpit/cc-lib/util"
     "github.com/gorilla/mux"
 )

@@ -130,7 +130,7 @@ type DefaultApiResponse struct {
 }

 func handleError(err error, statusCode int, rw http.ResponseWriter) {
-    log.Warnf("REST ERROR : %s", err.Error())
+    cclog.Warnf("REST ERROR : %s", err.Error())
     rw.Header().Add("Content-Type", "application/json")
     rw.WriteHeader(statusCode)
     json.NewEncoder(rw).Encode(ErrorResponse{
@@ -161,7 +161,7 @@ func (api *RestApi) editNotice(rw http.ResponseWriter, r *http.Request) {
     if !noticeExists {
         ntxt, err := os.Create("./var/notice.txt")
         if err != nil {
-            log.Errorf("Creating ./var/notice.txt failed: %s", err.Error())
+            cclog.Errorf("Creating ./var/notice.txt failed: %s", err.Error())
             http.Error(rw, err.Error(), http.StatusUnprocessableEntity)
             return
         }
@@ -170,7 +170,7 @@ func (api *RestApi) editNotice(rw http.ResponseWriter, r *http.Request) {

     if newContent != "" {
         if err := os.WriteFile("./var/notice.txt", []byte(newContent), 0o666); err != nil {
-            log.Errorf("Writing to ./var/notice.txt failed: %s", err.Error())
+            cclog.Errorf("Writing to ./var/notice.txt failed: %s", err.Error())
             http.Error(rw, err.Error(), http.StatusUnprocessableEntity)
             return
         } else {
@@ -178,7 +178,7 @@ func (api *RestApi) editNotice(rw http.ResponseWriter, r *http.Request) {
         }
     } else {
         if err := os.WriteFile("./var/notice.txt", []byte(""), 0o666); err != nil {
-            log.Errorf("Writing to ./var/notice.txt failed: %s", err.Error())
+            cclog.Errorf("Writing to ./var/notice.txt failed: %s", err.Error())
             http.Error(rw, err.Error(), http.StatusUnprocessableEntity)
             return
         } else {
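The handleError hunk above is the one place the REST layer shapes error responses; after the port it is the old function with cclog substituted. Reassembled as a self-contained sketch for readability — the ErrorResponse field names here are my reading of the encoder call, not confirmed by this excerpt:

package api

import (
    "encoding/json"
    "net/http"

    cclog "github.com/ClusterCockpit/cc-lib/ccLogger"
)

// ErrorResponse mirrors the shape implied by handleError; illustrative only.
type ErrorResponse struct {
    Status string `json:"status"`
    Error  string `json:"error"`
}

// handleError logs through the shared cc-lib logger, then writes a JSON body.
func handleError(err error, statusCode int, rw http.ResponseWriter) {
    cclog.Warnf("REST ERROR : %s", err.Error())
    rw.Header().Add("Content-Type", "application/json")
    rw.WriteHeader(statusCode)
    json.NewEncoder(rw).Encode(ErrorResponse{
        Status: http.StatusText(statusCode),
        Error:  err.Error(),
    })
}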
@@ -1,5 +1,5 @@
 // Copyright (C) NHR@FAU, University Erlangen-Nuremberg.
-// All rights reserved.
+// All rights reserved. This file is part of cc-backend.
 // Use of this source code is governed by a MIT-style
 // license that can be found in the LICENSE file.
 package api
@@ -10,7 +10,7 @@ import (
     "net/http"

     "github.com/ClusterCockpit/cc-backend/internal/repository"
-    "github.com/ClusterCockpit/cc-backend/pkg/schema"
+    "github.com/ClusterCockpit/cc-lib/schema"
     "github.com/gorilla/mux"
 )

@@ -1,5 +1,5 @@
 // Copyright (C) NHR@FAU, University Erlangen-Nuremberg.
-// All rights reserved.
+// All rights reserved. This file is part of cc-backend.
 // Use of this source code is governed by a MIT-style
 // license that can be found in the LICENSE file.
 package archiver
@@ -10,8 +10,8 @@ import (
     "time"

     "github.com/ClusterCockpit/cc-backend/internal/repository"
-    "github.com/ClusterCockpit/cc-backend/pkg/log"
-    "github.com/ClusterCockpit/cc-backend/pkg/schema"
+    cclog "github.com/ClusterCockpit/cc-lib/ccLogger"
+    "github.com/ClusterCockpit/cc-lib/schema"
     sq "github.com/Masterminds/squirrel"
 )

@@ -40,7 +40,7 @@ func archivingWorker() {
             // not using meta data, called to load JobMeta into Cache?
             // will fail if job meta not in repository
             if _, err := jobRepo.FetchMetadata(job); err != nil {
-                log.Errorf("archiving job (dbid: %d) failed at check metadata step: %s", job.ID, err.Error())
+                cclog.Errorf("archiving job (dbid: %d) failed at check metadata step: %s", job.ID, err.Error())
                 jobRepo.UpdateMonitoringStatus(*job.ID, schema.MonitoringStatusArchivingFailed)
                 continue
             }
@@ -49,7 +49,7 @@ func archivingWorker() {
             // TODO: Maybe use context with cancel/timeout here
             jobMeta, err := ArchiveJob(job, context.Background())
             if err != nil {
-                log.Errorf("archiving job (dbid: %d) failed at archiving job step: %s", job.ID, err.Error())
+                cclog.Errorf("archiving job (dbid: %d) failed at archiving job step: %s", job.ID, err.Error())
                 jobRepo.UpdateMonitoringStatus(*job.ID, schema.MonitoringStatusArchivingFailed)
                 continue
             }
@@ -57,21 +57,21 @@ func archivingWorker() {
             stmt := sq.Update("job").Where("job.id = ?", job.ID)

             if stmt, err = jobRepo.UpdateFootprint(stmt, jobMeta); err != nil {
-                log.Errorf("archiving job (dbid: %d) failed at update Footprint step: %s", job.ID, err.Error())
+                cclog.Errorf("archiving job (dbid: %d) failed at update Footprint step: %s", job.ID, err.Error())
                 continue
             }
             if stmt, err = jobRepo.UpdateEnergy(stmt, jobMeta); err != nil {
-                log.Errorf("archiving job (dbid: %d) failed at update Energy step: %s", job.ID, err.Error())
+                cclog.Errorf("archiving job (dbid: %d) failed at update Energy step: %s", job.ID, err.Error())
                 continue
             }
             // Update the jobs database entry one last time:
             stmt = jobRepo.MarkArchived(stmt, schema.MonitoringStatusArchivingSuccessful)
             if err := jobRepo.Execute(stmt); err != nil {
-                log.Errorf("archiving job (dbid: %d) failed at db execute: %s", job.ID, err.Error())
+                cclog.Errorf("archiving job (dbid: %d) failed at db execute: %s", job.ID, err.Error())
                 continue
             }
-            log.Debugf("archiving job %d took %s", job.JobID, time.Since(start))
-            log.Printf("archiving job (dbid: %d) successful", job.ID)
+            cclog.Debugf("archiving job %d took %s", job.JobID, time.Since(start))
+            cclog.Printf("archiving job (dbid: %d) successful", job.ID)

             repository.CallJobStopHooks(job)
             archivePending.Done()
@@ -84,7 +84,7 @@ func archivingWorker() {
 // Trigger async archiving
 func TriggerArchiving(job *schema.Job) {
     if archiveChannel == nil {
-        log.Fatal("Cannot archive without archiving channel. Did you Start the archiver?")
+        cclog.Fatal("Cannot archive without archiving channel. Did you Start the archiver?")
     }

     archivePending.Add(1)
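The worker's failure handling is uniform across the steps above: log through cclog, flag the job as failed-to-archive, and move on to the next item from the channel. A condensed sketch under the assumption that the repository exposes the UpdateMonitoringStatus call with this shape (the narrow interface and helper are hypothetical):

package archiver

import (
    cclog "github.com/ClusterCockpit/cc-lib/ccLogger"
    "github.com/ClusterCockpit/cc-lib/schema"
)

// jobRepoLike is a hypothetical narrow interface for illustration only.
type jobRepoLike interface {
    UpdateMonitoringStatus(id int64, status int32) error
}

// failArchiving captures the repeated error path of archivingWorker.
func failArchiving(repo jobRepoLike, dbid int64, step string, err error) {
    cclog.Errorf("archiving job (dbid: %d) failed at %s step: %s", dbid, step, err.Error())
    repo.UpdateMonitoringStatus(dbid, schema.MonitoringStatusArchivingFailed)
}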
@@ -1,5 +1,5 @@
 // Copyright (C) NHR@FAU, University Erlangen-Nuremberg.
-// All rights reserved.
+// All rights reserved. This file is part of cc-backend.
 // Use of this source code is governed by a MIT-style
 // license that can be found in the LICENSE file.
 package archiver
@@ -11,8 +11,8 @@ import (
     "github.com/ClusterCockpit/cc-backend/internal/config"
     "github.com/ClusterCockpit/cc-backend/internal/metricDataDispatcher"
     "github.com/ClusterCockpit/cc-backend/pkg/archive"
-    "github.com/ClusterCockpit/cc-backend/pkg/log"
-    "github.com/ClusterCockpit/cc-backend/pkg/schema"
+    cclog "github.com/ClusterCockpit/cc-lib/ccLogger"
+    "github.com/ClusterCockpit/cc-lib/schema"
 )

 // Writes a running job to the job-archive
@@ -36,7 +36,7 @@ func ArchiveJob(job *schema.Job, ctx context.Context) (*schema.Job, error) {

     jobData, err := metricDataDispatcher.LoadData(job, allMetrics, scopes, ctx, 0) // 0 Resulotion-Value retrieves highest res (60s)
     if err != nil {
-        log.Error("Error wile loading job data for archiving")
+        cclog.Error("Error wile loading job data for archiving")
         return nil, err
     }

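ArchiveJob hands the dispatcher a resolution of 0, which the source comment documents as "highest available resolution (60s)". A sketch of that call, with assumptions stated: the []schema.MetricScope parameter type, the discarded result, and the error-only return are my reading of the call shape above, not confirmed signatures:

package archiver

import (
    "context"

    "github.com/ClusterCockpit/cc-backend/internal/metricDataDispatcher"
    cclog "github.com/ClusterCockpit/cc-lib/ccLogger"
    "github.com/ClusterCockpit/cc-lib/schema"
)

// loadForArchive is a hypothetical wrapper mirroring the call in ArchiveJob.
func loadForArchive(job *schema.Job, allMetrics []string, scopes []schema.MetricScope, ctx context.Context) error {
    // Resolution 0 selects the highest available resolution (60s per the
    // source comment).
    jobData, err := metricDataDispatcher.LoadData(job, allMetrics, scopes, ctx, 0)
    if err != nil {
        cclog.Error("Error while loading job data for archiving")
        return err
    }
    _ = jobData // the real code then writes this to the job archive
    return nil
}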
@@ -1,5 +1,5 @@
 // Copyright (C) NHR@FAU, University Erlangen-Nuremberg.
-// All rights reserved.
+// All rights reserved. This file is part of cc-backend.
 // Use of this source code is governed by a MIT-style
 // license that can be found in the LICENSE file.
 package auth
@@ -22,9 +22,9 @@ import (

     "github.com/ClusterCockpit/cc-backend/internal/config"
     "github.com/ClusterCockpit/cc-backend/internal/repository"
-    "github.com/ClusterCockpit/cc-backend/internal/util"
-    "github.com/ClusterCockpit/cc-backend/pkg/log"
-    "github.com/ClusterCockpit/cc-backend/pkg/schema"
+    cclog "github.com/ClusterCockpit/cc-lib/ccLogger"
+    "github.com/ClusterCockpit/cc-lib/schema"
+    "github.com/ClusterCockpit/cc-lib/util"
     "github.com/gorilla/sessions"
 )

@@ -66,7 +66,7 @@ func (auth *Authentication) AuthViaSession(
 ) (*schema.User, error) {
     session, err := auth.sessionStore.Get(r, "session")
     if err != nil {
-        log.Error("Error while getting session store")
+        cclog.Error("Error while getting session store")
         return nil, err
     }

@@ -93,16 +93,16 @@ func Init() {

     sessKey := os.Getenv("SESSION_KEY")
     if sessKey == "" {
-        log.Warn("environment variable 'SESSION_KEY' not set (will use non-persistent random key)")
+        cclog.Warn("environment variable 'SESSION_KEY' not set (will use non-persistent random key)")
         bytes := make([]byte, 32)
         if _, err := rand.Read(bytes); err != nil {
-            log.Fatal("Error while initializing authentication -> failed to generate random bytes for session key")
+            cclog.Fatal("Error while initializing authentication -> failed to generate random bytes for session key")
         }
         authInstance.sessionStore = sessions.NewCookieStore(bytes)
     } else {
         bytes, err := base64.StdEncoding.DecodeString(sessKey)
         if err != nil {
-            log.Fatal("Error while initializing authentication -> decoding session key failed")
+            cclog.Fatal("Error while initializing authentication -> decoding session key failed")
         }
         authInstance.sessionStore = sessions.NewCookieStore(bytes)
     }
@@ -114,41 +114,41 @@ func Init() {
     if config.Keys.LdapConfig != nil {
         ldapAuth := &LdapAuthenticator{}
         if err := ldapAuth.Init(); err != nil {
-            log.Warn("Error while initializing authentication -> ldapAuth init failed")
+            cclog.Warn("Error while initializing authentication -> ldapAuth init failed")
         } else {
             authInstance.LdapAuth = ldapAuth
             authInstance.authenticators = append(authInstance.authenticators, authInstance.LdapAuth)
         }
     } else {
-        log.Info("Missing LDAP configuration: No LDAP support!")
+        cclog.Info("Missing LDAP configuration: No LDAP support!")
     }

     if config.Keys.JwtConfig != nil {
         authInstance.JwtAuth = &JWTAuthenticator{}
         if err := authInstance.JwtAuth.Init(); err != nil {
-            log.Fatal("Error while initializing authentication -> jwtAuth init failed")
+            cclog.Fatal("Error while initializing authentication -> jwtAuth init failed")
         }

         jwtSessionAuth := &JWTSessionAuthenticator{}
         if err := jwtSessionAuth.Init(); err != nil {
-            log.Info("jwtSessionAuth init failed: No JWT login support!")
+            cclog.Info("jwtSessionAuth init failed: No JWT login support!")
         } else {
             authInstance.authenticators = append(authInstance.authenticators, jwtSessionAuth)
         }

         jwtCookieSessionAuth := &JWTCookieSessionAuthenticator{}
         if err := jwtCookieSessionAuth.Init(); err != nil {
-            log.Info("jwtCookieSessionAuth init failed: No JWT cookie login support!")
+            cclog.Info("jwtCookieSessionAuth init failed: No JWT cookie login support!")
         } else {
             authInstance.authenticators = append(authInstance.authenticators, jwtCookieSessionAuth)
         }
     } else {
-        log.Info("Missing JWT configuration: No JWT token support!")
+        cclog.Info("Missing JWT configuration: No JWT token support!")
     }

     authInstance.LocalAuth = &LocalAuthenticator{}
     if err := authInstance.LocalAuth.Init(); err != nil {
-        log.Fatal("Error while initializing authentication -> localAuth init failed")
+        cclog.Fatal("Error while initializing authentication -> localAuth init failed")
     }
     authInstance.authenticators = append(authInstance.authenticators, authInstance.LocalAuth)
 })
@@ -156,7 +156,7 @@ func Init() {

 func GetAuthInstance() *Authentication {
     if authInstance == nil {
-        log.Fatal("Authentication module not initialized!")
+        cclog.Fatal("Authentication module not initialized!")
     }

     return authInstance
@@ -167,14 +167,14 @@ func handleTokenUser(tokenUser *schema.User) {
     dbUser, err := r.GetUser(tokenUser.Username)

     if err != nil && err != sql.ErrNoRows {
-        log.Errorf("Error while loading user '%s': %v", tokenUser.Username, err)
+        cclog.Errorf("Error while loading user '%s': %v", tokenUser.Username, err)
     } else if err == sql.ErrNoRows && config.Keys.JwtConfig.SyncUserOnLogin { // Adds New User
         if err := r.AddUser(tokenUser); err != nil {
-            log.Errorf("Error while adding user '%s' to DB: %v", tokenUser.Username, err)
+            cclog.Errorf("Error while adding user '%s' to DB: %v", tokenUser.Username, err)
         }
     } else if err == nil && config.Keys.JwtConfig.UpdateUserOnLogin { // Update Existing User
         if err := r.UpdateUser(dbUser, tokenUser); err != nil {
-            log.Errorf("Error while updating user '%s' to DB: %v", dbUser.Username, err)
+            cclog.Errorf("Error while updating user '%s' to DB: %v", dbUser.Username, err)
         }
     }
 }
@@ -184,14 +184,14 @@ func handleOIDCUser(OIDCUser *schema.User) {
     dbUser, err := r.GetUser(OIDCUser.Username)

     if err != nil && err != sql.ErrNoRows {
-        log.Errorf("Error while loading user '%s': %v", OIDCUser.Username, err)
+        cclog.Errorf("Error while loading user '%s': %v", OIDCUser.Username, err)
     } else if err == sql.ErrNoRows && config.Keys.OpenIDConfig.SyncUserOnLogin { // Adds New User
         if err := r.AddUser(OIDCUser); err != nil {
-            log.Errorf("Error while adding user '%s' to DB: %v", OIDCUser.Username, err)
+            cclog.Errorf("Error while adding user '%s' to DB: %v", OIDCUser.Username, err)
         }
     } else if err == nil && config.Keys.OpenIDConfig.UpdateUserOnLogin { // Update Existing User
         if err := r.UpdateUser(dbUser, OIDCUser); err != nil {
-            log.Errorf("Error while updating user '%s' to DB: %v", dbUser.Username, err)
+            cclog.Errorf("Error while updating user '%s' to DB: %v", dbUser.Username, err)
         }
     }
 }
@@ -199,7 +199,7 @@ func handleOIDCUser(OIDCUser *schema.User) {
 func (auth *Authentication) SaveSession(rw http.ResponseWriter, r *http.Request, user *schema.User) error {
     session, err := auth.sessionStore.New(r, "session")
     if err != nil {
-        log.Errorf("session creation failed: %s", err.Error())
+        cclog.Errorf("session creation failed: %s", err.Error())
         http.Error(rw, err.Error(), http.StatusInternalServerError)
         return err
     }
@@ -215,7 +215,7 @@ func (auth *Authentication) SaveSession(rw http.ResponseWriter, r *http.Request,
     session.Values["projects"] = user.Projects
     session.Values["roles"] = user.Roles
     if err := auth.sessionStore.Save(r, rw, session); err != nil {
-        log.Warnf("session save failed: %s", err.Error())
+        cclog.Warnf("session save failed: %s", err.Error())
         http.Error(rw, err.Error(), http.StatusInternalServerError)
         return err
     }
@@ -236,7 +236,7 @@ func (auth *Authentication) Login(

     limiter := getIPUserLimiter(ip, username)
     if !limiter.Allow() {
-        log.Warnf("AUTH/RATE > Too many login attempts for combination IP: %s, Username: %s", ip, username)
+        cclog.Warnf("AUTH/RATE > Too many login attempts for combination IP: %s, Username: %s", ip, username)
         onfailure(rw, r, errors.New("too many login attempts, try again in a few minutes"))
         return
     }
@@ -246,7 +246,7 @@ func (auth *Authentication) Login(
     var err error
     dbUser, err = repository.GetUserRepository().GetUser(username)
     if err != nil && err != sql.ErrNoRows {
-        log.Errorf("Error while loading user '%v'", username)
+        cclog.Errorf("Error while loading user '%v'", username)
     }
 }

@@ -256,12 +256,12 @@ func (auth *Authentication) Login(
     if user, ok = authenticator.CanLogin(dbUser, username, rw, r); !ok {
         continue
     } else {
-        log.Debugf("Can login with user %v", user)
+        cclog.Debugf("Can login with user %v", user)
     }

     user, err := authenticator.Login(user, rw, r)
     if err != nil {
-        log.Warnf("user login failed: %s", err.Error())
+        cclog.Warnf("user login failed: %s", err.Error())
         onfailure(rw, r, err)
         return
     }
@@ -270,7 +270,7 @@ func (auth *Authentication) Login(
         return
     }

-    log.Infof("login successfull: user: %#v (roles: %v, projects: %v)", user.Username, user.Roles, user.Projects)
+    cclog.Infof("login successfull: user: %#v (roles: %v, projects: %v)", user.Username, user.Roles, user.Projects)
     ctx := context.WithValue(r.Context(), repository.ContextUserKey, user)

     if r.FormValue("redirect") != "" {
@@ -282,7 +282,7 @@ func (auth *Authentication) Login(
         return
     }

-    log.Debugf("login failed: no authenticator applied")
+    cclog.Debugf("login failed: no authenticator applied")
     onfailure(rw, r, errors.New("no authenticator applied"))
 })
 }
@@ -294,14 +294,14 @@ func (auth *Authentication) Auth(
 return http.HandlerFunc(func(rw http.ResponseWriter, r *http.Request) {
     user, err := auth.JwtAuth.AuthViaJWT(rw, r)
     if err != nil {
-        log.Infof("auth -> authentication failed: %s", err.Error())
+        cclog.Infof("auth -> authentication failed: %s", err.Error())
         http.Error(rw, err.Error(), http.StatusUnauthorized)
         return
     }
     if user == nil {
         user, err = auth.AuthViaSession(rw, r)
         if err != nil {
-            log.Infof("auth -> authentication failed: %s", err.Error())
+            cclog.Infof("auth -> authentication failed: %s", err.Error())
             http.Error(rw, err.Error(), http.StatusUnauthorized)
             return
         }
@@ -312,7 +312,7 @@ func (auth *Authentication) Auth(
         return
     }

-    log.Info("auth -> authentication failed")
+    cclog.Info("auth -> authentication failed")
     onfailure(rw, r, errors.New("unauthorized (please login first)"))
 })
 }
@@ -324,14 +324,14 @@ func (auth *Authentication) AuthApi(
 return http.HandlerFunc(func(rw http.ResponseWriter, r *http.Request) {
     user, err := auth.JwtAuth.AuthViaJWT(rw, r)
     if err != nil {
-        log.Infof("auth api -> authentication failed: %s", err.Error())
+        cclog.Infof("auth api -> authentication failed: %s", err.Error())
         onfailure(rw, r, err)
         return
     }

     ipErr := securedCheck(user, r)
     if ipErr != nil {
-        log.Infof("auth api -> secured check failed: %s", ipErr.Error())
+        cclog.Infof("auth api -> secured check failed: %s", ipErr.Error())
         onfailure(rw, r, ipErr)
         return
     }
@@ -351,11 +351,11 @@ func (auth *Authentication) AuthApi(
             return
         }
     default:
-        log.Info("auth api -> authentication failed: missing role")
+        cclog.Info("auth api -> authentication failed: missing role")
         onfailure(rw, r, errors.New("unauthorized"))
     }
     }
-    log.Info("auth api -> authentication failed: no auth")
+    cclog.Info("auth api -> authentication failed: no auth")
     onfailure(rw, r, errors.New("unauthorized"))
 })
 }
@@ -367,7 +367,7 @@ func (auth *Authentication) AuthUserApi(
 return http.HandlerFunc(func(rw http.ResponseWriter, r *http.Request) {
     user, err := auth.JwtAuth.AuthViaJWT(rw, r)
     if err != nil {
-        log.Infof("auth user api -> authentication failed: %s", err.Error())
+        cclog.Infof("auth user api -> authentication failed: %s", err.Error())
         onfailure(rw, r, err)
         return
     }
@@ -387,11 +387,11 @@ func (auth *Authentication) AuthUserApi(
             return
         }
     default:
-        log.Info("auth user api -> authentication failed: missing role")
+        cclog.Info("auth user api -> authentication failed: missing role")
         onfailure(rw, r, errors.New("unauthorized"))
     }
     }
-    log.Info("auth user api -> authentication failed: no auth")
+    cclog.Info("auth user api -> authentication failed: no auth")
     onfailure(rw, r, errors.New("unauthorized"))
 })
 }
@@ -403,7 +403,7 @@ func (auth *Authentication) AuthConfigApi(
 return http.HandlerFunc(func(rw http.ResponseWriter, r *http.Request) {
     user, err := auth.AuthViaSession(rw, r)
     if err != nil {
-        log.Infof("auth config api -> authentication failed: %s", err.Error())
+        cclog.Infof("auth config api -> authentication failed: %s", err.Error())
         onfailure(rw, r, err)
         return
     }
@@ -412,7 +412,7 @@ func (auth *Authentication) AuthConfigApi(
     onsuccess.ServeHTTP(rw, r.WithContext(ctx))
     return
     }
-    log.Info("auth config api -> authentication failed: no auth")
+    cclog.Info("auth config api -> authentication failed: no auth")
     onfailure(rw, r, errors.New("unauthorized"))
 })
 }
@@ -424,7 +424,7 @@ func (auth *Authentication) AuthFrontendApi(
 return http.HandlerFunc(func(rw http.ResponseWriter, r *http.Request) {
     user, err := auth.AuthViaSession(rw, r)
     if err != nil {
-        log.Infof("auth frontend api -> authentication failed: %s", err.Error())
+        cclog.Infof("auth frontend api -> authentication failed: %s", err.Error())
         onfailure(rw, r, err)
         return
     }
@@ -433,7 +433,7 @@ func (auth *Authentication) AuthFrontendApi(
     onsuccess.ServeHTTP(rw, r.WithContext(ctx))
     return
     }
-    log.Info("auth frontend api -> authentication failed: no auth")
+    cclog.Info("auth frontend api -> authentication failed: no auth")
     onfailure(rw, r, errors.New("unauthorized"))
 })
 }
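The Auth middleware above tries JWT first and falls back to the session store, with both failure paths logging through cclog. A minimal sketch of that lookup order, under stated assumptions: the authFunc type and firstUser helper are hypothetical condensations, not the PR's actual structure:

package auth

import (
    "errors"
    "net/http"

    cclog "github.com/ClusterCockpit/cc-lib/ccLogger"
    "github.com/ClusterCockpit/cc-lib/schema"
)

// authFunc is a hypothetical stand-in for AuthViaJWT and AuthViaSession.
type authFunc func(rw http.ResponseWriter, r *http.Request) (*schema.User, error)

// firstUser returns the first user any lookup yields, in the given order.
func firstUser(rw http.ResponseWriter, r *http.Request, fns ...authFunc) (*schema.User, error) {
    for _, fn := range fns {
        user, err := fn(rw, r)
        if err != nil {
            cclog.Infof("auth -> authentication failed: %s", err.Error())
            return nil, err
        }
        if user != nil {
            return user, nil
        }
    }
    return nil, errors.New("unauthorized (please login first)")
}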
@@ -1,5 +1,5 @@
 // Copyright (C) NHR@FAU, University Erlangen-Nuremberg.
-// All rights reserved.
+// All rights reserved. This file is part of cc-backend.
 // Use of this source code is governed by a MIT-style
 // license that can be found in the LICENSE file.
 package auth
@@ -15,8 +15,8 @@ import (

     "github.com/ClusterCockpit/cc-backend/internal/config"
     "github.com/ClusterCockpit/cc-backend/internal/repository"
-    "github.com/ClusterCockpit/cc-backend/pkg/log"
-    "github.com/ClusterCockpit/cc-backend/pkg/schema"
+    cclog "github.com/ClusterCockpit/cc-lib/ccLogger"
+    "github.com/ClusterCockpit/cc-lib/schema"
     "github.com/golang-jwt/jwt/v5"
 )

@@ -28,17 +28,17 @@ type JWTAuthenticator struct {
 func (ja *JWTAuthenticator) Init() error {
     pubKey, privKey := os.Getenv("JWT_PUBLIC_KEY"), os.Getenv("JWT_PRIVATE_KEY")
     if pubKey == "" || privKey == "" {
-        log.Warn("environment variables 'JWT_PUBLIC_KEY' or 'JWT_PRIVATE_KEY' not set (token based authentication will not work)")
+        cclog.Warn("environment variables 'JWT_PUBLIC_KEY' or 'JWT_PRIVATE_KEY' not set (token based authentication will not work)")
     } else {
         bytes, err := base64.StdEncoding.DecodeString(pubKey)
         if err != nil {
-            log.Warn("Could not decode JWT public key")
+            cclog.Warn("Could not decode JWT public key")
             return err
         }
         ja.publicKey = ed25519.PublicKey(bytes)
         bytes, err = base64.StdEncoding.DecodeString(privKey)
         if err != nil {
-            log.Warn("Could not decode JWT private key")
+            cclog.Warn("Could not decode JWT private key")
             return err
         }
         ja.privateKey = ed25519.PrivateKey(bytes)
@@ -70,11 +70,11 @@ func (ja *JWTAuthenticator) AuthViaJWT(
         return ja.publicKey, nil
     })
     if err != nil {
-        log.Warn("Error while parsing JWT token")
+        cclog.Warn("Error while parsing JWT token")
         return nil, err
     }
     if !token.Valid {
-        log.Warn("jwt token claims are not valid")
+        cclog.Warn("jwt token claims are not valid")
         return nil, errors.New("jwt token claims are not valid")
     }

@@ -90,7 +90,7 @@ func (ja *JWTAuthenticator) AuthViaJWT(
     user, err := ur.GetUser(sub)
     // Deny any logins for unknown usernames
     if err != nil {
-        log.Warn("Could not find user from JWT in internal database.")
+        cclog.Warn("Could not find user from JWT in internal database.")
         return nil, errors.New("unknown user")
     }
     // Take user roles from database instead of trusting the JWT
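The JWT authenticators repeat the same key-loading dance: base64-decode the two environment variables and cast the raw bytes to ed25519 key types. Condensed into one hedged helper (the function and its error wrapping are mine; only the decode-and-cast steps come from the diff):

package auth

import (
    "crypto/ed25519"
    "encoding/base64"
    "fmt"
    "os"
)

// loadKeys is a hypothetical helper mirroring the Init() bodies above.
func loadKeys() (ed25519.PublicKey, ed25519.PrivateKey, error) {
    // Both keys arrive base64-encoded in the environment.
    pub, err := base64.StdEncoding.DecodeString(os.Getenv("JWT_PUBLIC_KEY"))
    if err != nil {
        return nil, nil, fmt.Errorf("decode public key: %w", err)
    }
    priv, err := base64.StdEncoding.DecodeString(os.Getenv("JWT_PRIVATE_KEY"))
    if err != nil {
        return nil, nil, fmt.Errorf("decode private key: %w", err)
    }
    return ed25519.PublicKey(pub), ed25519.PrivateKey(priv), nil
}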
@@ -1,5 +1,5 @@
 // Copyright (C) NHR@FAU, University Erlangen-Nuremberg.
-// All rights reserved.
+// All rights reserved. This file is part of cc-backend.
 // Use of this source code is governed by a MIT-style
 // license that can be found in the LICENSE file.
 package auth
@@ -15,8 +15,8 @@ import (

     "github.com/ClusterCockpit/cc-backend/internal/config"
     "github.com/ClusterCockpit/cc-backend/internal/repository"
-    "github.com/ClusterCockpit/cc-backend/pkg/log"
-    "github.com/ClusterCockpit/cc-backend/pkg/schema"
+    cclog "github.com/ClusterCockpit/cc-lib/ccLogger"
+    "github.com/ClusterCockpit/cc-lib/schema"
     "github.com/golang-jwt/jwt/v5"
 )

@@ -31,18 +31,18 @@ var _ Authenticator = (*JWTCookieSessionAuthenticator)(nil)
 func (ja *JWTCookieSessionAuthenticator) Init() error {
     pubKey, privKey := os.Getenv("JWT_PUBLIC_KEY"), os.Getenv("JWT_PRIVATE_KEY")
     if pubKey == "" || privKey == "" {
-        log.Warn("environment variables 'JWT_PUBLIC_KEY' or 'JWT_PRIVATE_KEY' not set (token based authentication will not work)")
+        cclog.Warn("environment variables 'JWT_PUBLIC_KEY' or 'JWT_PRIVATE_KEY' not set (token based authentication will not work)")
         return errors.New("environment variables 'JWT_PUBLIC_KEY' or 'JWT_PRIVATE_KEY' not set (token based authentication will not work)")
     } else {
         bytes, err := base64.StdEncoding.DecodeString(pubKey)
         if err != nil {
-            log.Warn("Could not decode JWT public key")
+            cclog.Warn("Could not decode JWT public key")
             return err
         }
         ja.publicKey = ed25519.PublicKey(bytes)
         bytes, err = base64.StdEncoding.DecodeString(privKey)
         if err != nil {
-            log.Warn("Could not decode JWT private key")
+            cclog.Warn("Could not decode JWT private key")
             return err
         }
         ja.privateKey = ed25519.PrivateKey(bytes)
@@ -53,13 +53,13 @@ func (ja *JWTCookieSessionAuthenticator) Init() error {
     if keyFound && pubKeyCrossLogin != "" {
         bytes, err := base64.StdEncoding.DecodeString(pubKeyCrossLogin)
         if err != nil {
-            log.Warn("Could not decode cross login JWT public key")
+            cclog.Warn("Could not decode cross login JWT public key")
             return err
         }
         ja.publicKeyCrossLogin = ed25519.PublicKey(bytes)
     } else {
         ja.publicKeyCrossLogin = nil
-        log.Debug("environment variable 'CROSS_LOGIN_JWT_PUBLIC_KEY' not set (cross login token based authentication will not work)")
+        cclog.Debug("environment variable 'CROSS_LOGIN_JWT_PUBLIC_KEY' not set (cross login token based authentication will not work)")
         return errors.New("environment variable 'CROSS_LOGIN_JWT_PUBLIC_KEY' not set (cross login token based authentication will not work)")
     }

@@ -67,22 +67,22 @@ func (ja *JWTCookieSessionAuthenticator) Init() error {
     // Warn if other necessary settings are not configured
     if jc != nil {
         if jc.CookieName == "" {
-            log.Info("cookieName for JWTs not configured (cross login via JWT cookie will fail)")
+            cclog.Info("cookieName for JWTs not configured (cross login via JWT cookie will fail)")
             return errors.New("cookieName for JWTs not configured (cross login via JWT cookie will fail)")
         }
         if !jc.ValidateUser {
-            log.Info("forceJWTValidationViaDatabase not set to true: CC will accept users and roles defined in JWTs regardless of its own database!")
+            cclog.Info("forceJWTValidationViaDatabase not set to true: CC will accept users and roles defined in JWTs regardless of its own database!")
         }
         if jc.TrustedIssuer == "" {
-            log.Info("trustedExternalIssuer for JWTs not configured (cross login via JWT cookie will fail)")
+            cclog.Info("trustedExternalIssuer for JWTs not configured (cross login via JWT cookie will fail)")
             return errors.New("trustedExternalIssuer for JWTs not configured (cross login via JWT cookie will fail)")
         }
     } else {
-        log.Warn("config for JWTs not configured (cross login via JWT cookie will fail)")
+        cclog.Warn("config for JWTs not configured (cross login via JWT cookie will fail)")
         return errors.New("config for JWTs not configured (cross login via JWT cookie will fail)")
     }

-    log.Info("JWT Cookie Session authenticator successfully registered")
+    cclog.Info("JWT Cookie Session authenticator successfully registered")
     return nil
 }

@@ -140,12 +140,12 @@ func (ja *JWTCookieSessionAuthenticator) Login(
         return ja.publicKey, nil
     })
     if err != nil {
-        log.Warn("JWT cookie session: error while parsing token")
+        cclog.Warn("JWT cookie session: error while parsing token")
         return nil, err
     }

     if !token.Valid {
-        log.Warn("jwt token claims are not valid")
+        cclog.Warn("jwt token claims are not valid")
         return nil, errors.New("jwt token claims are not valid")
     }

@@ -159,12 +159,12 @@ func (ja *JWTCookieSessionAuthenticator) Login(
     var err error
     user, err = repository.GetUserRepository().GetUser(sub)
     if err != nil && err != sql.ErrNoRows {
-        log.Errorf("Error while loading user '%v'", sub)
+        cclog.Errorf("Error while loading user '%v'", sub)
     }

     // Deny any logins for unknown usernames
     if user == nil {
-        log.Warn("Could not find user from JWT in internal database.")
+        cclog.Warn("Could not find user from JWT in internal database.")
         return nil, errors.New("unknown user")
     }
     } else {
@ -1,5 +1,5 @@
|
|||||||
// Copyright (C) NHR@FAU, University Erlangen-Nuremberg.
|
// Copyright (C) NHR@FAU, University Erlangen-Nuremberg.
|
||||||
// All rights reserved.
|
// All rights reserved. This file is part of cc-backend.
|
||||||
// Use of this source code is governed by a MIT-style
|
// Use of this source code is governed by a MIT-style
|
||||||
// license that can be found in the LICENSE file.
|
// license that can be found in the LICENSE file.
|
||||||
package auth
|
package auth
|
||||||
@ -15,8 +15,8 @@ import (
|
|||||||
|
|
||||||
"github.com/ClusterCockpit/cc-backend/internal/config"
|
"github.com/ClusterCockpit/cc-backend/internal/config"
|
||||||
"github.com/ClusterCockpit/cc-backend/internal/repository"
|
"github.com/ClusterCockpit/cc-backend/internal/repository"
|
||||||
"github.com/ClusterCockpit/cc-backend/pkg/log"
|
cclog "github.com/ClusterCockpit/cc-lib/ccLogger"
|
||||||
"github.com/ClusterCockpit/cc-backend/pkg/schema"
|
"github.com/ClusterCockpit/cc-lib/schema"
|
||||||
"github.com/golang-jwt/jwt/v5"
|
"github.com/golang-jwt/jwt/v5"
|
||||||
)
|
)
|
||||||
|
|
||||||
@ -30,13 +30,13 @@ func (ja *JWTSessionAuthenticator) Init() error {
|
|||||||
if pubKey := os.Getenv("CROSS_LOGIN_JWT_HS512_KEY"); pubKey != "" {
|
if pubKey := os.Getenv("CROSS_LOGIN_JWT_HS512_KEY"); pubKey != "" {
|
||||||
bytes, err := base64.StdEncoding.DecodeString(pubKey)
|
bytes, err := base64.StdEncoding.DecodeString(pubKey)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
log.Warn("Could not decode cross login JWT HS512 key")
|
cclog.Warn("Could not decode cross login JWT HS512 key")
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
ja.loginTokenKey = bytes
|
ja.loginTokenKey = bytes
|
||||||
}
|
}
|
||||||
|
|
||||||
log.Info("JWT Session authenticator successfully registered")
|
cclog.Info("JWT Session authenticator successfully registered")
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -67,12 +67,12 @@ func (ja *JWTSessionAuthenticator) Login(
|
|||||||
return nil, fmt.Errorf("unkown signing method for login token: %s (known: HS256, HS512, EdDSA)", t.Method.Alg())
|
return nil, fmt.Errorf("unkown signing method for login token: %s (known: HS256, HS512, EdDSA)", t.Method.Alg())
|
||||||
})
|
})
|
||||||
if err != nil {
|
if err != nil {
|
||||||
log.Warn("Error while parsing jwt token")
|
cclog.Warn("Error while parsing jwt token")
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
|
|
||||||
if !token.Valid {
|
if !token.Valid {
|
||||||
log.Warn("jwt token claims are not valid")
|
cclog.Warn("jwt token claims are not valid")
|
||||||
return nil, errors.New("jwt token claims are not valid")
|
return nil, errors.New("jwt token claims are not valid")
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -86,12 +86,12 @@ func (ja *JWTSessionAuthenticator) Login(
|
|||||||
var err error
|
var err error
|
||||||
user, err = repository.GetUserRepository().GetUser(sub)
|
user, err = repository.GetUserRepository().GetUser(sub)
|
||||||
if err != nil && err != sql.ErrNoRows {
|
if err != nil && err != sql.ErrNoRows {
|
||||||
log.Errorf("Error while loading user '%v'", sub)
|
cclog.Errorf("Error while loading user '%v'", sub)
|
||||||
}
|
}
|
||||||
|
|
||||||
// Deny any logins for unknown usernames
|
// Deny any logins for unknown usernames
|
||||||
if user == nil {
|
if user == nil {
|
||||||
log.Warn("Could not find user from JWT in internal database.")
|
cclog.Warn("Could not find user from JWT in internal database.")
|
||||||
return nil, errors.New("unknown user")
|
return nil, errors.New("unknown user")
|
||||||
}
|
}
|
||||||
} else {
|
} else {
|
||||||
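The Login implementations above share one parse-then-validate shape: a keyfunc that rejects unexpected signing methods, followed by a token.Valid check before any claims are trusted. A self-contained sketch of that shape against golang-jwt/jwt/v5; the token string and HS512 key below are placeholders, not values from cc-backend:

package main

import (
	"errors"
	"fmt"

	"github.com/golang-jwt/jwt/v5"
)

func parseLoginToken(tokenString string, key []byte) (*jwt.Token, error) {
	token, err := jwt.Parse(tokenString, func(t *jwt.Token) (any, error) {
		// Reject anything but the expected HMAC method before handing out a key.
		if t.Method.Alg() != "HS512" {
			return nil, fmt.Errorf("unknown signing method for login token: %s", t.Method.Alg())
		}
		return key, nil
	})
	if err != nil {
		return nil, err // "Error while parsing jwt token"
	}
	if !token.Valid {
		return nil, errors.New("jwt token claims are not valid")
	}
	return token, nil
}

func main() {
	if _, err := parseLoginToken("not-a-real-token", []byte("placeholder-key")); err != nil {
		fmt.Println(err)
	}
}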
@@ -1,5 +1,5 @@
 // Copyright (C) NHR@FAU, University Erlangen-Nuremberg.
-// All rights reserved.
+// All rights reserved. This file is part of cc-backend.
 // Use of this source code is governed by a MIT-style
 // license that can be found in the LICENSE file.
 package auth
@@ -13,8 +13,8 @@ import (

 "github.com/ClusterCockpit/cc-backend/internal/config"
 "github.com/ClusterCockpit/cc-backend/internal/repository"
-"github.com/ClusterCockpit/cc-backend/pkg/log"
+cclog "github.com/ClusterCockpit/cc-lib/ccLogger"
-"github.com/ClusterCockpit/cc-backend/pkg/schema"
+"github.com/ClusterCockpit/cc-lib/schema"
 "github.com/go-ldap/ldap/v3"
 )

@@ -28,7 +28,7 @@ var _ Authenticator = (*LdapAuthenticator)(nil)
 func (la *LdapAuthenticator) Init() error {
 la.syncPassword = os.Getenv("LDAP_ADMIN_PASSWORD")
 if la.syncPassword == "" {
-log.Warn("environment variable 'LDAP_ADMIN_PASSWORD' not set (ldap sync will not work)")
+cclog.Warn("environment variable 'LDAP_ADMIN_PASSWORD' not set (ldap sync will not work)")
 }

 lc := config.Keys.LdapConfig
@@ -58,7 +58,7 @@ func (la *LdapAuthenticator) CanLogin(
 if lc.SyncUserOnLogin {
 l, err := la.getLdapConnection(true)
 if err != nil {
-log.Error("LDAP connection error")
+cclog.Error("LDAP connection error")
 }
 defer l.Close()

@@ -71,12 +71,12 @@ func (la *LdapAuthenticator) CanLogin(

 sr, err := l.Search(searchRequest)
 if err != nil {
-log.Warn(err)
+cclog.Warn(err)
 return nil, false
 }

 if len(sr.Entries) != 1 {
-log.Warn("LDAP: User does not exist or too many entries returned")
+cclog.Warn("LDAP: User does not exist or too many entries returned")
 return nil, false
 }

@@ -96,7 +96,7 @@ func (la *LdapAuthenticator) CanLogin(
 }

 if err := repository.GetUserRepository().AddUser(user); err != nil {
-log.Errorf("User '%s' LDAP: Insert into DB failed", username)
+cclog.Errorf("User '%s' LDAP: Insert into DB failed", username)
 return nil, false
 }

@@ -114,14 +114,14 @@ func (la *LdapAuthenticator) Login(
 ) (*schema.User, error) {
 l, err := la.getLdapConnection(false)
 if err != nil {
-log.Warn("Error while getting ldap connection")
+cclog.Warn("Error while getting ldap connection")
 return nil, err
 }
 defer l.Close()

 userDn := strings.Replace(config.Keys.LdapConfig.UserBind, "{username}", user.Username, -1)
 if err := l.Bind(userDn, r.FormValue("password")); err != nil {
-log.Errorf("AUTH/LDAP > Authentication for user %s failed: %v",
+cclog.Errorf("AUTH/LDAP > Authentication for user %s failed: %v",
 user.Username, err)
 return nil, fmt.Errorf("Authentication failed")
 }
@@ -148,7 +148,7 @@ func (la *LdapAuthenticator) Sync() error {

 l, err := la.getLdapConnection(true)
 if err != nil {
-log.Error("LDAP connection error")
+cclog.Error("LDAP connection error")
 return err
 }
 defer l.Close()
@@ -159,7 +159,7 @@ func (la *LdapAuthenticator) Sync() error {
 lc.UserFilter,
 []string{"dn", "uid", la.UserAttr}, nil))
 if err != nil {
-log.Warn("LDAP search error")
+cclog.Warn("LDAP search error")
 return err
 }

@@ -182,7 +182,7 @@ func (la *LdapAuthenticator) Sync() error {
 for username, where := range users {
 if where == IN_DB && lc.SyncDelOldUsers {
 ur.DelUser(username)
-log.Debugf("sync: remove %v (does not show up in LDAP anymore)", username)
+cclog.Debugf("sync: remove %v (does not show up in LDAP anymore)", username)
 } else if where == IN_LDAP {
 name := newnames[username]

@@ -198,9 +198,9 @@ func (la *LdapAuthenticator) Sync() error {
 AuthSource: schema.AuthViaLDAP,
 }

-log.Debugf("sync: add %v (name: %v, roles: [user], ldap: true)", username, name)
+cclog.Debugf("sync: add %v (name: %v, roles: [user], ldap: true)", username, name)
 if err := ur.AddUser(user); err != nil {
-log.Errorf("User '%s' LDAP: Insert into DB failed", username)
+cclog.Errorf("User '%s' LDAP: Insert into DB failed", username)
 return err
 }
 }
@@ -213,14 +213,14 @@ func (la *LdapAuthenticator) getLdapConnection(admin bool) (*ldap.Conn, error) {
 lc := config.Keys.LdapConfig
 conn, err := ldap.DialURL(lc.Url)
 if err != nil {
-log.Warn("LDAP URL dial failed")
+cclog.Warn("LDAP URL dial failed")
 return nil, err
 }

 if admin {
 if err := conn.Bind(lc.SearchDN, la.syncPassword); err != nil {
 conn.Close()
-log.Warn("LDAP connection bind failed")
+cclog.Warn("LDAP connection bind failed")
 return nil, err
 }
 }
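A hedged sketch of the connection pattern getLdapConnection follows in the hunk above: dial the configured URL, optionally bind as the sync (admin) user, and close the connection on any bind failure. The names here (url, searchDN, syncPassword) are illustrative stand-ins, not the repository's config fields:

package main

import (
	"fmt"

	"github.com/go-ldap/ldap/v3"
)

func dialAndBind(url, searchDN, syncPassword string, admin bool) (*ldap.Conn, error) {
	conn, err := ldap.DialURL(url)
	if err != nil {
		return nil, err // "LDAP URL dial failed"
	}
	if admin {
		if err := conn.Bind(searchDN, syncPassword); err != nil {
			conn.Close() // do not leak the half-initialized connection
			return nil, err
		}
	}
	return conn, nil
}

func main() {
	conn, err := dialAndBind("ldaps://ldap.example.com", "cn=admin,dc=example,dc=com", "secret", true)
	if err != nil {
		fmt.Println(err)
		return
	}
	defer conn.Close()
}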
@@ -1,5 +1,5 @@
 // Copyright (C) NHR@FAU, University Erlangen-Nuremberg.
-// All rights reserved.
+// All rights reserved. This file is part of cc-backend.
 // Use of this source code is governed by a MIT-style
 // license that can be found in the LICENSE file.
 package auth
@@ -8,8 +8,8 @@ import (
 "fmt"
 "net/http"

-"github.com/ClusterCockpit/cc-backend/pkg/log"
+cclog "github.com/ClusterCockpit/cc-lib/ccLogger"
-"github.com/ClusterCockpit/cc-backend/pkg/schema"
+"github.com/ClusterCockpit/cc-lib/schema"
 "golang.org/x/crypto/bcrypt"
 )

@@ -27,19 +27,19 @@ func (la *LocalAuthenticator) CanLogin(
 user *schema.User,
 username string,
 rw http.ResponseWriter,
-r *http.Request) (*schema.User, bool) {
+r *http.Request,
+) (*schema.User, bool) {
 return user, user != nil && user.AuthSource == schema.AuthViaLocalPassword
 }

 func (la *LocalAuthenticator) Login(
 user *schema.User,
 rw http.ResponseWriter,
-r *http.Request) (*schema.User, error) {
+r *http.Request,
+) (*schema.User, error) {
 if e := bcrypt.CompareHashAndPassword([]byte(user.Password),
 []byte(r.FormValue("password"))); e != nil {
-log.Errorf("AUTH/LOCAL > Authentication for user %s failed!", user.Username)
+cclog.Errorf("AUTH/LOCAL > Authentication for user %s failed!", user.Username)
 return nil, fmt.Errorf("Authentication failed")
 }
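Besides the logger swap, this hunk carries a purely cosmetic signature reformat: once a parameter list wraps, the closing parenthesis and result list move to their own line with a trailing comma, the style gofumpt prefers. A before/after sketch with a placeholder User type so it stands alone:

package main

import "net/http"

type User struct{ Name string }

// Old style: closing paren and result list glued to the last parameter.
func loginOld(user *User, rw http.ResponseWriter, r *http.Request) (*User, error) {
	return user, nil
}

// New style after the reformat: one parameter per line, trailing comma,
// results on their own line. Behavior is identical.
func loginNew(
	user *User,
	rw http.ResponseWriter,
	r *http.Request,
) (*User, error) {
	return user, nil
}

func main() {}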
@@ -1,5 +1,5 @@
 // Copyright (C) NHR@FAU, University Erlangen-Nuremberg.
-// All rights reserved.
+// All rights reserved. This file is part of cc-backend.
 // Use of this source code is governed by a MIT-style
 // license that can be found in the LICENSE file.
 package auth
@@ -15,8 +15,8 @@ import (

 "github.com/ClusterCockpit/cc-backend/internal/config"
 "github.com/ClusterCockpit/cc-backend/internal/repository"
-"github.com/ClusterCockpit/cc-backend/pkg/log"
+cclog "github.com/ClusterCockpit/cc-lib/ccLogger"
-"github.com/ClusterCockpit/cc-backend/pkg/schema"
+"github.com/ClusterCockpit/cc-lib/schema"
 "github.com/coreos/go-oidc/v3/oidc"
 "github.com/gorilla/mux"
 "golang.org/x/oauth2"
@@ -51,15 +51,15 @@ func setCallbackCookie(w http.ResponseWriter, r *http.Request, name, value strin
 func NewOIDC(a *Authentication) *OIDC {
 provider, err := oidc.NewProvider(context.Background(), config.Keys.OpenIDConfig.Provider)
 if err != nil {
-log.Fatal(err)
+cclog.Fatal(err)
 }
 clientID := os.Getenv("OID_CLIENT_ID")
 if clientID == "" {
-log.Warn("environment variable 'OID_CLIENT_ID' not set (Open ID connect auth will not work)")
+cclog.Warn("environment variable 'OID_CLIENT_ID' not set (Open ID connect auth will not work)")
 }
 clientSecret := os.Getenv("OID_CLIENT_SECRET")
 if clientSecret == "" {
-log.Warn("environment variable 'OID_CLIENT_SECRET' not set (Open ID connect auth will not work)")
+cclog.Warn("environment variable 'OID_CLIENT_SECRET' not set (Open ID connect auth will not work)")
 }

 client := &oauth2.Config{
@@ -173,7 +173,7 @@ func (oa *OIDC) OAuth2Callback(rw http.ResponseWriter, r *http.Request) {
 }

 oa.authentication.SaveSession(rw, r, user)
-log.Infof("login successfull: user: %#v (roles: %v, projects: %v)", user.Username, user.Roles, user.Projects)
+cclog.Infof("login successfull: user: %#v (roles: %v, projects: %v)", user.Username, user.Roles, user.Projects)
 ctx := context.WithValue(r.Context(), repository.ContextUserKey, user)
 http.RedirectHandler("/", http.StatusTemporaryRedirect).ServeHTTP(rw, r.WithContext(ctx))
 }
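The NewOIDC hunk shows the usual go-oidc bootstrap: resolve the provider from its issuer URL, read client credentials from the environment, and assemble an oauth2.Config from the provider's endpoint. A hedged, self-contained sketch; the issuer URL and redirect path are placeholders, not cc-backend's configuration:

package main

import (
	"context"
	"fmt"
	"os"

	"github.com/coreos/go-oidc/v3/oidc"
	"golang.org/x/oauth2"
)

func newOAuthConfig() (*oauth2.Config, error) {
	// Discovers the provider's endpoints from /.well-known/openid-configuration.
	provider, err := oidc.NewProvider(context.Background(), "https://issuer.example.com")
	if err != nil {
		return nil, err
	}
	return &oauth2.Config{
		ClientID:     os.Getenv("OID_CLIENT_ID"),
		ClientSecret: os.Getenv("OID_CLIENT_SECRET"),
		Endpoint:     provider.Endpoint(),
		RedirectURL:  "https://cc.example.com/oidc-callback",
		Scopes:       []string{oidc.ScopeOpenID, "profile"},
	}, nil
}

func main() {
	if _, err := newOAuthConfig(); err != nil {
		fmt.Println(err)
	}
}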
@@ -1,5 +1,5 @@
 // Copyright (C) NHR@FAU, University Erlangen-Nuremberg.
-// All rights reserved.
+// All rights reserved. This file is part of cc-backend.
 // Use of this source code is governed by a MIT-style
 // license that can be found in the LICENSE file.
 package config
@@ -9,8 +9,8 @@ import (
 "encoding/json"
 "os"

-"github.com/ClusterCockpit/cc-backend/pkg/log"
+cclog "github.com/ClusterCockpit/cc-lib/ccLogger"
-"github.com/ClusterCockpit/cc-backend/pkg/schema"
+"github.com/ClusterCockpit/cc-lib/schema"
 )

 var Keys schema.ProgramConfig = schema.ProgramConfig{
@@ -53,20 +53,20 @@ func Init(flagConfigFile string) {
 raw, err := os.ReadFile(flagConfigFile)
 if err != nil {
 if !os.IsNotExist(err) {
-log.Abortf("Config Init: Could not read config file '%s'.\nError: %s\n", flagConfigFile, err.Error())
+cclog.Abortf("Config Init: Could not read config file '%s'.\nError: %s\n", flagConfigFile, err.Error())
 }
 } else {
 if err := schema.Validate(schema.Config, bytes.NewReader(raw)); err != nil {
-log.Abortf("Config Init: Could not validate config file '%s'.\nError: %s\n", flagConfigFile, err.Error())
+cclog.Abortf("Config Init: Could not validate config file '%s'.\nError: %s\n", flagConfigFile, err.Error())
 }
 dec := json.NewDecoder(bytes.NewReader(raw))
 dec.DisallowUnknownFields()
 if err := dec.Decode(&Keys); err != nil {
-log.Abortf("Config Init: Could not decode config file '%s'.\nError: %s\n", flagConfigFile, err.Error())
+cclog.Abortf("Config Init: Could not decode config file '%s'.\nError: %s\n", flagConfigFile, err.Error())
 }

 if Keys.Clusters == nil || len(Keys.Clusters) < 1 {
-log.Abort("Config Init: At least one cluster required in config. Exited with error.")
+cclog.Abort("Config Init: At least one cluster required in config. Exited with error.")
 }
 }
 }
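Config Init above validates against a JSON schema and then decodes with DisallowUnknownFields, so a misspelled key in config.json fails loudly instead of being silently ignored. A minimal sketch of that strict-decode step; the Config type and its fields are stand-ins, not the real schema.ProgramConfig:

package main

import (
	"bytes"
	"encoding/json"
	"fmt"
	"os"
)

type Config struct {
	Addr     string   `json:"addr"`
	Clusters []string `json:"clusters"`
}

func main() {
	raw := []byte(`{"addr": ":8080", "clusters": ["fritz"]}`)
	var keys Config
	dec := json.NewDecoder(bytes.NewReader(raw))
	dec.DisallowUnknownFields() // unknown keys become hard decode errors
	if err := dec.Decode(&keys); err != nil {
		fmt.Printf("could not decode config: %s\n", err)
		os.Exit(1)
	}
	if len(keys.Clusters) < 1 {
		fmt.Println("at least one cluster required in config")
		os.Exit(1)
	}
	fmt.Printf("listening on %s, %d cluster(s)\n", keys.Addr, len(keys.Clusters))
}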
@@ -1,5 +1,5 @@
 // Copyright (C) NHR@FAU, University Erlangen-Nuremberg.
-// All rights reserved.
+// All rights reserved. This file is part of cc-backend.
 // Use of this source code is governed by a MIT-style
 // license that can be found in the LICENSE file.
 package config

@@ -1,3 +1,7 @@
+// Copyright (C) NHR@FAU, University Erlangen-Nuremberg.
+// All rights reserved. This file is part of cc-backend.
+// Use of this source code is governed by a MIT-style
+// license that can be found in the LICENSE file.
 package config

 import (
File diff suppressed because it is too large
@@ -1,5 +1,5 @@
 // Copyright (C) NHR@FAU, University Erlangen-Nuremberg.
-// All rights reserved.
+// All rights reserved. This file is part of cc-backend.
 // Use of this source code is governed by a MIT-style
 // license that can be found in the LICENSE file.
 package model
@@ -8,7 +8,7 @@ import (
 "strconv"
 "time"

-"github.com/ClusterCockpit/cc-backend/pkg/schema"
+"github.com/ClusterCockpit/cc-lib/schema"
 )

 type Count struct {
@@ -170,7 +170,6 @@ type NamedStatsWithScope struct {
 type NodeFilter struct {
 Hostname *StringInput `json:"hostname,omitempty"`
 Cluster *StringInput `json:"cluster,omitempty"`
-SubCluster *StringInput `json:"subCluster,omitempty"`
 NodeState *string `json:"nodeState,omitempty"`
 HealthState *schema.NodeState `json:"healthState,omitempty"`
 }
@@ -4,7 +4,7 @@ import (
 "sync"

 "github.com/ClusterCockpit/cc-backend/internal/repository"
-"github.com/ClusterCockpit/cc-backend/pkg/log"
+cclog "github.com/ClusterCockpit/cc-lib/ccLogger"
 "github.com/jmoiron/sqlx"
 )

@@ -32,7 +32,7 @@ func Init() {

 func GetResolverInstance() *Resolver {
 if resolverInstance == nil {
-log.Fatal("Authentication module not initialized!")
+cclog.Fatal("Authentication module not initialized!")
 }

 return resolverInstance
@@ -20,8 +20,8 @@ import (
 "github.com/ClusterCockpit/cc-backend/internal/metricDataDispatcher"
 "github.com/ClusterCockpit/cc-backend/internal/repository"
 "github.com/ClusterCockpit/cc-backend/pkg/archive"
-"github.com/ClusterCockpit/cc-backend/pkg/log"
+cclog "github.com/ClusterCockpit/cc-lib/ccLogger"
-"github.com/ClusterCockpit/cc-backend/pkg/schema"
+"github.com/ClusterCockpit/cc-lib/schema"
 )

 // Partitions is the resolver for the partitions field.
@@ -54,7 +54,7 @@ func (r *jobResolver) ConcurrentJobs(ctx context.Context, obj *schema.Job) (*mod
 func (r *jobResolver) Footprint(ctx context.Context, obj *schema.Job) ([]*model.FootprintValue, error) {
 rawFootprint, err := r.Repo.FetchFootprint(obj)
 if err != nil {
-log.Warn("Error while fetching job footprint data")
+cclog.Warn("Error while fetching job footprint data")
 return nil, err
 }

@@ -79,7 +79,7 @@ func (r *jobResolver) Footprint(ctx context.Context, obj *schema.Job) ([]*model.
 func (r *jobResolver) EnergyFootprint(ctx context.Context, obj *schema.Job) ([]*model.EnergyFootprintValue, error) {
 rawEnergyFootprint, err := r.Repo.FetchEnergyFootprint(obj)
 if err != nil {
-log.Warn("Error while fetching job energy footprint data")
+cclog.Warn("Error while fetching job energy footprint data")
 return nil, err
 }

@@ -143,12 +143,12 @@ func (r *mutationResolver) CreateTag(ctx context.Context, typeArg string, name s
 // Create in DB
 id, err := r.Repo.CreateTag(typeArg, name, scope)
 if err != nil {
-log.Warn("Error while creating tag")
+cclog.Warn("Error while creating tag")
 return nil, err
 }
 return &schema.Tag{ID: id, Type: typeArg, Name: name, Scope: scope}, nil
 } else {
-log.Warnf("Not authorized to create tag with scope: %s", scope)
+cclog.Warnf("Not authorized to create tag with scope: %s", scope)
 return nil, fmt.Errorf("not authorized to create tag with scope: %s", scope)
 }
 }
@@ -168,7 +168,7 @@ func (r *mutationResolver) AddTagsToJob(ctx context.Context, job string, tagIds

 jid, err := strconv.ParseInt(job, 10, 64)
 if err != nil {
-log.Warn("Error while adding tag to job")
+cclog.Warn("Error while adding tag to job")
 return nil, err
 }

@@ -177,14 +177,14 @@ func (r *mutationResolver) AddTagsToJob(ctx context.Context, job string, tagIds
 // Get ID
 tid, err := strconv.ParseInt(tagId, 10, 64)
 if err != nil {
-log.Warn("Error while parsing tag id")
+cclog.Warn("Error while parsing tag id")
 return nil, err
 }

 // Test Exists
 _, _, tscope, exists := r.Repo.TagInfo(tid)
 if !exists {
-log.Warnf("Tag does not exist (ID): %d", tid)
+cclog.Warnf("Tag does not exist (ID): %d", tid)
 return nil, fmt.Errorf("tag does not exist (ID): %d", tid)
 }

@@ -194,11 +194,11 @@ func (r *mutationResolver) AddTagsToJob(ctx context.Context, job string, tagIds
 user.Username == tscope {
 // Add to Job
 if tags, err = r.Repo.AddTag(user, jid, tid); err != nil {
-log.Warn("Error while adding tag")
+cclog.Warn("Error while adding tag")
 return nil, err
 }
 } else {
-log.Warnf("Not authorized to add tag: %d", tid)
+cclog.Warnf("Not authorized to add tag: %d", tid)
 return nil, fmt.Errorf("not authorized to add tag: %d", tid)
 }
 }
@@ -215,7 +215,7 @@ func (r *mutationResolver) RemoveTagsFromJob(ctx context.Context, job string, ta

 jid, err := strconv.ParseInt(job, 10, 64)
 if err != nil {
-log.Warn("Error while parsing job id")
+cclog.Warn("Error while parsing job id")
 return nil, err
 }

@@ -224,14 +224,14 @@ func (r *mutationResolver) RemoveTagsFromJob(ctx context.Context, job string, ta
 // Get ID
 tid, err := strconv.ParseInt(tagId, 10, 64)
 if err != nil {
-log.Warn("Error while parsing tag id")
+cclog.Warn("Error while parsing tag id")
 return nil, err
 }

 // Test Exists
 _, _, tscope, exists := r.Repo.TagInfo(tid)
 if !exists {
-log.Warnf("Tag does not exist (ID): %d", tid)
+cclog.Warnf("Tag does not exist (ID): %d", tid)
 return nil, fmt.Errorf("tag does not exist (ID): %d", tid)
 }

@@ -241,11 +241,11 @@ func (r *mutationResolver) RemoveTagsFromJob(ctx context.Context, job string, ta
 user.Username == tscope {
 // Remove from Job
 if tags, err = r.Repo.RemoveTag(user, jid, tid); err != nil {
-log.Warn("Error while removing tag")
+cclog.Warn("Error while removing tag")
 return nil, err
 }
 } else {
-log.Warnf("Not authorized to remove tag: %d", tid)
+cclog.Warnf("Not authorized to remove tag: %d", tid)
 return nil, fmt.Errorf("not authorized to remove tag: %d", tid)
 }

@@ -267,14 +267,14 @@ func (r *mutationResolver) RemoveTagFromList(ctx context.Context, tagIds []strin
 // Get ID
 tid, err := strconv.ParseInt(tagId, 10, 64)
 if err != nil {
-log.Warn("Error while parsing tag id for removal")
+cclog.Warn("Error while parsing tag id for removal")
 return nil, err
 }

 // Test Exists
 _, _, tscope, exists := r.Repo.TagInfo(tid)
 if !exists {
-log.Warnf("Tag does not exist (ID): %d", tid)
+cclog.Warnf("Tag does not exist (ID): %d", tid)
 return nil, fmt.Errorf("tag does not exist (ID): %d", tid)
 }

@@ -282,13 +282,13 @@ func (r *mutationResolver) RemoveTagFromList(ctx context.Context, tagIds []strin
 if user.HasRole(schema.RoleAdmin) && (tscope == "global" || tscope == "admin") || user.Username == tscope {
 // Remove from DB
 if err = r.Repo.RemoveTagById(tid); err != nil {
-log.Warn("Error while removing tag")
+cclog.Warn("Error while removing tag")
 return nil, err
 } else {
 tags = append(tags, int(tid))
 }
 } else {
-log.Warnf("Not authorized to remove tag: %d", tid)
+cclog.Warnf("Not authorized to remove tag: %d", tid)
 return nil, fmt.Errorf("not authorized to remove tag: %d", tid)
 }
 }
@@ -298,7 +298,7 @@ func (r *mutationResolver) RemoveTagFromList(ctx context.Context, tagIds []strin
 // UpdateConfiguration is the resolver for the updateConfiguration field.
 func (r *mutationResolver) UpdateConfiguration(ctx context.Context, name string, value string) (*string, error) {
 if err := repository.GetUserCfgRepo().UpdateConfig(name, value, repository.GetUserFromContext(ctx)); err != nil {
-log.Warn("Error while updating user config")
+cclog.Warn("Error while updating user config")
 return nil, err
 }

@@ -344,7 +344,7 @@ func (r *queryResolver) User(ctx context.Context, username string) (*model.User,
 func (r *queryResolver) AllocatedNodes(ctx context.Context, cluster string) ([]*model.Count, error) {
 data, err := r.Repo.AllocatedNodes(cluster)
 if err != nil {
-log.Warn("Error while fetching allocated nodes")
+cclog.Warn("Error while fetching allocated nodes")
 return nil, err
 }

@@ -364,7 +364,7 @@ func (r *queryResolver) Node(ctx context.Context, id string) (*schema.Node, erro
 repo := repository.GetNodeRepository()
 numericId, err := strconv.ParseInt(id, 10, 64)
 if err != nil {
-log.Warn("Error while parsing job id")
+cclog.Warn("Error while parsing job id")
 return nil, err
 }
 return repo.GetNode(numericId, false)
@@ -387,13 +387,13 @@ func (r *queryResolver) NodeStats(ctx context.Context, filter []*model.NodeFilte
 func (r *queryResolver) Job(ctx context.Context, id string) (*schema.Job, error) {
 numericId, err := strconv.ParseInt(id, 10, 64)
 if err != nil {
-log.Warn("Error while parsing job id")
+cclog.Warn("Error while parsing job id")
 return nil, err
 }

 job, err := r.Repo.FindById(ctx, numericId)
 if err != nil {
-log.Warn("Error while finding job by id")
+cclog.Warn("Error while finding job by id")
 return nil, err
 }

@@ -420,13 +420,13 @@ func (r *queryResolver) JobMetrics(ctx context.Context, id string, metrics []str

 job, err := r.Query().Job(ctx, id)
 if err != nil {
-log.Warn("Error while querying job for metrics")
+cclog.Warn("Error while querying job for metrics")
 return nil, err
 }

 data, err := metricDataDispatcher.LoadData(job, metrics, scopes, ctx, *resolution)
 if err != nil {
-log.Warn("Error while loading job data")
+cclog.Warn("Error while loading job data")
 return nil, err
 }

@@ -448,13 +448,13 @@ func (r *queryResolver) JobMetrics(ctx context.Context, id string, metrics []str
 func (r *queryResolver) JobStats(ctx context.Context, id string, metrics []string) ([]*model.NamedStats, error) {
 job, err := r.Query().Job(ctx, id)
 if err != nil {
-log.Warnf("Error while querying job %s for metadata", id)
+cclog.Warnf("Error while querying job %s for metadata", id)
 return nil, err
 }

 data, err := metricDataDispatcher.LoadJobStats(job, metrics, ctx)
 if err != nil {
-log.Warnf("Error while loading jobStats data for job id %s", id)
+cclog.Warnf("Error while loading jobStats data for job id %s", id)
 return nil, err
 }

@@ -473,13 +473,13 @@ func (r *queryResolver) JobStats(ctx context.Context, id string, metrics []strin
 func (r *queryResolver) ScopedJobStats(ctx context.Context, id string, metrics []string, scopes []schema.MetricScope) ([]*model.NamedStatsWithScope, error) {
 job, err := r.Query().Job(ctx, id)
 if err != nil {
-log.Warnf("Error while querying job %s for metadata", id)
+cclog.Warnf("Error while querying job %s for metadata", id)
 return nil, err
 }

 data, err := metricDataDispatcher.LoadScopedJobStats(job, metrics, scopes, ctx)
 if err != nil {
-log.Warnf("Error while loading scopedJobStats data for job id %s", id)
+cclog.Warnf("Error while loading scopedJobStats data for job id %s", id)
 return nil, err
 }

@@ -518,13 +518,13 @@ func (r *queryResolver) Jobs(ctx context.Context, filter []*model.JobFilter, pag

 jobs, err := r.Repo.QueryJobs(ctx, filter, page, order)
 if err != nil {
-log.Warn("Error while querying jobs")
+cclog.Warn("Error while querying jobs")
 return nil, err
 }

 count, err := r.Repo.CountJobs(ctx, filter)
 if err != nil {
-log.Warn("Error while counting jobs")
+cclog.Warn("Error while counting jobs")
 return nil, err
 }

@@ -540,7 +540,7 @@ func (r *queryResolver) Jobs(ctx context.Context, filter []*model.JobFilter, pag
 }
 nextJobs, err := r.Repo.QueryJobs(ctx, filter, nextPage, order)
 if err != nil {
-log.Warn("Error while querying next jobs")
+cclog.Warn("Error while querying next jobs")
 return nil, err
 }

@@ -636,7 +636,7 @@ func (r *queryResolver) JobsMetricStats(ctx context.Context, filter []*model.Job

 jobs, err := r.Repo.QueryJobs(ctx, filter, nil, order)
 if err != nil {
-log.Warn("Error while querying jobs for comparison")
+cclog.Warn("Error while querying jobs for comparison")
 return nil, err
 }

@@ -644,7 +644,7 @@ func (r *queryResolver) JobsMetricStats(ctx context.Context, filter []*model.Job
 for _, job := range jobs {
 data, err := metricDataDispatcher.LoadJobStats(job, metrics, ctx)
 if err != nil {
-log.Warnf("Error while loading comparison jobStats data for job id %d", job.JobID)
+cclog.Warnf("Error while loading comparison jobStats data for job id %d", job.JobID)
 continue
 // return nil, err
 }
@@ -701,7 +701,7 @@ func (r *queryResolver) NodeMetrics(ctx context.Context, cluster string, nodes [

 data, err := metricDataDispatcher.LoadNodeData(cluster, metrics, nodes, scopes, from, to, ctx)
 if err != nil {
-log.Warn("error while loading node data")
+cclog.Warn("error while loading node data")
 return nil, err
 }

@@ -713,7 +713,7 @@ func (r *queryResolver) NodeMetrics(ctx context.Context, cluster string, nodes [
 }
 host.SubCluster, err = archive.GetSubClusterByNode(cluster, hostname)
 if err != nil {
-log.Warnf("error in nodeMetrics resolver: %s", err)
+cclog.Warnf("error in nodeMetrics resolver: %s", err)
 }

 for metric, scopedMetrics := range metrics {
@@ -757,7 +757,7 @@ func (r *queryResolver) NodeMetricsList(ctx context.Context, cluster string, sub

 data, totalNodes, hasNextPage, err := metricDataDispatcher.LoadNodeListData(cluster, subCluster, nodeFilter, metrics, scopes, *resolution, from, to, page, ctx)
 if err != nil {
-log.Warn("error while loading node data")
+cclog.Warn("error while loading node data")
 return nil, err
 }

@@ -769,7 +769,7 @@ func (r *queryResolver) NodeMetricsList(ctx context.Context, cluster string, sub
 }
 host.SubCluster, err = archive.GetSubClusterByNode(cluster, hostname)
 if err != nil {
-log.Warnf("error in nodeMetrics resolver: %s", err)
+cclog.Warnf("error in nodeMetrics resolver: %s", err)
 }

 for metric, scopedMetrics := range metrics {
@@ -824,12 +824,10 @@ func (r *Resolver) Query() generated.QueryResolver { return &queryResolver{r} }
 // SubCluster returns generated.SubClusterResolver implementation.
 func (r *Resolver) SubCluster() generated.SubClusterResolver { return &subClusterResolver{r} }

-type (
-clusterResolver struct{ *Resolver }
-jobResolver struct{ *Resolver }
-metricValueResolver struct{ *Resolver }
-mutationResolver struct{ *Resolver }
-nodeResolver struct{ *Resolver }
-queryResolver struct{ *Resolver }
-subClusterResolver struct{ *Resolver }
-)
+type clusterResolver struct{ *Resolver }
+type jobResolver struct{ *Resolver }
+type metricValueResolver struct{ *Resolver }
+type mutationResolver struct{ *Resolver }
+type nodeResolver struct{ *Resolver }
+type queryResolver struct{ *Resolver }
+type subClusterResolver struct{ *Resolver }
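The hunk above only changes how the resolver types are spelled: a grouped type block becomes one declaration per type. The two forms are semantically identical; since these files carry gqlgen's "is the resolver for" stub comments, the ungrouped form is plausibly just what the code generator re-emits. A tiny sketch of the equivalence:

package main

type Resolver struct{}

// Grouped (old):
type (
	clusterResolverOld struct{ *Resolver }
	jobResolverOld     struct{ *Resolver }
)

// Ungrouped (new), as generated stubs typically spell it:
type clusterResolverNew struct{ *Resolver }
type jobResolverNew struct{ *Resolver }

func main() {}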
@@ -1,5 +1,5 @@
 // Copyright (C) NHR@FAU, University Erlangen-Nuremberg.
-// All rights reserved.
+// All rights reserved. This file is part of cc-backend.
 // Use of this source code is governed by a MIT-style
 // license that can be found in the LICENSE file.
 package graph
@@ -12,9 +12,8 @@ import (
 "github.com/99designs/gqlgen/graphql"
 "github.com/ClusterCockpit/cc-backend/internal/graph/model"
 "github.com/ClusterCockpit/cc-backend/internal/metricDataDispatcher"
-"github.com/ClusterCockpit/cc-backend/pkg/log"
+cclog "github.com/ClusterCockpit/cc-lib/ccLogger"
-"github.com/ClusterCockpit/cc-backend/pkg/schema"
+"github.com/ClusterCockpit/cc-lib/schema"
-// "github.com/ClusterCockpit/cc-backend/pkg/archive"
 )

 const MAX_JOBS_FOR_ANALYSIS = 500
@@ -28,7 +27,7 @@ func (r *queryResolver) rooflineHeatmap(
 ) ([][]float64, error) {
 jobs, err := r.Repo.QueryJobs(ctx, filter, &model.PageRequest{Page: 1, ItemsPerPage: MAX_JOBS_FOR_ANALYSIS + 1}, nil)
 if err != nil {
-log.Error("Error while querying jobs for roofline")
+cclog.Error("Error while querying jobs for roofline")
 return nil, err
 }
 if len(jobs) > MAX_JOBS_FOR_ANALYSIS {
@@ -56,13 +55,13 @@ func (r *queryResolver) rooflineHeatmap(

 jobdata, err := metricDataDispatcher.LoadData(job, []string{"flops_any", "mem_bw"}, []schema.MetricScope{schema.MetricScopeNode}, ctx, 0)
 if err != nil {
-log.Errorf("Error while loading roofline metrics for job %d", job.ID)
+cclog.Errorf("Error while loading roofline metrics for job %d", job.ID)
 return nil, err
 }

 flops_, membw_ := jobdata["flops_any"], jobdata["mem_bw"]
 if flops_ == nil && membw_ == nil {
-log.Infof("rooflineHeatmap(): 'flops_any' or 'mem_bw' missing for job %d", job.ID)
+cclog.Infof("rooflineHeatmap(): 'flops_any' or 'mem_bw' missing for job %d", job.ID)
 continue
 // return nil, fmt.Errorf("GRAPH/UTIL > 'flops_any' or 'mem_bw' missing for job %d", job.ID)
 }
@@ -70,7 +69,7 @@ func (r *queryResolver) rooflineHeatmap(
 flops, ok1 := flops_["node"]
 membw, ok2 := membw_["node"]
 if !ok1 || !ok2 {
-log.Info("rooflineHeatmap() query not implemented for where flops_any or mem_bw not available at 'node' level")
+cclog.Info("rooflineHeatmap() query not implemented for where flops_any or mem_bw not available at 'node' level")
 continue
 // TODO/FIXME:
 // return nil, errors.New("GRAPH/UTIL > todo: rooflineHeatmap() query not implemented for where flops_any or mem_bw not available at 'node' level")
@@ -105,7 +104,7 @@ func (r *queryResolver) rooflineHeatmap(
 func (r *queryResolver) jobsFootprints(ctx context.Context, filter []*model.JobFilter, metrics []string) (*model.Footprints, error) {
 jobs, err := r.Repo.QueryJobs(ctx, filter, &model.PageRequest{Page: 1, ItemsPerPage: MAX_JOBS_FOR_ANALYSIS + 1}, nil)
 if err != nil {
-log.Error("Error while querying jobs for footprint")
+cclog.Error("Error while querying jobs for footprint")
 return nil, err
 }
 if len(jobs) > MAX_JOBS_FOR_ANALYSIS {
@@ -128,7 +127,7 @@ func (r *queryResolver) jobsFootprints(ctx context.Context, filter []*model.JobF
 }

 if err := metricDataDispatcher.LoadAverages(job, metrics, avgs, ctx); err != nil {
-log.Error("Error while loading averages for footprint")
+cclog.Error("Error while loading averages for footprint")
 return nil, err
 }

@@ -1,5 +1,5 @@
 // Copyright (C) NHR@FAU, University Erlangen-Nuremberg.
-// All rights reserved.
+// All rights reserved. This file is part of cc-backend.
 // Use of this source code is governed by a MIT-style
 // license that can be found in the LICENSE file.
 package importer
@@ -15,8 +15,8 @@ import (
 "github.com/ClusterCockpit/cc-backend/internal/config"
 "github.com/ClusterCockpit/cc-backend/internal/repository"
 "github.com/ClusterCockpit/cc-backend/pkg/archive"
-"github.com/ClusterCockpit/cc-backend/pkg/log"
+cclog "github.com/ClusterCockpit/cc-lib/ccLogger"
-"github.com/ClusterCockpit/cc-backend/pkg/schema"
+"github.com/ClusterCockpit/cc-lib/schema"
 )

 // Import all jobs specified as `<path-to-meta.json>:<path-to-data.json>,...`
@@ -31,7 +31,7 @@ func HandleImportFlag(flag string) error {

 raw, err := os.ReadFile(files[0])
 if err != nil {
-log.Warn("Error while reading metadata file for import")
+cclog.Warn("Error while reading metadata file for import")
 return err
 }

@@ -47,13 +47,13 @@ func HandleImportFlag(flag string) error {
 MonitoringStatus: schema.MonitoringStatusRunningOrArchiving,
 }
 if err = dec.Decode(&job); err != nil {
-log.Warn("Error while decoding raw json metadata for import")
+cclog.Warn("Error while decoding raw json metadata for import")
 return err
 }

 raw, err = os.ReadFile(files[1])
 if err != nil {
-log.Warn("Error while reading jobdata file for import")
+cclog.Warn("Error while reading jobdata file for import")
 return err
 }

@@ -66,7 +66,7 @@ func HandleImportFlag(flag string) error {
 dec.DisallowUnknownFields()
 jobData := schema.JobData{}
 if err = dec.Decode(&jobData); err != nil {
-log.Warn("Error while decoding raw json jobdata for import")
+cclog.Warn("Error while decoding raw json jobdata for import")
 return err
 }

@@ -74,7 +74,7 @@ func HandleImportFlag(flag string) error {

 sc, err := archive.GetSubCluster(job.Cluster, job.SubCluster)
 if err != nil {
-log.Errorf("cannot get subcluster: %s", err.Error())
+cclog.Errorf("cannot get subcluster: %s", err.Error())
 return err
 }

@@ -94,7 +94,7 @@ func HandleImportFlag(flag string) error {

 job.RawFootprint, err = json.Marshal(job.Footprint)
 if err != nil {
-log.Warn("Error while marshaling job footprint")
+cclog.Warn("Error while marshaling job footprint")
 return err
 }

@@ -108,7 +108,7 @@ func HandleImportFlag(flag string) error {
 if i, err := archive.MetricIndex(sc.MetricConfig, fp); err == nil {
 // Note: For DB data, calculate and save as kWh
 if sc.MetricConfig[i].Energy == "energy" { // this metric has energy as unit (Joules)
-log.Warnf("Update EnergyFootprint for Job %d and Metric %s on cluster %s: Set to 'energy' in cluster.json: Not implemented, will return 0.0", job.JobID, job.Cluster, fp)
+cclog.Warnf("Update EnergyFootprint for Job %d and Metric %s on cluster %s: Set to 'energy' in cluster.json: Not implemented, will return 0.0", job.JobID, job.Cluster, fp)
 // FIXME: Needs sum as stats type
 } else if sc.MetricConfig[i].Energy == "power" { // this metric has power as unit (Watt)
 // Energy: Power (in Watts) * Time (in Seconds)
@@ -120,7 +120,7 @@ func HandleImportFlag(flag string) error {
 metricEnergy = math.Round(rawEnergy*100.0) / 100.0
 }
 } else {
-log.Warnf("Error while collecting energy metric %s for job, DB ID '%v', return '0.0'", fp, job.ID)
+cclog.Warnf("Error while collecting energy metric %s for job, DB ID '%v', return '0.0'", fp, job.ID)
 }

 job.EnergyFootprint[fp] = metricEnergy
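The energy bookkeeping in this hunk converts a power metric (average Watts over the job duration) into kWh, rounded to two decimals, per the "Energy: Power (in Watts) * Time (in Seconds)" comment. A hedged sketch of that arithmetic, assuming the standard W*s to kWh conversion (divide by 3600 and by 1000); the variable names are placeholders:

package main

import (
	"fmt"
	"math"
)

func main() {
	avgPowerWatts := 250.0
	durationSeconds := 7200.0
	// Power (W) * Time (s) gives watt-seconds; / 3600 -> Wh, / 1000 -> kWh.
	rawEnergy := avgPowerWatts * durationSeconds / 3600.0 / 1000.0
	metricEnergy := math.Round(rawEnergy*100.0) / 100.0 // two decimals
	fmt.Printf("%.2f kWh\n", metricEnergy)              // 0.50 kWh
}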
@@ -129,45 +129,45 @@ func HandleImportFlag(flag string) error {

 job.Energy = (math.Round(totalEnergy*100.0) / 100.0)
 if job.RawEnergyFootprint, err = json.Marshal(job.EnergyFootprint); err != nil {
-log.Warnf("Error while marshaling energy footprint for job INTO BYTES, DB ID '%v'", job.ID)
+cclog.Warnf("Error while marshaling energy footprint for job INTO BYTES, DB ID '%v'", job.ID)
 return err
 }

 job.RawResources, err = json.Marshal(job.Resources)
 if err != nil {
-log.Warn("Error while marshaling job resources")
+cclog.Warn("Error while marshaling job resources")
 return err
 }
 job.RawMetaData, err = json.Marshal(job.MetaData)
 if err != nil {
-log.Warn("Error while marshaling job metadata")
+cclog.Warn("Error while marshaling job metadata")
 return err
 }

 if err = SanityChecks(&job); err != nil {
-log.Warn("BaseJob SanityChecks failed")
+cclog.Warn("BaseJob SanityChecks failed")
 return err
 }

 if err = archive.GetHandle().ImportJob(&job, &jobData); err != nil {
-log.Error("Error while importing job")
+cclog.Error("Error while importing job")
 return err
 }

 id, err := r.InsertJob(&job)
 if err != nil {
-log.Warn("Error while job db insert")
+cclog.Warn("Error while job db insert")
 return err
 }

 for _, tag := range job.Tags {
 if err := r.ImportTag(id, tag.Type, tag.Name, tag.Scope); err != nil {
-log.Error("Error while adding or creating tag on import")
+cclog.Error("Error while adding or creating tag on import")
 return err
 }
 }

-log.Infof("successfully imported a new job (jobId: %d, cluster: %s, dbid: %d)", job.JobID, job.Cluster, id)
+cclog.Infof("successfully imported a new job (jobId: %d, cluster: %s, dbid: %d)", job.JobID, job.Cluster, id)
 }
 return nil
 }
@@ -1,5 +1,5 @@
 // Copyright (C) NHR@FAU, University Erlangen-Nuremberg.
-// All rights reserved.
+// All rights reserved. This file is part of cc-backend.
 // Use of this source code is governed by a MIT-style
 // license that can be found in the LICENSE file.
 package importer_test
@@ -16,7 +16,7 @@ import (
 "github.com/ClusterCockpit/cc-backend/internal/importer"
 "github.com/ClusterCockpit/cc-backend/internal/repository"
 "github.com/ClusterCockpit/cc-backend/pkg/archive"
-"github.com/ClusterCockpit/cc-backend/pkg/log"
+cclog "github.com/ClusterCockpit/cc-lib/ccLogger"
 )

 func copyFile(s string, d string) error {
@@ -78,7 +78,7 @@ func setup(t *testing.T) *repository.JobRepository {
 }
 ]}`

-log.Init("info", true)
+cclog.Init("info", true)
 tmpdir := t.TempDir()

 jobarchive := filepath.Join(tmpdir, "job-archive")
|
@@ -1,5 +1,5 @@
 // Copyright (C) NHR@FAU, University Erlangen-Nuremberg.
-// All rights reserved.
+// All rights reserved. This file is part of cc-backend.
 // Use of this source code is governed by a MIT-style
 // license that can be found in the LICENSE file.
 package importer
@@ -13,8 +13,8 @@ import (
 
 	"github.com/ClusterCockpit/cc-backend/internal/repository"
 	"github.com/ClusterCockpit/cc-backend/pkg/archive"
-	"github.com/ClusterCockpit/cc-backend/pkg/log"
-	"github.com/ClusterCockpit/cc-backend/pkg/schema"
+	cclog "github.com/ClusterCockpit/cc-lib/ccLogger"
+	"github.com/ClusterCockpit/cc-lib/schema"
 )
 
 const (
@@ -27,15 +27,15 @@
 func InitDB() error {
 	r := repository.GetJobRepository()
 	if err := r.Flush(); err != nil {
-		log.Errorf("repository initDB(): %v", err)
+		cclog.Errorf("repository initDB(): %v", err)
 		return err
 	}
 	starttime := time.Now()
-	log.Print("Building job table...")
+	cclog.Print("Building job table...")
 
 	t, err := r.TransactionInit()
 	if err != nil {
-		log.Warn("Error while initializing SQL transactions")
+		cclog.Warn("Error while initializing SQL transactions")
 		return err
 	}
 	tags := make(map[string]int64)
@@ -63,7 +63,7 @@ func InitDB() error {
 
 		sc, err := archive.GetSubCluster(jobMeta.Cluster, jobMeta.SubCluster)
 		if err != nil {
-			log.Errorf("cannot get subcluster: %s", err.Error())
+			cclog.Errorf("cannot get subcluster: %s", err.Error())
 			return err
 		}
 
@@ -83,7 +83,7 @@ func InitDB() error {
 
 		jobMeta.RawFootprint, err = json.Marshal(jobMeta.Footprint)
 		if err != nil {
-			log.Warn("Error while marshaling job footprint")
+			cclog.Warn("Error while marshaling job footprint")
 			return err
 		}
 
@@ -97,7 +97,7 @@ func InitDB() error {
 			if i, err := archive.MetricIndex(sc.MetricConfig, fp); err == nil {
 				// Note: For DB data, calculate and save as kWh
 				if sc.MetricConfig[i].Energy == "energy" { // this metric has energy as unit (Joules)
-					log.Warnf("Update EnergyFootprint for Job %d and Metric %s on cluster %s: Set to 'energy' in cluster.json: Not implemented, will return 0.0", jobMeta.JobID, jobMeta.Cluster, fp)
+					cclog.Warnf("Update EnergyFootprint for Job %d and Metric %s on cluster %s: Set to 'energy' in cluster.json: Not implemented, will return 0.0", jobMeta.JobID, jobMeta.Cluster, fp)
 					// FIXME: Needs sum as stats type
 				} else if sc.MetricConfig[i].Energy == "power" { // this metric has power as unit (Watt)
 					// Energy: Power (in Watts) * Time (in Seconds)
@@ -109,7 +109,7 @@ func InitDB() error {
 					metricEnergy = math.Round(rawEnergy*100.0) / 100.0
 				}
 			} else {
-				log.Warnf("Error while collecting energy metric %s for job, DB ID '%v', return '0.0'", fp, jobMeta.ID)
+				cclog.Warnf("Error while collecting energy metric %s for job, DB ID '%v', return '0.0'", fp, jobMeta.ID)
 			}
 
 			jobMeta.EnergyFootprint[fp] = metricEnergy
@@ -118,26 +118,26 @@ func InitDB() error {
 
 		jobMeta.Energy = (math.Round(totalEnergy*100.0) / 100.0)
 		if jobMeta.RawEnergyFootprint, err = json.Marshal(jobMeta.EnergyFootprint); err != nil {
-			log.Warnf("Error while marshaling energy footprint for job INTO BYTES, DB ID '%v'", jobMeta.ID)
+			cclog.Warnf("Error while marshaling energy footprint for job INTO BYTES, DB ID '%v'", jobMeta.ID)
 			return err
 		}
 
 		jobMeta.RawResources, err = json.Marshal(jobMeta.Resources)
 		if err != nil {
-			log.Errorf("repository initDB(): %v", err)
+			cclog.Errorf("repository initDB(): %v", err)
 			errorOccured++
 			continue
 		}
 
 		jobMeta.RawMetaData, err = json.Marshal(jobMeta.MetaData)
 		if err != nil {
-			log.Errorf("repository initDB(): %v", err)
+			cclog.Errorf("repository initDB(): %v", err)
 			errorOccured++
 			continue
 		}
 
 		if err := SanityChecks(jobMeta); err != nil {
-			log.Errorf("repository initDB(): %v", err)
+			cclog.Errorf("repository initDB(): %v", err)
 			errorOccured++
 			continue
 		}
@@ -145,7 +145,7 @@ func InitDB() error {
 		id, err := r.TransactionAddNamed(t,
 			repository.NamedJobInsert, jobMeta)
 		if err != nil {
-			log.Errorf("repository initDB(): %v", err)
+			cclog.Errorf("repository initDB(): %v", err)
 			errorOccured++
 			continue
 		}
@@ -158,7 +158,7 @@ func InitDB() error {
 					addTagQuery,
 					tag.Name, tag.Type)
 				if err != nil {
-					log.Errorf("Error adding tag: %v", err)
+					cclog.Errorf("Error adding tag: %v", err)
 					errorOccured++
 					continue
 				}
@@ -176,11 +176,11 @@ func InitDB() error {
 	}
 
 	if errorOccured > 0 {
-		log.Warnf("Error in import of %d jobs!", errorOccured)
+		cclog.Warnf("Error in import of %d jobs!", errorOccured)
 	}
 
 	r.TransactionEnd(t)
-	log.Printf("A total of %d jobs have been registered in %.3f seconds.\n", i, time.Since(starttime).Seconds())
+	cclog.Printf("A total of %d jobs have been registered in %.3f seconds.\n", i, time.Since(starttime).Seconds())
 	return nil
 }
 
@@ -190,7 +190,7 @@ func SanityChecks(job *schema.Job) error {
 		return fmt.Errorf("no such cluster: %v", job.Cluster)
 	}
 	if err := archive.AssignSubCluster(job); err != nil {
-		log.Warn("Error while assigning subcluster to job")
+		cclog.Warn("Error while assigning subcluster to job")
 		return err
 	}
 	if !job.State.Valid() {

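The port is mechanical throughout these files: the old `pkg/log` import is replaced by an aliased cc-lib import, so call sites only change their package prefix. A minimal sketch of the pattern (the function body is illustrative, taken from a log message in the hunks above):

    package importer

    import (
        cclog "github.com/ClusterCockpit/cc-lib/ccLogger"
    )

    // After the port, every former log.Xxx call goes through the cclog alias;
    // signatures and message strings stay unchanged.
    func logExample(err error) {
        cclog.Errorf("repository initDB(): %v", err)
    }
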
@@ -1,5 +1,5 @@
 // Copyright (C) NHR@FAU, University Erlangen-Nuremberg.
-// All rights reserved.
+// All rights reserved. This file is part of cc-backend.
 // Use of this source code is governed by a MIT-style
 // license that can be found in the LICENSE file.
 package importer
@@ -7,7 +7,7 @@ package importer
 import (
 	"math"
 
-	ccunits "github.com/ClusterCockpit/cc-units"
+	ccunits "github.com/ClusterCockpit/cc-lib/ccUnits"
 )
 
 func getNormalizationFactor(v float64) (float64, int) {

@@ -1,5 +1,5 @@
 // Copyright (C) NHR@FAU, University Erlangen-Nuremberg.
-// All rights reserved.
+// All rights reserved. This file is part of cc-backend.
 // Use of this source code is governed by a MIT-style
 // license that can be found in the LICENSE file.
 package importer
@@ -8,7 +8,7 @@ import (
 	"fmt"
 	"testing"
 
-	ccunits "github.com/ClusterCockpit/cc-units"
+	ccunits "github.com/ClusterCockpit/cc-lib/ccUnits"
 )
 
 func TestNormalizeFactor(t *testing.T) {

@@ -1,5 +1,5 @@
 // Copyright (C) NHR@FAU, University Erlangen-Nuremberg.
-// All rights reserved.
+// All rights reserved. This file is part of cc-backend.
 // Use of this source code is governed by a MIT-style
 // license that can be found in the LICENSE file.
 package metricDataDispatcher
@@ -14,10 +14,10 @@ import (
 	"github.com/ClusterCockpit/cc-backend/internal/graph/model"
 	"github.com/ClusterCockpit/cc-backend/internal/metricdata"
 	"github.com/ClusterCockpit/cc-backend/pkg/archive"
-	"github.com/ClusterCockpit/cc-backend/pkg/log"
-	"github.com/ClusterCockpit/cc-backend/pkg/lrucache"
-	"github.com/ClusterCockpit/cc-backend/pkg/resampler"
-	"github.com/ClusterCockpit/cc-backend/pkg/schema"
+	cclog "github.com/ClusterCockpit/cc-lib/ccLogger"
+	"github.com/ClusterCockpit/cc-lib/lrucache"
+	"github.com/ClusterCockpit/cc-lib/resampler"
+	"github.com/ClusterCockpit/cc-lib/schema"
 )
 
 var cache *lrucache.Cache = lrucache.New(128 * 1024 * 1024)
@@ -68,10 +68,10 @@ func LoadData(job *schema.Job,
 		jd, err = repo.LoadData(job, metrics, scopes, ctx, resolution)
 		if err != nil {
 			if len(jd) != 0 {
-				log.Warnf("partial error: %s", err.Error())
+				cclog.Warnf("partial error: %s", err.Error())
 				// return err, 0, 0 // Reactivating will block archiving on one partial error
 			} else {
-				log.Error("Error while loading job data from metric repository")
+				cclog.Error("Error while loading job data from metric repository")
 				return err, 0, 0
 			}
 		}
@@ -80,15 +80,15 @@ func LoadData(job *schema.Job,
 		var jd_temp schema.JobData
 		jd_temp, err = archive.GetHandle().LoadJobData(job)
 		if err != nil {
-			log.Error("Error while loading job data from archive")
+			cclog.Error("Error while loading job data from archive")
 			return err, 0, 0
 		}
 
-		//Deep copy the cached archive hashmap
+		// Deep copy the cached archive hashmap
 		jd = metricdata.DeepCopy(jd_temp)
 
-		//Resampling for archived data.
-		//Pass the resolution from frontend here.
+		// Resampling for archived data.
+		// Pass the resolution from frontend here.
 		for _, v := range jd {
 			for _, v_ := range v {
 				timestep := 0
@@ -178,7 +178,7 @@ func LoadData(job *schema.Job,
 	})
 
 	if err, ok := data.(error); ok {
-		log.Error("Error in returned dataset")
+		cclog.Error("Error in returned dataset")
 		return nil, err
 	}
 
@@ -203,7 +203,7 @@ func LoadAverages(
 
 	stats, err := repo.LoadStats(job, metrics, ctx) // #166 how to handle stats for acc normalizazion?
 	if err != nil {
-		log.Errorf("Error while loading statistics for job %v (User %v, Project %v)", job.JobID, job.User, job.Project)
+		cclog.Errorf("Error while loading statistics for job %v (User %v, Project %v)", job.JobID, job.User, job.Project)
 		return err
 	}
 
@@ -231,7 +231,6 @@ func LoadScopedJobStats(
 	scopes []schema.MetricScope,
 	ctx context.Context,
 ) (schema.ScopedJobStats, error) {
-
 	if job.State != schema.JobStateRunning && !config.Keys.DisableArchive {
 		return archive.LoadScopedStatsFromArchive(job, metrics, scopes)
 	}
@@ -243,7 +242,7 @@ func LoadScopedJobStats(
 
 	scopedStats, err := repo.LoadScopedStats(job, metrics, scopes, ctx)
 	if err != nil {
-		log.Errorf("error while loading scoped statistics for job %d (User %s, Project %s)", job.JobID, job.User, job.Project)
+		cclog.Errorf("error while loading scoped statistics for job %d (User %s, Project %s)", job.JobID, job.User, job.Project)
 		return nil, err
 	}
 
@@ -268,7 +267,7 @@ func LoadJobStats(
 
 	stats, err := repo.LoadStats(job, metrics, ctx)
 	if err != nil {
-		log.Errorf("error while loading statistics for job %d (User %s, Project %s)", job.JobID, job.User, job.Project)
+		cclog.Errorf("error while loading statistics for job %d (User %s, Project %s)", job.JobID, job.User, job.Project)
 		return data, err
 	}
 
@@ -318,9 +317,9 @@ func LoadNodeData(
 	data, err := repo.LoadNodeData(cluster, metrics, nodes, scopes, from, to, ctx)
 	if err != nil {
 		if len(data) != 0 {
-			log.Warnf("partial error: %s", err.Error())
+			cclog.Warnf("partial error: %s", err.Error())
 		} else {
-			log.Error("Error while loading node data from metric repository")
+			cclog.Error("Error while loading node data from metric repository")
 			return nil, err
 		}
 	}
@@ -355,9 +354,9 @@ func LoadNodeListData(
 	data, totalNodes, hasNextPage, err := repo.LoadNodeListData(cluster, subCluster, nodeFilter, metrics, scopes, resolution, from, to, page, ctx)
 	if err != nil {
 		if len(data) != 0 {
-			log.Warnf("partial error: %s", err.Error())
+			cclog.Warnf("partial error: %s", err.Error())
 		} else {
-			log.Error("Error while loading node data from metric repository")
+			cclog.Error("Error while loading node data from metric repository")
 			return nil, totalNodes, hasNextPage, err
 		}
 	}

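The dispatcher keeps its byte-sized LRU cache; only the import path moves to cc-lib. A sketch of the compute-on-miss pattern visible above, assuming the `Get` signature carried over unchanged from the old `pkg/lrucache` (the key and payload are illustrative):

    package main

    import (
        "time"

        "github.com/ClusterCockpit/cc-lib/lrucache"
    )

    var cache = lrucache.New(128 * 1024 * 1024) // capacity in bytes

    // lookup returns the cached value for key; on a miss the closure computes
    // it and reports a TTL plus an approximate size in bytes for eviction.
    func lookup(key string) any {
        return cache.Get(key, func() (value any, ttl time.Duration, size int) {
            payload := "expensive result" // stand-in for loaded job data
            return payload, time.Hour, len(payload)
        })
    }
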
@@ -1,5 +1,5 @@
 // Copyright (C) NHR@FAU, University Erlangen-Nuremberg.
-// All rights reserved.
+// All rights reserved. This file is part of cc-backend.
 // Use of this source code is governed by a MIT-style
 // license that can be found in the LICENSE file.
 package metricdata
@@ -18,8 +18,8 @@ import (
 
 	"github.com/ClusterCockpit/cc-backend/internal/graph/model"
 	"github.com/ClusterCockpit/cc-backend/pkg/archive"
-	"github.com/ClusterCockpit/cc-backend/pkg/log"
-	"github.com/ClusterCockpit/cc-backend/pkg/schema"
+	cclog "github.com/ClusterCockpit/cc-lib/ccLogger"
+	"github.com/ClusterCockpit/cc-lib/schema"
 )
 
 type CCMetricStoreConfig struct {
@@ -82,7 +82,7 @@ type ApiMetricData struct {
 func (ccms *CCMetricStore) Init(rawConfig json.RawMessage) error {
 	var config CCMetricStoreConfig
 	if err := json.Unmarshal(rawConfig, &config); err != nil {
-		log.Warn("Error while unmarshaling raw json config")
+		cclog.Warn("Error while unmarshaling raw json config")
 		return err
 	}
 
@@ -129,13 +129,13 @@ func (ccms *CCMetricStore) doRequest(
 ) (*ApiQueryResponse, error) {
 	buf := &bytes.Buffer{}
 	if err := json.NewEncoder(buf).Encode(body); err != nil {
-		log.Errorf("Error while encoding request body: %s", err.Error())
+		cclog.Errorf("Error while encoding request body: %s", err.Error())
 		return nil, err
 	}
 
 	req, err := http.NewRequestWithContext(ctx, http.MethodGet, ccms.queryEndpoint, buf)
 	if err != nil {
-		log.Errorf("Error while building request body: %s", err.Error())
+		cclog.Errorf("Error while building request body: %s", err.Error())
 		return nil, err
 	}
 	if ccms.jwt != "" {
@@ -151,7 +151,7 @@ func (ccms *CCMetricStore) doRequest(
 
 	res, err := ccms.client.Do(req)
 	if err != nil {
-		log.Errorf("Error while performing request: %s", err.Error())
+		cclog.Errorf("Error while performing request: %s", err.Error())
 		return nil, err
 	}
 
@@ -161,7 +161,7 @@ func (ccms *CCMetricStore) doRequest(
 
 	var resBody ApiQueryResponse
 	if err := json.NewDecoder(bufio.NewReader(res.Body)).Decode(&resBody); err != nil {
-		log.Errorf("Error while decoding result body: %s", err.Error())
+		cclog.Errorf("Error while decoding result body: %s", err.Error())
 		return nil, err
 	}
 
@@ -177,7 +177,7 @@ func (ccms *CCMetricStore) LoadData(
 ) (schema.JobData, error) {
 	queries, assignedScope, err := ccms.buildQueries(job, metrics, scopes, resolution)
 	if err != nil {
-		log.Errorf("Error while building queries for jobId %d, Metrics %v, Scopes %v: %s", job.JobID, metrics, scopes, err.Error())
+		cclog.Errorf("Error while building queries for jobId %d, Metrics %v, Scopes %v: %s", job.JobID, metrics, scopes, err.Error())
 		return nil, err
 	}
 
@@ -192,7 +192,7 @@ func (ccms *CCMetricStore) LoadData(
 
 	resBody, err := ccms.doRequest(ctx, &req)
 	if err != nil {
-		log.Errorf("Error while performing request: %s", err.Error())
+		cclog.Errorf("Error while performing request: %s", err.Error())
 		return nil, err
 	}
 
@@ -298,7 +298,7 @@ func (ccms *CCMetricStore) buildQueries(
 		mc := archive.GetMetricConfig(job.Cluster, metric)
 		if mc == nil {
 			// return nil, fmt.Errorf("METRICDATA/CCMS > metric '%s' is not specified for cluster '%s'", metric, job.Cluster)
-			log.Infof("metric '%s' is not specified for cluster '%s'", metric, job.Cluster)
+			cclog.Infof("metric '%s' is not specified for cluster '%s'", metric, job.Cluster)
 			continue
 		}
 
@@ -572,7 +572,7 @@ func (ccms *CCMetricStore) LoadStats(
 ) (map[string]map[string]schema.MetricStatistics, error) {
 	queries, _, err := ccms.buildQueries(job, metrics, []schema.MetricScope{schema.MetricScopeNode}, 0) // #166 Add scope shere for analysis view accelerator normalization?
 	if err != nil {
-		log.Errorf("Error while building queries for jobId %d, Metrics %v: %s", job.JobID, metrics, err.Error())
+		cclog.Errorf("Error while building queries for jobId %d, Metrics %v: %s", job.JobID, metrics, err.Error())
 		return nil, err
 	}
 
@@ -587,7 +587,7 @@ func (ccms *CCMetricStore) LoadStats(
 
 	resBody, err := ccms.doRequest(ctx, &req)
 	if err != nil {
-		log.Errorf("Error while performing request: %s", err.Error())
+		cclog.Errorf("Error while performing request: %s", err.Error())
 		return nil, err
 	}
 
@@ -597,7 +597,7 @@ func (ccms *CCMetricStore) LoadStats(
 		metric := ccms.toLocalName(query.Metric)
 		data := res[0]
 		if data.Error != nil {
-			log.Errorf("fetching %s for node %s failed: %s", metric, query.Hostname, *data.Error)
+			cclog.Errorf("fetching %s for node %s failed: %s", metric, query.Hostname, *data.Error)
 			continue
 		}
 
@@ -608,7 +608,7 @@ func (ccms *CCMetricStore) LoadStats(
 		}
 
 		if data.Avg.IsNaN() || data.Min.IsNaN() || data.Max.IsNaN() {
-			log.Warnf("fetching %s for node %s failed: one of avg/min/max is NaN", metric, query.Hostname)
+			cclog.Warnf("fetching %s for node %s failed: one of avg/min/max is NaN", metric, query.Hostname)
 			continue
 		}
 
@@ -631,7 +631,7 @@ func (ccms *CCMetricStore) LoadScopedStats(
 ) (schema.ScopedJobStats, error) {
 	queries, assignedScope, err := ccms.buildQueries(job, metrics, scopes, 0)
 	if err != nil {
-		log.Errorf("Error while building queries for jobId %d, Metrics %v, Scopes %v: %s", job.JobID, metrics, scopes, err.Error())
+		cclog.Errorf("Error while building queries for jobId %d, Metrics %v, Scopes %v: %s", job.JobID, metrics, scopes, err.Error())
 		return nil, err
 	}
 
@@ -646,7 +646,7 @@ func (ccms *CCMetricStore) LoadScopedStats(
 
 	resBody, err := ccms.doRequest(ctx, &req)
 	if err != nil {
-		log.Errorf("Error while performing request: %s", err.Error())
+		cclog.Errorf("Error while performing request: %s", err.Error())
 		return nil, err
 	}
 
@@ -747,7 +747,7 @@ func (ccms *CCMetricStore) LoadNodeData(
 
 	resBody, err := ccms.doRequest(ctx, &req)
 	if err != nil {
-		log.Errorf("Error while performing request: %s", err.Error())
+		cclog.Errorf("Error while performing request: %s", err.Error())
 		return nil, err
 	}
 
@@ -863,7 +863,7 @@ func (ccms *CCMetricStore) LoadNodeListData(
 
 	queries, assignedScope, err := ccms.buildNodeQueries(cluster, subCluster, nodes, metrics, scopes, resolution)
 	if err != nil {
-		log.Errorf("Error while building node queries for Cluster %s, SubCLuster %s, Metrics %v, Scopes %v: %s", cluster, subCluster, metrics, scopes, err.Error())
+		cclog.Errorf("Error while building node queries for Cluster %s, SubCLuster %s, Metrics %v, Scopes %v: %s", cluster, subCluster, metrics, scopes, err.Error())
 		return nil, totalNodes, hasNextPage, err
 	}
 
@@ -878,7 +878,7 @@ func (ccms *CCMetricStore) LoadNodeListData(
 
 	resBody, err := ccms.doRequest(ctx, &req)
 	if err != nil {
-		log.Errorf("Error while performing request: %s", err.Error())
+		cclog.Errorf("Error while performing request: %s", err.Error())
 		return nil, totalNodes, hasNextPage, err
 	}
 
@@ -982,7 +982,7 @@ func (ccms *CCMetricStore) buildNodeQueries(
 	if subCluster != "" {
 		subClusterTopol, scterr = archive.GetSubCluster(cluster, subCluster)
 		if scterr != nil {
-			log.Errorf("could not load cluster %s subCluster %s topology: %s", cluster, subCluster, scterr.Error())
+			cclog.Errorf("could not load cluster %s subCluster %s topology: %s", cluster, subCluster, scterr.Error())
 			return nil, nil, scterr
 		}
 	}
@@ -992,7 +992,7 @@ func (ccms *CCMetricStore) buildNodeQueries(
 		mc := archive.GetMetricConfig(cluster, metric)
 		if mc == nil {
 			// return nil, fmt.Errorf("METRICDATA/CCMS > metric '%s' is not specified for cluster '%s'", metric, cluster)
-			log.Warnf("metric '%s' is not specified for cluster '%s'", metric, cluster)
+			cclog.Warnf("metric '%s' is not specified for cluster '%s'", metric, cluster)
 			continue
 		}
 

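doRequest above is a plain encode-request-decode round trip. Reduced to the standard library (the endpoint, body, and response types here are placeholders, not the cc-metric-store API):

    package main

    import (
        "bufio"
        "bytes"
        "context"
        "encoding/json"
        "net/http"
    )

    // query JSON-encodes body, sends it as a GET carrying the caller's
    // context, and decodes the JSON response into out, mirroring the
    // structure of doRequest above.
    func query(ctx context.Context, endpoint string, body, out any) error {
        buf := &bytes.Buffer{}
        if err := json.NewEncoder(buf).Encode(body); err != nil {
            return err
        }
        req, err := http.NewRequestWithContext(ctx, http.MethodGet, endpoint, buf)
        if err != nil {
            return err
        }
        res, err := http.DefaultClient.Do(req)
        if err != nil {
            return err
        }
        defer res.Body.Close()
        return json.NewDecoder(bufio.NewReader(res.Body)).Decode(out)
    }
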
@@ -1,5 +1,5 @@
 // Copyright (C) NHR@FAU, University Erlangen-Nuremberg.
-// All rights reserved.
+// All rights reserved. This file is part of cc-backend.
 // Use of this source code is governed by a MIT-style
 // license that can be found in the LICENSE file.
 package metricdata
@@ -12,8 +12,8 @@ import (
 
 	"github.com/ClusterCockpit/cc-backend/internal/config"
 	"github.com/ClusterCockpit/cc-backend/internal/graph/model"
-	"github.com/ClusterCockpit/cc-backend/pkg/log"
-	"github.com/ClusterCockpit/cc-backend/pkg/schema"
+	cclog "github.com/ClusterCockpit/cc-lib/ccLogger"
+	"github.com/ClusterCockpit/cc-lib/schema"
 )
 
 type MetricDataRepository interface {
@@ -46,7 +46,7 @@ func Init() error {
 			Kind string `json:"kind"`
 		}
 		if err := json.Unmarshal(cluster.MetricDataRepository, &kind); err != nil {
-			log.Warn("Error while unmarshaling raw json MetricDataRepository")
+			cclog.Warn("Error while unmarshaling raw json MetricDataRepository")
 			return err
 		}
 
@@ -63,7 +63,7 @@ func Init() error {
 		}
 
 		if err := mdr.Init(cluster.MetricDataRepository); err != nil {
-			log.Errorf("Error initializing MetricDataRepository %v for cluster %v", kind.Kind, cluster.Name)
+			cclog.Errorf("Error initializing MetricDataRepository %v for cluster %v", kind.Kind, cluster.Name)
			return err
 		}
 		metricDataRepos[cluster.Name] = mdr

@@ -1,5 +1,5 @@
 // Copyright (C) 2022 DKRZ
-// All rights reserved.
+// All rights reserved. This file is part of cc-backend.
 // Use of this source code is governed by a MIT-style
 // license that can be found in the LICENSE file.
 package metricdata
@@ -22,8 +22,8 @@ import (
 
 	"github.com/ClusterCockpit/cc-backend/internal/graph/model"
 	"github.com/ClusterCockpit/cc-backend/pkg/archive"
-	"github.com/ClusterCockpit/cc-backend/pkg/log"
-	"github.com/ClusterCockpit/cc-backend/pkg/schema"
+	cclog "github.com/ClusterCockpit/cc-lib/ccLogger"
+	"github.com/ClusterCockpit/cc-lib/schema"
 	promapi "github.com/prometheus/client_golang/api"
 	promv1 "github.com/prometheus/client_golang/api/prometheus/v1"
 	promcfg "github.com/prometheus/common/config"
@@ -160,7 +160,7 @@ func (pdb *PrometheusDataRepository) Init(rawConfig json.RawMessage) error {
 	var config PrometheusDataRepositoryConfig
 	// parse config
 	if err := json.Unmarshal(rawConfig, &config); err != nil {
-		log.Warn("Error while unmarshaling raw json config")
+		cclog.Warn("Error while unmarshaling raw json config")
 		return err
 	}
 	// support basic authentication
@@ -179,7 +179,7 @@ func (pdb *PrometheusDataRepository) Init(rawConfig json.RawMessage) error {
 		RoundTripper: rt,
 	})
 	if err != nil {
-		log.Error("Error while initializing new prometheus client")
+		cclog.Error("Error while initializing new prometheus client")
 		return err
 	}
 	// init query client
@@ -192,9 +192,9 @@ func (pdb *PrometheusDataRepository) Init(rawConfig json.RawMessage) error {
 	for metric, templ := range config.Templates {
 		pdb.templates[metric], err = template.New(metric).Parse(templ)
 		if err == nil {
-			log.Debugf("Added PromQL template for %s: %s", metric, templ)
+			cclog.Debugf("Added PromQL template for %s: %s", metric, templ)
 		} else {
-			log.Warnf("Failed to parse PromQL template %s for metric %s", templ, metric)
+			cclog.Warnf("Failed to parse PromQL template %s for metric %s", templ, metric)
 		}
 	}
 	return nil
@@ -221,7 +221,7 @@ func (pdb *PrometheusDataRepository) FormatQuery(
 			return "", errors.New(fmt.Sprintf("METRICDATA/PROMETHEUS > Error compiling template %v", templ))
 		} else {
 			query := buf.String()
-			log.Debugf("PromQL: %s", query)
+			cclog.Debugf("PromQL: %s", query)
 			return query, nil
 		}
 	} else {
@@ -285,7 +285,7 @@ func (pdb *PrometheusDataRepository) LoadData(
 	for _, scope := range scopes {
 		if scope != schema.MetricScopeNode {
 			logOnce.Do(func() {
-				log.Infof("Scope '%s' requested, but not yet supported: Will return 'node' scope only.", scope)
+				cclog.Infof("Scope '%s' requested, but not yet supported: Will return 'node' scope only.", scope)
 			})
 			continue
 		}
@@ -293,12 +293,12 @@ func (pdb *PrometheusDataRepository) LoadData(
 		for _, metric := range metrics {
 			metricConfig := archive.GetMetricConfig(job.Cluster, metric)
 			if metricConfig == nil {
-				log.Warnf("Error in LoadData: Metric %s for cluster %s not configured", metric, job.Cluster)
+				cclog.Warnf("Error in LoadData: Metric %s for cluster %s not configured", metric, job.Cluster)
 				return nil, errors.New("Prometheus config error")
 			}
 			query, err := pdb.FormatQuery(metric, scope, nodes, job.Cluster)
 			if err != nil {
-				log.Warn("Error while formatting prometheus query")
+				cclog.Warn("Error while formatting prometheus query")
 				return nil, err
 			}
 
@@ -310,11 +310,11 @@ func (pdb *PrometheusDataRepository) LoadData(
 			}
 			result, warnings, err := pdb.queryClient.QueryRange(ctx, query, r)
 			if err != nil {
-				log.Errorf("Prometheus query error in LoadData: %v\nQuery: %s", err, query)
+				cclog.Errorf("Prometheus query error in LoadData: %v\nQuery: %s", err, query)
 				return nil, errors.New("Prometheus query error")
 			}
 			if len(warnings) > 0 {
-				log.Warnf("Warnings: %v\n", warnings)
+				cclog.Warnf("Warnings: %v\n", warnings)
 			}
 
 			// init data structures
@@ -360,7 +360,7 @@ func (pdb *PrometheusDataRepository) LoadStats(
 
 	data, err := pdb.LoadData(job, metrics, []schema.MetricScope{schema.MetricScopeNode}, ctx, 0 /*resolution here*/)
 	if err != nil {
-		log.Warn("Error while loading job for stats")
+		cclog.Warn("Error while loading job for stats")
 		return nil, err
 	}
 	for metric, metricData := range data {
@@ -391,19 +391,19 @@ func (pdb *PrometheusDataRepository) LoadNodeData(
 	for _, scope := range scopes {
 		if scope != schema.MetricScopeNode {
 			logOnce.Do(func() {
-				log.Infof("Note: Scope '%s' requested, but not yet supported: Will return 'node' scope only.", scope)
+				cclog.Infof("Note: Scope '%s' requested, but not yet supported: Will return 'node' scope only.", scope)
 			})
 			continue
 		}
 		for _, metric := range metrics {
 			metricConfig := archive.GetMetricConfig(cluster, metric)
 			if metricConfig == nil {
-				log.Warnf("Error in LoadNodeData: Metric %s for cluster %s not configured", metric, cluster)
+				cclog.Warnf("Error in LoadNodeData: Metric %s for cluster %s not configured", metric, cluster)
 				return nil, errors.New("Prometheus config error")
 			}
 			query, err := pdb.FormatQuery(metric, scope, nodes, cluster)
 			if err != nil {
-				log.Warn("Error while formatting prometheus query")
+				cclog.Warn("Error while formatting prometheus query")
 				return nil, err
 			}
 
@@ -415,11 +415,11 @@ func (pdb *PrometheusDataRepository) LoadNodeData(
 			}
 			result, warnings, err := pdb.queryClient.QueryRange(ctx, query, r)
 			if err != nil {
-				log.Errorf("Prometheus query error in LoadNodeData: %v\n", err)
+				cclog.Errorf("Prometheus query error in LoadNodeData: %v\n", err)
 				return nil, errors.New("Prometheus query error")
 			}
 			if len(warnings) > 0 {
-				log.Warnf("Warnings: %v\n", warnings)
+				cclog.Warnf("Warnings: %v\n", warnings)
 			}
 
 			step := int64(metricConfig.Timestep)
@@ -444,7 +444,7 @@ func (pdb *PrometheusDataRepository) LoadNodeData(
 		}
 	}
 	t1 := time.Since(t0)
-	log.Debugf("LoadNodeData of %v nodes took %s", len(data), t1)
+	cclog.Debugf("LoadNodeData of %v nodes took %s", len(data), t1)
 	return data, nil
 }
 
@@ -459,7 +459,7 @@ func (pdb *PrometheusDataRepository) LoadScopedStats(
 	scopedJobStats := make(schema.ScopedJobStats)
 	data, err := pdb.LoadData(job, metrics, []schema.MetricScope{schema.MetricScopeNode}, ctx, 0 /*resolution here*/)
 	if err != nil {
-		log.Warn("Error while loading job for scopedJobStats")
+		cclog.Warn("Error while loading job for scopedJobStats")
 		return nil, err
 	}
 
@@ -467,7 +467,7 @@ func (pdb *PrometheusDataRepository) LoadScopedStats(
 	for _, scope := range scopes {
 		if scope != schema.MetricScopeNode {
 			logOnce.Do(func() {
-				log.Infof("Note: Scope '%s' requested, but not yet supported: Will return 'node' scope only.", scope)
+				cclog.Infof("Note: Scope '%s' requested, but not yet supported: Will return 'node' scope only.", scope)
 			})
 			continue
 		}
@@ -563,7 +563,7 @@ func (pdb *PrometheusDataRepository) LoadNodeListData(
 	for _, scope := range scopes {
 		if scope != schema.MetricScopeNode {
 			logOnce.Do(func() {
-				log.Infof("Note: Scope '%s' requested, but not yet supported: Will return 'node' scope only.", scope)
+				cclog.Infof("Note: Scope '%s' requested, but not yet supported: Will return 'node' scope only.", scope)
 			})
 			continue
 		}
@@ -571,12 +571,12 @@ func (pdb *PrometheusDataRepository) LoadNodeListData(
 		for _, metric := range metrics {
 			metricConfig := archive.GetMetricConfig(cluster, metric)
 			if metricConfig == nil {
-				log.Warnf("Error in LoadNodeListData: Metric %s for cluster %s not configured", metric, cluster)
+				cclog.Warnf("Error in LoadNodeListData: Metric %s for cluster %s not configured", metric, cluster)
 				return nil, totalNodes, hasNextPage, errors.New("Prometheus config error")
 			}
 			query, err := pdb.FormatQuery(metric, scope, nodes, cluster)
 			if err != nil {
-				log.Warn("Error while formatting prometheus query")
+				cclog.Warn("Error while formatting prometheus query")
 				return nil, totalNodes, hasNextPage, err
 			}
 
@@ -588,11 +588,11 @@ func (pdb *PrometheusDataRepository) LoadNodeListData(
 			}
 			result, warnings, err := pdb.queryClient.QueryRange(ctx, query, r)
 			if err != nil {
-				log.Errorf("Prometheus query error in LoadNodeData: %v\n", err)
+				cclog.Errorf("Prometheus query error in LoadNodeData: %v\n", err)
 				return nil, totalNodes, hasNextPage, errors.New("Prometheus query error")
 			}
 			if len(warnings) > 0 {
-				log.Warnf("Warnings: %v\n", warnings)
+				cclog.Warnf("Warnings: %v\n", warnings)
 			}
 
 			step := int64(metricConfig.Timestep)
@@ -628,6 +628,6 @@ func (pdb *PrometheusDataRepository) LoadNodeListData(
 		}
 	}
 	t1 := time.Since(t0)
-	log.Debugf("LoadNodeListData of %v nodes took %s", len(data), t1)
+	cclog.Debugf("LoadNodeListData of %v nodes took %s", len(data), t1)
 	return data, totalNodes, hasNextPage, nil
 }

@@ -1,5 +1,5 @@
 // Copyright (C) NHR@FAU, University Erlangen-Nuremberg.
-// All rights reserved.
+// All rights reserved. This file is part of cc-backend.
 // Use of this source code is governed by a MIT-style
 // license that can be found in the LICENSE file.
 package metricdata
@@ -10,7 +10,7 @@ import (
 	"time"
 
 	"github.com/ClusterCockpit/cc-backend/internal/graph/model"
-	"github.com/ClusterCockpit/cc-backend/pkg/schema"
+	"github.com/ClusterCockpit/cc-lib/schema"
 )
 
 var TestLoadDataCallback func(job *schema.Job, metrics []string, scopes []schema.MetricScope, ctx context.Context, resolution int) (schema.JobData, error) = func(job *schema.Job, metrics []string, scopes []schema.MetricScope, ctx context.Context, resolution int) (schema.JobData, error) {
@@ -29,16 +29,16 @@ func (tmdr *TestMetricDataRepository) LoadData(
 	metrics []string,
 	scopes []schema.MetricScope,
 	ctx context.Context,
-	resolution int) (schema.JobData, error) {
+	resolution int,
+) (schema.JobData, error) {
 	return TestLoadDataCallback(job, metrics, scopes, ctx, resolution)
 }
 
 func (tmdr *TestMetricDataRepository) LoadStats(
 	job *schema.Job,
 	metrics []string,
-	ctx context.Context) (map[string]map[string]schema.MetricStatistics, error) {
+	ctx context.Context,
+) (map[string]map[string]schema.MetricStatistics, error) {
 	panic("TODO")
 }
 
@@ -46,8 +46,8 @@ func (tmdr *TestMetricDataRepository) LoadScopedStats(
 	job *schema.Job,
 	metrics []string,
 	scopes []schema.MetricScope,
-	ctx context.Context) (schema.ScopedJobStats, error) {
+	ctx context.Context,
+) (schema.ScopedJobStats, error) {
 	panic("TODO")
 }
 
@@ -56,8 +56,8 @@ func (tmdr *TestMetricDataRepository) LoadNodeData(
 	metrics, nodes []string,
 	scopes []schema.MetricScope,
 	from, to time.Time,
-	ctx context.Context) (map[string]map[string][]*schema.JobMetric, error) {
+	ctx context.Context,
+) (map[string]map[string][]*schema.JobMetric, error) {
 	panic("TODO")
 }
 
@@ -70,7 +70,6 @@ func (tmdr *TestMetricDataRepository) LoadNodeListData(
 	page *model.PageRequest,
 	ctx context.Context,
 ) (map[string]schema.JobData, int, bool, error) {
-
 	panic("TODO")
 }
 

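The test stub keeps its package-level callback, so tests can inject canned data without a live metric store. A sketch, assuming the exported names stay as in the hunk above (the empty JobData is illustrative):

    package metricdata_test

    import (
        "context"

        "github.com/ClusterCockpit/cc-backend/internal/metricdata"
        "github.com/ClusterCockpit/cc-lib/schema"
    )

    func init() {
        // Route LoadData through a canned response for tests.
        metricdata.TestLoadDataCallback = func(job *schema.Job, metrics []string,
            scopes []schema.MetricScope, ctx context.Context, resolution int,
        ) (schema.JobData, error) {
            return schema.JobData{}, nil
        }
    }
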
@@ -1,5 +1,5 @@
 // Copyright (C) NHR@FAU, University Erlangen-Nuremberg.
-// All rights reserved.
+// All rights reserved. This file is part of cc-backend.
 // Use of this source code is governed by a MIT-style
 // license that can be found in the LICENSE file.
 package repository
@@ -9,7 +9,7 @@ import (
 	"sync"
 	"time"
 
-	"github.com/ClusterCockpit/cc-backend/pkg/log"
+	cclog "github.com/ClusterCockpit/cc-lib/ccLogger"
 	"github.com/jmoiron/sqlx"
 	"github.com/mattn/go-sqlite3"
 	"github.com/qustavo/sqlhooks/v2"
@@ -53,7 +53,7 @@ func Connect(driver string, db string) {
 		// - Enable foreign key checks
 		opts.URL += "?_journal=WAL&_timeout=5000&_fk=true"
 
-		if log.Loglevel() == "debug" {
+		if cclog.Loglevel() == "debug" {
 			sql.Register("sqlite3WithHooks", sqlhooks.Wrap(&sqlite3.SQLiteDriver{}, &Hooks{}))
 			dbHandle, err = sqlx.Open("sqlite3WithHooks", opts.URL)
 		} else {
@@ -63,11 +63,11 @@ func Connect(driver string, db string) {
 			opts.URL += "?multiStatements=true"
 			dbHandle, err = sqlx.Open("mysql", opts.URL)
 		default:
-			log.Abortf("DB Connection: Unsupported database driver '%s'.\n", driver)
+			cclog.Abortf("DB Connection: Unsupported database driver '%s'.\n", driver)
 		}
 
 		if err != nil {
-			log.Abortf("DB Connection: Could not connect to '%s' database with sqlx.Open().\nError: %s\n", driver, err.Error())
+			cclog.Abortf("DB Connection: Could not connect to '%s' database with sqlx.Open().\nError: %s\n", driver, err.Error())
 		}
 
 		dbHandle.SetMaxOpenConns(opts.MaxOpenConnections)
@@ -78,14 +78,14 @@ func Connect(driver string, db string) {
 		dbConnInstance = &DBConnection{DB: dbHandle, Driver: driver}
 		err = checkDBVersion(driver, dbHandle.DB)
 		if err != nil {
-			log.Abortf("DB Connection: Failed DB version check.\nError: %s\n", err.Error())
+			cclog.Abortf("DB Connection: Failed DB version check.\nError: %s\n", err.Error())
 		}
 	})
 }
 
 func GetConnection() *DBConnection {
 	if dbConnInstance == nil {
-		log.Fatalf("Database connection not initialized!")
+		cclog.Fatalf("Database connection not initialized!")
 	}
 
 	return dbConnInstance

@@ -1,5 +1,5 @@
 // Copyright (C) NHR@FAU, University Erlangen-Nuremberg.
-// All rights reserved.
+// All rights reserved. This file is part of cc-backend.
 // Use of this source code is governed by a MIT-style
 // license that can be found in the LICENSE file.
 package repository
@@ -8,7 +8,7 @@ import (
 	"context"
 	"time"
 
-	"github.com/ClusterCockpit/cc-backend/pkg/log"
+	cclog "github.com/ClusterCockpit/cc-lib/ccLogger"
 )
 
 // Hooks satisfies the sqlhook.Hooks interface
@@ -16,13 +16,13 @@ type Hooks struct{}
 
 // Before hook will print the query with it's args and return the context with the timestamp
 func (h *Hooks) Before(ctx context.Context, query string, args ...any) (context.Context, error) {
-	log.Debugf("SQL query %s %q", query, args)
+	cclog.Debugf("SQL query %s %q", query, args)
 	return context.WithValue(ctx, "begin", time.Now()), nil
 }
 
 // After hook will get the timestamp registered on the Before hook and print the elapsed time
 func (h *Hooks) After(ctx context.Context, query string, args ...any) (context.Context, error) {
 	begin := ctx.Value("begin").(time.Time)
-	log.Debugf("Took: %s\n", time.Since(begin))
+	cclog.Debugf("Took: %s\n", time.Since(begin))
 	return ctx, nil
 }

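The hooks are wired in by wrapping the SQLite driver, as the dbConnection.go hunks above show: every statement then passes through Before/After, which log the SQL text and its duration at debug level. A sketch of that wiring, assuming the Hooks type from the hunk above is in scope (the DSN is a placeholder):

    package repository

    import (
        "database/sql"

        "github.com/jmoiron/sqlx"
        "github.com/mattn/go-sqlite3"
        "github.com/qustavo/sqlhooks/v2"
    )

    // openWithHooks registers a hooked sqlite3 driver and opens a handle
    // through it, so the Hooks above see every query. Register must only
    // run once per process, as Connect's sync.Once guarantees.
    func openWithHooks() (*sqlx.DB, error) {
        sql.Register("sqlite3WithHooks", sqlhooks.Wrap(&sqlite3.SQLiteDriver{}, &Hooks{}))
        return sqlx.Open("sqlite3WithHooks", "./var/job.db?_journal=WAL")
    }
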
@@ -1,5 +1,5 @@
 // Copyright (C) NHR@FAU, University Erlangen-Nuremberg.
-// All rights reserved.
+// All rights reserved. This file is part of cc-backend.
 // Use of this source code is governed by a MIT-style
 // license that can be found in the LICENSE file.
 package repository
@@ -16,9 +16,9 @@ import (
 	"time"
 
 	"github.com/ClusterCockpit/cc-backend/pkg/archive"
-	"github.com/ClusterCockpit/cc-backend/pkg/log"
-	"github.com/ClusterCockpit/cc-backend/pkg/lrucache"
-	"github.com/ClusterCockpit/cc-backend/pkg/schema"
+	cclog "github.com/ClusterCockpit/cc-lib/ccLogger"
+	"github.com/ClusterCockpit/cc-lib/lrucache"
+	"github.com/ClusterCockpit/cc-lib/schema"
 	sq "github.com/Masterminds/squirrel"
 	"github.com/jmoiron/sqlx"
 )
@@ -76,18 +76,18 @@ func scanJob(row interface{ Scan(...any) error }) (*schema.Job, error) {
 		&job.StartTime, &job.Partition, &job.ArrayJobId, &job.NumNodes, &job.NumHWThreads,
 		&job.NumAcc, &job.Exclusive, &job.MonitoringStatus, &job.SMT, &job.State,
 		&job.Duration, &job.Walltime, &job.RawResources, &job.RawFootprint, &job.Energy); err != nil {
-		log.Warnf("Error while scanning rows (Job): %v", err)
+		cclog.Warnf("Error while scanning rows (Job): %v", err)
 		return nil, err
 	}
 
 	if err := json.Unmarshal(job.RawResources, &job.Resources); err != nil {
-		log.Warn("Error while unmarshaling raw resources json")
+		cclog.Warn("Error while unmarshaling raw resources json")
 		return nil, err
 	}
 	job.RawResources = nil
 
 	if err := json.Unmarshal(job.RawFootprint, &job.Footprint); err != nil {
-		log.Warnf("Error while unmarshaling raw footprint json: %v", err)
+		cclog.Warnf("Error while unmarshaling raw footprint json: %v", err)
 		return nil, err
 	}
 	job.RawFootprint = nil
@@ -109,7 +109,7 @@ func (r *JobRepository) Optimize() error {
 			return err
 		}
 	case "mysql":
-		log.Info("Optimize currently not supported for mysql driver")
+		cclog.Info("Optimize currently not supported for mysql driver")
 	}
 
 	return nil
@@ -160,7 +160,7 @@ func (r *JobRepository) FetchMetadata(job *schema.Job) (map[string]string, error
 
 	if err := sq.Select("job.meta_data").From("job").Where("job.id = ?", job.ID).
 		RunWith(r.stmtCache).QueryRow().Scan(&job.RawMetaData); err != nil {
-		log.Warn("Error while scanning for job metadata")
+		cclog.Warn("Error while scanning for job metadata")
 		return nil, err
 	}
 
@@ -169,12 +169,12 @@ func (r *JobRepository) FetchMetadata(job *schema.Job) (map[string]string, error
 	}
 
 	if err := json.Unmarshal(job.RawMetaData, &job.MetaData); err != nil {
-		log.Warn("Error while unmarshaling raw metadata json")
+		cclog.Warn("Error while unmarshaling raw metadata json")
 		return nil, err
 	}
 
 	r.cache.Put(cachekey, job.MetaData, len(job.RawMetaData), 24*time.Hour)
-	log.Debugf("Timer FetchMetadata %s", time.Since(start))
+	cclog.Debugf("Timer FetchMetadata %s", time.Since(start))
 	return job.MetaData, nil
 }
 
@@ -183,7 +183,7 @@ func (r *JobRepository) UpdateMetadata(job *schema.Job, key, val string) (err er
 	r.cache.Del(cachekey)
 	if job.MetaData == nil {
 		if _, err = r.FetchMetadata(job); err != nil {
-			log.Warnf("Error while fetching metadata for job, DB ID '%v'", job.ID)
+			cclog.Warnf("Error while fetching metadata for job, DB ID '%v'", job.ID)
 			return err
 		}
 	}
@@ -198,7 +198,7 @@ func (r *JobRepository) UpdateMetadata(job *schema.Job, key, val string) (err er
 	}
 
 	if job.RawMetaData, err = json.Marshal(job.MetaData); err != nil {
-		log.Warnf("Error while marshaling metadata for job, DB ID '%v'", job.ID)
+		cclog.Warnf("Error while marshaling metadata for job, DB ID '%v'", job.ID)
 		return err
 	}
 
@@ -206,7 +206,7 @@ func (r *JobRepository) UpdateMetadata(job *schema.Job, key, val string) (err er
 		Set("meta_data", job.RawMetaData).
 		Where("job.id = ?", job.ID).
 		RunWith(r.stmtCache).Exec(); err != nil {
-		log.Warnf("Error while updating metadata for job, DB ID '%v'", job.ID)
+		cclog.Warnf("Error while updating metadata for job, DB ID '%v'", job.ID)
 		return err
 	}
 
@@ -219,7 +219,7 @@ func (r *JobRepository) FetchFootprint(job *schema.Job) (map[string]float64, err
 
 	if err := sq.Select("job.footprint").From("job").Where("job.id = ?", job.ID).
 		RunWith(r.stmtCache).QueryRow().Scan(&job.RawFootprint); err != nil {
-		log.Warn("Error while scanning for job footprint")
+		cclog.Warn("Error while scanning for job footprint")
 		return nil, err
 	}
 
@@ -228,11 +228,11 @@ func (r *JobRepository) FetchFootprint(job *schema.Job) (map[string]float64, err
 	}
 
 	if err := json.Unmarshal(job.RawFootprint, &job.Footprint); err != nil {
-		log.Warn("Error while unmarshaling raw footprint json")
+		cclog.Warn("Error while unmarshaling raw footprint json")
 		return nil, err
 	}
 
-	log.Debugf("Timer FetchFootprint %s", time.Since(start))
+	cclog.Debugf("Timer FetchFootprint %s", time.Since(start))
 	return job.Footprint, nil
 }
 
@@ -246,7 +246,7 @@ func (r *JobRepository) FetchEnergyFootprint(job *schema.Job) (map[string]float6
 
 	if err := sq.Select("job.energy_footprint").From("job").Where("job.id = ?", job.ID).
 		RunWith(r.stmtCache).QueryRow().Scan(&job.RawEnergyFootprint); err != nil {
-		log.Warn("Error while scanning for job energy_footprint")
+		cclog.Warn("Error while scanning for job energy_footprint")
 		return nil, err
 	}
 
@@ -255,12 +255,12 @@ func (r *JobRepository) FetchEnergyFootprint(job *schema.Job) (map[string]float6
 	}
 
 	if err := json.Unmarshal(job.RawEnergyFootprint, &job.EnergyFootprint); err != nil {
-		log.Warn("Error while unmarshaling raw energy footprint json")
+		cclog.Warn("Error while unmarshaling raw energy footprint json")
 		return nil, err
 	}
 
 	r.cache.Put(cachekey, job.EnergyFootprint, len(job.EnergyFootprint), 24*time.Hour)
-	log.Debugf("Timer FetchEnergyFootprint %s", time.Since(start))
+	cclog.Debugf("Timer FetchEnergyFootprint %s", time.Since(start))
 	return job.EnergyFootprint, nil
 }
 
@@ -273,9 +273,9 @@ func (r *JobRepository) DeleteJobsBefore(startTime int64) (int, error) {
 
 	if err != nil {
 		s, _, _ := qd.ToSql()
-		log.Errorf(" DeleteJobsBefore(%d) with %s: error %#v", startTime, s, err)
+		cclog.Errorf(" DeleteJobsBefore(%d) with %s: error %#v", startTime, s, err)
 	} else {
-		log.Debugf("DeleteJobsBefore(%d): Deleted %d jobs", startTime, cnt)
+		cclog.Debugf("DeleteJobsBefore(%d): Deleted %d jobs", startTime, cnt)
 	}
 	return cnt, err
 }
@@ -286,9 +286,9 @@ func (r *JobRepository) DeleteJobById(id int64) error {
 
 	if err != nil {
 		s, _, _ := qd.ToSql()
-		log.Errorf("DeleteJobById(%d) with %s : error %#v", id, s, err)
+		cclog.Errorf("DeleteJobById(%d) with %s : error %#v", id, s, err)
 	} else {
-		log.Debugf("DeleteJobById(%d): Success", id)
+		cclog.Debugf("DeleteJobById(%d): Success", id)
 	}
 	return err
 }
@@ -351,7 +351,7 @@ func (r *JobRepository) FindColumnValue(user *schema.User, searchterm string, ta
 		}
 		return "", ErrNotFound
 	} else {
-		log.Infof("Non-Admin User %s : Requested Query '%s' on table '%s' : Forbidden", user.Name, query, table)
+		cclog.Infof("Non-Admin User %s : Requested Query '%s' on table '%s' : Forbidden", user.Name, query, table)
 		return "", ErrForbidden
 	}
 }
@@ -370,7 +370,7 @@ func (r *JobRepository) FindColumnValues(user *schema.User, query string, table
 			err := rows.Scan(&result)
 			if err != nil {
 				rows.Close()
-				log.Warnf("Error while scanning rows: %v", err)
+				cclog.Warnf("Error while scanning rows: %v", err)
 				return emptyResult, err
 			}
 			results = append(results, result)
@@ -380,7 +380,7 @@ func (r *JobRepository) FindColumnValues(user *schema.User, query string, table
 		return emptyResult, ErrNotFound
 
 	} else {
-		log.Infof("Non-Admin User %s : Requested Query '%s' on table '%s' : Forbidden", user.Name, query, table)
+		cclog.Infof("Non-Admin User %s : Requested Query '%s' on table '%s' : Forbidden", user.Name, query, table)
 		return emptyResult, ErrForbidden
 	}
 }
@@ -399,7 +399,7 @@ func (r *JobRepository) Partitions(cluster string) ([]string, error) {
 	if err != nil {
 		return nil, err
 	}
-	log.Debugf("Timer Partitions %s", time.Since(start))
+	cclog.Debugf("Timer Partitions %s", time.Since(start))
 	return partitions.([]string), nil
 }
 
@@ -413,7 +413,7 @@ func (r *JobRepository) AllocatedNodes(cluster string) (map[string]map[string]in
 		Where("job.cluster = ?", cluster).
 		RunWith(r.stmtCache).Query()
 	if err != nil {
-		log.Error("Error while running query")
+		cclog.Error("Error while running query")
 		return nil, err
 	}
 
@@ -424,11 +424,11 @@ func (r *JobRepository) AllocatedNodes(cluster string) (map[string]map[string]in
 		var resources []*schema.Resource
 		var subcluster string
 		if err := rows.Scan(&raw, &subcluster); err != nil {
-			log.Warn("Error while scanning rows")
+			cclog.Warn("Error while scanning rows")
 			return nil, err
 		}
 		if err := json.Unmarshal(raw, &resources); err != nil {
-			log.Warn("Error while unmarshaling raw resources json")
+			cclog.Warn("Error while unmarshaling raw resources json")
 			return nil, err
 		}
 
@@ -443,7 +443,7 @@ func (r *JobRepository) AllocatedNodes(cluster string) (map[string]map[string]in
 		}
 	}
 
-	log.Debugf("Timer AllocatedNodes %s", time.Since(start))
+	cclog.Debugf("Timer AllocatedNodes %s", time.Since(start))
 	return subclusters, nil
 }
 
@@ -459,20 +459,20 @@ func (r *JobRepository) StopJobsExceedingWalltimeBy(seconds int) error {
 		Where(fmt.Sprintf("(%d - job.start_time) > (job.walltime + %d)", time.Now().Unix(), seconds)).
 		RunWith(r.DB).Exec()
 	if err != nil {
-		log.Warn("Error while stopping jobs exceeding walltime")
+		cclog.Warn("Error while stopping jobs exceeding walltime")
 		return err
 	}
 
 	rowsAffected, err := res.RowsAffected()
 	if err != nil {
-		log.Warn("Error while fetching affected rows after stopping due to exceeded walltime")
+		cclog.Warn("Error while fetching affected rows after stopping due to exceeded walltime")
 		return err
 	}
 
 	if rowsAffected > 0 {
-		log.Infof("%d jobs have been marked as failed due to running too long", rowsAffected)
+		cclog.Infof("%d jobs have been marked as failed due to running too long", rowsAffected)
 	}
-	log.Debugf("Timer StopJobsExceedingWalltimeBy %s", time.Since(start))
+	cclog.Debugf("Timer StopJobsExceedingWalltimeBy %s", time.Since(start))
 	return nil
 }
 
@@ -482,7 +482,7 @@ func (r *JobRepository) FindJobIdsByTag(tagId int64) ([]int64, error) {
 		Where(sq.Eq{"jobtag.tag_id": tagId}).Distinct()
 	rows, err := query.RunWith(r.stmtCache).Query()
 	if err != nil {
-		log.Error("Error while running query")
+		cclog.Error("Error while running query")
 		return nil, err
 	}
 	jobIds := make([]int64, 0, 100)
@@ -492,7 +492,7 @@ func (r *JobRepository) FindJobIdsByTag(tagId int64) ([]int64, error) {
 
 		if err := rows.Scan(&jobId); err != nil {
 			rows.Close()
-			log.Warn("Error while scanning rows")
+			cclog.Warn("Error while scanning rows")
 			return nil, err
 		}
 
@@ -511,7 +511,7 @@ func (r *JobRepository) FindRunningJobs(cluster string) ([]*schema.Job, error) {
 
 	rows, err := query.RunWith(r.stmtCache).Query()
 	if err != nil {
-		log.Error("Error while running query")
+		cclog.Error("Error while running query")
 		return nil, err
 	}
 
@@ -520,13 +520,13 @@ func (r *JobRepository) FindRunningJobs(cluster string) ([]*schema.Job, error) {
 		job, err := scanJob(rows)
 		if err != nil {
 			rows.Close()
-			log.Warn("Error while scanning rows")
+			cclog.Warn("Error while scanning rows")
 			return nil, err
 		}
 		jobs = append(jobs, job)
 	}
 
-	log.Infof("Return job count %d", len(jobs))
+	cclog.Infof("Return job count %d", len(jobs))
 	return jobs, nil
 }
 
@@ -551,18 +551,18 @@ func (r *JobRepository) FindJobsBetween(startTimeBegin int64, startTimeEnd int64
 	}
 
 	if startTimeBegin == 0 {
-		log.Infof("Find jobs before %d", startTimeEnd)
+		cclog.Infof("Find jobs before %d", startTimeEnd)
 		query = sq.Select(jobColumns...).From("job").Where(fmt.Sprintf(
 			"job.start_time < %d", startTimeEnd))
 	} else {
-		log.Infof("Find jobs between %d and %d", startTimeBegin, startTimeEnd)
+		cclog.Infof("Find jobs between %d and %d", startTimeBegin, startTimeEnd)
 		query = sq.Select(jobColumns...).From("job").Where(fmt.Sprintf(
 			"job.start_time BETWEEN %d AND %d", startTimeBegin, startTimeEnd))
 	}
 
 	rows, err := query.RunWith(r.stmtCache).Query()
 	if err != nil {
-		log.Error("Error while running query")
+		cclog.Error("Error while running query")
 		return nil, err
 	}
 
@@ -571,13 +571,13 @@ func (r *JobRepository) FindJobsBetween(startTimeBegin int64, startTimeEnd int64
 		job, err := scanJob(rows)
 		if err != nil {
 			rows.Close()
-			log.Warn("Error while scanning rows")
+			cclog.Warn("Error while scanning rows")
 			return nil, err
 		}
 		jobs = append(jobs, job)
 	}
 
-	log.Infof("Return job count %d", len(jobs))
+	cclog.Infof("Return job count %d", len(jobs))
 	return jobs, nil
 }
 
@@ -612,7 +612,7 @@ func (r *JobRepository) UpdateEnergy(
 	/* Note: Only Called for Running Jobs during Intermediate Update or on Archiving */
 	sc, err := archive.GetSubCluster(jobMeta.Cluster, jobMeta.SubCluster)
 	if err != nil {
-		log.Errorf("cannot get subcluster: %s", err.Error())
+		cclog.Errorf("cannot get subcluster: %s", err.Error())
 		return stmt, err
 	}
 	energyFootprint := make(map[string]float64)
@@ -625,7 +625,7 @@ func (r *JobRepository) UpdateEnergy(
 		if i, err := archive.MetricIndex(sc.MetricConfig, fp); err == nil {
 			// Note: For DB data, calculate and save as kWh
 			if sc.MetricConfig[i].Energy == "energy" { // this metric has energy as unit (Joules or Wh)
-				log.Warnf("Update EnergyFootprint for Job %d and Metric %s on cluster %s: Set to 'energy' in cluster.json: Not implemented, will return 0.0", jobMeta.JobID, jobMeta.Cluster, fp)
+				cclog.Warnf("Update EnergyFootprint for Job %d and Metric %s on cluster %s: Set to 'energy' in cluster.json: Not implemented, will return 0.0", jobMeta.JobID, jobMeta.Cluster, fp)
 				// FIXME: Needs sum as stats type
 			} else if sc.MetricConfig[i].Energy == "power" { // this metric has power as unit (Watt)
 				// Energy: Power (in Watts) * Time (in Seconds)
@@ -637,18 +637,18 @@ func (r *JobRepository) UpdateEnergy(
 				metricEnergy = math.Round(rawEnergy*100.0) / 100.0
 			}
 		} else {
-			log.Warnf("Error while collecting energy metric %s for job, DB ID '%v', return '0.0'", fp, jobMeta.ID)
+			cclog.Warnf("Error while collecting energy metric %s for job, DB ID '%v', return '0.0'", fp, jobMeta.ID)
 		}
 
 		energyFootprint[fp] = metricEnergy
 		totalEnergy += metricEnergy
 
-		// log.Infof("Metric %s Average %f -> %f kWh | Job %d Total -> %f kWh", fp, LoadJobStat(jobMeta, fp, "avg"), energy, jobMeta.JobID, totalEnergy)
+		// cclog.Infof("Metric %s Average %f -> %f kWh | Job %d Total -> %f kWh", fp, LoadJobStat(jobMeta, fp, "avg"), energy, jobMeta.JobID, totalEnergy)
 	}
 
 	var rawFootprint []byte
 	if rawFootprint, err = json.Marshal(energyFootprint); err != nil {
-		log.Warnf("Error while marshaling energy footprint for job INTO BYTES, DB ID '%v'", jobMeta.ID)
+		cclog.Warnf("Error while marshaling energy footprint for job INTO BYTES, DB ID '%v'", jobMeta.ID)
 		return stmt, err
 	}
 
@@ -662,7 +662,7 @@ func (r *JobRepository) UpdateFootprint(
 	/* Note: Only Called for Running Jobs during Intermediate Update or on Archiving */
 	sc, err := archive.GetSubCluster(jobMeta.Cluster, jobMeta.SubCluster)
 	if err != nil {
-		log.Errorf("cannot get subcluster: %s", err.Error())
+		cclog.Errorf("cannot get subcluster: %s", err.Error())
 		return stmt, err
 	}
 	footprint := make(map[string]float64)
@@ -676,7 +676,7 @@ func (r *JobRepository) UpdateFootprint(
 	}
 
 		if statType != "avg" && statType != "min" && statType != "max" {
-			log.Warnf("unknown statType for footprint update: %s", statType)
+			cclog.Warnf("unknown statType for footprint update: %s", statType)
 			return stmt, fmt.Errorf("unknown statType for footprint update: %s", statType)
 		}
 
@@ -690,7 +690,7 @@ func (r *JobRepository) UpdateFootprint(
 
 	var rawFootprint []byte
 	if rawFootprint, err = json.Marshal(footprint); err != nil {
-		log.Warnf("Error while marshaling footprint for job INTO BYTES, DB ID '%v'", jobMeta.ID)
+		cclog.Warnf("Error while marshaling footprint for job INTO BYTES, DB ID '%v'", jobMeta.ID)
 		return stmt, err
 	}
 
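Note: the UpdateEnergy hunks above convert an average power metric (Watts) into an energy value stored as kWh, rounded to two decimals. A worked sketch of that arithmetic (powerToKWh is a hypothetical helper; the diff shows only the rounding step, so the unit conversion here is an assumption based on the "Power (in Watts) * Time (in Seconds)" and "calculate and save as kWh" comments):

package repository

import "math"

// powerToKWh: energy [kWh] = avg power [W] * duration [s] / 3600 [s/h] / 1000 [W/kW].
func powerToKWh(avgPowerWatts float64, durationSeconds int64) float64 {
	rawEnergy := avgPowerWatts * float64(durationSeconds) / 3600.0 / 1000.0
	return math.Round(rawEnergy*100.0) / 100.0 // two decimals, as in the hunk above
}
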
@@ -1,5 +1,5 @@
 // Copyright (C) NHR@FAU, University Erlangen-Nuremberg.
-// All rights reserved.
+// All rights reserved. This file is part of cc-backend.
 // Use of this source code is governed by a MIT-style
 // license that can be found in the LICENSE file.
 package repository
@@ -8,8 +8,8 @@ import (
 	"encoding/json"
 	"fmt"
 
-	"github.com/ClusterCockpit/cc-backend/pkg/log"
-	"github.com/ClusterCockpit/cc-backend/pkg/schema"
+	cclog "github.com/ClusterCockpit/cc-lib/ccLogger"
+	"github.com/ClusterCockpit/cc-lib/schema"
 	sq "github.com/Masterminds/squirrel"
 )
 
@@ -34,12 +34,12 @@ func (r *JobRepository) InsertJob(job *schema.Job) (int64, error) {
 	res, err := r.DB.NamedExec(NamedJobCacheInsert, job)
 	r.Mutex.Unlock()
 	if err != nil {
-		log.Warn("Error while NamedJobInsert")
+		cclog.Warn("Error while NamedJobInsert")
 		return 0, err
 	}
 	id, err := res.LastInsertId()
 	if err != nil {
-		log.Warn("Error while getting last insert ID")
+		cclog.Warn("Error while getting last insert ID")
 		return 0, err
 	}
 
@@ -54,7 +54,7 @@ func (r *JobRepository) SyncJobs() ([]*schema.Job, error) {
 
 	rows, err := query.RunWith(r.stmtCache).Query()
 	if err != nil {
-		log.Errorf("Error while running query %v", err)
+		cclog.Errorf("Error while running query %v", err)
 		return nil, err
 	}
 
@@ -63,7 +63,7 @@ func (r *JobRepository) SyncJobs() ([]*schema.Job, error) {
 		job, err := scanJob(rows)
 		if err != nil {
 			rows.Close()
-			log.Warn("Error while scanning rows")
+			cclog.Warn("Error while scanning rows")
 			return nil, err
 		}
 		jobs = append(jobs, job)
@@ -72,13 +72,13 @@ func (r *JobRepository) SyncJobs() ([]*schema.Job, error) {
 	_, err = r.DB.Exec(
 		"INSERT INTO job (job_id, cluster, subcluster, start_time, hpc_user, project, cluster_partition, array_job_id, num_nodes, num_hwthreads, num_acc, exclusive, monitoring_status, smt, job_state, duration, walltime, footprint, energy, energy_footprint, resources, meta_data) SELECT job_id, cluster, subcluster, start_time, hpc_user, project, cluster_partition, array_job_id, num_nodes, num_hwthreads, num_acc, exclusive, monitoring_status, smt, job_state, duration, walltime, footprint, energy, energy_footprint, resources, meta_data FROM job_cache")
 	if err != nil {
-		log.Warnf("Error while Job sync: %v", err)
+		cclog.Warnf("Error while Job sync: %v", err)
 		return nil, err
 	}
 
 	_, err = r.DB.Exec("DELETE FROM job_cache")
 	if err != nil {
-		log.Warnf("Error while Job cache clean: %v", err)
+		cclog.Warnf("Error while Job cache clean: %v", err)
 		return nil, err
 	}
 
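Note: InsertJob above relies on sqlx named-parameter binding; NamedJobCacheInsert is an INSERT statement with :name placeholders that sqlx fills from the job struct's db tags. A minimal sketch of the same pattern (insertDemo, the CachedJob type, and the two columns shown are illustrative stand-ins, not cc-backend code):

package repository

import "github.com/jmoiron/sqlx"

// CachedJob is a stand-in struct; db tags map fields to :placeholders.
type CachedJob struct {
	JobID   int64  `db:"job_id"`
	Cluster string `db:"cluster"`
}

func insertDemo(db *sqlx.DB, job CachedJob) (int64, error) {
	res, err := db.NamedExec(
		"INSERT INTO job_cache (job_id, cluster) VALUES (:job_id, :cluster)", job)
	if err != nil {
		return 0, err
	}
	return res.LastInsertId() // DB id of the newly cached row
}
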
@@ -1,5 +1,5 @@
 // Copyright (C) NHR@FAU, University Erlangen-Nuremberg.
-// All rights reserved.
+// All rights reserved. This file is part of cc-backend.
 // Use of this source code is governed by a MIT-style
 // license that can be found in the LICENSE file.
 package repository
@@ -11,8 +11,8 @@ import (
 	"time"
 
 	"github.com/ClusterCockpit/cc-backend/internal/graph/model"
-	"github.com/ClusterCockpit/cc-backend/pkg/log"
-	"github.com/ClusterCockpit/cc-backend/pkg/schema"
+	cclog "github.com/ClusterCockpit/cc-lib/ccLogger"
+	"github.com/ClusterCockpit/cc-lib/schema"
 	sq "github.com/Masterminds/squirrel"
 )
 
@@ -39,7 +39,7 @@ func (r *JobRepository) Find(
 
 	q = q.OrderBy("job.id DESC") // always use newest matching job by db id if more than one match
 
-	log.Debugf("Timer Find %s", time.Since(start))
+	cclog.Debugf("Timer Find %s", time.Since(start))
 	return scanJob(q.RunWith(r.stmtCache).QueryRow())
 }
 
@@ -86,7 +86,7 @@ func (r *JobRepository) FindAll(
 
 	rows, err := q.RunWith(r.stmtCache).Query()
 	if err != nil {
-		log.Error("Error while running query")
+		cclog.Error("Error while running query")
 		return nil, err
 	}
 
@@ -94,12 +94,12 @@ func (r *JobRepository) FindAll(
 	for rows.Next() {
 		job, err := scanJob(rows)
 		if err != nil {
-			log.Warn("Error while scanning rows")
+			cclog.Warn("Error while scanning rows")
 			return nil, err
 		}
 		jobs = append(jobs, job)
 	}
-	log.Debugf("Timer FindAll %s", time.Since(start))
+	cclog.Debugf("Timer FindAll %s", time.Since(start))
 	return jobs, nil
 }
 
@@ -112,7 +112,7 @@ func (r *JobRepository) GetJobList() ([]int64, error) {
 
 	rows, err := query.RunWith(r.stmtCache).Query()
 	if err != nil {
-		log.Error("Error while running query")
+		cclog.Error("Error while running query")
 		return nil, err
 	}
 
@@ -122,13 +122,13 @@ func (r *JobRepository) GetJobList() ([]int64, error) {
 		err := rows.Scan(&id)
 		if err != nil {
 			rows.Close()
-			log.Warn("Error while scanning rows")
+			cclog.Warn("Error while scanning rows")
 			return nil, err
 		}
 		jl = append(jl, id)
 	}
 
-	log.Infof("Return job count %d", len(jl))
+	cclog.Infof("Return job count %d", len(jl))
 	return jl, nil
 }
 
@@ -253,7 +253,7 @@ func (r *JobRepository) FindConcurrentJobs(
 
 	rows, err := query.RunWith(r.stmtCache).Query()
 	if err != nil {
-		log.Errorf("Error while running query: %v", err)
+		cclog.Errorf("Error while running query: %v", err)
 		return nil, err
 	}
 
@@ -264,7 +264,7 @@ func (r *JobRepository) FindConcurrentJobs(
 		var id, jobId, startTime sql.NullInt64
 
 		if err = rows.Scan(&id, &jobId, &startTime); err != nil {
-			log.Warn("Error while scanning rows")
+			cclog.Warn("Error while scanning rows")
 			return nil, err
 		}
 
@@ -280,7 +280,7 @@ func (r *JobRepository) FindConcurrentJobs(
 
 	rows, err = queryRunning.RunWith(r.stmtCache).Query()
 	if err != nil {
-		log.Errorf("Error while running query: %v", err)
+		cclog.Errorf("Error while running query: %v", err)
 		return nil, err
 	}
 
@@ -288,7 +288,7 @@ func (r *JobRepository) FindConcurrentJobs(
 		var id, jobId, startTime sql.NullInt64
 
 		if err := rows.Scan(&id, &jobId, &startTime); err != nil {
-			log.Warn("Error while scanning rows")
+			cclog.Warn("Error while scanning rows")
 			return nil, err
 		}
 
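Note: all of the finder hunks above share one squirrel pattern: compose a SelectBuilder, run it against the repository's statement runner, then scan the rows. A minimal sketch of that pattern (demoFind and its column list are illustrative only, not cc-backend functions):

package repository

import (
	"database/sql"

	sq "github.com/Masterminds/squirrel"
)

func demoFind(db *sql.DB, jobID int64, cluster string) (*sql.Rows, error) {
	q := sq.Select("job.id", "job.job_id", "job.cluster").
		From("job").
		Where("job.job_id = ?", jobID).
		Where("job.cluster = ?", cluster).
		OrderBy("job.id DESC") // newest matching job first, as in Find above
	return q.RunWith(db).Query()
}
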
@@ -1,5 +1,5 @@
 // Copyright (C) NHR@FAU, University Erlangen-Nuremberg.
-// All rights reserved.
+// All rights reserved. This file is part of cc-backend.
 // Use of this source code is governed by a MIT-style
 // license that can be found in the LICENSE file.
 package repository
@@ -7,7 +7,7 @@ package repository
 import (
 	"sync"
 
-	"github.com/ClusterCockpit/cc-backend/pkg/schema"
+	"github.com/ClusterCockpit/cc-lib/schema"
 )
 
 type JobHook interface {

@@ -1,5 +1,5 @@
 // Copyright (C) NHR@FAU, University Erlangen-Nuremberg.
-// All rights reserved.
+// All rights reserved. This file is part of cc-backend.
 // Use of this source code is governed by a MIT-style
 // license that can be found in the LICENSE file.
 package repository
@@ -13,8 +13,8 @@ import (
 	"time"
 
 	"github.com/ClusterCockpit/cc-backend/internal/graph/model"
-	"github.com/ClusterCockpit/cc-backend/pkg/log"
-	"github.com/ClusterCockpit/cc-backend/pkg/schema"
+	cclog "github.com/ClusterCockpit/cc-lib/ccLogger"
+	"github.com/ClusterCockpit/cc-lib/schema"
 	sq "github.com/Masterminds/squirrel"
 )
 
@@ -68,7 +68,7 @@ func (r *JobRepository) QueryJobs(
 	rows, err := query.RunWith(r.stmtCache).Query()
 	if err != nil {
 		queryString, queryVars, _ := query.ToSql()
-		log.Errorf("Error while running query '%s' %v: %v", queryString, queryVars, err)
+		cclog.Errorf("Error while running query '%s' %v: %v", queryString, queryVars, err)
 		return nil, err
 	}
 
@@ -77,7 +77,7 @@ func (r *JobRepository) QueryJobs(
 		job, err := scanJob(rows)
 		if err != nil {
 			rows.Close()
-			log.Warn("Error while scanning rows (Jobs)")
+			cclog.Warn("Error while scanning rows (Jobs)")
 			return nil, err
 		}
 		jobs = append(jobs, job)
@@ -123,7 +123,7 @@ func SecurityCheckWithUser(user *schema.User, query sq.SelectBuilder) (sq.Select
 		if len(user.Projects) != 0 {
 			return query.Where(sq.Or{sq.Eq{"job.project": user.Projects}, sq.Eq{"job.hpc_user": user.Username}}), nil
 		} else {
-			log.Debugf("Manager-User '%s' has no defined projects to lookup! Query only personal jobs ...", user.Username)
+			cclog.Debugf("Manager-User '%s' has no defined projects to lookup! Query only personal jobs ...", user.Username)
 			return query.Where("job.hpc_user = ?", user.Username), nil
 		}
 	case user.HasRole(schema.RoleUser): // User : Only personal jobs
@@ -244,7 +244,7 @@ func buildTimeCondition(field string, cond *schema.TimeRange, query sq.SelectBui
 		case "last30d":
 			then = now - (60 * 60 * 24 * 30)
 		default:
-			log.Debugf("No known named timeRange: startTime.range = %s", cond.Range)
+			cclog.Debugf("No known named timeRange: startTime.range = %s", cond.Range)
 			return query
 		}
 		return query.Where(field+" BETWEEN ? AND ?", then, now)
@@ -335,7 +335,7 @@ var (
 func toSnakeCase(str string) string {
 	for _, c := range str {
 		if c == '\'' || c == '\\' {
-			log.Panic("toSnakeCase() attack vector!")
+			cclog.Panic("toSnakeCase() attack vector!")
 		}
 	}
 
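Note: buildTimeCondition above resolves named ranges such as "last30d" to a Unix-time window and appends a BETWEEN predicate. A simplified sketch of that logic (lastNDays is an illustrative helper, not part of cc-backend):

package repository

import (
	"time"

	sq "github.com/Masterminds/squirrel"
)

func lastNDays(field string, days int64, query sq.SelectBuilder) sq.SelectBuilder {
	now := time.Now().Unix()
	then := now - 60*60*24*days // days=30 reproduces the "last30d" case above
	return query.Where(field+" BETWEEN ? AND ?", then, now)
}
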
@@ -1,5 +1,5 @@
 // Copyright (C) NHR@FAU, University Erlangen-Nuremberg.
-// All rights reserved.
+// All rights reserved. This file is part of cc-backend.
 // Use of this source code is governed by a MIT-style
 // license that can be found in the LICENSE file.
 package repository
@@ -9,7 +9,7 @@ import (
 	"fmt"
 	"testing"
 
-	"github.com/ClusterCockpit/cc-backend/pkg/schema"
+	"github.com/ClusterCockpit/cc-lib/schema"
 	_ "github.com/mattn/go-sqlite3"
 )
 
@@ -1,5 +1,5 @@
 // Copyright (C) NHR@FAU, University Erlangen-Nuremberg.
-// All rights reserved.
+// All rights reserved. This file is part of cc-backend.
 // Use of this source code is governed by a MIT-style
 // license that can be found in the LICENSE file.
 package repository
@@ -9,7 +9,7 @@ import (
 	"embed"
 	"fmt"
 
-	"github.com/ClusterCockpit/cc-backend/pkg/log"
+	cclog "github.com/ClusterCockpit/cc-lib/ccLogger"
 	"github.com/golang-migrate/migrate/v4"
 	"github.com/golang-migrate/migrate/v4/database/mysql"
 	"github.com/golang-migrate/migrate/v4/database/sqlite3"
@@ -54,13 +54,13 @@ func checkDBVersion(backend string, db *sql.DB) error {
 			return err
 		}
 	default:
-		log.Abortf("Migration: Unsupported database backend '%s'.\n", backend)
+		cclog.Abortf("Migration: Unsupported database backend '%s'.\n", backend)
 	}
 
 	v, dirty, err := m.Version()
 	if err != nil {
 		if err == migrate.ErrNilVersion {
-			log.Warn("Legacy database without version or missing database file!")
+			cclog.Warn("Legacy database without version or missing database file!")
 		} else {
 			return err
 		}
@@ -84,7 +84,7 @@ func getMigrateInstance(backend string, db string) (m *migrate.Migrate, err erro
 	case "sqlite3":
 		d, err := iofs.New(migrationFiles, "migrations/sqlite3")
 		if err != nil {
-			log.Fatal(err)
+			cclog.Fatal(err)
 		}
 
 		m, err = migrate.NewWithSourceInstance("iofs", d, fmt.Sprintf("sqlite3://%s?_foreign_keys=on", db))
@@ -102,7 +102,7 @@ func getMigrateInstance(backend string, db string) (m *migrate.Migrate, err erro
 			return m, err
 		}
 	default:
-		log.Abortf("Migration: Unsupported database backend '%s'.\n", backend)
+		cclog.Abortf("Migration: Unsupported database backend '%s'.\n", backend)
 	}
 
 	return m, nil
@@ -117,14 +117,14 @@ func MigrateDB(backend string, db string) error {
 	v, dirty, err := m.Version()
 	if err != nil {
 		if err == migrate.ErrNilVersion {
-			log.Warn("Legacy database without version or missing database file!")
+			cclog.Warn("Legacy database without version or missing database file!")
 		} else {
 			return err
 		}
 	}
 
 	if v < Version {
-		log.Infof("unsupported database version %d, need %d.\nPlease backup your database file and run cc-backend -migrate-db", v, Version)
+		cclog.Infof("unsupported database version %d, need %d.\nPlease backup your database file and run cc-backend -migrate-db", v, Version)
 	}
 
 	if dirty {
@@ -133,7 +133,7 @@ func MigrateDB(backend string, db string) error {
 
 	if err := m.Up(); err != nil {
 		if err == migrate.ErrNoChange {
-			log.Info("DB already up to date!")
+			cclog.Info("DB already up to date!")
 		} else {
 			return err
 		}
@@ -151,7 +151,7 @@ func RevertDB(backend string, db string) error {
 
 	if err := m.Migrate(Version - 1); err != nil {
 		if err == migrate.ErrNoChange {
-			log.Info("DB already up to date!")
+			cclog.Info("DB already up to date!")
 		} else {
 			return err
 		}

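Note: MigrateDB and RevertDB are the public entry points of this migration file. A caller sketch (the database path is a placeholder; cc-backend invokes this from its own initialization code):

package main

import (
	"github.com/ClusterCockpit/cc-backend/internal/repository"
	cclog "github.com/ClusterCockpit/cc-lib/ccLogger"
)

func main() {
	// Bring the schema up to the current Version before serving requests.
	if err := repository.MigrateDB("sqlite3", "path/to/job.db"); err != nil {
		cclog.Fatal(err)
	}
}
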
@@ -1,5 +1,5 @@
 // Copyright (C) NHR@FAU, University Erlangen-Nuremberg.
-// All rights reserved.
+// All rights reserved. This file is part of cc-backend.
 // Use of this source code is governed by a MIT-style
 // license that can be found in the LICENSE file.
 package repository
@@ -15,9 +15,9 @@ import (
 
 	"github.com/ClusterCockpit/cc-backend/internal/graph/model"
 	"github.com/ClusterCockpit/cc-backend/pkg/archive"
-	"github.com/ClusterCockpit/cc-backend/pkg/log"
-	"github.com/ClusterCockpit/cc-backend/pkg/lrucache"
-	"github.com/ClusterCockpit/cc-backend/pkg/schema"
+	cclog "github.com/ClusterCockpit/cc-lib/ccLogger"
+	"github.com/ClusterCockpit/cc-lib/lrucache"
+	"github.com/ClusterCockpit/cc-lib/schema"
 	sq "github.com/Masterminds/squirrel"
 	"github.com/jmoiron/sqlx"
 )
@@ -59,7 +59,7 @@ func (r *NodeRepository) FetchMetadata(node *schema.Node) (map[string]string, er
 
 	if err := sq.Select("node.meta_data").From("node").Where("node.id = ?", node.ID).
 		RunWith(r.stmtCache).QueryRow().Scan(&node.RawMetaData); err != nil {
-		log.Warn("Error while scanning for node metadata")
+		cclog.Warn("Error while scanning for node metadata")
 		return nil, err
 	}
 
@@ -68,12 +68,12 @@ func (r *NodeRepository) FetchMetadata(node *schema.Node) (map[string]string, er
 	}
 
 	if err := json.Unmarshal(node.RawMetaData, &node.MetaData); err != nil {
-		log.Warn("Error while unmarshaling raw metadata json")
+		cclog.Warn("Error while unmarshaling raw metadata json")
 		return nil, err
 	}
 
 	r.cache.Put(cachekey, node.MetaData, len(node.RawMetaData), 24*time.Hour)
-	log.Debugf("Timer FetchMetadata %s", time.Since(start))
+	cclog.Debugf("Timer FetchMetadata %s", time.Since(start))
 	return node.MetaData, nil
 }
 
@@ -82,7 +82,7 @@ func (r *NodeRepository) UpdateMetadata(node *schema.Node, key, val string) (err
 	r.cache.Del(cachekey)
 	if node.MetaData == nil {
 		if _, err = r.FetchMetadata(node); err != nil {
-			log.Warnf("Error while fetching metadata for node, DB ID '%v'", node.ID)
+			cclog.Warnf("Error while fetching metadata for node, DB ID '%v'", node.ID)
 			return err
 		}
 	}
@@ -97,7 +97,7 @@ func (r *NodeRepository) UpdateMetadata(node *schema.Node, key, val string) (err
 	}
 
 	if node.RawMetaData, err = json.Marshal(node.MetaData); err != nil {
-		log.Warnf("Error while marshaling metadata for node, DB ID '%v'", node.ID)
+		cclog.Warnf("Error while marshaling metadata for node, DB ID '%v'", node.ID)
 		return err
 	}
 
@@ -105,7 +105,7 @@ func (r *NodeRepository) UpdateMetadata(node *schema.Node, key, val string) (err
 		Set("meta_data", node.RawMetaData).
 		Where("node.id = ?", node.ID).
 		RunWith(r.stmtCache).Exec(); err != nil {
-		log.Warnf("Error while updating metadata for node, DB ID '%v'", node.ID)
+		cclog.Warnf("Error while updating metadata for node, DB ID '%v'", node.ID)
 		return err
 	}
 
@@ -120,7 +120,7 @@ func (r *NodeRepository) GetNode(id int64, withMeta bool) (*schema.Node, error)
 		Where("node.id = ?", id).RunWith(r.DB).
 		QueryRow().Scan(&node.ID, &node.Hostname, &node.Cluster, &node.SubCluster, &node.NodeState,
 		&node.HealthState); err != nil {
-		log.Warnf("Error while querying node '%v' from database", id)
+		cclog.Warnf("Error while querying node '%v' from database", id)
 		return nil, err
 	}
 
@@ -128,7 +128,7 @@ func (r *NodeRepository) GetNode(id int64, withMeta bool) (*schema.Node, error)
 		var err error
 		var meta map[string]string
 		if meta, err = r.FetchMetadata(node); err != nil {
-			log.Warnf("Error while fetching metadata for node '%v'", id)
+			cclog.Warnf("Error while fetching metadata for node '%v'", id)
 			return nil, err
 		}
 		node.MetaData = meta
@@ -146,12 +146,12 @@ func (r *NodeRepository) AddNode(node *schema.Node) (int64, error) {
 
 	res, err := r.DB.NamedExec(NamedNodeInsert, node)
 	if err != nil {
-		log.Errorf("Error while adding node '%v' to database", node.Hostname)
+		cclog.Errorf("Error while adding node '%v' to database", node.Hostname)
 		return 0, err
 	}
 	node.ID, err = res.LastInsertId()
 	if err != nil {
-		log.Errorf("Error while getting last insert id for node '%v' from database", node.Hostname)
+		cclog.Errorf("Error while getting last insert id for node '%v' from database", node.Hostname)
 		return 0, err
 	}
 
@@ -166,7 +166,7 @@ func (r *NodeRepository) UpdateNodeState(hostname string, cluster string, nodeSt
 		if err == sql.ErrNoRows {
 			subcluster, err := archive.GetSubClusterByNode(cluster, hostname)
 			if err != nil {
-				log.Errorf("Error while getting subcluster for node '%s' in cluster '%s': %v", hostname, cluster, err)
+				cclog.Errorf("Error while getting subcluster for node '%s' in cluster '%s': %v", hostname, cluster, err)
 				return err
 			}
 			node := schema.Node{
@@ -175,29 +175,29 @@ func (r *NodeRepository) UpdateNodeState(hostname string, cluster string, nodeSt
 			}
 			_, err = r.AddNode(&node)
 			if err != nil {
-				log.Errorf("Error while adding node '%s' to database: %v", hostname, err)
+				cclog.Errorf("Error while adding node '%s' to database: %v", hostname, err)
 				return err
 			}
 
-			log.Infof("Added node '%s' to database", hostname)
+			cclog.Infof("Added node '%s' to database", hostname)
 			return nil
 		} else {
-			log.Warnf("Error while querying node '%v' from database", id)
+			cclog.Warnf("Error while querying node '%v' from database", id)
 			return err
 		}
 	}
 
 	if _, err := sq.Update("node").Set("node_state", nodeState).Where("node.id = ?", id).RunWith(r.DB).Exec(); err != nil {
-		log.Errorf("error while updating node '%s'", hostname)
+		cclog.Errorf("error while updating node '%s'", hostname)
 		return err
 	}
-	log.Infof("Updated node '%s' in database", hostname)
+	cclog.Infof("Updated node '%s' in database", hostname)
 	return nil
 }
 
 // func (r *NodeRepository) UpdateHealthState(hostname string, healthState *schema.MonitoringState) error {
 // 	if _, err := sq.Update("node").Set("health_state", healthState).Where("node.id = ?", id).RunWith(r.DB).Exec(); err != nil {
-// 		log.Errorf("error while updating node '%d'", id)
+// 		cclog.Errorf("error while updating node '%d'", id)
 // 		return err
 // 	}
 //
@@ -207,10 +207,10 @@ func (r *NodeRepository) UpdateNodeState(hostname string, cluster string, nodeSt
 func (r *NodeRepository) DeleteNode(id int64) error {
 	_, err := r.DB.Exec(`DELETE FROM node WHERE node.id = ?`, id)
 	if err != nil {
-		log.Errorf("Error while deleting node '%d' from DB", id)
+		cclog.Errorf("Error while deleting node '%d' from DB", id)
 		return err
 	}
-	log.Infof("deleted node '%d' from DB", id)
+	cclog.Infof("deleted node '%d' from DB", id)
 	return nil
 }
 
@@ -243,7 +243,7 @@ func (r *NodeRepository) QueryNodes(
 	rows, err := query.RunWith(r.stmtCache).Query()
 	if err != nil {
 		queryString, queryVars, _ := query.ToSql()
-		log.Errorf("Error while running query '%s' %v: %v", queryString, queryVars, err)
+		cclog.Errorf("Error while running query '%s' %v: %v", queryString, queryVars, err)
 		return nil, err
 	}
 
@@ -254,7 +254,7 @@ func (r *NodeRepository) QueryNodes(
 		if err := rows.Scan(&node.Hostname, &node.Cluster, &node.SubCluster,
 			&node.NodeState, &node.HealthState); err != nil {
 			rows.Close()
-			log.Warn("Error while scanning rows (Nodes)")
+			cclog.Warn("Error while scanning rows (Nodes)")
 			return nil, err
 		}
 		nodes = append(nodes, &node)
@@ -269,7 +269,7 @@ func (r *NodeRepository) ListNodes(cluster string) ([]*schema.Node, error) {
 
 	rows, err := q.RunWith(r.DB).Query()
 	if err != nil {
-		log.Warn("Error while querying user list")
+		cclog.Warn("Error while querying user list")
 		return nil, err
 	}
 	nodeList := make([]*schema.Node, 0, 100)
@@ -278,7 +278,7 @@ func (r *NodeRepository) ListNodes(cluster string) ([]*schema.Node, error) {
 		node := &schema.Node{}
 		if err := rows.Scan(&node.Hostname, &node.Cluster,
 			&node.SubCluster, &node.NodeState, &node.HealthState); err != nil {
-			log.Warn("Error while scanning node list")
+			cclog.Warn("Error while scanning node list")
 			return nil, err
 		}
 
@ -1,5 +1,5 @@
|
|||||||
// Copyright (C) NHR@FAU, University Erlangen-Nuremberg.
|
// Copyright (C) NHR@FAU, University Erlangen-Nuremberg.
|
||||||
// All rights reserved.
|
// All rights reserved. This file is part of cc-backend.
|
||||||
// Use of this source code is governed by a MIT-style
|
// Use of this source code is governed by a MIT-style
|
||||||
// license that can be found in the LICENSE file.
|
// license that can be found in the LICENSE file.
|
||||||
package repository
|
package repository
|
||||||
@ -9,8 +9,8 @@ import (
|
|||||||
"testing"
|
"testing"
|
||||||
|
|
||||||
"github.com/ClusterCockpit/cc-backend/internal/graph/model"
|
"github.com/ClusterCockpit/cc-backend/internal/graph/model"
|
||||||
"github.com/ClusterCockpit/cc-backend/pkg/log"
|
cclog "github.com/ClusterCockpit/cc-lib/ccLogger"
|
||||||
"github.com/ClusterCockpit/cc-backend/pkg/schema"
|
"github.com/ClusterCockpit/cc-lib/schema"
|
||||||
_ "github.com/mattn/go-sqlite3"
|
_ "github.com/mattn/go-sqlite3"
|
||||||
)
|
)
|
||||||
|
|
||||||
@ -65,7 +65,7 @@ func BenchmarkDB_FindJobById(b *testing.B) {
|
|||||||
func BenchmarkDB_FindJob(b *testing.B) {
|
func BenchmarkDB_FindJob(b *testing.B) {
|
||||||
var jobId int64 = 107266
|
var jobId int64 = 107266
|
||||||
var startTime int64 = 1657557241
|
var startTime int64 = 1657557241
|
||||||
var cluster = "fritz"
|
cluster := "fritz"
|
||||||
|
|
||||||
b.Run("FindJob", func(b *testing.B) {
|
b.Run("FindJob", func(b *testing.B) {
|
||||||
db := setup(b)
|
db := setup(b)
|
||||||
@ -147,7 +147,7 @@ func getContext(tb testing.TB) context.Context {
|
|||||||
|
|
||||||
func setup(tb testing.TB) *JobRepository {
|
func setup(tb testing.TB) *JobRepository {
|
||||||
tb.Helper()
|
tb.Helper()
|
||||||
log.Init("warn", true)
|
cclog.Init("warn", true)
|
||||||
dbfile := "testdata/job.db"
|
dbfile := "testdata/job.db"
|
||||||
err := MigrateDB("sqlite3", dbfile)
|
err := MigrateDB("sqlite3", dbfile)
|
||||||
noErr(tb, err)
|
noErr(tb, err)
|
||||||
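As a usage sketch, any new benchmark in this file now picks up the ported logger through setup; for example (hypothetical benchmark and database id, shown only to illustrate the call path, not part of this PR):

    func BenchmarkDB_FindByIdDirect(b *testing.B) {
    	db := setup(b) // setup calls cclog.Init("warn", true) and MigrateDB

    	b.Run("FindByIdDirect", func(b *testing.B) {
    		for i := 0; i < b.N; i++ {
    			_, err := db.FindByIdDirect(1) // hypothetical row id in testdata/job.db
    			noErr(b, err)
    		}
    	})
    }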
@@ -1,5 +1,5 @@
 // Copyright (C) NHR@FAU, University Erlangen-Nuremberg.
-// All rights reserved.
+// All rights reserved. This file is part of cc-backend.
 // Use of this source code is governed by a MIT-style
 // license that can be found in the LICENSE file.
 package repository

@@ -14,8 +14,8 @@ import (
 	"github.com/ClusterCockpit/cc-backend/internal/graph/model"
 	"github.com/ClusterCockpit/cc-backend/internal/metricDataDispatcher"
 	"github.com/ClusterCockpit/cc-backend/pkg/archive"
-	"github.com/ClusterCockpit/cc-backend/pkg/log"
+	cclog "github.com/ClusterCockpit/cc-lib/ccLogger"
-	"github.com/ClusterCockpit/cc-backend/pkg/schema"
+	"github.com/ClusterCockpit/cc-lib/schema"
 	sq "github.com/Masterminds/squirrel"
 )

@@ -158,7 +158,7 @@ func (r *JobRepository) JobsStatsGrouped(

 	rows, err := query.RunWith(r.DB).Query()
 	if err != nil {
-		log.Warn("Error while querying DB for job statistics")
+		cclog.Warn("Error while querying DB for job statistics")
 		return nil, err
 	}

@@ -169,7 +169,7 @@ func (r *JobRepository) JobsStatsGrouped(
 		var name sql.NullString
 		var jobs, walltime, nodes, nodeHours, cores, coreHours, accs, accHours sql.NullInt64
 		if err := rows.Scan(&id, &jobs, &name, &walltime, &nodes, &nodeHours, &cores, &coreHours, &accs, &accHours); err != nil {
-			log.Warn("Error while scanning rows")
+			cclog.Warn("Error while scanning rows")
 			return nil, err
 		}

@@ -241,7 +241,7 @@ func (r *JobRepository) JobsStatsGrouped(
 		}
 	}

-	log.Debugf("Timer JobsStatsGrouped %s", time.Since(start))
+	cclog.Debugf("Timer JobsStatsGrouped %s", time.Since(start))
 	return stats, nil
 }

@@ -261,7 +261,7 @@ func (r *JobRepository) JobsStats(

 	var jobs, walltime, nodes, nodeHours, cores, coreHours, accs, accHours sql.NullInt64
 	if err := row.Scan(&jobs, &walltime, &nodes, &nodeHours, &cores, &coreHours, &accs, &accHours); err != nil {
-		log.Warn("Error while scanning rows")
+		cclog.Warn("Error while scanning rows")
 		return nil, err
 	}

@@ -287,7 +287,7 @@ func (r *JobRepository) JobsStats(
 		})
 	}

-	log.Debugf("Timer JobStats %s", time.Since(start))
+	cclog.Debugf("Timer JobStats %s", time.Since(start))
 	return stats, nil
 }

@@ -301,7 +301,7 @@ func LoadJobStat(job *schema.Job, metric string, statType string) float64 {
 		case "min":
 			return stats.Min
 		default:
-			log.Errorf("Unknown stat type %s", statType)
+			cclog.Errorf("Unknown stat type %s", statType)
 		}
 	}

@@ -322,7 +322,7 @@ func (r *JobRepository) JobCountGrouped(
 	}
 	rows, err := query.RunWith(r.DB).Query()
 	if err != nil {
-		log.Warn("Error while querying DB for job statistics")
+		cclog.Warn("Error while querying DB for job statistics")
 		return nil, err
 	}

@@ -332,7 +332,7 @@ func (r *JobRepository) JobCountGrouped(
 		var id sql.NullString
 		var cnt sql.NullInt64
 		if err := rows.Scan(&id, &cnt); err != nil {
-			log.Warn("Error while scanning rows")
+			cclog.Warn("Error while scanning rows")
 			return nil, err
 		}
 		if id.Valid {

@@ -344,7 +344,7 @@ func (r *JobRepository) JobCountGrouped(
 		}
 	}

-	log.Debugf("Timer JobCountGrouped %s", time.Since(start))
+	cclog.Debugf("Timer JobCountGrouped %s", time.Since(start))
 	return stats, nil
 }

@@ -364,7 +364,7 @@ func (r *JobRepository) AddJobCountGrouped(
 	}
 	rows, err := query.RunWith(r.DB).Query()
 	if err != nil {
-		log.Warn("Error while querying DB for job statistics")
+		cclog.Warn("Error while querying DB for job statistics")
 		return nil, err
 	}

@@ -374,7 +374,7 @@ func (r *JobRepository) AddJobCountGrouped(
 		var id sql.NullString
 		var cnt sql.NullInt64
 		if err := rows.Scan(&id, &cnt); err != nil {
-			log.Warn("Error while scanning rows")
+			cclog.Warn("Error while scanning rows")
 			return nil, err
 		}
 		if id.Valid {

@@ -393,7 +393,7 @@ func (r *JobRepository) AddJobCountGrouped(
 		}
 	}

-	log.Debugf("Timer AddJobCountGrouped %s", time.Since(start))
+	cclog.Debugf("Timer AddJobCountGrouped %s", time.Since(start))
 	return stats, nil
 }

@@ -411,7 +411,7 @@ func (r *JobRepository) AddJobCount(
 	}
 	rows, err := query.RunWith(r.DB).Query()
 	if err != nil {
-		log.Warn("Error while querying DB for job statistics")
+		cclog.Warn("Error while querying DB for job statistics")
 		return nil, err
 	}

@@ -420,7 +420,7 @@ func (r *JobRepository) AddJobCount(
 	for rows.Next() {
 		var cnt sql.NullInt64
 		if err := rows.Scan(&cnt); err != nil {
-			log.Warn("Error while scanning rows")
+			cclog.Warn("Error while scanning rows")
 			return nil, err
 		}

@@ -438,7 +438,7 @@ func (r *JobRepository) AddJobCount(
 		}
 	}

-	log.Debugf("Timer AddJobCount %s", time.Since(start))
+	cclog.Debugf("Timer AddJobCount %s", time.Since(start))
 	return stats, nil
 }

@@ -479,29 +479,29 @@ func (r *JobRepository) AddHistograms(
 	value := fmt.Sprintf(`CAST(ROUND(((CASE WHEN job.job_state = "running" THEN %d - job.start_time ELSE job.duration END) / %d) + 1) as %s) as value`, time.Now().Unix(), targetBinSize, castType)
 	stat.HistDuration, err = r.jobsDurationStatisticsHistogram(ctx, value, filter, targetBinSize, &targetBinCount)
 	if err != nil {
-		log.Warn("Error while loading job statistics histogram: job duration")
+		cclog.Warn("Error while loading job statistics histogram: job duration")
 		return nil, err
 	}

 	stat.HistNumNodes, err = r.jobsStatisticsHistogram(ctx, "job.num_nodes as value", filter)
 	if err != nil {
-		log.Warn("Error while loading job statistics histogram: num nodes")
+		cclog.Warn("Error while loading job statistics histogram: num nodes")
 		return nil, err
 	}

 	stat.HistNumCores, err = r.jobsStatisticsHistogram(ctx, "job.num_hwthreads as value", filter)
 	if err != nil {
-		log.Warn("Error while loading job statistics histogram: num hwthreads")
+		cclog.Warn("Error while loading job statistics histogram: num hwthreads")
 		return nil, err
 	}

 	stat.HistNumAccs, err = r.jobsStatisticsHistogram(ctx, "job.num_acc as value", filter)
 	if err != nil {
-		log.Warn("Error while loading job statistics histogram: num acc")
+		cclog.Warn("Error while loading job statistics histogram: num acc")
 		return nil, err
 	}

-	log.Debugf("Timer AddHistograms %s", time.Since(start))
+	cclog.Debugf("Timer AddHistograms %s", time.Since(start))
 	return stat, nil
 }

@@ -520,7 +520,7 @@ func (r *JobRepository) AddMetricHistograms(
 		if f.State != nil {
 			if len(f.State) == 1 && f.State[0] == "running" {
 				stat.HistMetrics = r.runningJobsMetricStatisticsHistogram(ctx, metrics, filter, targetBinCount)
-				log.Debugf("Timer AddMetricHistograms %s", time.Since(start))
+				cclog.Debugf("Timer AddMetricHistograms %s", time.Since(start))
 				return stat, nil
 			}
 		}

@@ -530,13 +530,13 @@ func (r *JobRepository) AddMetricHistograms(
 	for _, m := range metrics {
 		metricHisto, err := r.jobsMetricStatisticsHistogram(ctx, m, filter, targetBinCount)
 		if err != nil {
-			log.Warnf("Error while loading job metric statistics histogram: %s", m)
+			cclog.Warnf("Error while loading job metric statistics histogram: %s", m)
 			continue
 		}
 		stat.HistMetrics = append(stat.HistMetrics, metricHisto)
 	}

-	log.Debugf("Timer AddMetricHistograms %s", time.Since(start))
+	cclog.Debugf("Timer AddMetricHistograms %s", time.Since(start))
 	return stat, nil
 }

@@ -560,7 +560,7 @@ func (r *JobRepository) jobsStatisticsHistogram(

 	rows, err := query.GroupBy("value").RunWith(r.DB).Query()
 	if err != nil {
-		log.Error("Error while running query")
+		cclog.Error("Error while running query")
 		return nil, err
 	}

@@ -569,13 +569,13 @@ func (r *JobRepository) jobsStatisticsHistogram(
 	for rows.Next() {
 		point := model.HistoPoint{}
 		if err := rows.Scan(&point.Value, &point.Count); err != nil {
-			log.Warn("Error while scanning rows")
+			cclog.Warn("Error while scanning rows")
 			return nil, err
 		}

 		points = append(points, &point)
 	}
-	log.Debugf("Timer jobsStatisticsHistogram %s", time.Since(start))
+	cclog.Debugf("Timer jobsStatisticsHistogram %s", time.Since(start))
 	return points, nil
 }

@@ -607,7 +607,7 @@ func (r *JobRepository) jobsDurationStatisticsHistogram(

 	rows, err := query.GroupBy("value").RunWith(r.DB).Query()
 	if err != nil {
-		log.Error("Error while running query")
+		cclog.Error("Error while running query")
 		return nil, err
 	}

@@ -615,7 +615,7 @@ func (r *JobRepository) jobsDurationStatisticsHistogram(
 	for rows.Next() {
 		point := model.HistoPoint{}
 		if err := rows.Scan(&point.Value, &point.Count); err != nil {
-			log.Warn("Error while scanning rows")
+			cclog.Warn("Error while scanning rows")
 			return nil, err
 		}

@@ -630,7 +630,7 @@ func (r *JobRepository) jobsDurationStatisticsHistogram(
 		}
 	}

-	log.Debugf("Timer jobsStatisticsHistogram %s", time.Since(start))
+	cclog.Debugf("Timer jobsStatisticsHistogram %s", time.Since(start))
 	return points, nil
 }

@@ -652,7 +652,7 @@ func (r *JobRepository) jobsMetricStatisticsHistogram(
 			peak = metricConfig.Peak
 			unit = metricConfig.Unit.Prefix + metricConfig.Unit.Base
 			footprintStat = metricConfig.Footprint
-			log.Debugf("Cluster %s filter found with peak %f for %s", *f.Cluster.Eq, peak, metric)
+			cclog.Debugf("Cluster %s filter found with peak %f for %s", *f.Cluster.Eq, peak, metric)
 		}
 	}

@@ -674,7 +674,7 @@ func (r *JobRepository) jobsMetricStatisticsHistogram(
 		}
 	}

-	// log.Debugf("Metric %s, Peak %f, Unit %s", metric, peak, unit)
+	// cclog.Debugf("Metric %s, Peak %f, Unit %s", metric, peak, unit)
 	// Make bins, see https://jereze.com/code/sql-histogram/ (Modified here)
 	start := time.Now()

@@ -709,7 +709,7 @@ func (r *JobRepository) jobsMetricStatisticsHistogram(

 	rows, err := mainQuery.RunWith(r.DB).Query()
 	if err != nil {
-		log.Errorf("Error while running mainQuery: %s", err)
+		cclog.Errorf("Error while running mainQuery: %s", err)
 		return nil, err
 	}

@@ -726,7 +726,7 @@ func (r *JobRepository) jobsMetricStatisticsHistogram(
 	for rows.Next() { // Fill Count if Bin-No. Matches (Not every Bin exists in DB!)
 		rpoint := model.MetricHistoPoint{}
 		if err := rows.Scan(&rpoint.Bin, &rpoint.Count); err != nil { // Required for Debug: &rpoint.Min, &rpoint.Max
-			log.Warnf("Error while scanning rows for %s", metric)
+			cclog.Warnf("Error while scanning rows for %s", metric)
 			return nil, err // FIXME: Totally bricks cc-backend if returned and if all metrics requested?
 		}

@@ -736,10 +736,10 @@ func (r *JobRepository) jobsMetricStatisticsHistogram(
 				e.Count = rpoint.Count
 				// Only Required For Debug: Check DB returned Min/Max against Backend Init above
 				// if rpoint.Min != nil {
-				// 	log.Warnf(">>>> Bin %d Min Set For %s to %d (Init'd with: %d)", *e.Bin, metric, *rpoint.Min, *e.Min)
+				// 	cclog.Warnf(">>>> Bin %d Min Set For %s to %d (Init'd with: %d)", *e.Bin, metric, *rpoint.Min, *e.Min)
 				// }
 				// if rpoint.Max != nil {
-				// 	log.Warnf(">>>> Bin %d Max Set For %s to %d (Init'd with: %d)", *e.Bin, metric, *rpoint.Max, *e.Max)
+				// 	cclog.Warnf(">>>> Bin %d Max Set For %s to %d (Init'd with: %d)", *e.Bin, metric, *rpoint.Max, *e.Max)
 				// }
 				break
 			}

@@ -749,7 +749,7 @@ func (r *JobRepository) jobsMetricStatisticsHistogram(

 	result := model.MetricHistoPoints{Metric: metric, Unit: unit, Stat: &footprintStat, Data: points}

-	log.Debugf("Timer jobsStatisticsHistogram %s", time.Since(start))
+	cclog.Debugf("Timer jobsStatisticsHistogram %s", time.Since(start))
 	return &result, nil
 }

@@ -762,11 +762,11 @@ func (r *JobRepository) runningJobsMetricStatisticsHistogram(
 	// Get Jobs
 	jobs, err := r.QueryJobs(ctx, filters, &model.PageRequest{Page: 1, ItemsPerPage: 500 + 1}, nil)
 	if err != nil {
-		log.Errorf("Error while querying jobs for footprint: %s", err)
+		cclog.Errorf("Error while querying jobs for footprint: %s", err)
 		return nil
 	}
 	if len(jobs) > 500 {
-		log.Errorf("too many jobs matched (max: %d)", 500)
+		cclog.Errorf("too many jobs matched (max: %d)", 500)
 		return nil
 	}

@@ -782,7 +782,7 @@ func (r *JobRepository) runningJobsMetricStatisticsHistogram(
 		}

 		if err := metricDataDispatcher.LoadAverages(job, metrics, avgs, ctx); err != nil {
-			log.Errorf("Error while loading averages for histogram: %s", err)
+			cclog.Errorf("Error while loading averages for histogram: %s", err)
 			return nil
 		}
 	}
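For readers of the histogram hunks: the metric histogram computes a bin index per row inside SQL, following the linked sql-histogram approach. Pulled out of SQL into plain Go for clarity, equal-width binning of a value in [min, peak] looks roughly like this (a sketch of the arithmetic only; the exact SQL expression lives in the elided part of mainQuery and may differ in detail):

    // binIndex restates CAST(((value - min) / ((peak - min) / bins)) as INTEGER) + 1:
    // values in [min, peak] map onto bins 1..bins.
    func binIndex(value, min, peak float64, bins int) int {
    	if peak <= min {
    		return 1 // degenerate range: everything lands in the first bin
    	}
    	idx := int((value-min)/((peak-min)/float64(bins))) + 1
    	if idx > bins {
    		idx = bins // clamp value == peak into the last bin
    	}
    	return idx
    }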
@@ -1,5 +1,5 @@
 // Copyright (C) NHR@FAU, University Erlangen-Nuremberg.
-// All rights reserved.
+// All rights reserved. This file is part of cc-backend.
 // Use of this source code is governed by a MIT-style
 // license that can be found in the LICENSE file.
 package repository

@@ -19,7 +19,6 @@ func TestBuildJobStatsQuery(t *testing.T) {
 	noErr(t, err)

 	fmt.Printf("SQL: %s\n", sql)
-
 }

 func TestJobStats(t *testing.T) {
@@ -1,5 +1,5 @@
 // Copyright (C) NHR@FAU, University Erlangen-Nuremberg.
-// All rights reserved.
+// All rights reserved. This file is part of cc-backend.
 // Use of this source code is governed by a MIT-style
 // license that can be found in the LICENSE file.
 package repository

@@ -9,8 +9,8 @@ import (
 	"strings"

 	"github.com/ClusterCockpit/cc-backend/pkg/archive"
-	"github.com/ClusterCockpit/cc-backend/pkg/log"
+	cclog "github.com/ClusterCockpit/cc-lib/ccLogger"
-	"github.com/ClusterCockpit/cc-backend/pkg/schema"
+	"github.com/ClusterCockpit/cc-lib/schema"
 	sq "github.com/Masterminds/squirrel"
 )

@@ -18,7 +18,7 @@ import (
 func (r *JobRepository) AddTag(user *schema.User, job int64, tag int64) ([]*schema.Tag, error) {
 	j, err := r.FindByIdWithUser(user, job)
 	if err != nil {
-		log.Warn("Error while finding job by id")
+		cclog.Warn("Error while finding job by id")
 		return nil, err
 	}

@@ -26,19 +26,19 @@ func (r *JobRepository) AddTag(user *schema.User, job int64, tag int64) ([]*sche

 	if _, err := q.RunWith(r.stmtCache).Exec(); err != nil {
 		s, _, _ := q.ToSql()
-		log.Errorf("Error adding tag with %s: %v", s, err)
+		cclog.Errorf("Error adding tag with %s: %v", s, err)
 		return nil, err
 	}

 	tags, err := r.GetTags(user, &job)
 	if err != nil {
-		log.Warn("Error while getting tags for job")
+		cclog.Warn("Error while getting tags for job")
 		return nil, err
 	}

 	archiveTags, err := r.getArchiveTags(&job)
 	if err != nil {
-		log.Warn("Error while getting tags for job")
+		cclog.Warn("Error while getting tags for job")
 		return nil, err
 	}

@@ -48,7 +48,7 @@ func (r *JobRepository) AddTag(user *schema.User, job int64, tag int64) ([]*sche
 func (r *JobRepository) AddTagDirect(job int64, tag int64) ([]*schema.Tag, error) {
 	j, err := r.FindByIdDirect(job)
 	if err != nil {
-		log.Warn("Error while finding job by id")
+		cclog.Warn("Error while finding job by id")
 		return nil, err
 	}

@@ -56,19 +56,19 @@ func (r *JobRepository) AddTagDirect(job int64, tag int64) ([]*schema.Tag, error

 	if _, err := q.RunWith(r.stmtCache).Exec(); err != nil {
 		s, _, _ := q.ToSql()
-		log.Errorf("Error adding tag with %s: %v", s, err)
+		cclog.Errorf("Error adding tag with %s: %v", s, err)
 		return nil, err
 	}

 	tags, err := r.GetTagsDirect(&job)
 	if err != nil {
-		log.Warn("Error while getting tags for job")
+		cclog.Warn("Error while getting tags for job")
 		return nil, err
 	}

 	archiveTags, err := r.getArchiveTags(&job)
 	if err != nil {
-		log.Warn("Error while getting tags for job")
+		cclog.Warn("Error while getting tags for job")
 		return nil, err
 	}

@@ -80,7 +80,7 @@ func (r *JobRepository) AddTagDirect(job int64, tag int64) ([]*schema.Tag, error
 func (r *JobRepository) RemoveTag(user *schema.User, job, tag int64) ([]*schema.Tag, error) {
 	j, err := r.FindByIdWithUser(user, job)
 	if err != nil {
-		log.Warn("Error while finding job by id")
+		cclog.Warn("Error while finding job by id")
 		return nil, err
 	}

@@ -88,19 +88,19 @@ func (r *JobRepository) RemoveTag(user *schema.User, job, tag int64) ([]*schema.

 	if _, err := q.RunWith(r.stmtCache).Exec(); err != nil {
 		s, _, _ := q.ToSql()
-		log.Errorf("Error removing tag with %s: %v", s, err)
+		cclog.Errorf("Error removing tag with %s: %v", s, err)
 		return nil, err
 	}

 	tags, err := r.GetTags(user, &job)
 	if err != nil {
-		log.Warn("Error while getting tags for job")
+		cclog.Warn("Error while getting tags for job")
 		return nil, err
 	}

 	archiveTags, err := r.getArchiveTags(&job)
 	if err != nil {
-		log.Warn("Error while getting tags for job")
+		cclog.Warn("Error while getting tags for job")
 		return nil, err
 	}

@@ -113,14 +113,14 @@ func (r *JobRepository) RemoveJobTagByRequest(user *schema.User, job int64, tagT
 	// Get Tag ID to delete
 	tagID, exists := r.TagId(tagType, tagName, tagScope)
 	if !exists {
-		log.Warnf("Tag does not exist (name, type, scope): %s, %s, %s", tagName, tagType, tagScope)
+		cclog.Warnf("Tag does not exist (name, type, scope): %s, %s, %s", tagName, tagType, tagScope)
 		return nil, fmt.Errorf("tag does not exist (name, type, scope): %s, %s, %s", tagName, tagType, tagScope)
 	}

 	// Get Job
 	j, err := r.FindByIdWithUser(user, job)
 	if err != nil {
-		log.Warn("Error while finding job by id")
+		cclog.Warn("Error while finding job by id")
 		return nil, err
 	}

@@ -129,19 +129,19 @@ func (r *JobRepository) RemoveJobTagByRequest(user *schema.User, job int64, tagT

 	if _, err := q.RunWith(r.stmtCache).Exec(); err != nil {
 		s, _, _ := q.ToSql()
-		log.Errorf("Error removing tag from table 'jobTag' with %s: %v", s, err)
+		cclog.Errorf("Error removing tag from table 'jobTag' with %s: %v", s, err)
 		return nil, err
 	}

 	tags, err := r.GetTags(user, &job)
 	if err != nil {
-		log.Warn("Error while getting tags for job")
+		cclog.Warn("Error while getting tags for job")
 		return nil, err
 	}

 	archiveTags, err := r.getArchiveTags(&job)
 	if err != nil {
-		log.Warn("Error while getting tags for job")
+		cclog.Warn("Error while getting tags for job")
 		return nil, err
 	}

@@ -152,13 +152,13 @@ func (r *JobRepository) removeTagFromArchiveJobs(jobIds []int64) {
 	for _, j := range jobIds {
 		tags, err := r.getArchiveTags(&j)
 		if err != nil {
-			log.Warnf("Error while getting tags for job %d", j)
+			cclog.Warnf("Error while getting tags for job %d", j)
 			continue
 		}

 		job, err := r.FindByIdDirect(j)
 		if err != nil {
-			log.Warnf("Error while getting job %d", j)
+			cclog.Warnf("Error while getting job %d", j)
 			continue
 		}

@@ -172,7 +172,7 @@ func (r *JobRepository) RemoveTagByRequest(tagType string, tagName string, tagSc
 	// Get Tag ID to delete
 	tagID, exists := r.TagId(tagType, tagName, tagScope)
 	if !exists {
-		log.Warnf("Tag does not exist (name, type, scope): %s, %s, %s", tagName, tagType, tagScope)
+		cclog.Warnf("Tag does not exist (name, type, scope): %s, %s, %s", tagName, tagType, tagScope)
 		return fmt.Errorf("tag does not exist (name, type, scope): %s, %s, %s", tagName, tagType, tagScope)
 	}

@@ -192,7 +192,7 @@ func (r *JobRepository) RemoveTagById(tagID int64) error {

 	if _, err := qJobTag.RunWith(r.stmtCache).Exec(); err != nil {
 		s, _, _ := qJobTag.ToSql()
-		log.Errorf("Error removing tag from table 'jobTag' with %s: %v", s, err)
+		cclog.Errorf("Error removing tag from table 'jobTag' with %s: %v", s, err)
 		return err
 	}

@@ -201,7 +201,7 @@ func (r *JobRepository) RemoveTagById(tagID int64) error {

 	if _, err := qTag.RunWith(r.stmtCache).Exec(); err != nil {
 		s, _, _ := qTag.ToSql()
-		log.Errorf("Error removing tag from table 'tag' with %s: %v", s, err)
+		cclog.Errorf("Error removing tag from table 'tag' with %s: %v", s, err)
 		return err
 	}

@@ -223,7 +223,7 @@ func (r *JobRepository) CreateTag(tagType string, tagName string, tagScope strin
 	res, err := q.RunWith(r.stmtCache).Exec()
 	if err != nil {
 		s, _, _ := q.ToSql()
-		log.Errorf("Error inserting tag with %s: %v", s, err)
+		cclog.Errorf("Error inserting tag with %s: %v", s, err)
 		return 0, err
 	}

@@ -272,7 +272,7 @@ func (r *JobRepository) CountTags(user *schema.User) (tags []schema.Tag, counts

 	// Handle Job Ownership
 	if user != nil && user.HasAnyRole([]schema.Role{schema.RoleAdmin, schema.RoleSupport}) { // ADMIN || SUPPORT: Count all jobs
-		// log.Debug("CountTags: User Admin or Support -> Count all Jobs for Tags")
+		// cclog.Debug("CountTags: User Admin or Support -> Count all Jobs for Tags")
 		// Unchanged: Needs to be own case still, due to UserRole/NoRole compatibility handling in else case
 	} else if user != nil && user.HasRole(schema.RoleManager) { // MANAGER: Count own jobs plus project's jobs
 		// Build ("project1", "project2", ...) list of variable length directly in SQL string

@@ -396,7 +396,7 @@ func (r *JobRepository) GetTags(user *schema.User, job *int64) ([]*schema.Tag, e
 	rows, err := q.RunWith(r.stmtCache).Query()
 	if err != nil {
 		s, _, _ := q.ToSql()
-		log.Errorf("Error get tags with %s: %v", s, err)
+		cclog.Errorf("Error get tags with %s: %v", s, err)
 		return nil, err
 	}

@@ -404,7 +404,7 @@ func (r *JobRepository) GetTags(user *schema.User, job *int64) ([]*schema.Tag, e
 	for rows.Next() {
 		tag := &schema.Tag{}
 		if err := rows.Scan(&tag.ID, &tag.Type, &tag.Name, &tag.Scope); err != nil {
-			log.Warn("Error while scanning rows")
+			cclog.Warn("Error while scanning rows")
 			return nil, err
 		}
 		// Handle Scope Filtering: Tag Scope is Global, Private (== Username) or User is auth'd to view Admin Tags

@@ -429,7 +429,7 @@ func (r *JobRepository) GetTagsDirect(job *int64) ([]*schema.Tag, error) {
 	rows, err := q.RunWith(r.stmtCache).Query()
 	if err != nil {
 		s, _, _ := q.ToSql()
-		log.Errorf("Error get tags with %s: %v", s, err)
+		cclog.Errorf("Error get tags with %s: %v", s, err)
 		return nil, err
 	}

@@ -437,7 +437,7 @@ func (r *JobRepository) GetTagsDirect(job *int64) ([]*schema.Tag, error) {
 	for rows.Next() {
 		tag := &schema.Tag{}
 		if err := rows.Scan(&tag.ID, &tag.Type, &tag.Name, &tag.Scope); err != nil {
-			log.Warn("Error while scanning rows")
+			cclog.Warn("Error while scanning rows")
 			return nil, err
 		}
 		tags = append(tags, tag)

@@ -456,7 +456,7 @@ func (r *JobRepository) getArchiveTags(job *int64) ([]*schema.Tag, error) {
 	rows, err := q.RunWith(r.stmtCache).Query()
 	if err != nil {
 		s, _, _ := q.ToSql()
-		log.Errorf("Error get tags with %s: %v", s, err)
+		cclog.Errorf("Error get tags with %s: %v", s, err)
 		return nil, err
 	}

@@ -464,7 +464,7 @@ func (r *JobRepository) getArchiveTags(job *int64) ([]*schema.Tag, error) {
 	for rows.Next() {
 		tag := &schema.Tag{}
 		if err := rows.Scan(&tag.ID, &tag.Type, &tag.Name, &tag.Scope); err != nil {
-			log.Warn("Error while scanning rows")
+			cclog.Warn("Error while scanning rows")
 			return nil, err
 		}
 		tags = append(tags, tag)

@@ -488,7 +488,7 @@ func (r *JobRepository) ImportTag(jobId int64, tagType string, tagName string, t

 	if _, err := q.RunWith(r.stmtCache).Exec(); err != nil {
 		s, _, _ := q.ToSql()
-		log.Errorf("Error adding tag on import with %s: %v", s, err)
+		cclog.Errorf("Error adding tag on import with %s: %v", s, err)
 		return err
 	}
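One pattern these tag hunks repeat is worth a standalone note: on a failed Exec, the statement is re-rendered with ToSql purely for the log line. A minimal sketch of that shape (same squirrel and cclog APIs as in the hunks above; the table and variable names are illustrative, not from this PR):

    q := sq.Insert("jobtag").Columns("job_id", "tag_id").Values(jobID, tagID)
    if _, err := q.RunWith(r.stmtCache).Exec(); err != nil {
    	// ToSql is only evaluated on the failure path, so the happy path
    	// pays nothing for the extra diagnostics in the log line
    	s, _, _ := q.ToSql()
    	cclog.Errorf("Error adding tag with %s: %v", s, err)
    	return nil, err
    }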
@@ -1,11 +1,11 @@
 // Copyright (C) NHR@FAU, University Erlangen-Nuremberg.
-// All rights reserved.
+// All rights reserved. This file is part of cc-backend.
 // Use of this source code is governed by a MIT-style
 // license that can be found in the LICENSE file.
 package repository

 import (
-	"github.com/ClusterCockpit/cc-backend/pkg/log"
+	cclog "github.com/ClusterCockpit/cc-lib/ccLogger"
 	"github.com/jmoiron/sqlx"
 )

@@ -20,7 +20,7 @@ func (r *JobRepository) TransactionInit() (*Transaction, error) {

 	t.tx, err = r.DB.Beginx()
 	if err != nil {
-		log.Warn("Error while bundling transactions")
+		cclog.Warn("Error while bundling transactions")
 		return nil, err
 	}
 	return t, nil

@@ -30,14 +30,14 @@ func (r *JobRepository) TransactionCommit(t *Transaction) error {
 	var err error
 	if t.tx != nil {
 		if err = t.tx.Commit(); err != nil {
-			log.Warn("Error while committing transactions")
+			cclog.Warn("Error while committing transactions")
 			return err
 		}
 	}

 	t.tx, err = r.DB.Beginx()
 	if err != nil {
-		log.Warn("Error while bundling transactions")
+		cclog.Warn("Error while bundling transactions")
 		return err
 	}

@@ -46,7 +46,7 @@ func (r *JobRepository) TransactionCommit(t *Transaction) error {

 func (r *JobRepository) TransactionEnd(t *Transaction) error {
 	if err := t.tx.Commit(); err != nil {
-		log.Warn("Error while committing SQL transactions")
+		cclog.Warn("Error while committing SQL transactions")
 		return err
 	}
 	return nil

@@ -59,13 +59,13 @@ func (r *JobRepository) TransactionAddNamed(
 ) (int64, error) {
 	res, err := t.tx.NamedExec(query, args)
 	if err != nil {
-		log.Errorf("Named Exec failed: %v", err)
+		cclog.Errorf("Named Exec failed: %v", err)
 		return 0, err
 	}

 	id, err := res.LastInsertId()
 	if err != nil {
-		log.Errorf("repository initDB(): %v", err)
+		cclog.Errorf("repository initDB(): %v", err)
 		return 0, err
 	}

@@ -73,16 +73,15 @@ func (r *JobRepository) TransactionAddNamed(
 }

 func (r *JobRepository) TransactionAdd(t *Transaction, query string, args ...interface{}) (int64, error) {
-
 	res, err := t.tx.Exec(query, args...)
 	if err != nil {
-		log.Errorf("TransactionAdd(), Exec() Error: %v", err)
+		cclog.Errorf("TransactionAdd(), Exec() Error: %v", err)
 		return 0, err
 	}

 	id, err := res.LastInsertId()
 	if err != nil {
-		log.Errorf("TransactionAdd(), LastInsertId() Error: %v", err)
+		cclog.Errorf("TransactionAdd(), LastInsertId() Error: %v", err)
 		return 0, err
 	}
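For orientation, the transaction helpers touched above compose like this on the caller side (a sketch only; the INSERT statement, field names, and error handling are placeholders, not from this PR):

    t, err := r.TransactionInit()
    if err != nil {
    	return err
    }
    for _, job := range jobs {
    	// TransactionAdd wraps tx.Exec and returns the last insert id
    	if _, err := r.TransactionAdd(t,
    		`INSERT INTO job (job_id, cluster) VALUES (?, ?)`,
    		job.JobID, job.Cluster); err != nil {
    		return err
    	}
    }
    // TransactionEnd commits whatever is still pending
    return r.TransactionEnd(t)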
@@ -1,5 +1,5 @@
 // Copyright (C) NHR@FAU, University Erlangen-Nuremberg.
-// All rights reserved.
+// All rights reserved. This file is part of cc-backend.
 // Use of this source code is governed by a MIT-style
 // license that can be found in the LICENSE file.
 package repository

@@ -13,13 +13,13 @@ import (
 	"strings"
 	"sync"

+	"github.com/ClusterCockpit/cc-backend/internal/config"
 	"github.com/ClusterCockpit/cc-backend/internal/graph/model"
-	"github.com/ClusterCockpit/cc-backend/pkg/log"
+	cclog "github.com/ClusterCockpit/cc-lib/ccLogger"
-	"github.com/ClusterCockpit/cc-backend/pkg/schema"
+	"github.com/ClusterCockpit/cc-lib/schema"
 	sq "github.com/Masterminds/squirrel"
 	"github.com/jmoiron/sqlx"
 	"golang.org/x/crypto/bcrypt"
-	"github.com/ClusterCockpit/cc-backend/internal/config"
 )

@@ -50,7 +50,7 @@ func (r *UserRepository) GetUser(username string) (*schema.User, error) {
 	if err := sq.Select("password", "ldap", "name", "roles", "email", "projects").From("hpc_user").
 		Where("hpc_user.username = ?", username).RunWith(r.DB).
 		QueryRow().Scan(&hashedPassword, &user.AuthSource, &name, &rawRoles, &email, &rawProjects); err != nil {
-		log.Warnf("Error while querying user '%v' from database", username)
+		cclog.Warnf("Error while querying user '%v' from database", username)
 		return nil, err
 	}

@@ -59,7 +59,7 @@ func (r *UserRepository) GetUser(username string) (*schema.User, error) {
 	user.Email = email.String
 	if rawRoles.Valid {
 		if err := json.Unmarshal([]byte(rawRoles.String), &user.Roles); err != nil {
-			log.Warn("Error while unmarshaling raw roles from DB")
+			cclog.Warn("Error while unmarshaling raw roles from DB")
 			return nil, err
 		}
 	}

@@ -76,14 +76,14 @@ func (r *UserRepository) GetLdapUsernames() ([]string, error) {
 	var users []string
 	rows, err := r.DB.Query(`SELECT username FROM hpc_user WHERE hpc_user.ldap = 1`)
 	if err != nil {
-		log.Warn("Error while querying usernames")
+		cclog.Warn("Error while querying usernames")
 		return nil, err
 	}

 	for rows.Next() {
 		var username string
 		if err := rows.Scan(&username); err != nil {
-			log.Warnf("Error while scanning for user '%s'", username)
+			cclog.Warnf("Error while scanning for user '%s'", username)
 			return nil, err
 		}

@@ -111,7 +111,7 @@ func (r *UserRepository) AddUser(user *schema.User) error {
 	if user.Password != "" {
 		password, err := bcrypt.GenerateFromPassword([]byte(user.Password), bcrypt.DefaultCost)
 		if err != nil {
-			log.Error("Error while encrypting new user password")
+			cclog.Error("Error while encrypting new user password")
 			return err
 		}
 		cols = append(cols, "password")

@@ -123,21 +123,21 @@ func (r *UserRepository) AddUser(user *schema.User) error {
 	}

 	if _, err := sq.Insert("hpc_user").Columns(cols...).Values(vals...).RunWith(r.DB).Exec(); err != nil {
-		log.Errorf("Error while inserting new user '%v' into DB", user.Username)
+		cclog.Errorf("Error while inserting new user '%v' into DB", user.Username)
 		return err
 	}

-	log.Infof("new user %#v created (roles: %s, auth-source: %d, projects: %s)", user.Username, rolesJson, user.AuthSource, projectsJson)
+	cclog.Infof("new user %#v created (roles: %s, auth-source: %d, projects: %s)", user.Username, rolesJson, user.AuthSource, projectsJson)

 	defaultMetricsCfg, err := config.LoadDefaultMetricsConfig()
 	if err != nil {
-		log.Errorf("Error loading default metrics config: %v", err)
+		cclog.Errorf("Error loading default metrics config: %v", err)
 	} else if defaultMetricsCfg != nil {
 		for _, cluster := range defaultMetricsCfg.Clusters {
 			metricsArray := config.ParseMetricsString(cluster.DefaultMetrics)
 			metricsJSON, err := json.Marshal(metricsArray)
 			if err != nil {
-				log.Errorf("Error marshaling default metrics for cluster %s: %v", cluster.Name, err)
+				cclog.Errorf("Error marshaling default metrics for cluster %s: %v", cluster.Name, err)
 				continue
 			}
 			confKey := "job_view_selectedMetrics:" + cluster.Name

@@ -145,9 +145,9 @@ func (r *UserRepository) AddUser(user *schema.User) error {
 				Columns("username", "confkey", "value").
 				Values(user.Username, confKey, string(metricsJSON)).
 				RunWith(r.DB).Exec(); err != nil {
-				log.Errorf("Error inserting default job view metrics for user %s and cluster %s: %v", user.Username, cluster.Name, err)
+				cclog.Errorf("Error inserting default job view metrics for user %s and cluster %s: %v", user.Username, cluster.Name, err)
 			} else {
-				log.Infof("Default job view metrics for user %s and cluster %s set to %s", user.Username, cluster.Name, string(metricsJSON))
+				cclog.Infof("Default job view metrics for user %s and cluster %s set to %s", user.Username, cluster.Name, string(metricsJSON))
 			}
 		}
 	}

@@ -160,7 +160,7 @@ func (r *UserRepository) UpdateUser(dbUser *schema.User, user *schema.User) erro
 	// TODO: Discuss updatable fields
 	if dbUser.Name != user.Name {
 		if _, err := sq.Update("hpc_user").Set("name", user.Name).Where("hpc_user.username = ?", dbUser.Username).RunWith(r.DB).Exec(); err != nil {
-			log.Errorf("error while updating name of user '%s'", user.Username)
+			cclog.Errorf("error while updating name of user '%s'", user.Username)
 			return err
 		}
 	}

@@ -179,10 +179,10 @@ func (r *UserRepository) UpdateUser(dbUser *schema.User, user *schema.User) erro
 func (r *UserRepository) DelUser(username string) error {
 	_, err := r.DB.Exec(`DELETE FROM hpc_user WHERE hpc_user.username = ?`, username)
 	if err != nil {
-		log.Errorf("Error while deleting user '%s' from DB", username)
+		cclog.Errorf("Error while deleting user '%s' from DB", username)
 		return err
 	}
-	log.Infof("deleted user '%s' from DB", username)
+	cclog.Infof("deleted user '%s' from DB", username)
 	return nil
 }

@@ -194,7 +194,7 @@ func (r *UserRepository) ListUsers(specialsOnly bool) ([]*schema.User, error) {

 	rows, err := q.RunWith(r.DB).Query()
 	if err != nil {
-		log.Warn("Error while querying user list")
+		cclog.Warn("Error while querying user list")
 		return nil, err
 	}

@@ -206,12 +206,12 @@ func (r *UserRepository) ListUsers(specialsOnly bool) ([]*schema.User, error) {
 		user := &schema.User{}
 		var name, email sql.NullString
 		if err := rows.Scan(&user.Username, &name, &email, &rawroles, &rawprojects); err != nil {
-			log.Warn("Error while scanning user list")
+			cclog.Warn("Error while scanning user list")
 			return nil, err
 		}

 		if err := json.Unmarshal([]byte(rawroles), &user.Roles); err != nil {
-			log.Warn("Error while unmarshaling raw role list")
+			cclog.Warn("Error while unmarshaling raw role list")
 			return nil, err
 		}

@@ -234,7 +234,7 @@ func (r *UserRepository) AddRole(
 	newRole := strings.ToLower(queryrole)
 	user, err := r.GetUser(username)
 	if err != nil {
-		log.Warnf("Could not load user '%s'", username)
+		cclog.Warnf("Could not load user '%s'", username)
 		return err
 	}

@@ -249,7 +249,7 @@ func (r *UserRepository) AddRole(

 	roles, _ := json.Marshal(append(user.Roles, newRole))
 	if _, err := sq.Update("hpc_user").Set("roles", roles).Where("hpc_user.username = ?", username).RunWith(r.DB).Exec(); err != nil {
-		log.Errorf("error while adding new role for user '%s'", user.Username)
+		cclog.Errorf("error while adding new role for user '%s'", user.Username)
 		return err
 	}
 	return nil

@@ -259,7 +259,7 @@ func (r *UserRepository) RemoveRole(ctx context.Context, username string, queryr
 	oldRole := strings.ToLower(queryrole)
 	user, err := r.GetUser(username)
 	if err != nil {
-		log.Warnf("Could not load user '%s'", username)
+		cclog.Warnf("Could not load user '%s'", username)
 		return err
 	}

@@ -285,7 +285,7 @@ func (r *UserRepository) RemoveRole(ctx context.Context, username string, queryr

 	mroles, _ := json.Marshal(newroles)
 	if _, err := sq.Update("hpc_user").Set("roles", mroles).Where("hpc_user.username = ?", username).RunWith(r.DB).Exec(); err != nil {
-		log.Errorf("Error while removing role for user '%s'", user.Username)
+		cclog.Errorf("Error while removing role for user '%s'", user.Username)
 		return err
 	}
 	return nil

@@ -364,10 +364,10 @@ const ContextUserKey ContextKey = "user"
 func GetUserFromContext(ctx context.Context) *schema.User {
 	x := ctx.Value(ContextUserKey)
 	if x == nil {
-		log.Warnf("no user retrieved from context")
+		cclog.Warnf("no user retrieved from context")
 		return nil
 	}
-	// log.Infof("user retrieved from context: %v", x.(*schema.User))
+	// cclog.Infof("user retrieved from context: %v", x.(*schema.User))
 	return x.(*schema.User)
 }

@@ -385,11 +385,11 @@ func (r *UserRepository) FetchUserInCtx(ctx context.Context, username string) (*
 		if err == sql.ErrNoRows {
 			/* This warning will be logged *often* for non-local users, i.e. users mentioned only in job-table or archive, */
 			/* since FetchUser will be called to retrieve full name and mail for every job in query/list */
-			// log.Warnf("User '%s' Not found in DB", username)
+			// cclog.Warnf("User '%s' Not found in DB", username)
 			return nil, nil
 		}

-		log.Warnf("Error while fetching user '%s'", username)
+		cclog.Warnf("Error while fetching user '%s'", username)
 		return nil, err
 	}
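The GetUserFromContext accessor changed above is consumed on the handler side roughly like this (a sketch; the handler name, response handling, and the usual net/http and fmt imports are assumed, not part of this diff):

    func meHandler(rw http.ResponseWriter, req *http.Request) {
    	user := repository.GetUserFromContext(req.Context())
    	if user == nil {
    		// the accessor already emitted the cclog.Warnf seen above
    		http.Error(rw, "unauthorized", http.StatusUnauthorized)
    		return
    	}
    	fmt.Fprintf(rw, "hello %s", user.Username)
    }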
@@ -1,5 +1,5 @@
 // Copyright (C) NHR@FAU, University Erlangen-Nuremberg.
-// All rights reserved.
+// All rights reserved. This file is part of cc-backend.
 // Use of this source code is governed by a MIT-style
 // license that can be found in the LICENSE file.
 package repository

@@ -10,9 +10,9 @@ import (
 	"time"

 	"github.com/ClusterCockpit/cc-backend/internal/config"
-	"github.com/ClusterCockpit/cc-backend/pkg/log"
+	cclog "github.com/ClusterCockpit/cc-lib/ccLogger"
-	"github.com/ClusterCockpit/cc-backend/pkg/lrucache"
+	"github.com/ClusterCockpit/cc-lib/lrucache"
-	"github.com/ClusterCockpit/cc-backend/pkg/schema"
+	"github.com/ClusterCockpit/cc-lib/schema"
 	"github.com/jmoiron/sqlx"
 )

@@ -35,7 +35,7 @@ func GetUserCfgRepo() *UserCfgRepo {

 		lookupConfigStmt, err := db.DB.Preparex(`SELECT confkey, value FROM configuration WHERE configuration.username = ?`)
 		if err != nil {
-			log.Fatalf("User Config: Call 'db.DB.Preparex()' failed.\nError: %s\n", err.Error())
+			cclog.Fatalf("User Config: Call 'db.DB.Preparex()' failed.\nError: %s\n", err.Error())
 		}

 		userCfgRepoInstance = &UserCfgRepo{

@@ -70,7 +70,7 @@ func (uCfg *UserCfgRepo) GetUIConfig(user *schema.User) (map[string]interface{},

 		rows, err := uCfg.Lookup.Query(user.Username)
 		if err != nil {
-			log.Warnf("Error while looking up user uiconfig for user '%v'", user.Username)
+			cclog.Warnf("Error while looking up user uiconfig for user '%v'", user.Username)
 			return err, 0, 0
 		}

@@ -79,13 +79,13 @@ func (uCfg *UserCfgRepo) GetUIConfig(user *schema.User) (map[string]interface{},
 		for rows.Next() {
 			var key, rawval string
 			if err := rows.Scan(&key, &rawval); err != nil {
-				log.Warn("Error while scanning user uiconfig values")
+				cclog.Warn("Error while scanning user uiconfig values")
 				return err, 0, 0
 			}

 			var val interface{}
 			if err := json.Unmarshal([]byte(rawval), &val); err != nil {
-				log.Warn("Error while unmarshaling raw user uiconfig json")
+				cclog.Warn("Error while unmarshaling raw user uiconfig json")
 				return err, 0, 0
 			}

@@ -100,7 +100,7 @@ func (uCfg *UserCfgRepo) GetUIConfig(user *schema.User) (map[string]interface{},
 		return uiconfig, 24 * time.Hour, size
 	})
 	if err, ok := data.(error); ok {
-		log.Error("Error in returned dataset")
+		cclog.Error("Error in returned dataset")
 		return nil, err
 	}

@@ -117,7 +117,7 @@ func (uCfg *UserCfgRepo) UpdateConfig(
 	if user == nil {
 		var val interface{}
 		if err := json.Unmarshal([]byte(value), &val); err != nil {
-			log.Warn("Error while unmarshaling raw user config json")
+			cclog.Warn("Error while unmarshaling raw user config json")
 			return err
 		}

@@ -128,7 +128,7 @@ func (uCfg *UserCfgRepo) UpdateConfig(
 	}

 	if _, err := uCfg.DB.Exec(`REPLACE INTO configuration (username, confkey, value) VALUES (?, ?, ?)`, user.Username, key, value); err != nil {
-		log.Warnf("Error while replacing user config in DB for user '%v'", user.Username)
+		cclog.Warnf("Error while replacing user config in DB for user '%v'", user.Username)
 		return err
 	}
|
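GetUIConfig above is a cache-aside lookup: the per-user UI config is computed from the configuration table once, then served from an in-memory LRU for a day. A sketch of that pattern, assuming cc-lib's lrucache keeps the Get(key, computer) signature implied by the hunk, where the computer returns value, TTL and size, and errors travel through the cache as values; loadConfigFromDB is a hypothetical stand-in for the prepared SQL lookup:

package repository

import (
	"time"

	"github.com/ClusterCockpit/cc-lib/lrucache"
)

// loadConfigFromDB is a hypothetical stand-in for the configuration
// query above; it returns the merged config, an approximate size for
// the cache accounting, and any error.
func loadConfigFromDB(username string) (map[string]any, int, error) {
	cfg := map[string]any{"plot_general_colorscheme": "default"}
	return cfg, 64, nil
}

func getUIConfig(cache *lrucache.Cache, username string) (map[string]any, error) {
	data := cache.Get(username, func() (any, time.Duration, int) {
		cfg, size, err := loadConfigFromDB(username)
		if err != nil {
			return err, 0, 0 // errors ride through the cache as values
		}
		return cfg, 24 * time.Hour, size // cache for a day, as above
	})
	if err, ok := data.(error); ok {
		return nil, err
	}
	return data.(map[string]any), nil
}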
@ -1,5 +1,5 @@
|
|||||||
// Copyright (C) NHR@FAU, University Erlangen-Nuremberg.
|
// Copyright (C) NHR@FAU, University Erlangen-Nuremberg.
|
||||||
// All rights reserved.
|
// All rights reserved. This file is part of cc-backend.
|
||||||
// Use of this source code is governed by a MIT-style
|
// Use of this source code is governed by a MIT-style
|
||||||
// license that can be found in the LICENSE file.
|
// license that can be found in the LICENSE file.
|
||||||
package repository
|
package repository
|
||||||
@ -10,8 +10,8 @@ import (
|
|||||||
"testing"
|
"testing"
|
||||||
|
|
||||||
"github.com/ClusterCockpit/cc-backend/internal/config"
|
"github.com/ClusterCockpit/cc-backend/internal/config"
|
||||||
"github.com/ClusterCockpit/cc-backend/pkg/log"
|
cclog "github.com/ClusterCockpit/cc-lib/ccLogger"
|
||||||
"github.com/ClusterCockpit/cc-backend/pkg/schema"
|
"github.com/ClusterCockpit/cc-lib/schema"
|
||||||
_ "github.com/mattn/go-sqlite3"
|
_ "github.com/mattn/go-sqlite3"
|
||||||
)
|
)
|
||||||
|
|
||||||
@ -39,7 +39,7 @@ func setupUserTest(t *testing.T) *UserCfgRepo {
|
|||||||
} } ]
|
} } ]
|
||||||
}`
|
}`
|
||||||
|
|
||||||
log.Init("info", true)
|
cclog.Init("info", true)
|
||||||
dbfilepath := "testdata/job.db"
|
dbfilepath := "testdata/job.db"
|
||||||
err := MigrateDB("sqlite3", dbfilepath)
|
err := MigrateDB("sqlite3", dbfilepath)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
|
@ -1,5 +1,5 @@
|
|||||||
// Copyright (C) NHR@FAU, University Erlangen-Nuremberg.
|
// Copyright (C) NHR@FAU, University Erlangen-Nuremberg.
|
||||||
// All rights reserved.
|
// All rights reserved. This file is part of cc-backend.
|
||||||
// Use of this source code is governed by a MIT-style
|
// Use of this source code is governed by a MIT-style
|
||||||
// license that can be found in the LICENSE file.
|
// license that can be found in the LICENSE file.
|
||||||
package routerConfig
|
package routerConfig
|
||||||
@ -16,10 +16,10 @@ import (
|
|||||||
"github.com/ClusterCockpit/cc-backend/internal/config"
|
"github.com/ClusterCockpit/cc-backend/internal/config"
|
||||||
"github.com/ClusterCockpit/cc-backend/internal/graph/model"
|
"github.com/ClusterCockpit/cc-backend/internal/graph/model"
|
||||||
"github.com/ClusterCockpit/cc-backend/internal/repository"
|
"github.com/ClusterCockpit/cc-backend/internal/repository"
|
||||||
"github.com/ClusterCockpit/cc-backend/internal/util"
|
|
||||||
"github.com/ClusterCockpit/cc-backend/pkg/log"
|
|
||||||
"github.com/ClusterCockpit/cc-backend/pkg/schema"
|
|
||||||
"github.com/ClusterCockpit/cc-backend/web"
|
"github.com/ClusterCockpit/cc-backend/web"
|
||||||
|
cclog "github.com/ClusterCockpit/cc-lib/ccLogger"
|
||||||
|
"github.com/ClusterCockpit/cc-lib/schema"
|
||||||
|
"github.com/ClusterCockpit/cc-lib/util"
|
||||||
"github.com/gorilla/mux"
|
"github.com/gorilla/mux"
|
||||||
)
|
)
|
||||||
|
|
||||||
@ -57,23 +57,23 @@ func setupHomeRoute(i InfoType, r *http.Request) InfoType {
|
|||||||
// startJobCount := time.Now()
|
// startJobCount := time.Now()
|
||||||
stats, err := jobRepo.JobCountGrouped(r.Context(), nil, &groupBy)
|
stats, err := jobRepo.JobCountGrouped(r.Context(), nil, &groupBy)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
log.Warnf("failed to count jobs: %s", err.Error())
|
cclog.Warnf("failed to count jobs: %s", err.Error())
|
||||||
}
|
}
|
||||||
// log.Infof("Timer HOME ROUTE startJobCount: %s", time.Since(startJobCount))
|
// cclog.Infof("Timer HOME ROUTE startJobCount: %s", time.Since(startJobCount))
|
||||||
|
|
||||||
// startRunningJobCount := time.Now()
|
// startRunningJobCount := time.Now()
|
||||||
stats, err = jobRepo.AddJobCountGrouped(r.Context(), nil, &groupBy, stats, "running")
|
stats, err = jobRepo.AddJobCountGrouped(r.Context(), nil, &groupBy, stats, "running")
|
||||||
if err != nil {
|
if err != nil {
|
||||||
log.Warnf("failed to count running jobs: %s", err.Error())
|
cclog.Warnf("failed to count running jobs: %s", err.Error())
|
||||||
}
|
}
|
||||||
// log.Infof("Timer HOME ROUTE startRunningJobCount: %s", time.Since(startRunningJobCount))
|
// cclog.Infof("Timer HOME ROUTE startRunningJobCount: %s", time.Since(startRunningJobCount))
|
||||||
|
|
||||||
i["clusters"] = stats
|
i["clusters"] = stats
|
||||||
|
|
||||||
if util.CheckFileExists("./var/notice.txt") {
|
if util.CheckFileExists("./var/notice.txt") {
|
||||||
msg, err := os.ReadFile("./var/notice.txt")
|
msg, err := os.ReadFile("./var/notice.txt")
|
||||||
if err != nil {
|
if err != nil {
|
||||||
log.Warnf("failed to read notice.txt file: %s", err.Error())
|
cclog.Warnf("failed to read notice.txt file: %s", err.Error())
|
||||||
} else {
|
} else {
|
||||||
i["message"] = string(msg)
|
i["message"] = string(msg)
|
||||||
}
|
}
|
||||||
@ -178,7 +178,7 @@ func setupTaglistRoute(i InfoType, r *http.Request) InfoType {
|
|||||||
tags, counts, err := jobRepo.CountTags(repository.GetUserFromContext(r.Context()))
|
tags, counts, err := jobRepo.CountTags(repository.GetUserFromContext(r.Context()))
|
||||||
tagMap := make(map[string][]map[string]interface{})
|
tagMap := make(map[string][]map[string]interface{})
|
||||||
if err != nil {
|
if err != nil {
|
||||||
log.Warnf("GetTags failed: %s", err.Error())
|
cclog.Warnf("GetTags failed: %s", err.Error())
|
||||||
i["tagmap"] = tagMap
|
i["tagmap"] = tagMap
|
||||||
return i
|
return i
|
||||||
}
|
}
|
||||||
|
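setupHomeRoute above treats ./var/notice.txt as optional: existence is probed with util.CheckFileExists first, and a failed read only produces a warning instead of breaking the page. The same pattern in isolation, using the cc-lib util package the file now imports:

package main

import (
	"fmt"
	"os"

	"github.com/ClusterCockpit/cc-lib/util"
)

func main() {
	if util.CheckFileExists("./var/notice.txt") {
		msg, err := os.ReadFile("./var/notice.txt")
		if err != nil {
			fmt.Printf("failed to read notice.txt file: %s\n", err.Error())
		} else if len(msg) > 0 {
			fmt.Println(string(msg)) // becomes i["message"] in the route
		}
	}
}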
@ -15,10 +15,10 @@ import (
|
|||||||
"text/template"
|
"text/template"
|
||||||
|
|
||||||
"github.com/ClusterCockpit/cc-backend/internal/repository"
|
"github.com/ClusterCockpit/cc-backend/internal/repository"
|
||||||
"github.com/ClusterCockpit/cc-backend/internal/util"
|
|
||||||
"github.com/ClusterCockpit/cc-backend/pkg/archive"
|
"github.com/ClusterCockpit/cc-backend/pkg/archive"
|
||||||
"github.com/ClusterCockpit/cc-backend/pkg/log"
|
cclog "github.com/ClusterCockpit/cc-lib/ccLogger"
|
||||||
"github.com/ClusterCockpit/cc-backend/pkg/schema"
|
"github.com/ClusterCockpit/cc-lib/schema"
|
||||||
|
"github.com/ClusterCockpit/cc-lib/util"
|
||||||
"github.com/expr-lang/expr"
|
"github.com/expr-lang/expr"
|
||||||
"github.com/expr-lang/expr/vm"
|
"github.com/expr-lang/expr/vm"
|
||||||
)
|
)
|
||||||
@ -66,7 +66,7 @@ type JobClassTagger struct {
|
|||||||
func (t *JobClassTagger) prepareRule(b []byte, fns string) {
|
func (t *JobClassTagger) prepareRule(b []byte, fns string) {
|
||||||
var rule RuleFormat
|
var rule RuleFormat
|
||||||
if err := json.NewDecoder(bytes.NewReader(b)).Decode(&rule); err != nil {
|
if err := json.NewDecoder(bytes.NewReader(b)).Decode(&rule); err != nil {
|
||||||
log.Warn("Error while decoding raw job meta json")
|
cclog.Warn("Error while decoding raw job meta json")
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -80,7 +80,7 @@ func (t *JobClassTagger) prepareRule(b []byte, fns string) {
|
|||||||
for _, p := range rule.Parameters {
|
for _, p := range rule.Parameters {
|
||||||
param, ok := t.parameters[p]
|
param, ok := t.parameters[p]
|
||||||
if !ok {
|
if !ok {
|
||||||
log.Warnf("prepareRule() > missing parameter %s in rule %s", p, fns)
|
cclog.Warnf("prepareRule() > missing parameter %s in rule %s", p, fns)
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
ri.env[p] = param
|
ri.env[p] = param
|
||||||
@ -93,7 +93,7 @@ func (t *JobClassTagger) prepareRule(b []byte, fns string) {
|
|||||||
for _, r := range rule.Requirements {
|
for _, r := range rule.Requirements {
|
||||||
req, err := expr.Compile(r, expr.AsBool())
|
req, err := expr.Compile(r, expr.AsBool())
|
||||||
if err != nil {
|
if err != nil {
|
||||||
log.Errorf("error compiling requirement %s: %#v", r, err)
|
cclog.Errorf("error compiling requirement %s: %#v", r, err)
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
ri.requirements = append(ri.requirements, req)
|
ri.requirements = append(ri.requirements, req)
|
||||||
@ -103,7 +103,7 @@ func (t *JobClassTagger) prepareRule(b []byte, fns string) {
|
|||||||
for _, v := range rule.Variables {
|
for _, v := range rule.Variables {
|
||||||
req, err := expr.Compile(v.Expr, expr.AsFloat64())
|
req, err := expr.Compile(v.Expr, expr.AsFloat64())
|
||||||
if err != nil {
|
if err != nil {
|
||||||
log.Errorf("error compiling requirement %s: %#v", v.Name, err)
|
cclog.Errorf("error compiling requirement %s: %#v", v.Name, err)
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
ri.variables = append(ri.variables, ruleVariable{name: v.Name, expr: req})
|
ri.variables = append(ri.variables, ruleVariable{name: v.Name, expr: req})
|
||||||
@ -112,7 +112,7 @@ func (t *JobClassTagger) prepareRule(b []byte, fns string) {
|
|||||||
// compile rule
|
// compile rule
|
||||||
exp, err := expr.Compile(rule.Rule, expr.AsBool())
|
exp, err := expr.Compile(rule.Rule, expr.AsBool())
|
||||||
if err != nil {
|
if err != nil {
|
||||||
log.Errorf("error compiling rule %s: %#v", fns, err)
|
cclog.Errorf("error compiling rule %s: %#v", fns, err)
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
ri.rule = exp
|
ri.rule = exp
|
||||||
@ -120,9 +120,9 @@ func (t *JobClassTagger) prepareRule(b []byte, fns string) {
|
|||||||
// prepare hint template
|
// prepare hint template
|
||||||
ri.hint, err = template.New(fns).Parse(rule.Hint)
|
ri.hint, err = template.New(fns).Parse(rule.Hint)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
log.Errorf("error processing template %s: %#v", fns, err)
|
cclog.Errorf("error processing template %s: %#v", fns, err)
|
||||||
}
|
}
|
||||||
log.Infof("prepareRule() > processing %s with %d requirements and %d variables", fns, len(ri.requirements), len(ri.variables))
|
cclog.Infof("prepareRule() > processing %s with %d requirements and %d variables", fns, len(ri.requirements), len(ri.variables))
|
||||||
|
|
||||||
t.rules[rule.Tag] = ri
|
t.rules[rule.Tag] = ri
|
||||||
}
|
}
|
||||||
@ -135,19 +135,19 @@ func (t *JobClassTagger) EventMatch(s string) bool {
|
|||||||
func (t *JobClassTagger) EventCallback() {
|
func (t *JobClassTagger) EventCallback() {
|
||||||
files, err := os.ReadDir(t.cfgPath)
|
files, err := os.ReadDir(t.cfgPath)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
log.Fatal(err)
|
cclog.Fatal(err)
|
||||||
}
|
}
|
||||||
|
|
||||||
if util.CheckFileExists(t.cfgPath + "/parameters.json") {
|
if util.CheckFileExists(t.cfgPath + "/parameters.json") {
|
||||||
log.Info("Merge parameters")
|
cclog.Info("Merge parameters")
|
||||||
b, err := os.ReadFile(t.cfgPath + "/parameters.json")
|
b, err := os.ReadFile(t.cfgPath + "/parameters.json")
|
||||||
if err != nil {
|
if err != nil {
|
||||||
log.Warnf("prepareRule() > open file error: %v", err)
|
cclog.Warnf("prepareRule() > open file error: %v", err)
|
||||||
}
|
}
|
||||||
|
|
||||||
var paramTmp map[string]any
|
var paramTmp map[string]any
|
||||||
if err := json.NewDecoder(bytes.NewReader(b)).Decode(¶mTmp); err != nil {
|
if err := json.NewDecoder(bytes.NewReader(b)).Decode(¶mTmp); err != nil {
|
||||||
log.Warn("Error while decoding parameters.json")
|
cclog.Warn("Error while decoding parameters.json")
|
||||||
}
|
}
|
||||||
|
|
||||||
maps.Copy(t.parameters, paramTmp)
|
maps.Copy(t.parameters, paramTmp)
|
||||||
@ -156,11 +156,11 @@ func (t *JobClassTagger) EventCallback() {
|
|||||||
for _, fn := range files {
|
for _, fn := range files {
|
||||||
fns := fn.Name()
|
fns := fn.Name()
|
||||||
if fns != "parameters.json" {
|
if fns != "parameters.json" {
|
||||||
log.Debugf("Process: %s", fns)
|
cclog.Debugf("Process: %s", fns)
|
||||||
filename := fmt.Sprintf("%s/%s", t.cfgPath, fns)
|
filename := fmt.Sprintf("%s/%s", t.cfgPath, fns)
|
||||||
b, err := os.ReadFile(filename)
|
b, err := os.ReadFile(filename)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
log.Warnf("prepareRule() > open file error: %v", err)
|
cclog.Warnf("prepareRule() > open file error: %v", err)
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
t.prepareRule(b, fns)
|
t.prepareRule(b, fns)
|
||||||
@ -169,15 +169,15 @@ func (t *JobClassTagger) EventCallback() {
|
|||||||
}
|
}
|
||||||
|
|
||||||
func (t *JobClassTagger) initParameters() error {
|
func (t *JobClassTagger) initParameters() error {
|
||||||
log.Info("Initialize parameters")
|
cclog.Info("Initialize parameters")
|
||||||
b, err := jobclassFiles.ReadFile("jobclasses/parameters.json")
|
b, err := jobclassFiles.ReadFile("jobclasses/parameters.json")
|
||||||
if err != nil {
|
if err != nil {
|
||||||
log.Warnf("prepareRule() > open file error: %v", err)
|
cclog.Warnf("prepareRule() > open file error: %v", err)
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
|
||||||
if err := json.NewDecoder(bytes.NewReader(b)).Decode(&t.parameters); err != nil {
|
if err := json.NewDecoder(bytes.NewReader(b)).Decode(&t.parameters); err != nil {
|
||||||
log.Warn("Error while decoding parameters.json")
|
cclog.Warn("Error while decoding parameters.json")
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -190,7 +190,7 @@ func (t *JobClassTagger) Register() error {
|
|||||||
|
|
||||||
err := t.initParameters()
|
err := t.initParameters()
|
||||||
if err != nil {
|
if err != nil {
|
||||||
log.Warnf("error reading parameters.json: %v", err)
|
cclog.Warnf("error reading parameters.json: %v", err)
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -203,11 +203,11 @@ func (t *JobClassTagger) Register() error {
|
|||||||
fns := fn.Name()
|
fns := fn.Name()
|
||||||
if fns != "parameters.json" {
|
if fns != "parameters.json" {
|
||||||
filename := fmt.Sprintf("jobclasses/%s", fns)
|
filename := fmt.Sprintf("jobclasses/%s", fns)
|
||||||
log.Infof("Process: %s", fns)
|
cclog.Infof("Process: %s", fns)
|
||||||
|
|
||||||
b, err := jobclassFiles.ReadFile(filename)
|
b, err := jobclassFiles.ReadFile(filename)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
log.Warnf("prepareRule() > open file error: %v", err)
|
cclog.Warnf("prepareRule() > open file error: %v", err)
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
t.prepareRule(b, fns)
|
t.prepareRule(b, fns)
|
||||||
@ -216,7 +216,7 @@ func (t *JobClassTagger) Register() error {
|
|||||||
|
|
||||||
if util.CheckFileExists(t.cfgPath) {
|
if util.CheckFileExists(t.cfgPath) {
|
||||||
t.EventCallback()
|
t.EventCallback()
|
||||||
log.Infof("Setup file watch for %s", t.cfgPath)
|
cclog.Infof("Setup file watch for %s", t.cfgPath)
|
||||||
util.AddListener(t.cfgPath, t)
|
util.AddListener(t.cfgPath, t)
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -227,16 +227,16 @@ func (t *JobClassTagger) Match(job *schema.Job) {
|
|||||||
r := repository.GetJobRepository()
|
r := repository.GetJobRepository()
|
||||||
jobstats, err := archive.GetStatistics(job)
|
jobstats, err := archive.GetStatistics(job)
|
||||||
metricsList := archive.GetMetricConfigSubCluster(job.Cluster, job.SubCluster)
|
metricsList := archive.GetMetricConfigSubCluster(job.Cluster, job.SubCluster)
|
||||||
log.Infof("Enter match rule with %d rules for job %d", len(t.rules), job.JobID)
|
cclog.Infof("Enter match rule with %d rules for job %d", len(t.rules), job.JobID)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
log.Errorf("job classification failed for job %d: %#v", job.JobID, err)
|
cclog.Errorf("job classification failed for job %d: %#v", job.JobID, err)
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
for tag, ri := range t.rules {
|
for tag, ri := range t.rules {
|
||||||
env := make(map[string]any)
|
env := make(map[string]any)
|
||||||
maps.Copy(env, ri.env)
|
maps.Copy(env, ri.env)
|
||||||
log.Infof("Try to match rule %s for job %d", tag, job.JobID)
|
cclog.Infof("Try to match rule %s for job %d", tag, job.JobID)
|
||||||
|
|
||||||
// Initialize environment
|
// Initialize environment
|
||||||
env["job"] = map[string]any{
|
env["job"] = map[string]any{
|
||||||
@ -253,7 +253,7 @@ func (t *JobClassTagger) Match(job *schema.Job) {
|
|||||||
for _, m := range ri.metrics {
|
for _, m := range ri.metrics {
|
||||||
stats, ok := jobstats[m]
|
stats, ok := jobstats[m]
|
||||||
if !ok {
|
if !ok {
|
||||||
log.Errorf("job classification failed for job %d: missing metric '%s'", job.JobID, m)
|
cclog.Errorf("job classification failed for job %d: missing metric '%s'", job.JobID, m)
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
env[m] = map[string]any{
|
env[m] = map[string]any{
|
||||||
@ -273,11 +273,11 @@ func (t *JobClassTagger) Match(job *schema.Job) {
|
|||||||
for _, r := range ri.requirements {
|
for _, r := range ri.requirements {
|
||||||
ok, err := expr.Run(r, env)
|
ok, err := expr.Run(r, env)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
log.Errorf("error running requirement for rule %s: %#v", tag, err)
|
cclog.Errorf("error running requirement for rule %s: %#v", tag, err)
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
if !ok.(bool) {
|
if !ok.(bool) {
|
||||||
log.Infof("requirement for rule %s not met", tag)
|
cclog.Infof("requirement for rule %s not met", tag)
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@ -286,7 +286,7 @@ func (t *JobClassTagger) Match(job *schema.Job) {
|
|||||||
for _, v := range ri.variables {
|
for _, v := range ri.variables {
|
||||||
value, err := expr.Run(v.expr, env)
|
value, err := expr.Run(v.expr, env)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
log.Errorf("error running rule %s: %#v", tag, err)
|
cclog.Errorf("error running rule %s: %#v", tag, err)
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
env[v.name] = value
|
env[v.name] = value
|
||||||
@ -296,11 +296,11 @@ func (t *JobClassTagger) Match(job *schema.Job) {
|
|||||||
|
|
||||||
match, err := expr.Run(ri.rule, env)
|
match, err := expr.Run(ri.rule, env)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
log.Errorf("error running rule %s: %#v", tag, err)
|
cclog.Errorf("error running rule %s: %#v", tag, err)
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
if match.(bool) {
|
if match.(bool) {
|
||||||
log.Info("Rule matches!")
|
cclog.Info("Rule matches!")
|
||||||
id := *job.ID
|
id := *job.ID
|
||||||
if !r.HasTag(id, t.tagType, tag) {
|
if !r.HasTag(id, t.tagType, tag) {
|
||||||
r.AddTagOrCreateDirect(id, t.tagType, tag)
|
r.AddTagOrCreateDirect(id, t.tagType, tag)
|
||||||
@ -309,14 +309,14 @@ func (t *JobClassTagger) Match(job *schema.Job) {
|
|||||||
// process hint template
|
// process hint template
|
||||||
var msg bytes.Buffer
|
var msg bytes.Buffer
|
||||||
if err := ri.hint.Execute(&msg, env); err != nil {
|
if err := ri.hint.Execute(&msg, env); err != nil {
|
||||||
log.Errorf("Template error: %s", err.Error())
|
cclog.Errorf("Template error: %s", err.Error())
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
// FIXME: Handle case where multiple tags apply
|
// FIXME: Handle case where multiple tags apply
|
||||||
r.UpdateMetadata(job, "message", msg.String())
|
r.UpdateMetadata(job, "message", msg.String())
|
||||||
} else {
|
} else {
|
||||||
log.Info("Rule does not match!")
|
cclog.Info("Rule does not match!")
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
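The classifier above compiles every rule once (expr.AsBool() for rules and requirements, expr.AsFloat64() for variables) and later evaluates the compiled programs against a per-job environment map. A minimal sketch of that compile/run cycle with the expr-lang API; the expression, metric names and thresholds are invented for illustration:

package main

import (
	"fmt"

	"github.com/expr-lang/expr"
)

func main() {
	// Compile once; AsBool() rejects programs that do not evaluate to a
	// boolean, which is exactly what the tagger relies on.
	program, err := expr.Compile("job.duration > 3600 && load.avg < 0.2", expr.AsBool())
	if err != nil {
		panic(err)
	}

	// Per-job environment, analogous to the env map built in Match().
	env := map[string]any{
		"job":  map[string]any{"duration": 7200},
		"load": map[string]any{"avg": 0.05},
	}

	match, err := expr.Run(program, env)
	if err != nil {
		panic(err)
	}
	fmt.Println(match.(bool)) // true: the job would be tagged
}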
@ -15,9 +15,9 @@ import (
|
|||||||
"strings"
|
"strings"
|
||||||
|
|
||||||
"github.com/ClusterCockpit/cc-backend/internal/repository"
|
"github.com/ClusterCockpit/cc-backend/internal/repository"
|
||||||
"github.com/ClusterCockpit/cc-backend/internal/util"
|
cclog "github.com/ClusterCockpit/cc-lib/ccLogger"
|
||||||
"github.com/ClusterCockpit/cc-backend/pkg/log"
|
"github.com/ClusterCockpit/cc-lib/schema"
|
||||||
"github.com/ClusterCockpit/cc-backend/pkg/schema"
|
"github.com/ClusterCockpit/cc-lib/util"
|
||||||
)
|
)
|
||||||
|
|
||||||
//go:embed apps/*
|
//go:embed apps/*
|
||||||
@ -53,15 +53,15 @@ func (t *AppTagger) EventMatch(s string) bool {
|
|||||||
func (t *AppTagger) EventCallback() {
|
func (t *AppTagger) EventCallback() {
|
||||||
files, err := os.ReadDir(t.cfgPath)
|
files, err := os.ReadDir(t.cfgPath)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
log.Fatal(err)
|
cclog.Fatal(err)
|
||||||
}
|
}
|
||||||
|
|
||||||
for _, fn := range files {
|
for _, fn := range files {
|
||||||
fns := fn.Name()
|
fns := fn.Name()
|
||||||
log.Debugf("Process: %s", fns)
|
cclog.Debugf("Process: %s", fns)
|
||||||
f, err := os.Open(fmt.Sprintf("%s/%s", t.cfgPath, fns))
|
f, err := os.Open(fmt.Sprintf("%s/%s", t.cfgPath, fns))
|
||||||
if err != nil {
|
if err != nil {
|
||||||
log.Errorf("error opening app file %s: %#v", fns, err)
|
cclog.Errorf("error opening app file %s: %#v", fns, err)
|
||||||
}
|
}
|
||||||
t.scanApp(f, fns)
|
t.scanApp(f, fns)
|
||||||
}
|
}
|
||||||
@ -78,7 +78,7 @@ func (t *AppTagger) Register() error {
|
|||||||
t.apps = make(map[string]appInfo, 0)
|
t.apps = make(map[string]appInfo, 0)
|
||||||
for _, fn := range files {
|
for _, fn := range files {
|
||||||
fns := fn.Name()
|
fns := fn.Name()
|
||||||
log.Debugf("Process: %s", fns)
|
cclog.Debugf("Process: %s", fns)
|
||||||
f, err := appFiles.Open(fmt.Sprintf("apps/%s", fns))
|
f, err := appFiles.Open(fmt.Sprintf("apps/%s", fns))
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return fmt.Errorf("error opening app file %s: %#v", fns, err)
|
return fmt.Errorf("error opening app file %s: %#v", fns, err)
|
||||||
@ -89,7 +89,7 @@ func (t *AppTagger) Register() error {
|
|||||||
|
|
||||||
if util.CheckFileExists(t.cfgPath) {
|
if util.CheckFileExists(t.cfgPath) {
|
||||||
t.EventCallback()
|
t.EventCallback()
|
||||||
log.Infof("Setup file watch for %s", t.cfgPath)
|
cclog.Infof("Setup file watch for %s", t.cfgPath)
|
||||||
util.AddListener(t.cfgPath, t)
|
util.AddListener(t.cfgPath, t)
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -100,7 +100,7 @@ func (t *AppTagger) Match(job *schema.Job) {
|
|||||||
r := repository.GetJobRepository()
|
r := repository.GetJobRepository()
|
||||||
metadata, err := r.FetchMetadata(job)
|
metadata, err := r.FetchMetadata(job)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
log.Infof("Cannot fetch metadata for job: %d on %s", job.JobID, job.Cluster)
|
cclog.Infof("Cannot fetch metadata for job: %d on %s", job.JobID, job.Cluster)
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -122,6 +122,6 @@ func (t *AppTagger) Match(job *schema.Job) {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
} else {
|
} else {
|
||||||
log.Infof("Cannot extract job script for job: %d on %s", job.JobID, job.Cluster)
|
cclog.Infof("Cannot extract job script for job: %d on %s", job.JobID, job.Cluster)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
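The AppTagger ships its default application signatures inside the binary via go:embed and overlays anything found under the watched config path. A sketch of the embed half; it assumes an apps/ directory next to the source file, as the //go:embed directive above does:

package main

import (
	"embed"
	"fmt"
)

//go:embed apps/*
var appFiles embed.FS

func main() {
	files, err := appFiles.ReadDir("apps")
	if err != nil {
		panic(err)
	}
	for _, fn := range files {
		// Each embedded file is opened by its path inside the FS.
		f, err := appFiles.Open(fmt.Sprintf("apps/%s", fn.Name()))
		if err != nil {
			panic(err)
		}
		f.Close()
		fmt.Println("loaded app signature:", fn.Name())
	}
}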
@ -1,5 +1,5 @@
|
|||||||
// Copyright (C) 2022 NHR@FAU, University Erlangen-Nuremberg.
|
// Copyright (C) NHR@FAU, University Erlangen-Nuremberg.
|
||||||
// All rights reserved.
|
// All rights reserved. This file is part of cc-backend.
|
||||||
// Use of this source code is governed by a MIT-style
|
// Use of this source code is governed by a MIT-style
|
||||||
// license that can be found in the LICENSE file.
|
// license that can be found in the LICENSE file.
|
||||||
package tagger
|
package tagger
|
||||||
@ -8,12 +8,12 @@ import (
|
|||||||
"testing"
|
"testing"
|
||||||
|
|
||||||
"github.com/ClusterCockpit/cc-backend/internal/repository"
|
"github.com/ClusterCockpit/cc-backend/internal/repository"
|
||||||
"github.com/ClusterCockpit/cc-backend/pkg/log"
|
cclog "github.com/ClusterCockpit/cc-lib/ccLogger"
|
||||||
)
|
)
|
||||||
|
|
||||||
func setup(tb testing.TB) *repository.JobRepository {
|
func setup(tb testing.TB) *repository.JobRepository {
|
||||||
tb.Helper()
|
tb.Helper()
|
||||||
log.Init("warn", true)
|
cclog.Init("warn", true)
|
||||||
dbfile := "../repository/testdata/job.db"
|
dbfile := "../repository/testdata/job.db"
|
||||||
err := repository.MigrateDB("sqlite3", dbfile)
|
err := repository.MigrateDB("sqlite3", dbfile)
|
||||||
noErr(tb, err)
|
noErr(tb, err)
|
||||||
|
@ -1,5 +1,5 @@
|
|||||||
// Copyright (C) 2023 NHR@FAU, University Erlangen-Nuremberg.
|
// Copyright (C) NHR@FAU, University Erlangen-Nuremberg.
|
||||||
// All rights reserved.
|
// All rights reserved. This file is part of cc-backend.
|
||||||
// Use of this source code is governed by a MIT-style
|
// Use of this source code is governed by a MIT-style
|
||||||
// license that can be found in the LICENSE file.
|
// license that can be found in the LICENSE file.
|
||||||
package tagger
|
package tagger
|
||||||
@ -8,8 +8,8 @@ import (
|
|||||||
"sync"
|
"sync"
|
||||||
|
|
||||||
"github.com/ClusterCockpit/cc-backend/internal/repository"
|
"github.com/ClusterCockpit/cc-backend/internal/repository"
|
||||||
"github.com/ClusterCockpit/cc-backend/pkg/log"
|
cclog "github.com/ClusterCockpit/cc-lib/ccLogger"
|
||||||
"github.com/ClusterCockpit/cc-backend/pkg/schema"
|
"github.com/ClusterCockpit/cc-lib/schema"
|
||||||
)
|
)
|
||||||
|
|
||||||
type Tagger interface {
|
type Tagger interface {
|
||||||
@ -66,21 +66,21 @@ func RunTaggers() error {
|
|||||||
r := repository.GetJobRepository()
|
r := repository.GetJobRepository()
|
||||||
jl, err := r.GetJobList()
|
jl, err := r.GetJobList()
|
||||||
if err != nil {
|
if err != nil {
|
||||||
log.Errorf("Error while getting job list %s", err)
|
cclog.Errorf("Error while getting job list %s", err)
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
|
||||||
for _, id := range jl {
|
for _, id := range jl {
|
||||||
job, err := r.FindByIdDirect(id)
|
job, err := r.FindByIdDirect(id)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
log.Errorf("Error while getting job %s", err)
|
cclog.Errorf("Error while getting job %s", err)
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
for _, tagger := range jobTagger.startTaggers {
|
for _, tagger := range jobTagger.startTaggers {
|
||||||
tagger.Match(job)
|
tagger.Match(job)
|
||||||
}
|
}
|
||||||
for _, tagger := range jobTagger.stopTaggers {
|
for _, tagger := range jobTagger.stopTaggers {
|
||||||
log.Infof("Run stop tagger for job %d", job.ID)
|
cclog.Infof("Run stop tagger for job %d", job.ID)
|
||||||
tagger.Match(job)
|
tagger.Match(job)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
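RunTaggers above loads every job once and funnels it through all registered start and stop taggers. A sketch of the interface shape this implies; the method set is inferred from the calls visible in the diff (Register at startup, Match per job), and the real interface in cc-backend may carry more methods:

package main

import "fmt"

type Job struct {
	ID    int64
	JobID int64
}

// Tagger mirrors the methods exercised above: registration at startup
// and a per-job Match hook.
type Tagger interface {
	Register() error
	Match(job *Job)
}

type demoTagger struct{ name string }

func (t *demoTagger) Register() error { return nil }
func (t *demoTagger) Match(job *Job)  { fmt.Printf("%s: job %d\n", t.name, job.JobID) }

func main() {
	taggers := []Tagger{&demoTagger{name: "app"}, &demoTagger{name: "jobclass"}}
	job := &Job{ID: 1, JobID: 4711}
	for _, tg := range taggers {
		tg.Match(job)
	}
}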
@ -1,5 +1,5 @@
|
|||||||
// Copyright (C) NHR@FAU, University Erlangen-Nuremberg.
|
// Copyright (C) NHR@FAU, University Erlangen-Nuremberg.
|
||||||
// All rights reserved.
|
// All rights reserved. This file is part of cc-backend.
|
||||||
// Use of this source code is governed by a MIT-style
|
// Use of this source code is governed by a MIT-style
|
||||||
// license that can be found in the LICENSE file.
|
// license that can be found in the LICENSE file.
|
||||||
package tagger
|
package tagger
|
||||||
@ -8,7 +8,7 @@ import (
|
|||||||
"testing"
|
"testing"
|
||||||
|
|
||||||
"github.com/ClusterCockpit/cc-backend/internal/repository"
|
"github.com/ClusterCockpit/cc-backend/internal/repository"
|
||||||
"github.com/ClusterCockpit/cc-backend/pkg/schema"
|
"github.com/ClusterCockpit/cc-lib/schema"
|
||||||
)
|
)
|
||||||
|
|
||||||
func TestInit(t *testing.T) {
|
func TestInit(t *testing.T) {
|
||||||
|
@ -1,5 +1,5 @@
|
|||||||
// Copyright (C) NHR@FAU, University Erlangen-Nuremberg.
|
// Copyright (C) NHR@FAU, University Erlangen-Nuremberg.
|
||||||
// All rights reserved.
|
// All rights reserved. This file is part of cc-backend.
|
||||||
// Use of this source code is governed by a MIT-style
|
// Use of this source code is governed by a MIT-style
|
||||||
// license that can be found in the LICENSE file.
|
// license that can be found in the LICENSE file.
|
||||||
package taskManager
|
package taskManager
|
||||||
@ -9,7 +9,7 @@ import (
|
|||||||
|
|
||||||
"github.com/ClusterCockpit/cc-backend/internal/config"
|
"github.com/ClusterCockpit/cc-backend/internal/config"
|
||||||
"github.com/ClusterCockpit/cc-backend/internal/repository"
|
"github.com/ClusterCockpit/cc-backend/internal/repository"
|
||||||
"github.com/ClusterCockpit/cc-backend/pkg/log"
|
cclog "github.com/ClusterCockpit/cc-lib/ccLogger"
|
||||||
"github.com/go-co-op/gocron/v2"
|
"github.com/go-co-op/gocron/v2"
|
||||||
)
|
)
|
||||||
|
|
||||||
@ -21,15 +21,15 @@ func RegisterCommitJobService() {
|
|||||||
frequency = "2m"
|
frequency = "2m"
|
||||||
}
|
}
|
||||||
d, _ := time.ParseDuration(frequency)
|
d, _ := time.ParseDuration(frequency)
|
||||||
log.Infof("Register commitJob service with %s interval", frequency)
|
cclog.Infof("Register commitJob service with %s interval", frequency)
|
||||||
|
|
||||||
s.NewJob(gocron.DurationJob(d),
|
s.NewJob(gocron.DurationJob(d),
|
||||||
gocron.NewTask(
|
gocron.NewTask(
|
||||||
func() {
|
func() {
|
||||||
start := time.Now()
|
start := time.Now()
|
||||||
log.Printf("Jobcache sync started at %s", start.Format(time.RFC3339))
|
cclog.Printf("Jobcache sync started at %s", start.Format(time.RFC3339))
|
||||||
jobs, _ := jobRepo.SyncJobs()
|
jobs, _ := jobRepo.SyncJobs()
|
||||||
repository.CallJobStartHooks(jobs)
|
repository.CallJobStartHooks(jobs)
|
||||||
log.Printf("Jobcache sync and job callbacks are done and took %s", time.Since(start))
|
cclog.Printf("Jobcache sync and job callbacks are done and took %s", time.Since(start))
|
||||||
}))
|
}))
|
||||||
}
|
}
|
||||||
|
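RegisterCommitJobService above parses a configured interval (defaulting to "2m") and schedules the job-cache sync with gocron v2. A self-contained sketch of that registration pattern; the task body is a placeholder for the SyncJobs call and the job-start hooks:

package main

import (
	"fmt"
	"time"

	"github.com/go-co-op/gocron/v2"
)

func main() {
	s, err := gocron.NewScheduler()
	if err != nil {
		panic(err)
	}

	d, _ := time.ParseDuration("2m") // the default used above when nothing is configured

	// DurationJob fires every d; NewTask wraps the sync closure.
	_, err = s.NewJob(gocron.DurationJob(d), gocron.NewTask(func() {
		start := time.Now()
		fmt.Printf("jobcache sync started at %s\n", start.Format(time.RFC3339))
		// ... jobRepo.SyncJobs() and the job-start hooks run here ...
		fmt.Printf("jobcache sync done, took %s\n", time.Since(start))
	}))
	if err != nil {
		panic(err)
	}

	s.Start()                   // non-blocking; the scheduler runs in the background
	time.Sleep(5 * time.Minute) // demo only: keep the process alive so the job fires
}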
@ -1,5 +1,5 @@
|
|||||||
// Copyright (C) NHR@FAU, University Erlangen-Nuremberg.
|
// Copyright (C) NHR@FAU, University Erlangen-Nuremberg.
|
||||||
// All rights reserved.
|
// All rights reserved. This file is part of cc-backend.
|
||||||
// Use of this source code is governed by a MIT-style
|
// Use of this source code is governed by a MIT-style
|
||||||
// license that can be found in the LICENSE file.
|
// license that can be found in the LICENSE file.
|
||||||
package taskManager
|
package taskManager
|
||||||
@ -8,13 +8,13 @@ import (
|
|||||||
"time"
|
"time"
|
||||||
|
|
||||||
"github.com/ClusterCockpit/cc-backend/pkg/archive"
|
"github.com/ClusterCockpit/cc-backend/pkg/archive"
|
||||||
"github.com/ClusterCockpit/cc-backend/pkg/log"
|
cclog "github.com/ClusterCockpit/cc-lib/ccLogger"
|
||||||
"github.com/ClusterCockpit/cc-backend/pkg/schema"
|
"github.com/ClusterCockpit/cc-lib/schema"
|
||||||
"github.com/go-co-op/gocron/v2"
|
"github.com/go-co-op/gocron/v2"
|
||||||
)
|
)
|
||||||
|
|
||||||
func RegisterCompressionService(compressOlderThan int) {
|
func RegisterCompressionService(compressOlderThan int) {
|
||||||
log.Info("Register compression service")
|
cclog.Info("Register compression service")
|
||||||
|
|
||||||
s.NewJob(gocron.DailyJob(1, gocron.NewAtTimes(gocron.NewAtTime(05, 0, 0))),
|
s.NewJob(gocron.DailyJob(1, gocron.NewAtTimes(gocron.NewAtTime(05, 0, 0))),
|
||||||
gocron.NewTask(
|
gocron.NewTask(
|
||||||
@ -26,7 +26,7 @@ func RegisterCompressionService(compressOlderThan int) {
|
|||||||
startTime := time.Now().Unix() - int64(compressOlderThan*24*3600)
|
startTime := time.Now().Unix() - int64(compressOlderThan*24*3600)
|
||||||
lastTime := ar.CompressLast(startTime)
|
lastTime := ar.CompressLast(startTime)
|
||||||
if startTime == lastTime {
|
if startTime == lastTime {
|
||||||
log.Info("Compression Service - Complete archive run")
|
cclog.Info("Compression Service - Complete archive run")
|
||||||
jobs, err = jobRepo.FindJobsBetween(0, startTime)
|
jobs, err = jobRepo.FindJobsBetween(0, startTime)
|
||||||
|
|
||||||
} else {
|
} else {
|
||||||
@ -34,7 +34,7 @@ func RegisterCompressionService(compressOlderThan int) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
if err != nil {
|
if err != nil {
|
||||||
log.Warnf("Error while looking for compression jobs: %v", err)
|
cclog.Warnf("Error while looking for compression jobs: %v", err)
|
||||||
}
|
}
|
||||||
ar.Compress(jobs)
|
ar.Compress(jobs)
|
||||||
}))
|
}))
|
||||||
|
@ -1,5 +1,5 @@
|
|||||||
// Copyright (C) NHR@FAU, University Erlangen-Nuremberg.
|
// Copyright (C) NHR@FAU, University Erlangen-Nuremberg.
|
||||||
// All rights reserved.
|
// All rights reserved. This file is part of cc-backend.
|
||||||
// Use of this source code is governed by a MIT-style
|
// Use of this source code is governed by a MIT-style
|
||||||
// license that can be found in the LICENSE file.
|
// license that can be found in the LICENSE file.
|
||||||
package taskManager
|
package taskManager
|
||||||
@ -8,29 +8,29 @@ import (
|
|||||||
"time"
|
"time"
|
||||||
|
|
||||||
"github.com/ClusterCockpit/cc-backend/internal/auth"
|
"github.com/ClusterCockpit/cc-backend/internal/auth"
|
||||||
"github.com/ClusterCockpit/cc-backend/pkg/log"
|
cclog "github.com/ClusterCockpit/cc-lib/ccLogger"
|
||||||
"github.com/go-co-op/gocron/v2"
|
"github.com/go-co-op/gocron/v2"
|
||||||
)
|
)
|
||||||
|
|
||||||
func RegisterLdapSyncService(ds string) {
|
func RegisterLdapSyncService(ds string) {
|
||||||
interval, err := parseDuration(ds)
|
interval, err := parseDuration(ds)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
log.Warnf("Could not parse duration for sync interval: %v",
|
cclog.Warnf("Could not parse duration for sync interval: %v",
|
||||||
ds)
|
ds)
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
auth := auth.GetAuthInstance()
|
auth := auth.GetAuthInstance()
|
||||||
|
|
||||||
log.Info("Register LDAP sync service")
|
cclog.Info("Register LDAP sync service")
|
||||||
s.NewJob(gocron.DurationJob(interval),
|
s.NewJob(gocron.DurationJob(interval),
|
||||||
gocron.NewTask(
|
gocron.NewTask(
|
||||||
func() {
|
func() {
|
||||||
t := time.Now()
|
t := time.Now()
|
||||||
log.Printf("ldap sync started at %s", t.Format(time.RFC3339))
|
cclog.Printf("ldap sync started at %s", t.Format(time.RFC3339))
|
||||||
if err := auth.LdapAuth.Sync(); err != nil {
|
if err := auth.LdapAuth.Sync(); err != nil {
|
||||||
log.Errorf("ldap sync failed: %s", err.Error())
|
cclog.Errorf("ldap sync failed: %s", err.Error())
|
||||||
}
|
}
|
||||||
log.Print("ldap sync done")
|
cclog.Print("ldap sync done")
|
||||||
}))
|
}))
|
||||||
}
|
}
|
||||||
|
@ -1,5 +1,5 @@
|
|||||||
// Copyright (C) NHR@FAU, University Erlangen-Nuremberg.
|
// Copyright (C) NHR@FAU, University Erlangen-Nuremberg.
|
||||||
// All rights reserved.
|
// All rights reserved. This file is part of cc-backend.
|
||||||
// Use of this source code is governed by a MIT-style
|
// Use of this source code is governed by a MIT-style
|
||||||
// license that can be found in the LICENSE file.
|
// license that can be found in the LICENSE file.
|
||||||
package taskManager
|
package taskManager
|
||||||
@ -8,12 +8,12 @@ import (
|
|||||||
"time"
|
"time"
|
||||||
|
|
||||||
"github.com/ClusterCockpit/cc-backend/pkg/archive"
|
"github.com/ClusterCockpit/cc-backend/pkg/archive"
|
||||||
"github.com/ClusterCockpit/cc-backend/pkg/log"
|
cclog "github.com/ClusterCockpit/cc-lib/ccLogger"
|
||||||
"github.com/go-co-op/gocron/v2"
|
"github.com/go-co-op/gocron/v2"
|
||||||
)
|
)
|
||||||
|
|
||||||
func RegisterRetentionDeleteService(age int, includeDB bool) {
|
func RegisterRetentionDeleteService(age int, includeDB bool) {
|
||||||
log.Info("Register retention delete service")
|
cclog.Info("Register retention delete service")
|
||||||
|
|
||||||
s.NewJob(gocron.DailyJob(1, gocron.NewAtTimes(gocron.NewAtTime(04, 0, 0))),
|
s.NewJob(gocron.DailyJob(1, gocron.NewAtTimes(gocron.NewAtTime(04, 0, 0))),
|
||||||
gocron.NewTask(
|
gocron.NewTask(
|
||||||
@ -21,26 +21,26 @@ func RegisterRetentionDeleteService(age int, includeDB bool) {
|
|||||||
startTime := time.Now().Unix() - int64(age*24*3600)
|
startTime := time.Now().Unix() - int64(age*24*3600)
|
||||||
jobs, err := jobRepo.FindJobsBetween(0, startTime)
|
jobs, err := jobRepo.FindJobsBetween(0, startTime)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
log.Warnf("Error while looking for retention jobs: %s", err.Error())
|
cclog.Warnf("Error while looking for retention jobs: %s", err.Error())
|
||||||
}
|
}
|
||||||
archive.GetHandle().CleanUp(jobs)
|
archive.GetHandle().CleanUp(jobs)
|
||||||
|
|
||||||
if includeDB {
|
if includeDB {
|
||||||
cnt, err := jobRepo.DeleteJobsBefore(startTime)
|
cnt, err := jobRepo.DeleteJobsBefore(startTime)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
log.Errorf("Error while deleting retention jobs from db: %s", err.Error())
|
cclog.Errorf("Error while deleting retention jobs from db: %s", err.Error())
|
||||||
} else {
|
} else {
|
||||||
log.Infof("Retention: Removed %d jobs from db", cnt)
|
cclog.Infof("Retention: Removed %d jobs from db", cnt)
|
||||||
}
|
}
|
||||||
if err = jobRepo.Optimize(); err != nil {
|
if err = jobRepo.Optimize(); err != nil {
|
||||||
log.Errorf("Error occured in db optimization: %s", err.Error())
|
cclog.Errorf("Error occured in db optimization: %s", err.Error())
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}))
|
}))
|
||||||
}
|
}
|
||||||
|
|
||||||
func RegisterRetentionMoveService(age int, includeDB bool, location string) {
|
func RegisterRetentionMoveService(age int, includeDB bool, location string) {
|
||||||
log.Info("Register retention move service")
|
cclog.Info("Register retention move service")
|
||||||
|
|
||||||
s.NewJob(gocron.DailyJob(1, gocron.NewAtTimes(gocron.NewAtTime(04, 0, 0))),
|
s.NewJob(gocron.DailyJob(1, gocron.NewAtTimes(gocron.NewAtTime(04, 0, 0))),
|
||||||
gocron.NewTask(
|
gocron.NewTask(
|
||||||
@ -48,19 +48,19 @@ func RegisterRetentionMoveService(age int, includeDB bool, location string) {
|
|||||||
startTime := time.Now().Unix() - int64(age*24*3600)
|
startTime := time.Now().Unix() - int64(age*24*3600)
|
||||||
jobs, err := jobRepo.FindJobsBetween(0, startTime)
|
jobs, err := jobRepo.FindJobsBetween(0, startTime)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
log.Warnf("Error while looking for retention jobs: %s", err.Error())
|
cclog.Warnf("Error while looking for retention jobs: %s", err.Error())
|
||||||
}
|
}
|
||||||
archive.GetHandle().Move(jobs, location)
|
archive.GetHandle().Move(jobs, location)
|
||||||
|
|
||||||
if includeDB {
|
if includeDB {
|
||||||
cnt, err := jobRepo.DeleteJobsBefore(startTime)
|
cnt, err := jobRepo.DeleteJobsBefore(startTime)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
log.Errorf("Error while deleting retention jobs from db: %v", err)
|
cclog.Errorf("Error while deleting retention jobs from db: %v", err)
|
||||||
} else {
|
} else {
|
||||||
log.Infof("Retention: Removed %d jobs from db", cnt)
|
cclog.Infof("Retention: Removed %d jobs from db", cnt)
|
||||||
}
|
}
|
||||||
if err = jobRepo.Optimize(); err != nil {
|
if err = jobRepo.Optimize(); err != nil {
|
||||||
log.Errorf("Error occured in db optimization: %v", err)
|
cclog.Errorf("Error occured in db optimization: %v", err)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}))
|
}))
|
||||||
|
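Both retention services above compute their cutoff as now minus age days, in Unix seconds, and then collect all jobs that started before it. A small worked example of the arithmetic:

package main

import (
	"fmt"
	"time"
)

func main() {
	age := 30 // days, as configured for the retention policy

	// int64(age*24*3600) is the age in seconds; subtracting it from the
	// current Unix timestamp yields the oldest start time to keep.
	startTime := time.Now().Unix() - int64(age*24*3600)

	fmt.Println("delete/move jobs that started before:",
		time.Unix(startTime, 0).Format(time.RFC3339))
}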
@ -1,5 +1,5 @@
|
|||||||
// Copyright (C) NHR@FAU, University Erlangen-Nuremberg.
|
// Copyright (C) NHR@FAU, University Erlangen-Nuremberg.
|
||||||
// All rights reserved.
|
// All rights reserved. This file is part of cc-backend.
|
||||||
// Use of this source code is governed by a MIT-style
|
// Use of this source code is governed by a MIT-style
|
||||||
// license that can be found in the LICENSE file.
|
// license that can be found in the LICENSE file.
|
||||||
package taskManager
|
package taskManager
|
||||||
@ -8,19 +8,19 @@ import (
|
|||||||
"runtime"
|
"runtime"
|
||||||
|
|
||||||
"github.com/ClusterCockpit/cc-backend/internal/config"
|
"github.com/ClusterCockpit/cc-backend/internal/config"
|
||||||
"github.com/ClusterCockpit/cc-backend/pkg/log"
|
cclog "github.com/ClusterCockpit/cc-lib/ccLogger"
|
||||||
"github.com/go-co-op/gocron/v2"
|
"github.com/go-co-op/gocron/v2"
|
||||||
)
|
)
|
||||||
|
|
||||||
func RegisterStopJobsExceedTime() {
|
func RegisterStopJobsExceedTime() {
|
||||||
log.Info("Register undead jobs service")
|
cclog.Info("Register undead jobs service")
|
||||||
|
|
||||||
s.NewJob(gocron.DailyJob(1, gocron.NewAtTimes(gocron.NewAtTime(03, 0, 0))),
|
s.NewJob(gocron.DailyJob(1, gocron.NewAtTimes(gocron.NewAtTime(03, 0, 0))),
|
||||||
gocron.NewTask(
|
gocron.NewTask(
|
||||||
func() {
|
func() {
|
||||||
err := jobRepo.StopJobsExceedingWalltimeBy(config.Keys.StopJobsExceedingWalltime)
|
err := jobRepo.StopJobsExceedingWalltimeBy(config.Keys.StopJobsExceedingWalltime)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
log.Warnf("Error while looking for jobs exceeding their walltime: %s", err.Error())
|
cclog.Warnf("Error while looking for jobs exceeding their walltime: %s", err.Error())
|
||||||
}
|
}
|
||||||
runtime.GC()
|
runtime.GC()
|
||||||
}))
|
}))
|
||||||
|
@ -1,5 +1,5 @@
|
|||||||
// Copyright (C) NHR@FAU, University Erlangen-Nuremberg.
|
// Copyright (C) NHR@FAU, University Erlangen-Nuremberg.
|
||||||
// All rights reserved.
|
// All rights reserved. This file is part of cc-backend.
|
||||||
// Use of this source code is governed by a MIT-style
|
// Use of this source code is governed by a MIT-style
|
||||||
// license that can be found in the LICENSE file.
|
// license that can be found in the LICENSE file.
|
||||||
package taskManager
|
package taskManager
|
||||||
@ -10,8 +10,8 @@ import (
|
|||||||
|
|
||||||
"github.com/ClusterCockpit/cc-backend/internal/config"
|
"github.com/ClusterCockpit/cc-backend/internal/config"
|
||||||
"github.com/ClusterCockpit/cc-backend/internal/repository"
|
"github.com/ClusterCockpit/cc-backend/internal/repository"
|
||||||
"github.com/ClusterCockpit/cc-backend/pkg/log"
|
cclog "github.com/ClusterCockpit/cc-lib/ccLogger"
|
||||||
"github.com/ClusterCockpit/cc-backend/pkg/schema"
|
"github.com/ClusterCockpit/cc-lib/schema"
|
||||||
"github.com/go-co-op/gocron/v2"
|
"github.com/go-co-op/gocron/v2"
|
||||||
)
|
)
|
||||||
|
|
||||||
@ -23,13 +23,13 @@ var (
|
|||||||
func parseDuration(s string) (time.Duration, error) {
|
func parseDuration(s string) (time.Duration, error) {
|
||||||
interval, err := time.ParseDuration(s)
|
interval, err := time.ParseDuration(s)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
log.Warnf("Could not parse duration for sync interval: %v",
|
cclog.Warnf("Could not parse duration for sync interval: %v",
|
||||||
s)
|
s)
|
||||||
return 0, err
|
return 0, err
|
||||||
}
|
}
|
||||||
|
|
||||||
if interval == 0 {
|
if interval == 0 {
|
||||||
log.Info("TaskManager: Sync interval is zero")
|
cclog.Info("TaskManager: Sync interval is zero")
|
||||||
}
|
}
|
||||||
|
|
||||||
return interval, nil
|
return interval, nil
|
||||||
@ -40,7 +40,7 @@ func Start() {
|
|||||||
jobRepo = repository.GetJobRepository()
|
jobRepo = repository.GetJobRepository()
|
||||||
s, err = gocron.NewScheduler()
|
s, err = gocron.NewScheduler()
|
||||||
if err != nil {
|
if err != nil {
|
||||||
log.Abortf("Taskmanager Start: Could not create gocron scheduler.\nError: %s\n", err.Error())
|
cclog.Abortf("Taskmanager Start: Could not create gocron scheduler.\nError: %s\n", err.Error())
|
||||||
}
|
}
|
||||||
|
|
||||||
if config.Keys.StopJobsExceedingWalltime > 0 {
|
if config.Keys.StopJobsExceedingWalltime > 0 {
|
||||||
@ -54,7 +54,7 @@ func Start() {
|
|||||||
cfg.Retention.IncludeDB = true
|
cfg.Retention.IncludeDB = true
|
||||||
|
|
||||||
if err := json.Unmarshal(config.Keys.Archive, &cfg); err != nil {
|
if err := json.Unmarshal(config.Keys.Archive, &cfg); err != nil {
|
||||||
log.Warn("Error while unmarshaling raw config json")
|
cclog.Warn("Error while unmarshaling raw config json")
|
||||||
}
|
}
|
||||||
|
|
||||||
switch cfg.Retention.Policy {
|
switch cfg.Retention.Policy {
|
||||||
|
@ -1,5 +1,5 @@
|
|||||||
// Copyright (C) NHR@FAU, University Erlangen-Nuremberg.
|
// Copyright (C) NHR@FAU, University Erlangen-Nuremberg.
|
||||||
// All rights reserved.
|
// All rights reserved. This file is part of cc-backend.
|
||||||
// Use of this source code is governed by a MIT-style
|
// Use of this source code is governed by a MIT-style
|
||||||
// license that can be found in the LICENSE file.
|
// license that can be found in the LICENSE file.
|
||||||
package taskManager
|
package taskManager
|
||||||
@ -8,7 +8,7 @@ import (
|
|||||||
"time"
|
"time"
|
||||||
|
|
||||||
"github.com/ClusterCockpit/cc-backend/internal/config"
|
"github.com/ClusterCockpit/cc-backend/internal/config"
|
||||||
"github.com/ClusterCockpit/cc-backend/pkg/log"
|
cclog "github.com/ClusterCockpit/cc-lib/ccLogger"
|
||||||
"github.com/go-co-op/gocron/v2"
|
"github.com/go-co-op/gocron/v2"
|
||||||
)
|
)
|
||||||
|
|
||||||
@ -20,14 +20,14 @@ func RegisterUpdateDurationWorker() {
|
|||||||
frequency = "5m"
|
frequency = "5m"
|
||||||
}
|
}
|
||||||
d, _ := time.ParseDuration(frequency)
|
d, _ := time.ParseDuration(frequency)
|
||||||
log.Infof("Register Duration Update service with %s interval", frequency)
|
cclog.Infof("Register Duration Update service with %s interval", frequency)
|
||||||
|
|
||||||
s.NewJob(gocron.DurationJob(d),
|
s.NewJob(gocron.DurationJob(d),
|
||||||
gocron.NewTask(
|
gocron.NewTask(
|
||||||
func() {
|
func() {
|
||||||
start := time.Now()
|
start := time.Now()
|
||||||
log.Printf("Update duration started at %s", start.Format(time.RFC3339))
|
cclog.Printf("Update duration started at %s", start.Format(time.RFC3339))
|
||||||
jobRepo.UpdateDuration()
|
jobRepo.UpdateDuration()
|
||||||
log.Printf("Update duration is done and took %s", time.Since(start))
|
cclog.Printf("Update duration is done and took %s", time.Since(start))
|
||||||
}))
|
}))
|
||||||
}
|
}
|
||||||
|
@ -1,5 +1,5 @@
|
|||||||
// Copyright (C) NHR@FAU, University Erlangen-Nuremberg.
|
// Copyright (C) NHR@FAU, University Erlangen-Nuremberg.
|
||||||
// All rights reserved.
|
// All rights reserved. This file is part of cc-backend.
|
||||||
// Use of this source code is governed by a MIT-style
|
// Use of this source code is governed by a MIT-style
|
||||||
// license that can be found in the LICENSE file.
|
// license that can be found in the LICENSE file.
|
||||||
package taskManager
|
package taskManager
|
||||||
@ -12,8 +12,8 @@ import (
|
|||||||
"github.com/ClusterCockpit/cc-backend/internal/config"
|
"github.com/ClusterCockpit/cc-backend/internal/config"
|
||||||
"github.com/ClusterCockpit/cc-backend/internal/metricdata"
|
"github.com/ClusterCockpit/cc-backend/internal/metricdata"
|
||||||
"github.com/ClusterCockpit/cc-backend/pkg/archive"
|
"github.com/ClusterCockpit/cc-backend/pkg/archive"
|
||||||
"github.com/ClusterCockpit/cc-backend/pkg/log"
|
cclog "github.com/ClusterCockpit/cc-lib/ccLogger"
|
||||||
"github.com/ClusterCockpit/cc-backend/pkg/schema"
|
"github.com/ClusterCockpit/cc-lib/schema"
|
||||||
sq "github.com/Masterminds/squirrel"
|
sq "github.com/Masterminds/squirrel"
|
||||||
"github.com/go-co-op/gocron/v2"
|
"github.com/go-co-op/gocron/v2"
|
||||||
)
|
)
|
||||||
@ -26,7 +26,7 @@ func RegisterFootprintWorker() {
|
|||||||
frequency = "10m"
|
frequency = "10m"
|
||||||
}
|
}
|
||||||
d, _ := time.ParseDuration(frequency)
|
d, _ := time.ParseDuration(frequency)
|
||||||
log.Infof("Register Footprint Update service with %s interval", frequency)
|
cclog.Infof("Register Footprint Update service with %s interval", frequency)
|
||||||
|
|
||||||
s.NewJob(gocron.DurationJob(d),
|
s.NewJob(gocron.DurationJob(d),
|
||||||
gocron.NewTask(
|
gocron.NewTask(
|
||||||
@ -35,7 +35,7 @@ func RegisterFootprintWorker() {
|
|||||||
c := 0
|
c := 0
|
||||||
ce := 0
|
ce := 0
|
||||||
cl := 0
|
cl := 0
|
||||||
log.Printf("Update Footprints started at %s", s.Format(time.RFC3339))
|
cclog.Printf("Update Footprints started at %s", s.Format(time.RFC3339))
|
||||||
|
|
||||||
for _, cluster := range archive.Clusters {
|
for _, cluster := range archive.Clusters {
|
||||||
s_cluster := time.Now()
|
s_cluster := time.Now()
|
||||||
@ -54,21 +54,21 @@ func RegisterFootprintWorker() {
|
|||||||
|
|
||||||
repo, err := metricdata.GetMetricDataRepo(cluster.Name)
|
repo, err := metricdata.GetMetricDataRepo(cluster.Name)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
log.Errorf("no metric data repository configured for '%s'", cluster.Name)
|
cclog.Errorf("no metric data repository configured for '%s'", cluster.Name)
|
||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
|
|
||||||
pendingStatements := []sq.UpdateBuilder{}
|
pendingStatements := []sq.UpdateBuilder{}
|
||||||
|
|
||||||
for _, job := range jobs {
|
for _, job := range jobs {
|
||||||
log.Debugf("Prepare job %d", job.JobID)
|
cclog.Debugf("Prepare job %d", job.JobID)
|
||||||
cl++
|
cl++
|
||||||
|
|
||||||
s_job := time.Now()
|
s_job := time.Now()
|
||||||
|
|
||||||
jobStats, err := repo.LoadStats(job, allMetrics, context.Background())
|
jobStats, err := repo.LoadStats(job, allMetrics, context.Background())
|
||||||
if err != nil {
|
if err != nil {
|
||||||
log.Errorf("error wile loading job data stats for footprint update: %v", err)
|
cclog.Errorf("error wile loading job data stats for footprint update: %v", err)
|
||||||
ce++
|
ce++
|
||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
@ -106,26 +106,26 @@ func RegisterFootprintWorker() {
|
|||||||
stmt := sq.Update("job")
|
stmt := sq.Update("job")
|
||||||
stmt, err = jobRepo.UpdateFootprint(stmt, job)
|
stmt, err = jobRepo.UpdateFootprint(stmt, job)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
log.Errorf("update job (dbid: %d) statement build failed at footprint step: %s", job.ID, err.Error())
|
cclog.Errorf("update job (dbid: %d) statement build failed at footprint step: %s", job.ID, err.Error())
|
||||||
ce++
|
ce++
|
||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
stmt = stmt.Where("job.id = ?", job.ID)
|
stmt = stmt.Where("job.id = ?", job.ID)
|
||||||
|
|
||||||
pendingStatements = append(pendingStatements, stmt)
|
pendingStatements = append(pendingStatements, stmt)
|
||||||
log.Debugf("Job %d took %s", job.JobID, time.Since(s_job))
|
cclog.Debugf("Job %d took %s", job.JobID, time.Since(s_job))
|
||||||
}
|
}
|
||||||
|
|
||||||
t, err := jobRepo.TransactionInit()
|
t, err := jobRepo.TransactionInit()
|
||||||
if err != nil {
|
if err != nil {
|
||||||
log.Errorf("failed TransactionInit %v", err)
|
cclog.Errorf("failed TransactionInit %v", err)
|
||||||
log.Errorf("skipped %d transactions for cluster %s", len(pendingStatements), cluster.Name)
|
cclog.Errorf("skipped %d transactions for cluster %s", len(pendingStatements), cluster.Name)
|
||||||
ce += len(pendingStatements)
|
ce += len(pendingStatements)
|
||||||
} else {
|
} else {
|
||||||
for _, ps := range pendingStatements {
|
for _, ps := range pendingStatements {
|
||||||
query, args, err := ps.ToSql()
|
query, args, err := ps.ToSql()
|
||||||
if err != nil {
|
if err != nil {
|
||||||
log.Errorf("failed in ToSQL conversion: %v", err)
|
cclog.Errorf("failed in ToSQL conversion: %v", err)
|
||||||
ce++
|
ce++
|
||||||
} else {
|
} else {
|
||||||
// args...: Footprint-JSON, Energyfootprint-JSON, TotalEnergy, JobID
|
// args...: Footprint-JSON, Energyfootprint-JSON, TotalEnergy, JobID
|
||||||
@ -135,8 +135,8 @@ func RegisterFootprintWorker() {
|
|||||||
}
|
}
|
||||||
jobRepo.TransactionEnd(t)
|
jobRepo.TransactionEnd(t)
|
||||||
}
|
}
|
||||||
log.Debugf("Finish Cluster %s, took %s", cluster.Name, time.Since(s_cluster))
|
cclog.Debugf("Finish Cluster %s, took %s", cluster.Name, time.Since(s_cluster))
|
||||||
}
|
}
|
||||||
log.Printf("Updating %d (of %d; Skipped %d) Footprints is done and took %s", c, cl, ce, time.Since(s))
|
cclog.Printf("Updating %d (of %d; Skipped %d) Footprints is done and took %s", c, cl, ce, time.Since(s))
|
||||||
}))
|
}))
|
||||||
}
|
}
|
||||||
|
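The footprint worker above prepares one squirrel UPDATE per job, queues the builders in pendingStatements, and flushes the whole batch inside a single transaction so that one malformed statement does not abort the cluster run. A sketch of that batching pattern against a plain *sql.DB; the job table is taken from the diff, the footprint JSON literal is invented:

package main

import (
	"database/sql"
	"log"

	sq "github.com/Masterminds/squirrel"
)

// flushFootprints executes a batch of prepared UPDATE builders inside
// one transaction; a statement that fails to render is skipped so the
// rest of the batch still lands, mirroring the per-statement error
// handling above.
func flushFootprints(db *sql.DB, pending []sq.UpdateBuilder) error {
	tx, err := db.Begin()
	if err != nil {
		return err
	}
	for _, ps := range pending {
		query, args, err := ps.ToSql()
		if err != nil {
			log.Printf("failed in ToSQL conversion: %v", err)
			continue
		}
		if _, err := tx.Exec(query, args...); err != nil {
			log.Printf("exec failed: %v", err)
		}
	}
	return tx.Commit()
}

func main() {
	// Building one statement; the footprint JSON literal is invented.
	stmt := sq.Update("job").
		Set("footprint", `{"mem_bw_avg": 42.0}`).
		Where("job.id = ?", 1)

	query, args, _ := stmt.ToSql()
	log.Printf("would queue: %s %v", query, args)
}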
@ -1,14 +0,0 @@
|
|||||||
// Copyright (C) NHR@FAU, University Erlangen-Nuremberg.
|
|
||||||
// All rights reserved.
|
|
||||||
// Use of this source code is governed by a MIT-style
|
|
||||||
// license that can be found in the LICENSE file.
|
|
||||||
package util
|
|
||||||
|
|
||||||
func Contains[T comparable](items []T, item T) bool {
|
|
||||||
for _, v := range items {
|
|
||||||
if v == item {
|
|
||||||
return true
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return false
|
|
||||||
}
|
|
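The generic Contains helper deleted above has had a stdlib equivalent since Go 1.21, so callers ported to cc-lib can use slices.Contains instead of a hand-rolled loop:

package main

import (
	"fmt"
	"slices"
)

func main() {
	roles := []string{"admin", "support"}
	fmt.Println(slices.Contains(roles, "admin"))   // true
	fmt.Println(slices.Contains(roles, "manager")) // false
}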
@ -1,77 +0,0 @@
|
|||||||
// Copyright (C) NHR@FAU, University Erlangen-Nuremberg.
|
|
||||||
// All rights reserved.
|
|
||||||
// Use of this source code is governed by a MIT-style
|
|
||||||
// license that can be found in the LICENSE file.
|
|
||||||
package util
|
|
||||||
|
|
||||||
import (
|
|
||||||
"compress/gzip"
|
|
||||||
"io"
|
|
||||||
"os"
|
|
||||||
|
|
||||||
"github.com/ClusterCockpit/cc-backend/pkg/log"
|
|
||||||
)
|
|
||||||
|
|
||||||
func CompressFile(fileIn string, fileOut string) error {
|
|
||||||
originalFile, err := os.Open(fileIn)
|
|
||||||
if err != nil {
|
|
||||||
log.Errorf("CompressFile() error: %v", err)
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
defer originalFile.Close()
|
|
||||||
|
|
||||||
gzippedFile, err := os.Create(fileOut)
|
|
||||||
|
|
||||||
if err != nil {
|
|
||||||
log.Errorf("CompressFile() error: %v", err)
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
defer gzippedFile.Close()
|
|
||||||
|
|
||||||
gzipWriter := gzip.NewWriter(gzippedFile)
|
|
||||||
defer gzipWriter.Close()
|
|
||||||
|
|
||||||
_, err = io.Copy(gzipWriter, originalFile)
|
|
||||||
if err != nil {
|
|
||||||
log.Errorf("CompressFile() error: %v", err)
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
gzipWriter.Flush()
|
|
||||||
if err := os.Remove(fileIn); err != nil {
|
|
||||||
log.Errorf("CompressFile() error: %v", err)
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func UncompressFile(fileIn string, fileOut string) error {
|
|
||||||
gzippedFile, err := os.Open(fileIn)
|
|
||||||
if err != nil {
|
|
||||||
log.Errorf("UncompressFile() error: %v", err)
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
defer gzippedFile.Close()
|
|
||||||
|
|
||||||
gzipReader, _ := gzip.NewReader(gzippedFile)
|
|
||||||
defer gzipReader.Close()
|
|
||||||
|
|
||||||
uncompressedFile, err := os.Create(fileOut)
|
|
||||||
if err != nil {
|
|
||||||
log.Errorf("UncompressFile() error: %v", err)
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
defer uncompressedFile.Close()
|
|
||||||
|
|
||||||
_, err = io.Copy(uncompressedFile, gzipReader)
|
|
||||||
if err != nil {
|
|
||||||
log.Errorf("UncompressFile() error: %v", err)
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
if err := os.Remove(fileIn); err != nil {
|
|
||||||
log.Errorf("UncompressFile() error: %v", err)
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
return nil
|
|
||||||
}
|
|
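Note: both removed helpers delete the input file on success, so they implement move semantics, and UncompressFile silently discards the gzip.NewReader error. A sketch of a variant that propagates it (uncompress is an illustrative name):

package fileutil

import (
	"compress/gzip"
	"io"
	"os"
)

// uncompress streams gzip-decoded bytes from src to dst and propagates
// the gzip.NewReader error that the removed helper ignored.
func uncompress(src, dst string) error {
	in, err := os.Open(src)
	if err != nil {
		return err
	}
	defer in.Close()

	zr, err := gzip.NewReader(in) // fails with gzip.ErrHeader on non-gzip input
	if err != nil {
		return err
	}
	defer zr.Close()

	out, err := os.Create(dst)
	if err != nil {
		return err
	}
	defer out.Close()

	_, err = io.Copy(out, zr)
	return err
}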
@@ -1,107 +0,0 @@
-// Copyright (C) NHR@FAU, University Erlangen-Nuremberg.
-// All rights reserved.
-// Use of this source code is governed by a MIT-style
-// license that can be found in the LICENSE file.
-package util
-
-import (
-	"fmt"
-	"io"
-	"io/ioutil"
-	"os"
-	"path/filepath"
-)
-
-func CopyFile(src, dst string) (err error) {
-	in, err := os.Open(src)
-	if err != nil {
-		return
-	}
-	defer in.Close()
-
-	out, err := os.Create(dst)
-	if err != nil {
-		return
-	}
-	defer func() {
-		if e := out.Close(); e != nil {
-			err = e
-		}
-	}()
-
-	_, err = io.Copy(out, in)
-	if err != nil {
-		return
-	}
-
-	err = out.Sync()
-	if err != nil {
-		return
-	}
-
-	si, err := os.Stat(src)
-	if err != nil {
-		return
-	}
-	err = os.Chmod(dst, si.Mode())
-	if err != nil {
-		return
-	}
-
-	return
-}
-
-func CopyDir(src string, dst string) (err error) {
-	src = filepath.Clean(src)
-	dst = filepath.Clean(dst)
-
-	si, err := os.Stat(src)
-	if err != nil {
-		return err
-	}
-	if !si.IsDir() {
-		return fmt.Errorf("source is not a directory")
-	}
-
-	_, err = os.Stat(dst)
-	if err != nil && !os.IsNotExist(err) {
-		return
-	}
-	if err == nil {
-		return fmt.Errorf("destination already exists")
-	}
-
-	err = os.MkdirAll(dst, si.Mode())
-	if err != nil {
-		return
-	}
-
-	entries, err := ioutil.ReadDir(src)
-	if err != nil {
-		return
-	}
-
-	for _, entry := range entries {
-		srcPath := filepath.Join(src, entry.Name())
-		dstPath := filepath.Join(dst, entry.Name())
-
-		if entry.IsDir() {
-			err = CopyDir(srcPath, dstPath)
-			if err != nil {
-				return
-			}
-		} else {
-			// Skip symlinks.
-			if entry.Mode()&os.ModeSymlink != 0 {
-				continue
-			}
-
-			err = CopyFile(srcPath, dstPath)
-			if err != nil {
-				return
-			}
-		}
-	}
-
-	return
-}
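Note: the removed CopyDir still called ioutil.ReadDir, deprecated since Go 1.16; os.ReadDir is the replacement and yields fs.DirEntry values, so type bits come from entry.Type(). A sketch (listEntries is an illustrative name):

package fileutil

import (
	"os"
	"path/filepath"
)

// listEntries walks one directory level with os.ReadDir, skipping
// symlinks the same way the removed CopyDir did.
func listEntries(dir string) ([]string, error) {
	entries, err := os.ReadDir(dir)
	if err != nil {
		return nil, err
	}
	paths := make([]string, 0, len(entries))
	for _, entry := range entries {
		if entry.Type()&os.ModeSymlink != 0 {
			continue // skip symlinks, as CopyDir did
		}
		paths = append(paths, filepath.Join(dir, entry.Name()))
	}
	return paths, nil
}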
@@ -1,34 +0,0 @@
-// Copyright (C) NHR@FAU, University Erlangen-Nuremberg.
-// All rights reserved.
-// Use of this source code is governed by a MIT-style
-// license that can be found in the LICENSE file.
-package util
-
-import (
-	"os"
-
-	"github.com/ClusterCockpit/cc-backend/pkg/log"
-)
-
-func DiskUsage(dirpath string) float64 {
-	var size int64
-
-	dir, err := os.Open(dirpath)
-	if err != nil {
-		log.Errorf("DiskUsage() error: %v", err)
-		return 0
-	}
-	defer dir.Close()
-
-	files, err := dir.Readdir(-1)
-	if err != nil {
-		log.Errorf("DiskUsage() error: %v", err)
-		return 0
-	}
-
-	for _, file := range files {
-		size += file.Size()
-	}
-
-	return float64(size) * 1e-6
-}
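Note the semantics of the removed DiskUsage: it summed only the directory's immediate entries (no recursion) and reported decimal megabytes, i.e. bytes * 1e-6. A recursive variant for comparison, as a sketch:

package fileutil

import (
	"io/fs"
	"path/filepath"
)

// diskUsageMB recurses with filepath.WalkDir; the removed helper only
// summed the top level and likewise returned bytes * 1e-6 (decimal MB).
func diskUsageMB(root string) (float64, error) {
	var size int64
	err := filepath.WalkDir(root, func(path string, d fs.DirEntry, werr error) error {
		if werr != nil {
			return werr
		}
		if !d.IsDir() {
			info, ierr := d.Info()
			if ierr != nil {
				return ierr
			}
			size += info.Size()
		}
		return nil
	})
	return float64(size) * 1e-6, err
}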
@@ -1,36 +0,0 @@
-// Copyright (C) NHR@FAU, University Erlangen-Nuremberg.
-// All rights reserved.
-// Use of this source code is governed by a MIT-style
-// license that can be found in the LICENSE file.
-package util
-
-import (
-	"errors"
-	"os"
-
-	"github.com/ClusterCockpit/cc-backend/pkg/log"
-)
-
-func CheckFileExists(filePath string) bool {
-	_, err := os.Stat(filePath)
-	return !errors.Is(err, os.ErrNotExist)
-}
-
-func GetFilesize(filePath string) int64 {
-	fileInfo, err := os.Stat(filePath)
-	if err != nil {
-		log.Errorf("Error on Stat %s: %v", filePath, err)
-		return 0
-	}
-	return fileInfo.Size()
-}
-
-func GetFilecount(path string) int {
-	files, err := os.ReadDir(path)
-	if err != nil {
-		log.Errorf("Error on ReadDir %s: %v", path, err)
-		return 0
-	}
-
-	return len(files)
-}
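Note: CheckFileExists returns true for any Stat error other than os.ErrNotExist, so an unreadable path counts as existing. A stricter sketch that surfaces such errors:

package fileutil

import (
	"errors"
	"os"
)

// strictExists distinguishes "missing" from "stat failed", which the
// removed CheckFileExists folded into a single boolean.
func strictExists(path string) (bool, error) {
	_, err := os.Stat(path)
	if err == nil {
		return true, nil
	}
	if errors.Is(err, os.ErrNotExist) {
		return false, nil
	}
	return false, err // e.g. os.ErrPermission
}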
@@ -1,75 +0,0 @@
-// Copyright (C) 2023 NHR@FAU, University Erlangen-Nuremberg.
-// All rights reserved.
-// Use of this source code is governed by a MIT-style
-// license that can be found in the LICENSE file.
-package util
-
-import (
-	"sync"
-
-	"github.com/ClusterCockpit/cc-backend/pkg/log"
-	"github.com/fsnotify/fsnotify"
-)
-
-type Listener interface {
-	EventCallback()
-	EventMatch(event string) bool
-}
-
-var (
-	initOnce  sync.Once
-	w         *fsnotify.Watcher
-	listeners []Listener
-)
-
-func AddListener(path string, l Listener) {
-	var err error
-
-	initOnce.Do(func() {
-		var err error
-		w, err = fsnotify.NewWatcher()
-		if err != nil {
-			log.Error("creating a new watcher: %w", err)
-		}
-		listeners = make([]Listener, 0)
-
-		go watchLoop(w)
-	})
-
-	listeners = append(listeners, l)
-	err = w.Add(path)
-	if err != nil {
-		log.Warnf("%q: %s", path, err)
-	}
-}
-
-func FsWatcherShutdown() {
-	if w != nil {
-		w.Close()
-	}
-}
-
-func watchLoop(w *fsnotify.Watcher) {
-	for {
-		select {
-		// Read from Errors.
-		case err, ok := <-w.Errors:
-			if !ok { // Channel was closed (i.e. Watcher.Close() was called).
-				return
-			}
-			log.Errorf("watch event loop: %s", err)
-		// Read from Events.
-		case e, ok := <-w.Events:
-			if !ok { // Channel was closed (i.e. Watcher.Close() was called).
-				return
-			}
-
-			log.Infof("Event %s", e)
-			for _, l := range listeners {
-				if l.EventMatch(e.String()) {
-					l.EventCallback()
-				}
-			}
-		}
-	}
-}
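Note two warts in the removed watcher: log.Error("creating a new watcher: %w", err) hands a wrap verb to a non-formatting call, and the err declared inside the initOnce.Do closure shadows the outer one, so a failed fsnotify.NewWatcher is only logged and w stays nil. A sketch of a safer initialization (addWatch is an illustrative name; logging simplified to the standard library):

package watch

import (
	"errors"
	"log"
	"sync"

	"github.com/fsnotify/fsnotify"
)

var (
	initOnce sync.Once
	w        *fsnotify.Watcher
)

// addWatch initializes the shared watcher once and reports, rather than
// swallows, a failed construction on every subsequent call.
func addWatch(path string) error {
	initOnce.Do(func() {
		var err error
		w, err = fsnotify.NewWatcher()
		if err != nil {
			log.Printf("creating a new watcher: %v", err)
		}
	})
	if w == nil {
		return errors.New("fsnotify watcher was not initialized")
	}
	return w.Add(path)
}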
@@ -1,60 +0,0 @@
-// Copyright (C) NHR@FAU, University Erlangen-Nuremberg.
-// All rights reserved.
-// Use of this source code is governed by a MIT-style
-// license that can be found in the LICENSE file.
-package util
-
-import (
-	"golang.org/x/exp/constraints"
-
-	"fmt"
-	"math"
-	"sort"
-)
-
-func Min[T constraints.Ordered](a, b T) T {
-	if a < b {
-		return a
-	}
-	return b
-}
-
-func Max[T constraints.Ordered](a, b T) T {
-	if a > b {
-		return a
-	}
-	return b
-}
-
-func sortedCopy(input []float64) []float64 {
-	sorted := make([]float64, len(input))
-	copy(sorted, input)
-	sort.Float64s(sorted)
-	return sorted
-}
-
-func Mean(input []float64) (float64, error) {
-	if len(input) == 0 {
-		return math.NaN(), fmt.Errorf("input array is empty: %#v", input)
-	}
-	sum := 0.0
-	for _, n := range input {
-		sum += n
-	}
-	return sum / float64(len(input)), nil
-}
-
-func Median(input []float64) (median float64, err error) {
-	c := sortedCopy(input)
-	// Even numbers: add the two middle numbers, divide by two (use mean function)
-	// Odd numbers: Use the middle number
-	l := len(c)
-	if l == 0 {
-		return math.NaN(), fmt.Errorf("input array is empty: %#v", input)
-	} else if l%2 == 0 {
-		median, _ = Mean(c[l/2-1 : l/2+1])
-	} else {
-		median = c[l/2]
-	}
-	return median, nil
-}
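Note: since Go 1.21 the generic Min/Max helpers have builtin equivalents, and the slices package covers whole-slice variants:

package main

import (
	"fmt"
	"slices"
)

func main() {
	// Builtins min and max (Go 1.21+) replace util.Min and util.Max.
	fmt.Println(min(3, 7), max(3, 7)) // 3 7
	// slices.Max covers the whole-slice case; it panics on an empty slice.
	fmt.Println(slices.Max([]int64{4, 9, 2})) // 9
}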
@@ -1,75 +0,0 @@
-// Copyright (C) NHR@FAU, University Erlangen-Nuremberg.
-// All rights reserved.
-// Use of this source code is governed by a MIT-style
-// license that can be found in the LICENSE file.
-package util_test
-
-import (
-	"fmt"
-	"os"
-	"path/filepath"
-	"testing"
-
-	"github.com/ClusterCockpit/cc-backend/internal/util"
-)
-
-func TestCheckFileExists(t *testing.T) {
-	tmpdir := t.TempDir()
-	if !util.CheckFileExists(tmpdir) {
-		t.Fatal("expected true, got false")
-	}
-
-	filePath := filepath.Join(tmpdir, "version.txt")
-
-	if err := os.WriteFile(filePath, []byte(fmt.Sprintf("%d", 1)), 0666); err != nil {
-		t.Fatal(err)
-	}
-	if !util.CheckFileExists(filePath) {
-		t.Fatal("expected true, got false")
-	}
-
-	filePath = filepath.Join(tmpdir, "version-test.txt")
-	if util.CheckFileExists(filePath) {
-		t.Fatal("expected false, got true")
-	}
-}
-
-func TestGetFileSize(t *testing.T) {
-	tmpdir := t.TempDir()
-	filePath := filepath.Join(tmpdir, "data.json")
-
-	if s := util.GetFilesize(filePath); s > 0 {
-		t.Fatalf("expected 0, got %d", s)
-	}
-
-	if err := os.WriteFile(filePath, []byte(fmt.Sprintf("%d", 1)), 0666); err != nil {
-		t.Fatal(err)
-	}
-	if s := util.GetFilesize(filePath); s == 0 {
-		t.Fatal("expected not 0, got 0")
-	}
-}
-
-func TestGetFileCount(t *testing.T) {
-	tmpdir := t.TempDir()
-
-	if c := util.GetFilecount(tmpdir); c != 0 {
-		t.Fatalf("expected 0, got %d", c)
-	}
-
-	filePath := filepath.Join(tmpdir, "data-1.json")
-	if err := os.WriteFile(filePath, []byte(fmt.Sprintf("%d", 1)), 0666); err != nil {
-		t.Fatal(err)
-	}
-	filePath = filepath.Join(tmpdir, "data-2.json")
-	if err := os.WriteFile(filePath, []byte(fmt.Sprintf("%d", 1)), 0666); err != nil {
-		t.Fatal(err)
-	}
-	if c := util.GetFilecount(tmpdir); c != 2 {
-		t.Fatalf("expected 2, got %d", c)
-	}
-
-	if c := util.GetFilecount(filePath); c != 0 {
-		t.Fatalf("expected 0, got %d", c)
-	}
-}
@@ -1,5 +1,5 @@
 // Copyright (C) NHR@FAU, University Erlangen-Nuremberg.
-// All rights reserved.
+// All rights reserved. This file is part of cc-backend.
 // Use of this source code is governed by a MIT-style
 // license that can be found in the LICENSE file.
 package archive
@@ -10,9 +10,9 @@ import (
 	"maps"
 	"sync"
 
-	"github.com/ClusterCockpit/cc-backend/pkg/log"
-	"github.com/ClusterCockpit/cc-backend/pkg/lrucache"
-	"github.com/ClusterCockpit/cc-backend/pkg/schema"
+	cclog "github.com/ClusterCockpit/cc-lib/ccLogger"
+	"github.com/ClusterCockpit/cc-lib/lrucache"
+	"github.com/ClusterCockpit/cc-lib/schema"
 )
 
 const Version uint64 = 2
@@ -75,7 +75,7 @@ func Init(rawConfig json.RawMessage, disableArchive bool) error {
 	}
 
 	if err = json.Unmarshal(rawConfig, &cfg); err != nil {
-		log.Warn("Error while unmarshaling raw config json")
+		cclog.Warn("Error while unmarshaling raw config json")
 		return
 	}
 
@@ -91,10 +91,10 @@ func Init(rawConfig json.RawMessage, disableArchive bool) error {
 		var version uint64
 		version, err = ar.Init(rawConfig)
 		if err != nil {
-			log.Errorf("Error while initializing archiveBackend: %s", err.Error())
+			cclog.Errorf("Error while initializing archiveBackend: %s", err.Error())
 			return
 		}
-		log.Infof("Load archive version %d", version)
+		cclog.Infof("Load archive version %d", version)
 
 		err = initClusterConfig()
 	})
@@ -114,7 +114,7 @@ func LoadAveragesFromArchive(
 ) error {
 	metaFile, err := ar.LoadJobMeta(job)
 	if err != nil {
-		log.Errorf("Error while loading job metadata from archiveBackend: %s", err.Error())
+		cclog.Errorf("Error while loading job metadata from archiveBackend: %s", err.Error())
 		return err
 	}
 
@@ -137,7 +137,7 @@ func LoadStatsFromArchive(
 	data := make(map[string]schema.MetricStatistics, len(metrics))
 	metaFile, err := ar.LoadJobMeta(job)
 	if err != nil {
-		log.Errorf("Error while loading job metadata from archiveBackend: %s", err.Error())
+		cclog.Errorf("Error while loading job metadata from archiveBackend: %s", err.Error())
 		return data, err
 	}
 
@@ -166,7 +166,7 @@ func LoadScopedStatsFromArchive(
 ) (schema.ScopedJobStats, error) {
 	data, err := ar.LoadJobStats(job)
 	if err != nil {
-		log.Errorf("Error while loading job stats from archiveBackend: %s", err.Error())
+		cclog.Errorf("Error while loading job stats from archiveBackend: %s", err.Error())
 		return nil, err
 	}
 
@@ -176,7 +176,7 @@ func LoadScopedStatsFromArchive(
 func GetStatistics(job *schema.Job) (map[string]schema.JobStatistics, error) {
 	metaFile, err := ar.LoadJobMeta(job)
 	if err != nil {
-		log.Errorf("Error while loading job metadata from archiveBackend: %s", err.Error())
+		cclog.Errorf("Error while loading job metadata from archiveBackend: %s", err.Error())
 		return nil, err
 	}
 
@@ -195,7 +195,7 @@ func UpdateMetadata(job *schema.Job, metadata map[string]string) error {
 
 	jobMeta, err := ar.LoadJobMeta(job)
 	if err != nil {
-		log.Errorf("Error while loading job metadata from archiveBackend: %s", err.Error())
+		cclog.Errorf("Error while loading job metadata from archiveBackend: %s", err.Error())
 		return err
 	}
 
@@ -216,7 +216,7 @@ func UpdateTags(job *schema.Job, tags []*schema.Tag) error {
 
 	jobMeta, err := ar.LoadJobMeta(job)
 	if err != nil {
-		log.Errorf("Error while loading job metadata from archiveBackend: %s", err.Error())
+		cclog.Errorf("Error while loading job metadata from archiveBackend: %s", err.Error())
 		return err
 	}
 
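Note: the pattern repeated across these hunks is purely mechanical: the logger import gets an explicit alias, so every call site changes from log.X to cclog.X with identical arguments. Minimal illustration:

package main

import (
	cclog "github.com/ClusterCockpit/cc-lib/ccLogger"
)

func main() {
	// Same call surface as the old pkg/log; only the import line changed.
	cclog.Infof("Load archive version %d", 2)
}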
@@ -1,5 +1,5 @@
 // Copyright (C) NHR@FAU, University Erlangen-Nuremberg.
-// All rights reserved.
+// All rights reserved. This file is part of cc-backend.
 // Use of this source code is governed by a MIT-style
 // license that can be found in the LICENSE file.
 package archive_test
@@ -10,9 +10,9 @@ import (
 	"path/filepath"
 	"testing"
 
-	"github.com/ClusterCockpit/cc-backend/internal/util"
 	"github.com/ClusterCockpit/cc-backend/pkg/archive"
-	"github.com/ClusterCockpit/cc-backend/pkg/schema"
+	"github.com/ClusterCockpit/cc-lib/schema"
+	"github.com/ClusterCockpit/cc-lib/util"
 )
 
 var jobs []*schema.Job
@@ -41,18 +41,18 @@ func setup(t *testing.T) archive.ArchiveBackend {
 	return archive.GetHandle()
 }
 
-func TestCleanUp(t *testing.T) {
-	a := setup(t)
-	if !a.Exists(jobs[0]) {
-		t.Error("Job does not exist")
-	}
+// func TestCleanUp(t *testing.T) {
+// 	a := setup(t)
+// 	if !a.Exists(jobs[0]) {
+// 		t.Error("Job does not exist")
+// 	}
 
-	a.CleanUp(jobs)
+// 	a.CleanUp(jobs)
 
-	if a.Exists(jobs[0]) || a.Exists(jobs[1]) {
-		t.Error("Jobs still exist")
-	}
-}
+// 	if a.Exists(jobs[0]) || a.Exists(jobs[1]) {
+// 		t.Error("Jobs still exist")
+// 	}
+// }
 
 // func TestCompress(t *testing.T) {
 // 	a := setup(t)
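Note on the commented-out TestCleanUp: t.Skip would keep the disabled test visible in go test -v output instead of hiding it entirely. A sketch (the function name is illustrative, to avoid clashing with the commented original):

package archive_test

import "testing"

// Skipping keeps the disabled test listed, and easy to re-enable,
// rather than commenting its body out wholesale.
func TestCleanUpSkipped(t *testing.T) {
	t.Skip("disabled during the cc-lib port")
}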
@@ -1,5 +1,5 @@
 // Copyright (C) NHR@FAU, University Erlangen-Nuremberg.
-// All rights reserved.
+// All rights reserved. This file is part of cc-backend.
 // Use of this source code is governed by a MIT-style
 // license that can be found in the LICENSE file.
 package archive
@@ -8,8 +8,8 @@ import (
 	"errors"
 	"fmt"
 
-	"github.com/ClusterCockpit/cc-backend/pkg/log"
-	"github.com/ClusterCockpit/cc-backend/pkg/schema"
+	cclog "github.com/ClusterCockpit/cc-lib/ccLogger"
+	"github.com/ClusterCockpit/cc-lib/schema"
 )
 
 var (
@@ -27,7 +27,7 @@ func initClusterConfig() error {
 
 		cluster, err := ar.LoadClusterCfg(c)
 		if err != nil {
-			log.Warnf("Error while loading cluster config for cluster '%v'", c)
+			cclog.Warnf("Error while loading cluster config for cluster '%v'", c)
 			return err
 		}
 
@@ -1,5 +1,5 @@
 // Copyright (C) NHR@FAU, University Erlangen-Nuremberg.
-// All rights reserved.
+// All rights reserved. This file is part of cc-backend.
 // Use of this source code is governed by a MIT-style
 // license that can be found in the LICENSE file.
 package archive_test
@@ -1,5 +1,5 @@
 // Copyright (C) NHR@FAU, University Erlangen-Nuremberg.
-// All rights reserved.
+// All rights reserved. This file is part of cc-backend.
 // Use of this source code is governed by a MIT-style
 // license that can be found in the LICENSE file.
 package archive
@@ -21,9 +21,9 @@ import (
 	"time"
 
 	"github.com/ClusterCockpit/cc-backend/internal/config"
-	"github.com/ClusterCockpit/cc-backend/internal/util"
-	"github.com/ClusterCockpit/cc-backend/pkg/log"
-	"github.com/ClusterCockpit/cc-backend/pkg/schema"
+	cclog "github.com/ClusterCockpit/cc-lib/ccLogger"
+	"github.com/ClusterCockpit/cc-lib/schema"
+	"github.com/ClusterCockpit/cc-lib/util"
 	"github.com/santhosh-tekuri/jsonschema/v5"
 )
 
@@ -68,7 +68,7 @@ func getPath(
 func loadJobMeta(filename string) (*schema.Job, error) {
 	b, err := os.ReadFile(filename)
 	if err != nil {
-		log.Errorf("loadJobMeta() > open file error: %v", err)
+		cclog.Errorf("loadJobMeta() > open file error: %v", err)
 		return nil, err
 	}
 	if config.Keys.Validate {
@@ -83,7 +83,7 @@ func loadJobMeta(filename string) (*schema.Job, error) {
 func loadJobData(filename string, isCompressed bool) (schema.JobData, error) {
 	f, err := os.Open(filename)
 	if err != nil {
-		log.Errorf("fsBackend LoadJobData()- %v", err)
+		cclog.Errorf("fsBackend LoadJobData()- %v", err)
 		return nil, err
 	}
 	defer f.Close()
@@ -91,7 +91,7 @@ func loadJobData(filename string, isCompressed bool) (schema.JobData, error) {
 	if isCompressed {
 		r, err := gzip.NewReader(f)
 		if err != nil {
-			log.Errorf(" %v", err)
+			cclog.Errorf(" %v", err)
 			return nil, err
 		}
 		defer r.Close()
@@ -116,7 +116,7 @@ func loadJobData(filename string, isCompressed bool) (schema.JobData, error) {
 func loadJobStats(filename string, isCompressed bool) (schema.ScopedJobStats, error) {
 	f, err := os.Open(filename)
 	if err != nil {
-		log.Errorf("fsBackend LoadJobStats()- %v", err)
+		cclog.Errorf("fsBackend LoadJobStats()- %v", err)
 		return nil, err
 	}
 	defer f.Close()
@@ -124,7 +124,7 @@ func loadJobStats(filename string, isCompressed bool) (schema.ScopedJobStats, er
 	if isCompressed {
 		r, err := gzip.NewReader(f)
 		if err != nil {
-			log.Errorf(" %v", err)
+			cclog.Errorf(" %v", err)
 			return nil, err
 		}
 		defer r.Close()
@@ -149,25 +149,25 @@ func loadJobStats(filename string, isCompressed bool) (schema.ScopedJobStats, er
 func (fsa *FsArchive) Init(rawConfig json.RawMessage) (uint64, error) {
 	var config FsArchiveConfig
 	if err := json.Unmarshal(rawConfig, &config); err != nil {
-		log.Warnf("Init() > Unmarshal error: %#v", err)
+		cclog.Warnf("Init() > Unmarshal error: %#v", err)
 		return 0, err
 	}
 	if config.Path == "" {
 		err := fmt.Errorf("Init() : empty config.Path")
-		log.Errorf("Init() > config.Path error: %v", err)
+		cclog.Errorf("Init() > config.Path error: %v", err)
 		return 0, err
 	}
 	fsa.path = config.Path
 
 	b, err := os.ReadFile(filepath.Join(fsa.path, "version.txt"))
 	if err != nil {
-		log.Warnf("fsBackend Init() - %v", err)
+		cclog.Warnf("fsBackend Init() - %v", err)
 		return 0, err
 	}
 
 	version, err := strconv.ParseUint(strings.TrimSuffix(string(b), "\n"), 10, 64)
 	if err != nil {
-		log.Errorf("fsBackend Init()- %v", err)
+		cclog.Errorf("fsBackend Init()- %v", err)
 		return 0, err
 	}
 
@@ -177,7 +177,7 @@ func (fsa *FsArchive) Init(rawConfig json.RawMessage) (uint64, error) {
 
 	entries, err := os.ReadDir(fsa.path)
 	if err != nil {
-		log.Errorf("Init() > ReadDir() error: %v", err)
+		cclog.Errorf("Init() > ReadDir() error: %v", err)
 		return 0, err
 	}
 
@@ -195,7 +195,7 @@ func (fsa *FsArchive) Info() {
 	fmt.Printf("Job archive %s\n", fsa.path)
 	clusters, err := os.ReadDir(fsa.path)
 	if err != nil {
-		log.Fatalf("Reading clusters failed: %s", err.Error())
+		cclog.Fatalf("Reading clusters failed: %s", err.Error())
 	}
 
 	ci := make(map[string]*clusterInfo)
@@ -209,7 +209,7 @@ func (fsa *FsArchive) Info() {
 		ci[cc] = &clusterInfo{dateFirst: time.Now().Unix()}
 		lvl1Dirs, err := os.ReadDir(filepath.Join(fsa.path, cluster.Name()))
 		if err != nil {
-			log.Fatalf("Reading jobs failed @ lvl1 dirs: %s", err.Error())
+			cclog.Fatalf("Reading jobs failed @ lvl1 dirs: %s", err.Error())
 		}
 
 		for _, lvl1Dir := range lvl1Dirs {
@@ -218,14 +218,14 @@ func (fsa *FsArchive) Info() {
 			}
 			lvl2Dirs, err := os.ReadDir(filepath.Join(fsa.path, cluster.Name(), lvl1Dir.Name()))
 			if err != nil {
-				log.Fatalf("Reading jobs failed @ lvl2 dirs: %s", err.Error())
+				cclog.Fatalf("Reading jobs failed @ lvl2 dirs: %s", err.Error())
 			}
 
 			for _, lvl2Dir := range lvl2Dirs {
 				dirpath := filepath.Join(fsa.path, cluster.Name(), lvl1Dir.Name(), lvl2Dir.Name())
 				startTimeDirs, err := os.ReadDir(dirpath)
 				if err != nil {
-					log.Fatalf("Reading jobs failed @ starttime dirs: %s", err.Error())
+					cclog.Fatalf("Reading jobs failed @ starttime dirs: %s", err.Error())
 				}
 
 				for _, startTimeDir := range startTimeDirs {
@@ -233,7 +233,7 @@ func (fsa *FsArchive) Info() {
 					ci[cc].numJobs++
 					startTime, err := strconv.ParseInt(startTimeDir.Name(), 10, 64)
 					if err != nil {
-						log.Fatalf("Cannot parse starttime: %s", err.Error())
+						cclog.Fatalf("Cannot parse starttime: %s", err.Error())
 					}
 					ci[cc].dateFirst = util.Min(ci[cc].dateFirst, startTime)
 					ci[cc].dateLast = util.Max(ci[cc].dateLast, startTime)
@@ -278,7 +278,7 @@ func (fsa *FsArchive) Clean(before int64, after int64) {
 
 	clusters, err := os.ReadDir(fsa.path)
 	if err != nil {
-		log.Fatalf("Reading clusters failed: %s", err.Error())
+		cclog.Fatalf("Reading clusters failed: %s", err.Error())
 	}
 
 	for _, cluster := range clusters {
@@ -288,7 +288,7 @@ func (fsa *FsArchive) Clean(before int64, after int64) {
 
 		lvl1Dirs, err := os.ReadDir(filepath.Join(fsa.path, cluster.Name()))
 		if err != nil {
-			log.Fatalf("Reading jobs failed @ lvl1 dirs: %s", err.Error())
+			cclog.Fatalf("Reading jobs failed @ lvl1 dirs: %s", err.Error())
 		}
 
 		for _, lvl1Dir := range lvl1Dirs {
@@ -297,33 +297,33 @@ func (fsa *FsArchive) Clean(before int64, after int64) {
 			}
 			lvl2Dirs, err := os.ReadDir(filepath.Join(fsa.path, cluster.Name(), lvl1Dir.Name()))
 			if err != nil {
-				log.Fatalf("Reading jobs failed @ lvl2 dirs: %s", err.Error())
+				cclog.Fatalf("Reading jobs failed @ lvl2 dirs: %s", err.Error())
 			}
 
 			for _, lvl2Dir := range lvl2Dirs {
 				dirpath := filepath.Join(fsa.path, cluster.Name(), lvl1Dir.Name(), lvl2Dir.Name())
 				startTimeDirs, err := os.ReadDir(dirpath)
 				if err != nil {
-					log.Fatalf("Reading jobs failed @ starttime dirs: %s", err.Error())
+					cclog.Fatalf("Reading jobs failed @ starttime dirs: %s", err.Error())
 				}
 
 				for _, startTimeDir := range startTimeDirs {
 					if startTimeDir.IsDir() {
 						startTime, err := strconv.ParseInt(startTimeDir.Name(), 10, 64)
 						if err != nil {
-							log.Fatalf("Cannot parse starttime: %s", err.Error())
+							cclog.Fatalf("Cannot parse starttime: %s", err.Error())
 						}
 
 						if startTime < before || startTime > after {
 							if err := os.RemoveAll(filepath.Join(dirpath, startTimeDir.Name())); err != nil {
-								log.Errorf("JobArchive Cleanup() error: %v", err)
+								cclog.Errorf("JobArchive Cleanup() error: %v", err)
 							}
 						}
 					}
 				}
 				if util.GetFilecount(dirpath) == 0 {
 					if err := os.Remove(dirpath); err != nil {
-						log.Errorf("JobArchive Clean() error: %v", err)
+						cclog.Errorf("JobArchive Clean() error: %v", err)
 					}
 				}
 			}
@@ -337,16 +337,16 @@ func (fsa *FsArchive) Move(jobs []*schema.Job, path string) {
 		target := getDirectory(job, path)
 
 		if err := os.MkdirAll(filepath.Clean(filepath.Join(target, "..")), 0777); err != nil {
-			log.Errorf("JobArchive Move MkDir error: %v", err)
+			cclog.Errorf("JobArchive Move MkDir error: %v", err)
 		}
 		if err := os.Rename(source, target); err != nil {
-			log.Errorf("JobArchive Move() error: %v", err)
+			cclog.Errorf("JobArchive Move() error: %v", err)
 		}
 
 		parent := filepath.Clean(filepath.Join(source, ".."))
 		if util.GetFilecount(parent) == 0 {
 			if err := os.Remove(parent); err != nil {
-				log.Errorf("JobArchive Move() error: %v", err)
+				cclog.Errorf("JobArchive Move() error: %v", err)
 			}
 		}
 	}
@@ -355,20 +355,24 @@
 func (fsa *FsArchive) CleanUp(jobs []*schema.Job) {
 	start := time.Now()
 	for _, job := range jobs {
+		if job == nil {
+			cclog.Errorf("JobArchive Cleanup() error: job is nil")
+			continue
+		}
 		dir := getDirectory(job, fsa.path)
 		if err := os.RemoveAll(dir); err != nil {
-			log.Errorf("JobArchive Cleanup() error: %v", err)
+			cclog.Errorf("JobArchive Cleanup() error: %v", err)
 		}
 
 		parent := filepath.Clean(filepath.Join(dir, ".."))
 		if util.GetFilecount(parent) == 0 {
 			if err := os.Remove(parent); err != nil {
-				log.Errorf("JobArchive Cleanup() error: %v", err)
+				cclog.Errorf("JobArchive Cleanup() error: %v", err)
 			}
 		}
 	}
 
-	log.Infof("Retention Service - Remove %d files in %s", len(jobs), time.Since(start))
+	cclog.Infof("Retention Service - Remove %d files in %s", len(jobs), time.Since(start))
 }
 
 func (fsa *FsArchive) Compress(jobs []*schema.Job) {
@@ -383,24 +387,24 @@ func (fsa *FsArchive) Compress(jobs []*schema.Job) {
 		}
 	}
 
-	log.Infof("Compression Service - %d files took %s", cnt, time.Since(start))
+	cclog.Infof("Compression Service - %d files took %s", cnt, time.Since(start))
 }
 
 func (fsa *FsArchive) CompressLast(starttime int64) int64 {
 	filename := filepath.Join(fsa.path, "compress.txt")
 	b, err := os.ReadFile(filename)
 	if err != nil {
-		log.Errorf("fsBackend Compress - %v", err)
+		cclog.Errorf("fsBackend Compress - %v", err)
 		os.WriteFile(filename, []byte(fmt.Sprintf("%d", starttime)), 0644)
 		return starttime
 	}
 	last, err := strconv.ParseInt(strings.TrimSuffix(string(b), "\n"), 10, 64)
 	if err != nil {
-		log.Errorf("fsBackend Compress - %v", err)
+		cclog.Errorf("fsBackend Compress - %v", err)
 		return starttime
 	}
 
-	log.Infof("fsBackend Compress - start %d last %d", starttime, last)
+	cclog.Infof("fsBackend Compress - start %d last %d", starttime, last)
 	os.WriteFile(filename, []byte(fmt.Sprintf("%d", starttime)), 0644)
 	return last
 }
@@ -437,10 +441,10 @@ func (fsa *FsArchive) LoadJobMeta(job *schema.Job) (*schema.Job, error) {
 func (fsa *FsArchive) LoadClusterCfg(name string) (*schema.Cluster, error) {
 	b, err := os.ReadFile(filepath.Join(fsa.path, name, "cluster.json"))
 	if err != nil {
-		log.Errorf("LoadClusterCfg() > open file error: %v", err)
+		cclog.Errorf("LoadClusterCfg() > open file error: %v", err)
 		// if config.Keys.Validate {
 		if err := schema.Validate(schema.ClusterCfg, bytes.NewReader(b)); err != nil {
-			log.Warnf("Validate cluster config: %v\n", err)
+			cclog.Warnf("Validate cluster config: %v\n", err)
 			return &schema.Cluster{}, fmt.Errorf("validate cluster config: %v", err)
 		}
 	}
@@ -453,7 +457,7 @@ func (fsa *FsArchive) Iter(loadMetricData bool) <-chan JobContainer {
 	go func() {
 		clustersDir, err := os.ReadDir(fsa.path)
 		if err != nil {
-			log.Fatalf("Reading clusters failed @ cluster dirs: %s", err.Error())
+			cclog.Fatalf("Reading clusters failed @ cluster dirs: %s", err.Error())
 		}
 
 		for _, clusterDir := range clustersDir {
@@ -462,7 +466,7 @@ func (fsa *FsArchive) Iter(loadMetricData bool) <-chan JobContainer {
 			}
 			lvl1Dirs, err := os.ReadDir(filepath.Join(fsa.path, clusterDir.Name()))
 			if err != nil {
-				log.Fatalf("Reading jobs failed @ lvl1 dirs: %s", err.Error())
+				cclog.Fatalf("Reading jobs failed @ lvl1 dirs: %s", err.Error())
 			}
 
 			for _, lvl1Dir := range lvl1Dirs {
@@ -473,21 +477,21 @@ func (fsa *FsArchive) Iter(loadMetricData bool) <-chan JobContainer {
 
 				lvl2Dirs, err := os.ReadDir(filepath.Join(fsa.path, clusterDir.Name(), lvl1Dir.Name()))
 				if err != nil {
-					log.Fatalf("Reading jobs failed @ lvl2 dirs: %s", err.Error())
+					cclog.Fatalf("Reading jobs failed @ lvl2 dirs: %s", err.Error())
 				}
 
 				for _, lvl2Dir := range lvl2Dirs {
 					dirpath := filepath.Join(fsa.path, clusterDir.Name(), lvl1Dir.Name(), lvl2Dir.Name())
 					startTimeDirs, err := os.ReadDir(dirpath)
 					if err != nil {
-						log.Fatalf("Reading jobs failed @ starttime dirs: %s", err.Error())
+						cclog.Fatalf("Reading jobs failed @ starttime dirs: %s", err.Error())
 					}
 
 					for _, startTimeDir := range startTimeDirs {
 						if startTimeDir.IsDir() {
 							job, err := loadJobMeta(filepath.Join(dirpath, startTimeDir.Name(), "meta.json"))
 							if err != nil && !errors.Is(err, &jsonschema.ValidationError{}) {
-								log.Errorf("in %s: %s", filepath.Join(dirpath, startTimeDir.Name()), err.Error())
+								cclog.Errorf("in %s: %s", filepath.Join(dirpath, startTimeDir.Name()), err.Error())
 							}
 
 							if loadMetricData {
@@ -501,10 +505,10 @@ func (fsa *FsArchive) Iter(loadMetricData bool) <-chan JobContainer {
 
 								data, err := loadJobData(filename, isCompressed)
 								if err != nil && !errors.Is(err, &jsonschema.ValidationError{}) {
-									log.Errorf("in %s: %s", filepath.Join(dirpath, startTimeDir.Name()), err.Error())
+									cclog.Errorf("in %s: %s", filepath.Join(dirpath, startTimeDir.Name()), err.Error())
 								}
 								ch <- JobContainer{Meta: job, Data: &data}
-								log.Errorf("in %s: %s", filepath.Join(dirpath, startTimeDir.Name()), err.Error())
+								cclog.Errorf("in %s: %s", filepath.Join(dirpath, startTimeDir.Name()), err.Error())
 							} else {
 								ch <- JobContainer{Meta: job, Data: nil}
 							}
@@ -521,15 +525,15 @@ func (fsa *FsArchive) Iter(loadMetricData bool) <-chan JobContainer {
 func (fsa *FsArchive) StoreJobMeta(job *schema.Job) error {
 	f, err := os.Create(getPath(job, fsa.path, "meta.json"))
 	if err != nil {
-		log.Error("Error while creating filepath for meta.json")
+		cclog.Error("Error while creating filepath for meta.json")
 		return err
 	}
 	if err := EncodeJobMeta(f, job); err != nil {
-		log.Error("Error while encoding job metadata to meta.json file")
+		cclog.Error("Error while encoding job metadata to meta.json file")
 		return err
 	}
 	if err := f.Close(); err != nil {
-		log.Warn("Error while closing meta.json file")
+		cclog.Warn("Error while closing meta.json file")
 		return err
 	}
 
@@ -546,35 +550,35 @@ func (fsa *FsArchive) ImportJob(
 ) error {
 	dir := getPath(jobMeta, fsa.path, "")
 	if err := os.MkdirAll(dir, 0777); err != nil {
-		log.Error("Error while creating job archive path")
+		cclog.Error("Error while creating job archive path")
 		return err
 	}
 
 	f, err := os.Create(path.Join(dir, "meta.json"))
 	if err != nil {
-		log.Error("Error while creating filepath for meta.json")
+		cclog.Error("Error while creating filepath for meta.json")
 		return err
 	}
 	if err := EncodeJobMeta(f, jobMeta); err != nil {
-		log.Error("Error while encoding job metadata to meta.json file")
+		cclog.Error("Error while encoding job metadata to meta.json file")
 		return err
 	}
 	if err := f.Close(); err != nil {
-		log.Warn("Error while closing meta.json file")
+		cclog.Warn("Error while closing meta.json file")
 		return err
 	}
 
 	f, err = os.Create(path.Join(dir, "data.json"))
 	if err != nil {
-		log.Error("Error while creating filepath for data.json")
+		cclog.Error("Error while creating filepath for data.json")
 		return err
 	}
 	if err := EncodeJobData(f, jobData); err != nil {
-		log.Error("Error while encoding job metricdata to data.json file")
+		cclog.Error("Error while encoding job metricdata to data.json file")
 		return err
 	}
 	if err := f.Close(); err != nil {
-		log.Warn("Error while closing data.json file")
+		cclog.Warn("Error while closing data.json file")
	}
 	return err
 }
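Note: one carried-over wart in Iter is the second cclog.Errorf(..., err.Error()) that runs unconditionally after the channel send; it dereferences a nil err whenever loadJobData succeeded. A small runnable illustration of the hazard:

package main

import (
	"errors"
	"fmt"
	"os"
)

func main() {
	_, err := os.Open("does-not-exist.json")
	if err != nil && !errors.Is(err, os.ErrNotExist) {
		fmt.Println("open failed:", err.Error())
	}
	// An unconditional err.Error() here, mirroring the duplicated logging
	// call in Iter, panics with a nil pointer whenever err is nil.
	if err != nil {
		fmt.Println("reported once, and only when non-nil:", err.Error())
	}
}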
@@ -1,5 +1,5 @@
 // Copyright (C) NHR@FAU, University Erlangen-Nuremberg.
-// All rights reserved.
+// All rights reserved. This file is part of cc-backend.
 // Use of this source code is governed by a MIT-style
 // license that can be found in the LICENSE file.
 package archive
@@ -10,8 +10,8 @@ import (
 	"path/filepath"
 	"testing"
 
-	"github.com/ClusterCockpit/cc-backend/internal/util"
-	"github.com/ClusterCockpit/cc-backend/pkg/schema"
+	"github.com/ClusterCockpit/cc-lib/schema"
+	"github.com/ClusterCockpit/cc-lib/util"
 )
 
 func TestInitEmptyPath(t *testing.T) {
@@ -1,5 +1,5 @@
 // Copyright (C) NHR@FAU, University Erlangen-Nuremberg.
-// All rights reserved.
+// All rights reserved. This file is part of cc-backend.
 // Use of this source code is governed by a MIT-style
 // license that can be found in the LICENSE file.
 package archive
@@ -9,15 +9,15 @@ import (
 	"io"
 	"time"
 
-	"github.com/ClusterCockpit/cc-backend/pkg/log"
-	"github.com/ClusterCockpit/cc-backend/pkg/schema"
+	cclog "github.com/ClusterCockpit/cc-lib/ccLogger"
+	"github.com/ClusterCockpit/cc-lib/schema"
 )
 
 func DecodeJobData(r io.Reader, k string) (schema.JobData, error) {
 	data := cache.Get(k, func() (value interface{}, ttl time.Duration, size int) {
 		var d schema.JobData
 		if err := json.NewDecoder(r).Decode(&d); err != nil {
-			log.Warn("Error while decoding raw job data json")
+			cclog.Warn("Error while decoding raw job data json")
 			return err, 0, 1000
 		}
 
@@ -25,7 +25,7 @@ func DecodeJobData(r io.Reader, k string) (schema.JobData, error) {
 	})
 
 	if err, ok := data.(error); ok {
-		log.Warn("Error in decoded job data set")
+		cclog.Warn("Error in decoded job data set")
 		return nil, err
 	}
 
@@ -72,7 +72,7 @@ func DecodeJobStats(r io.Reader, k string) (schema.ScopedJobStats, error) {
 func DecodeJobMeta(r io.Reader) (*schema.Job, error) {
 	var d schema.Job
 	if err := json.NewDecoder(r).Decode(&d); err != nil {
-		log.Warn("Error while decoding raw job meta json")
+		cclog.Warn("Error while decoding raw job meta json")
 		return &d, err
 	}
 
@@ -84,7 +84,7 @@ func DecodeJobMeta(r io.Reader) (*schema.Job, error) {
 func DecodeCluster(r io.Reader) (*schema.Cluster, error) {
 	var c schema.Cluster
 	if err := json.NewDecoder(r).Decode(&c); err != nil {
-		log.Warn("Error while decoding raw cluster json")
+		cclog.Warn("Error while decoding raw cluster json")
 		return &c, err
 	}
 
@@ -96,7 +96,7 @@ func DecodeCluster(r io.Reader) (*schema.Cluster, error) {
 func EncodeJobData(w io.Writer, d *schema.JobData) error {
 	// Sanitize parameters
 	if err := json.NewEncoder(w).Encode(d); err != nil {
-		log.Warn("Error while encoding new job data json")
+		cclog.Warn("Error while encoding new job data json")
 		return err
 	}
 
@@ -106,7 +106,7 @@ func EncodeJobData(w io.Writer, d *schema.JobData) error {
 func EncodeJobMeta(w io.Writer, d *schema.Job) error {
 	// Sanitize parameters
 	if err := json.NewEncoder(w).Encode(d); err != nil {
-		log.Warn("Error while encoding new job meta json")
+		cclog.Warn("Error while encoding new job meta json")
 		return err
 	}
 
@@ -1,5 +1,5 @@
 // Copyright (C) NHR@FAU, University Erlangen-Nuremberg.
-// All rights reserved.
+// All rights reserved. This file is part of cc-backend.
 // Use of this source code is governed by a MIT-style
 // license that can be found in the LICENSE file.
 package archive
@@ -9,7 +9,7 @@ import (
 	"strconv"
 	"strings"
 
-	"github.com/ClusterCockpit/cc-backend/pkg/log"
+	cclog "github.com/ClusterCockpit/cc-lib/ccLogger"
 )
 
 type NodeList [][]interface {
@@ -51,7 +51,7 @@ func (nl *NodeList) PrintList() []string {
 				if inner["zeroPadded"] == 1 {
 					out = append(out, fmt.Sprintf("%s%0*d", prefix, inner["digits"], i))
 				} else {
-					log.Error("node list: only zero-padded ranges are allowed")
+					cclog.Error("node list: only zero-padded ranges are allowed")
 				}
 			}
 		}
@@ -129,7 +129,7 @@ type NLExprIntRange struct {
 
 func (nle NLExprIntRange) consume(input string) (next string, ok bool) {
 	if !nle.zeroPadded || nle.digits < 1 {
-		log.Error("only zero-padded ranges are allowed")
+		cclog.Error("only zero-padded ranges are allowed")
 		return "", false
 	}
 
@@ -1,5 +1,5 @@
 // Copyright (C) NHR@FAU, University Erlangen-Nuremberg.
-// All rights reserved.
+// All rights reserved. This file is part of cc-backend.
 // Use of this source code is governed by a MIT-style
 // license that can be found in the LICENSE file.
 package archive
@@ -1,5 +1,5 @@
 // Copyright (C) NHR@FAU, University Erlangen-Nuremberg.
-// All rights reserved.
+// All rights reserved. This file is part of cc-backend.
 // Use of this source code is governed by a MIT-style
 // license that can be found in the LICENSE file.
 package archive
220	pkg/log/log.go
@ -1,220 +0,0 @@
|
|||||||
// Copyright (C) NHR@FAU, University Erlangen-Nuremberg.
// All rights reserved.
// Use of this source code is governed by a MIT-style
// license that can be found in the LICENSE file.
package log

import (
	"fmt"
	"io"
	"log"
	"os"
)

// Provides a simple way of logging with different levels.
// Time/date are not logged because systemd adds
// them for us (default, can be changed by flag '--logdate true').
//
// Uses these prefixes: https://www.freedesktop.org/software/systemd/man/sd-daemon.html

var (
	DebugWriter io.Writer = os.Stderr
	InfoWriter  io.Writer = os.Stderr
	WarnWriter  io.Writer = os.Stderr
	ErrWriter   io.Writer = os.Stderr
	CritWriter  io.Writer = os.Stderr
)

var (
	DebugPrefix string = "<7>[DEBUG] "
	InfoPrefix  string = "<6>[INFO] "
	WarnPrefix  string = "<4>[WARNING] "
	ErrPrefix   string = "<3>[ERROR] "
	CritPrefix  string = "<2>[CRITICAL] "
)

var (
	DebugLog *log.Logger = log.New(DebugWriter, DebugPrefix, log.LstdFlags)
	InfoLog  *log.Logger = log.New(InfoWriter, InfoPrefix, log.LstdFlags|log.Lshortfile)
	WarnLog  *log.Logger = log.New(WarnWriter, WarnPrefix, log.LstdFlags|log.Lshortfile)
	ErrLog   *log.Logger = log.New(ErrWriter, ErrPrefix, log.LstdFlags|log.Llongfile)
	CritLog  *log.Logger = log.New(CritWriter, CritPrefix, log.LstdFlags|log.Llongfile)
)

var loglevel string = "info"

/* CONFIG */

func Init(lvl string, logdate bool) {
	// Discard I/O for all writers below the selected loglevel; <CRITICAL> is always written.
	switch lvl {
	case "crit":
		ErrWriter = io.Discard
		fallthrough
	case "err":
		WarnWriter = io.Discard
		fallthrough
	case "warn":
		InfoWriter = io.Discard
		fallthrough
	case "info":
		DebugWriter = io.Discard
	case "debug":
		// Nothing to do...
		break
	default:
		fmt.Printf("pkg/log: Flag 'loglevel' has invalid value %#v\npkg/log: Will use default loglevel '%s'\n", lvl, loglevel)
	}

	if !logdate {
		DebugLog = log.New(DebugWriter, DebugPrefix, 0)
		InfoLog = log.New(InfoWriter, InfoPrefix, log.Lshortfile)
		WarnLog = log.New(WarnWriter, WarnPrefix, log.Lshortfile)
		ErrLog = log.New(ErrWriter, ErrPrefix, log.Llongfile)
		CritLog = log.New(CritWriter, CritPrefix, log.Llongfile)
	} else {
		DebugLog = log.New(DebugWriter, DebugPrefix, log.LstdFlags)
		InfoLog = log.New(InfoWriter, InfoPrefix, log.LstdFlags|log.Lshortfile)
		WarnLog = log.New(WarnWriter, WarnPrefix, log.LstdFlags|log.Lshortfile)
		ErrLog = log.New(ErrWriter, ErrPrefix, log.LstdFlags|log.Llongfile)
		CritLog = log.New(CritWriter, CritPrefix, log.LstdFlags|log.Llongfile)
	}

	loglevel = lvl
}

/* HELPER */

func Loglevel() string {
	return loglevel
}

/* PRIVATE HELPER */

// Return unformatted string
func printStr(v ...interface{}) string {
	return fmt.Sprint(v...)
}

// Return formatted string
func printfStr(format string, v ...interface{}) string {
	return fmt.Sprintf(format, v...)
}

/* PRINT */

// Prints to STDOUT without string formatting; application continues.
// Used for special cases not requiring log information like date or location.
func Print(v ...interface{}) {
	fmt.Fprintln(os.Stdout, v...)
}

// Prints to STDOUT without string formatting; application exits with error code 0.
// Used for exiting successfully with a message after an expected outcome, e.g. successful single-call application runs.
func Exit(v ...interface{}) {
	fmt.Fprintln(os.Stdout, v...)
	os.Exit(0)
}

// Prints to STDOUT without string formatting; application exits with error code 1.
// Used for terminating with a message after expected errors, e.g. wrong arguments or during init().
func Abort(v ...interface{}) {
	fmt.Fprintln(os.Stdout, v...)
	os.Exit(1)
}

// Prints to DEBUG writer without string formatting; application continues.
// Used for logging additional information, primarily for development.
func Debug(v ...interface{}) {
	DebugLog.Output(2, printStr(v...))
}

// Prints to INFO writer without string formatting; application continues.
// Used for logging additional information, e.g. notable returns or common fail-cases.
func Info(v ...interface{}) {
	InfoLog.Output(2, printStr(v...))
}

// Prints to WARNING writer without string formatting; application continues.
// Used for logging important information, e.g. uncommon edge cases or administration-related information.
func Warn(v ...interface{}) {
	WarnLog.Output(2, printStr(v...))
}

// Prints to ERROR writer without string formatting; application continues.
// Used for logging errors where the code can still return default(s) or nil.
func Error(v ...interface{}) {
	ErrLog.Output(2, printStr(v...))
}

// Prints to CRITICAL writer without string formatting; application exits with error code 1.
// Used for terminating on unexpected errors with date and code location.
func Fatal(v ...interface{}) {
	CritLog.Output(2, printStr(v...))
	os.Exit(1)
}

// Prints to PANIC function without string formatting; application exits with panic.
// Used for terminating on unexpected errors with stacktrace.
func Panic(v ...interface{}) {
	panic(printStr(v...))
}

/* PRINT FORMAT */

// Prints to STDOUT with string formatting; application continues.
// Used for special cases not requiring log information like date or location.
func Printf(format string, v ...interface{}) {
	fmt.Fprintf(os.Stdout, format, v...)
}

// Prints to STDOUT with string formatting; application exits with error code 0.
// Used for exiting successfully with a message after an expected outcome, e.g. successful single-call application runs.
func Exitf(format string, v ...interface{}) {
	fmt.Fprintf(os.Stdout, format, v...)
	os.Exit(0)
}

// Prints to STDOUT with string formatting; application exits with error code 1.
// Used for terminating with a message after expected errors, e.g. wrong arguments or during init().
func Abortf(format string, v ...interface{}) {
	fmt.Fprintf(os.Stdout, format, v...)
	os.Exit(1)
}

// Prints to DEBUG writer with string formatting; application continues.
// Used for logging additional information, primarily for development.
func Debugf(format string, v ...interface{}) {
	DebugLog.Output(2, printfStr(format, v...))
}

// Prints to INFO writer with string formatting; application continues.
// Used for logging additional information, e.g. notable returns or common fail-cases.
func Infof(format string, v ...interface{}) {
	InfoLog.Output(2, printfStr(format, v...))
}

// Prints to WARNING writer with string formatting; application continues.
// Used for logging important information, e.g. uncommon edge cases or administration-related information.
func Warnf(format string, v ...interface{}) {
	WarnLog.Output(2, printfStr(format, v...))
}

// Prints to ERROR writer with string formatting; application continues.
// Used for logging errors where the code can still return default(s) or nil.
func Errorf(format string, v ...interface{}) {
	ErrLog.Output(2, printfStr(format, v...))
}

// Prints to CRITICAL writer with string formatting; application exits with error code 1.
// Used for terminating on unexpected errors with date and code location.
func Fatalf(format string, v ...interface{}) {
	CritLog.Output(2, printfStr(format, v...))
	os.Exit(1)
}

// Prints to PANIC function with string formatting; application exits with panic.
// Used for terminating on unexpected errors with stacktrace.
func Panicf(format string, v ...interface{}) {
	panic(printfStr(format, v...))
}
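For context, a minimal sketch of how this now-removed package was typically consumed by cc-backend code (import path as listed above; the chosen level and messages are illustrative):

```go
package main

import "github.com/ClusterCockpit/cc-backend/pkg/log"

func main() {
	// Discard everything below "warn" and keep the systemd-style prefixes
	// without timestamps (systemd adds its own).
	log.Init("warn", false)

	log.Warnf("job %d: metric data incomplete", 1234) // written to WarnWriter
	log.Debug("this is discarded at loglevel 'warn'") // filtered out by Init
}
```

The cclib port replaces such call sites with the equivalent `cclog` functions from `github.com/ClusterCockpit/cc-lib/ccLogger`, as the `runtimeEnv` hunk at the end of this diff shows.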
@@ -1,124 +0,0 @@
# In-Memory LRU Cache for Golang Applications

This library can be embedded into your existing Go applications
and play the role *Memcached* or *Redis* might play for others.
It is inspired by [PHP Symfony's Cache Components](https://symfony.com/doc/current/components/cache/adapters/array_cache_adapter.html),
having a similar API. This library cannot be used for persistence,
is not properly tested yet, and is a bit special in a few ways described
below (especially with regard to the memory usage/`size`).

In addition to the interface described below, a `http.Handler` that can be used as middleware is provided as well.

- Advantages:
  - Anything (`interface{}`) can be stored as value
  - As it lives in the application itself, no serialization or de-serialization is needed
  - As it lives in the application itself, no memory moving/networking is needed
  - The computation of a new value for a key does __not__ block the full cache (only that key)
- Disadvantages:
  - You have to provide a size estimate for every value
  - __This size estimate should not change (i.e. values should not mutate)__
  - The cache can only be accessed by one application

## Example

```go
// Go look at the godocs and ./cache_test.go for more documentation and examples

maxMemory := 1000
cache := lrucache.New(maxMemory)

bar := cache.Get("foo", func() (value interface{}, ttl time.Duration, size int) {
	return "bar", 10 * time.Second, len("bar")
}).(string)

// bar == "bar"

bar = cache.Get("foo", func() (value interface{}, ttl time.Duration, size int) {
	panic("will not be called")
}).(string)
```

## Why does `cache.Get` take a function as argument?

*Using the mechanism described below is optional; the second argument to `Get` can be `nil` and there is a `Put` function as well.*

Because this library is meant to be used by multi-threaded applications, and the following would
result in the same data being fetched twice if both goroutines run in parallel:

```go
// This code shows what could happen with other cache libraries
c := lrucache.New(MAX_CACHE_ENTRIES)

for i := 0; i < 2; i++ {
	go func() {
		// This code will run twice in different goroutines,
		// it could overlap. As `fetchData` probably does some
		// I/O and takes a long time, the probability of both
		// goroutines calling `fetchData` is very high!
		url := "http://example.com/foo"
		contents := c.Get(url)
		if contents == nil {
			contents = fetchData(url)
			c.Set(url, contents)
		}

		handleData(contents.([]byte))
	}()
}
```

Here, if one wanted to make sure that only one of the two goroutines fetches the data,
the programmer would need to build their own synchronization. That would suck!

```go
c := lrucache.New(MAX_CACHE_SIZE)

for i := 0; i < 2; i++ {
	go func() {
		url := "http://example.com/foo"
		contents := c.Get(url, func() (interface{}, time.Duration, int) {
			// This closure will only be called once!
			// If another goroutine calls `c.Get` while this closure
			// is still being executed, it will wait.
			buf := fetchData(url)
			return buf, 100 * time.Second, len(buf)
		})

		handleData(contents.([]byte))
	}()
}
```

This is much better, as fewer resources are wasted and synchronization is handled by
the library. If it gets called, the call to the closure happens synchronously. While
it is being executed, all other cache keys can still be accessed without having to wait
for the execution to be done.

## How `Get` works

The closure passed to `Get` will be called if the value asked for is not cached or has
expired. It should return the following values:

- The value corresponding to that key, to be stored in the cache
- The time to live for that value (how long until it expires and needs to be recomputed)
- A size estimate

When `maxMemory` is reached, cache entries need to be evicted. Theoretically,
it would be possible to use reflection on every value placed in the cache
to get its exact size in bytes. This would be very expensive and slow, though.
Also, sizes can change. Instead of this library calculating the size in bytes, you, the user,
have to provide a size for every value in whatever unit you like (as long as it is the same unit everywhere).

Suggestions on what to use as size: `len(str)` for strings, `len(slice) * size_of_slice_type`, etc. It is possible
to use `1` as size for every entry; in that case at most `maxMemory` entries will be in the cache at the same time.
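A short sketch of the entry-count variant just mentioned, in the style of the examples above (a fragment; `fmt`/`time` imports are omitted and `loadUser` is a hypothetical loader):

```go
// Using a size of 1 per value caps the cache at maxMemory == 100 entries.
c := lrucache.New(100)
for i := 0; i < 1000; i++ {
	key := fmt.Sprintf("user:%d", i)
	_ = c.Get(key, func() (interface{}, time.Duration, int) {
		return loadUser(key), time.Minute, 1 // loadUser is hypothetical
	})
}
// At most 100 of the 1000 keys are still cached at this point.
```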
## Effects on GC

Because of the way the garbage collector decides when to run ([explained in the
runtime package](https://pkg.go.dev/runtime)), having large amounts of data
sitting in your cache might increase the memory consumption of your process by
up to two times the maximum size of the cache. You can decrease the *target
percentage* to reduce the effect, but then you might see negative performance
effects when your cache is not filled.
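A minimal sketch of lowering that target percentage from inside the application (equivalent to setting the `GOGC` environment variable; the value 50 is arbitrary and only an illustration):

```go
package main

import "runtime/debug"

func main() {
	// The default target percentage is 100: the heap may roughly double over
	// the live set between collections. A lower value makes the GC run more
	// often, trading CPU time for a smaller peak heap while the cache is full.
	old := debug.SetGCPercent(50)
	_ = old // previous setting, in case it should be restored later

	// ... start the application and fill the cache ...
}
```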
@@ -1,292 +0,0 @@
// Copyright (C) NHR@FAU, University Erlangen-Nuremberg.
// All rights reserved.
// Use of this source code is governed by a MIT-style
// license that can be found in the LICENSE file.
package lrucache

import (
	"sync"
	"time"
)

// Type of the closure that must be passed to `Get` to
// compute the value in case it is not cached.
//
// The returned values are the computed value to be stored in the cache,
// the duration until this value will expire, and a size estimate.
type ComputeValue func() (value interface{}, ttl time.Duration, size int)

type cacheEntry struct {
	key   string
	value interface{}

	expiration            time.Time
	size                  int
	waitingForComputation int

	next, prev *cacheEntry
}

type Cache struct {
	mutex                 sync.Mutex
	cond                  *sync.Cond
	maxmemory, usedmemory int
	entries               map[string]*cacheEntry
	head, tail            *cacheEntry
}

// Return a new instance of a LRU in-memory cache.
// Read [the README](./README.md) for more information
// on what is going on with `maxmemory`.
func New(maxmemory int) *Cache {
	cache := &Cache{
		maxmemory: maxmemory,
		entries:   map[string]*cacheEntry{},
	}
	cache.cond = sync.NewCond(&cache.mutex)
	return cache
}

// Return the cached value for key `key` or call `computeValue` and
// store its return value in the cache. If called, the closure will be
// called synchronously and __shall not call methods on the same cache__
// or a deadlock might occur. If `computeValue` is nil, the cache is checked
// and if no entry was found, nil is returned. If another goroutine is currently
// computing that value, the result is waited for.
func (c *Cache) Get(key string, computeValue ComputeValue) interface{} {
	now := time.Now()

	c.mutex.Lock()
	if entry, ok := c.entries[key]; ok {
		// The expiration not being set is what shows us that
		// the computation of that value is still ongoing.
		for entry.expiration.IsZero() {
			entry.waitingForComputation += 1
			c.cond.Wait()
			entry.waitingForComputation -= 1
		}

		if now.After(entry.expiration) {
			if !c.evictEntry(entry) {
				if entry.expiration.IsZero() {
					panic("LRUCACHE/CACHE > cache entry that should have been waited for could not be evicted.")
				}
				c.mutex.Unlock()
				return entry.value
			}
		} else {
			if entry != c.head {
				c.unlinkEntry(entry)
				c.insertFront(entry)
			}
			c.mutex.Unlock()
			return entry.value
		}
	}

	if computeValue == nil {
		c.mutex.Unlock()
		return nil
	}

	entry := &cacheEntry{
		key:                   key,
		waitingForComputation: 1,
	}

	c.entries[key] = entry

	hasPaniced := true
	defer func() {
		if hasPaniced {
			c.mutex.Lock()
			delete(c.entries, key)
			entry.expiration = now
			entry.waitingForComputation -= 1
		}
		c.mutex.Unlock()
	}()

	c.mutex.Unlock()
	value, ttl, size := computeValue()
	c.mutex.Lock()
	hasPaniced = false

	entry.value = value
	entry.expiration = now.Add(ttl)
	entry.size = size
	entry.waitingForComputation -= 1

	// Only broadcast if other goroutines are actually waiting
	// for a result.
	if entry.waitingForComputation > 0 {
		// TODO: Have more than one condition variable so that there are
		// fewer unnecessary wakeups.
		c.cond.Broadcast()
	}

	c.usedmemory += size
	c.insertFront(entry)

	// Evict only entries with a size of more than zero.
	// This is the only loop in the implementation outside of the `Keys`
	// method.
	evictionCandidate := c.tail
	for c.usedmemory > c.maxmemory && evictionCandidate != nil {
		nextCandidate := evictionCandidate.prev
		if (evictionCandidate.size > 0 || now.After(evictionCandidate.expiration)) &&
			evictionCandidate.waitingForComputation == 0 {
			c.evictEntry(evictionCandidate)
		}
		evictionCandidate = nextCandidate
	}

	return value
}

// Put a new value in the cache. If another goroutine is calling `Get` and
// computing the value, this function waits for the computation to be done
// before it overwrites the value.
func (c *Cache) Put(key string, value interface{}, size int, ttl time.Duration) {
	now := time.Now()
	c.mutex.Lock()
	defer c.mutex.Unlock()

	if entry, ok := c.entries[key]; ok {
		for entry.expiration.IsZero() {
			entry.waitingForComputation += 1
			c.cond.Wait()
			entry.waitingForComputation -= 1
		}

		c.usedmemory -= entry.size
		entry.expiration = now.Add(ttl)
		entry.size = size
		entry.value = value
		c.usedmemory += entry.size

		c.unlinkEntry(entry)
		c.insertFront(entry)
		return
	}

	entry := &cacheEntry{
		key:        key,
		value:      value,
		size:       size,
		expiration: now.Add(ttl),
	}
	c.entries[key] = entry
	c.usedmemory += size
	c.insertFront(entry)
}

// Remove the value at key `key` from the cache.
// Return true if the key was in the cache and false
// otherwise. It is possible that true is returned even
// though the value already expired.
// It is possible that false is returned even though the value
// will show up in the cache if this function is called on a key
// while that key is being computed.
func (c *Cache) Del(key string) bool {
	c.mutex.Lock()
	defer c.mutex.Unlock()

	if entry, ok := c.entries[key]; ok {
		return c.evictEntry(entry)
	}
	return false
}

// Call f for every entry in the cache. Some sanity checks
// and eviction of expired keys are done as well.
// The cache is fully locked for the complete duration of this call!
func (c *Cache) Keys(f func(key string, val interface{})) {
	c.mutex.Lock()
	defer c.mutex.Unlock()

	now := time.Now()

	size := 0
	for key, e := range c.entries {
		if key != e.key {
			panic("LRUCACHE/CACHE > key mismatch")
		}

		if now.After(e.expiration) {
			if c.evictEntry(e) {
				continue
			}
		}

		if e.prev != nil {
			if e.prev.next != e {
				panic("LRUCACHE/CACHE > list corrupted")
			}
		}

		if e.next != nil {
			if e.next.prev != e {
				panic("LRUCACHE/CACHE > list corrupted")
			}
		}

		size += e.size
		f(key, e.value)
	}

	if size != c.usedmemory {
		panic("LRUCACHE/CACHE > size calculations failed")
	}

	if c.head != nil {
		if c.tail == nil || c.head.prev != nil {
			panic("LRUCACHE/CACHE > head/tail corrupted")
		}
	}

	if c.tail != nil {
		if c.head == nil || c.tail.next != nil {
			panic("LRUCACHE/CACHE > head/tail corrupted")
		}
	}
}

func (c *Cache) insertFront(e *cacheEntry) {
	e.next = c.head
	c.head = e

	e.prev = nil
	if e.next != nil {
		e.next.prev = e
	}

	if c.tail == nil {
		c.tail = e
	}
}

func (c *Cache) unlinkEntry(e *cacheEntry) {
	if e == c.head {
		c.head = e.next
	}
	if e.prev != nil {
		e.prev.next = e.next
	}
	if e.next != nil {
		e.next.prev = e.prev
	}
	if e == c.tail {
		c.tail = e.prev
	}
}

func (c *Cache) evictEntry(e *cacheEntry) bool {
	if e.waitingForComputation != 0 {
		// panic("LRUCACHE/CACHE > cannot evict this entry as other goroutines need the value")
		return false
	}

	c.unlinkEntry(e)
	c.usedmemory -= e.size
	delete(c.entries, e.key)
	return true
}
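Putting the API above together, a small usage sketch of the non-closure entry points (`Put`, `Get` with a nil closure, `Keys`, `Del`); the import path is assumed, and the capacity unit here is a plain entry count:

```go
package main

import (
	"fmt"
	"time"

	"github.com/ClusterCockpit/cc-backend/pkg/lrucache" // import path assumed
)

func main() {
	c := lrucache.New(10) // capacity in caller-chosen units, here entry counts

	c.Put("answer", 42, 1, time.Minute)

	if v := c.Get("answer", nil); v != nil { // nil closure: plain lookup, no compute
		fmt.Println(v.(int)) // 42
	}

	c.Keys(func(key string, val interface{}) {
		fmt.Printf("%s = %v\n", key, val)
	})

	c.Del("answer") // true: the key existed
}
```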
@@ -1,223 +0,0 @@
// Copyright (C) NHR@FAU, University Erlangen-Nuremberg.
// All rights reserved.
// Use of this source code is governed by a MIT-style
// license that can be found in the LICENSE file.
package lrucache

import (
	"sync"
	"sync/atomic"
	"testing"
	"time"
)

func TestBasics(t *testing.T) {
	cache := New(123)

	value1 := cache.Get("foo", func() (interface{}, time.Duration, int) {
		return "bar", 1 * time.Second, 0
	})

	if value1.(string) != "bar" {
		t.Error("cache returned wrong value")
	}

	value2 := cache.Get("foo", func() (interface{}, time.Duration, int) {
		t.Error("value should be cached")
		return "", 0, 0
	})

	if value2.(string) != "bar" {
		t.Error("cache returned wrong value")
	}

	existed := cache.Del("foo")
	if !existed {
		t.Error("delete did not work as expected")
	}

	value3 := cache.Get("foo", func() (interface{}, time.Duration, int) {
		return "baz", 1 * time.Second, 0
	})

	if value3.(string) != "baz" {
		t.Error("cache returned wrong value")
	}

	cache.Keys(func(key string, value interface{}) {
		if key != "foo" || value.(string) != "baz" {
			t.Error("cache corrupted")
		}
	})
}

func TestExpiration(t *testing.T) {
	cache := New(123)

	failIfCalled := func() (interface{}, time.Duration, int) {
		t.Error("Value should be cached!")
		return "", 0, 0
	}

	val1 := cache.Get("foo", func() (interface{}, time.Duration, int) {
		return "bar", 5 * time.Millisecond, 0
	})
	val2 := cache.Get("bar", func() (interface{}, time.Duration, int) {
		return "foo", 20 * time.Millisecond, 0
	})

	val3 := cache.Get("foo", failIfCalled).(string)
	val4 := cache.Get("bar", failIfCalled).(string)

	if val1 != val3 || val3 != "bar" || val2 != val4 || val4 != "foo" {
		t.Error("Wrong values returned")
	}

	time.Sleep(10 * time.Millisecond)

	val5 := cache.Get("foo", func() (interface{}, time.Duration, int) {
		return "baz", 0, 0
	})
	val6 := cache.Get("bar", failIfCalled)

	if val5.(string) != "baz" || val6.(string) != "foo" {
		t.Error("unexpected values")
	}

	cache.Keys(func(key string, val interface{}) {
		if key != "bar" || val.(string) != "foo" {
			t.Error("wrong value expired")
		}
	})

	time.Sleep(15 * time.Millisecond)
	cache.Keys(func(key string, val interface{}) {
		t.Error("cache should be empty now")
	})
}

func TestEviction(t *testing.T) {
	c := New(100)
	failIfCalled := func() (interface{}, time.Duration, int) {
		t.Error("Value should be cached!")
		return "", 0, 0
	}

	v1 := c.Get("foo", func() (interface{}, time.Duration, int) {
		return "bar", 1 * time.Second, 1000
	})

	v2 := c.Get("foo", func() (interface{}, time.Duration, int) {
		return "baz", 1 * time.Second, 1000
	})

	if v1.(string) != "bar" || v2.(string) != "baz" {
		t.Error("wrong values returned")
	}

	c.Keys(func(key string, val interface{}) {
		t.Error("cache should be empty now")
	})

	_ = c.Get("A", func() (interface{}, time.Duration, int) {
		return "a", 1 * time.Second, 50
	})

	_ = c.Get("B", func() (interface{}, time.Duration, int) {
		return "b", 1 * time.Second, 50
	})

	_ = c.Get("A", failIfCalled)
	_ = c.Get("B", failIfCalled)
	_ = c.Get("C", func() (interface{}, time.Duration, int) {
		return "c", 1 * time.Second, 50
	})

	_ = c.Get("B", failIfCalled)
	_ = c.Get("C", failIfCalled)

	v4 := c.Get("A", func() (interface{}, time.Duration, int) {
		return "evicted", 1 * time.Second, 25
	})

	if v4.(string) != "evicted" {
		t.Error("value should have been evicted")
	}

	c.Keys(func(key string, val interface{}) {
		if key != "A" && key != "C" {
			t.Errorf("'%s' was not expected", key)
		}
	})
}

// I know that this is a shitty test,
// time is relative and unreliable.
func TestConcurrency(t *testing.T) {
	c := New(100)
	var wg sync.WaitGroup

	numActions := 20000
	numThreads := 4
	wg.Add(numThreads)

	var concurrentModifications int32 = 0

	for i := 0; i < numThreads; i++ {
		go func() {
			for j := 0; j < numActions; j++ {
				_ = c.Get("key", func() (interface{}, time.Duration, int) {
					m := atomic.AddInt32(&concurrentModifications, 1)
					if m != 1 {
						t.Error("only one goroutine at a time should calculate a value for the same key")
					}

					time.Sleep(1 * time.Millisecond)
					atomic.AddInt32(&concurrentModifications, -1)
					return "value", 3 * time.Millisecond, 1
				})
			}

			wg.Done()
		}()
	}

	wg.Wait()

	c.Keys(func(key string, val interface{}) {})
}

func TestPanic(t *testing.T) {
	c := New(100)

	c.Put("bar", "baz", 3, 1*time.Minute)

	testpanic := func() {
		defer func() {
			if r := recover(); r != nil {
				if r.(string) != "oops" {
					t.Fatal("unexpected panic value")
				}
			}
		}()

		_ = c.Get("foo", func() (value interface{}, ttl time.Duration, size int) {
			panic("oops")
		})

		t.Fatal("should have panicked!")
	}

	testpanic()

	v := c.Get("bar", func() (value interface{}, ttl time.Duration, size int) {
		t.Fatal("should not be called!")
		return nil, 0, 0
	})

	if v.(string) != "baz" {
		t.Fatal("unexpected value")
	}

	testpanic()
}
@@ -1,124 +0,0 @@
// Copyright (C) NHR@FAU, University Erlangen-Nuremberg.
// All rights reserved.
// Use of this source code is governed by a MIT-style
// license that can be found in the LICENSE file.
package lrucache

import (
	"bytes"
	"net/http"
	"strconv"
	"time"
)

// HttpHandler can be used as HTTP middleware in order to cache requests,
// for example static assets. By default, the request's raw URI is used as key and nothing else.
// Results with a status code other than 200 are cached with a TTL of zero seconds,
// so they are basically re-fetched as soon as the current fetch is done and a new request
// for that URI comes in.
type HttpHandler struct {
	cache      *Cache
	fetcher    http.Handler
	defaultTTL time.Duration

	// Allows overriding the way the cache key is extracted
	// from the http request. The default is to use the RequestURI.
	CacheKey func(*http.Request) string
}

var _ http.Handler = (*HttpHandler)(nil)

type cachedResponseWriter struct {
	w          http.ResponseWriter
	statusCode int
	buf        bytes.Buffer
}

type cachedResponse struct {
	headers    http.Header
	statusCode int
	data       []byte
	fetched    time.Time
}

var _ http.ResponseWriter = (*cachedResponseWriter)(nil)

func (crw *cachedResponseWriter) Header() http.Header {
	return crw.w.Header()
}

func (crw *cachedResponseWriter) Write(bytes []byte) (int, error) {
	return crw.buf.Write(bytes)
}

func (crw *cachedResponseWriter) WriteHeader(statusCode int) {
	crw.statusCode = statusCode
}

// Returns a new caching HttpHandler. If no entry in the cache is found or it was too old, `fetcher` is called with
// a modified http.ResponseWriter and the response is stored in the cache. If `fetcher` sets the "Expires" header,
// the ttl is set appropriately (otherwise, the default ttl passed as argument here is used).
// `maxmemory` should be in the unit bytes.
func NewHttpHandler(maxmemory int, ttl time.Duration, fetcher http.Handler) *HttpHandler {
	return &HttpHandler{
		cache:      New(maxmemory),
		defaultTTL: ttl,
		fetcher:    fetcher,
		CacheKey: func(r *http.Request) string {
			return r.RequestURI
		},
	}
}

// gorilla/mux style middleware:
func NewMiddleware(maxmemory int, ttl time.Duration) func(http.Handler) http.Handler {
	return func(next http.Handler) http.Handler {
		return NewHttpHandler(maxmemory, ttl, next)
	}
}

// Tries to serve a response to r from cache or calls next and stores the response to the cache for the next time.
func (h *HttpHandler) ServeHTTP(rw http.ResponseWriter, r *http.Request) {
	if r.Method != http.MethodGet {
		// Pass non-GET requests straight to the wrapped handler; calling
		// h.ServeHTTP here would recurse forever.
		h.fetcher.ServeHTTP(rw, r)
		return
	}

	cr := h.cache.Get(h.CacheKey(r), func() (interface{}, time.Duration, int) {
		crw := &cachedResponseWriter{
			w:          rw,
			statusCode: 200,
			buf:        bytes.Buffer{},
		}

		h.fetcher.ServeHTTP(crw, r)

		cr := &cachedResponse{
			headers:    rw.Header().Clone(),
			statusCode: crw.statusCode,
			data:       crw.buf.Bytes(),
			fetched:    time.Now(),
		}
		cr.headers.Set("Content-Length", strconv.Itoa(len(cr.data)))

		ttl := h.defaultTTL
		if cr.statusCode != http.StatusOK {
			ttl = 0
		} else if cr.headers.Get("Expires") != "" {
			if expires, err := http.ParseTime(cr.headers.Get("Expires")); err == nil {
				ttl = time.Until(expires)
			}
		}

		return cr, ttl, len(cr.data)
	}).(*cachedResponse)

	for key, val := range cr.headers {
		rw.Header()[key] = val
	}

	cr.headers.Set("Age", strconv.Itoa(int(time.Since(cr.fetched).Seconds())))

	rw.WriteHeader(cr.statusCode)
	rw.Write(cr.data)
}
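A sketch of wiring this middleware into a plain `net/http` server. No gorilla/mux is required, since the middleware is just a `func(http.Handler) http.Handler`; the import path, directory, sizes, and port below are illustrative assumptions:

```go
package main

import (
	"net/http"
	"time"

	"github.com/ClusterCockpit/cc-backend/pkg/lrucache" // import path assumed
)

func main() {
	// Cache up to ~16 MiB of rendered responses for five minutes each.
	cached := lrucache.NewMiddleware(16<<20, 5*time.Minute)

	static := http.FileServer(http.Dir("./web/frontend/public")) // directory assumed
	http.Handle("/static/", cached(http.StripPrefix("/static/", static)))

	http.ListenAndServe(":8080", nil)
}
```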
@@ -1,75 +0,0 @@
// Copyright (C) NHR@FAU, University Erlangen-Nuremberg.
// All rights reserved.
// Use of this source code is governed by a MIT-style
// license that can be found in the LICENSE file.
package lrucache

import (
	"bytes"
	"net/http"
	"net/http/httptest"
	"testing"
	"time"
)

func TestHandlerBasics(t *testing.T) {
	r := httptest.NewRequest(http.MethodGet, "/test1", nil)
	rw := httptest.NewRecorder()
	shouldBeCalled := true

	handler := NewHttpHandler(1000, time.Second, http.HandlerFunc(func(rw http.ResponseWriter, r *http.Request) {
		rw.Write([]byte("Hello World!"))

		if !shouldBeCalled {
			t.Fatal("fetcher expected to be called")
		}
	}))

	handler.ServeHTTP(rw, r)

	if rw.Code != 200 {
		t.Fatal("unexpected status code")
	}

	if !bytes.Equal(rw.Body.Bytes(), []byte("Hello World!")) {
		t.Fatal("unexpected body")
	}

	rw = httptest.NewRecorder()
	shouldBeCalled = false
	handler.ServeHTTP(rw, r)

	if rw.Code != 200 {
		t.Fatal("unexpected status code")
	}

	if !bytes.Equal(rw.Body.Bytes(), []byte("Hello World!")) {
		t.Fatal("unexpected body")
	}
}

// func TestHandlerExpiration(t *testing.T) {
// 	r := httptest.NewRequest(http.MethodGet, "/test1", nil)
// 	rw := httptest.NewRecorder()
// 	i := 1
// 	now := time.Now()

// 	handler := NewHttpHandler(1000, 1*time.Second, http.HandlerFunc(func(rw http.ResponseWriter, r *http.Request) {
// 		rw.Header().Set("Expires", now.Add(10*time.Millisecond).Format(http.TimeFormat))
// 		rw.Write([]byte(strconv.Itoa(i)))
// 	}))

// 	handler.ServeHTTP(rw, r)
// 	if !(rw.Body.String() == strconv.Itoa(1)) {
// 		t.Fatal("unexpected body")
// 	}

// 	i += 1

// 	time.Sleep(11 * time.Millisecond)
// 	rw = httptest.NewRecorder()
// 	handler.ServeHTTP(rw, r)
// 	if !(rw.Body.String() == strconv.Itoa(1)) {
// 		t.Fatal("unexpected body")
// 	}
// }
@@ -1,123 +0,0 @@
package resampler

import (
	"errors"
	"fmt"
	"math"

	"github.com/ClusterCockpit/cc-backend/pkg/schema"
)

func SimpleResampler(data []schema.Float, old_frequency int64, new_frequency int64) ([]schema.Float, int64, error) {
	if old_frequency == 0 || new_frequency == 0 || new_frequency <= old_frequency {
		return data, old_frequency, nil
	}

	if new_frequency%old_frequency != 0 {
		return nil, 0, errors.New("new sampling frequency should be a multiple of the old frequency")
	}

	var step int = int(new_frequency / old_frequency)
	var new_data_length = len(data) / step

	if new_data_length == 0 || len(data) < 100 || new_data_length >= len(data) {
		return data, old_frequency, nil
	}

	new_data := make([]schema.Float, new_data_length)

	for i := 0; i < new_data_length; i++ {
		new_data[i] = data[i*step]
	}

	return new_data, new_frequency, nil
}

// Inspired by one of the algorithms from https://skemman.is/bitstream/1946/15343/3/SS_MSthesis.pdf
// Adapted from https://github.com/haoel/downsampling/blob/master/core/lttb.go
func LargestTriangleThreeBucket(data []schema.Float, old_frequency int, new_frequency int) ([]schema.Float, int, error) {
	if old_frequency == 0 || new_frequency == 0 || new_frequency <= old_frequency {
		return data, old_frequency, nil
	}

	if new_frequency%old_frequency != 0 {
		return nil, 0, fmt.Errorf("new sampling frequency: %d should be a multiple of the old frequency: %d", new_frequency, old_frequency)
	}

	var step int = int(new_frequency / old_frequency)
	var new_data_length = len(data) / step

	if new_data_length == 0 || len(data) < 100 || new_data_length >= len(data) {
		return data, old_frequency, nil
	}

	new_data := make([]schema.Float, 0, new_data_length)

	// Bucket size. Leave room for start and end data points.
	bucketSize := float64(len(data)-2) / float64(new_data_length-2)

	new_data = append(new_data, data[0]) // Always add the first point

	// We keep three index pointers:
	// > bucketLow    - the current bucket's beginning location
	// > bucketMiddle - the current bucket's ending location,
	//                  also the beginning location of the next bucket
	// > bucketHigh   - the next bucket's ending location
	bucketLow := 1
	bucketMiddle := int(math.Floor(bucketSize)) + 1

	var prevMaxAreaPoint int

	for i := 0; i < new_data_length-2; i++ {

		bucketHigh := int(math.Floor(float64(i+2)*bucketSize)) + 1
		if bucketHigh >= len(data)-1 {
			bucketHigh = len(data) - 2
		}

		// Calculate point average for next bucket (containing c)
		avgPointX, avgPointY := calculateAverageDataPoint(data[bucketMiddle:bucketHigh+1], int64(bucketMiddle))

		// Get the range for the current bucket
		currBucketStart := bucketLow
		currBucketEnd := bucketMiddle

		// Point a
		pointX := prevMaxAreaPoint
		pointY := data[prevMaxAreaPoint]

		maxArea := -1.0

		var maxAreaPoint int
		flag_ := 0
		for ; currBucketStart < currBucketEnd; currBucketStart++ {

			area := calculateTriangleArea(schema.Float(pointX), pointY, avgPointX, avgPointY, schema.Float(currBucketStart), data[currBucketStart])
			if area > maxArea {
				maxArea = area
				maxAreaPoint = currBucketStart
			}
			if math.IsNaN(float64(avgPointY)) {
				flag_ = 1
			}
		}

		if flag_ == 1 {
			new_data = append(new_data, schema.NaN) // The next bucket's average is NaN; propagate NaN
		} else {
			new_data = append(new_data, data[maxAreaPoint]) // Pick this point from the bucket
		}
		prevMaxAreaPoint = maxAreaPoint // This maxArea point is the next iteration's prevMaxAreaPoint

		// Move to the next window
		bucketLow = bucketMiddle
		bucketMiddle = bucketHigh
	}

	new_data = append(new_data, data[len(data)-1]) // Always add the last point

	return new_data, new_frequency, nil
}
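A usage sketch under the code above: the "frequencies" here are sample intervals, so going from a 10-second to a 30-second timestep keeps one point per three (the LTTB pick). The resampler import path is assumed; `schema.Float` is imported as in the listing:

```go
package main

import (
	"fmt"
	"math"

	"github.com/ClusterCockpit/cc-backend/pkg/resampler" // import path assumed
	"github.com/ClusterCockpit/cc-backend/pkg/schema"
)

func main() {
	// 600 samples taken at a 10-second timestep.
	data := make([]schema.Float, 600)
	for i := range data {
		data[i] = schema.Float(math.Sin(float64(i) / 10))
	}

	// Downsample to a 30-second timestep: step = 30/10 = 3, 200 points remain.
	down, freq, err := resampler.LargestTriangleThreeBucket(data, 10, 30)
	if err != nil {
		panic(err)
	}
	fmt.Println(len(down), freq) // 200 30
}
```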
@@ -1,35 +0,0 @@
package resampler

import (
	"math"

	"github.com/ClusterCockpit/cc-backend/pkg/schema"
)

func calculateTriangleArea(paX, paY, pbX, pbY, pcX, pcY schema.Float) float64 {
	area := ((paX-pcX)*(pbY-paY) - (paX-pbX)*(pcY-paY)) * 0.5
	return math.Abs(float64(area))
}

func calculateAverageDataPoint(points []schema.Float, xStart int64) (avgX schema.Float, avgY schema.Float) {
	flag := 0
	for _, point := range points {
		avgX += schema.Float(xStart)
		avgY += point
		xStart++
		if math.IsNaN(float64(point)) {
			flag = 1
		}
	}

	l := schema.Float(len(points))

	avgX /= l
	avgY /= l

	if flag == 1 {
		return avgX, schema.NaN
	} else {
		return avgX, avgY
	}
}
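The `calculateTriangleArea` expression is the standard cross-product (shoelace) formula, |(a-c) x (b-a)| / 2. As a quick sanity check (a sketch; the helper is unexported, so this only runs inside the `resampler` package, e.g. in a test file): the unit right triangle (0,0), (1,0), (0,1) has area 0.5:

```go
// In a resampler_test.go within the same package:
// a = (0,0), b = (1,0), c = (0,1)
area := calculateTriangleArea(0, 0, 1, 0, 0, 1)
fmt.Println(area) // 0.5
```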
@@ -1,5 +1,5 @@
 // Copyright (C) NHR@FAU, University Erlangen-Nuremberg.
-// All rights reserved.
+// All rights reserved. This file is part of cc-backend.
 // Use of this source code is governed by a MIT-style
 // license that can be found in the LICENSE file.
 package runtimeEnv
@@ -12,7 +12,7 @@ import (
 	"strconv"
 	"syscall"
 
-	"github.com/ClusterCockpit/cc-backend/pkg/log"
+	cclog "github.com/ClusterCockpit/cc-lib/ccLogger"
 )
 
 // Changes the processes user and group to that
@@ -23,13 +23,13 @@ func DropPrivileges(username string, group string) error {
 	if group != "" {
 		g, err := user.LookupGroup(group)
 		if err != nil {
-			log.Warn("Error while looking up group")
+			cclog.Warn("Error while looking up group")
 			return err
 		}
 
 		gid, _ := strconv.Atoi(g.Gid)
 		if err := syscall.Setgid(gid); err != nil {
-			log.Warn("Error while setting gid")
+			cclog.Warn("Error while setting gid")
 			return err
 		}
 	}
@@ -37,13 +37,13 @@ func DropPrivileges(username string, group string) error {
 	if username != "" {
 		u, err := user.Lookup(username)
 		if err != nil {
-			log.Warn("Error while looking up user")
+			cclog.Warn("Error while looking up user")
 			return err
 		}
 
 		uid, _ := strconv.Atoi(u.Uid)
 		if err := syscall.Setuid(uid); err != nil {
-			log.Warn("Error while setting uid")
+			cclog.Warn("Error while setting uid")
 			return err
 		}
 	}
|
Some files were not shown because too many files have changed in this diff Show More
Loading…
x
Reference in New Issue
Block a user