From 639e1b9c6dd3a5b84a579e59183d7a6c9dec495a Mon Sep 17 00:00:00 2001
From: Jan Eitzinger
Date: Mon, 30 Jun 2025 12:06:35 +0200
Subject: [PATCH] Port to cc-lib. Extend legal header.

---
 cmd/cc-backend/cli.go                         |   2 +-
 cmd/cc-backend/init.go                        |  16 +-
 cmd/cc-backend/main.go                        |  73 ++-
 cmd/cc-backend/server.go                      |  24 +-
 go.mod                                        |  30 +-
 go.sum                                        |  61 ++-
 gqlgen.yml                                    |  72 +--
 internal/api/api_test.go                      |   8 +-
 internal/api/cluster.go                       |   4 +-
 internal/api/job.go                           |  28 +-
 internal/api/node.go                          |   4 +-
 internal/api/rest.go                          |  16 +-
 internal/api/user.go                          |   4 +-
 internal/archiver/archiveWorker.go            |  22 +-
 internal/archiver/archiver.go                 |   8 +-
 internal/auth/auth.go                         |  88 ++--
 internal/auth/jwt.go                          |  18 +-
 internal/auth/jwtCookieSession.go             |  34 +-
 internal/auth/jwtSession.go                   |  18 +-
 internal/auth/ldap.go                         |  34 +-
 internal/auth/local.go                        |  16 +-
 internal/auth/oidc.go                         |  14 +-
 internal/config/config.go                     |  14 +-
 internal/config/config_test.go                |   2 +-
 internal/config/default_metrics.go            |   4 +
 internal/graph/generated/generated.go         | 268 +++++-----
 internal/graph/model/models.go                |   2 +-
 internal/graph/model/models_gen.go            |   3 +-
 internal/graph/resolver.go                    |   4 +-
 internal/graph/schema.resolvers.go            |  96 ++--
 internal/graph/util.go                        |  19 +-
 internal/importer/handleImport.go             |  38 +-
 internal/importer/importer_test.go            |   6 +-
 internal/importer/initDB.go                   |  38 +-
 internal/importer/normalize.go                |   4 +-
 internal/importer/normalize_test.go           |   4 +-
 internal/metricDataDispatcher/dataLoader.go   |  39 +-
 internal/metricdata/cc-metric-store.go        |  44 +-
 internal/metricdata/metricdata.go             |  10 +-
 internal/metricdata/prometheus.go             |  56 +-
 internal/metricdata/utils.go                  |  21 +-
 internal/repository/dbConnection.go           |  14 +-
 internal/repository/hooks.go                  |   8 +-
 internal/repository/job.go                    | 108 ++--
 internal/repository/jobCreate.go              |  18 +-
 internal/repository/jobFind.go                |  28 +-
 internal/repository/jobHooks.go               |   4 +-
 internal/repository/jobQuery.go               |  16 +-
 internal/repository/job_test.go               |   4 +-
 internal/repository/migration.go              |  20 +-
 internal/repository/node.go                   |  54 +-
 internal/repository/repository_test.go        |  10 +-
 internal/repository/stats.go                  |  84 +--
 internal/repository/stats_test.go             |   3 +-
 internal/repository/tags.go                   |  68 +--
 internal/repository/transaction.go            |  21 +-
 internal/repository/user.go                   |  58 +--
 internal/repository/userConfig.go             |  22 +-
 internal/repository/userConfig_test.go        |   8 +-
 internal/routerConfig/routes.go               |  20 +-
 internal/tagger/classifyJob.go                |  68 +--
 internal/tagger/detectApp.go                  |  20 +-
 internal/tagger/detectApp_test.go             |   8 +-
 internal/tagger/tagger.go                     |  14 +-
 internal/tagger/tagger_test.go                |   4 +-
 internal/taskManager/commitJobService.go      |  10 +-
 internal/taskManager/compressionService.go    |  12 +-
 internal/taskManager/ldapSyncService.go       |  14 +-
 internal/taskManager/retentionService.go      |  24 +-
 internal/taskManager/stopJobsExceedTime.go    |   8 +-
 internal/taskManager/taskManager.go           |  14 +-
 internal/taskManager/updateDurationService.go |  10 +-
 .../taskManager/updateFootprintService.go     |  30 +-
 internal/util/array.go                        |  14 -
 internal/util/compress.go                     |  77 ---
 internal/util/copy.go                         | 107 ----
 internal/util/diskUsage.go                    |  34 --
 internal/util/fstat.go                        |  36 --
 internal/util/fswatcher.go                    |  75 ---
 internal/util/statistics.go                   |  60 ---
 internal/util/util_test.go                    |  75 ---
 pkg/archive/archive.go                        |  26 +-
 pkg/archive/archive_test.go                   |   6 +-
 pkg/archive/clusterConfig.go                  |   8 +-
 pkg/archive/clusterConfig_test.go             |   2 +-
 pkg/archive/fsBackend.go                      | 110 ++--
 pkg/archive/fsBackend_test.go                 |   6 +-
 pkg/archive/json.go                           |  18 +-
 pkg/archive/nodelist.go                       |   8 +-
 pkg/archive/nodelist_test.go                  |   2 +-
 pkg/archive/s3Backend.go                      |   2 +-
 pkg/log/log.go                                | 220 --------
 pkg/lrucache/README.md                        | 124 -----
 pkg/lrucache/cache.go                         | 292 -----------
 pkg/lrucache/cache_test.go                    | 223 --------
 pkg/lrucache/handler.go                       | 124 -----
 pkg/lrucache/handler_test.go                  |  75 ---
 pkg/resampler/resampler.go                    | 123 -----
 pkg/resampler/util.go                         |  35 --
 pkg/runtimeEnv/setup.go                       |  12 +-
 pkg/schema/cluster.go                         | 249 ---------
 pkg/schema/config.go                          | 180 -------
 pkg/schema/float.go                           | 131 -----
 pkg/schema/job.go                             | 153 ------
 pkg/schema/metrics.go                         | 368 -------------
 pkg/schema/node.go                            |  41 --
 pkg/schema/schemas/cluster.schema.json        | 339 ------------
 pkg/schema/schemas/config.schema.json         | 446 ----------------
 pkg/schema/schemas/job-data.schema.json       | 490 ------------------
 pkg/schema/schemas/job-meta.schema.json       | 351 -------------
 .../schemas/job-metric-data.schema.json       | 216 --------
 .../schemas/job-metric-statistics.schema.json |  34 --
 pkg/schema/schemas/unit.schema.json           |  41 --
 pkg/schema/user.go                            | 200 -------
 pkg/schema/user_test.go                       | 129 -----
 pkg/schema/validate.go                        |  68 ---
 pkg/schema/validate_test.go                   | 105 ----
 tools/archive-manager/main.go                 |  12 +-
 tools/gen-keypair/main.go                     |   2 +-
 web/web.go                                    |  31 +-
 120 files changed, 1140 insertions(+), 6410 deletions(-)
 delete mode 100644 internal/util/array.go
 delete mode 100644 internal/util/compress.go
 delete mode 100644 internal/util/copy.go
 delete mode 100644 internal/util/diskUsage.go
 delete mode 100644 internal/util/fstat.go
 delete mode 100644 internal/util/fswatcher.go
 delete mode 100644 internal/util/statistics.go
 delete mode 100644 internal/util/util_test.go
 delete mode 100644 pkg/log/log.go
 delete mode 100644 pkg/lrucache/README.md
 delete mode 100644 pkg/lrucache/cache.go
 delete mode 100644 pkg/lrucache/cache_test.go
 delete mode 100644 pkg/lrucache/handler.go
 delete mode 100644 pkg/lrucache/handler_test.go
 delete mode 100644 pkg/resampler/resampler.go
 delete mode 100644 pkg/resampler/util.go
 delete mode 100644 pkg/schema/cluster.go
 delete mode 100644 pkg/schema/config.go
 delete mode 100644 pkg/schema/float.go
 delete mode 100644 pkg/schema/job.go
 delete mode 100644 pkg/schema/metrics.go
 delete mode 100644 pkg/schema/node.go
 delete mode 100644 pkg/schema/schemas/cluster.schema.json
 delete mode 100644 pkg/schema/schemas/config.schema.json
 delete mode 100644 pkg/schema/schemas/job-data.schema.json
 delete mode 100644 pkg/schema/schemas/job-meta.schema.json
 delete mode 100644 pkg/schema/schemas/job-metric-data.schema.json
 delete mode 100644 pkg/schema/schemas/job-metric-statistics.schema.json
 delete mode 100644 pkg/schema/schemas/unit.schema.json
 delete mode 100644 pkg/schema/user.go
 delete mode 100644 pkg/schema/user_test.go
 delete mode 100644 pkg/schema/validate.go
 delete mode 100644 pkg/schema/validate_test.go

diff --git a/cmd/cc-backend/cli.go b/cmd/cc-backend/cli.go
index 8b826bb..235a12c 100644
--- a/cmd/cc-backend/cli.go
+++ b/cmd/cc-backend/cli.go
@@ -1,5 +1,5 @@
 // Copyright (C) NHR@FAU, University Erlangen-Nuremberg.
-// All rights reserved.
+// All rights reserved. This file is part of cc-backend.
 // Use of this source code is governed by a MIT-style
 // license that can be found in the LICENSE file.
 package main
diff --git a/cmd/cc-backend/init.go b/cmd/cc-backend/init.go
index 0a5b836..b46100a 100644
--- a/cmd/cc-backend/init.go
+++ b/cmd/cc-backend/init.go
@@ -1,5 +1,5 @@
 // Copyright (C) NHR@FAU, University Erlangen-Nuremberg.
-// All rights reserved.
+// All rights reserved. This file is part of cc-backend.
 // Use of this source code is governed by a MIT-style
 // license that can be found in the LICENSE file.
 package main
@@ -8,8 +8,8 @@ import (
 	"os"
 
 	"github.com/ClusterCockpit/cc-backend/internal/repository"
-	"github.com/ClusterCockpit/cc-backend/internal/util"
-	"github.com/ClusterCockpit/cc-backend/pkg/log"
+	cclog "github.com/ClusterCockpit/cc-lib/ccLogger"
+	"github.com/ClusterCockpit/cc-lib/util"
 )
 
 const envString = `
@@ -73,23 +73,23 @@ const configString = `
 func initEnv() {
 	if util.CheckFileExists("var") {
-		log.Exit("Directory ./var already exists. Cautiously exiting application initialization.")
+		cclog.Exit("Directory ./var already exists. Cautiously exiting application initialization.")
 	}
 
 	if err := os.WriteFile("config.json", []byte(configString), 0o666); err != nil {
-		log.Abortf("Could not write default ./config.json with permissions '0o666'. Application initialization failed, exited.\nError: %s\n", err.Error())
+		cclog.Abortf("Could not write default ./config.json with permissions '0o666'. Application initialization failed, exited.\nError: %s\n", err.Error())
 	}
 
 	if err := os.WriteFile(".env", []byte(envString), 0o666); err != nil {
-		log.Abortf("Could not write default ./.env file with permissions '0o666'. Application initialization failed, exited.\nError: %s\n", err.Error())
+		cclog.Abortf("Could not write default ./.env file with permissions '0o666'. Application initialization failed, exited.\nError: %s\n", err.Error())
 	}
 
 	if err := os.Mkdir("var", 0o777); err != nil {
-		log.Abortf("Could not create default ./var folder with permissions '0o777'. Application initialization failed, exited.\nError: %s\n", err.Error())
+		cclog.Abortf("Could not create default ./var folder with permissions '0o777'. Application initialization failed, exited.\nError: %s\n", err.Error())
 	}
 
 	err := repository.MigrateDB("sqlite3", "./var/job.db")
 	if err != nil {
-		log.Abortf("Could not initialize default sqlite3 database as './var/job.db'. Application initialization failed, exited.\nError: %s\n", err.Error())
+		cclog.Abortf("Could not initialize default sqlite3 database as './var/job.db'. Application initialization failed, exited.\nError: %s\n", err.Error())
 	}
 }
diff --git a/cmd/cc-backend/main.go b/cmd/cc-backend/main.go
index ab07d28..871c8dd 100644
--- a/cmd/cc-backend/main.go
+++ b/cmd/cc-backend/main.go
@@ -1,5 +1,5 @@
 // Copyright (C) NHR@FAU, University Erlangen-Nuremberg.
-// All rights reserved.
+// All rights reserved. This file is part of cc-backend.
 // Use of this source code is governed by a MIT-style
 // license that can be found in the LICENSE file.
 package main
@@ -21,11 +21,11 @@ import (
 	"github.com/ClusterCockpit/cc-backend/internal/repository"
 	"github.com/ClusterCockpit/cc-backend/internal/tagger"
 	"github.com/ClusterCockpit/cc-backend/internal/taskManager"
-	"github.com/ClusterCockpit/cc-backend/internal/util"
 	"github.com/ClusterCockpit/cc-backend/pkg/archive"
-	"github.com/ClusterCockpit/cc-backend/pkg/log"
 	"github.com/ClusterCockpit/cc-backend/pkg/runtimeEnv"
-	"github.com/ClusterCockpit/cc-backend/pkg/schema"
+	cclog "github.com/ClusterCockpit/cc-lib/ccLogger"
+	"github.com/ClusterCockpit/cc-lib/schema"
+	"github.com/ClusterCockpit/cc-lib/util"
 
 	"github.com/google/gops/agent"
 	"github.com/joho/godotenv"
@@ -61,13 +61,12 @@ func main() {
 		os.Exit(0)
 	}
 
-	// Apply config flags for pkg/log
-	log.Init(flagLogLevel, flagLogDateTime)
+	cclog.Init(flagLogLevel, flagLogDateTime)
 
 	// If init flag set, run tasks here before any file dependencies cause errors
 	if flagInit {
 		initEnv()
-		log.Exit("Successfully setup environment!\n" +
+		cclog.Exit("Successfully setup environment!\n" +
 			"Please review config.json and .env and adjust it to your needs.\n" +
 			"Add your job-archive at ./var/job-archive.")
 	}
@@ -75,13 +74,13 @@
 	// See https://github.com/google/gops (Runtime overhead is almost zero)
 	if flagGops {
 		if err := agent.Listen(agent.Options{}); err != nil {
-			log.Abortf("Could not start gops agent with 'gops/agent.Listen(agent.Options{})'. Application startup failed, exited.\nError: %s\n", err.Error())
+			cclog.Abortf("Could not start gops agent with 'gops/agent.Listen(agent.Options{})'. Application startup failed, exited.\nError: %s\n", err.Error())
 		}
 	}
 
 	err := godotenv.Load()
 	if err != nil {
-		log.Abortf("Could not parse existing .env file at location './.env'. Application startup failed, exited.\nError: %s\n", err.Error())
+		cclog.Abortf("Could not parse existing .env file at location './.env'. Application startup failed, exited.\nError: %s\n", err.Error())
 	}
 
 	// Initialize sub-modules and handle command line flags.
@@ -99,25 +98,25 @@ func main() {
 	if flagMigrateDB {
 		err := repository.MigrateDB(config.Keys.DBDriver, config.Keys.DB)
 		if err != nil {
-			log.Abortf("MigrateDB Failed: Could not migrate '%s' database at location '%s' to version %d.\nError: %s\n", config.Keys.DBDriver, config.Keys.DB, repository.Version, err.Error())
+			cclog.Abortf("MigrateDB Failed: Could not migrate '%s' database at location '%s' to version %d.\nError: %s\n", config.Keys.DBDriver, config.Keys.DB, repository.Version, err.Error())
 		}
-		log.Exitf("MigrateDB Success: Migrated '%s' database at location '%s' to version %d.\n", config.Keys.DBDriver, config.Keys.DB, repository.Version)
+		cclog.Exitf("MigrateDB Success: Migrated '%s' database at location '%s' to version %d.\n", config.Keys.DBDriver, config.Keys.DB, repository.Version)
 	}
 
 	if flagRevertDB {
 		err := repository.RevertDB(config.Keys.DBDriver, config.Keys.DB)
 		if err != nil {
-			log.Abortf("RevertDB Failed: Could not revert '%s' database at location '%s' to version %d.\nError: %s\n", config.Keys.DBDriver, config.Keys.DB, (repository.Version - 1), err.Error())
+			cclog.Abortf("RevertDB Failed: Could not revert '%s' database at location '%s' to version %d.\nError: %s\n", config.Keys.DBDriver, config.Keys.DB, (repository.Version - 1), err.Error())
 		}
-		log.Exitf("RevertDB Success: Reverted '%s' database at location '%s' to version %d.\n", config.Keys.DBDriver, config.Keys.DB, (repository.Version - 1))
+		cclog.Exitf("RevertDB Success: Reverted '%s' database at location '%s' to version %d.\n", config.Keys.DBDriver, config.Keys.DB, (repository.Version - 1))
 	}
 
 	if flagForceDB {
 		err := repository.ForceDB(config.Keys.DBDriver, config.Keys.DB)
 		if err != nil {
-			log.Abortf("ForceDB Failed: Could not force '%s' database at location '%s' to version %d.\nError: %s\n", config.Keys.DBDriver, config.Keys.DB, repository.Version, err.Error())
+			cclog.Abortf("ForceDB Failed: Could not force '%s' database at location '%s' to version %d.\nError: %s\n", config.Keys.DBDriver, config.Keys.DB, repository.Version, err.Error())
 		}
-		log.Exitf("ForceDB Success: Forced '%s' database at location '%s' to version %d.\n", config.Keys.DBDriver, config.Keys.DB, repository.Version)
+		cclog.Exitf("ForceDB Success: Forced '%s' database at location '%s' to version %d.\n", config.Keys.DBDriver, config.Keys.DB, repository.Version)
 	}
 
 	repository.Connect(config.Keys.DBDriver, config.Keys.DB)
@@ -129,7 +128,7 @@ func main() {
 	if flagNewUser != "" {
 		parts := strings.SplitN(flagNewUser, ":", 3)
 		if len(parts) != 3 || len(parts[0]) == 0 {
-			log.Abortf("Add User: Could not parse supplied argument format: No changes.\n"+
+			cclog.Abortf("Add User: Could not parse supplied argument format: No changes.\n"+
 				"Want: <username>:[admin,support,manager,api,user]:<password>\n"+
 				"Have: %s\n", flagNewUser)
 		}
@@ -138,18 +137,18 @@ func main() {
 		if err := ur.AddUser(&schema.User{
 			Username: parts[0], Projects: make([]string, 0), Password: parts[2], Roles: strings.Split(parts[1], ","),
 		}); err != nil {
-			log.Abortf("Add User: Could not add new user authentication for '%s' and roles '%s'.\nError: %s\n", parts[0], parts[1], err.Error())
+			cclog.Abortf("Add User: Could not add new user authentication for '%s' and roles '%s'.\nError: %s\n", parts[0], parts[1], err.Error())
 		} else {
-			log.Printf("Add User: Added new user '%s' with roles '%s'.\n", parts[0], parts[1])
+			cclog.Printf("Add User: Added new user '%s' with roles '%s'.\n", parts[0], parts[1])
 		}
 	}
 
 	if flagDelUser != "" {
 		ur := repository.GetUserRepository()
 		if err := ur.DelUser(flagDelUser); err != nil {
log.Abortf("Delete User: Could not delete user '%s' from DB.\nError: %s\n", flagDelUser, err.Error()) + cclog.Abortf("Delete User: Could not delete user '%s' from DB.\nError: %s\n", flagDelUser, err.Error()) } else { - log.Printf("Delete User: Deleted user '%s' from DB.\n", flagDelUser) + cclog.Printf("Delete User: Deleted user '%s' from DB.\n", flagDelUser) } } @@ -157,59 +156,59 @@ func main() { if flagSyncLDAP { if authHandle.LdapAuth == nil { - log.Abort("Sync LDAP: LDAP authentication is not configured, could not synchronize. No changes, exited.") + cclog.Abort("Sync LDAP: LDAP authentication is not configured, could not synchronize. No changes, exited.") } if err := authHandle.LdapAuth.Sync(); err != nil { - log.Abortf("Sync LDAP: Could not synchronize, failed with error.\nError: %s\n", err.Error()) + cclog.Abortf("Sync LDAP: Could not synchronize, failed with error.\nError: %s\n", err.Error()) } - log.Print("Sync LDAP: LDAP synchronization successfull.") + cclog.Print("Sync LDAP: LDAP synchronization successfull.") } if flagGenJWT != "" { ur := repository.GetUserRepository() user, err := ur.GetUser(flagGenJWT) if err != nil { - log.Abortf("JWT: Could not get supplied user '%s' from DB. No changes, exited.\nError: %s\n", flagGenJWT, err.Error()) + cclog.Abortf("JWT: Could not get supplied user '%s' from DB. No changes, exited.\nError: %s\n", flagGenJWT, err.Error()) } if !user.HasRole(schema.RoleApi) { - log.Warnf("JWT: User '%s' does not have the role 'api'. REST API endpoints will return error!\n", user.Username) + cclog.Warnf("JWT: User '%s' does not have the role 'api'. REST API endpoints will return error!\n", user.Username) } jwt, err := authHandle.JwtAuth.ProvideJWT(user) if err != nil { - log.Abortf("JWT: User '%s' found in DB, but failed to provide JWT.\nError: %s\n", user.Username, err.Error()) + cclog.Abortf("JWT: User '%s' found in DB, but failed to provide JWT.\nError: %s\n", user.Username, err.Error()) } - log.Printf("JWT: Successfully generated JWT for user '%s': %s\n", user.Username, jwt) + cclog.Printf("JWT: Successfully generated JWT for user '%s': %s\n", user.Username, jwt) } } else if flagNewUser != "" || flagDelUser != "" { - log.Abort("Error: Arguments '--add-user' and '--del-user' can only be used if authentication is enabled. No changes, exited.") + cclog.Abort("Error: Arguments '--add-user' and '--del-user' can only be used if authentication is enabled. 
No changes, exited.") } if err := archive.Init(config.Keys.Archive, config.Keys.DisableArchive); err != nil { - log.Abortf("Init: Failed to initialize archive.\nError: %s\n", err.Error()) + cclog.Abortf("Init: Failed to initialize archive.\nError: %s\n", err.Error()) } if err := metricdata.Init(); err != nil { - log.Abortf("Init: Failed to initialize metricdata repository.\nError %s\n", err.Error()) + cclog.Abortf("Init: Failed to initialize metricdata repository.\nError %s\n", err.Error()) } if flagReinitDB { if err := importer.InitDB(); err != nil { - log.Abortf("Init DB: Failed to re-initialize repository DB.\nError: %s\n", err.Error()) + cclog.Abortf("Init DB: Failed to re-initialize repository DB.\nError: %s\n", err.Error()) } else { - log.Print("Init DB: Sucessfully re-initialized repository DB.") + cclog.Print("Init DB: Sucessfully re-initialized repository DB.") } } if flagImportJob != "" { if err := importer.HandleImportFlag(flagImportJob); err != nil { - log.Abortf("Import Job: Job import failed.\nError: %s\n", err.Error()) + cclog.Abortf("Import Job: Job import failed.\nError: %s\n", err.Error()) } else { - log.Printf("Import Job: Imported Job '%s' into DB.\n", flagImportJob) + cclog.Printf("Import Job: Imported Job '%s' into DB.\n", flagImportJob) } } @@ -219,12 +218,12 @@ func main() { if flagApplyTags { if err := tagger.RunTaggers(); err != nil { - log.Abortf("Running job taggers.\nError: %s\n", err.Error()) + cclog.Abortf("Running job taggers.\nError: %s\n", err.Error()) } } if !flagServer { - log.Exit("No errors, server flag not set. Exiting cc-backend.") + cclog.Exit("No errors, server flag not set. Exiting cc-backend.") } archiver.Start(repository.GetJobRepository()) @@ -260,5 +259,5 @@ func main() { } runtimeEnv.SystemdNotifiy(true, "running") wg.Wait() - log.Print("Graceful shutdown completed!") + cclog.Print("Graceful shutdown completed!") } diff --git a/cmd/cc-backend/server.go b/cmd/cc-backend/server.go index cbd85b7..c01008a 100644 --- a/cmd/cc-backend/server.go +++ b/cmd/cc-backend/server.go @@ -1,5 +1,5 @@ // Copyright (C) NHR@FAU, University Erlangen-Nuremberg. -// All rights reserved. +// All rights reserved. This file is part of cc-backend. // Use of this source code is governed by a MIT-style // license that can be found in the LICENSE file. 
 package main
@@ -27,9 +27,9 @@ import (
 	"github.com/ClusterCockpit/cc-backend/internal/graph"
 	"github.com/ClusterCockpit/cc-backend/internal/graph/generated"
 	"github.com/ClusterCockpit/cc-backend/internal/routerConfig"
-	"github.com/ClusterCockpit/cc-backend/pkg/log"
 	"github.com/ClusterCockpit/cc-backend/pkg/runtimeEnv"
 	"github.com/ClusterCockpit/cc-backend/web"
+	cclog "github.com/ClusterCockpit/cc-lib/ccLogger"
 	"github.com/gorilla/handlers"
 	"github.com/gorilla/mux"
 	httpSwagger "github.com/swaggo/http-swagger"
@@ -101,7 +101,7 @@ func serverInit() {
 	router.HandleFunc("/login", func(rw http.ResponseWriter, r *http.Request) {
 		rw.Header().Add("Content-Type", "text/html; charset=utf-8")
-		log.Debugf("##%v##", info)
+		cclog.Debugf("##%v##", info)
 		web.RenderTemplate(rw, "login.tmpl", &web.Page{Title: "Login", Build: buildInfo, Infos: info})
 	}).Methods(http.MethodGet)
 	router.HandleFunc("/imprint", func(rw http.ResponseWriter, r *http.Request) {
@@ -237,7 +237,7 @@ func serverInit() {
 	if config.Keys.EmbedStaticFiles {
 		if i, err := os.Stat("./var/img"); err == nil {
 			if i.IsDir() {
-				log.Info("Use local directory for static images")
+				cclog.Info("Use local directory for static images")
 				router.PathPrefix("/img/").Handler(http.StripPrefix("/img/", http.FileServer(http.Dir("./var/img"))))
 			}
 		}
@@ -258,12 +258,12 @@ func serverInit() {
 func serverStart() {
 	handler := handlers.CustomLoggingHandler(io.Discard, router, func(_ io.Writer, params handlers.LogFormatterParams) {
 		if strings.HasPrefix(params.Request.RequestURI, "/api/") {
-			log.Debugf("%s %s (%d, %.02fkb, %dms)",
+			cclog.Debugf("%s %s (%d, %.02fkb, %dms)",
 				params.Request.Method, params.URL.RequestURI(),
 				params.StatusCode, float32(params.Size)/1024,
 				time.Since(params.TimeStamp).Milliseconds())
 		} else {
-			log.Debugf("%s %s (%d, %.02fkb, %dms)",
+			cclog.Debugf("%s %s (%d, %.02fkb, %dms)",
 				params.Request.Method, params.URL.RequestURI(),
 				params.StatusCode, float32(params.Size)/1024,
 				time.Since(params.TimeStamp).Milliseconds())
@@ -280,7 +280,7 @@ func serverStart() {
 	// Start http or https server
 	listener, err := net.Listen("tcp", config.Keys.Addr)
 	if err != nil {
-		log.Abortf("Server Start: Starting http listener on '%s' failed.\nError: %s\n", config.Keys.Addr, err.Error())
+		cclog.Abortf("Server Start: Starting http listener on '%s' failed.\nError: %s\n", config.Keys.Addr, err.Error())
 	}
 
 	if !strings.HasSuffix(config.Keys.Addr, ":80") && config.Keys.RedirectHttpTo != "" {
@@ -293,7 +293,7 @@ func serverStart() {
 		cert, err := tls.LoadX509KeyPair(
 			config.Keys.HttpsCertFile, config.Keys.HttpsKeyFile)
 		if err != nil {
-			log.Abortf("Server Start: Loading X509 keypair failed. Check options 'https-cert-file' and 'https-key-file' in 'config.json'.\nError: %s\n", err.Error())
+			cclog.Abortf("Server Start: Loading X509 keypair failed. Check options 'https-cert-file' and 'https-key-file' in 'config.json'.\nError: %s\n", err.Error())
 		}
 		listener = tls.NewListener(listener, &tls.Config{
 			Certificates: []tls.Certificate{cert},
@@ -304,20 +304,20 @@ func serverStart() {
 			MinVersion:               tls.VersionTLS12,
 			PreferServerCipherSuites: true,
 		})
-		log.Printf("HTTPS server listening at %s...\n", config.Keys.Addr)
+		cclog.Printf("HTTPS server listening at %s...\n", config.Keys.Addr)
 	} else {
-		log.Printf("HTTP server listening at %s...\n", config.Keys.Addr)
+		cclog.Printf("HTTP server listening at %s...\n", config.Keys.Addr)
 	}
 	//
 	// Because this program will want to bind to a privileged port (like 80), the listener must
 	// be established first, then the user can be changed, and after that,
 	// the actual http server can be started.
 	if err := runtimeEnv.DropPrivileges(config.Keys.Group, config.Keys.User); err != nil {
-		log.Abortf("Server Start: Error while preparing server start.\nError: %s\n", err.Error())
+		cclog.Abortf("Server Start: Error while preparing server start.\nError: %s\n", err.Error())
 	}
 
 	if err = server.Serve(listener); err != nil && err != http.ErrServerClosed {
-		log.Abortf("Server Start: Starting server failed.\nError: %s\n", err.Error())
+		cclog.Abortf("Server Start: Starting server failed.\nError: %s\n", err.Error())
 	}
 }
diff --git a/go.mod b/go.mod
index 4de1c70..4b5171c 100644
--- a/go.mod
+++ b/go.mod
@@ -6,11 +6,10 @@ toolchain go1.24.1
 
 require (
 	github.com/99designs/gqlgen v0.17.66
-	github.com/ClusterCockpit/cc-units v0.4.0
+	github.com/ClusterCockpit/cc-lib v0.3.0
 	github.com/Masterminds/squirrel v1.5.4
 	github.com/coreos/go-oidc/v3 v3.12.0
 	github.com/expr-lang/expr v1.17.3
-	github.com/fsnotify/fsnotify v1.9.0
 	github.com/go-co-op/gocron/v2 v2.16.0
 	github.com/go-ldap/ldap/v3 v3.4.10
 	github.com/go-sql-driver/mysql v1.9.0
@@ -23,15 +22,14 @@
 	github.com/jmoiron/sqlx v1.4.0
 	github.com/joho/godotenv v1.5.1
 	github.com/mattn/go-sqlite3 v1.14.24
-	github.com/prometheus/client_golang v1.21.0
-	github.com/prometheus/common v0.62.0
+	github.com/prometheus/client_golang v1.22.0
+	github.com/prometheus/common v0.63.0
 	github.com/qustavo/sqlhooks/v2 v2.1.0
 	github.com/santhosh-tekuri/jsonschema/v5 v5.3.1
 	github.com/swaggo/http-swagger v1.3.4
 	github.com/swaggo/swag v1.16.4
 	github.com/vektah/gqlparser/v2 v2.5.22
-	golang.org/x/crypto v0.36.0
-	golang.org/x/exp v0.0.0-20250218142911-aa4b98e5adaa
+	golang.org/x/crypto v0.37.0
 	golang.org/x/oauth2 v0.27.0
 	golang.org/x/time v0.5.0
 )
@@ -45,6 +43,7 @@ require (
 	github.com/cespare/xxhash/v2 v2.3.0 // indirect
 	github.com/cpuguy83/go-md2man/v2 v2.0.6 // indirect
 	github.com/felixge/httpsnoop v1.0.4 // indirect
+	github.com/fsnotify/fsnotify v1.9.0 // indirect
 	github.com/go-asn1-ber/asn1-ber v1.5.7 // indirect
 	github.com/go-jose/go-jose/v4 v4.0.5 // indirect
 	github.com/go-openapi/jsonpointer v0.21.0 // indirect
@@ -69,8 +68,8 @@
 	github.com/modern-go/reflect2 v1.0.2 // indirect
 	github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect
 	github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f // indirect
-	github.com/prometheus/client_model v0.6.1 // indirect
-	github.com/prometheus/procfs v0.15.1 // indirect
+	github.com/prometheus/client_model v0.6.2 // indirect
+	github.com/prometheus/procfs v0.16.1 // indirect
 	github.com/robfig/cron/v3 v3.0.1 // indirect
 	github.com/russross/blackfriday/v2 v2.1.0 // indirect
 	github.com/sosodev/duration v1.3.1 // indirect
@@ -78,13 +77,14 @@
 	github.com/urfave/cli/v2 v2.27.5 // indirect
 	github.com/xrash/smetrics v0.0.0-20240521201337-686a1a2994c1 // indirect
 	go.uber.org/atomic v1.11.0 // indirect
-	golang.org/x/mod v0.23.0 // indirect
-	golang.org/x/net v0.38.0 // indirect
-	golang.org/x/sync v0.12.0 // indirect
-	golang.org/x/sys v0.31.0 // indirect
-	golang.org/x/text v0.23.0 // indirect
-	golang.org/x/tools v0.30.0 // indirect
-	google.golang.org/protobuf v1.36.5 // indirect
+	golang.org/x/exp v0.0.0-20250408133849-7e4ce0ab07d0 // indirect
+	golang.org/x/mod v0.24.0 // indirect
+	golang.org/x/net v0.39.0 // indirect
+	golang.org/x/sync v0.13.0 // indirect
+	golang.org/x/sys v0.32.0 // indirect
+	golang.org/x/text v0.24.0 // indirect
+	golang.org/x/tools v0.32.0 // indirect
+	google.golang.org/protobuf v1.36.6 // indirect
 	gopkg.in/yaml.v2 v2.4.0 // indirect
 	gopkg.in/yaml.v3 v3.0.1 // indirect
 	sigs.k8s.io/yaml v1.4.0 // indirect
diff --git a/go.sum b/go.sum
index 189d58d..f3d25ad 100644
--- a/go.sum
+++ b/go.sum
@@ -6,8 +6,8 @@ github.com/Azure/go-ansiterm v0.0.0-20230124172434-306776ec8161 h1:L/gRVlceqvL25
 github.com/Azure/go-ansiterm v0.0.0-20230124172434-306776ec8161/go.mod h1:xomTg63KZ2rFqZQzSB4Vz2SUXa1BpHTVz9L5PTmPC4E=
 github.com/Azure/go-ntlmssp v0.0.0-20221128193559-754e69321358 h1:mFRzDkZVAjdal+s7s0MwaRv9igoPqLRdzOLzw/8Xvq8=
 github.com/Azure/go-ntlmssp v0.0.0-20221128193559-754e69321358/go.mod h1:chxPXzSsl7ZWRAuOIE23GDNzjWuZquvFlgA8xmpunjU=
-github.com/ClusterCockpit/cc-units v0.4.0 h1:zP5DOu99GmErW0tCDf0gcLrlWt42RQ9dpoONEOh4cI0=
-github.com/ClusterCockpit/cc-units v0.4.0/go.mod h1:3S3PAhAayS3pbgcT4q9Vn9VJw22Op51X0YimtG77zBw=
+github.com/ClusterCockpit/cc-lib v0.3.0 h1:HEWOgnzRM01U10ZFfpiUWMzkLHg5nPdXZqdsiI2q4x0=
+github.com/ClusterCockpit/cc-lib v0.3.0/go.mod h1:7CuXVNIJdynMZf6B9v4m54VCbbFg3ZD0tvLw2bVxN0A=
 github.com/KyleBanks/depth v1.2.1 h1:5h8fQADFrWtarTdtDudMmGsC7GPbOAu6RVB3ffsVFHc=
 github.com/KyleBanks/depth v1.2.1/go.mod h1:jzSb9d0L43HxTQfT+oSA1EEp2q+ne2uh6XgeJcm8brE=
 github.com/Masterminds/squirrel v1.5.4 h1:uUcX/aBc8O7Fg9kaISIUsHXdKuqehiXAMQTYX8afzqM=
@@ -88,8 +88,9 @@ github.com/golang-jwt/jwt/v5 v5.2.2/go.mod h1:pqrtFR0X4osieyHYxtmOUWsAWrfe1Q5UVI
 github.com/golang-migrate/migrate/v4 v4.18.2 h1:2VSCMz7x7mjyTXx3m2zPokOY82LTRgxK1yQYKo6wWQ8=
 github.com/golang-migrate/migrate/v4 v4.18.2/go.mod h1:2CM6tJvn2kqPXwnXO/d3rAQYiyoIm180VsO8PRX6Rpk=
 github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
-github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI=
 github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
+github.com/google/go-cmp v0.7.0 h1:wk8382ETsv4JYUZwIsn6YpYiWiBsYLSJiTsyBybVuN8=
+github.com/google/go-cmp v0.7.0/go.mod h1:pXiqmnSA92OHEEa9HXL2W4E7lf9JzCmGVUdgjX3N/iU=
 github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
 github.com/google/gofuzz v1.2.0 h1:xRy4A+RhZaiKjJ1bPfwQ8sedCA+YS2YcCHW6ec7JMi0=
 github.com/google/gofuzz v1.2.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
@@ -143,8 +144,6 @@ github.com/jpillora/backoff v1.0.0 h1:uvFg412JmmHBHw7iwprIxkPMI+sGQ4kzOWsMeHnm2E
 github.com/jpillora/backoff v1.0.0/go.mod h1:J/6gKK9jxlEcS3zixgDgUAsiuZ7yrSoa/FX5e0EB2j4=
 github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM=
 github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo=
-github.com/klauspost/compress v1.17.11 h1:In6xLpyWOi1+C7tXUUWv2ot1QvBjxevKAaI6IXrJmUc=
-github.com/klauspost/compress v1.17.11/go.mod h1:pMDklpSncoRMuLFrf1W9Ss9KT+0rH90U12bZKk7uwG0=
 github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE=
 github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk=
 github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
@@ -186,14 +185,14 @@ github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=
 github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
 github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
 github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
-github.com/prometheus/client_golang v1.21.0 h1:DIsaGmiaBkSangBgMtWdNfxbMNdku5IK6iNhrEqWvdA=
-github.com/prometheus/client_golang v1.21.0/go.mod h1:U9NM32ykUErtVBxdvD3zfi+EuFkkaBvMb09mIfe0Zgg=
-github.com/prometheus/client_model v0.6.1 h1:ZKSh/rekM+n3CeS952MLRAdFwIKqeY8b62p8ais2e9E=
-github.com/prometheus/client_model v0.6.1/go.mod h1:OrxVMOVHjw3lKMa8+x6HeMGkHMQyHDk9E3jmP2AmGiY=
-github.com/prometheus/common v0.62.0 h1:xasJaQlnWAeyHdUBeGjXmutelfJHWMRr+Fg4QszZ2Io=
-github.com/prometheus/common v0.62.0/go.mod h1:vyBcEuLSvWos9B1+CyL7JZ2up+uFzXhkqml0W5zIY1I=
-github.com/prometheus/procfs v0.15.1 h1:YagwOFzUgYfKKHX6Dr+sHT7km/hxC76UB0learggepc=
-github.com/prometheus/procfs v0.15.1/go.mod h1:fB45yRUv8NstnjriLhBQLuOUt+WW4BsoGhij/e3PBqk=
+github.com/prometheus/client_golang v1.22.0 h1:rb93p9lokFEsctTys46VnV1kLCDpVZ0a/Y92Vm0Zc6Q=
+github.com/prometheus/client_golang v1.22.0/go.mod h1:R7ljNsLXhuQXYZYtw6GAE9AZg8Y7vEW5scdCXrWRXC0=
+github.com/prometheus/client_model v0.6.2 h1:oBsgwpGs7iVziMvrGhE53c/GrLUsZdHnqNwqPLxwZyk=
+github.com/prometheus/client_model v0.6.2/go.mod h1:y3m2F6Gdpfy6Ut/GBsUqTWZqCUvMVzSfMLjcu6wAwpE=
+github.com/prometheus/common v0.63.0 h1:YR/EIY1o3mEFP/kZCD7iDMnLPlGyuU2Gb3HIcXnA98k=
+github.com/prometheus/common v0.63.0/go.mod h1:VVFF/fBIoToEnWRVkYoXEkq3R3paCoxG9PXP74SnV18=
+github.com/prometheus/procfs v0.16.1 h1:hZ15bTNuirocR6u0JZ6BAHHmwS1p8B4P6MRqxtzMyRg=
+github.com/prometheus/procfs v0.16.1/go.mod h1:teAbpZRB1iIAJYREa1LsoWUXykVXA1KlTmWl8x/U+Is=
 github.com/qustavo/sqlhooks/v2 v2.1.0 h1:54yBemHnGHp/7xgT+pxwmIlMSDNYKx5JW5dfRAiCZi0=
 github.com/qustavo/sqlhooks/v2 v2.1.0/go.mod h1:aMREyKo7fOKTwiLuWPsaHRXEmtqG4yREztO0idF83AU=
 github.com/robfig/cron/v3 v3.0.1 h1:WdRxkvbJztn8LMz/QEvLN5sBU+xKpSqwwUO1Pjr4qDs=
@@ -251,17 +250,17 @@ golang.org/x/crypto v0.13.0/go.mod h1:y6Z2r+Rw4iayiXXAIxJIDAJ1zMW4yaTpebo8fPOliY
 golang.org/x/crypto v0.19.0/go.mod h1:Iy9bg/ha4yyC70EfRS8jz+B6ybOBKMaSxLj6P6oBDfU=
 golang.org/x/crypto v0.23.0/go.mod h1:CKFgDieR+mRhux2Lsu27y0fO304Db0wZe70UKqHu0v8=
 golang.org/x/crypto v0.31.0/go.mod h1:kDsLvtWBEx7MV9tJOj9bnXsPbxwJQ6csT/x4KIN4Ssk=
-golang.org/x/crypto v0.36.0 h1:AnAEvhDddvBdpY+uR+MyHmuZzzNqXSe/GvuDeob5L34=
-golang.org/x/crypto v0.36.0/go.mod h1:Y4J0ReaxCR1IMaabaSMugxJES1EpwhBHhv2bDHklZvc=
-golang.org/x/exp v0.0.0-20250218142911-aa4b98e5adaa h1:t2QcU6V556bFjYgu4L6C+6VrCPyJZ+eyRsABUPs1mz4=
-golang.org/x/exp v0.0.0-20250218142911-aa4b98e5adaa/go.mod h1:BHOTPb3L19zxehTsLoJXVaTktb06DFgmdW6Wb9s8jqk=
+golang.org/x/crypto v0.37.0 h1:kJNSjF/Xp7kU0iB2Z+9viTPMW4EqqsrywMXLJOOsXSE=
+golang.org/x/crypto v0.37.0/go.mod h1:vg+k43peMZ0pUMhYmVAWysMK35e6ioLh3wB8ZCAfbVc=
+golang.org/x/exp v0.0.0-20250408133849-7e4ce0ab07d0 h1:R84qjqJb5nVJMxqWYb3np9L5ZsaDtB+a39EqjV0JSUM=
+golang.org/x/exp v0.0.0-20250408133849-7e4ce0ab07d0/go.mod h1:S9Xr4PYopiDyqSyp5NjCrhFrqg6A5zA2E/iPHPhqnS8=
 golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4=
 golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs=
 golang.org/x/mod v0.12.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs=
 golang.org/x/mod v0.15.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c=
 golang.org/x/mod v0.17.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c=
-golang.org/x/mod v0.23.0 h1:Zb7khfcRGKk+kqfxFaP5tZqCnDZMjC5VtUBs87Hr6QM=
-golang.org/x/mod v0.23.0/go.mod h1:6SkKJ3Xj0I0BrPOZoBy3bdMptDDU9oJrpohJ3eWZ1fY=
+golang.org/x/mod v0.24.0 h1:ZfthKaKaT4NrhGVZHO1/WDTwGES4De8KtWO0SIbNJMU=
+golang.org/x/mod v0.24.0/go.mod h1:IXM97Txy2VM4PJ3gI61r1YEk/gAj6zAHN3AdZt6S9Ww=
 golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
 golang.org/x/net v0.0.0-20200114155413-6afb5195e5aa/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
 golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
@@ -273,8 +272,8 @@ golang.org/x/net v0.15.0/go.mod h1:idbUs1IY1+zTqbi8yxTbhexhEEk5ur9LInksu6HrEpk=
 golang.org/x/net v0.21.0/go.mod h1:bIjVDfnllIU7BJ2DNgfnXvpSvtn8VRwhlsaeUTyUS44=
 golang.org/x/net v0.25.0/go.mod h1:JkAGAh7GEvH74S6FOH42FLoXpXbE/aqXSrIQjXgsiwM=
 golang.org/x/net v0.33.0/go.mod h1:HXLR5J+9DxmrqMwG9qjGCxZ+zKXxBru04zlTvWlWuN4=
-golang.org/x/net v0.38.0 h1:vRMAPTMaeGqVhG5QyLJHqNDwecKTomGeqbnfZyKlBI8=
-golang.org/x/net v0.38.0/go.mod h1:ivrbrMbzFq5J41QOQh0siUuly180yBYtLp+CKbEaFx8=
+golang.org/x/net v0.39.0 h1:ZCu7HMWDxpXpaiKdhzIfaltL9Lp31x/3fCP11bc6/fY=
+golang.org/x/net v0.39.0/go.mod h1:X7NRbYVEA+ewNkCNyJ513WmMdQ3BineSwVtN2zD/d+E=
 golang.org/x/oauth2 v0.27.0 h1:da9Vo7/tDv5RH/7nZDz1eMGS/q1Vv1N/7FCrBhI9I3M=
 golang.org/x/oauth2 v0.27.0/go.mod h1:onh5ek6nERTohokkhCD/y2cV4Do3fxFHFuAejCkRWT8=
 golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
@@ -284,8 +283,8 @@ golang.org/x/sync v0.3.0/go.mod h1:FU7BRWz2tNW+3quACPkgCx/L+uEAv1htQ0V83Z9Rj+Y=
 golang.org/x/sync v0.6.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
 golang.org/x/sync v0.7.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
 golang.org/x/sync v0.10.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
-golang.org/x/sync v0.12.0 h1:MHc5BpPuC30uJk597Ri8TV3CNZcTLu6B6z4lJy+g6Jw=
-golang.org/x/sync v0.12.0/go.mod h1:1dzgHSNfp02xaA81J2MS99Qcpr2w7fw1gpm99rleRqA=
+golang.org/x/sync v0.13.0 h1:AauUjRAJ9OSnvULf/ARrrVywoJDy0YS2AwQ98I37610=
+golang.org/x/sync v0.13.0/go.mod h1:1dzgHSNfp02xaA81J2MS99Qcpr2w7fw1gpm99rleRqA=
 golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
 golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
 golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
@@ -297,8 +296,8 @@ golang.org/x/sys v0.12.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
 golang.org/x/sys v0.17.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
 golang.org/x/sys v0.20.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
 golang.org/x/sys v0.28.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
-golang.org/x/sys v0.31.0 h1:ioabZlmFYtWhL+TRYpcnNlLwhyxaM9kWTDEmfnprqik=
-golang.org/x/sys v0.31.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k=
+golang.org/x/sys v0.32.0 h1:s77OFDvIQeibCmezSnk/q6iAfkdiQaJi4VzroCFrN20=
+golang.org/x/sys v0.32.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k=
 golang.org/x/telemetry v0.0.0-20240228155512-f48c80bd79b2/go.mod h1:TeRTkGYfJXctD9OcfyVLyj2J3IxLnKwHJR8f4D8a3YE=
 golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
 golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
@@ -317,8 +316,8 @@ golang.org/x/text v0.13.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE=
 golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU=
 golang.org/x/text v0.15.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU=
 golang.org/x/text v0.21.0/go.mod h1:4IBbMaMmOPCJ8SecivzSH54+73PCFmPWxNTLm+vZkEQ=
-golang.org/x/text v0.23.0 h1:D71I7dUrlY+VX0gQShAThNGHFxZ13dGLBHQLVl1mJlY=
-golang.org/x/text v0.23.0/go.mod h1:/BLNzu4aZCJ1+kcD0DNRotWKage4q2rGVAg4o22unh4=
+golang.org/x/text v0.24.0 h1:dd5Bzh4yt5KYA8f9CJHCP4FB4D51c2c6JvN37xJJkJ0=
+golang.org/x/text v0.24.0/go.mod h1:L8rBsPeo2pSS+xqN0d5u2ikmjtmoJbDBT1b7nHvFCdU=
 golang.org/x/time v0.5.0 h1:o7cqy6amK/52YcAKIPlM3a+Fpj35zvRj2TP+e1xFSfk=
 golang.org/x/time v0.5.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM=
 golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
@@ -327,11 +326,11 @@ golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc
 golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU=
 golang.org/x/tools v0.13.0/go.mod h1:HvlwmtVNQAhOuCjW7xxvovg8wbNq7LwfXh/k7wXUl58=
 golang.org/x/tools v0.21.1-0.20240508182429-e35e4ccd0d2d/go.mod h1:aiJjzUbINMkxbQROHiO6hDPo2LHcIPhhQsa9DLh0yGk=
-golang.org/x/tools v0.30.0 h1:BgcpHewrV5AUp2G9MebG4XPFI1E2W41zU1SaqVA9vJY=
-golang.org/x/tools v0.30.0/go.mod h1:c347cR/OJfw5TI+GfX7RUPNMdDRRbjvYTS0jPyvsVtY=
+golang.org/x/tools v0.32.0 h1:Q7N1vhpkQv7ybVzLFtTjvQya2ewbwNDZzUgfXGqtMWU=
+golang.org/x/tools v0.32.0/go.mod h1:ZxrU41P/wAbZD8EDa6dDCa6XfpkhJ7HFMjHJXfBDu8s=
 golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
-google.golang.org/protobuf v1.36.5 h1:tPhr+woSbjfYvY6/GPufUoYizxw1cF/yFoxJ2fmpwlM=
-google.golang.org/protobuf v1.36.5/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE=
+google.golang.org/protobuf v1.36.6 h1:z1NpPI8ku2WgiWnf+t9wTPsn6eP1L7ksHUlkfLvd9xY=
+google.golang.org/protobuf v1.36.6/go.mod h1:jduwjTPXsFjZGTmRluh+L6NjiWu7pchiJ2/5YcXBHnY=
 gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
 gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk=
 gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q=
diff --git a/gqlgen.yml b/gqlgen.yml
index 307a074..3118ec9 100644
--- a/gqlgen.yml
+++ b/gqlgen.yml
@@ -51,70 +51,52 @@ models:
     - github.com/99designs/gqlgen/graphql.Int64
     - github.com/99designs/gqlgen/graphql.Int32
   Job:
-    model: "github.com/ClusterCockpit/cc-backend/pkg/schema.Job"
+    model: "github.com/ClusterCockpit/cc-lib/schema.Job"
     fields:
       tags:
         resolver: true
       metaData:
         resolver: true
   Cluster:
-    model: "github.com/ClusterCockpit/cc-backend/pkg/schema.Cluster"
+    model: "github.com/ClusterCockpit/cc-lib/schema.Cluster"
     fields:
       partitions:
         resolver: true
   Node:
-    model: "github.com/ClusterCockpit/cc-backend/pkg/schema.Node"
+    model: "github.com/ClusterCockpit/cc-lib/schema.Node"
    fields:
      metaData:
        resolver: true
-  NullableFloat:
-    { model: "github.com/ClusterCockpit/cc-backend/pkg/schema.Float" }
-  MetricScope:
-    { model: "github.com/ClusterCockpit/cc-backend/pkg/schema.MetricScope" }
-  MetricValue:
-    { model: "github.com/ClusterCockpit/cc-backend/pkg/schema.MetricValue" }
+  NullableFloat: { model: "github.com/ClusterCockpit/cc-lib/schema.Float" }
+  MetricScope: { model: "github.com/ClusterCockpit/cc-lib/schema.MetricScope" }
+  MetricValue: { model: "github.com/ClusterCockpit/cc-lib/schema.MetricValue" }
   JobStatistics:
-    { model: "github.com/ClusterCockpit/cc-backend/pkg/schema.JobStatistics" }
+    { model: "github.com/ClusterCockpit/cc-lib/schema.JobStatistics" }
   GlobalMetricListItem:
-    {
-      model: "github.com/ClusterCockpit/cc-backend/pkg/schema.GlobalMetricListItem",
-    }
+    { model: "github.com/ClusterCockpit/cc-lib/schema.GlobalMetricListItem" }
   ClusterSupport:
-    { model: "github.com/ClusterCockpit/cc-backend/pkg/schema.ClusterSupport" }
-  Tag: { model: "github.com/ClusterCockpit/cc-backend/pkg/schema.Tag" }
-  Resource:
-    { model: "github.com/ClusterCockpit/cc-backend/pkg/schema.Resource" }
-  JobState:
-    { model: "github.com/ClusterCockpit/cc-backend/pkg/schema.JobState" }
+    { model: "github.com/ClusterCockpit/cc-lib/schema.ClusterSupport" }
+  Tag: { model: "github.com/ClusterCockpit/cc-lib/schema.Tag" }
+  Resource: { model: "github.com/ClusterCockpit/cc-lib/schema.Resource" }
+  JobState: { model: "github.com/ClusterCockpit/cc-lib/schema.JobState" }
   MonitoringState:
-    { model: "github.com/ClusterCockpit/cc-backend/pkg/schema.NodeState" }
+    { model: "github.com/ClusterCockpit/cc-lib/schema.NodeState" }
   HealthState:
-    { model: "github.com/ClusterCockpit/cc-backend/pkg/schema.MonitoringState" }
-  TimeRange:
-    { model: "github.com/ClusterCockpit/cc-backend/pkg/schema.TimeRange" }
-  IntRange:
-    { model: "github.com/ClusterCockpit/cc-backend/pkg/schema.IntRange" }
-  JobMetric:
-    { model: "github.com/ClusterCockpit/cc-backend/pkg/schema.JobMetric" }
-  Series: { model: "github.com/ClusterCockpit/cc-backend/pkg/schema.Series" }
+    { model: "github.com/ClusterCockpit/cc-lib/schema.MonitoringState" }
+  TimeRange: { model: "github.com/ClusterCockpit/cc-lib/schema.TimeRange" }
+  IntRange: { model: "github.com/ClusterCockpit/cc-lib/schema.IntRange" }
+  JobMetric: { model: "github.com/ClusterCockpit/cc-lib/schema.JobMetric" }
+  Series: { model: "github.com/ClusterCockpit/cc-lib/schema.Series" }
   MetricStatistics:
-    {
-      model: "github.com/ClusterCockpit/cc-backend/pkg/schema.MetricStatistics",
-    }
+    { model: "github.com/ClusterCockpit/cc-lib/schema.MetricStatistics" }
   MetricConfig:
-    { model: "github.com/ClusterCockpit/cc-backend/pkg/schema.MetricConfig" }
+    { model: "github.com/ClusterCockpit/cc-lib/schema.MetricConfig" }
   SubClusterConfig:
-    {
-      model: "github.com/ClusterCockpit/cc-backend/pkg/schema.SubClusterConfig",
-    }
-  Accelerator:
-    { model: "github.com/ClusterCockpit/cc-backend/pkg/schema.Accelerator" }
-  Topology:
-    { model: "github.com/ClusterCockpit/cc-backend/pkg/schema.Topology" }
+    { model: "github.com/ClusterCockpit/cc-lib/schema.SubClusterConfig" }
+  Accelerator: { model: "github.com/ClusterCockpit/cc-lib/schema.Accelerator" }
+  Topology: { model: "github.com/ClusterCockpit/cc-lib/schema.Topology" }
   FilterRanges:
-    { model: "github.com/ClusterCockpit/cc-backend/pkg/schema.FilterRanges" }
-  SubCluster:
-    { model: "github.com/ClusterCockpit/cc-backend/pkg/schema.SubCluster" }
-  StatsSeries:
-    { model: "github.com/ClusterCockpit/cc-backend/pkg/schema.StatsSeries" }
-  Unit: { model: "github.com/ClusterCockpit/cc-backend/pkg/schema.Unit" }
+    { model: "github.com/ClusterCockpit/cc-lib/schema.FilterRanges" }
+  SubCluster: { model: "github.com/ClusterCockpit/cc-lib/schema.SubCluster" }
+  StatsSeries: { model: "github.com/ClusterCockpit/cc-lib/schema.StatsSeries" }
+  Unit: { model: "github.com/ClusterCockpit/cc-lib/schema.Unit" }
diff --git a/internal/api/api_test.go b/internal/api/api_test.go
index a938cb6..9b792c2 100644
--- a/internal/api/api_test.go
+++ b/internal/api/api_test.go
@@ -1,5 +1,5 @@
 // Copyright (C) NHR@FAU, University Erlangen-Nuremberg.
-// All rights reserved.
+// All rights reserved. This file is part of cc-backend.
 // Use of this source code is governed by a MIT-style
 // license that can be found in the LICENSE file.
 package api_test
@@ -27,8 +27,8 @@ import (
 	"github.com/ClusterCockpit/cc-backend/internal/metricdata"
 	"github.com/ClusterCockpit/cc-backend/internal/repository"
 	"github.com/ClusterCockpit/cc-backend/pkg/archive"
-	"github.com/ClusterCockpit/cc-backend/pkg/log"
-	"github.com/ClusterCockpit/cc-backend/pkg/schema"
+	cclog "github.com/ClusterCockpit/cc-lib/ccLogger"
+	"github.com/ClusterCockpit/cc-lib/schema"
 
 	"github.com/gorilla/mux"
 	_ "github.com/mattn/go-sqlite3"
@@ -116,7 +116,7 @@ func setup(t *testing.T) *api.RestApi {
 	]
 }`
 
-	log.Init("info", true)
+	cclog.Init("info", true)
 	tmpdir := t.TempDir()
 	jobarchive := filepath.Join(tmpdir, "job-archive")
 	if err := os.Mkdir(jobarchive, 0777); err != nil {
diff --git a/internal/api/cluster.go b/internal/api/cluster.go
index 0529480..0a11d9d 100644
--- a/internal/api/cluster.go
+++ b/internal/api/cluster.go
@@ -1,5 +1,5 @@
 // Copyright (C) NHR@FAU, University Erlangen-Nuremberg.
-// All rights reserved.
+// All rights reserved. This file is part of cc-backend.
 // Use of this source code is governed by a MIT-style
 // license that can be found in the LICENSE file.
 package api
@@ -12,7 +12,7 @@ import (
 
 	"github.com/ClusterCockpit/cc-backend/internal/repository"
 	"github.com/ClusterCockpit/cc-backend/pkg/archive"
-	"github.com/ClusterCockpit/cc-backend/pkg/schema"
+	"github.com/ClusterCockpit/cc-lib/schema"
 )
 
 // GetClustersApiResponse model
diff --git a/internal/api/job.go b/internal/api/job.go
index cce47c5..4c8ca76 100644
--- a/internal/api/job.go
+++ b/internal/api/job.go
@@ -1,5 +1,5 @@
 // Copyright (C) NHR@FAU, University Erlangen-Nuremberg.
-// All rights reserved.
+// All rights reserved. This file is part of cc-backend.
 // Use of this source code is governed by a MIT-style
 // license that can be found in the LICENSE file.
 package api
@@ -23,8 +23,8 @@ import (
 	"github.com/ClusterCockpit/cc-backend/internal/metricDataDispatcher"
 	"github.com/ClusterCockpit/cc-backend/internal/repository"
 	"github.com/ClusterCockpit/cc-backend/pkg/archive"
-	"github.com/ClusterCockpit/cc-backend/pkg/log"
-	"github.com/ClusterCockpit/cc-backend/pkg/schema"
+	cclog "github.com/ClusterCockpit/cc-lib/ccLogger"
+	"github.com/ClusterCockpit/cc-lib/schema"
 
 	"github.com/gorilla/mux"
 )
@@ -198,7 +198,7 @@ func (api *RestApi) getJobs(rw http.ResponseWriter, r *http.Request) {
 		results = append(results, job)
 	}
 
-	log.Debugf("/api/jobs: %d jobs returned", len(results))
+	cclog.Debugf("/api/jobs: %d jobs returned", len(results))
 	rw.Header().Add("Content-Type", "application/json")
 	bw := bufio.NewWriter(rw)
 	defer bw.Flush()
@@ -286,12 +286,12 @@ func (api *RestApi) getCompleteJobById(rw http.ResponseWriter, r *http.Request)
 	if r.URL.Query().Get("all-metrics") == "true" {
 		data, err = metricDataDispatcher.LoadData(job, nil, scopes, r.Context(), resolution)
 		if err != nil {
-			log.Warnf("REST: error while loading all-metrics job data for JobID %d on %s", job.JobID, job.Cluster)
+			cclog.Warnf("REST: error while loading all-metrics job data for JobID %d on %s", job.JobID, job.Cluster)
 			return
 		}
 	}
 
-	log.Debugf("/api/job/%s: get job %d", id, job.JobID)
+	cclog.Debugf("/api/job/%s: get job %d", id, job.JobID)
 	rw.Header().Add("Content-Type", "application/json")
 	bw := bufio.NewWriter(rw)
 	defer bw.Flush()
@@ -382,7 +382,7 @@ func (api *RestApi) getJobById(rw http.ResponseWriter, r *http.Request) {
 
 	data, err := metricDataDispatcher.LoadData(job, metrics, scopes, r.Context(), resolution)
 	if err != nil {
-		log.Warnf("REST: error while loading job data for JobID %d on %s", job.JobID, job.Cluster)
+		cclog.Warnf("REST: error while loading job data for JobID %d on %s", job.JobID, job.Cluster)
 		return
 	}
 
@@ -397,7 +397,7 @@ func (api *RestApi) getJobById(rw http.ResponseWriter, r *http.Request) {
 		}
 	}
 
-	log.Debugf("/api/job/%s: get job %d", id, job.JobID)
+	cclog.Debugf("/api/job/%s: get job %d", id, job.JobID)
 	rw.Header().Add("Content-Type", "application/json")
 	bw := bufio.NewWriter(rw)
 	defer bw.Flush()
@@ -565,7 +565,7 @@ func (api *RestApi) removeTagJob(rw http.ResponseWriter, r *http.Request) {
 	for _, rtag := range req {
 		// Only Global and Admin Tags
 		if rtag.Scope != "global" && rtag.Scope != "admin" {
-			log.Warnf("Cannot delete private tag for job %d: Skip", job.JobID)
+			cclog.Warnf("Cannot delete private tag for job %d: Skip", job.JobID)
 			continue
 		}
 
@@ -611,7 +611,7 @@ func (api *RestApi) removeTags(rw http.ResponseWriter, r *http.Request) {
 	for _, rtag := range req {
 		// Only Global and Admin Tags
 		if rtag.Scope != "global" && rtag.Scope != "admin" {
-			log.Warn("Cannot delete private tag: Skip")
+			cclog.Warn("Cannot delete private tag: Skip")
 			continue
 		}
 
@@ -654,7 +654,7 @@ func (api *RestApi) startJob(rw http.ResponseWriter, r *http.Request) {
 		return
 	}
 
-	log.Printf("REST: %s\n", req.GoString())
+	cclog.Printf("REST: %s\n", req.GoString())
 	req.State = schema.JobStateRunning
 
 	if err := importer.SanityChecks(&req); err != nil {
@@ -697,7 +697,7 @@ func (api *RestApi) startJob(rw http.ResponseWriter, r *http.Request) {
 		}
 	}
 
-	log.Printf("new job (id: %d): cluster=%s, jobId=%d, user=%s, startTime=%d", id, req.Cluster, req.JobID, req.User, req.StartTime)
+	cclog.Printf("new job (id: %d): cluster=%s, jobId=%d, user=%s, startTime=%d", id, req.Cluster, req.JobID, req.User, req.StartTime)
 	rw.Header().Add("Content-Type", "application/json")
 	rw.WriteHeader(http.StatusCreated)
 	json.NewEncoder(rw).Encode(DefaultApiResponse{
@@ -737,7 +737,7 @@ func (api *RestApi) stopJobByRequest(rw http.ResponseWriter, r *http.Request) {
 		return
 	}
 
-	// log.Printf("loading db job for stopJobByRequest... : stopJobApiRequest=%v", req)
+	// cclog.Printf("loading db job for stopJobByRequest... : stopJobApiRequest=%v", req)
 	job, err = api.JobRepository.Find(req.JobId, req.Cluster, req.StartTime)
 	if err != nil {
 		job, err = api.JobRepository.FindCached(req.JobId, req.Cluster, req.StartTime)
@@ -920,7 +920,7 @@ func (api *RestApi) checkAndHandleStopJob(rw http.ResponseWriter, job *schema.Jo
 	}
 	api.JobRepository.Mutex.Unlock()
 
-	log.Printf("archiving job... (dbid: %d): cluster=%s, jobId=%d, user=%s, startTime=%d, duration=%d, state=%s", job.ID, job.Cluster, job.JobID, job.User, job.StartTime, job.Duration, job.State)
+	cclog.Printf("archiving job... (dbid: %d): cluster=%s, jobId=%d, user=%s, startTime=%d, duration=%d, state=%s", job.ID, job.Cluster, job.JobID, job.User, job.StartTime, job.Duration, job.State)
 
 	// Send a response (with status OK). This means that errors that happen from here on forward
 	// can *NOT* be communicated to the client. If reading from a MetricDataRepository or
diff --git a/internal/api/node.go b/internal/api/node.go
index 61d7943..385b2da 100644
--- a/internal/api/node.go
+++ b/internal/api/node.go
@@ -1,5 +1,5 @@
 // Copyright (C) NHR@FAU, University Erlangen-Nuremberg.
-// All rights reserved.
+// All rights reserved. This file is part of cc-backend.
 // Use of this source code is governed by a MIT-style
 // license that can be found in the LICENSE file.
 package api
@@ -10,7 +10,7 @@ import (
 	"strings"
 
 	"github.com/ClusterCockpit/cc-backend/internal/repository"
-	"github.com/ClusterCockpit/cc-backend/pkg/schema"
+	"github.com/ClusterCockpit/cc-lib/schema"
 )
 
 type Node struct {
diff --git a/internal/api/rest.go b/internal/api/rest.go
index e2fc119..e4411a4 100644
--- a/internal/api/rest.go
+++ b/internal/api/rest.go
@@ -1,5 +1,5 @@
 // Copyright (C) NHR@FAU, University Erlangen-Nuremberg.
-// All rights reserved.
+// All rights reserved. This file is part of cc-backend.
 // Use of this source code is governed by a MIT-style
 // license that can be found in the LICENSE file.
 package api
@@ -16,9 +16,9 @@ import (
 	"github.com/ClusterCockpit/cc-backend/internal/auth"
 	"github.com/ClusterCockpit/cc-backend/internal/config"
 	"github.com/ClusterCockpit/cc-backend/internal/repository"
-	"github.com/ClusterCockpit/cc-backend/internal/util"
-	"github.com/ClusterCockpit/cc-backend/pkg/log"
-	"github.com/ClusterCockpit/cc-backend/pkg/schema"
+	cclog "github.com/ClusterCockpit/cc-lib/ccLogger"
+	"github.com/ClusterCockpit/cc-lib/schema"
+	"github.com/ClusterCockpit/cc-lib/util"
 
 	"github.com/gorilla/mux"
 )
@@ -130,7 +130,7 @@ type DefaultApiResponse struct {
 }
 
 func handleError(err error, statusCode int, rw http.ResponseWriter) {
-	log.Warnf("REST ERROR : %s", err.Error())
+	cclog.Warnf("REST ERROR : %s", err.Error())
 	rw.Header().Add("Content-Type", "application/json")
 	rw.WriteHeader(statusCode)
 	json.NewEncoder(rw).Encode(ErrorResponse{
@@ -161,7 +161,7 @@ func (api *RestApi) editNotice(rw http.ResponseWriter, r *http.Request) {
 	if !noticeExists {
 		ntxt, err := os.Create("./var/notice.txt")
 		if err != nil {
-			log.Errorf("Creating ./var/notice.txt failed: %s", err.Error())
+			cclog.Errorf("Creating ./var/notice.txt failed: %s", err.Error())
 			http.Error(rw, err.Error(), http.StatusUnprocessableEntity)
 			return
 		}
@@ -170,7 +170,7 @@ func (api *RestApi) editNotice(rw http.ResponseWriter, r *http.Request) {
 
 	if newContent != "" {
 		if err := os.WriteFile("./var/notice.txt", []byte(newContent), 0o666); err != nil {
-			log.Errorf("Writing to ./var/notice.txt failed: %s", err.Error())
+			cclog.Errorf("Writing to ./var/notice.txt failed: %s", err.Error())
 			http.Error(rw, err.Error(), http.StatusUnprocessableEntity)
 			return
 		} else {
@@ -178,7 +178,7 @@ func (api *RestApi) editNotice(rw http.ResponseWriter, r *http.Request) {
 		}
 	} else {
 		if err := os.WriteFile("./var/notice.txt", []byte(""), 0o666); err != nil {
-			log.Errorf("Writing to ./var/notice.txt failed: %s", err.Error())
+			cclog.Errorf("Writing to ./var/notice.txt failed: %s", err.Error())
 			http.Error(rw, err.Error(), http.StatusUnprocessableEntity)
 			return
 		} else {
diff --git a/internal/api/user.go b/internal/api/user.go
index 3ba9c87..7e17e36 100644
--- a/internal/api/user.go
+++ b/internal/api/user.go
@@ -1,5 +1,5 @@
 // Copyright (C) NHR@FAU, University Erlangen-Nuremberg.
-// All rights reserved.
+// All rights reserved. This file is part of cc-backend.
 // Use of this source code is governed by a MIT-style
 // license that can be found in the LICENSE file.
 package api
@@ -10,7 +10,7 @@ import (
 	"net/http"
 
 	"github.com/ClusterCockpit/cc-backend/internal/repository"
-	"github.com/ClusterCockpit/cc-backend/pkg/schema"
+	"github.com/ClusterCockpit/cc-lib/schema"
 
 	"github.com/gorilla/mux"
 )
diff --git a/internal/archiver/archiveWorker.go b/internal/archiver/archiveWorker.go
index e9f3dc9..9e834b2 100644
--- a/internal/archiver/archiveWorker.go
+++ b/internal/archiver/archiveWorker.go
@@ -1,5 +1,5 @@
 // Copyright (C) NHR@FAU, University Erlangen-Nuremberg.
-// All rights reserved.
+// All rights reserved. This file is part of cc-backend.
 // Use of this source code is governed by a MIT-style
 // license that can be found in the LICENSE file.
 package archiver
@@ -10,8 +10,8 @@ import (
 	"time"
 
 	"github.com/ClusterCockpit/cc-backend/internal/repository"
-	"github.com/ClusterCockpit/cc-backend/pkg/log"
-	"github.com/ClusterCockpit/cc-backend/pkg/schema"
+	cclog "github.com/ClusterCockpit/cc-lib/ccLogger"
+	"github.com/ClusterCockpit/cc-lib/schema"
 	sq "github.com/Masterminds/squirrel"
 )
@@ -40,7 +40,7 @@ func archivingWorker() {
 			// not using meta data, called to load JobMeta into Cache?
 			// will fail if job meta not in repository
 			if _, err := jobRepo.FetchMetadata(job); err != nil {
-				log.Errorf("archiving job (dbid: %d) failed at check metadata step: %s", job.ID, err.Error())
+				cclog.Errorf("archiving job (dbid: %d) failed at check metadata step: %s", job.ID, err.Error())
 				jobRepo.UpdateMonitoringStatus(*job.ID, schema.MonitoringStatusArchivingFailed)
 				continue
 			}
@@ -49,7 +49,7 @@ func archivingWorker() {
 			// TODO: Maybe use context with cancel/timeout here
 			jobMeta, err := ArchiveJob(job, context.Background())
 			if err != nil {
-				log.Errorf("archiving job (dbid: %d) failed at archiving job step: %s", job.ID, err.Error())
+				cclog.Errorf("archiving job (dbid: %d) failed at archiving job step: %s", job.ID, err.Error())
 				jobRepo.UpdateMonitoringStatus(*job.ID, schema.MonitoringStatusArchivingFailed)
 				continue
 			}
@@ -57,21 +57,21 @@ func archivingWorker() {
 
 			stmt := sq.Update("job").Where("job.id = ?", job.ID)
 
 			if stmt, err = jobRepo.UpdateFootprint(stmt, jobMeta); err != nil {
-				log.Errorf("archiving job (dbid: %d) failed at update Footprint step: %s", job.ID, err.Error())
+				cclog.Errorf("archiving job (dbid: %d) failed at update Footprint step: %s", job.ID, err.Error())
 				continue
 			}
 			if stmt, err = jobRepo.UpdateEnergy(stmt, jobMeta); err != nil {
-				log.Errorf("archiving job (dbid: %d) failed at update Energy step: %s", job.ID, err.Error())
+				cclog.Errorf("archiving job (dbid: %d) failed at update Energy step: %s", job.ID, err.Error())
 				continue
 			}
 			// Update the jobs database entry one last time:
 			stmt = jobRepo.MarkArchived(stmt, schema.MonitoringStatusArchivingSuccessful)
 			if err := jobRepo.Execute(stmt); err != nil {
-				log.Errorf("archiving job (dbid: %d) failed at db execute: %s", job.ID, err.Error())
+				cclog.Errorf("archiving job (dbid: %d) failed at db execute: %s", job.ID, err.Error())
 				continue
 			}
-			log.Debugf("archiving job %d took %s", job.JobID, time.Since(start))
-			log.Printf("archiving job (dbid: %d) successful", job.ID)
+			cclog.Debugf("archiving job %d took %s", job.JobID, time.Since(start))
+			cclog.Printf("archiving job (dbid: %d) successful", job.ID)
 
 			repository.CallJobStopHooks(job)
 			archivePending.Done()
@@ -84,7 +84,7 @@ func archivingWorker() {
 // Trigger async archiving
 func TriggerArchiving(job *schema.Job) {
 	if archiveChannel == nil {
-		log.Fatal("Cannot archive without archiving channel. Did you Start the archiver?")
+		cclog.Fatal("Cannot archive without archiving channel. Did you Start the archiver?")
 	}
 
 	archivePending.Add(1)
diff --git a/internal/archiver/archiver.go b/internal/archiver/archiver.go
index b220d3b..e21be13 100644
--- a/internal/archiver/archiver.go
+++ b/internal/archiver/archiver.go
@@ -1,5 +1,5 @@
 // Copyright (C) NHR@FAU, University Erlangen-Nuremberg.
-// All rights reserved.
+// All rights reserved. This file is part of cc-backend.
 // Use of this source code is governed by a MIT-style
 // license that can be found in the LICENSE file.
package archiver @@ -11,8 +11,8 @@ import ( "github.com/ClusterCockpit/cc-backend/internal/config" "github.com/ClusterCockpit/cc-backend/internal/metricDataDispatcher" "github.com/ClusterCockpit/cc-backend/pkg/archive" - "github.com/ClusterCockpit/cc-backend/pkg/log" - "github.com/ClusterCockpit/cc-backend/pkg/schema" + cclog "github.com/ClusterCockpit/cc-lib/ccLogger" + "github.com/ClusterCockpit/cc-lib/schema" ) // Writes a running job to the job-archive func ArchiveJob(job *schema.Job, ctx context.Context) (*schema.Job, error) { @@ -36,7 +36,7 @@ func ArchiveJob(job *schema.Job, ctx context.Context) (*schema.Job, error) { jobData, err := metricDataDispatcher.LoadData(job, allMetrics, scopes, ctx, 0) // 0 Resulotion-Value retrieves highest res (60s) if err != nil { - log.Error("Error wile loading job data for archiving") + cclog.Error("Error while loading job data for archiving") return nil, err } diff --git a/internal/auth/auth.go b/internal/auth/auth.go index 3e57768..ad78397 100644 --- a/internal/auth/auth.go +++ b/internal/auth/auth.go @@ -1,5 +1,5 @@ // Copyright (C) NHR@FAU, University Erlangen-Nuremberg. -// All rights reserved. +// All rights reserved. This file is part of cc-backend. // Use of this source code is governed by a MIT-style // license that can be found in the LICENSE file. package auth @@ -22,9 +22,9 @@ import ( "github.com/ClusterCockpit/cc-backend/internal/config" "github.com/ClusterCockpit/cc-backend/internal/repository" - "github.com/ClusterCockpit/cc-backend/internal/util" - "github.com/ClusterCockpit/cc-backend/pkg/log" - "github.com/ClusterCockpit/cc-backend/pkg/schema" + cclog "github.com/ClusterCockpit/cc-lib/ccLogger" + "github.com/ClusterCockpit/cc-lib/schema" + "github.com/ClusterCockpit/cc-lib/util" "github.com/gorilla/sessions" ) @@ -66,7 +66,7 @@ func (auth *Authentication) AuthViaSession( ) (*schema.User, error) { session, err := auth.sessionStore.Get(r, "session") if err != nil { - log.Error("Error while getting session store") + cclog.Error("Error while getting session store") return nil, err } @@ -93,16 +93,16 @@ func Init() { sessKey := os.Getenv("SESSION_KEY") if sessKey == "" { - log.Warn("environment variable 'SESSION_KEY' not set (will use non-persistent random key)") + cclog.Warn("environment variable 'SESSION_KEY' not set (will use non-persistent random key)") bytes := make([]byte, 32) if _, err := rand.Read(bytes); err != nil { - log.Fatal("Error while initializing authentication -> failed to generate random bytes for session key") + cclog.Fatal("Error while initializing authentication -> failed to generate random bytes for session key") } authInstance.sessionStore = sessions.NewCookieStore(bytes) } else { bytes, err := base64.StdEncoding.DecodeString(sessKey) if err != nil { - log.Fatal("Error while initializing authentication -> decoding session key failed") + cclog.Fatal("Error while initializing authentication -> decoding session key failed") } authInstance.sessionStore = sessions.NewCookieStore(bytes) } @@ -114,41 +114,41 @@ func Init() { if config.Keys.LdapConfig != nil { ldapAuth := &LdapAuthenticator{} if err := ldapAuth.Init(); err != nil { - log.Warn("Error while initializing authentication -> ldapAuth init failed") + cclog.Warn("Error while initializing authentication -> ldapAuth init failed") } else { authInstance.LdapAuth = ldapAuth authInstance.authenticators = append(authInstance.authenticators, authInstance.LdapAuth) } } else { - log.Info("Missing LDAP configuration: No LDAP support!") + cclog.Info("Missing LDAP configuration: No LDAP support!") } if config.Keys.JwtConfig != nil {
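Init above builds the gorilla/sessions cookie store either from SESSION_KEY (base64-encoded) or, when the variable is unset, from fresh random bytes, which makes sessions non-persistent across restarts. That branch in isolation, as a sketch:

package main

import (
	"crypto/rand"
	"encoding/base64"
	"fmt"
	"os"

	"github.com/gorilla/sessions"
)

func newSessionStore() (*sessions.CookieStore, error) {
	if sessKey := os.Getenv("SESSION_KEY"); sessKey != "" {
		bytes, err := base64.StdEncoding.DecodeString(sessKey)
		if err != nil {
			return nil, fmt.Errorf("decoding session key failed: %w", err)
		}
		return sessions.NewCookieStore(bytes), nil
	}
	// No configured key: sessions will not survive a process restart.
	bytes := make([]byte, 32)
	if _, err := rand.Read(bytes); err != nil {
		return nil, fmt.Errorf("generating random session key failed: %w", err)
	}
	return sessions.NewCookieStore(bytes), nil
}

func main() {
	if _, err := newSessionStore(); err != nil {
		fmt.Println(err)
	}
}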
authInstance.JwtAuth = &JWTAuthenticator{} if err := authInstance.JwtAuth.Init(); err != nil { - log.Fatal("Error while initializing authentication -> jwtAuth init failed") + cclog.Fatal("Error while initializing authentication -> jwtAuth init failed") } jwtSessionAuth := &JWTSessionAuthenticator{} if err := jwtSessionAuth.Init(); err != nil { - log.Info("jwtSessionAuth init failed: No JWT login support!") + cclog.Info("jwtSessionAuth init failed: No JWT login support!") } else { authInstance.authenticators = append(authInstance.authenticators, jwtSessionAuth) } jwtCookieSessionAuth := &JWTCookieSessionAuthenticator{} if err := jwtCookieSessionAuth.Init(); err != nil { - log.Info("jwtCookieSessionAuth init failed: No JWT cookie login support!") + cclog.Info("jwtCookieSessionAuth init failed: No JWT cookie login support!") } else { authInstance.authenticators = append(authInstance.authenticators, jwtCookieSessionAuth) } } else { - log.Info("Missing JWT configuration: No JWT token support!") + cclog.Info("Missing JWT configuration: No JWT token support!") } authInstance.LocalAuth = &LocalAuthenticator{} if err := authInstance.LocalAuth.Init(); err != nil { - log.Fatal("Error while initializing authentication -> localAuth init failed") + cclog.Fatal("Error while initializing authentication -> localAuth init failed") } authInstance.authenticators = append(authInstance.authenticators, authInstance.LocalAuth) }) @@ -156,7 +156,7 @@ func Init() { func GetAuthInstance() *Authentication { if authInstance == nil { - log.Fatal("Authentication module not initialized!") + cclog.Fatal("Authentication module not initialized!") } return authInstance @@ -167,14 +167,14 @@ func handleTokenUser(tokenUser *schema.User) { dbUser, err := r.GetUser(tokenUser.Username) if err != nil && err != sql.ErrNoRows { - log.Errorf("Error while loading user '%s': %v", tokenUser.Username, err) + cclog.Errorf("Error while loading user '%s': %v", tokenUser.Username, err) } else if err == sql.ErrNoRows && config.Keys.JwtConfig.SyncUserOnLogin { // Adds New User if err := r.AddUser(tokenUser); err != nil { - log.Errorf("Error while adding user '%s' to DB: %v", tokenUser.Username, err) + cclog.Errorf("Error while adding user '%s' to DB: %v", tokenUser.Username, err) } } else if err == nil && config.Keys.JwtConfig.UpdateUserOnLogin { // Update Existing User if err := r.UpdateUser(dbUser, tokenUser); err != nil { - log.Errorf("Error while updating user '%s' to DB: %v", dbUser.Username, err) + cclog.Errorf("Error while updating user '%s' to DB: %v", dbUser.Username, err) } } } @@ -184,14 +184,14 @@ func handleOIDCUser(OIDCUser *schema.User) { dbUser, err := r.GetUser(OIDCUser.Username) if err != nil && err != sql.ErrNoRows { - log.Errorf("Error while loading user '%s': %v", OIDCUser.Username, err) + cclog.Errorf("Error while loading user '%s': %v", OIDCUser.Username, err) } else if err == sql.ErrNoRows && config.Keys.OpenIDConfig.SyncUserOnLogin { // Adds New User if err := r.AddUser(OIDCUser); err != nil { - log.Errorf("Error while adding user '%s' to DB: %v", OIDCUser.Username, err) + cclog.Errorf("Error while adding user '%s' to DB: %v", OIDCUser.Username, err) } } else if err == nil && config.Keys.OpenIDConfig.UpdateUserOnLogin { // Update Existing User if err := r.UpdateUser(dbUser, OIDCUser); err != nil { - log.Errorf("Error while updating user '%s' to DB: %v", dbUser.Username, err) + cclog.Errorf("Error while updating user '%s' to DB: %v", dbUser.Username, err) } } } @@ -199,7 +199,7 @@ func handleOIDCUser(OIDCUser 
*schema.User) { func (auth *Authentication) SaveSession(rw http.ResponseWriter, r *http.Request, user *schema.User) error { session, err := auth.sessionStore.New(r, "session") if err != nil { - log.Errorf("session creation failed: %s", err.Error()) + cclog.Errorf("session creation failed: %s", err.Error()) http.Error(rw, err.Error(), http.StatusInternalServerError) return err } @@ -215,7 +215,7 @@ func (auth *Authentication) SaveSession(rw http.ResponseWriter, r *http.Request, session.Values["projects"] = user.Projects session.Values["roles"] = user.Roles if err := auth.sessionStore.Save(r, rw, session); err != nil { - log.Warnf("session save failed: %s", err.Error()) + cclog.Warnf("session save failed: %s", err.Error()) http.Error(rw, err.Error(), http.StatusInternalServerError) return err } @@ -236,7 +236,7 @@ func (auth *Authentication) Login( limiter := getIPUserLimiter(ip, username) if !limiter.Allow() { - log.Warnf("AUTH/RATE > Too many login attempts for combination IP: %s, Username: %s", ip, username) + cclog.Warnf("AUTH/RATE > Too many login attempts for combination IP: %s, Username: %s", ip, username) onfailure(rw, r, errors.New("too many login attempts, try again in a few minutes")) return } @@ -246,7 +246,7 @@ func (auth *Authentication) Login( var err error dbUser, err = repository.GetUserRepository().GetUser(username) if err != nil && err != sql.ErrNoRows { - log.Errorf("Error while loading user '%v'", username) + cclog.Errorf("Error while loading user '%v'", username) } } @@ -256,12 +256,12 @@ func (auth *Authentication) Login( if user, ok = authenticator.CanLogin(dbUser, username, rw, r); !ok { continue } else { - log.Debugf("Can login with user %v", user) + cclog.Debugf("Can login with user %v", user) } user, err := authenticator.Login(user, rw, r) if err != nil { - log.Warnf("user login failed: %s", err.Error()) + cclog.Warnf("user login failed: %s", err.Error()) onfailure(rw, r, err) return } @@ -270,7 +270,7 @@ func (auth *Authentication) Login( return } - log.Infof("login successfull: user: %#v (roles: %v, projects: %v)", user.Username, user.Roles, user.Projects) + cclog.Infof("login successful: user: %#v (roles: %v, projects: %v)", user.Username, user.Roles, user.Projects) ctx := context.WithValue(r.Context(), repository.ContextUserKey, user) if r.FormValue("redirect") != "" { @@ -282,7 +282,7 @@ func (auth *Authentication) Login( return } - log.Debugf("login failed: no authenticator applied") + cclog.Debugf("login failed: no authenticator applied") onfailure(rw, r, errors.New("no authenticator applied")) }) } @@ -294,14 +294,14 @@ func (auth *Authentication) Auth( return http.HandlerFunc(func(rw http.ResponseWriter, r *http.Request) { user, err := auth.JwtAuth.AuthViaJWT(rw, r) if err != nil { - log.Infof("auth -> authentication failed: %s", err.Error()) + cclog.Infof("auth -> authentication failed: %s", err.Error()) http.Error(rw, err.Error(), http.StatusUnauthorized) return } if user == nil { user, err = auth.AuthViaSession(rw, r) if err != nil { - log.Infof("auth -> authentication failed: %s", err.Error()) + cclog.Infof("auth -> authentication failed: %s", err.Error()) http.Error(rw, err.Error(), http.StatusUnauthorized) return } @@ -312,7 +312,7 @@ func (auth *Authentication) Auth( return } - log.Info("auth -> authentication failed") + cclog.Info("auth -> authentication failed") onfailure(rw, r, errors.New("unauthorized (please login first)")) }) } @@ -324,14 +324,14 @@ func (auth *Authentication) AuthApi( return http.HandlerFunc(func(rw http.ResponseWriter, r
*http.Request) { user, err := auth.JwtAuth.AuthViaJWT(rw, r) if err != nil { - log.Infof("auth api -> authentication failed: %s", err.Error()) + cclog.Infof("auth api -> authentication failed: %s", err.Error()) onfailure(rw, r, err) return } ipErr := securedCheck(user, r) if ipErr != nil { - log.Infof("auth api -> secured check failed: %s", ipErr.Error()) + cclog.Infof("auth api -> secured check failed: %s", ipErr.Error()) onfailure(rw, r, ipErr) return } @@ -351,11 +351,11 @@ func (auth *Authentication) AuthApi( return } default: - log.Info("auth api -> authentication failed: missing role") + cclog.Info("auth api -> authentication failed: missing role") onfailure(rw, r, errors.New("unauthorized")) } } - log.Info("auth api -> authentication failed: no auth") + cclog.Info("auth api -> authentication failed: no auth") onfailure(rw, r, errors.New("unauthorized")) }) } @@ -367,7 +367,7 @@ func (auth *Authentication) AuthUserApi( return http.HandlerFunc(func(rw http.ResponseWriter, r *http.Request) { user, err := auth.JwtAuth.AuthViaJWT(rw, r) if err != nil { - log.Infof("auth user api -> authentication failed: %s", err.Error()) + cclog.Infof("auth user api -> authentication failed: %s", err.Error()) onfailure(rw, r, err) return } @@ -387,11 +387,11 @@ func (auth *Authentication) AuthUserApi( return } default: - log.Info("auth user api -> authentication failed: missing role") + cclog.Info("auth user api -> authentication failed: missing role") onfailure(rw, r, errors.New("unauthorized")) } } - log.Info("auth user api -> authentication failed: no auth") + cclog.Info("auth user api -> authentication failed: no auth") onfailure(rw, r, errors.New("unauthorized")) }) } @@ -403,7 +403,7 @@ func (auth *Authentication) AuthConfigApi( return http.HandlerFunc(func(rw http.ResponseWriter, r *http.Request) { user, err := auth.AuthViaSession(rw, r) if err != nil { - log.Infof("auth config api -> authentication failed: %s", err.Error()) + cclog.Infof("auth config api -> authentication failed: %s", err.Error()) onfailure(rw, r, err) return } @@ -412,7 +412,7 @@ func (auth *Authentication) AuthConfigApi( onsuccess.ServeHTTP(rw, r.WithContext(ctx)) return } - log.Info("auth config api -> authentication failed: no auth") + cclog.Info("auth config api -> authentication failed: no auth") onfailure(rw, r, errors.New("unauthorized")) }) } @@ -424,7 +424,7 @@ func (auth *Authentication) AuthFrontendApi( return http.HandlerFunc(func(rw http.ResponseWriter, r *http.Request) { user, err := auth.AuthViaSession(rw, r) if err != nil { - log.Infof("auth frontend api -> authentication failed: %s", err.Error()) + cclog.Infof("auth frontend api -> authentication failed: %s", err.Error()) onfailure(rw, r, err) return } @@ -433,7 +433,7 @@ func (auth *Authentication) AuthFrontendApi( onsuccess.ServeHTTP(rw, r.WithContext(ctx)) return } - log.Info("auth frontend api -> authentication failed: no auth") + cclog.Info("auth frontend api -> authentication failed: no auth") onfailure(rw, r, errors.New("unauthorized")) }) } diff --git a/internal/auth/jwt.go b/internal/auth/jwt.go index 7bac278..2cc2c37 100644 --- a/internal/auth/jwt.go +++ b/internal/auth/jwt.go @@ -1,5 +1,5 @@ // Copyright (C) NHR@FAU, University Erlangen-Nuremberg. -// All rights reserved. +// All rights reserved. This file is part of cc-backend. // Use of this source code is governed by a MIT-style // license that can be found in the LICENSE file. 
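The Auth middleware above tries JWT authentication first and falls back to the session cookie only when no token was presented; any explicit error short-circuits to 401. A condensed sketch of that chain, with stand-in functions for AuthViaJWT and AuthViaSession:

package auth

import (
	"context"
	"net/http"
)

type User struct{ Username string }

type ctxKey struct{}

func authMiddleware(
	viaJWT, viaSession func(*http.Request) (*User, error),
	onSuccess http.Handler,
) http.Handler {
	return http.HandlerFunc(func(rw http.ResponseWriter, r *http.Request) {
		user, err := viaJWT(r)
		if err != nil {
			http.Error(rw, err.Error(), http.StatusUnauthorized)
			return
		}
		if user == nil { // no token presented: try the session instead
			if user, err = viaSession(r); err != nil {
				http.Error(rw, err.Error(), http.StatusUnauthorized)
				return
			}
		}
		if user == nil {
			http.Error(rw, "unauthorized (please login first)", http.StatusUnauthorized)
			return
		}
		ctx := context.WithValue(r.Context(), ctxKey{}, user)
		onSuccess.ServeHTTP(rw, r.WithContext(ctx))
	})
}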
package auth @@ -15,8 +15,8 @@ import ( "github.com/ClusterCockpit/cc-backend/internal/config" "github.com/ClusterCockpit/cc-backend/internal/repository" - "github.com/ClusterCockpit/cc-backend/pkg/log" - "github.com/ClusterCockpit/cc-backend/pkg/schema" + cclog "github.com/ClusterCockpit/cc-lib/ccLogger" + "github.com/ClusterCockpit/cc-lib/schema" "github.com/golang-jwt/jwt/v5" ) @@ -28,17 +28,17 @@ type JWTAuthenticator struct { func (ja *JWTAuthenticator) Init() error { pubKey, privKey := os.Getenv("JWT_PUBLIC_KEY"), os.Getenv("JWT_PRIVATE_KEY") if pubKey == "" || privKey == "" { - log.Warn("environment variables 'JWT_PUBLIC_KEY' or 'JWT_PRIVATE_KEY' not set (token based authentication will not work)") + cclog.Warn("environment variables 'JWT_PUBLIC_KEY' or 'JWT_PRIVATE_KEY' not set (token based authentication will not work)") } else { bytes, err := base64.StdEncoding.DecodeString(pubKey) if err != nil { - log.Warn("Could not decode JWT public key") + cclog.Warn("Could not decode JWT public key") return err } ja.publicKey = ed25519.PublicKey(bytes) bytes, err = base64.StdEncoding.DecodeString(privKey) if err != nil { - log.Warn("Could not decode JWT private key") + cclog.Warn("Could not decode JWT private key") return err } ja.privateKey = ed25519.PrivateKey(bytes) @@ -70,11 +70,11 @@ func (ja *JWTAuthenticator) AuthViaJWT( return ja.publicKey, nil }) if err != nil { - log.Warn("Error while parsing JWT token") + cclog.Warn("Error while parsing JWT token") return nil, err } if !token.Valid { - log.Warn("jwt token claims are not valid") + cclog.Warn("jwt token claims are not valid") return nil, errors.New("jwt token claims are not valid") } @@ -90,7 +90,7 @@ func (ja *JWTAuthenticator) AuthViaJWT( user, err := ur.GetUser(sub) // Deny any logins for unknown usernames if err != nil { - log.Warn("Could not find user from JWT in internal database.") + cclog.Warn("Could not find user from JWT in internal database.") return nil, errors.New("unknown user") } // Take user roles from database instead of trusting the JWT diff --git a/internal/auth/jwtCookieSession.go b/internal/auth/jwtCookieSession.go index 7e0e045..8f6d064 100644 --- a/internal/auth/jwtCookieSession.go +++ b/internal/auth/jwtCookieSession.go @@ -1,5 +1,5 @@ // Copyright (C) NHR@FAU, University Erlangen-Nuremberg. -// All rights reserved. +// All rights reserved. This file is part of cc-backend. // Use of this source code is governed by a MIT-style // license that can be found in the LICENSE file. 
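JWTAuthenticator.Init above reads both keys as base64-encoded raw ed25519 key material from the environment. The decode step, pulled out as a standalone sketch:

package main

import (
	"crypto/ed25519"
	"encoding/base64"
	"fmt"
	"os"
)

func loadKeys() (ed25519.PublicKey, ed25519.PrivateKey, error) {
	pubB64, privB64 := os.Getenv("JWT_PUBLIC_KEY"), os.Getenv("JWT_PRIVATE_KEY")
	if pubB64 == "" || privB64 == "" {
		return nil, nil, fmt.Errorf("JWT key environment variables not set")
	}
	pub, err := base64.StdEncoding.DecodeString(pubB64)
	if err != nil {
		return nil, nil, fmt.Errorf("decoding public key: %w", err)
	}
	priv, err := base64.StdEncoding.DecodeString(privB64)
	if err != nil {
		return nil, nil, fmt.Errorf("decoding private key: %w", err)
	}
	// The decoded bytes are used directly as raw key material.
	return ed25519.PublicKey(pub), ed25519.PrivateKey(priv), nil
}

func main() {
	if _, _, err := loadKeys(); err != nil {
		fmt.Println(err)
	}
}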
package auth @@ -15,8 +15,8 @@ import ( "github.com/ClusterCockpit/cc-backend/internal/config" "github.com/ClusterCockpit/cc-backend/internal/repository" - "github.com/ClusterCockpit/cc-backend/pkg/log" - "github.com/ClusterCockpit/cc-backend/pkg/schema" + cclog "github.com/ClusterCockpit/cc-lib/ccLogger" + "github.com/ClusterCockpit/cc-lib/schema" "github.com/golang-jwt/jwt/v5" ) @@ -31,18 +31,18 @@ var _ Authenticator = (*JWTCookieSessionAuthenticator)(nil) func (ja *JWTCookieSessionAuthenticator) Init() error { pubKey, privKey := os.Getenv("JWT_PUBLIC_KEY"), os.Getenv("JWT_PRIVATE_KEY") if pubKey == "" || privKey == "" { - log.Warn("environment variables 'JWT_PUBLIC_KEY' or 'JWT_PRIVATE_KEY' not set (token based authentication will not work)") + cclog.Warn("environment variables 'JWT_PUBLIC_KEY' or 'JWT_PRIVATE_KEY' not set (token based authentication will not work)") return errors.New("environment variables 'JWT_PUBLIC_KEY' or 'JWT_PRIVATE_KEY' not set (token based authentication will not work)") } else { bytes, err := base64.StdEncoding.DecodeString(pubKey) if err != nil { - log.Warn("Could not decode JWT public key") + cclog.Warn("Could not decode JWT public key") return err } ja.publicKey = ed25519.PublicKey(bytes) bytes, err = base64.StdEncoding.DecodeString(privKey) if err != nil { - log.Warn("Could not decode JWT private key") + cclog.Warn("Could not decode JWT private key") return err } ja.privateKey = ed25519.PrivateKey(bytes) @@ -53,13 +53,13 @@ func (ja *JWTCookieSessionAuthenticator) Init() error { if keyFound && pubKeyCrossLogin != "" { bytes, err := base64.StdEncoding.DecodeString(pubKeyCrossLogin) if err != nil { - log.Warn("Could not decode cross login JWT public key") + cclog.Warn("Could not decode cross login JWT public key") return err } ja.publicKeyCrossLogin = ed25519.PublicKey(bytes) } else { ja.publicKeyCrossLogin = nil - log.Debug("environment variable 'CROSS_LOGIN_JWT_PUBLIC_KEY' not set (cross login token based authentication will not work)") + cclog.Debug("environment variable 'CROSS_LOGIN_JWT_PUBLIC_KEY' not set (cross login token based authentication will not work)") return errors.New("environment variable 'CROSS_LOGIN_JWT_PUBLIC_KEY' not set (cross login token based authentication will not work)") } @@ -67,22 +67,22 @@ func (ja *JWTCookieSessionAuthenticator) Init() error { // Warn if other necessary settings are not configured if jc != nil { if jc.CookieName == "" { - log.Info("cookieName for JWTs not configured (cross login via JWT cookie will fail)") + cclog.Info("cookieName for JWTs not configured (cross login via JWT cookie will fail)") return errors.New("cookieName for JWTs not configured (cross login via JWT cookie will fail)") } if !jc.ValidateUser { - log.Info("forceJWTValidationViaDatabase not set to true: CC will accept users and roles defined in JWTs regardless of its own database!") + cclog.Info("forceJWTValidationViaDatabase not set to true: CC will accept users and roles defined in JWTs regardless of its own database!") } if jc.TrustedIssuer == "" { - log.Info("trustedExternalIssuer for JWTs not configured (cross login via JWT cookie will fail)") + cclog.Info("trustedExternalIssuer for JWTs not configured (cross login via JWT cookie will fail)") return errors.New("trustedExternalIssuer for JWTs not configured (cross login via JWT cookie will fail)") } } else { - log.Warn("config for JWTs not configured (cross login via JWT cookie will fail)") + cclog.Warn("config for JWTs not configured (cross login via JWT cookie will fail)") return 
errors.New("config for JWTs not configured (cross login via JWT cookie will fail)") } - log.Info("JWT Cookie Session authenticator successfully registered") + cclog.Info("JWT Cookie Session authenticator successfully registered") return nil } @@ -140,12 +140,12 @@ func (ja *JWTCookieSessionAuthenticator) Login( return ja.publicKey, nil }) if err != nil { - log.Warn("JWT cookie session: error while parsing token") + cclog.Warn("JWT cookie session: error while parsing token") return nil, err } if !token.Valid { - log.Warn("jwt token claims are not valid") + cclog.Warn("jwt token claims are not valid") return nil, errors.New("jwt token claims are not valid") } @@ -159,12 +159,12 @@ func (ja *JWTCookieSessionAuthenticator) Login( var err error user, err = repository.GetUserRepository().GetUser(sub) if err != nil && err != sql.ErrNoRows { - log.Errorf("Error while loading user '%v'", sub) + cclog.Errorf("Error while loading user '%v'", sub) } // Deny any logins for unknown usernames if user == nil { - log.Warn("Could not find user from JWT in internal database.") + cclog.Warn("Could not find user from JWT in internal database.") return nil, errors.New("unknown user") } } else { diff --git a/internal/auth/jwtSession.go b/internal/auth/jwtSession.go index 67457ee..9c79e72 100644 --- a/internal/auth/jwtSession.go +++ b/internal/auth/jwtSession.go @@ -1,5 +1,5 @@ // Copyright (C) NHR@FAU, University Erlangen-Nuremberg. -// All rights reserved. +// All rights reserved. This file is part of cc-backend. // Use of this source code is governed by a MIT-style // license that can be found in the LICENSE file. package auth @@ -15,8 +15,8 @@ import ( "github.com/ClusterCockpit/cc-backend/internal/config" "github.com/ClusterCockpit/cc-backend/internal/repository" - "github.com/ClusterCockpit/cc-backend/pkg/log" - "github.com/ClusterCockpit/cc-backend/pkg/schema" + cclog "github.com/ClusterCockpit/cc-lib/ccLogger" + "github.com/ClusterCockpit/cc-lib/schema" "github.com/golang-jwt/jwt/v5" ) @@ -30,13 +30,13 @@ func (ja *JWTSessionAuthenticator) Init() error { if pubKey := os.Getenv("CROSS_LOGIN_JWT_HS512_KEY"); pubKey != "" { bytes, err := base64.StdEncoding.DecodeString(pubKey) if err != nil { - log.Warn("Could not decode cross login JWT HS512 key") + cclog.Warn("Could not decode cross login JWT HS512 key") return err } ja.loginTokenKey = bytes } - log.Info("JWT Session authenticator successfully registered") + cclog.Info("JWT Session authenticator successfully registered") return nil } @@ -67,12 +67,12 @@ func (ja *JWTSessionAuthenticator) Login( return nil, fmt.Errorf("unkown signing method for login token: %s (known: HS256, HS512, EdDSA)", t.Method.Alg()) }) if err != nil { - log.Warn("Error while parsing jwt token") + cclog.Warn("Error while parsing jwt token") return nil, err } if !token.Valid { - log.Warn("jwt token claims are not valid") + cclog.Warn("jwt token claims are not valid") return nil, errors.New("jwt token claims are not valid") } @@ -86,12 +86,12 @@ func (ja *JWTSessionAuthenticator) Login( var err error user, err = repository.GetUserRepository().GetUser(sub) if err != nil && err != sql.ErrNoRows { - log.Errorf("Error while loading user '%v'", sub) + cclog.Errorf("Error while loading user '%v'", sub) } // Deny any logins for unknown usernames if user == nil { - log.Warn("Could not find user from JWT in internal database.") + cclog.Warn("Could not find user from JWT in internal database.") return nil, errors.New("unknown user") } } else { diff --git a/internal/auth/ldap.go 
b/internal/auth/ldap.go index cc7c4f6..d7843e4 100644 --- a/internal/auth/ldap.go +++ b/internal/auth/ldap.go @@ -1,5 +1,5 @@ // Copyright (C) NHR@FAU, University Erlangen-Nuremberg. -// All rights reserved. +// All rights reserved. This file is part of cc-backend. // Use of this source code is governed by a MIT-style // license that can be found in the LICENSE file. package auth @@ -13,8 +13,8 @@ import ( "github.com/ClusterCockpit/cc-backend/internal/config" "github.com/ClusterCockpit/cc-backend/internal/repository" - "github.com/ClusterCockpit/cc-backend/pkg/log" - "github.com/ClusterCockpit/cc-backend/pkg/schema" + cclog "github.com/ClusterCockpit/cc-lib/ccLogger" + "github.com/ClusterCockpit/cc-lib/schema" "github.com/go-ldap/ldap/v3" ) @@ -28,7 +28,7 @@ var _ Authenticator = (*LdapAuthenticator)(nil) func (la *LdapAuthenticator) Init() error { la.syncPassword = os.Getenv("LDAP_ADMIN_PASSWORD") if la.syncPassword == "" { - log.Warn("environment variable 'LDAP_ADMIN_PASSWORD' not set (ldap sync will not work)") + cclog.Warn("environment variable 'LDAP_ADMIN_PASSWORD' not set (ldap sync will not work)") } lc := config.Keys.LdapConfig @@ -58,7 +58,7 @@ func (la *LdapAuthenticator) CanLogin( if lc.SyncUserOnLogin { l, err := la.getLdapConnection(true) if err != nil { - log.Error("LDAP connection error") + cclog.Error("LDAP connection error") } defer l.Close() @@ -71,12 +71,12 @@ func (la *LdapAuthenticator) CanLogin( sr, err := l.Search(searchRequest) if err != nil { - log.Warn(err) + cclog.Warn(err) return nil, false } if len(sr.Entries) != 1 { - log.Warn("LDAP: User does not exist or too many entries returned") + cclog.Warn("LDAP: User does not exist or too many entries returned") return nil, false } @@ -96,7 +96,7 @@ func (la *LdapAuthenticator) CanLogin( } if err := repository.GetUserRepository().AddUser(user); err != nil { - log.Errorf("User '%s' LDAP: Insert into DB failed", username) + cclog.Errorf("User '%s' LDAP: Insert into DB failed", username) return nil, false } @@ -114,14 +114,14 @@ func (la *LdapAuthenticator) Login( ) (*schema.User, error) { l, err := la.getLdapConnection(false) if err != nil { - log.Warn("Error while getting ldap connection") + cclog.Warn("Error while getting ldap connection") return nil, err } defer l.Close() userDn := strings.Replace(config.Keys.LdapConfig.UserBind, "{username}", user.Username, -1) if err := l.Bind(userDn, r.FormValue("password")); err != nil { - log.Errorf("AUTH/LDAP > Authentication for user %s failed: %v", + cclog.Errorf("AUTH/LDAP > Authentication for user %s failed: %v", user.Username, err) return nil, fmt.Errorf("Authentication failed") } @@ -148,7 +148,7 @@ func (la *LdapAuthenticator) Sync() error { l, err := la.getLdapConnection(true) if err != nil { - log.Error("LDAP connection error") + cclog.Error("LDAP connection error") return err } defer l.Close() @@ -159,7 +159,7 @@ func (la *LdapAuthenticator) Sync() error { lc.UserFilter, []string{"dn", "uid", la.UserAttr}, nil)) if err != nil { - log.Warn("LDAP search error") + cclog.Warn("LDAP search error") return err } @@ -182,7 +182,7 @@ func (la *LdapAuthenticator) Sync() error { for username, where := range users { if where == IN_DB && lc.SyncDelOldUsers { ur.DelUser(username) - log.Debugf("sync: remove %v (does not show up in LDAP anymore)", username) + cclog.Debugf("sync: remove %v (does not show up in LDAP anymore)", username) } else if where == IN_LDAP { name := newnames[username] @@ -198,9 +198,9 @@ func (la *LdapAuthenticator) Sync() error { AuthSource: schema.AuthViaLDAP, 
} - log.Debugf("sync: add %v (name: %v, roles: [user], ldap: true)", username, name) + cclog.Debugf("sync: add %v (name: %v, roles: [user], ldap: true)", username, name) if err := ur.AddUser(user); err != nil { - log.Errorf("User '%s' LDAP: Insert into DB failed", username) + cclog.Errorf("User '%s' LDAP: Insert into DB failed", username) return err } } @@ -213,14 +213,14 @@ func (la *LdapAuthenticator) getLdapConnection(admin bool) (*ldap.Conn, error) { lc := config.Keys.LdapConfig conn, err := ldap.DialURL(lc.Url) if err != nil { - log.Warn("LDAP URL dial failed") + cclog.Warn("LDAP URL dial failed") return nil, err } if admin { if err := conn.Bind(lc.SearchDN, la.syncPassword); err != nil { conn.Close() - log.Warn("LDAP connection bind failed") + cclog.Warn("LDAP connection bind failed") return nil, err } } diff --git a/internal/auth/local.go b/internal/auth/local.go index 8d39793..5dc0bf4 100644 --- a/internal/auth/local.go +++ b/internal/auth/local.go @@ -1,5 +1,5 @@ // Copyright (C) NHR@FAU, University Erlangen-Nuremberg. -// All rights reserved. +// All rights reserved. This file is part of cc-backend. // Use of this source code is governed by a MIT-style // license that can be found in the LICENSE file. package auth @@ -8,8 +8,8 @@ import ( "fmt" "net/http" - "github.com/ClusterCockpit/cc-backend/pkg/log" - "github.com/ClusterCockpit/cc-backend/pkg/schema" + cclog "github.com/ClusterCockpit/cc-lib/ccLogger" + "github.com/ClusterCockpit/cc-lib/schema" "golang.org/x/crypto/bcrypt" ) @@ -27,19 +27,19 @@ func (la *LocalAuthenticator) CanLogin( user *schema.User, username string, rw http.ResponseWriter, - r *http.Request) (*schema.User, bool) { - + r *http.Request, +) (*schema.User, bool) { return user, user != nil && user.AuthSource == schema.AuthViaLocalPassword } func (la *LocalAuthenticator) Login( user *schema.User, rw http.ResponseWriter, - r *http.Request) (*schema.User, error) { - + r *http.Request, +) (*schema.User, error) { if e := bcrypt.CompareHashAndPassword([]byte(user.Password), []byte(r.FormValue("password"))); e != nil { - log.Errorf("AUTH/LOCAL > Authentication for user %s failed!", user.Username) + cclog.Errorf("AUTH/LOCAL > Authentication for user %s failed!", user.Username) return nil, fmt.Errorf("Authentication failed") } diff --git a/internal/auth/oidc.go b/internal/auth/oidc.go index ba1c9da..f688aab 100644 --- a/internal/auth/oidc.go +++ b/internal/auth/oidc.go @@ -1,5 +1,5 @@ // Copyright (C) NHR@FAU, University Erlangen-Nuremberg. -// All rights reserved. +// All rights reserved. This file is part of cc-backend. // Use of this source code is governed by a MIT-style // license that can be found in the LICENSE file. 
package auth @@ -15,8 +15,8 @@ import ( "github.com/ClusterCockpit/cc-backend/internal/config" "github.com/ClusterCockpit/cc-backend/internal/repository" - "github.com/ClusterCockpit/cc-backend/pkg/log" - "github.com/ClusterCockpit/cc-backend/pkg/schema" + cclog "github.com/ClusterCockpit/cc-lib/ccLogger" + "github.com/ClusterCockpit/cc-lib/schema" "github.com/coreos/go-oidc/v3/oidc" "github.com/gorilla/mux" "golang.org/x/oauth2" @@ -51,15 +51,15 @@ func setCallbackCookie(w http.ResponseWriter, r *http.Request, name, value strin func NewOIDC(a *Authentication) *OIDC { provider, err := oidc.NewProvider(context.Background(), config.Keys.OpenIDConfig.Provider) if err != nil { - log.Fatal(err) + cclog.Fatal(err) } clientID := os.Getenv("OID_CLIENT_ID") if clientID == "" { - log.Warn("environment variable 'OID_CLIENT_ID' not set (Open ID connect auth will not work)") + cclog.Warn("environment variable 'OID_CLIENT_ID' not set (Open ID connect auth will not work)") } clientSecret := os.Getenv("OID_CLIENT_SECRET") if clientSecret == "" { - log.Warn("environment variable 'OID_CLIENT_SECRET' not set (Open ID connect auth will not work)") + cclog.Warn("environment variable 'OID_CLIENT_SECRET' not set (Open ID connect auth will not work)") } client := &oauth2.Config{ @@ -173,7 +173,7 @@ func (oa *OIDC) OAuth2Callback(rw http.ResponseWriter, r *http.Request) { } oa.authentication.SaveSession(rw, r, user) - log.Infof("login successfull: user: %#v (roles: %v, projects: %v)", user.Username, user.Roles, user.Projects) + cclog.Infof("login successful: user: %#v (roles: %v, projects: %v)", user.Username, user.Roles, user.Projects) ctx := context.WithValue(r.Context(), repository.ContextUserKey, user) http.RedirectHandler("/", http.StatusTemporaryRedirect).ServeHTTP(rw, r.WithContext(ctx)) } diff --git a/internal/config/config.go b/internal/config/config.go index 31760c7..bb965b8 100644 --- a/internal/config/config.go +++ b/internal/config/config.go @@ -1,5 +1,5 @@ // Copyright (C) NHR@FAU, University Erlangen-Nuremberg. -// All rights reserved. +// All rights reserved. This file is part of cc-backend. // Use of this source code is governed by a MIT-style // license that can be found in the LICENSE file.
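NewOIDC above discovers the provider endpoints and then builds an oauth2.Config whose client credentials come from the environment; the hunk cuts off at the struct literal, so every field below ClientID and ClientSecret is an assumption:

package auth

import (
	"context"
	"os"

	"github.com/coreos/go-oidc/v3/oidc"
	"golang.org/x/oauth2"
)

func newOAuthConfig(issuer, redirectURL string) (*oauth2.Config, error) {
	provider, err := oidc.NewProvider(context.Background(), issuer)
	if err != nil {
		return nil, err
	}
	return &oauth2.Config{
		ClientID:     os.Getenv("OID_CLIENT_ID"),
		ClientSecret: os.Getenv("OID_CLIENT_SECRET"),
		Endpoint:     provider.Endpoint(),
		RedirectURL:  redirectURL,                           // assumed
		Scopes:       []string{oidc.ScopeOpenID, "profile"}, // assumed
	}, nil
}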
package config @@ -9,8 +9,8 @@ import ( "encoding/json" "os" - "github.com/ClusterCockpit/cc-backend/pkg/log" - "github.com/ClusterCockpit/cc-backend/pkg/schema" + cclog "github.com/ClusterCockpit/cc-lib/ccLogger" + "github.com/ClusterCockpit/cc-lib/schema" ) var Keys schema.ProgramConfig = schema.ProgramConfig{ @@ -53,20 +53,20 @@ func Init(flagConfigFile string) { raw, err := os.ReadFile(flagConfigFile) if err != nil { if !os.IsNotExist(err) { - log.Abortf("Config Init: Could not read config file '%s'.\nError: %s\n", flagConfigFile, err.Error()) + cclog.Abortf("Config Init: Could not read config file '%s'.\nError: %s\n", flagConfigFile, err.Error()) } } else { if err := schema.Validate(schema.Config, bytes.NewReader(raw)); err != nil { - log.Abortf("Config Init: Could not validate config file '%s'.\nError: %s\n", flagConfigFile, err.Error()) + cclog.Abortf("Config Init: Could not validate config file '%s'.\nError: %s\n", flagConfigFile, err.Error()) } dec := json.NewDecoder(bytes.NewReader(raw)) dec.DisallowUnknownFields() if err := dec.Decode(&Keys); err != nil { - log.Abortf("Config Init: Could not decode config file '%s'.\nError: %s\n", flagConfigFile, err.Error()) + cclog.Abortf("Config Init: Could not decode config file '%s'.\nError: %s\n", flagConfigFile, err.Error()) } if Keys.Clusters == nil || len(Keys.Clusters) < 1 { - log.Abort("Config Init: At least one cluster required in config. Exited with error.") + cclog.Abort("Config Init: At least one cluster required in config. Exited with error.") } } } diff --git a/internal/config/config_test.go b/internal/config/config_test.go index ed282be..993b6f0 100644 --- a/internal/config/config_test.go +++ b/internal/config/config_test.go @@ -1,5 +1,5 @@ // Copyright (C) NHR@FAU, University Erlangen-Nuremberg. -// All rights reserved. +// All rights reserved. This file is part of cc-backend. // Use of this source code is governed by a MIT-style // license that can be found in the LICENSE file. package config diff --git a/internal/config/default_metrics.go b/internal/config/default_metrics.go index b0a0cc5..48a0a0b 100644 --- a/internal/config/default_metrics.go +++ b/internal/config/default_metrics.go @@ -1,3 +1,7 @@ +// Copyright (C) NHR@FAU, University Erlangen-Nuremberg. +// All rights reserved. This file is part of cc-backend. +// Use of this source code is governed by a MIT-style +// license that can be found in the LICENSE file. 
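config.Init above validates the raw bytes against the JSON schema first, then decodes with DisallowUnknownFields so misspelled keys fail loudly instead of being silently dropped. The strict-decode half in isolation (the ProgramConfig field here is illustrative, not the real struct):

package main

import (
	"bytes"
	"encoding/json"
	"fmt"
)

type ProgramConfig struct {
	Addr string `json:"addr"`
}

func decodeStrict(raw []byte) (*ProgramConfig, error) {
	var cfg ProgramConfig
	dec := json.NewDecoder(bytes.NewReader(raw))
	dec.DisallowUnknownFields() // unknown keys become decode errors
	if err := dec.Decode(&cfg); err != nil {
		return nil, err
	}
	return &cfg, nil
}

func main() {
	_, err := decodeStrict([]byte(`{"adr": ":8080"}`)) // note the misspelled key
	fmt.Println(err) // json: unknown field "adr"
}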
package config import ( diff --git a/internal/graph/generated/generated.go b/internal/graph/generated/generated.go index 4f3b9fd..238270f 100644 --- a/internal/graph/generated/generated.go +++ b/internal/graph/generated/generated.go @@ -15,7 +15,7 @@ import ( "github.com/99designs/gqlgen/graphql" "github.com/99designs/gqlgen/graphql/introspection" "github.com/ClusterCockpit/cc-backend/internal/graph/model" - "github.com/ClusterCockpit/cc-backend/pkg/schema" + "github.com/ClusterCockpit/cc-lib/schema" gqlparser "github.com/vektah/gqlparser/v2" "github.com/vektah/gqlparser/v2/ast" ) @@ -2714,7 +2714,6 @@ type TimeRangeOutput { input NodeFilter { hostname: StringInput cluster: StringInput - subCluster: StringInput nodeState: NodeState healthState: MonitoringState } @@ -3261,7 +3260,7 @@ func (ec *executionContext) field_Query_jobMetrics_argsScopes( ctx = graphql.WithPathContext(ctx, graphql.NewPathWithField("scopes")) if tmp, ok := rawArgs["scopes"]; ok { - return ec.unmarshalOMetricScope2ᚕgithubᚗcomᚋClusterCockpitᚋccᚑbackendᚋpkgᚋschemaᚐMetricScopeᚄ(ctx, tmp) + return ec.unmarshalOMetricScope2ᚕgithubᚗcomᚋClusterCockpitᚋccᚑlibᚋschemaᚐMetricScopeᚄ(ctx, tmp) } var zeroVal []schema.MetricScope @@ -3822,7 +3821,7 @@ func (ec *executionContext) field_Query_nodeMetricsList_argsScopes( ctx = graphql.WithPathContext(ctx, graphql.NewPathWithField("scopes")) if tmp, ok := rawArgs["scopes"]; ok { - return ec.unmarshalOMetricScope2ᚕgithubᚗcomᚋClusterCockpitᚋccᚑbackendᚋpkgᚋschemaᚐMetricScopeᚄ(ctx, tmp) + return ec.unmarshalOMetricScope2ᚕgithubᚗcomᚋClusterCockpitᚋccᚑlibᚋschemaᚐMetricScopeᚄ(ctx, tmp) } var zeroVal []schema.MetricScope @@ -4001,7 +4000,7 @@ func (ec *executionContext) field_Query_nodeMetrics_argsScopes( ctx = graphql.WithPathContext(ctx, graphql.NewPathWithField("scopes")) if tmp, ok := rawArgs["scopes"]; ok { - return ec.unmarshalOMetricScope2ᚕgithubᚗcomᚋClusterCockpitᚋccᚑbackendᚋpkgᚋschemaᚐMetricScopeᚄ(ctx, tmp) + return ec.unmarshalOMetricScope2ᚕgithubᚗcomᚋClusterCockpitᚋccᚑlibᚋschemaᚐMetricScopeᚄ(ctx, tmp) } var zeroVal []schema.MetricScope @@ -4402,7 +4401,7 @@ func (ec *executionContext) field_Query_scopedJobStats_argsScopes( ctx = graphql.WithPathContext(ctx, graphql.NewPathWithField("scopes")) if tmp, ok := rawArgs["scopes"]; ok { - return ec.unmarshalOMetricScope2ᚕgithubᚗcomᚋClusterCockpitᚋccᚑbackendᚋpkgᚋschemaᚐMetricScopeᚄ(ctx, tmp) + return ec.unmarshalOMetricScope2ᚕgithubᚗcomᚋClusterCockpitᚋccᚑlibᚋschemaᚐMetricScopeᚄ(ctx, tmp) } var zeroVal []schema.MetricScope @@ -4805,7 +4804,7 @@ func (ec *executionContext) _Cluster_subClusters(ctx context.Context, field grap } res := resTmp.([]*schema.SubCluster) fc.Result = res - return ec.marshalNSubCluster2ᚕᚖgithubᚗcomᚋClusterCockpitᚋccᚑbackendᚋpkgᚋschemaᚐSubClusterᚄ(ctx, field.Selections, res) + return ec.marshalNSubCluster2ᚕᚖgithubᚗcomᚋClusterCockpitᚋccᚑlibᚋschemaᚐSubClusterᚄ(ctx, field.Selections, res) } func (ec *executionContext) fieldContext_Cluster_subClusters(_ context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) { @@ -5463,7 +5462,7 @@ func (ec *executionContext) _GlobalMetricListItem_unit(ctx context.Context, fiel } res := resTmp.(schema.Unit) fc.Result = res - return ec.marshalNUnit2githubᚗcomᚋClusterCockpitᚋccᚑbackendᚋpkgᚋschemaᚐUnit(ctx, field.Selections, res) + return ec.marshalNUnit2githubᚗcomᚋClusterCockpitᚋccᚑlibᚋschemaᚐUnit(ctx, field.Selections, res) } func (ec *executionContext) fieldContext_GlobalMetricListItem_unit(_ context.Context, field graphql.CollectedField) (fc 
*graphql.FieldContext, err error) { @@ -5513,7 +5512,7 @@ func (ec *executionContext) _GlobalMetricListItem_scope(ctx context.Context, fie } res := resTmp.(schema.MetricScope) fc.Result = res - return ec.marshalNMetricScope2githubᚗcomᚋClusterCockpitᚋccᚑbackendᚋpkgᚋschemaᚐMetricScope(ctx, field.Selections, res) + return ec.marshalNMetricScope2githubᚗcomᚋClusterCockpitᚋccᚑlibᚋschemaᚐMetricScope(ctx, field.Selections, res) } func (ec *executionContext) fieldContext_GlobalMetricListItem_scope(_ context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) { @@ -5598,7 +5597,7 @@ func (ec *executionContext) _GlobalMetricListItem_availability(ctx context.Conte } res := resTmp.([]schema.ClusterSupport) fc.Result = res - return ec.marshalNClusterSupport2ᚕgithubᚗcomᚋClusterCockpitᚋccᚑbackendᚋpkgᚋschemaᚐClusterSupportᚄ(ctx, field.Selections, res) + return ec.marshalNClusterSupport2ᚕgithubᚗcomᚋClusterCockpitᚋccᚑlibᚋschemaᚐClusterSupportᚄ(ctx, field.Selections, res) } func (ec *executionContext) fieldContext_GlobalMetricListItem_availability(_ context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) { @@ -6616,7 +6615,7 @@ func (ec *executionContext) _Job_state(ctx context.Context, field graphql.Collec } res := resTmp.(schema.JobState) fc.Result = res - return ec.marshalNJobState2githubᚗcomᚋClusterCockpitᚋccᚑbackendᚋpkgᚋschemaᚐJobState(ctx, field.Selections, res) + return ec.marshalNJobState2githubᚗcomᚋClusterCockpitᚋccᚑlibᚋschemaᚐJobState(ctx, field.Selections, res) } func (ec *executionContext) fieldContext_Job_state(_ context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) { @@ -6660,7 +6659,7 @@ func (ec *executionContext) _Job_tags(ctx context.Context, field graphql.Collect } res := resTmp.([]*schema.Tag) fc.Result = res - return ec.marshalNTag2ᚕᚖgithubᚗcomᚋClusterCockpitᚋccᚑbackendᚋpkgᚋschemaᚐTagᚄ(ctx, field.Selections, res) + return ec.marshalNTag2ᚕᚖgithubᚗcomᚋClusterCockpitᚋccᚑlibᚋschemaᚐTagᚄ(ctx, field.Selections, res) } func (ec *executionContext) fieldContext_Job_tags(_ context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) { @@ -6714,7 +6713,7 @@ func (ec *executionContext) _Job_resources(ctx context.Context, field graphql.Co } res := resTmp.([]*schema.Resource) fc.Result = res - return ec.marshalNResource2ᚕᚖgithubᚗcomᚋClusterCockpitᚋccᚑbackendᚋpkgᚋschemaᚐResourceᚄ(ctx, field.Selections, res) + return ec.marshalNResource2ᚕᚖgithubᚗcomᚋClusterCockpitᚋccᚑlibᚋschemaᚐResourceᚄ(ctx, field.Selections, res) } func (ec *executionContext) fieldContext_Job_resources(_ context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) { @@ -7222,7 +7221,7 @@ func (ec *executionContext) _JobMetric_unit(ctx context.Context, field graphql.C } res := resTmp.(schema.Unit) fc.Result = res - return ec.marshalOUnit2githubᚗcomᚋClusterCockpitᚋccᚑbackendᚋpkgᚋschemaᚐUnit(ctx, field.Selections, res) + return ec.marshalOUnit2githubᚗcomᚋClusterCockpitᚋccᚑlibᚋschemaᚐUnit(ctx, field.Selections, res) } func (ec *executionContext) fieldContext_JobMetric_unit(_ context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) { @@ -7313,7 +7312,7 @@ func (ec *executionContext) _JobMetric_series(ctx context.Context, field graphql } res := resTmp.([]schema.Series) fc.Result = res - return ec.marshalOSeries2ᚕgithubᚗcomᚋClusterCockpitᚋccᚑbackendᚋpkgᚋschemaᚐSeriesᚄ(ctx, field.Selections, res) + return ec.marshalOSeries2ᚕgithubᚗcomᚋClusterCockpitᚋccᚑlibᚋschemaᚐSeriesᚄ(ctx, 
field.Selections, res) } func (ec *executionContext) fieldContext_JobMetric_series(_ context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) { @@ -7364,7 +7363,7 @@ func (ec *executionContext) _JobMetric_statisticsSeries(ctx context.Context, fie } res := resTmp.(*schema.StatsSeries) fc.Result = res - return ec.marshalOStatsSeries2ᚖgithubᚗcomᚋClusterCockpitᚋccᚑbackendᚋpkgᚋschemaᚐStatsSeries(ctx, field.Selections, res) + return ec.marshalOStatsSeries2ᚖgithubᚗcomᚋClusterCockpitᚋccᚑlibᚋschemaᚐStatsSeries(ctx, field.Selections, res) } func (ec *executionContext) fieldContext_JobMetric_statisticsSeries(_ context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) { @@ -7462,7 +7461,7 @@ func (ec *executionContext) _JobMetricWithName_scope(ctx context.Context, field } res := resTmp.(schema.MetricScope) fc.Result = res - return ec.marshalNMetricScope2githubᚗcomᚋClusterCockpitᚋccᚑbackendᚋpkgᚋschemaᚐMetricScope(ctx, field.Selections, res) + return ec.marshalNMetricScope2githubᚗcomᚋClusterCockpitᚋccᚑlibᚋschemaᚐMetricScope(ctx, field.Selections, res) } func (ec *executionContext) fieldContext_JobMetricWithName_scope(_ context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) { @@ -7506,7 +7505,7 @@ func (ec *executionContext) _JobMetricWithName_metric(ctx context.Context, field } res := resTmp.(*schema.JobMetric) fc.Result = res - return ec.marshalNJobMetric2ᚖgithubᚗcomᚋClusterCockpitᚋccᚑbackendᚋpkgᚋschemaᚐJobMetric(ctx, field.Selections, res) + return ec.marshalNJobMetric2ᚖgithubᚗcomᚋClusterCockpitᚋccᚑlibᚋschemaᚐJobMetric(ctx, field.Selections, res) } func (ec *executionContext) fieldContext_JobMetricWithName_metric(_ context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) { @@ -7560,7 +7559,7 @@ func (ec *executionContext) _JobResultList_items(ctx context.Context, field grap } res := resTmp.([]*schema.Job) fc.Result = res - return ec.marshalNJob2ᚕᚖgithubᚗcomᚋClusterCockpitᚋccᚑbackendᚋpkgᚋschemaᚐJobᚄ(ctx, field.Selections, res) + return ec.marshalNJob2ᚕᚖgithubᚗcomᚋClusterCockpitᚋccᚑlibᚋschemaᚐJobᚄ(ctx, field.Selections, res) } func (ec *executionContext) fieldContext_JobResultList_items(_ context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) { @@ -9088,7 +9087,7 @@ func (ec *executionContext) _MetricConfig_unit(ctx context.Context, field graphq } res := resTmp.(schema.Unit) fc.Result = res - return ec.marshalNUnit2githubᚗcomᚋClusterCockpitᚋccᚑbackendᚋpkgᚋschemaᚐUnit(ctx, field.Selections, res) + return ec.marshalNUnit2githubᚗcomᚋClusterCockpitᚋccᚑlibᚋschemaᚐUnit(ctx, field.Selections, res) } func (ec *executionContext) fieldContext_MetricConfig_unit(_ context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) { @@ -9138,7 +9137,7 @@ func (ec *executionContext) _MetricConfig_scope(ctx context.Context, field graph } res := resTmp.(schema.MetricScope) fc.Result = res - return ec.marshalNMetricScope2githubᚗcomᚋClusterCockpitᚋccᚑbackendᚋpkgᚋschemaᚐMetricScope(ctx, field.Selections, res) + return ec.marshalNMetricScope2githubᚗcomᚋClusterCockpitᚋccᚑlibᚋschemaᚐMetricScope(ctx, field.Selections, res) } func (ec *executionContext) fieldContext_MetricConfig_scope(_ context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) { @@ -9484,7 +9483,7 @@ func (ec *executionContext) _MetricConfig_subClusters(ctx context.Context, field } res := resTmp.([]*schema.SubClusterConfig) fc.Result = res - return 
ec.marshalNSubClusterConfig2ᚕᚖgithubᚗcomᚋClusterCockpitᚋccᚑbackendᚋpkgᚋschemaᚐSubClusterConfigᚄ(ctx, field.Selections, res) + return ec.marshalNSubClusterConfig2ᚕᚖgithubᚗcomᚋClusterCockpitᚋccᚑlibᚋschemaᚐSubClusterConfigᚄ(ctx, field.Selections, res) } func (ec *executionContext) fieldContext_MetricConfig_subClusters(_ context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) { @@ -9586,7 +9585,7 @@ func (ec *executionContext) _MetricFootprints_data(ctx context.Context, field gr } res := resTmp.([]schema.Float) fc.Result = res - return ec.marshalNNullableFloat2ᚕgithubᚗcomᚋClusterCockpitᚋccᚑbackendᚋpkgᚋschemaᚐFloatᚄ(ctx, field.Selections, res) + return ec.marshalNNullableFloat2ᚕgithubᚗcomᚋClusterCockpitᚋccᚑlibᚋschemaᚐFloatᚄ(ctx, field.Selections, res) } func (ec *executionContext) fieldContext_MetricFootprints_data(_ context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) { @@ -10150,7 +10149,7 @@ func (ec *executionContext) _MetricValue_unit(ctx context.Context, field graphql } res := resTmp.(schema.Unit) fc.Result = res - return ec.marshalNUnit2githubᚗcomᚋClusterCockpitᚋccᚑbackendᚋpkgᚋschemaᚐUnit(ctx, field.Selections, res) + return ec.marshalNUnit2githubᚗcomᚋClusterCockpitᚋccᚑlibᚋschemaᚐUnit(ctx, field.Selections, res) } func (ec *executionContext) fieldContext_MetricValue_unit(_ context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) { @@ -10244,7 +10243,7 @@ func (ec *executionContext) _Mutation_createTag(ctx context.Context, field graph } res := resTmp.(*schema.Tag) fc.Result = res - return ec.marshalNTag2ᚖgithubᚗcomᚋClusterCockpitᚋccᚑbackendᚋpkgᚋschemaᚐTag(ctx, field.Selections, res) + return ec.marshalNTag2ᚖgithubᚗcomᚋClusterCockpitᚋccᚑlibᚋschemaᚐTag(ctx, field.Selections, res) } func (ec *executionContext) fieldContext_Mutation_createTag(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) { @@ -10364,7 +10363,7 @@ func (ec *executionContext) _Mutation_addTagsToJob(ctx context.Context, field gr } res := resTmp.([]*schema.Tag) fc.Result = res - return ec.marshalNTag2ᚕᚖgithubᚗcomᚋClusterCockpitᚋccᚑbackendᚋpkgᚋschemaᚐTagᚄ(ctx, field.Selections, res) + return ec.marshalNTag2ᚕᚖgithubᚗcomᚋClusterCockpitᚋccᚑlibᚋschemaᚐTagᚄ(ctx, field.Selections, res) } func (ec *executionContext) fieldContext_Mutation_addTagsToJob(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) { @@ -10429,7 +10428,7 @@ func (ec *executionContext) _Mutation_removeTagsFromJob(ctx context.Context, fie } res := resTmp.([]*schema.Tag) fc.Result = res - return ec.marshalNTag2ᚕᚖgithubᚗcomᚋClusterCockpitᚋccᚑbackendᚋpkgᚋschemaᚐTagᚄ(ctx, field.Selections, res) + return ec.marshalNTag2ᚕᚖgithubᚗcomᚋClusterCockpitᚋccᚑlibᚋschemaᚐTagᚄ(ctx, field.Selections, res) } func (ec *executionContext) fieldContext_Mutation_removeTagsFromJob(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) { @@ -10645,7 +10644,7 @@ func (ec *executionContext) _NamedStats_data(ctx context.Context, field graphql. 
} res := resTmp.(*schema.MetricStatistics) fc.Result = res - return ec.marshalNMetricStatistics2ᚖgithubᚗcomᚋClusterCockpitᚋccᚑbackendᚋpkgᚋschemaᚐMetricStatistics(ctx, field.Selections, res) + return ec.marshalNMetricStatistics2ᚖgithubᚗcomᚋClusterCockpitᚋccᚑlibᚋschemaᚐMetricStatistics(ctx, field.Selections, res) } func (ec *executionContext) fieldContext_NamedStats_data(_ context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) { @@ -10741,7 +10740,7 @@ func (ec *executionContext) _NamedStatsWithScope_scope(ctx context.Context, fiel } res := resTmp.(schema.MetricScope) fc.Result = res - return ec.marshalNMetricScope2githubᚗcomᚋClusterCockpitᚋccᚑbackendᚋpkgᚋschemaᚐMetricScope(ctx, field.Selections, res) + return ec.marshalNMetricScope2githubᚗcomᚋClusterCockpitᚋccᚑlibᚋschemaᚐMetricScope(ctx, field.Selections, res) } func (ec *executionContext) fieldContext_NamedStatsWithScope_scope(_ context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) { @@ -11057,7 +11056,7 @@ func (ec *executionContext) _Node_HealthState(ctx context.Context, field graphql } res := resTmp.(schema.NodeState) fc.Result = res - return ec.marshalNMonitoringState2githubᚗcomᚋClusterCockpitᚋccᚑbackendᚋpkgᚋschemaᚐNodeState(ctx, field.Selections, res) + return ec.marshalNMonitoringState2githubᚗcomᚋClusterCockpitᚋccᚑlibᚋschemaᚐNodeState(ctx, field.Selections, res) } func (ec *executionContext) fieldContext_Node_HealthState(_ context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) { @@ -11282,7 +11281,7 @@ func (ec *executionContext) _NodeStateResultList_items(ctx context.Context, fiel } res := resTmp.([]*schema.Node) fc.Result = res - return ec.marshalNNode2ᚕᚖgithubᚗcomᚋClusterCockpitᚋccᚑbackendᚋpkgᚋschemaᚐNodeᚄ(ctx, field.Selections, res) + return ec.marshalNNode2ᚕᚖgithubᚗcomᚋClusterCockpitᚋccᚑlibᚋschemaᚐNodeᚄ(ctx, field.Selections, res) } func (ec *executionContext) fieldContext_NodeStateResultList_items(_ context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) { @@ -11728,7 +11727,7 @@ func (ec *executionContext) _Query_clusters(ctx context.Context, field graphql.C } res := resTmp.([]*schema.Cluster) fc.Result = res - return ec.marshalNCluster2ᚕᚖgithubᚗcomᚋClusterCockpitᚋccᚑbackendᚋpkgᚋschemaᚐClusterᚄ(ctx, field.Selections, res) + return ec.marshalNCluster2ᚕᚖgithubᚗcomᚋClusterCockpitᚋccᚑlibᚋschemaᚐClusterᚄ(ctx, field.Selections, res) } func (ec *executionContext) fieldContext_Query_clusters(_ context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) { @@ -11780,7 +11779,7 @@ func (ec *executionContext) _Query_tags(ctx context.Context, field graphql.Colle } res := resTmp.([]*schema.Tag) fc.Result = res - return ec.marshalNTag2ᚕᚖgithubᚗcomᚋClusterCockpitᚋccᚑbackendᚋpkgᚋschemaᚐTagᚄ(ctx, field.Selections, res) + return ec.marshalNTag2ᚕᚖgithubᚗcomᚋClusterCockpitᚋccᚑlibᚋschemaᚐTagᚄ(ctx, field.Selections, res) } func (ec *executionContext) fieldContext_Query_tags(_ context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) { @@ -11834,7 +11833,7 @@ func (ec *executionContext) _Query_globalMetrics(ctx context.Context, field grap } res := resTmp.([]*schema.GlobalMetricListItem) fc.Result = res - return ec.marshalNGlobalMetricListItem2ᚕᚖgithubᚗcomᚋClusterCockpitᚋccᚑbackendᚋpkgᚋschemaᚐGlobalMetricListItemᚄ(ctx, field.Selections, res) + return ec.marshalNGlobalMetricListItem2ᚕᚖgithubᚗcomᚋClusterCockpitᚋccᚑlibᚋschemaᚐGlobalMetricListItemᚄ(ctx, field.Selections, res) } func (ec 
*executionContext) fieldContext_Query_globalMetrics(_ context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) {
@@ -12008,7 +12007,7 @@ func (ec *executionContext) _Query_node(ctx context.Context, field graphql.Colle
 	}
 	res := resTmp.(*schema.Node)
 	fc.Result = res
-	return ec.marshalONode2ᚖgithubᚗcomᚋClusterCockpitᚋccᚑbackendᚋpkgᚋschemaᚐNode(ctx, field.Selections, res)
+	return ec.marshalONode2ᚖgithubᚗcomᚋClusterCockpitᚋccᚑlibᚋschemaᚐNode(ctx, field.Selections, res)
 }

 func (ec *executionContext) fieldContext_Query_node(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) {
@@ -12198,7 +12197,7 @@ func (ec *executionContext) _Query_job(ctx context.Context, field graphql.Collec
 	}
 	res := resTmp.(*schema.Job)
 	fc.Result = res
-	return ec.marshalOJob2ᚖgithubᚗcomᚋClusterCockpitᚋccᚑbackendᚋpkgᚋschemaᚐJob(ctx, field.Selections, res)
+	return ec.marshalOJob2ᚖgithubᚗcomᚋClusterCockpitᚋccᚑlibᚋschemaᚐJob(ctx, field.Selections, res)
 }

 func (ec *executionContext) fieldContext_Query_job(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) {
@@ -13357,7 +13356,7 @@ func (ec *executionContext) _ScopedStats_data(ctx context.Context, field graphql
 	}
 	res := resTmp.(*schema.MetricStatistics)
 	fc.Result = res
-	return ec.marshalNMetricStatistics2ᚖgithubᚗcomᚋClusterCockpitᚋccᚑbackendᚋpkgᚋschemaᚐMetricStatistics(ctx, field.Selections, res)
+	return ec.marshalNMetricStatistics2ᚖgithubᚗcomᚋClusterCockpitᚋccᚑlibᚋschemaᚐMetricStatistics(ctx, field.Selections, res)
 }

 func (ec *executionContext) fieldContext_ScopedStats_data(_ context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) {
@@ -13491,7 +13490,7 @@ func (ec *executionContext) _Series_statistics(ctx context.Context, field graphq
 	}
 	res := resTmp.(schema.MetricStatistics)
 	fc.Result = res
-	return ec.marshalOMetricStatistics2githubᚗcomᚋClusterCockpitᚋccᚑbackendᚋpkgᚋschemaᚐMetricStatistics(ctx, field.Selections, res)
+	return ec.marshalOMetricStatistics2githubᚗcomᚋClusterCockpitᚋccᚑlibᚋschemaᚐMetricStatistics(ctx, field.Selections, res)
 }

 func (ec *executionContext) fieldContext_Series_statistics(_ context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) {
@@ -13543,7 +13542,7 @@ func (ec *executionContext) _Series_data(ctx context.Context, field graphql.Coll
 	}
 	res := resTmp.([]schema.Float)
 	fc.Result = res
-	return ec.marshalNNullableFloat2ᚕgithubᚗcomᚋClusterCockpitᚋccᚑbackendᚋpkgᚋschemaᚐFloatᚄ(ctx, field.Selections, res)
+	return ec.marshalNNullableFloat2ᚕgithubᚗcomᚋClusterCockpitᚋccᚑlibᚋschemaᚐFloatᚄ(ctx, field.Selections, res)
 }

 func (ec *executionContext) fieldContext_Series_data(_ context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) {
@@ -13587,7 +13586,7 @@ func (ec *executionContext) _StatsSeries_mean(ctx context.Context, field graphql
 	}
 	res := resTmp.([]schema.Float)
 	fc.Result = res
-	return ec.marshalNNullableFloat2ᚕgithubᚗcomᚋClusterCockpitᚋccᚑbackendᚋpkgᚋschemaᚐFloatᚄ(ctx, field.Selections, res)
+	return ec.marshalNNullableFloat2ᚕgithubᚗcomᚋClusterCockpitᚋccᚑlibᚋschemaᚐFloatᚄ(ctx, field.Selections, res)
 }

 func (ec *executionContext) fieldContext_StatsSeries_mean(_ context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) {
@@ -13631,7 +13630,7 @@ func (ec *executionContext) _StatsSeries_median(ctx context.Context, field graph
 	}
 	res := resTmp.([]schema.Float)
 	fc.Result = res
-	return ec.marshalNNullableFloat2ᚕgithubᚗcomᚋClusterCockpitᚋccᚑbackendᚋpkgᚋschemaᚐFloatᚄ(ctx, field.Selections, res)
+	return ec.marshalNNullableFloat2ᚕgithubᚗcomᚋClusterCockpitᚋccᚑlibᚋschemaᚐFloatᚄ(ctx, field.Selections, res)
 }

 func (ec *executionContext) fieldContext_StatsSeries_median(_ context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) {
@@ -13675,7 +13674,7 @@ func (ec *executionContext) _StatsSeries_min(ctx context.Context, field graphql.
 	}
 	res := resTmp.([]schema.Float)
 	fc.Result = res
-	return ec.marshalNNullableFloat2ᚕgithubᚗcomᚋClusterCockpitᚋccᚑbackendᚋpkgᚋschemaᚐFloatᚄ(ctx, field.Selections, res)
+	return ec.marshalNNullableFloat2ᚕgithubᚗcomᚋClusterCockpitᚋccᚑlibᚋschemaᚐFloatᚄ(ctx, field.Selections, res)
 }

 func (ec *executionContext) fieldContext_StatsSeries_min(_ context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) {
@@ -13719,7 +13718,7 @@ func (ec *executionContext) _StatsSeries_max(ctx context.Context, field graphql.
 	}
 	res := resTmp.([]schema.Float)
 	fc.Result = res
-	return ec.marshalNNullableFloat2ᚕgithubᚗcomᚋClusterCockpitᚋccᚑbackendᚋpkgᚋschemaᚐFloatᚄ(ctx, field.Selections, res)
+	return ec.marshalNNullableFloat2ᚕgithubᚗcomᚋClusterCockpitᚋccᚑlibᚋschemaᚐFloatᚄ(ctx, field.Selections, res)
 }

 func (ec *executionContext) fieldContext_StatsSeries_max(_ context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) {
@@ -14071,7 +14070,7 @@ func (ec *executionContext) _SubCluster_flopRateScalar(ctx context.Context, fiel
 	}
 	res := resTmp.(schema.MetricValue)
 	fc.Result = res
-	return ec.marshalNMetricValue2githubᚗcomᚋClusterCockpitᚋccᚑbackendᚋpkgᚋschemaᚐMetricValue(ctx, field.Selections, res)
+	return ec.marshalNMetricValue2githubᚗcomᚋClusterCockpitᚋccᚑlibᚋschemaᚐMetricValue(ctx, field.Selections, res)
 }

 func (ec *executionContext) fieldContext_SubCluster_flopRateScalar(_ context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) {
@@ -14123,7 +14122,7 @@ func (ec *executionContext) _SubCluster_flopRateSimd(ctx context.Context, field
 	}
 	res := resTmp.(schema.MetricValue)
 	fc.Result = res
-	return ec.marshalNMetricValue2githubᚗcomᚋClusterCockpitᚋccᚑbackendᚋpkgᚋschemaᚐMetricValue(ctx, field.Selections, res)
+	return ec.marshalNMetricValue2githubᚗcomᚋClusterCockpitᚋccᚑlibᚋschemaᚐMetricValue(ctx, field.Selections, res)
 }

 func (ec *executionContext) fieldContext_SubCluster_flopRateSimd(_ context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) {
@@ -14175,7 +14174,7 @@ func (ec *executionContext) _SubCluster_memoryBandwidth(ctx context.Context, fie
 	}
 	res := resTmp.(schema.MetricValue)
 	fc.Result = res
-	return ec.marshalNMetricValue2githubᚗcomᚋClusterCockpitᚋccᚑbackendᚋpkgᚋschemaᚐMetricValue(ctx, field.Selections, res)
+	return ec.marshalNMetricValue2githubᚗcomᚋClusterCockpitᚋccᚑlibᚋschemaᚐMetricValue(ctx, field.Selections, res)
 }

 func (ec *executionContext) fieldContext_SubCluster_memoryBandwidth(_ context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) {
@@ -14227,7 +14226,7 @@ func (ec *executionContext) _SubCluster_topology(ctx context.Context, field grap
 	}
 	res := resTmp.(schema.Topology)
 	fc.Result = res
-	return ec.marshalNTopology2githubᚗcomᚋClusterCockpitᚋccᚑbackendᚋpkgᚋschemaᚐTopology(ctx, field.Selections, res)
+	return ec.marshalNTopology2githubᚗcomᚋClusterCockpitᚋccᚑlibᚋschemaᚐTopology(ctx, field.Selections, res)
 }

 func (ec *executionContext) fieldContext_SubCluster_topology(_ context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) {
@@ -14285,7 +14284,7 @@ func (ec *executionContext) _SubCluster_metricConfig(ctx context.Context, field
 	}
 	res := resTmp.([]schema.MetricConfig)
 	fc.Result = res
-	return ec.marshalNMetricConfig2ᚕgithubᚗcomᚋClusterCockpitᚋccᚑbackendᚋpkgᚋschemaᚐMetricConfigᚄ(ctx, field.Selections, res)
+	return ec.marshalNMetricConfig2ᚕgithubᚗcomᚋClusterCockpitᚋccᚑlibᚋschemaᚐMetricConfigᚄ(ctx, field.Selections, res)
 }

 func (ec *executionContext) fieldContext_SubCluster_metricConfig(_ context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) {
@@ -14951,7 +14950,7 @@ func (ec *executionContext) _TimeWeights_nodeHours(ctx context.Context, field gr
 	}
 	res := resTmp.([]schema.Float)
 	fc.Result = res
-	return ec.marshalNNullableFloat2ᚕgithubᚗcomᚋClusterCockpitᚋccᚑbackendᚋpkgᚋschemaᚐFloatᚄ(ctx, field.Selections, res)
+	return ec.marshalNNullableFloat2ᚕgithubᚗcomᚋClusterCockpitᚋccᚑlibᚋschemaᚐFloatᚄ(ctx, field.Selections, res)
 }

 func (ec *executionContext) fieldContext_TimeWeights_nodeHours(_ context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) {
@@ -14995,7 +14994,7 @@ func (ec *executionContext) _TimeWeights_accHours(ctx context.Context, field gra
 	}
 	res := resTmp.([]schema.Float)
 	fc.Result = res
-	return ec.marshalNNullableFloat2ᚕgithubᚗcomᚋClusterCockpitᚋccᚑbackendᚋpkgᚋschemaᚐFloatᚄ(ctx, field.Selections, res)
+	return ec.marshalNNullableFloat2ᚕgithubᚗcomᚋClusterCockpitᚋccᚑlibᚋschemaᚐFloatᚄ(ctx, field.Selections, res)
 }

 func (ec *executionContext) fieldContext_TimeWeights_accHours(_ context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) {
@@ -15039,7 +15038,7 @@ func (ec *executionContext) _TimeWeights_coreHours(ctx context.Context, field gr
 	}
 	res := resTmp.([]schema.Float)
 	fc.Result = res
-	return ec.marshalNNullableFloat2ᚕgithubᚗcomᚋClusterCockpitᚋccᚑbackendᚋpkgᚋschemaᚐFloatᚄ(ctx, field.Selections, res)
+	return ec.marshalNNullableFloat2ᚕgithubᚗcomᚋClusterCockpitᚋccᚑlibᚋschemaᚐFloatᚄ(ctx, field.Selections, res)
 }

 func (ec *executionContext) fieldContext_TimeWeights_coreHours(_ context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) {
@@ -15285,7 +15284,7 @@ func (ec *executionContext) _Topology_accelerators(ctx context.Context, field gr
 	}
 	res := resTmp.([]*schema.Accelerator)
 	fc.Result = res
-	return ec.marshalOAccelerator2ᚕᚖgithubᚗcomᚋClusterCockpitᚋccᚑbackendᚋpkgᚋschemaᚐAcceleratorᚄ(ctx, field.Selections, res)
+	return ec.marshalOAccelerator2ᚕᚖgithubᚗcomᚋClusterCockpitᚋccᚑlibᚋschemaᚐAcceleratorᚄ(ctx, field.Selections, res)
 }

 func (ec *executionContext) fieldContext_Topology_accelerators(_ context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) {
@@ -17624,7 +17623,7 @@ func (ec *executionContext) unmarshalInputJobFilter(ctx context.Context, obj any
 			it.Partition = data
 		case "duration":
 			ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("duration"))
-			data, err := ec.unmarshalOIntRange2ᚖgithubᚗcomᚋClusterCockpitᚋccᚑbackendᚋpkgᚋschemaᚐIntRange(ctx, v)
+			data, err := ec.unmarshalOIntRange2ᚖgithubᚗcomᚋClusterCockpitᚋccᚑlibᚋschemaᚐIntRange(ctx, v)
 			if err != nil {
 				return it, err
 			}
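A note on the range-filter hunks above and below: only the schema import path changes, the filter semantics stay the same. As a minimal, hedged sketch (the From/To field names of cc-lib's schema.IntRange are an assumption inferred from the generated unmarshal code, not confirmed by this patch), a duration filter could be built programmatically like this:

	// Hypothetical usage sketch; JobFilter.Duration is *schema.IntRange
	// per the generated `it.Duration = data` assignment above.
	package main

	import (
		"fmt"

		"github.com/ClusterCockpit/cc-backend/internal/graph/model"
		"github.com/ClusterCockpit/cc-lib/schema"
	)

	func main() {
		duration := schema.IntRange{From: 3600, To: 7200} // job runtime in seconds
		filter := model.JobFilter{Duration: &duration}
		fmt.Printf("duration filter: %+v\n", *filter.Duration)
	}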
@@ -17645,35 +17644,35 @@ func (ec *executionContext) unmarshalInputJobFilter(ctx context.Context, obj any
 			it.MinRunningFor = data
 		case "numNodes":
 			ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("numNodes"))
-			data, err := ec.unmarshalOIntRange2ᚖgithubᚗcomᚋClusterCockpitᚋccᚑbackendᚋpkgᚋschemaᚐIntRange(ctx, v)
+			data, err := ec.unmarshalOIntRange2ᚖgithubᚗcomᚋClusterCockpitᚋccᚑlibᚋschemaᚐIntRange(ctx, v)
 			if err != nil {
 				return it, err
 			}
 			it.NumNodes = data
 		case "numAccelerators":
 			ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("numAccelerators"))
-			data, err := ec.unmarshalOIntRange2ᚖgithubᚗcomᚋClusterCockpitᚋccᚑbackendᚋpkgᚋschemaᚐIntRange(ctx, v)
+			data, err := ec.unmarshalOIntRange2ᚖgithubᚗcomᚋClusterCockpitᚋccᚑlibᚋschemaᚐIntRange(ctx, v)
 			if err != nil {
 				return it, err
 			}
 			it.NumAccelerators = data
 		case "numHWThreads":
 			ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("numHWThreads"))
-			data, err := ec.unmarshalOIntRange2ᚖgithubᚗcomᚋClusterCockpitᚋccᚑbackendᚋpkgᚋschemaᚐIntRange(ctx, v)
+			data, err := ec.unmarshalOIntRange2ᚖgithubᚗcomᚋClusterCockpitᚋccᚑlibᚋschemaᚐIntRange(ctx, v)
 			if err != nil {
 				return it, err
 			}
 			it.NumHWThreads = data
 		case "startTime":
 			ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("startTime"))
-			data, err := ec.unmarshalOTimeRange2ᚖgithubᚗcomᚋClusterCockpitᚋccᚑbackendᚋpkgᚋschemaᚐTimeRange(ctx, v)
+			data, err := ec.unmarshalOTimeRange2ᚖgithubᚗcomᚋClusterCockpitᚋccᚑlibᚋschemaᚐTimeRange(ctx, v)
 			if err != nil {
 				return it, err
 			}
 			it.StartTime = data
 		case "state":
 			ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("state"))
-			data, err := ec.unmarshalOJobState2ᚕgithubᚗcomᚋClusterCockpitᚋccᚑbackendᚋpkgᚋschemaᚐJobStateᚄ(ctx, v)
+			data, err := ec.unmarshalOJobState2ᚕgithubᚗcomᚋClusterCockpitᚋccᚑlibᚋschemaᚐJobStateᚄ(ctx, v)
 			if err != nil {
 				return it, err
 			}
@@ -17746,7 +17745,7 @@ func (ec *executionContext) unmarshalInputNodeFilter(ctx context.Context, obj an
 		asMap[k] = v
 	}

-	fieldsInOrder := [...]string{"hostname", "cluster", "subCluster", "nodeState", "healthState"}
+	fieldsInOrder := [...]string{"hostname", "cluster", "nodeState", "healthState"}
 	for _, k := range fieldsInOrder {
 		v, ok := asMap[k]
 		if !ok {
@@ -17767,13 +17766,6 @@ func (ec *executionContext) unmarshalInputNodeFilter(ctx context.Context, obj an
 				return it, err
 			}
 			it.Cluster = data
-		case "subCluster":
-			ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("subCluster"))
-			data, err := ec.unmarshalOStringInput2ᚖgithubᚗcomᚋClusterCockpitᚋccᚑbackendᚋinternalᚋgraphᚋmodelᚐStringInput(ctx, v)
-			if err != nil {
-				return it, err
-			}
-			it.SubCluster = data
 		case "nodeState":
 			ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("nodeState"))
 			data, err := ec.unmarshalONodeState2ᚖstring(ctx, v)
@@ -17783,7 +17775,7 @@ func (ec *executionContext) unmarshalInputNodeFilter(ctx context.Context, obj an
 			it.NodeState = data
 		case "healthState":
 			ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("healthState"))
-			data, err := ec.unmarshalOMonitoringState2ᚖgithubᚗcomᚋClusterCockpitᚋccᚑbackendᚋpkgᚋschemaᚐNodeState(ctx, v)
+			data, err := ec.unmarshalOMonitoringState2ᚖgithubᚗcomᚋClusterCockpitᚋccᚑlibᚋschemaᚐNodeState(ctx, v)
 			if err != nil {
 				return it, err
 			}
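The NodeFilter hunks above drop the subCluster case and shrink fieldsInOrder in the same change; the list and the model struct must always match (the corresponding models_gen.go edit appears further down in this patch). A self-contained toy illustration of the fixed-order decode loop gqlgen generates:

	package main

	import "fmt"

	func main() {
		asMap := map[string]any{"hostname": "node42", "nodeState": "allocated"}
		fieldsInOrder := [...]string{"hostname", "cluster", "nodeState", "healthState"}
		for _, k := range fieldsInOrder {
			v, ok := asMap[k]
			if !ok {
				continue // absent inputs are simply skipped
			}
			fmt.Println(k, "=", v)
		}
	}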
@@ -21588,7 +21580,7 @@ func (ec *executionContext) ___Type(ctx context.Context, sel ast.SelectionSet, o

 // region    ***************************** type.gotpl *****************************

-func (ec *executionContext) marshalNAccelerator2ᚖgithubᚗcomᚋClusterCockpitᚋccᚑbackendᚋpkgᚋschemaᚐAccelerator(ctx context.Context, sel ast.SelectionSet, v *schema.Accelerator) graphql.Marshaler {
+func (ec *executionContext) marshalNAccelerator2ᚖgithubᚗcomᚋClusterCockpitᚋccᚑlibᚋschemaᚐAccelerator(ctx context.Context, sel ast.SelectionSet, v *schema.Accelerator) graphql.Marshaler {
 	if v == nil {
 		if !graphql.HasFieldError(ctx, graphql.GetFieldContext(ctx)) {
 			ec.Errorf(ctx, "the requested element is null which the schema does not allow")
@@ -21613,7 +21605,7 @@ func (ec *executionContext) marshalNBoolean2bool(ctx context.Context, sel ast.Se
 	return res
 }

-func (ec *executionContext) marshalNCluster2ᚕᚖgithubᚗcomᚋClusterCockpitᚋccᚑbackendᚋpkgᚋschemaᚐClusterᚄ(ctx context.Context, sel ast.SelectionSet, v []*schema.Cluster) graphql.Marshaler {
+func (ec *executionContext) marshalNCluster2ᚕᚖgithubᚗcomᚋClusterCockpitᚋccᚑlibᚋschemaᚐClusterᚄ(ctx context.Context, sel ast.SelectionSet, v []*schema.Cluster) graphql.Marshaler {
 	ret := make(graphql.Array, len(v))
 	var wg sync.WaitGroup
 	isLen1 := len(v) == 1
@@ -21637,7 +21629,7 @@ func (ec *executionContext) marshalNCluster2ᚕᚖgithubᚗcomᚋClusterCockpit
 			if !isLen1 {
 				defer wg.Done()
 			}
-			ret[i] = ec.marshalNCluster2ᚖgithubᚗcomᚋClusterCockpitᚋccᚑbackendᚋpkgᚋschemaᚐCluster(ctx, sel, v[i])
+			ret[i] = ec.marshalNCluster2ᚖgithubᚗcomᚋClusterCockpitᚋccᚑlibᚋschemaᚐCluster(ctx, sel, v[i])
 		}
 		if isLen1 {
 			f(i)
@@ -21657,7 +21649,7 @@ func (ec *executionContext) marshalNCluster2ᚕᚖgithubᚗcomᚋClusterCockpit
 	return ret
 }

-func (ec *executionContext) marshalNCluster2ᚖgithubᚗcomᚋClusterCockpitᚋccᚑbackendᚋpkgᚋschemaᚐCluster(ctx context.Context, sel ast.SelectionSet, v *schema.Cluster) graphql.Marshaler {
+func (ec *executionContext) marshalNCluster2ᚖgithubᚗcomᚋClusterCockpitᚋccᚑlibᚋschemaᚐCluster(ctx context.Context, sel ast.SelectionSet, v *schema.Cluster) graphql.Marshaler {
 	if v == nil {
 		if !graphql.HasFieldError(ctx, graphql.GetFieldContext(ctx)) {
 			ec.Errorf(ctx, "the requested element is null which the schema does not allow")
@@ -21667,11 +21659,11 @@ func (ec *executionContext) marshalNCluster2ᚖgithubᚗcomᚋClusterCockpitᚋc
 	return ec._Cluster(ctx, sel, v)
 }

-func (ec *executionContext) marshalNClusterSupport2githubᚗcomᚋClusterCockpitᚋccᚑbackendᚋpkgᚋschemaᚐClusterSupport(ctx context.Context, sel ast.SelectionSet, v schema.ClusterSupport) graphql.Marshaler {
+func (ec *executionContext) marshalNClusterSupport2githubᚗcomᚋClusterCockpitᚋccᚑlibᚋschemaᚐClusterSupport(ctx context.Context, sel ast.SelectionSet, v schema.ClusterSupport) graphql.Marshaler {
 	return ec._ClusterSupport(ctx, sel, &v)
 }

-func (ec *executionContext) marshalNClusterSupport2ᚕgithubᚗcomᚋClusterCockpitᚋccᚑbackendᚋpkgᚋschemaᚐClusterSupportᚄ(ctx context.Context, sel ast.SelectionSet, v []schema.ClusterSupport) graphql.Marshaler {
+func (ec *executionContext) marshalNClusterSupport2ᚕgithubᚗcomᚋClusterCockpitᚋccᚑlibᚋschemaᚐClusterSupportᚄ(ctx context.Context, sel ast.SelectionSet, v []schema.ClusterSupport) graphql.Marshaler {
 	ret := make(graphql.Array, len(v))
 	var wg sync.WaitGroup
 	isLen1 := len(v) == 1
@@ -21695,7 +21687,7 @@ func (ec *executionContext) marshalNClusterSupport2ᚕgithubᚗcomᚋClusterCock
 			if !isLen1 {
 				defer wg.Done()
 			}
-			ret[i] = ec.marshalNClusterSupport2githubᚗcomᚋClusterCockpitᚋccᚑbackendᚋpkgᚋschemaᚐClusterSupport(ctx, sel, v[i])
+			ret[i] = ec.marshalNClusterSupport2githubᚗcomᚋClusterCockpitᚋccᚑlibᚋschemaᚐClusterSupport(ctx, sel, v[i])
 		}
 		if isLen1 {
 			f(i)
@@ -21853,7 +21845,7 @@ func (ec *executionContext) unmarshalNFloatRange2ᚖgithubᚗcomᚋClusterCockpi
 	return &res, graphql.ErrorOnPath(ctx, err)
 }

-func (ec *executionContext) marshalNGlobalMetricListItem2ᚕᚖgithubᚗcomᚋClusterCockpitᚋccᚑbackendᚋpkgᚋschemaᚐGlobalMetricListItemᚄ(ctx context.Context, sel ast.SelectionSet, v []*schema.GlobalMetricListItem) graphql.Marshaler {
+func (ec *executionContext) marshalNGlobalMetricListItem2ᚕᚖgithubᚗcomᚋClusterCockpitᚋccᚑlibᚋschemaᚐGlobalMetricListItemᚄ(ctx context.Context, sel ast.SelectionSet, v []*schema.GlobalMetricListItem) graphql.Marshaler {
 	ret := make(graphql.Array, len(v))
 	var wg sync.WaitGroup
 	isLen1 := len(v) == 1
@@ -21877,7 +21869,7 @@ func (ec *executionContext) marshalNGlobalMetricListItem2ᚕᚖgithubᚗcomᚋCl
 			if !isLen1 {
 				defer wg.Done()
 			}
-			ret[i] = ec.marshalNGlobalMetricListItem2ᚖgithubᚗcomᚋClusterCockpitᚋccᚑbackendᚋpkgᚋschemaᚐGlobalMetricListItem(ctx, sel, v[i])
+			ret[i] = ec.marshalNGlobalMetricListItem2ᚖgithubᚗcomᚋClusterCockpitᚋccᚑlibᚋschemaᚐGlobalMetricListItem(ctx, sel, v[i])
 		}
 		if isLen1 {
 			f(i)
@@ -21897,7 +21889,7 @@ func (ec *executionContext) marshalNGlobalMetricListItem2ᚕᚖgithubᚗcomᚋCl
 	return ret
 }

-func (ec *executionContext) marshalNGlobalMetricListItem2ᚖgithubᚗcomᚋClusterCockpitᚋccᚑbackendᚋpkgᚋschemaᚐGlobalMetricListItem(ctx context.Context, sel ast.SelectionSet, v *schema.GlobalMetricListItem) graphql.Marshaler {
+func (ec *executionContext) marshalNGlobalMetricListItem2ᚖgithubᚗcomᚋClusterCockpitᚋccᚑlibᚋschemaᚐGlobalMetricListItem(ctx context.Context, sel ast.SelectionSet, v *schema.GlobalMetricListItem) graphql.Marshaler {
 	if v == nil {
 		if !graphql.HasFieldError(ctx, graphql.GetFieldContext(ctx)) {
 			ec.Errorf(ctx, "the requested element is null which the schema does not allow")
@@ -22174,7 +22166,7 @@ func (ec *executionContext) marshalNInt2ᚖint(ctx context.Context, sel ast.Sele
 	return res
 }

-func (ec *executionContext) marshalNJob2ᚕᚖgithubᚗcomᚋClusterCockpitᚋccᚑbackendᚋpkgᚋschemaᚐJobᚄ(ctx context.Context, sel ast.SelectionSet, v []*schema.Job) graphql.Marshaler {
+func (ec *executionContext) marshalNJob2ᚕᚖgithubᚗcomᚋClusterCockpitᚋccᚑlibᚋschemaᚐJobᚄ(ctx context.Context, sel ast.SelectionSet, v []*schema.Job) graphql.Marshaler {
 	ret := make(graphql.Array, len(v))
 	var wg sync.WaitGroup
 	isLen1 := len(v) == 1
@@ -22198,7 +22190,7 @@ func (ec *executionContext) marshalNJob2ᚕᚖgithubᚗcomᚋClusterCockpitᚋcc
 			if !isLen1 {
 				defer wg.Done()
 			}
-			ret[i] = ec.marshalNJob2ᚖgithubᚗcomᚋClusterCockpitᚋccᚑbackendᚋpkgᚋschemaᚐJob(ctx, sel, v[i])
+			ret[i] = ec.marshalNJob2ᚖgithubᚗcomᚋClusterCockpitᚋccᚑlibᚋschemaᚐJob(ctx, sel, v[i])
 		}
 		if isLen1 {
 			f(i)
@@ -22218,7 +22210,7 @@ func (ec *executionContext) marshalNJob2ᚕᚖgithubᚗcomᚋClusterCockpitᚋcc
 	return ret
 }

-func (ec *executionContext) marshalNJob2ᚖgithubᚗcomᚋClusterCockpitᚋccᚑbackendᚋpkgᚋschemaᚐJob(ctx context.Context, sel ast.SelectionSet, v *schema.Job) graphql.Marshaler {
+func (ec *executionContext) marshalNJob2ᚖgithubᚗcomᚋClusterCockpitᚋccᚑlibᚋschemaᚐJob(ctx context.Context, sel ast.SelectionSet, v *schema.Job) graphql.Marshaler {
 	if v == nil {
 		if !graphql.HasFieldError(ctx, graphql.GetFieldContext(ctx)) {
 			ec.Errorf(ctx, "the requested element is null which the schema does not allow")
@@ -22304,7 +22296,7 @@ func (ec *executionContext) marshalNJobLink2ᚖgithubᚗcomᚋClusterCockpitᚋc
 	return ec._JobLink(ctx, sel, v)
 }

-func (ec *executionContext) marshalNJobMetric2ᚖgithubᚗcomᚋClusterCockpitᚋccᚑbackendᚋpkgᚋschemaᚐJobMetric(ctx context.Context, sel ast.SelectionSet, v *schema.JobMetric) graphql.Marshaler {
+func (ec *executionContext) marshalNJobMetric2ᚖgithubᚗcomᚋClusterCockpitᚋccᚑlibᚋschemaᚐJobMetric(ctx context.Context, sel ast.SelectionSet, v *schema.JobMetric) graphql.Marshaler {
 	if v == nil {
 		if !graphql.HasFieldError(ctx, graphql.GetFieldContext(ctx)) {
 			ec.Errorf(ctx, "the requested element is null which the schema does not allow")
@@ -22382,13 +22374,13 @@ func (ec *executionContext) marshalNJobResultList2ᚖgithubᚗcomᚋClusterCockp
 	return ec._JobResultList(ctx, sel, v)
 }

-func (ec *executionContext) unmarshalNJobState2githubᚗcomᚋClusterCockpitᚋccᚑbackendᚋpkgᚋschemaᚐJobState(ctx context.Context, v any) (schema.JobState, error) {
+func (ec *executionContext) unmarshalNJobState2githubᚗcomᚋClusterCockpitᚋccᚑlibᚋschemaᚐJobState(ctx context.Context, v any) (schema.JobState, error) {
 	var res schema.JobState
 	err := res.UnmarshalGQL(v)
 	return res, graphql.ErrorOnPath(ctx, err)
 }

-func (ec *executionContext) marshalNJobState2githubᚗcomᚋClusterCockpitᚋccᚑbackendᚋpkgᚋschemaᚐJobState(ctx context.Context, sel ast.SelectionSet, v schema.JobState) graphql.Marshaler {
+func (ec *executionContext) marshalNJobState2githubᚗcomᚋClusterCockpitᚋccᚑlibᚋschemaᚐJobState(ctx context.Context, sel ast.SelectionSet, v schema.JobState) graphql.Marshaler {
 	return v
 }

@@ -22500,11 +22492,11 @@ func (ec *executionContext) marshalNJobsStatistics2ᚖgithubᚗcomᚋClusterCock
 	return ec._JobsStatistics(ctx, sel, v)
 }

-func (ec *executionContext) marshalNMetricConfig2githubᚗcomᚋClusterCockpitᚋccᚑbackendᚋpkgᚋschemaᚐMetricConfig(ctx context.Context, sel ast.SelectionSet, v schema.MetricConfig) graphql.Marshaler {
+func (ec *executionContext) marshalNMetricConfig2githubᚗcomᚋClusterCockpitᚋccᚑlibᚋschemaᚐMetricConfig(ctx context.Context, sel ast.SelectionSet, v schema.MetricConfig) graphql.Marshaler {
 	return ec._MetricConfig(ctx, sel, &v)
 }

-func (ec *executionContext) marshalNMetricConfig2ᚕgithubᚗcomᚋClusterCockpitᚋccᚑbackendᚋpkgᚋschemaᚐMetricConfigᚄ(ctx context.Context, sel ast.SelectionSet, v []schema.MetricConfig) graphql.Marshaler {
+func (ec *executionContext) marshalNMetricConfig2ᚕgithubᚗcomᚋClusterCockpitᚋccᚑlibᚋschemaᚐMetricConfigᚄ(ctx context.Context, sel ast.SelectionSet, v []schema.MetricConfig) graphql.Marshaler {
 	ret := make(graphql.Array, len(v))
 	var wg sync.WaitGroup
 	isLen1 := len(v) == 1
@@ -22528,7 +22520,7 @@ func (ec *executionContext) marshalNMetricConfig2ᚕgithubᚗcomᚋClusterCockpi
 			if !isLen1 {
 				defer wg.Done()
 			}
-			ret[i] = ec.marshalNMetricConfig2githubᚗcomᚋClusterCockpitᚋccᚑbackendᚋpkgᚋschemaᚐMetricConfig(ctx, sel, v[i])
+			ret[i] = ec.marshalNMetricConfig2githubᚗcomᚋClusterCockpitᚋccᚑlibᚋschemaᚐMetricConfig(ctx, sel, v[i])
 		}
 		if isLen1 {
 			f(i)
@@ -22666,13 +22658,13 @@ func (ec *executionContext) marshalNMetricHistoPoints2ᚖgithubᚗcomᚋClusterC
 	return ec._MetricHistoPoints(ctx, sel, v)
 }

-func (ec *executionContext) unmarshalNMetricScope2githubᚗcomᚋClusterCockpitᚋccᚑbackendᚋpkgᚋschemaᚐMetricScope(ctx context.Context, v any) (schema.MetricScope, error) {
+func (ec *executionContext) unmarshalNMetricScope2githubᚗcomᚋClusterCockpitᚋccᚑlibᚋschemaᚐMetricScope(ctx context.Context, v any) (schema.MetricScope, error) {
 	var res schema.MetricScope
 	err := res.UnmarshalGQL(v)
 	return res, graphql.ErrorOnPath(ctx, err)
 }

-func (ec *executionContext) marshalNMetricScope2githubᚗcomᚋClusterCockpitᚋccᚑbackendᚋpkgᚋschemaᚐMetricScope(ctx context.Context, sel ast.SelectionSet, v schema.MetricScope) graphql.Marshaler {
+func (ec *executionContext) marshalNMetricScope2githubᚗcomᚋClusterCockpitᚋccᚑlibᚋschemaᚐMetricScope(ctx context.Context, sel ast.SelectionSet, v schema.MetricScope) graphql.Marshaler {
 	return v
 }
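The JobState and MetricScope hunks above only re-point the package path; the mechanism underneath is gqlgen's custom-scalar contract, which the moved schema types must keep satisfying. A minimal sketch (illustrative method bodies, not cc-lib's actual implementation):

	// Any named type with these two methods can back a GraphQL scalar.
	package main

	import (
		"fmt"
		"io"
		"os"
		"strconv"
	)

	type JobState string

	// UnmarshalGQL maps a GraphQL input value onto the Go type.
	func (s *JobState) UnmarshalGQL(v any) error {
		str, ok := v.(string)
		if !ok {
			return fmt.Errorf("JobState must be a string, got %T", v)
		}
		*s = JobState(str)
		return nil
	}

	// MarshalGQL writes the value back out as a GraphQL literal.
	func (s JobState) MarshalGQL(w io.Writer) {
		io.WriteString(w, strconv.Quote(string(s)))
	}

	func main() {
		var s JobState
		_ = s.UnmarshalGQL("completed")
		s.MarshalGQL(os.Stdout)
	}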
@@ -22681,7 +22673,7 @@ func (ec *executionContext) unmarshalNMetricStatItem2ᚖgithubᚗcomᚋClusterCo
 	return &res, graphql.ErrorOnPath(ctx, err)
 }

-func (ec *executionContext) marshalNMetricStatistics2ᚖgithubᚗcomᚋClusterCockpitᚋccᚑbackendᚋpkgᚋschemaᚐMetricStatistics(ctx context.Context, sel ast.SelectionSet, v *schema.MetricStatistics) graphql.Marshaler {
+func (ec *executionContext) marshalNMetricStatistics2ᚖgithubᚗcomᚋClusterCockpitᚋccᚑlibᚋschemaᚐMetricStatistics(ctx context.Context, sel ast.SelectionSet, v *schema.MetricStatistics) graphql.Marshaler {
 	if v == nil {
 		if !graphql.HasFieldError(ctx, graphql.GetFieldContext(ctx)) {
 			ec.Errorf(ctx, "the requested element is null which the schema does not allow")
@@ -22691,17 +22683,17 @@ func (ec *executionContext) marshalNMetricStatistics2ᚖgithubᚗcomᚋClusterCo
 	return ec._MetricStatistics(ctx, sel, v)
 }

-func (ec *executionContext) marshalNMetricValue2githubᚗcomᚋClusterCockpitᚋccᚑbackendᚋpkgᚋschemaᚐMetricValue(ctx context.Context, sel ast.SelectionSet, v schema.MetricValue) graphql.Marshaler {
+func (ec *executionContext) marshalNMetricValue2githubᚗcomᚋClusterCockpitᚋccᚑlibᚋschemaᚐMetricValue(ctx context.Context, sel ast.SelectionSet, v schema.MetricValue) graphql.Marshaler {
 	return ec._MetricValue(ctx, sel, &v)
 }

-func (ec *executionContext) unmarshalNMonitoringState2githubᚗcomᚋClusterCockpitᚋccᚑbackendᚋpkgᚋschemaᚐNodeState(ctx context.Context, v any) (schema.NodeState, error) {
+func (ec *executionContext) unmarshalNMonitoringState2githubᚗcomᚋClusterCockpitᚋccᚑlibᚋschemaᚐNodeState(ctx context.Context, v any) (schema.NodeState, error) {
 	tmp, err := graphql.UnmarshalString(v)
 	res := schema.NodeState(tmp)
 	return res, graphql.ErrorOnPath(ctx, err)
 }

-func (ec *executionContext) marshalNMonitoringState2githubᚗcomᚋClusterCockpitᚋccᚑbackendᚋpkgᚋschemaᚐNodeState(ctx context.Context, sel ast.SelectionSet, v schema.NodeState) graphql.Marshaler {
+func (ec *executionContext) marshalNMonitoringState2githubᚗcomᚋClusterCockpitᚋccᚑlibᚋschemaᚐNodeState(ctx context.Context, sel ast.SelectionSet, v schema.NodeState) graphql.Marshaler {
 	res := graphql.MarshalString(string(v))
 	if res == graphql.Null {
 		if !graphql.HasFieldError(ctx, graphql.GetFieldContext(ctx)) {
@@ -22819,7 +22811,7 @@ func (ec *executionContext) marshalNNamedStatsWithScope2ᚖgithubᚗcomᚋCluste
 	return ec._NamedStatsWithScope(ctx, sel, v)
 }

-func (ec *executionContext) marshalNNode2ᚕᚖgithubᚗcomᚋClusterCockpitᚋccᚑbackendᚋpkgᚋschemaᚐNodeᚄ(ctx context.Context, sel ast.SelectionSet, v []*schema.Node) graphql.Marshaler {
+func (ec *executionContext) marshalNNode2ᚕᚖgithubᚗcomᚋClusterCockpitᚋccᚑlibᚋschemaᚐNodeᚄ(ctx context.Context, sel ast.SelectionSet, v []*schema.Node) graphql.Marshaler {
 	ret := make(graphql.Array, len(v))
 	var wg sync.WaitGroup
 	isLen1 := len(v) == 1
@@ -22843,7 +22835,7 @@ func (ec *executionContext) marshalNNode2ᚕᚖgithubᚗcomᚋClusterCockpitᚋc
 			if !isLen1 {
 				defer wg.Done()
 			}
-			ret[i] = ec.marshalNNode2ᚖgithubᚗcomᚋClusterCockpitᚋccᚑbackendᚋpkgᚋschemaᚐNode(ctx, sel, v[i])
+			ret[i] = ec.marshalNNode2ᚖgithubᚗcomᚋClusterCockpitᚋccᚑlibᚋschemaᚐNode(ctx, sel, v[i])
 		}
 		if isLen1 {
 			f(i)
@@ -22863,7 +22855,7 @@ func (ec *executionContext) marshalNNode2ᚕᚖgithubᚗcomᚋClusterCockpitᚋc
 	return ret
 }

-func (ec *executionContext) marshalNNode2ᚖgithubᚗcomᚋClusterCockpitᚋccᚑbackendᚋpkgᚋschemaᚐNode(ctx context.Context, sel ast.SelectionSet, v *schema.Node) graphql.Marshaler {
+func (ec *executionContext) marshalNNode2ᚖgithubᚗcomᚋClusterCockpitᚋccᚑlibᚋschemaᚐNode(ctx context.Context, sel ast.SelectionSet, v *schema.Node) graphql.Marshaler {
 	if v == nil {
 		if !graphql.HasFieldError(ctx, graphql.GetFieldContext(ctx)) {
 			ec.Errorf(ctx, "the requested element is null which the schema does not allow")
@@ -23029,17 +23021,17 @@ func (ec *executionContext) marshalNNodesResultList2ᚖgithubᚗcomᚋClusterCoc
 	return ec._NodesResultList(ctx, sel, v)
 }

-func (ec *executionContext) unmarshalNNullableFloat2githubᚗcomᚋClusterCockpitᚋccᚑbackendᚋpkgᚋschemaᚐFloat(ctx context.Context, v any) (schema.Float, error) {
+func (ec *executionContext) unmarshalNNullableFloat2githubᚗcomᚋClusterCockpitᚋccᚑlibᚋschemaᚐFloat(ctx context.Context, v any) (schema.Float, error) {
 	var res schema.Float
 	err := res.UnmarshalGQL(v)
 	return res, graphql.ErrorOnPath(ctx, err)
 }

-func (ec *executionContext) marshalNNullableFloat2githubᚗcomᚋClusterCockpitᚋccᚑbackendᚋpkgᚋschemaᚐFloat(ctx context.Context, sel ast.SelectionSet, v schema.Float) graphql.Marshaler {
+func (ec *executionContext) marshalNNullableFloat2githubᚗcomᚋClusterCockpitᚋccᚑlibᚋschemaᚐFloat(ctx context.Context, sel ast.SelectionSet, v schema.Float) graphql.Marshaler {
 	return v
 }

-func (ec *executionContext) unmarshalNNullableFloat2ᚕgithubᚗcomᚋClusterCockpitᚋccᚑbackendᚋpkgᚋschemaᚐFloatᚄ(ctx context.Context, v any) ([]schema.Float, error) {
+func (ec *executionContext) unmarshalNNullableFloat2ᚕgithubᚗcomᚋClusterCockpitᚋccᚑlibᚋschemaᚐFloatᚄ(ctx context.Context, v any) ([]schema.Float, error) {
 	var vSlice []any
 	if v != nil {
 		vSlice = graphql.CoerceList(v)
@@ -23048,7 +23040,7 @@ func (ec *executionContext) unmarshalNNullableFloat2ᚕgithubᚗcomᚋClusterCoc
 	res := make([]schema.Float, len(vSlice))
 	for i := range vSlice {
 		ctx := graphql.WithPathContext(ctx, graphql.NewPathWithIndex(i))
-		res[i], err = ec.unmarshalNNullableFloat2githubᚗcomᚋClusterCockpitᚋccᚑbackendᚋpkgᚋschemaᚐFloat(ctx, vSlice[i])
+		res[i], err = ec.unmarshalNNullableFloat2githubᚗcomᚋClusterCockpitᚋccᚑlibᚋschemaᚐFloat(ctx, vSlice[i])
 		if err != nil {
 			return nil, err
 		}
@@ -23056,10 +23048,10 @@ func (ec *executionContext) unmarshalNNullableFloat2ᚕgithubᚗcomᚋClusterCoc
 	return res, nil
 }

-func (ec *executionContext) marshalNNullableFloat2ᚕgithubᚗcomᚋClusterCockpitᚋccᚑbackendᚋpkgᚋschemaᚐFloatᚄ(ctx context.Context, sel ast.SelectionSet, v []schema.Float) graphql.Marshaler {
+func (ec *executionContext) marshalNNullableFloat2ᚕgithubᚗcomᚋClusterCockpitᚋccᚑlibᚋschemaᚐFloatᚄ(ctx context.Context, sel ast.SelectionSet, v []schema.Float) graphql.Marshaler {
 	ret := make(graphql.Array, len(v))
 	for i := range v {
-		ret[i] = ec.marshalNNullableFloat2githubᚗcomᚋClusterCockpitᚋccᚑbackendᚋpkgᚋschemaᚐFloat(ctx, sel, v[i])
+		ret[i] = ec.marshalNNullableFloat2githubᚗcomᚋClusterCockpitᚋccᚑlibᚋschemaᚐFloat(ctx, sel, v[i])
 	}

 	for _, e := range ret {
@@ -23071,7 +23063,7 @@ func (ec *executionContext) marshalNNullableFloat2ᚕgithubᚗcomᚋClusterCockp
 	return ret
 }

-func (ec *executionContext) marshalNResource2ᚕᚖgithubᚗcomᚋClusterCockpitᚋccᚑbackendᚋpkgᚋschemaᚐResourceᚄ(ctx context.Context, sel ast.SelectionSet, v []*schema.Resource) graphql.Marshaler {
+func (ec *executionContext) marshalNResource2ᚕᚖgithubᚗcomᚋClusterCockpitᚋccᚑlibᚋschemaᚐResourceᚄ(ctx context.Context, sel ast.SelectionSet, v []*schema.Resource) graphql.Marshaler {
 	ret := make(graphql.Array, len(v))
 	var wg sync.WaitGroup
 	isLen1 := len(v) == 1
@@ -23095,7 +23087,7 @@ func (ec *executionContext) marshalNResource2ᚕᚖgithubᚗcomᚋClusterCockpit
 			if !isLen1 {
 				defer wg.Done()
 			}
-			ret[i] = ec.marshalNResource2ᚖgithubᚗcomᚋClusterCockpitᚋccᚑbackendᚋpkgᚋschemaᚐResource(ctx, sel, v[i])
+			ret[i] = ec.marshalNResource2ᚖgithubᚗcomᚋClusterCockpitᚋccᚑlibᚋschemaᚐResource(ctx, sel, v[i])
 		}
 		if isLen1 {
 			f(i)
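The "NullableFloat" functions above wrap cc-lib's schema.Float, whose point is that a missing measurement marshals as GraphQL null rather than a number. A hedged sketch of the idea (the real cc-lib implementation may differ in detail):

	package main

	import (
		"fmt"
		"io"
		"math"
		"os"
	)

	type Float float64

	// MarshalGQL renders NaN (a missing sample) as null.
	func (f Float) MarshalGQL(w io.Writer) {
		if math.IsNaN(float64(f)) {
			io.WriteString(w, "null")
			return
		}
		fmt.Fprintf(w, "%g", float64(f))
	}

	func main() {
		Float(42.5).MarshalGQL(os.Stdout)
		io.WriteString(os.Stdout, " ")
		Float(math.NaN()).MarshalGQL(os.Stdout) // prints: 42.5 null
	}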
@@ -23115,7 +23107,7 @@ func (ec *executionContext) marshalNResource2ᚕᚖgithubᚗcomᚋClusterCockpit
 	return ret
 }

-func (ec *executionContext) marshalNResource2ᚖgithubᚗcomᚋClusterCockpitᚋccᚑbackendᚋpkgᚋschemaᚐResource(ctx context.Context, sel ast.SelectionSet, v *schema.Resource) graphql.Marshaler {
+func (ec *executionContext) marshalNResource2ᚖgithubᚗcomᚋClusterCockpitᚋccᚑlibᚋschemaᚐResource(ctx context.Context, sel ast.SelectionSet, v *schema.Resource) graphql.Marshaler {
 	if v == nil {
 		if !graphql.HasFieldError(ctx, graphql.GetFieldContext(ctx)) {
 			ec.Errorf(ctx, "the requested element is null which the schema does not allow")
@@ -23179,7 +23171,7 @@ func (ec *executionContext) marshalNScopedStats2ᚖgithubᚗcomᚋClusterCockpit
 	return ec._ScopedStats(ctx, sel, v)
 }

-func (ec *executionContext) marshalNSeries2githubᚗcomᚋClusterCockpitᚋccᚑbackendᚋpkgᚋschemaᚐSeries(ctx context.Context, sel ast.SelectionSet, v schema.Series) graphql.Marshaler {
+func (ec *executionContext) marshalNSeries2githubᚗcomᚋClusterCockpitᚋccᚑlibᚋschemaᚐSeries(ctx context.Context, sel ast.SelectionSet, v schema.Series) graphql.Marshaler {
 	return ec._Series(ctx, sel, &v)
 }

@@ -23240,7 +23232,7 @@ func (ec *executionContext) marshalNString2ᚕstringᚄ(ctx context.Context, sel
 	return ret
 }

-func (ec *executionContext) marshalNSubCluster2ᚕᚖgithubᚗcomᚋClusterCockpitᚋccᚑbackendᚋpkgᚋschemaᚐSubClusterᚄ(ctx context.Context, sel ast.SelectionSet, v []*schema.SubCluster) graphql.Marshaler {
+func (ec *executionContext) marshalNSubCluster2ᚕᚖgithubᚗcomᚋClusterCockpitᚋccᚑlibᚋschemaᚐSubClusterᚄ(ctx context.Context, sel ast.SelectionSet, v []*schema.SubCluster) graphql.Marshaler {
 	ret := make(graphql.Array, len(v))
 	var wg sync.WaitGroup
 	isLen1 := len(v) == 1
@@ -23264,7 +23256,7 @@ func (ec *executionContext) marshalNSubCluster2ᚕᚖgithubᚗcomᚋClusterCockp
 			if !isLen1 {
 				defer wg.Done()
 			}
-			ret[i] = ec.marshalNSubCluster2ᚖgithubᚗcomᚋClusterCockpitᚋccᚑbackendᚋpkgᚋschemaᚐSubCluster(ctx, sel, v[i])
+			ret[i] = ec.marshalNSubCluster2ᚖgithubᚗcomᚋClusterCockpitᚋccᚑlibᚋschemaᚐSubCluster(ctx, sel, v[i])
 		}
 		if isLen1 {
 			f(i)
@@ -23284,7 +23276,7 @@ func (ec *executionContext) marshalNSubCluster2ᚕᚖgithubᚗcomᚋClusterCockp
 	return ret
 }

-func (ec *executionContext) marshalNSubCluster2ᚖgithubᚗcomᚋClusterCockpitᚋccᚑbackendᚋpkgᚋschemaᚐSubCluster(ctx context.Context, sel ast.SelectionSet, v *schema.SubCluster) graphql.Marshaler {
+func (ec *executionContext) marshalNSubCluster2ᚖgithubᚗcomᚋClusterCockpitᚋccᚑlibᚋschemaᚐSubCluster(ctx context.Context, sel ast.SelectionSet, v *schema.SubCluster) graphql.Marshaler {
 	if v == nil {
 		if !graphql.HasFieldError(ctx, graphql.GetFieldContext(ctx)) {
 			ec.Errorf(ctx, "the requested element is null which the schema does not allow")
@@ -23294,7 +23286,7 @@ func (ec *executionContext) marshalNSubCluster2ᚖgithubᚗcomᚋClusterCockpit
 	return ec._SubCluster(ctx, sel, v)
 }

-func (ec *executionContext) marshalNSubClusterConfig2ᚕᚖgithubᚗcomᚋClusterCockpitᚋccᚑbackendᚋpkgᚋschemaᚐSubClusterConfigᚄ(ctx context.Context, sel ast.SelectionSet, v []*schema.SubClusterConfig) graphql.Marshaler {
+func (ec *executionContext) marshalNSubClusterConfig2ᚕᚖgithubᚗcomᚋClusterCockpitᚋccᚑlibᚋschemaᚐSubClusterConfigᚄ(ctx context.Context, sel ast.SelectionSet, v []*schema.SubClusterConfig) graphql.Marshaler {
 	ret := make(graphql.Array, len(v))
 	var wg sync.WaitGroup
 	isLen1 := len(v) == 1
@@ -23318,7 +23310,7 @@ func (ec *executionContext) marshalNSubClusterConfig2ᚕᚖgithubᚗcomᚋCluste
 			if !isLen1 {
 				defer wg.Done()
 			}
-			ret[i] = ec.marshalNSubClusterConfig2ᚖgithubᚗcomᚋClusterCockpitᚋccᚑbackendᚋpkgᚋschemaᚐSubClusterConfig(ctx, sel, v[i])
+			ret[i] = ec.marshalNSubClusterConfig2ᚖgithubᚗcomᚋClusterCockpitᚋccᚑlibᚋschemaᚐSubClusterConfig(ctx, sel, v[i])
 		}
 		if isLen1 {
 			f(i)
@@ -23338,7 +23330,7 @@ func (ec *executionContext) marshalNSubClusterConfig2ᚕᚖgithubᚗcomᚋCluste
 	return ret
 }

-func (ec *executionContext) marshalNSubClusterConfig2ᚖgithubᚗcomᚋClusterCockpitᚋccᚑbackendᚋpkgᚋschemaᚐSubClusterConfig(ctx context.Context, sel ast.SelectionSet, v *schema.SubClusterConfig) graphql.Marshaler {
+func (ec *executionContext) marshalNSubClusterConfig2ᚖgithubᚗcomᚋClusterCockpitᚋccᚑlibᚋschemaᚐSubClusterConfig(ctx context.Context, sel ast.SelectionSet, v *schema.SubClusterConfig) graphql.Marshaler {
 	if v == nil {
 		if !graphql.HasFieldError(ctx, graphql.GetFieldContext(ctx)) {
 			ec.Errorf(ctx, "the requested element is null which the schema does not allow")
@@ -23348,11 +23340,11 @@ func (ec *executionContext) marshalNSubClusterConfig2ᚖgithubᚗcomᚋClusterCo
 	return ec._SubClusterConfig(ctx, sel, v)
 }

-func (ec *executionContext) marshalNTag2githubᚗcomᚋClusterCockpitᚋccᚑbackendᚋpkgᚋschemaᚐTag(ctx context.Context, sel ast.SelectionSet, v schema.Tag) graphql.Marshaler {
+func (ec *executionContext) marshalNTag2githubᚗcomᚋClusterCockpitᚋccᚑlibᚋschemaᚐTag(ctx context.Context, sel ast.SelectionSet, v schema.Tag) graphql.Marshaler {
 	return ec._Tag(ctx, sel, &v)
 }

-func (ec *executionContext) marshalNTag2ᚕᚖgithubᚗcomᚋClusterCockpitᚋccᚑbackendᚋpkgᚋschemaᚐTagᚄ(ctx context.Context, sel ast.SelectionSet, v []*schema.Tag) graphql.Marshaler {
+func (ec *executionContext) marshalNTag2ᚕᚖgithubᚗcomᚋClusterCockpitᚋccᚑlibᚋschemaᚐTagᚄ(ctx context.Context, sel ast.SelectionSet, v []*schema.Tag) graphql.Marshaler {
 	ret := make(graphql.Array, len(v))
 	var wg sync.WaitGroup
 	isLen1 := len(v) == 1
@@ -23376,7 +23368,7 @@ func (ec *executionContext) marshalNTag2ᚕᚖgithubᚗcomᚋClusterCockpitᚋcc
 			if !isLen1 {
 				defer wg.Done()
 			}
-			ret[i] = ec.marshalNTag2ᚖgithubᚗcomᚋClusterCockpitᚋccᚑbackendᚋpkgᚋschemaᚐTag(ctx, sel, v[i])
+			ret[i] = ec.marshalNTag2ᚖgithubᚗcomᚋClusterCockpitᚋccᚑlibᚋschemaᚐTag(ctx, sel, v[i])
 		}
 		if isLen1 {
 			f(i)
@@ -23396,7 +23388,7 @@ func (ec *executionContext) marshalNTag2ᚕᚖgithubᚗcomᚋClusterCockpitᚋcc
 	return ret
 }

-func (ec *executionContext) marshalNTag2ᚖgithubᚗcomᚋClusterCockpitᚋccᚑbackendᚋpkgᚋschemaᚐTag(ctx context.Context, sel ast.SelectionSet, v *schema.Tag) graphql.Marshaler {
+func (ec *executionContext) marshalNTag2ᚖgithubᚗcomᚋClusterCockpitᚋccᚑlibᚋschemaᚐTag(ctx context.Context, sel ast.SelectionSet, v *schema.Tag) graphql.Marshaler {
 	if v == nil {
 		if !graphql.HasFieldError(ctx, graphql.GetFieldContext(ctx)) {
 			ec.Errorf(ctx, "the requested element is null which the schema does not allow")
@@ -23452,11 +23444,11 @@ func (ec *executionContext) marshalNTimeWeights2ᚖgithubᚗcomᚋClusterCockpit
 	return ec._TimeWeights(ctx, sel, v)
 }

-func (ec *executionContext) marshalNTopology2githubᚗcomᚋClusterCockpitᚋccᚑbackendᚋpkgᚋschemaᚐTopology(ctx context.Context, sel ast.SelectionSet, v schema.Topology) graphql.Marshaler {
+func (ec *executionContext) marshalNTopology2githubᚗcomᚋClusterCockpitᚋccᚑlibᚋschemaᚐTopology(ctx context.Context, sel ast.SelectionSet, v schema.Topology) graphql.Marshaler {
 	return ec._Topology(ctx, sel, &v)
 }

-func (ec *executionContext) marshalNUnit2githubᚗcomᚋClusterCockpitᚋccᚑbackendᚋpkgᚋschemaᚐUnit(ctx context.Context, sel ast.SelectionSet, v schema.Unit) graphql.Marshaler {
+func (ec *executionContext) marshalNUnit2githubᚗcomᚋClusterCockpitᚋccᚑlibᚋschemaᚐUnit(ctx context.Context, sel ast.SelectionSet, v schema.Unit) graphql.Marshaler {
 	return ec._Unit(ctx, sel, &v)
 }

@@ -23713,7 +23705,7 @@ func (ec *executionContext) marshalN__TypeKind2string(ctx context.Context, sel a
 	return res
 }

-func (ec *executionContext) marshalOAccelerator2ᚕᚖgithubᚗcomᚋClusterCockpitᚋccᚑbackendᚋpkgᚋschemaᚐAcceleratorᚄ(ctx context.Context, sel ast.SelectionSet, v []*schema.Accelerator) graphql.Marshaler {
+func (ec *executionContext) marshalOAccelerator2ᚕᚖgithubᚗcomᚋClusterCockpitᚋccᚑlibᚋschemaᚐAcceleratorᚄ(ctx context.Context, sel ast.SelectionSet, v []*schema.Accelerator) graphql.Marshaler {
 	if v == nil {
 		return graphql.Null
 	}
@@ -23740,7 +23732,7 @@ func (ec *executionContext) marshalOAccelerator2ᚕᚖgithubᚗcomᚋClusterCock
 			if !isLen1 {
 				defer wg.Done()
 			}
-			ret[i] = ec.marshalNAccelerator2ᚖgithubᚗcomᚋClusterCockpitᚋccᚑbackendᚋpkgᚋschemaᚐAccelerator(ctx, sel, v[i])
+			ret[i] = ec.marshalNAccelerator2ᚖgithubᚗcomᚋClusterCockpitᚋccᚑlibᚋschemaᚐAccelerator(ctx, sel, v[i])
 		}
 		if isLen1 {
 			f(i)
@@ -24107,7 +24099,7 @@ func (ec *executionContext) marshalOInt2ᚖint(ctx context.Context, sel ast.Sele
 	return res
 }

-func (ec *executionContext) unmarshalOIntRange2ᚖgithubᚗcomᚋClusterCockpitᚋccᚑbackendᚋpkgᚋschemaᚐIntRange(ctx context.Context, v any) (*schema.IntRange, error) {
+func (ec *executionContext) unmarshalOIntRange2ᚖgithubᚗcomᚋClusterCockpitᚋccᚑlibᚋschemaᚐIntRange(ctx context.Context, v any) (*schema.IntRange, error) {
 	if v == nil {
 		return nil, nil
 	}
@@ -24115,7 +24107,7 @@ func (ec *executionContext) unmarshalOIntRange2ᚖgithubᚗcomᚋClusterCockpit
 	return &res, graphql.ErrorOnPath(ctx, err)
 }

-func (ec *executionContext) marshalOJob2ᚖgithubᚗcomᚋClusterCockpitᚋccᚑbackendᚋpkgᚋschemaᚐJob(ctx context.Context, sel ast.SelectionSet, v *schema.Job) graphql.Marshaler {
+func (ec *executionContext) marshalOJob2ᚖgithubᚗcomᚋClusterCockpitᚋccᚑlibᚋschemaᚐJob(ctx context.Context, sel ast.SelectionSet, v *schema.Job) graphql.Marshaler {
 	if v == nil {
 		return graphql.Null
 	}
@@ -24149,7 +24141,7 @@ func (ec *executionContext) marshalOJobLinkResultList2ᚖgithubᚗcomᚋClusterC
 	return ec._JobLinkResultList(ctx, sel, v)
 }

-func (ec *executionContext) unmarshalOJobState2ᚕgithubᚗcomᚋClusterCockpitᚋccᚑbackendᚋpkgᚋschemaᚐJobStateᚄ(ctx context.Context, v any) ([]schema.JobState, error) {
+func (ec *executionContext) unmarshalOJobState2ᚕgithubᚗcomᚋClusterCockpitᚋccᚑlibᚋschemaᚐJobStateᚄ(ctx context.Context, v any) ([]schema.JobState, error) {
 	if v == nil {
 		return nil, nil
 	}
@@ -24161,7 +24153,7 @@ func (ec *executionContext) unmarshalOJobState2ᚕgithubᚗcomᚋClusterCockpit
 	res := make([]schema.JobState, len(vSlice))
 	for i := range vSlice {
 		ctx := graphql.WithPathContext(ctx, graphql.NewPathWithIndex(i))
-		res[i], err = ec.unmarshalNJobState2githubᚗcomᚋClusterCockpitᚋccᚑbackendᚋpkgᚋschemaᚐJobState(ctx, vSlice[i])
+		res[i], err = ec.unmarshalNJobState2githubᚗcomᚋClusterCockpitᚋccᚑlibᚋschemaᚐJobState(ctx, vSlice[i])
 		if err != nil {
 			return nil, err
 		}
@@ -24169,13 +24161,13 @@ func (ec *executionContext) unmarshalOJobState2ᚕgithubᚗcomᚋClusterCockpit
 	return res, nil
 }

-func (ec *executionContext) marshalOJobState2ᚕgithubᚗcomᚋClusterCockpitᚋccᚑbackendᚋpkgᚋschemaᚐJobStateᚄ(ctx context.Context, sel ast.SelectionSet, v []schema.JobState) graphql.Marshaler {
+func (ec *executionContext) marshalOJobState2ᚕgithubᚗcomᚋClusterCockpitᚋccᚑlibᚋschemaᚐJobStateᚄ(ctx context.Context, sel ast.SelectionSet, v []schema.JobState) graphql.Marshaler {
 	if v == nil {
 		return graphql.Null
 	}
 	ret := make(graphql.Array, len(v))
 	for i := range v {
-		ret[i] = ec.marshalNJobState2githubᚗcomᚋClusterCockpitᚋccᚑbackendᚋpkgᚋschemaᚐJobState(ctx, sel, v[i])
+		ret[i] = ec.marshalNJobState2githubᚗcomᚋClusterCockpitᚋccᚑlibᚋschemaᚐJobState(ctx, sel, v[i])
 	}

 	for _, e := range ret {
@@ -24234,7 +24226,7 @@ func (ec *executionContext) marshalOMetricHistoPoint2ᚕᚖgithubᚗcomᚋCluste
 	return ret
 }

-func (ec *executionContext) unmarshalOMetricScope2ᚕgithubᚗcomᚋClusterCockpitᚋccᚑbackendᚋpkgᚋschemaᚐMetricScopeᚄ(ctx context.Context, v any) ([]schema.MetricScope, error) {
+func (ec *executionContext) unmarshalOMetricScope2ᚕgithubᚗcomᚋClusterCockpitᚋccᚑlibᚋschemaᚐMetricScopeᚄ(ctx context.Context, v any) ([]schema.MetricScope, error) {
 	if v == nil {
 		return nil, nil
 	}
@@ -24246,7 +24238,7 @@ func (ec *executionContext) unmarshalOMetricScope2ᚕgithubᚗcomᚋClusterCockp
 	res := make([]schema.MetricScope, len(vSlice))
 	for i := range vSlice {
 		ctx := graphql.WithPathContext(ctx, graphql.NewPathWithIndex(i))
-		res[i], err = ec.unmarshalNMetricScope2githubᚗcomᚋClusterCockpitᚋccᚑbackendᚋpkgᚋschemaᚐMetricScope(ctx, vSlice[i])
+		res[i], err = ec.unmarshalNMetricScope2githubᚗcomᚋClusterCockpitᚋccᚑlibᚋschemaᚐMetricScope(ctx, vSlice[i])
 		if err != nil {
 			return nil, err
 		}
@@ -24254,13 +24246,13 @@ func (ec *executionContext) unmarshalOMetricScope2ᚕgithubᚗcomᚋClusterCockp
 	return res, nil
 }

-func (ec *executionContext) marshalOMetricScope2ᚕgithubᚗcomᚋClusterCockpitᚋccᚑbackendᚋpkgᚋschemaᚐMetricScopeᚄ(ctx context.Context, sel ast.SelectionSet, v []schema.MetricScope) graphql.Marshaler {
+func (ec *executionContext) marshalOMetricScope2ᚕgithubᚗcomᚋClusterCockpitᚋccᚑlibᚋschemaᚐMetricScopeᚄ(ctx context.Context, sel ast.SelectionSet, v []schema.MetricScope) graphql.Marshaler {
 	if v == nil {
 		return graphql.Null
 	}
 	ret := make(graphql.Array, len(v))
 	for i := range v {
-		ret[i] = ec.marshalNMetricScope2githubᚗcomᚋClusterCockpitᚋccᚑbackendᚋpkgᚋschemaᚐMetricScope(ctx, sel, v[i])
+		ret[i] = ec.marshalNMetricScope2githubᚗcomᚋClusterCockpitᚋccᚑlibᚋschemaᚐMetricScope(ctx, sel, v[i])
 	}

 	for _, e := range ret {
@@ -24292,11 +24284,11 @@ func (ec *executionContext) unmarshalOMetricStatItem2ᚕᚖgithubᚗcomᚋCluste
 	return res, nil
 }

-func (ec *executionContext) marshalOMetricStatistics2githubᚗcomᚋClusterCockpitᚋccᚑbackendᚋpkgᚋschemaᚐMetricStatistics(ctx context.Context, sel ast.SelectionSet, v schema.MetricStatistics) graphql.Marshaler {
+func (ec *executionContext) marshalOMetricStatistics2githubᚗcomᚋClusterCockpitᚋccᚑlibᚋschemaᚐMetricStatistics(ctx context.Context, sel ast.SelectionSet, v schema.MetricStatistics) graphql.Marshaler {
 	return ec._MetricStatistics(ctx, sel, &v)
 }

-func (ec *executionContext) unmarshalOMonitoringState2ᚖgithubᚗcomᚋClusterCockpitᚋccᚑbackendᚋpkgᚋschemaᚐNodeState(ctx context.Context, v any) (*schema.NodeState, error) {
+func (ec *executionContext) unmarshalOMonitoringState2ᚖgithubᚗcomᚋClusterCockpitᚋccᚑlibᚋschemaᚐNodeState(ctx context.Context, v any) (*schema.NodeState, error) {
 	if v == nil {
 		return nil, nil
 	}
@@ -24305,7 +24297,7 @@ func (ec *executionContext) unmarshalOMonitoringState2ᚖgithubᚗcomᚋClusterC
 	return &res, graphql.ErrorOnPath(ctx, err)
 }

-func (ec *executionContext) marshalOMonitoringState2ᚖgithubᚗcomᚋClusterCockpitᚋccᚑbackendᚋpkgᚋschemaᚐNodeState(ctx context.Context, sel ast.SelectionSet, v *schema.NodeState) graphql.Marshaler {
+func (ec *executionContext) marshalOMonitoringState2ᚖgithubᚗcomᚋClusterCockpitᚋccᚑlibᚋschemaᚐNodeState(ctx context.Context, sel ast.SelectionSet, v *schema.NodeState) graphql.Marshaler {
 	if v == nil {
 		return graphql.Null
 	}
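A recurring shape in the hunks above is gqlgen's generated list marshaler: elements are marshaled concurrently with a sync.WaitGroup, except that a single-element slice skips goroutine startup entirely. Reduced to its essentials as a runnable sketch:

	package main

	import (
		"fmt"
		"sync"
	)

	// marshalList mirrors the generated pattern: one goroutine per element,
	// short-circuited to a direct call when len(items) == 1.
	func marshalList(items []int, marshal func(int) string) []string {
		ret := make([]string, len(items))
		var wg sync.WaitGroup
		isLen1 := len(items) == 1
		if !isLen1 {
			wg.Add(len(items))
		}
		for i := range items {
			f := func(i int) {
				if !isLen1 {
					defer wg.Done()
				}
				ret[i] = marshal(items[i])
			}
			if isLen1 {
				f(i)
			} else {
				go f(i)
			}
		}
		wg.Wait()
		return ret
	}

	func main() {
		fmt.Println(marshalList([]int{1, 2, 3}, func(v int) string { return fmt.Sprint(v * v) }))
	}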
@@ -24313,7 +24305,7 @@ func (ec *executionContext) marshalOMonitoringState2ᚖgithubᚗcomᚋClusterCoc
 	return res
 }

-func (ec *executionContext) marshalONode2ᚖgithubᚗcomᚋClusterCockpitᚋccᚑbackendᚋpkgᚋschemaᚐNode(ctx context.Context, sel ast.SelectionSet, v *schema.Node) graphql.Marshaler {
+func (ec *executionContext) marshalONode2ᚖgithubᚗcomᚋClusterCockpitᚋccᚑlibᚋschemaᚐNode(ctx context.Context, sel ast.SelectionSet, v *schema.Node) graphql.Marshaler {
 	if v == nil {
 		return graphql.Null
 	}
@@ -24372,7 +24364,7 @@ func (ec *executionContext) unmarshalOPageRequest2ᚖgithubᚗcomᚋClusterCockp
 	return &res, graphql.ErrorOnPath(ctx, err)
 }

-func (ec *executionContext) marshalOSeries2ᚕgithubᚗcomᚋClusterCockpitᚋccᚑbackendᚋpkgᚋschemaᚐSeriesᚄ(ctx context.Context, sel ast.SelectionSet, v []schema.Series) graphql.Marshaler {
+func (ec *executionContext) marshalOSeries2ᚕgithubᚗcomᚋClusterCockpitᚋccᚑlibᚋschemaᚐSeriesᚄ(ctx context.Context, sel ast.SelectionSet, v []schema.Series) graphql.Marshaler {
 	if v == nil {
 		return graphql.Null
 	}
@@ -24399,7 +24391,7 @@ func (ec *executionContext) marshalOSeries2ᚕgithubᚗcomᚋClusterCockpitᚋcc
 			if !isLen1 {
 				defer wg.Done()
 			}
-			ret[i] = ec.marshalNSeries2githubᚗcomᚋClusterCockpitᚋccᚑbackendᚋpkgᚋschemaᚐSeries(ctx, sel, v[i])
+			ret[i] = ec.marshalNSeries2githubᚗcomᚋClusterCockpitᚋccᚑlibᚋschemaᚐSeries(ctx, sel, v[i])
 		}
 		if isLen1 {
 			f(i)
@@ -24435,7 +24427,7 @@ func (ec *executionContext) marshalOSortByAggregate2ᚖgithubᚗcomᚋClusterCoc
 	return v
 }

-func (ec *executionContext) marshalOStatsSeries2ᚖgithubᚗcomᚋClusterCockpitᚋccᚑbackendᚋpkgᚋschemaᚐStatsSeries(ctx context.Context, sel ast.SelectionSet, v *schema.StatsSeries) graphql.Marshaler {
+func (ec *executionContext) marshalOStatsSeries2ᚖgithubᚗcomᚋClusterCockpitᚋccᚑlibᚋschemaᚐStatsSeries(ctx context.Context, sel ast.SelectionSet, v *schema.StatsSeries) graphql.Marshaler {
 	if v == nil {
 		return graphql.Null
 	}
@@ -24530,7 +24522,7 @@ func (ec *executionContext) marshalOTime2ᚖtimeᚐTime(ctx context.Context, sel
 	return res
 }

-func (ec *executionContext) unmarshalOTimeRange2ᚖgithubᚗcomᚋClusterCockpitᚋccᚑbackendᚋpkgᚋschemaᚐTimeRange(ctx context.Context, v any) (*schema.TimeRange, error) {
+func (ec *executionContext) unmarshalOTimeRange2ᚖgithubᚗcomᚋClusterCockpitᚋccᚑlibᚋschemaᚐTimeRange(ctx context.Context, v any) (*schema.TimeRange, error) {
 	if v == nil {
 		return nil, nil
 	}
@@ -24538,7 +24530,7 @@ func (ec *executionContext) unmarshalOTimeRange2ᚖgithubᚗcomᚋClusterCockpit
 	return &res, graphql.ErrorOnPath(ctx, err)
 }

-func (ec *executionContext) marshalOUnit2githubᚗcomᚋClusterCockpitᚋccᚑbackendᚋpkgᚋschemaᚐUnit(ctx context.Context, sel ast.SelectionSet, v schema.Unit) graphql.Marshaler {
+func (ec *executionContext) marshalOUnit2githubᚗcomᚋClusterCockpitᚋccᚑlibᚋschemaᚐUnit(ctx context.Context, sel ast.SelectionSet, v schema.Unit) graphql.Marshaler {
 	return ec._Unit(ctx, sel, &v)
 }

diff --git a/internal/graph/model/models.go b/internal/graph/model/models.go
index 8047957..c943700 100644
--- a/internal/graph/model/models.go
+++ b/internal/graph/model/models.go
@@ -1,5 +1,5 @@
 // Copyright (C) NHR@FAU, University Erlangen-Nuremberg.
-// All rights reserved.
+// All rights reserved. This file is part of cc-backend.
 // Use of this source code is governed by a MIT-style
 // license that can be found in the LICENSE file.
 package model
diff --git a/internal/graph/model/models_gen.go b/internal/graph/model/models_gen.go
index fc05280..5a32ac9 100644
--- a/internal/graph/model/models_gen.go
+++ b/internal/graph/model/models_gen.go
@@ -8,7 +8,7 @@ import (
 	"strconv"
 	"time"

-	"github.com/ClusterCockpit/cc-backend/pkg/schema"
+	"github.com/ClusterCockpit/cc-lib/schema"
 )

 type Count struct {
@@ -170,7 +170,6 @@ type NamedStatsWithScope struct {
 type NodeFilter struct {
 	Hostname    *StringInput      `json:"hostname,omitempty"`
 	Cluster     *StringInput      `json:"cluster,omitempty"`
-	SubCluster  *StringInput      `json:"subCluster,omitempty"`
 	NodeState   *string           `json:"nodeState,omitempty"`
 	HealthState *schema.NodeState `json:"healthState,omitempty"`
 }
diff --git a/internal/graph/resolver.go b/internal/graph/resolver.go
index 0f4dc06..990014c 100644
--- a/internal/graph/resolver.go
+++ b/internal/graph/resolver.go
@@ -4,7 +4,7 @@ import (
 	"sync"

 	"github.com/ClusterCockpit/cc-backend/internal/repository"
-	"github.com/ClusterCockpit/cc-backend/pkg/log"
+	cclog "github.com/ClusterCockpit/cc-lib/ccLogger"
 	"github.com/jmoiron/sqlx"
 )

@@ -32,7 +32,7 @@ func Init() {

 func GetResolverInstance() *Resolver {
 	if resolverInstance == nil {
-		log.Fatal("Authentication module not initialized!")
+		cclog.Fatal("Authentication module not initialized!")
 	}

 	return resolverInstance
diff --git a/internal/graph/schema.resolvers.go b/internal/graph/schema.resolvers.go
index 244f087..78a76ef 100644
--- a/internal/graph/schema.resolvers.go
+++ b/internal/graph/schema.resolvers.go
@@ -20,8 +20,8 @@ import (
 	"github.com/ClusterCockpit/cc-backend/internal/metricDataDispatcher"
 	"github.com/ClusterCockpit/cc-backend/internal/repository"
 	"github.com/ClusterCockpit/cc-backend/pkg/archive"
-	"github.com/ClusterCockpit/cc-backend/pkg/log"
-	"github.com/ClusterCockpit/cc-backend/pkg/schema"
+	cclog "github.com/ClusterCockpit/cc-lib/ccLogger"
+	"github.com/ClusterCockpit/cc-lib/schema"
 )

 // Partitions is the resolver for the partitions field.
@@ -54,7 +54,7 @@ func (r *jobResolver) ConcurrentJobs(ctx context.Context, obj *schema.Job) (*mod
 func (r *jobResolver) Footprint(ctx context.Context, obj *schema.Job) ([]*model.FootprintValue, error) {
 	rawFootprint, err := r.Repo.FetchFootprint(obj)
 	if err != nil {
-		log.Warn("Error while fetching job footprint data")
+		cclog.Warn("Error while fetching job footprint data")
 		return nil, err
 	}

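Across the resolver files the port swaps pkg/log for cc-lib's logger under the alias cclog, so call sites change prefix but keep their shapes (Warn, Warnf, Error, Errorf, Info, Infof, Fatal, all used throughout this patch). The idiom, as a runnable sketch:

	package main

	import (
		cclog "github.com/ClusterCockpit/cc-lib/ccLogger"
	)

	func main() {
		// Same call shapes as the former pkg/log package; only the
		// import path and identifier prefix differ.
		cclog.Warn("Error while fetching job footprint data")
		cclog.Warnf("Not authorized to create tag with scope: %s", "global")
	}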
@@ -79,7 +79,7 @@ func (r *jobResolver) Footprint(ctx context.Context, obj *schema.Job) ([]*model.
 func (r *jobResolver) EnergyFootprint(ctx context.Context, obj *schema.Job) ([]*model.EnergyFootprintValue, error) {
 	rawEnergyFootprint, err := r.Repo.FetchEnergyFootprint(obj)
 	if err != nil {
-		log.Warn("Error while fetching job energy footprint data")
+		cclog.Warn("Error while fetching job energy footprint data")
 		return nil, err
 	}

@@ -143,12 +143,12 @@ func (r *mutationResolver) CreateTag(ctx context.Context, typeArg string, name s
 		// Create in DB
 		id, err := r.Repo.CreateTag(typeArg, name, scope)
 		if err != nil {
-			log.Warn("Error while creating tag")
+			cclog.Warn("Error while creating tag")
 			return nil, err
 		}
 		return &schema.Tag{ID: id, Type: typeArg, Name: name, Scope: scope}, nil
 	} else {
-		log.Warnf("Not authorized to create tag with scope: %s", scope)
+		cclog.Warnf("Not authorized to create tag with scope: %s", scope)
 		return nil, fmt.Errorf("not authorized to create tag with scope: %s", scope)
 	}
 }
@@ -168,7 +168,7 @@ func (r *mutationResolver) AddTagsToJob(ctx context.Context, job string, tagIds
 	jid, err := strconv.ParseInt(job, 10, 64)
 	if err != nil {
-		log.Warn("Error while adding tag to job")
+		cclog.Warn("Error while adding tag to job")
 		return nil, err
 	}

@@ -177,14 +177,14 @@ func (r *mutationResolver) AddTagsToJob(ctx context.Context, job string, tagIds
 		// Get ID
 		tid, err := strconv.ParseInt(tagId, 10, 64)
 		if err != nil {
-			log.Warn("Error while parsing tag id")
+			cclog.Warn("Error while parsing tag id")
 			return nil, err
 		}

 		// Test Exists
 		_, _, tscope, exists := r.Repo.TagInfo(tid)
 		if !exists {
-			log.Warnf("Tag does not exist (ID): %d", tid)
+			cclog.Warnf("Tag does not exist (ID): %d", tid)
 			return nil, fmt.Errorf("tag does not exist (ID): %d", tid)
 		}

@@ -194,11 +194,11 @@ func (r *mutationResolver) AddTagsToJob(ctx context.Context, job string, tagIds
 			user.Username == tscope {
 			// Add to Job
 			if tags, err = r.Repo.AddTag(user, jid, tid); err != nil {
-				log.Warn("Error while adding tag")
+				cclog.Warn("Error while adding tag")
 				return nil, err
 			}
 		} else {
-			log.Warnf("Not authorized to add tag: %d", tid)
+			cclog.Warnf("Not authorized to add tag: %d", tid)
 			return nil, fmt.Errorf("not authorized to add tag: %d", tid)
 		}
 	}
@@ -215,7 +215,7 @@ func (r *mutationResolver) RemoveTagsFromJob(ctx context.Context, job string, ta
 	jid, err := strconv.ParseInt(job, 10, 64)
 	if err != nil {
-		log.Warn("Error while parsing job id")
+		cclog.Warn("Error while parsing job id")
 		return nil, err
 	}

@@ -224,14 +224,14 @@ func (r *mutationResolver) RemoveTagsFromJob(ctx context.Context, job string, ta
 		// Get ID
 		tid, err := strconv.ParseInt(tagId, 10, 64)
 		if err != nil {
-			log.Warn("Error while parsing tag id")
+			cclog.Warn("Error while parsing tag id")
 			return nil, err
 		}

 		// Test Exists
 		_, _, tscope, exists := r.Repo.TagInfo(tid)
 		if !exists {
-			log.Warnf("Tag does not exist (ID): %d", tid)
+			cclog.Warnf("Tag does not exist (ID): %d", tid)
 			return nil, fmt.Errorf("tag does not exist (ID): %d", tid)
 		}

@@ -241,11 +241,11 @@ func (r *mutationResolver) RemoveTagsFromJob(ctx context.Context, job string, ta
 			user.Username == tscope {
 			// Remove from Job
 			if tags, err = r.Repo.RemoveTag(user, jid, tid); err != nil {
-				log.Warn("Error while removing tag")
+				cclog.Warn("Error while removing tag")
 				return nil, err
 			}
 		} else {
-			log.Warnf("Not authorized to remove tag: %d", tid)
+			cclog.Warnf("Not authorized to remove tag: %d", tid)
 			return nil, fmt.Errorf("not authorized to remove tag: %d", tid)
 		}

@@ -267,14 +267,14 @@ func (r *mutationResolver) RemoveTagFromList(ctx context.Context, tagIds []strin
 		// Get ID
 		tid, err := strconv.ParseInt(tagId, 10, 64)
 		if err != nil {
-			log.Warn("Error while parsing tag id for removal")
+			cclog.Warn("Error while parsing tag id for removal")
 			return nil, err
 		}

 		// Test Exists
 		_, _, tscope, exists := r.Repo.TagInfo(tid)
 		if !exists {
-			log.Warnf("Tag does not exist (ID): %d", tid)
+			cclog.Warnf("Tag does not exist (ID): %d", tid)
 			return nil, fmt.Errorf("tag does not exist (ID): %d", tid)
 		}

@@ -282,13 +282,13 @@ func (r *mutationResolver) RemoveTagFromList(ctx context.Context, tagIds []strin
 		if user.HasRole(schema.RoleAdmin) && (tscope == "global" || tscope == "admin") || user.Username == tscope {
 			// Remove from DB
 			if err = r.Repo.RemoveTagById(tid); err != nil {
-				log.Warn("Error while removing tag")
+				cclog.Warn("Error while removing tag")
 				return nil, err
 			} else {
 				tags = append(tags, int(tid))
 			}
 		} else {
-			log.Warnf("Not authorized to remove tag: %d", tid)
+			cclog.Warnf("Not authorized to remove tag: %d", tid)
 			return nil, fmt.Errorf("not authorized to remove tag: %d", tid)
 		}
 	}
@@ -298,7 +298,7 @@ func (r *mutationResolver) RemoveTagFromList(ctx context.Context, tagIds []strin
 // UpdateConfiguration is the resolver for the updateConfiguration field.
 func (r *mutationResolver) UpdateConfiguration(ctx context.Context, name string, value string) (*string, error) {
 	if err := repository.GetUserCfgRepo().UpdateConfig(name, value, repository.GetUserFromContext(ctx)); err != nil {
-		log.Warn("Error while updating user config")
+		cclog.Warn("Error while updating user config")
 		return nil, err
 	}

@@ -344,7 +344,7 @@ func (r *queryResolver) User(ctx context.Context, username string) (*model.User,
 func (r *queryResolver) AllocatedNodes(ctx context.Context, cluster string) ([]*model.Count, error) {
 	data, err := r.Repo.AllocatedNodes(cluster)
 	if err != nil {
-		log.Warn("Error while fetching allocated nodes")
+		cclog.Warn("Error while fetching allocated nodes")
 		return nil, err
 	}

@@ -364,7 +364,7 @@ func (r *queryResolver) Node(ctx context.Context, id string) (*schema.Node, erro
 	repo := repository.GetNodeRepository()
 	numericId, err := strconv.ParseInt(id, 10, 64)
 	if err != nil {
-		log.Warn("Error while parsing job id")
+		cclog.Warn("Error while parsing node id")
 		return nil, err
 	}
 	return repo.GetNode(numericId, false)
@@ -387,13 +387,13 @@ func (r *queryResolver) NodeStats(ctx context.Context, filter []*model.NodeFilte
 func (r *queryResolver) Job(ctx context.Context, id string) (*schema.Job, error) {
 	numericId, err := strconv.ParseInt(id, 10, 64)
 	if err != nil {
-		log.Warn("Error while parsing job id")
+		cclog.Warn("Error while parsing job id")
 		return nil, err
 	}

 	job, err := r.Repo.FindById(ctx, numericId)
 	if err != nil {
-		log.Warn("Error while finding job by id")
+		cclog.Warn("Error while finding job by id")
 		return nil, err
 	}

@@ -420,13 +420,13 @@ func (r *queryResolver) JobMetrics(ctx context.Context, id string, metrics []str
 	job, err := r.Query().Job(ctx, id)
 	if err != nil {
-		log.Warn("Error while querying job for metrics")
+		cclog.Warn("Error while querying job for metrics")
 		return nil, err
 	}

 	data, err := metricDataDispatcher.LoadData(job, metrics, scopes, ctx, *resolution)
 	if err != nil {
-		log.Warn("Error while loading job data")
+		cclog.Warn("Error while loading job data")
 		return nil, err
 	}

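The tag mutations above repeat a single authorization rule: admins may modify tags in the "global" and "admin" scopes, while any user may modify tags scoped to their own username. Factored into a helper for clarity (the helper name is invented for illustration; the patch itself keeps the inline condition):

	package tagauth

	import "github.com/ClusterCockpit/cc-lib/schema"

	// mayEditTag restates the inline check used by AddTagsToJob,
	// RemoveTagsFromJob and RemoveTagFromList above.
	func mayEditTag(user *schema.User, tscope string) bool {
		return (user.HasRole(schema.RoleAdmin) && (tscope == "global" || tscope == "admin")) ||
			user.Username == tscope
	}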
@@ -448,13 +448,13 @@ func (r *queryResolver) JobStats(ctx context.Context, id string, metrics []string) ([]*model.NamedStats, error) {
 	job, err := r.Query().Job(ctx, id)
 	if err != nil {
-		log.Warnf("Error while querying job %s for metadata", id)
+		cclog.Warnf("Error while querying job %s for metadata", id)
 		return nil, err
 	}

 	data, err := metricDataDispatcher.LoadJobStats(job, metrics, ctx)
 	if err != nil {
-		log.Warnf("Error while loading jobStats data for job id %s", id)
+		cclog.Warnf("Error while loading jobStats data for job id %s", id)
 		return nil, err
 	}

@@ -473,13 +473,13 @@ func (r *queryResolver) ScopedJobStats(ctx context.Context, id string, metrics []string, scopes []schema.MetricScope) ([]*model.NamedStatsWithScope, error) {
 	job, err := r.Query().Job(ctx, id)
 	if err != nil {
-		log.Warnf("Error while querying job %s for metadata", id)
+		cclog.Warnf("Error while querying job %s for metadata", id)
 		return nil, err
 	}

 	data, err := metricDataDispatcher.LoadScopedJobStats(job, metrics, scopes, ctx)
 	if err != nil {
-		log.Warnf("Error while loading scopedJobStats data for job id %s", id)
+		cclog.Warnf("Error while loading scopedJobStats data for job id %s", id)
 		return nil, err
 	}

@@ -518,13 +518,13 @@ func (r *queryResolver) Jobs(ctx context.Context, filter []*model.JobFilter, pag
 	jobs, err := r.Repo.QueryJobs(ctx, filter, page, order)
 	if err != nil {
-		log.Warn("Error while querying jobs")
+		cclog.Warn("Error while querying jobs")
 		return nil, err
 	}

 	count, err := r.Repo.CountJobs(ctx, filter)
 	if err != nil {
-		log.Warn("Error while counting jobs")
+		cclog.Warn("Error while counting jobs")
 		return nil, err
 	}

@@ -540,7 +540,7 @@ func (r *queryResolver) Jobs(ctx context.Context, filter []*model.JobFilter, pag
 		}
 		nextJobs, err := r.Repo.QueryJobs(ctx, filter, nextPage, order)
 		if err != nil {
-			log.Warn("Error while querying next jobs")
+			cclog.Warn("Error while querying next jobs")
 			return nil, err
 		}

@@ -636,7 +636,7 @@ func (r *queryResolver) JobsMetricStats(ctx context.Context, filter []*model.Job
 	jobs, err := r.Repo.QueryJobs(ctx, filter, nil, order)
 	if err != nil {
-		log.Warn("Error while querying jobs for comparison")
+		cclog.Warn("Error while querying jobs for comparison")
 		return nil, err
 	}

@@ -644,7 +644,7 @@ func (r *queryResolver) JobsMetricStats(ctx context.Context, filter []*model.Job
 	for _, job := range jobs {
 		data, err := metricDataDispatcher.LoadJobStats(job, metrics, ctx)
 		if err != nil {
-			log.Warnf("Error while loading comparison jobStats data for job id %d", job.JobID)
+			cclog.Warnf("Error while loading comparison jobStats data for job id %d", job.JobID)
 			continue
 			// return nil, err
 		}
@@ -701,7 +701,7 @@ func (r *queryResolver) NodeMetrics(ctx context.Context, cluster string, nodes [
 	data, err := metricDataDispatcher.LoadNodeData(cluster, metrics, nodes, scopes, from, to, ctx)
 	if err != nil {
-		log.Warn("error while loading node data")
+		cclog.Warn("error while loading node data")
 		return nil, err
 	}

@@ -713,7 +713,7 @@ func (r *queryResolver) NodeMetrics(ctx context.Context, cluster string, nodes [
 		}
 		host.SubCluster, err = archive.GetSubClusterByNode(cluster, hostname)
 		if err != nil {
-			log.Warnf("error in nodeMetrics resolver: %s", err)
+			cclog.Warnf("error in nodeMetrics resolver: %s", err)
 		}

 		for metric, scopedMetrics := range metrics {
@@ -757,7 +757,7 @@ func (r *queryResolver) NodeMetricsList(ctx context.Context, cluster string, sub
 	data, totalNodes, hasNextPage, err := metricDataDispatcher.LoadNodeListData(cluster, subCluster, nodeFilter, metrics, scopes, *resolution, from, to, page, ctx)
 	if err != nil {
-		log.Warn("error while loading node data")
+		cclog.Warn("error while loading node data")
 		return nil, err
 	}

@@ -769,7 +769,7 @@ func (r *queryResolver) NodeMetricsList(ctx context.Context, cluster string, sub
 		}
 		host.SubCluster, err = archive.GetSubClusterByNode(cluster, hostname)
 		if err != nil {
-			log.Warnf("error in nodeMetrics resolver: %s", err)
+			cclog.Warnf("error in nodeMetrics resolver: %s", err)
 		}

 		for metric, scopedMetrics := range metrics {
@@ -824,12 +824,10 @@ func (r *Resolver) Query() generated.QueryResolver { return &queryResolver{r} }
 // SubCluster returns generated.SubClusterResolver implementation.
 func (r *Resolver) SubCluster() generated.SubClusterResolver { return &subClusterResolver{r} }

-type (
-	clusterResolver     struct{ *Resolver }
-	jobResolver         struct{ *Resolver }
-	metricValueResolver struct{ *Resolver }
-	mutationResolver    struct{ *Resolver }
-	nodeResolver        struct{ *Resolver }
-	queryResolver       struct{ *Resolver }
-	subClusterResolver  struct{ *Resolver }
-)
+type clusterResolver struct{ *Resolver }
+type jobResolver struct{ *Resolver }
+type metricValueResolver struct{ *Resolver }
+type mutationResolver struct{ *Resolver }
+type nodeResolver struct{ *Resolver }
+type queryResolver struct{ *Resolver }
+type subClusterResolver struct{ *Resolver }
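The next file, util.go, builds the roofline heatmap from per-job flops_any and mem_bw node series. The core mapping, stated as a hedged sketch (grid bounds, scaling and clamping details are assumptions; the patch itself only re-labels logging in this function):

	package roofline

	// binSample places one (flops, mem_bw) sample on a rows x cols grid.
	// x is operational intensity (flops per byte moved), y raw performance.
	func binSample(flops, membw, minX, minY, maxX, maxY float64, rows, cols int) (row, col int, ok bool) {
		x, y := flops/membw, flops
		if x < minX || x > maxX || y < minY || y > maxY {
			return 0, 0, false // sample falls outside the plotted window
		}
		col = int(float64(cols) * (x - minX) / (maxX - minX))
		row = int(float64(rows) * (y - minY) / (maxY - minY))
		if col == cols {
			col-- // clamp the right/top edge into the last bin
		}
		if row == rows {
			row--
		}
		return row, col, true
	}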
available at 'node' level") continue // TODO/FIXME: // return nil, errors.New("GRAPH/UTIL > todo: rooflineHeatmap() query not implemented for where flops_any or mem_bw not available at 'node' level") @@ -105,7 +104,7 @@ func (r *queryResolver) rooflineHeatmap( func (r *queryResolver) jobsFootprints(ctx context.Context, filter []*model.JobFilter, metrics []string) (*model.Footprints, error) { jobs, err := r.Repo.QueryJobs(ctx, filter, &model.PageRequest{Page: 1, ItemsPerPage: MAX_JOBS_FOR_ANALYSIS + 1}, nil) if err != nil { - log.Error("Error while querying jobs for footprint") + cclog.Error("Error while querying jobs for footprint") return nil, err } if len(jobs) > MAX_JOBS_FOR_ANALYSIS { @@ -128,7 +127,7 @@ func (r *queryResolver) jobsFootprints(ctx context.Context, filter []*model.JobF } if err := metricDataDispatcher.LoadAverages(job, metrics, avgs, ctx); err != nil { - log.Error("Error while loading averages for footprint") + cclog.Error("Error while loading averages for footprint") return nil, err } diff --git a/internal/importer/handleImport.go b/internal/importer/handleImport.go index 83230f5..71c4d24 100644 --- a/internal/importer/handleImport.go +++ b/internal/importer/handleImport.go @@ -1,5 +1,5 @@ // Copyright (C) NHR@FAU, University Erlangen-Nuremberg. -// All rights reserved. +// All rights reserved. This file is part of cc-backend. // Use of this source code is governed by a MIT-style // license that can be found in the LICENSE file. package importer @@ -15,8 +15,8 @@ import ( "github.com/ClusterCockpit/cc-backend/internal/config" "github.com/ClusterCockpit/cc-backend/internal/repository" "github.com/ClusterCockpit/cc-backend/pkg/archive" - "github.com/ClusterCockpit/cc-backend/pkg/log" - "github.com/ClusterCockpit/cc-backend/pkg/schema" + cclog "github.com/ClusterCockpit/cc-lib/ccLogger" + "github.com/ClusterCockpit/cc-lib/schema" ) // Import all jobs specified as `:,...` @@ -31,7 +31,7 @@ func HandleImportFlag(flag string) error { raw, err := os.ReadFile(files[0]) if err != nil { - log.Warn("Error while reading metadata file for import") + cclog.Warn("Error while reading metadata file for import") return err } @@ -47,13 +47,13 @@ func HandleImportFlag(flag string) error { MonitoringStatus: schema.MonitoringStatusRunningOrArchiving, } if err = dec.Decode(&job); err != nil { - log.Warn("Error while decoding raw json metadata for import") + cclog.Warn("Error while decoding raw json metadata for import") return err } raw, err = os.ReadFile(files[1]) if err != nil { - log.Warn("Error while reading jobdata file for import") + cclog.Warn("Error while reading jobdata file for import") return err } @@ -66,7 +66,7 @@ func HandleImportFlag(flag string) error { dec.DisallowUnknownFields() jobData := schema.JobData{} if err = dec.Decode(&jobData); err != nil { - log.Warn("Error while decoding raw json jobdata for import") + cclog.Warn("Error while decoding raw json jobdata for import") return err } @@ -74,7 +74,7 @@ func HandleImportFlag(flag string) error { sc, err := archive.GetSubCluster(job.Cluster, job.SubCluster) if err != nil { - log.Errorf("cannot get subcluster: %s", err.Error()) + cclog.Errorf("cannot get subcluster: %s", err.Error()) return err } @@ -94,7 +94,7 @@ func HandleImportFlag(flag string) error { job.RawFootprint, err = json.Marshal(job.Footprint) if err != nil { - log.Warn("Error while marshaling job footprint") + cclog.Warn("Error while marshaling job footprint") return err } @@ -108,7 +108,7 @@ func HandleImportFlag(flag string) error { if i, err := 
archive.MetricIndex(sc.MetricConfig, fp); err == nil { // Note: For DB data, calculate and save as kWh if sc.MetricConfig[i].Energy == "energy" { // this metric has energy as unit (Joules) - log.Warnf("Update EnergyFootprint for Job %d and Metric %s on cluster %s: Set to 'energy' in cluster.json: Not implemented, will return 0.0", job.JobID, job.Cluster, fp) + cclog.Warnf("Update EnergyFootprint for Job %d and Metric %s on cluster %s: Set to 'energy' in cluster.json: Not implemented, will return 0.0", job.JobID, job.Cluster, fp) // FIXME: Needs sum as stats type } else if sc.MetricConfig[i].Energy == "power" { // this metric has power as unit (Watt) // Energy: Power (in Watts) * Time (in Seconds) @@ -120,7 +120,7 @@ func HandleImportFlag(flag string) error { metricEnergy = math.Round(rawEnergy*100.0) / 100.0 } } else { - log.Warnf("Error while collecting energy metric %s for job, DB ID '%v', return '0.0'", fp, job.ID) + cclog.Warnf("Error while collecting energy metric %s for job, DB ID '%v', return '0.0'", fp, job.ID) } job.EnergyFootprint[fp] = metricEnergy @@ -129,45 +129,45 @@ func HandleImportFlag(flag string) error { job.Energy = (math.Round(totalEnergy*100.0) / 100.0) if job.RawEnergyFootprint, err = json.Marshal(job.EnergyFootprint); err != nil { - log.Warnf("Error while marshaling energy footprint for job INTO BYTES, DB ID '%v'", job.ID) + cclog.Warnf("Error while marshaling energy footprint for job INTO BYTES, DB ID '%v'", job.ID) return err } job.RawResources, err = json.Marshal(job.Resources) if err != nil { - log.Warn("Error while marshaling job resources") + cclog.Warn("Error while marshaling job resources") return err } job.RawMetaData, err = json.Marshal(job.MetaData) if err != nil { - log.Warn("Error while marshaling job metadata") + cclog.Warn("Error while marshaling job metadata") return err } if err = SanityChecks(&job); err != nil { - log.Warn("BaseJob SanityChecks failed") + cclog.Warn("BaseJob SanityChecks failed") return err } if err = archive.GetHandle().ImportJob(&job, &jobData); err != nil { - log.Error("Error while importing job") + cclog.Error("Error while importing job") return err } id, err := r.InsertJob(&job) if err != nil { - log.Warn("Error while job db insert") + cclog.Warn("Error while job db insert") return err } for _, tag := range job.Tags { if err := r.ImportTag(id, tag.Type, tag.Name, tag.Scope); err != nil { - log.Error("Error while adding or creating tag on import") + cclog.Error("Error while adding or creating tag on import") return err } } - log.Infof("successfully imported a new job (jobId: %d, cluster: %s, dbid: %d)", job.JobID, job.Cluster, id) + cclog.Infof("successfully imported a new job (jobId: %d, cluster: %s, dbid: %d)", job.JobID, job.Cluster, id) } return nil } diff --git a/internal/importer/importer_test.go b/internal/importer/importer_test.go index d2bb0b4..11e7afe 100644 --- a/internal/importer/importer_test.go +++ b/internal/importer/importer_test.go @@ -1,5 +1,5 @@ // Copyright (C) NHR@FAU, University Erlangen-Nuremberg. -// All rights reserved. +// All rights reserved. This file is part of cc-backend. // Use of this source code is governed by a MIT-style // license that can be found in the LICENSE file. 
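The handleImport.go hunks above only swap the logger; the energy-footprint arithmetic itself is unchanged: for metrics whose unit is power, energy is average power times job duration, converted to kWh and rounded to two decimals before it is stored. A minimal sketch of that conversion (the helper name and the 250 W example values are illustrative, not part of cc-backend):

package main

import (
	"fmt"
	"math"
)

// energyKWh mirrors the calculation in HandleImportFlag: average power in
// Watts times duration in seconds, divided by 3600 (to Wh) and by 1000
// (to kWh), rounded to two decimal places as saved in the DB.
func energyKWh(avgPowerW float64, durationS int64) float64 {
	rawEnergy := avgPowerW * float64(durationS) / 3600.0 / 1000.0
	return math.Round(rawEnergy*100.0) / 100.0
}

func main() {
	fmt.Println(energyKWh(250.0, 7200)) // two hours at 250 W -> 0.5 kWh
}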
package importer_test @@ -16,7 +16,7 @@ import ( "github.com/ClusterCockpit/cc-backend/internal/importer" "github.com/ClusterCockpit/cc-backend/internal/repository" "github.com/ClusterCockpit/cc-backend/pkg/archive" - "github.com/ClusterCockpit/cc-backend/pkg/log" + cclog "github.com/ClusterCockpit/cc-lib/ccLogger" ) func copyFile(s string, d string) error { @@ -78,7 +78,7 @@ func setup(t *testing.T) *repository.JobRepository { } ]}` - log.Init("info", true) + cclog.Init("info", true) tmpdir := t.TempDir() jobarchive := filepath.Join(tmpdir, "job-archive") diff --git a/internal/importer/initDB.go b/internal/importer/initDB.go index 1239951..179c21c 100644 --- a/internal/importer/initDB.go +++ b/internal/importer/initDB.go @@ -1,5 +1,5 @@ // Copyright (C) NHR@FAU, University Erlangen-Nuremberg. -// All rights reserved. +// All rights reserved. This file is part of cc-backend. // Use of this source code is governed by a MIT-style // license that can be found in the LICENSE file. package importer @@ -13,8 +13,8 @@ import ( "github.com/ClusterCockpit/cc-backend/internal/repository" "github.com/ClusterCockpit/cc-backend/pkg/archive" - "github.com/ClusterCockpit/cc-backend/pkg/log" - "github.com/ClusterCockpit/cc-backend/pkg/schema" + cclog "github.com/ClusterCockpit/cc-lib/ccLogger" + "github.com/ClusterCockpit/cc-lib/schema" ) const ( @@ -27,15 +27,15 @@ const ( func InitDB() error { r := repository.GetJobRepository() if err := r.Flush(); err != nil { - log.Errorf("repository initDB(): %v", err) + cclog.Errorf("repository initDB(): %v", err) return err } starttime := time.Now() - log.Print("Building job table...") + cclog.Print("Building job table...") t, err := r.TransactionInit() if err != nil { - log.Warn("Error while initializing SQL transactions") + cclog.Warn("Error while initializing SQL transactions") return err } tags := make(map[string]int64) @@ -63,7 +63,7 @@ func InitDB() error { sc, err := archive.GetSubCluster(jobMeta.Cluster, jobMeta.SubCluster) if err != nil { - log.Errorf("cannot get subcluster: %s", err.Error()) + cclog.Errorf("cannot get subcluster: %s", err.Error()) return err } @@ -83,7 +83,7 @@ func InitDB() error { jobMeta.RawFootprint, err = json.Marshal(jobMeta.Footprint) if err != nil { - log.Warn("Error while marshaling job footprint") + cclog.Warn("Error while marshaling job footprint") return err } @@ -97,7 +97,7 @@ func InitDB() error { if i, err := archive.MetricIndex(sc.MetricConfig, fp); err == nil { // Note: For DB data, calculate and save as kWh if sc.MetricConfig[i].Energy == "energy" { // this metric has energy as unit (Joules) - log.Warnf("Update EnergyFootprint for Job %d and Metric %s on cluster %s: Set to 'energy' in cluster.json: Not implemented, will return 0.0", jobMeta.JobID, jobMeta.Cluster, fp) + cclog.Warnf("Update EnergyFootprint for Job %d and Metric %s on cluster %s: Set to 'energy' in cluster.json: Not implemented, will return 0.0", jobMeta.JobID, jobMeta.Cluster, fp) // FIXME: Needs sum as stats type } else if sc.MetricConfig[i].Energy == "power" { // this metric has power as unit (Watt) // Energy: Power (in Watts) * Time (in Seconds) @@ -109,7 +109,7 @@ func InitDB() error { metricEnergy = math.Round(rawEnergy*100.0) / 100.0 } } else { - log.Warnf("Error while collecting energy metric %s for job, DB ID '%v', return '0.0'", fp, jobMeta.ID) + cclog.Warnf("Error while collecting energy metric %s for job, DB ID '%v', return '0.0'", fp, jobMeta.ID) } jobMeta.EnergyFootprint[fp] = metricEnergy @@ -118,26 +118,26 @@ func InitDB() error { 
jobMeta.Energy = (math.Round(totalEnergy*100.0) / 100.0) if jobMeta.RawEnergyFootprint, err = json.Marshal(jobMeta.EnergyFootprint); err != nil { - log.Warnf("Error while marshaling energy footprint for job INTO BYTES, DB ID '%v'", jobMeta.ID) + cclog.Warnf("Error while marshaling energy footprint for job INTO BYTES, DB ID '%v'", jobMeta.ID) return err } jobMeta.RawResources, err = json.Marshal(jobMeta.Resources) if err != nil { - log.Errorf("repository initDB(): %v", err) + cclog.Errorf("repository initDB(): %v", err) errorOccured++ continue } jobMeta.RawMetaData, err = json.Marshal(jobMeta.MetaData) if err != nil { - log.Errorf("repository initDB(): %v", err) + cclog.Errorf("repository initDB(): %v", err) errorOccured++ continue } if err := SanityChecks(jobMeta); err != nil { - log.Errorf("repository initDB(): %v", err) + cclog.Errorf("repository initDB(): %v", err) errorOccured++ continue } @@ -145,7 +145,7 @@ func InitDB() error { id, err := r.TransactionAddNamed(t, repository.NamedJobInsert, jobMeta) if err != nil { - log.Errorf("repository initDB(): %v", err) + cclog.Errorf("repository initDB(): %v", err) errorOccured++ continue } @@ -158,7 +158,7 @@ func InitDB() error { addTagQuery, tag.Name, tag.Type) if err != nil { - log.Errorf("Error adding tag: %v", err) + cclog.Errorf("Error adding tag: %v", err) errorOccured++ continue } @@ -176,11 +176,11 @@ func InitDB() error { } if errorOccured > 0 { - log.Warnf("Error in import of %d jobs!", errorOccured) + cclog.Warnf("Error in import of %d jobs!", errorOccured) } r.TransactionEnd(t) - log.Printf("A total of %d jobs have been registered in %.3f seconds.\n", i, time.Since(starttime).Seconds()) + cclog.Printf("A total of %d jobs have been registered in %.3f seconds.\n", i, time.Since(starttime).Seconds()) return nil } @@ -190,7 +190,7 @@ func SanityChecks(job *schema.Job) error { return fmt.Errorf("no such cluster: %v", job.Cluster) } if err := archive.AssignSubCluster(job); err != nil { - log.Warn("Error while assigning subcluster to job") + cclog.Warn("Error while assigning subcluster to job") return err } if !job.State.Valid() { diff --git a/internal/importer/normalize.go b/internal/importer/normalize.go index d9595a2..bc72cb3 100644 --- a/internal/importer/normalize.go +++ b/internal/importer/normalize.go @@ -1,5 +1,5 @@ // Copyright (C) NHR@FAU, University Erlangen-Nuremberg. -// All rights reserved. +// All rights reserved. This file is part of cc-backend. // Use of this source code is governed by a MIT-style // license that can be found in the LICENSE file. package importer @@ -7,7 +7,7 @@ package importer import ( "math" - ccunits "github.com/ClusterCockpit/cc-units" + ccunits "github.com/ClusterCockpit/cc-lib/ccUnits" ) func getNormalizationFactor(v float64) (float64, int) { diff --git a/internal/importer/normalize_test.go b/internal/importer/normalize_test.go index b441c11..72017f5 100644 --- a/internal/importer/normalize_test.go +++ b/internal/importer/normalize_test.go @@ -1,5 +1,5 @@ // Copyright (C) NHR@FAU, University Erlangen-Nuremberg. -// All rights reserved. +// All rights reserved. This file is part of cc-backend. // Use of this source code is governed by a MIT-style // license that can be found in the LICENSE file. 
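normalize.go and its test change nothing but the units-library import (cc-units becomes cc-lib/ccUnits). The getNormalizationFactor helper visible in the hunk scales raw values so that a fitting unit prefix can be chosen; the sketch below shows the idea under assumed semantics and is not the importer's exact implementation:

package main

import "fmt"

// getNormalizationFactor, sketched: scale a raw value into [1, 1000) and
// report the power-of-ten exponent so a matching prefix (m, K, M, G, ...)
// can be attached to the unit. Hypothetical logic for illustration only.
func getNormalizationFactor(v float64) (factor float64, exp int) {
	factor = 1.0
	for v*factor >= 1000.0 {
		factor /= 1000.0
		exp += 3
	}
	for v*factor != 0 && v*factor < 1.0 {
		factor *= 1000.0
		exp -= 3
	}
	return factor, exp
}

func main() {
	f, e := getNormalizationFactor(2.5e9)
	fmt.Println(2.5e9*f, e) // prints 2.5 9, i.e. 2.5 G<unit>
}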
package importer @@ -8,7 +8,7 @@ import ( "fmt" "testing" - ccunits "github.com/ClusterCockpit/cc-units" + ccunits "github.com/ClusterCockpit/cc-lib/ccUnits" ) func TestNormalizeFactor(t *testing.T) { diff --git a/internal/metricDataDispatcher/dataLoader.go b/internal/metricDataDispatcher/dataLoader.go index c6cecd8..6307843 100644 --- a/internal/metricDataDispatcher/dataLoader.go +++ b/internal/metricDataDispatcher/dataLoader.go @@ -1,5 +1,5 @@ // Copyright (C) NHR@FAU, University Erlangen-Nuremberg. -// All rights reserved. +// All rights reserved. This file is part of cc-backend. // Use of this source code is governed by a MIT-style // license that can be found in the LICENSE file. package metricDataDispatcher @@ -14,10 +14,10 @@ import ( "github.com/ClusterCockpit/cc-backend/internal/graph/model" "github.com/ClusterCockpit/cc-backend/internal/metricdata" "github.com/ClusterCockpit/cc-backend/pkg/archive" - "github.com/ClusterCockpit/cc-backend/pkg/log" - "github.com/ClusterCockpit/cc-backend/pkg/lrucache" - "github.com/ClusterCockpit/cc-backend/pkg/resampler" - "github.com/ClusterCockpit/cc-backend/pkg/schema" + cclog "github.com/ClusterCockpit/cc-lib/ccLogger" + "github.com/ClusterCockpit/cc-lib/lrucache" + "github.com/ClusterCockpit/cc-lib/resampler" + "github.com/ClusterCockpit/cc-lib/schema" ) var cache *lrucache.Cache = lrucache.New(128 * 1024 * 1024) @@ -68,10 +68,10 @@ func LoadData(job *schema.Job, jd, err = repo.LoadData(job, metrics, scopes, ctx, resolution) if err != nil { if len(jd) != 0 { - log.Warnf("partial error: %s", err.Error()) + cclog.Warnf("partial error: %s", err.Error()) // return err, 0, 0 // Reactivating will block archiving on one partial error } else { - log.Error("Error while loading job data from metric repository") + cclog.Error("Error while loading job data from metric repository") return err, 0, 0 } } @@ -80,15 +80,15 @@ func LoadData(job *schema.Job, var jd_temp schema.JobData jd_temp, err = archive.GetHandle().LoadJobData(job) if err != nil { - log.Error("Error while loading job data from archive") + cclog.Error("Error while loading job data from archive") return err, 0, 0 } - //Deep copy the cached archive hashmap + // Deep copy the cached archive hashmap jd = metricdata.DeepCopy(jd_temp) - //Resampling for archived data. - //Pass the resolution from frontend here. + // Resampling for archived data. + // Pass the resolution from frontend here. for _, v := range jd { for _, v_ := range v { timestep := 0 @@ -178,7 +178,7 @@ func LoadData(job *schema.Job, }) if err, ok := data.(error); ok { - log.Error("Error in returned dataset") + cclog.Error("Error in returned dataset") return nil, err } @@ -203,7 +203,7 @@ func LoadAverages( stats, err := repo.LoadStats(job, metrics, ctx) // #166 how to handle stats for acc normalizazion? 
if err != nil { - log.Errorf("Error while loading statistics for job %v (User %v, Project %v)", job.JobID, job.User, job.Project) + cclog.Errorf("Error while loading statistics for job %v (User %v, Project %v)", job.JobID, job.User, job.Project) return err } @@ -231,7 +231,6 @@ func LoadScopedJobStats( scopes []schema.MetricScope, ctx context.Context, ) (schema.ScopedJobStats, error) { - if job.State != schema.JobStateRunning && !config.Keys.DisableArchive { return archive.LoadScopedStatsFromArchive(job, metrics, scopes) } @@ -243,7 +242,7 @@ func LoadScopedJobStats( scopedStats, err := repo.LoadScopedStats(job, metrics, scopes, ctx) if err != nil { - log.Errorf("error while loading scoped statistics for job %d (User %s, Project %s)", job.JobID, job.User, job.Project) + cclog.Errorf("error while loading scoped statistics for job %d (User %s, Project %s)", job.JobID, job.User, job.Project) return nil, err } @@ -268,7 +267,7 @@ func LoadJobStats( stats, err := repo.LoadStats(job, metrics, ctx) if err != nil { - log.Errorf("error while loading statistics for job %d (User %s, Project %s)", job.JobID, job.User, job.Project) + cclog.Errorf("error while loading statistics for job %d (User %s, Project %s)", job.JobID, job.User, job.Project) return data, err } @@ -318,9 +317,9 @@ func LoadNodeData( data, err := repo.LoadNodeData(cluster, metrics, nodes, scopes, from, to, ctx) if err != nil { if len(data) != 0 { - log.Warnf("partial error: %s", err.Error()) + cclog.Warnf("partial error: %s", err.Error()) } else { - log.Error("Error while loading node data from metric repository") + cclog.Error("Error while loading node data from metric repository") return nil, err } } @@ -355,9 +354,9 @@ func LoadNodeListData( data, totalNodes, hasNextPage, err := repo.LoadNodeListData(cluster, subCluster, nodeFilter, metrics, scopes, resolution, from, to, page, ctx) if err != nil { if len(data) != 0 { - log.Warnf("partial error: %s", err.Error()) + cclog.Warnf("partial error: %s", err.Error()) } else { - log.Error("Error while loading node data from metric repository") + cclog.Error("Error while loading node data from metric repository") return nil, totalNodes, hasNextPage, err } } diff --git a/internal/metricdata/cc-metric-store.go b/internal/metricdata/cc-metric-store.go index b76ed5b..36c0dd7 100644 --- a/internal/metricdata/cc-metric-store.go +++ b/internal/metricdata/cc-metric-store.go @@ -1,5 +1,5 @@ // Copyright (C) NHR@FAU, University Erlangen-Nuremberg. -// All rights reserved. +// All rights reserved. This file is part of cc-backend. // Use of this source code is governed by a MIT-style // license that can be found in the LICENSE file. 
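A pattern worth noting in the dataLoader.go hunks: LoadData, LoadNodeData, and LoadNodeListData treat an error that arrives together with non-empty data as a partial failure and only warn, so a single unreachable node or metric does not abort the whole request. A condensed sketch of that convention (fmt stands in for cclog, and the loader closure is a stand-in for the metric repository call):

package main

import (
	"errors"
	"fmt"
)

// loadWithPartialErrors: non-empty data downgrades the error to a warning;
// an error with no data at all is returned to the caller.
func loadWithPartialErrors(load func() (map[string][]float64, error)) (map[string][]float64, error) {
	data, err := load()
	if err != nil {
		if len(data) != 0 {
			fmt.Println("partial error:", err) // cclog.Warnf in cc-backend
		} else {
			return nil, err // nothing usable came back
		}
	}
	return data, nil
}

func main() {
	data, err := loadWithPartialErrors(func() (map[string][]float64, error) {
		return map[string][]float64{"node01": {1, 2, 3}}, errors.New("node02 unreachable")
	})
	fmt.Println(len(data), err)
}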
package metricdata @@ -18,8 +18,8 @@ import ( "github.com/ClusterCockpit/cc-backend/internal/graph/model" "github.com/ClusterCockpit/cc-backend/pkg/archive" - "github.com/ClusterCockpit/cc-backend/pkg/log" - "github.com/ClusterCockpit/cc-backend/pkg/schema" + cclog "github.com/ClusterCockpit/cc-lib/ccLogger" + "github.com/ClusterCockpit/cc-lib/schema" ) type CCMetricStoreConfig struct { @@ -82,7 +82,7 @@ type ApiMetricData struct { func (ccms *CCMetricStore) Init(rawConfig json.RawMessage) error { var config CCMetricStoreConfig if err := json.Unmarshal(rawConfig, &config); err != nil { - log.Warn("Error while unmarshaling raw json config") + cclog.Warn("Error while unmarshaling raw json config") return err } @@ -129,13 +129,13 @@ func (ccms *CCMetricStore) doRequest( ) (*ApiQueryResponse, error) { buf := &bytes.Buffer{} if err := json.NewEncoder(buf).Encode(body); err != nil { - log.Errorf("Error while encoding request body: %s", err.Error()) + cclog.Errorf("Error while encoding request body: %s", err.Error()) return nil, err } req, err := http.NewRequestWithContext(ctx, http.MethodGet, ccms.queryEndpoint, buf) if err != nil { - log.Errorf("Error while building request body: %s", err.Error()) + cclog.Errorf("Error while building request body: %s", err.Error()) return nil, err } if ccms.jwt != "" { @@ -151,7 +151,7 @@ func (ccms *CCMetricStore) doRequest( res, err := ccms.client.Do(req) if err != nil { - log.Errorf("Error while performing request: %s", err.Error()) + cclog.Errorf("Error while performing request: %s", err.Error()) return nil, err } @@ -161,7 +161,7 @@ func (ccms *CCMetricStore) doRequest( var resBody ApiQueryResponse if err := json.NewDecoder(bufio.NewReader(res.Body)).Decode(&resBody); err != nil { - log.Errorf("Error while decoding result body: %s", err.Error()) + cclog.Errorf("Error while decoding result body: %s", err.Error()) return nil, err } @@ -177,7 +177,7 @@ func (ccms *CCMetricStore) LoadData( ) (schema.JobData, error) { queries, assignedScope, err := ccms.buildQueries(job, metrics, scopes, resolution) if err != nil { - log.Errorf("Error while building queries for jobId %d, Metrics %v, Scopes %v: %s", job.JobID, metrics, scopes, err.Error()) + cclog.Errorf("Error while building queries for jobId %d, Metrics %v, Scopes %v: %s", job.JobID, metrics, scopes, err.Error()) return nil, err } @@ -192,7 +192,7 @@ func (ccms *CCMetricStore) LoadData( resBody, err := ccms.doRequest(ctx, &req) if err != nil { - log.Errorf("Error while performing request: %s", err.Error()) + cclog.Errorf("Error while performing request: %s", err.Error()) return nil, err } @@ -298,7 +298,7 @@ func (ccms *CCMetricStore) buildQueries( mc := archive.GetMetricConfig(job.Cluster, metric) if mc == nil { // return nil, fmt.Errorf("METRICDATA/CCMS > metric '%s' is not specified for cluster '%s'", metric, job.Cluster) - log.Infof("metric '%s' is not specified for cluster '%s'", metric, job.Cluster) + cclog.Infof("metric '%s' is not specified for cluster '%s'", metric, job.Cluster) continue } @@ -572,7 +572,7 @@ func (ccms *CCMetricStore) LoadStats( ) (map[string]map[string]schema.MetricStatistics, error) { queries, _, err := ccms.buildQueries(job, metrics, []schema.MetricScope{schema.MetricScopeNode}, 0) // #166 Add scope shere for analysis view accelerator normalization? 
if err != nil { - log.Errorf("Error while building queries for jobId %d, Metrics %v: %s", job.JobID, metrics, err.Error()) + cclog.Errorf("Error while building queries for jobId %d, Metrics %v: %s", job.JobID, metrics, err.Error()) return nil, err } @@ -587,7 +587,7 @@ func (ccms *CCMetricStore) LoadStats( resBody, err := ccms.doRequest(ctx, &req) if err != nil { - log.Errorf("Error while performing request: %s", err.Error()) + cclog.Errorf("Error while performing request: %s", err.Error()) return nil, err } @@ -597,7 +597,7 @@ func (ccms *CCMetricStore) LoadStats( metric := ccms.toLocalName(query.Metric) data := res[0] if data.Error != nil { - log.Errorf("fetching %s for node %s failed: %s", metric, query.Hostname, *data.Error) + cclog.Errorf("fetching %s for node %s failed: %s", metric, query.Hostname, *data.Error) continue } @@ -608,7 +608,7 @@ func (ccms *CCMetricStore) LoadStats( } if data.Avg.IsNaN() || data.Min.IsNaN() || data.Max.IsNaN() { - log.Warnf("fetching %s for node %s failed: one of avg/min/max is NaN", metric, query.Hostname) + cclog.Warnf("fetching %s for node %s failed: one of avg/min/max is NaN", metric, query.Hostname) continue } @@ -631,7 +631,7 @@ func (ccms *CCMetricStore) LoadScopedStats( ) (schema.ScopedJobStats, error) { queries, assignedScope, err := ccms.buildQueries(job, metrics, scopes, 0) if err != nil { - log.Errorf("Error while building queries for jobId %d, Metrics %v, Scopes %v: %s", job.JobID, metrics, scopes, err.Error()) + cclog.Errorf("Error while building queries for jobId %d, Metrics %v, Scopes %v: %s", job.JobID, metrics, scopes, err.Error()) return nil, err } @@ -646,7 +646,7 @@ func (ccms *CCMetricStore) LoadScopedStats( resBody, err := ccms.doRequest(ctx, &req) if err != nil { - log.Errorf("Error while performing request: %s", err.Error()) + cclog.Errorf("Error while performing request: %s", err.Error()) return nil, err } @@ -747,7 +747,7 @@ func (ccms *CCMetricStore) LoadNodeData( resBody, err := ccms.doRequest(ctx, &req) if err != nil { - log.Errorf("Error while performing request: %s", err.Error()) + cclog.Errorf("Error while performing request: %s", err.Error()) return nil, err } @@ -863,7 +863,7 @@ func (ccms *CCMetricStore) LoadNodeListData( queries, assignedScope, err := ccms.buildNodeQueries(cluster, subCluster, nodes, metrics, scopes, resolution) if err != nil { - log.Errorf("Error while building node queries for Cluster %s, SubCLuster %s, Metrics %v, Scopes %v: %s", cluster, subCluster, metrics, scopes, err.Error()) + cclog.Errorf("Error while building node queries for Cluster %s, SubCLuster %s, Metrics %v, Scopes %v: %s", cluster, subCluster, metrics, scopes, err.Error()) return nil, totalNodes, hasNextPage, err } @@ -878,7 +878,7 @@ func (ccms *CCMetricStore) LoadNodeListData( resBody, err := ccms.doRequest(ctx, &req) if err != nil { - log.Errorf("Error while performing request: %s", err.Error()) + cclog.Errorf("Error while performing request: %s", err.Error()) return nil, totalNodes, hasNextPage, err } @@ -982,7 +982,7 @@ func (ccms *CCMetricStore) buildNodeQueries( if subCluster != "" { subClusterTopol, scterr = archive.GetSubCluster(cluster, subCluster) if scterr != nil { - log.Errorf("could not load cluster %s subCluster %s topology: %s", cluster, subCluster, scterr.Error()) + cclog.Errorf("could not load cluster %s subCluster %s topology: %s", cluster, subCluster, scterr.Error()) return nil, nil, scterr } } @@ -992,7 +992,7 @@ func (ccms *CCMetricStore) buildNodeQueries( mc := archive.GetMetricConfig(cluster, metric) if mc == 
nil { // return nil, fmt.Errorf("METRICDATA/CCMS > metric '%s' is not specified for cluster '%s'", metric, cluster) - log.Warnf("metric '%s' is not specified for cluster '%s'", metric, cluster) + cclog.Warnf("metric '%s' is not specified for cluster '%s'", metric, cluster) continue } diff --git a/internal/metricdata/metricdata.go b/internal/metricdata/metricdata.go index e6b739a..aa3a87c 100644 --- a/internal/metricdata/metricdata.go +++ b/internal/metricdata/metricdata.go @@ -1,5 +1,5 @@ // Copyright (C) NHR@FAU, University Erlangen-Nuremberg. -// All rights reserved. +// All rights reserved. This file is part of cc-backend. // Use of this source code is governed by a MIT-style // license that can be found in the LICENSE file. package metricdata @@ -12,8 +12,8 @@ import ( "github.com/ClusterCockpit/cc-backend/internal/config" "github.com/ClusterCockpit/cc-backend/internal/graph/model" - "github.com/ClusterCockpit/cc-backend/pkg/log" - "github.com/ClusterCockpit/cc-backend/pkg/schema" + cclog "github.com/ClusterCockpit/cc-lib/ccLogger" + "github.com/ClusterCockpit/cc-lib/schema" ) type MetricDataRepository interface { @@ -46,7 +46,7 @@ func Init() error { Kind string `json:"kind"` } if err := json.Unmarshal(cluster.MetricDataRepository, &kind); err != nil { - log.Warn("Error while unmarshaling raw json MetricDataRepository") + cclog.Warn("Error while unmarshaling raw json MetricDataRepository") return err } @@ -63,7 +63,7 @@ func Init() error { } if err := mdr.Init(cluster.MetricDataRepository); err != nil { - log.Errorf("Error initializing MetricDataRepository %v for cluster %v", kind.Kind, cluster.Name) + cclog.Errorf("Error initializing MetricDataRepository %v for cluster %v", kind.Kind, cluster.Name) return err } metricDataRepos[cluster.Name] = mdr diff --git a/internal/metricdata/prometheus.go b/internal/metricdata/prometheus.go index e0add3a..2ec8558 100644 --- a/internal/metricdata/prometheus.go +++ b/internal/metricdata/prometheus.go @@ -1,5 +1,5 @@ // Copyright (C) 2022 DKRZ -// All rights reserved. +// All rights reserved. This file is part of cc-backend. // Use of this source code is governed by a MIT-style // license that can be found in the LICENSE file. 
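metricdata.go's Init, seen above, decodes only the "kind" field of each cluster's raw metricDataRepository JSON before handing the full raw config to the matching backend's own Init. A reduced sketch of that two-stage decoding; the type and function names here are stand-ins, and the listed kinds are the ones visible in this patch (cc-metric-store, prometheus, plus a test repository):

package main

import (
	"encoding/json"
	"fmt"
)

type repoKind struct {
	Kind string `json:"kind"`
}

// pickRepository peeks at "kind" without decoding the rest of the config,
// mirroring the dispatch in metricdata.Init.
func pickRepository(raw json.RawMessage) (string, error) {
	var k repoKind
	if err := json.Unmarshal(raw, &k); err != nil {
		return "", err
	}
	switch k.Kind {
	case "cc-metric-store", "prometheus", "test":
		return k.Kind, nil
	default:
		return "", fmt.Errorf("unknown metric data repository '%s'", k.Kind)
	}
}

func main() {
	kind, err := pickRepository(json.RawMessage(`{"kind":"prometheus","url":"http://localhost:9090"}`))
	fmt.Println(kind, err)
}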
package metricdata @@ -22,8 +22,8 @@ import ( "github.com/ClusterCockpit/cc-backend/internal/graph/model" "github.com/ClusterCockpit/cc-backend/pkg/archive" - "github.com/ClusterCockpit/cc-backend/pkg/log" - "github.com/ClusterCockpit/cc-backend/pkg/schema" + cclog "github.com/ClusterCockpit/cc-lib/ccLogger" + "github.com/ClusterCockpit/cc-lib/schema" promapi "github.com/prometheus/client_golang/api" promv1 "github.com/prometheus/client_golang/api/prometheus/v1" promcfg "github.com/prometheus/common/config" @@ -160,7 +160,7 @@ func (pdb *PrometheusDataRepository) Init(rawConfig json.RawMessage) error { var config PrometheusDataRepositoryConfig // parse config if err := json.Unmarshal(rawConfig, &config); err != nil { - log.Warn("Error while unmarshaling raw json config") + cclog.Warn("Error while unmarshaling raw json config") return err } // support basic authentication @@ -179,7 +179,7 @@ func (pdb *PrometheusDataRepository) Init(rawConfig json.RawMessage) error { RoundTripper: rt, }) if err != nil { - log.Error("Error while initializing new prometheus client") + cclog.Error("Error while initializing new prometheus client") return err } // init query client @@ -192,9 +192,9 @@ func (pdb *PrometheusDataRepository) Init(rawConfig json.RawMessage) error { for metric, templ := range config.Templates { pdb.templates[metric], err = template.New(metric).Parse(templ) if err == nil { - log.Debugf("Added PromQL template for %s: %s", metric, templ) + cclog.Debugf("Added PromQL template for %s: %s", metric, templ) } else { - log.Warnf("Failed to parse PromQL template %s for metric %s", templ, metric) + cclog.Warnf("Failed to parse PromQL template %s for metric %s", templ, metric) } } return nil @@ -221,7 +221,7 @@ func (pdb *PrometheusDataRepository) FormatQuery( return "", errors.New(fmt.Sprintf("METRICDATA/PROMETHEUS > Error compiling template %v", templ)) } else { query := buf.String() - log.Debugf("PromQL: %s", query) + cclog.Debugf("PromQL: %s", query) return query, nil } } else { @@ -285,7 +285,7 @@ func (pdb *PrometheusDataRepository) LoadData( for _, scope := range scopes { if scope != schema.MetricScopeNode { logOnce.Do(func() { - log.Infof("Scope '%s' requested, but not yet supported: Will return 'node' scope only.", scope) + cclog.Infof("Scope '%s' requested, but not yet supported: Will return 'node' scope only.", scope) }) continue } @@ -293,12 +293,12 @@ func (pdb *PrometheusDataRepository) LoadData( for _, metric := range metrics { metricConfig := archive.GetMetricConfig(job.Cluster, metric) if metricConfig == nil { - log.Warnf("Error in LoadData: Metric %s for cluster %s not configured", metric, job.Cluster) + cclog.Warnf("Error in LoadData: Metric %s for cluster %s not configured", metric, job.Cluster) return nil, errors.New("Prometheus config error") } query, err := pdb.FormatQuery(metric, scope, nodes, job.Cluster) if err != nil { - log.Warn("Error while formatting prometheus query") + cclog.Warn("Error while formatting prometheus query") return nil, err } @@ -310,11 +310,11 @@ func (pdb *PrometheusDataRepository) LoadData( } result, warnings, err := pdb.queryClient.QueryRange(ctx, query, r) if err != nil { - log.Errorf("Prometheus query error in LoadData: %v\nQuery: %s", err, query) + cclog.Errorf("Prometheus query error in LoadData: %v\nQuery: %s", err, query) return nil, errors.New("Prometheus query error") } if len(warnings) > 0 { - log.Warnf("Warnings: %v\n", warnings) + cclog.Warnf("Warnings: %v\n", warnings) } // init data structures @@ -360,7 +360,7 @@ func (pdb 
*PrometheusDataRepository) LoadStats( data, err := pdb.LoadData(job, metrics, []schema.MetricScope{schema.MetricScopeNode}, ctx, 0 /*resolution here*/) if err != nil { - log.Warn("Error while loading job for stats") + cclog.Warn("Error while loading job for stats") return nil, err } for metric, metricData := range data { @@ -391,19 +391,19 @@ func (pdb *PrometheusDataRepository) LoadNodeData( for _, scope := range scopes { if scope != schema.MetricScopeNode { logOnce.Do(func() { - log.Infof("Note: Scope '%s' requested, but not yet supported: Will return 'node' scope only.", scope) + cclog.Infof("Note: Scope '%s' requested, but not yet supported: Will return 'node' scope only.", scope) }) continue } for _, metric := range metrics { metricConfig := archive.GetMetricConfig(cluster, metric) if metricConfig == nil { - log.Warnf("Error in LoadNodeData: Metric %s for cluster %s not configured", metric, cluster) + cclog.Warnf("Error in LoadNodeData: Metric %s for cluster %s not configured", metric, cluster) return nil, errors.New("Prometheus config error") } query, err := pdb.FormatQuery(metric, scope, nodes, cluster) if err != nil { - log.Warn("Error while formatting prometheus query") + cclog.Warn("Error while formatting prometheus query") return nil, err } @@ -415,11 +415,11 @@ func (pdb *PrometheusDataRepository) LoadNodeData( } result, warnings, err := pdb.queryClient.QueryRange(ctx, query, r) if err != nil { - log.Errorf("Prometheus query error in LoadNodeData: %v\n", err) + cclog.Errorf("Prometheus query error in LoadNodeData: %v\n", err) return nil, errors.New("Prometheus query error") } if len(warnings) > 0 { - log.Warnf("Warnings: %v\n", warnings) + cclog.Warnf("Warnings: %v\n", warnings) } step := int64(metricConfig.Timestep) @@ -444,7 +444,7 @@ func (pdb *PrometheusDataRepository) LoadNodeData( } } t1 := time.Since(t0) - log.Debugf("LoadNodeData of %v nodes took %s", len(data), t1) + cclog.Debugf("LoadNodeData of %v nodes took %s", len(data), t1) return data, nil } @@ -459,7 +459,7 @@ func (pdb *PrometheusDataRepository) LoadScopedStats( scopedJobStats := make(schema.ScopedJobStats) data, err := pdb.LoadData(job, metrics, []schema.MetricScope{schema.MetricScopeNode}, ctx, 0 /*resolution here*/) if err != nil { - log.Warn("Error while loading job for scopedJobStats") + cclog.Warn("Error while loading job for scopedJobStats") return nil, err } @@ -467,7 +467,7 @@ func (pdb *PrometheusDataRepository) LoadScopedStats( for _, scope := range scopes { if scope != schema.MetricScopeNode { logOnce.Do(func() { - log.Infof("Note: Scope '%s' requested, but not yet supported: Will return 'node' scope only.", scope) + cclog.Infof("Note: Scope '%s' requested, but not yet supported: Will return 'node' scope only.", scope) }) continue } @@ -563,7 +563,7 @@ func (pdb *PrometheusDataRepository) LoadNodeListData( for _, scope := range scopes { if scope != schema.MetricScopeNode { logOnce.Do(func() { - log.Infof("Note: Scope '%s' requested, but not yet supported: Will return 'node' scope only.", scope) + cclog.Infof("Note: Scope '%s' requested, but not yet supported: Will return 'node' scope only.", scope) }) continue } @@ -571,12 +571,12 @@ func (pdb *PrometheusDataRepository) LoadNodeListData( for _, metric := range metrics { metricConfig := archive.GetMetricConfig(cluster, metric) if metricConfig == nil { - log.Warnf("Error in LoadNodeListData: Metric %s for cluster %s not configured", metric, cluster) + cclog.Warnf("Error in LoadNodeListData: Metric %s for cluster %s not configured", metric, cluster) 
return nil, totalNodes, hasNextPage, errors.New("Prometheus config error") } query, err := pdb.FormatQuery(metric, scope, nodes, cluster) if err != nil { - log.Warn("Error while formatting prometheus query") + cclog.Warn("Error while formatting prometheus query") return nil, totalNodes, hasNextPage, err } @@ -588,11 +588,11 @@ func (pdb *PrometheusDataRepository) LoadNodeListData( } result, warnings, err := pdb.queryClient.QueryRange(ctx, query, r) if err != nil { - log.Errorf("Prometheus query error in LoadNodeData: %v\n", err) + cclog.Errorf("Prometheus query error in LoadNodeData: %v\n", err) return nil, totalNodes, hasNextPage, errors.New("Prometheus query error") } if len(warnings) > 0 { - log.Warnf("Warnings: %v\n", warnings) + cclog.Warnf("Warnings: %v\n", warnings) } step := int64(metricConfig.Timestep) @@ -628,6 +628,6 @@ func (pdb *PrometheusDataRepository) LoadNodeListData( } } t1 := time.Since(t0) - log.Debugf("LoadNodeListData of %v nodes took %s", len(data), t1) + cclog.Debugf("LoadNodeListData of %v nodes took %s", len(data), t1) return data, totalNodes, hasNextPage, nil } diff --git a/internal/metricdata/utils.go b/internal/metricdata/utils.go index aa7bde1..59e640e 100644 --- a/internal/metricdata/utils.go +++ b/internal/metricdata/utils.go @@ -1,5 +1,5 @@ // Copyright (C) NHR@FAU, University Erlangen-Nuremberg. -// All rights reserved. +// All rights reserved. This file is part of cc-backend. // Use of this source code is governed by a MIT-style // license that can be found in the LICENSE file. package metricdata @@ -10,7 +10,7 @@ import ( "time" "github.com/ClusterCockpit/cc-backend/internal/graph/model" - "github.com/ClusterCockpit/cc-backend/pkg/schema" + "github.com/ClusterCockpit/cc-lib/schema" ) var TestLoadDataCallback func(job *schema.Job, metrics []string, scopes []schema.MetricScope, ctx context.Context, resolution int) (schema.JobData, error) = func(job *schema.Job, metrics []string, scopes []schema.MetricScope, ctx context.Context, resolution int) (schema.JobData, error) { @@ -29,16 +29,16 @@ func (tmdr *TestMetricDataRepository) LoadData( metrics []string, scopes []schema.MetricScope, ctx context.Context, - resolution int) (schema.JobData, error) { - + resolution int, +) (schema.JobData, error) { return TestLoadDataCallback(job, metrics, scopes, ctx, resolution) } func (tmdr *TestMetricDataRepository) LoadStats( job *schema.Job, metrics []string, - ctx context.Context) (map[string]map[string]schema.MetricStatistics, error) { - + ctx context.Context, +) (map[string]map[string]schema.MetricStatistics, error) { panic("TODO") } @@ -46,8 +46,8 @@ func (tmdr *TestMetricDataRepository) LoadScopedStats( job *schema.Job, metrics []string, scopes []schema.MetricScope, - ctx context.Context) (schema.ScopedJobStats, error) { - + ctx context.Context, +) (schema.ScopedJobStats, error) { panic("TODO") } @@ -56,8 +56,8 @@ func (tmdr *TestMetricDataRepository) LoadNodeData( metrics, nodes []string, scopes []schema.MetricScope, from, to time.Time, - ctx context.Context) (map[string]map[string][]*schema.JobMetric, error) { - + ctx context.Context, +) (map[string]map[string][]*schema.JobMetric, error) { panic("TODO") } @@ -70,7 +70,6 @@ func (tmdr *TestMetricDataRepository) LoadNodeListData( page *model.PageRequest, ctx context.Context, ) (map[string]schema.JobData, int, bool, error) { - panic("TODO") } diff --git a/internal/repository/dbConnection.go b/internal/repository/dbConnection.go index 0e3f29d..4c4906f 100644 --- a/internal/repository/dbConnection.go +++ 
b/internal/repository/dbConnection.go @@ -1,5 +1,5 @@ // Copyright (C) NHR@FAU, University Erlangen-Nuremberg. -// All rights reserved. +// All rights reserved. This file is part of cc-backend. // Use of this source code is governed by a MIT-style // license that can be found in the LICENSE file. package repository @@ -9,7 +9,7 @@ import ( "sync" "time" - "github.com/ClusterCockpit/cc-backend/pkg/log" + cclog "github.com/ClusterCockpit/cc-lib/ccLogger" "github.com/jmoiron/sqlx" "github.com/mattn/go-sqlite3" "github.com/qustavo/sqlhooks/v2" @@ -53,7 +53,7 @@ func Connect(driver string, db string) { // - Enable foreign key checks opts.URL += "?_journal=WAL&_timeout=5000&_fk=true" - if log.Loglevel() == "debug" { + if cclog.Loglevel() == "debug" { sql.Register("sqlite3WithHooks", sqlhooks.Wrap(&sqlite3.SQLiteDriver{}, &Hooks{})) dbHandle, err = sqlx.Open("sqlite3WithHooks", opts.URL) } else { @@ -63,11 +63,11 @@ func Connect(driver string, db string) { opts.URL += "?multiStatements=true" dbHandle, err = sqlx.Open("mysql", opts.URL) default: - log.Abortf("DB Connection: Unsupported database driver '%s'.\n", driver) + cclog.Abortf("DB Connection: Unsupported database driver '%s'.\n", driver) } if err != nil { - log.Abortf("DB Connection: Could not connect to '%s' database with sqlx.Open().\nError: %s\n", driver, err.Error()) + cclog.Abortf("DB Connection: Could not connect to '%s' database with sqlx.Open().\nError: %s\n", driver, err.Error()) } dbHandle.SetMaxOpenConns(opts.MaxOpenConnections) @@ -78,14 +78,14 @@ func Connect(driver string, db string) { dbConnInstance = &DBConnection{DB: dbHandle, Driver: driver} err = checkDBVersion(driver, dbHandle.DB) if err != nil { - log.Abortf("DB Connection: Failed DB version check.\nError: %s\n", err.Error()) + cclog.Abortf("DB Connection: Failed DB version check.\nError: %s\n", err.Error()) } }) } func GetConnection() *DBConnection { if dbConnInstance == nil { - log.Fatalf("Database connection not initialized!") + cclog.Fatalf("Database connection not initialized!") } return dbConnInstance diff --git a/internal/repository/hooks.go b/internal/repository/hooks.go index 4b9e814..5433072 100644 --- a/internal/repository/hooks.go +++ b/internal/repository/hooks.go @@ -1,5 +1,5 @@ // Copyright (C) NHR@FAU, University Erlangen-Nuremberg. -// All rights reserved. +// All rights reserved. This file is part of cc-backend. // Use of this source code is governed by a MIT-style // license that can be found in the LICENSE file. 
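For the sqlite3 driver, Connect (above) appends WAL journaling, a 5000 ms busy timeout, and foreign-key enforcement to the DSN, and wraps the driver with sqlhooks only when the log level is debug. A minimal sketch of the plain, non-debug path; the connection limit shown is illustrative, since cc-backend takes it from its options struct:

package main

import (
	"fmt"

	"github.com/jmoiron/sqlx"
	_ "github.com/mattn/go-sqlite3"
)

// openSqlite mirrors the sqlite3 branch of repository.Connect:
// WAL journal, 5 s busy timeout, foreign keys on.
func openSqlite(path string) (*sqlx.DB, error) {
	dsn := path + "?_journal=WAL&_timeout=5000&_fk=true"
	db, err := sqlx.Open("sqlite3", dsn)
	if err != nil {
		return nil, err
	}
	db.SetMaxOpenConns(4) // illustrative value
	return db, nil
}

func main() {
	db, err := openSqlite("./var/job.db")
	fmt.Println(db != nil, err)
}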
package repository @@ -8,7 +8,7 @@ import ( "context" "time" - "github.com/ClusterCockpit/cc-backend/pkg/log" + cclog "github.com/ClusterCockpit/cc-lib/ccLogger" ) // Hooks satisfies the sqlhook.Hooks interface @@ -16,13 +16,13 @@ type Hooks struct{} // Before hook will print the query with it's args and return the context with the timestamp func (h *Hooks) Before(ctx context.Context, query string, args ...any) (context.Context, error) { - log.Debugf("SQL query %s %q", query, args) + cclog.Debugf("SQL query %s %q", query, args) return context.WithValue(ctx, "begin", time.Now()), nil } // After hook will get the timestamp registered on the Before hook and print the elapsed time func (h *Hooks) After(ctx context.Context, query string, args ...any) (context.Context, error) { begin := ctx.Value("begin").(time.Time) - log.Debugf("Took: %s\n", time.Since(begin)) + cclog.Debugf("Took: %s\n", time.Since(begin)) return ctx, nil } diff --git a/internal/repository/job.go b/internal/repository/job.go index 8a69024..b6aa323 100644 --- a/internal/repository/job.go +++ b/internal/repository/job.go @@ -1,5 +1,5 @@ // Copyright (C) NHR@FAU, University Erlangen-Nuremberg. -// All rights reserved. +// All rights reserved. This file is part of cc-backend. // Use of this source code is governed by a MIT-style // license that can be found in the LICENSE file. package repository @@ -16,9 +16,9 @@ import ( "time" "github.com/ClusterCockpit/cc-backend/pkg/archive" - "github.com/ClusterCockpit/cc-backend/pkg/log" - "github.com/ClusterCockpit/cc-backend/pkg/lrucache" - "github.com/ClusterCockpit/cc-backend/pkg/schema" + cclog "github.com/ClusterCockpit/cc-lib/ccLogger" + "github.com/ClusterCockpit/cc-lib/lrucache" + "github.com/ClusterCockpit/cc-lib/schema" sq "github.com/Masterminds/squirrel" "github.com/jmoiron/sqlx" ) @@ -76,18 +76,18 @@ func scanJob(row interface{ Scan(...any) error }) (*schema.Job, error) { &job.StartTime, &job.Partition, &job.ArrayJobId, &job.NumNodes, &job.NumHWThreads, &job.NumAcc, &job.Exclusive, &job.MonitoringStatus, &job.SMT, &job.State, &job.Duration, &job.Walltime, &job.RawResources, &job.RawFootprint, &job.Energy); err != nil { - log.Warnf("Error while scanning rows (Job): %v", err) + cclog.Warnf("Error while scanning rows (Job): %v", err) return nil, err } if err := json.Unmarshal(job.RawResources, &job.Resources); err != nil { - log.Warn("Error while unmarshaling raw resources json") + cclog.Warn("Error while unmarshaling raw resources json") return nil, err } job.RawResources = nil if err := json.Unmarshal(job.RawFootprint, &job.Footprint); err != nil { - log.Warnf("Error while unmarshaling raw footprint json: %v", err) + cclog.Warnf("Error while unmarshaling raw footprint json: %v", err) return nil, err } job.RawFootprint = nil @@ -109,7 +109,7 @@ func (r *JobRepository) Optimize() error { return err } case "mysql": - log.Info("Optimize currently not supported for mysql driver") + cclog.Info("Optimize currently not supported for mysql driver") } return nil @@ -160,7 +160,7 @@ func (r *JobRepository) FetchMetadata(job *schema.Job) (map[string]string, error if err := sq.Select("job.meta_data").From("job").Where("job.id = ?", job.ID). 
RunWith(r.stmtCache).QueryRow().Scan(&job.RawMetaData); err != nil { - log.Warn("Error while scanning for job metadata") + cclog.Warn("Error while scanning for job metadata") return nil, err } @@ -169,12 +169,12 @@ func (r *JobRepository) FetchMetadata(job *schema.Job) (map[string]string, error } if err := json.Unmarshal(job.RawMetaData, &job.MetaData); err != nil { - log.Warn("Error while unmarshaling raw metadata json") + cclog.Warn("Error while unmarshaling raw metadata json") return nil, err } r.cache.Put(cachekey, job.MetaData, len(job.RawMetaData), 24*time.Hour) - log.Debugf("Timer FetchMetadata %s", time.Since(start)) + cclog.Debugf("Timer FetchMetadata %s", time.Since(start)) return job.MetaData, nil } @@ -183,7 +183,7 @@ func (r *JobRepository) UpdateMetadata(job *schema.Job, key, val string) (err er r.cache.Del(cachekey) if job.MetaData == nil { if _, err = r.FetchMetadata(job); err != nil { - log.Warnf("Error while fetching metadata for job, DB ID '%v'", job.ID) + cclog.Warnf("Error while fetching metadata for job, DB ID '%v'", job.ID) return err } } @@ -198,7 +198,7 @@ func (r *JobRepository) UpdateMetadata(job *schema.Job, key, val string) (err er } if job.RawMetaData, err = json.Marshal(job.MetaData); err != nil { - log.Warnf("Error while marshaling metadata for job, DB ID '%v'", job.ID) + cclog.Warnf("Error while marshaling metadata for job, DB ID '%v'", job.ID) return err } @@ -206,7 +206,7 @@ func (r *JobRepository) UpdateMetadata(job *schema.Job, key, val string) (err er Set("meta_data", job.RawMetaData). Where("job.id = ?", job.ID). RunWith(r.stmtCache).Exec(); err != nil { - log.Warnf("Error while updating metadata for job, DB ID '%v'", job.ID) + cclog.Warnf("Error while updating metadata for job, DB ID '%v'", job.ID) return err } @@ -219,7 +219,7 @@ func (r *JobRepository) FetchFootprint(job *schema.Job) (map[string]float64, err if err := sq.Select("job.footprint").From("job").Where("job.id = ?", job.ID). RunWith(r.stmtCache).QueryRow().Scan(&job.RawFootprint); err != nil { - log.Warn("Error while scanning for job footprint") + cclog.Warn("Error while scanning for job footprint") return nil, err } @@ -228,11 +228,11 @@ func (r *JobRepository) FetchFootprint(job *schema.Job) (map[string]float64, err } if err := json.Unmarshal(job.RawFootprint, &job.Footprint); err != nil { - log.Warn("Error while unmarshaling raw footprint json") + cclog.Warn("Error while unmarshaling raw footprint json") return nil, err } - log.Debugf("Timer FetchFootprint %s", time.Since(start)) + cclog.Debugf("Timer FetchFootprint %s", time.Since(start)) return job.Footprint, nil } @@ -246,7 +246,7 @@ func (r *JobRepository) FetchEnergyFootprint(job *schema.Job) (map[string]float6 if err := sq.Select("job.energy_footprint").From("job").Where("job.id = ?", job.ID). 
RunWith(r.stmtCache).QueryRow().Scan(&job.RawEnergyFootprint); err != nil { - log.Warn("Error while scanning for job energy_footprint") + cclog.Warn("Error while scanning for job energy_footprint") return nil, err } @@ -255,12 +255,12 @@ func (r *JobRepository) FetchEnergyFootprint(job *schema.Job) (map[string]float6 } if err := json.Unmarshal(job.RawEnergyFootprint, &job.EnergyFootprint); err != nil { - log.Warn("Error while unmarshaling raw energy footprint json") + cclog.Warn("Error while unmarshaling raw energy footprint json") return nil, err } r.cache.Put(cachekey, job.EnergyFootprint, len(job.EnergyFootprint), 24*time.Hour) - log.Debugf("Timer FetchEnergyFootprint %s", time.Since(start)) + cclog.Debugf("Timer FetchEnergyFootprint %s", time.Since(start)) return job.EnergyFootprint, nil } @@ -273,9 +273,9 @@ func (r *JobRepository) DeleteJobsBefore(startTime int64) (int, error) { if err != nil { s, _, _ := qd.ToSql() - log.Errorf(" DeleteJobsBefore(%d) with %s: error %#v", startTime, s, err) + cclog.Errorf(" DeleteJobsBefore(%d) with %s: error %#v", startTime, s, err) } else { - log.Debugf("DeleteJobsBefore(%d): Deleted %d jobs", startTime, cnt) + cclog.Debugf("DeleteJobsBefore(%d): Deleted %d jobs", startTime, cnt) } return cnt, err } @@ -286,9 +286,9 @@ func (r *JobRepository) DeleteJobById(id int64) error { if err != nil { s, _, _ := qd.ToSql() - log.Errorf("DeleteJobById(%d) with %s : error %#v", id, s, err) + cclog.Errorf("DeleteJobById(%d) with %s : error %#v", id, s, err) } else { - log.Debugf("DeleteJobById(%d): Success", id) + cclog.Debugf("DeleteJobById(%d): Success", id) } return err } @@ -351,7 +351,7 @@ func (r *JobRepository) FindColumnValue(user *schema.User, searchterm string, ta } return "", ErrNotFound } else { - log.Infof("Non-Admin User %s : Requested Query '%s' on table '%s' : Forbidden", user.Name, query, table) + cclog.Infof("Non-Admin User %s : Requested Query '%s' on table '%s' : Forbidden", user.Name, query, table) return "", ErrForbidden } } @@ -370,7 +370,7 @@ func (r *JobRepository) FindColumnValues(user *schema.User, query string, table err := rows.Scan(&result) if err != nil { rows.Close() - log.Warnf("Error while scanning rows: %v", err) + cclog.Warnf("Error while scanning rows: %v", err) return emptyResult, err } results = append(results, result) @@ -380,7 +380,7 @@ func (r *JobRepository) FindColumnValues(user *schema.User, query string, table return emptyResult, ErrNotFound } else { - log.Infof("Non-Admin User %s : Requested Query '%s' on table '%s' : Forbidden", user.Name, query, table) + cclog.Infof("Non-Admin User %s : Requested Query '%s' on table '%s' : Forbidden", user.Name, query, table) return emptyResult, ErrForbidden } } @@ -399,7 +399,7 @@ func (r *JobRepository) Partitions(cluster string) ([]string, error) { if err != nil { return nil, err } - log.Debugf("Timer Partitions %s", time.Since(start)) + cclog.Debugf("Timer Partitions %s", time.Since(start)) return partitions.([]string), nil } @@ -413,7 +413,7 @@ func (r *JobRepository) AllocatedNodes(cluster string) (map[string]map[string]in Where("job.cluster = ?", cluster). 
RunWith(r.stmtCache).Query() if err != nil { - log.Error("Error while running query") + cclog.Error("Error while running query") return nil, err } @@ -424,11 +424,11 @@ func (r *JobRepository) AllocatedNodes(cluster string) (map[string]map[string]in var resources []*schema.Resource var subcluster string if err := rows.Scan(&raw, &subcluster); err != nil { - log.Warn("Error while scanning rows") + cclog.Warn("Error while scanning rows") return nil, err } if err := json.Unmarshal(raw, &resources); err != nil { - log.Warn("Error while unmarshaling raw resources json") + cclog.Warn("Error while unmarshaling raw resources json") return nil, err } @@ -443,7 +443,7 @@ func (r *JobRepository) AllocatedNodes(cluster string) (map[string]map[string]in } } - log.Debugf("Timer AllocatedNodes %s", time.Since(start)) + cclog.Debugf("Timer AllocatedNodes %s", time.Since(start)) return subclusters, nil } @@ -459,20 +459,20 @@ func (r *JobRepository) StopJobsExceedingWalltimeBy(seconds int) error { Where(fmt.Sprintf("(%d - job.start_time) > (job.walltime + %d)", time.Now().Unix(), seconds)). RunWith(r.DB).Exec() if err != nil { - log.Warn("Error while stopping jobs exceeding walltime") + cclog.Warn("Error while stopping jobs exceeding walltime") return err } rowsAffected, err := res.RowsAffected() if err != nil { - log.Warn("Error while fetching affected rows after stopping due to exceeded walltime") + cclog.Warn("Error while fetching affected rows after stopping due to exceeded walltime") return err } if rowsAffected > 0 { - log.Infof("%d jobs have been marked as failed due to running too long", rowsAffected) + cclog.Infof("%d jobs have been marked as failed due to running too long", rowsAffected) } - log.Debugf("Timer StopJobsExceedingWalltimeBy %s", time.Since(start)) + cclog.Debugf("Timer StopJobsExceedingWalltimeBy %s", time.Since(start)) return nil } @@ -482,7 +482,7 @@ func (r *JobRepository) FindJobIdsByTag(tagId int64) ([]int64, error) { Where(sq.Eq{"jobtag.tag_id": tagId}).Distinct() rows, err := query.RunWith(r.stmtCache).Query() if err != nil { - log.Error("Error while running query") + cclog.Error("Error while running query") return nil, err } jobIds := make([]int64, 0, 100) @@ -492,7 +492,7 @@ func (r *JobRepository) FindJobIdsByTag(tagId int64) ([]int64, error) { if err := rows.Scan(&jobId); err != nil { rows.Close() - log.Warn("Error while scanning rows") + cclog.Warn("Error while scanning rows") return nil, err } @@ -511,7 +511,7 @@ func (r *JobRepository) FindRunningJobs(cluster string) ([]*schema.Job, error) { rows, err := query.RunWith(r.stmtCache).Query() if err != nil { - log.Error("Error while running query") + cclog.Error("Error while running query") return nil, err } @@ -520,13 +520,13 @@ func (r *JobRepository) FindRunningJobs(cluster string) ([]*schema.Job, error) { job, err := scanJob(rows) if err != nil { rows.Close() - log.Warn("Error while scanning rows") + cclog.Warn("Error while scanning rows") return nil, err } jobs = append(jobs, job) } - log.Infof("Return job count %d", len(jobs)) + cclog.Infof("Return job count %d", len(jobs)) return jobs, nil } @@ -551,18 +551,18 @@ func (r *JobRepository) FindJobsBetween(startTimeBegin int64, startTimeEnd int64 } if startTimeBegin == 0 { - log.Infof("Find jobs before %d", startTimeEnd) + cclog.Infof("Find jobs before %d", startTimeEnd) query = sq.Select(jobColumns...).From("job").Where(fmt.Sprintf( "job.start_time < %d", startTimeEnd)) } else { - log.Infof("Find jobs between %d and %d", startTimeBegin, startTimeEnd) + cclog.Infof("Find 
jobs between %d and %d", startTimeBegin, startTimeEnd) query = sq.Select(jobColumns...).From("job").Where(fmt.Sprintf( "job.start_time BETWEEN %d AND %d", startTimeBegin, startTimeEnd)) } rows, err := query.RunWith(r.stmtCache).Query() if err != nil { - log.Error("Error while running query") + cclog.Error("Error while running query") return nil, err } @@ -571,13 +571,13 @@ func (r *JobRepository) FindJobsBetween(startTimeBegin int64, startTimeEnd int64 job, err := scanJob(rows) if err != nil { rows.Close() - log.Warn("Error while scanning rows") + cclog.Warn("Error while scanning rows") return nil, err } jobs = append(jobs, job) } - log.Infof("Return job count %d", len(jobs)) + cclog.Infof("Return job count %d", len(jobs)) return jobs, nil } @@ -612,7 +612,7 @@ func (r *JobRepository) UpdateEnergy( /* Note: Only Called for Running Jobs during Intermediate Update or on Archiving */ sc, err := archive.GetSubCluster(jobMeta.Cluster, jobMeta.SubCluster) if err != nil { - log.Errorf("cannot get subcluster: %s", err.Error()) + cclog.Errorf("cannot get subcluster: %s", err.Error()) return stmt, err } energyFootprint := make(map[string]float64) @@ -625,7 +625,7 @@ func (r *JobRepository) UpdateEnergy( if i, err := archive.MetricIndex(sc.MetricConfig, fp); err == nil { // Note: For DB data, calculate and save as kWh if sc.MetricConfig[i].Energy == "energy" { // this metric has energy as unit (Joules or Wh) - log.Warnf("Update EnergyFootprint for Job %d and Metric %s on cluster %s: Set to 'energy' in cluster.json: Not implemented, will return 0.0", jobMeta.JobID, jobMeta.Cluster, fp) + cclog.Warnf("Update EnergyFootprint for Job %d and Metric %s on cluster %s: Set to 'energy' in cluster.json: Not implemented, will return 0.0", jobMeta.JobID, jobMeta.Cluster, fp) // FIXME: Needs sum as stats type } else if sc.MetricConfig[i].Energy == "power" { // this metric has power as unit (Watt) // Energy: Power (in Watts) * Time (in Seconds) @@ -637,18 +637,18 @@ func (r *JobRepository) UpdateEnergy( metricEnergy = math.Round(rawEnergy*100.0) / 100.0 } } else { - log.Warnf("Error while collecting energy metric %s for job, DB ID '%v', return '0.0'", fp, jobMeta.ID) + cclog.Warnf("Error while collecting energy metric %s for job, DB ID '%v', return '0.0'", fp, jobMeta.ID) } energyFootprint[fp] = metricEnergy totalEnergy += metricEnergy - // log.Infof("Metric %s Average %f -> %f kWh | Job %d Total -> %f kWh", fp, LoadJobStat(jobMeta, fp, "avg"), energy, jobMeta.JobID, totalEnergy) + // cclog.Infof("Metric %s Average %f -> %f kWh | Job %d Total -> %f kWh", fp, LoadJobStat(jobMeta, fp, "avg"), energy, jobMeta.JobID, totalEnergy) } var rawFootprint []byte if rawFootprint, err = json.Marshal(energyFootprint); err != nil { - log.Warnf("Error while marshaling energy footprint for job INTO BYTES, DB ID '%v'", jobMeta.ID) + cclog.Warnf("Error while marshaling energy footprint for job INTO BYTES, DB ID '%v'", jobMeta.ID) return stmt, err } @@ -662,7 +662,7 @@ func (r *JobRepository) UpdateFootprint( /* Note: Only Called for Running Jobs during Intermediate Update or on Archiving */ sc, err := archive.GetSubCluster(jobMeta.Cluster, jobMeta.SubCluster) if err != nil { - log.Errorf("cannot get subcluster: %s", err.Error()) + cclog.Errorf("cannot get subcluster: %s", err.Error()) return stmt, err } footprint := make(map[string]float64) @@ -676,7 +676,7 @@ func (r *JobRepository) UpdateFootprint( } if statType != "avg" && statType != "min" && statType != "max" { - log.Warnf("unknown statType for footprint update: %s", statType) 
+ cclog.Warnf("unknown statType for footprint update: %s", statType) return stmt, fmt.Errorf("unknown statType for footprint update: %s", statType) } @@ -690,7 +690,7 @@ func (r *JobRepository) UpdateFootprint( var rawFootprint []byte if rawFootprint, err = json.Marshal(footprint); err != nil { - log.Warnf("Error while marshaling footprint for job INTO BYTES, DB ID '%v'", jobMeta.ID) + cclog.Warnf("Error while marshaling footprint for job INTO BYTES, DB ID '%v'", jobMeta.ID) return stmt, err } diff --git a/internal/repository/jobCreate.go b/internal/repository/jobCreate.go index 1508c8d..aa2ea76 100644 --- a/internal/repository/jobCreate.go +++ b/internal/repository/jobCreate.go @@ -1,5 +1,5 @@ // Copyright (C) NHR@FAU, University Erlangen-Nuremberg. -// All rights reserved. +// All rights reserved. This file is part of cc-backend. // Use of this source code is governed by a MIT-style // license that can be found in the LICENSE file. package repository @@ -8,8 +8,8 @@ import ( "encoding/json" "fmt" - "github.com/ClusterCockpit/cc-backend/pkg/log" - "github.com/ClusterCockpit/cc-backend/pkg/schema" + cclog "github.com/ClusterCockpit/cc-lib/ccLogger" + "github.com/ClusterCockpit/cc-lib/schema" sq "github.com/Masterminds/squirrel" ) @@ -34,12 +34,12 @@ func (r *JobRepository) InsertJob(job *schema.Job) (int64, error) { res, err := r.DB.NamedExec(NamedJobCacheInsert, job) r.Mutex.Unlock() if err != nil { - log.Warn("Error while NamedJobInsert") + cclog.Warn("Error while NamedJobInsert") return 0, err } id, err := res.LastInsertId() if err != nil { - log.Warn("Error while getting last insert ID") + cclog.Warn("Error while getting last insert ID") return 0, err } @@ -54,7 +54,7 @@ func (r *JobRepository) SyncJobs() ([]*schema.Job, error) { rows, err := query.RunWith(r.stmtCache).Query() if err != nil { - log.Errorf("Error while running query %v", err) + cclog.Errorf("Error while running query %v", err) return nil, err } @@ -63,7 +63,7 @@ func (r *JobRepository) SyncJobs() ([]*schema.Job, error) { job, err := scanJob(rows) if err != nil { rows.Close() - log.Warn("Error while scanning rows") + cclog.Warn("Error while scanning rows") return nil, err } jobs = append(jobs, job) @@ -72,13 +72,13 @@ func (r *JobRepository) SyncJobs() ([]*schema.Job, error) { _, err = r.DB.Exec( "INSERT INTO job (job_id, cluster, subcluster, start_time, hpc_user, project, cluster_partition, array_job_id, num_nodes, num_hwthreads, num_acc, exclusive, monitoring_status, smt, job_state, duration, walltime, footprint, energy, energy_footprint, resources, meta_data) SELECT job_id, cluster, subcluster, start_time, hpc_user, project, cluster_partition, array_job_id, num_nodes, num_hwthreads, num_acc, exclusive, monitoring_status, smt, job_state, duration, walltime, footprint, energy, energy_footprint, resources, meta_data FROM job_cache") if err != nil { - log.Warnf("Error while Job sync: %v", err) + cclog.Warnf("Error while Job sync: %v", err) return nil, err } _, err = r.DB.Exec("DELETE FROM job_cache") if err != nil { - log.Warnf("Error while Job cache clean: %v", err) + cclog.Warnf("Error while Job cache clean: %v", err) return nil, err } diff --git a/internal/repository/jobFind.go b/internal/repository/jobFind.go index 2acdb87..39519d5 100644 --- a/internal/repository/jobFind.go +++ b/internal/repository/jobFind.go @@ -1,5 +1,5 @@ // Copyright (C) NHR@FAU, University Erlangen-Nuremberg. -// All rights reserved. +// All rights reserved. This file is part of cc-backend. 
// Use of this source code is governed by a MIT-style // license that can be found in the LICENSE file. package repository @@ -11,8 +11,8 @@ import ( "time" "github.com/ClusterCockpit/cc-backend/internal/graph/model" - "github.com/ClusterCockpit/cc-backend/pkg/log" - "github.com/ClusterCockpit/cc-backend/pkg/schema" + cclog "github.com/ClusterCockpit/cc-lib/ccLogger" + "github.com/ClusterCockpit/cc-lib/schema" sq "github.com/Masterminds/squirrel" ) @@ -39,7 +39,7 @@ func (r *JobRepository) Find( q = q.OrderBy("job.id DESC") // always use newest matching job by db id if more than one match - log.Debugf("Timer Find %s", time.Since(start)) + cclog.Debugf("Timer Find %s", time.Since(start)) return scanJob(q.RunWith(r.stmtCache).QueryRow()) } @@ -86,7 +86,7 @@ func (r *JobRepository) FindAll( rows, err := q.RunWith(r.stmtCache).Query() if err != nil { - log.Error("Error while running query") + cclog.Error("Error while running query") return nil, err } @@ -94,12 +94,12 @@ func (r *JobRepository) FindAll( for rows.Next() { job, err := scanJob(rows) if err != nil { - log.Warn("Error while scanning rows") + cclog.Warn("Error while scanning rows") return nil, err } jobs = append(jobs, job) } - log.Debugf("Timer FindAll %s", time.Since(start)) + cclog.Debugf("Timer FindAll %s", time.Since(start)) return jobs, nil } @@ -112,7 +112,7 @@ func (r *JobRepository) GetJobList() ([]int64, error) { rows, err := query.RunWith(r.stmtCache).Query() if err != nil { - log.Error("Error while running query") + cclog.Error("Error while running query") return nil, err } @@ -122,13 +122,13 @@ func (r *JobRepository) GetJobList() ([]int64, error) { err := rows.Scan(&id) if err != nil { rows.Close() - log.Warn("Error while scanning rows") + cclog.Warn("Error while scanning rows") return nil, err } jl = append(jl, id) } - log.Infof("Return job count %d", len(jl)) + cclog.Infof("Return job count %d", len(jl)) return jl, nil } @@ -253,7 +253,7 @@ func (r *JobRepository) FindConcurrentJobs( rows, err := query.RunWith(r.stmtCache).Query() if err != nil { - log.Errorf("Error while running query: %v", err) + cclog.Errorf("Error while running query: %v", err) return nil, err } @@ -264,7 +264,7 @@ func (r *JobRepository) FindConcurrentJobs( var id, jobId, startTime sql.NullInt64 if err = rows.Scan(&id, &jobId, &startTime); err != nil { - log.Warn("Error while scanning rows") + cclog.Warn("Error while scanning rows") return nil, err } @@ -280,7 +280,7 @@ func (r *JobRepository) FindConcurrentJobs( rows, err = queryRunning.RunWith(r.stmtCache).Query() if err != nil { - log.Errorf("Error while running query: %v", err) + cclog.Errorf("Error while running query: %v", err) return nil, err } @@ -288,7 +288,7 @@ func (r *JobRepository) FindConcurrentJobs( var id, jobId, startTime sql.NullInt64 if err := rows.Scan(&id, &jobId, &startTime); err != nil { - log.Warn("Error while scanning rows") + cclog.Warn("Error while scanning rows") return nil, err } diff --git a/internal/repository/jobHooks.go b/internal/repository/jobHooks.go index 1016335..824b5cd 100644 --- a/internal/repository/jobHooks.go +++ b/internal/repository/jobHooks.go @@ -1,5 +1,5 @@ // Copyright (C) NHR@FAU, University Erlangen-Nuremberg. -// All rights reserved. +// All rights reserved. This file is part of cc-backend. // Use of this source code is governed by a MIT-style // license that can be found in the LICENSE file. 
package repository @@ -7,7 +7,7 @@ package repository import ( "sync" - "github.com/ClusterCockpit/cc-backend/pkg/schema" + "github.com/ClusterCockpit/cc-lib/schema" ) type JobHook interface { diff --git a/internal/repository/jobQuery.go b/internal/repository/jobQuery.go index 2f72e77..c9ccb03 100644 --- a/internal/repository/jobQuery.go +++ b/internal/repository/jobQuery.go @@ -1,5 +1,5 @@ // Copyright (C) NHR@FAU, University Erlangen-Nuremberg. -// All rights reserved. +// All rights reserved. This file is part of cc-backend. // Use of this source code is governed by a MIT-style // license that can be found in the LICENSE file. package repository @@ -13,8 +13,8 @@ import ( "time" "github.com/ClusterCockpit/cc-backend/internal/graph/model" - "github.com/ClusterCockpit/cc-backend/pkg/log" - "github.com/ClusterCockpit/cc-backend/pkg/schema" + cclog "github.com/ClusterCockpit/cc-lib/ccLogger" + "github.com/ClusterCockpit/cc-lib/schema" sq "github.com/Masterminds/squirrel" ) @@ -68,7 +68,7 @@ func (r *JobRepository) QueryJobs( rows, err := query.RunWith(r.stmtCache).Query() if err != nil { queryString, queryVars, _ := query.ToSql() - log.Errorf("Error while running query '%s' %v: %v", queryString, queryVars, err) + cclog.Errorf("Error while running query '%s' %v: %v", queryString, queryVars, err) return nil, err } @@ -77,7 +77,7 @@ func (r *JobRepository) QueryJobs( job, err := scanJob(rows) if err != nil { rows.Close() - log.Warn("Error while scanning rows (Jobs)") + cclog.Warn("Error while scanning rows (Jobs)") return nil, err } jobs = append(jobs, job) @@ -123,7 +123,7 @@ func SecurityCheckWithUser(user *schema.User, query sq.SelectBuilder) (sq.Select if len(user.Projects) != 0 { return query.Where(sq.Or{sq.Eq{"job.project": user.Projects}, sq.Eq{"job.hpc_user": user.Username}}), nil } else { - log.Debugf("Manager-User '%s' has no defined projects to lookup! Query only personal jobs ...", user.Username) + cclog.Debugf("Manager-User '%s' has no defined projects to lookup! Query only personal jobs ...", user.Username) return query.Where("job.hpc_user = ?", user.Username), nil } case user.HasRole(schema.RoleUser): // User : Only personal jobs @@ -244,7 +244,7 @@ func buildTimeCondition(field string, cond *schema.TimeRange, query sq.SelectBui case "last30d": then = now - (60 * 60 * 24 * 30) default: - log.Debugf("No known named timeRange: startTime.range = %s", cond.Range) + cclog.Debugf("No known named timeRange: startTime.range = %s", cond.Range) return query } return query.Where(field+" BETWEEN ? AND ?", then, now) @@ -335,7 +335,7 @@ var ( func toSnakeCase(str string) string { for _, c := range str { if c == '\'' || c == '\\' { - log.Panic("toSnakeCase() attack vector!") + cclog.Panic("toSnakeCase() attack vector!") } } diff --git a/internal/repository/job_test.go b/internal/repository/job_test.go index bf7abd9..e96373d 100644 --- a/internal/repository/job_test.go +++ b/internal/repository/job_test.go @@ -1,5 +1,5 @@ // Copyright (C) NHR@FAU, University Erlangen-Nuremberg. -// All rights reserved. +// All rights reserved. This file is part of cc-backend. // Use of this source code is governed by a MIT-style // license that can be found in the LICENSE file. 
package repository @@ -9,7 +9,7 @@ import ( "fmt" "testing" - "github.com/ClusterCockpit/cc-backend/pkg/schema" + "github.com/ClusterCockpit/cc-lib/schema" _ "github.com/mattn/go-sqlite3" ) diff --git a/internal/repository/migration.go b/internal/repository/migration.go index fb78170..13f74ec 100644 --- a/internal/repository/migration.go +++ b/internal/repository/migration.go @@ -1,5 +1,5 @@ // Copyright (C) NHR@FAU, University Erlangen-Nuremberg. -// All rights reserved. +// All rights reserved. This file is part of cc-backend. // Use of this source code is governed by a MIT-style // license that can be found in the LICENSE file. package repository @@ -9,7 +9,7 @@ import ( "embed" "fmt" - "github.com/ClusterCockpit/cc-backend/pkg/log" + cclog "github.com/ClusterCockpit/cc-lib/ccLogger" "github.com/golang-migrate/migrate/v4" "github.com/golang-migrate/migrate/v4/database/mysql" "github.com/golang-migrate/migrate/v4/database/sqlite3" @@ -54,13 +54,13 @@ func checkDBVersion(backend string, db *sql.DB) error { return err } default: - log.Abortf("Migration: Unsupported database backend '%s'.\n", backend) + cclog.Abortf("Migration: Unsupported database backend '%s'.\n", backend) } v, dirty, err := m.Version() if err != nil { if err == migrate.ErrNilVersion { - log.Warn("Legacy database without version or missing database file!") + cclog.Warn("Legacy database without version or missing database file!") } else { return err } @@ -84,7 +84,7 @@ func getMigrateInstance(backend string, db string) (m *migrate.Migrate, err erro case "sqlite3": d, err := iofs.New(migrationFiles, "migrations/sqlite3") if err != nil { - log.Fatal(err) + cclog.Fatal(err) } m, err = migrate.NewWithSourceInstance("iofs", d, fmt.Sprintf("sqlite3://%s?_foreign_keys=on", db)) @@ -102,7 +102,7 @@ func getMigrateInstance(backend string, db string) (m *migrate.Migrate, err erro return m, err } default: - log.Abortf("Migration: Unsupported database backend '%s'.\n", backend) + cclog.Abortf("Migration: Unsupported database backend '%s'.\n", backend) } return m, nil @@ -117,14 +117,14 @@ func MigrateDB(backend string, db string) error { v, dirty, err := m.Version() if err != nil { if err == migrate.ErrNilVersion { - log.Warn("Legacy database without version or missing database file!") + cclog.Warn("Legacy database without version or missing database file!") } else { return err } } if v < Version { - log.Infof("unsupported database version %d, need %d.\nPlease backup your database file and run cc-backend -migrate-db", v, Version) + cclog.Infof("unsupported database version %d, need %d.\nPlease backup your database file and run cc-backend -migrate-db", v, Version) } if dirty { @@ -133,7 +133,7 @@ func MigrateDB(backend string, db string) error { if err := m.Up(); err != nil { if err == migrate.ErrNoChange { - log.Info("DB already up to date!") + cclog.Info("DB already up to date!") } else { return err } @@ -151,7 +151,7 @@ func RevertDB(backend string, db string) error { if err := m.Migrate(Version - 1); err != nil { if err == migrate.ErrNoChange { - log.Info("DB already up to date!") + cclog.Info("DB already up to date!") } else { return err } diff --git a/internal/repository/node.go b/internal/repository/node.go index f288acc..83bf062 100644 --- a/internal/repository/node.go +++ b/internal/repository/node.go @@ -1,5 +1,5 @@ // Copyright (C) NHR@FAU, University Erlangen-Nuremberg. -// All rights reserved. +// All rights reserved. This file is part of cc-backend. 
// Use of this source code is governed by a MIT-style // license that can be found in the LICENSE file. package repository @@ -15,9 +15,9 @@ import ( "github.com/ClusterCockpit/cc-backend/internal/graph/model" "github.com/ClusterCockpit/cc-backend/pkg/archive" - "github.com/ClusterCockpit/cc-backend/pkg/log" - "github.com/ClusterCockpit/cc-backend/pkg/lrucache" - "github.com/ClusterCockpit/cc-backend/pkg/schema" + cclog "github.com/ClusterCockpit/cc-lib/ccLogger" + "github.com/ClusterCockpit/cc-lib/lrucache" + "github.com/ClusterCockpit/cc-lib/schema" sq "github.com/Masterminds/squirrel" "github.com/jmoiron/sqlx" ) @@ -59,7 +59,7 @@ func (r *NodeRepository) FetchMetadata(node *schema.Node) (map[string]string, er if err := sq.Select("node.meta_data").From("node").Where("node.id = ?", node.ID). RunWith(r.stmtCache).QueryRow().Scan(&node.RawMetaData); err != nil { - log.Warn("Error while scanning for node metadata") + cclog.Warn("Error while scanning for node metadata") return nil, err } @@ -68,12 +68,12 @@ func (r *NodeRepository) FetchMetadata(node *schema.Node) (map[string]string, er } if err := json.Unmarshal(node.RawMetaData, &node.MetaData); err != nil { - log.Warn("Error while unmarshaling raw metadata json") + cclog.Warn("Error while unmarshaling raw metadata json") return nil, err } r.cache.Put(cachekey, node.MetaData, len(node.RawMetaData), 24*time.Hour) - log.Debugf("Timer FetchMetadata %s", time.Since(start)) + cclog.Debugf("Timer FetchMetadata %s", time.Since(start)) return node.MetaData, nil } @@ -82,7 +82,7 @@ func (r *NodeRepository) UpdateMetadata(node *schema.Node, key, val string) (err r.cache.Del(cachekey) if node.MetaData == nil { if _, err = r.FetchMetadata(node); err != nil { - log.Warnf("Error while fetching metadata for node, DB ID '%v'", node.ID) + cclog.Warnf("Error while fetching metadata for node, DB ID '%v'", node.ID) return err } } @@ -97,7 +97,7 @@ func (r *NodeRepository) UpdateMetadata(node *schema.Node, key, val string) (err } if node.RawMetaData, err = json.Marshal(node.MetaData); err != nil { - log.Warnf("Error while marshaling metadata for node, DB ID '%v'", node.ID) + cclog.Warnf("Error while marshaling metadata for node, DB ID '%v'", node.ID) return err } @@ -105,7 +105,7 @@ func (r *NodeRepository) UpdateMetadata(node *schema.Node, key, val string) (err Set("meta_data", node.RawMetaData). Where("node.id = ?", node.ID). RunWith(r.stmtCache).Exec(); err != nil { - log.Warnf("Error while updating metadata for node, DB ID '%v'", node.ID) + cclog.Warnf("Error while updating metadata for node, DB ID '%v'", node.ID) return err } @@ -120,7 +120,7 @@ func (r *NodeRepository) GetNode(id int64, withMeta bool) (*schema.Node, error) Where("node.id = ?", id).RunWith(r.DB). 
QueryRow().Scan(&node.ID, &node.Hostname, &node.Cluster, &node.SubCluster, &node.NodeState, &node.HealthState); err != nil { - log.Warnf("Error while querying node '%v' from database", id) + cclog.Warnf("Error while querying node '%v' from database", id) return nil, err } @@ -128,7 +128,7 @@ func (r *NodeRepository) GetNode(id int64, withMeta bool) (*schema.Node, error) var err error var meta map[string]string if meta, err = r.FetchMetadata(node); err != nil { - log.Warnf("Error while fetching metadata for node '%v'", id) + cclog.Warnf("Error while fetching metadata for node '%v'", id) return nil, err } node.MetaData = meta @@ -146,12 +146,12 @@ func (r *NodeRepository) AddNode(node *schema.Node) (int64, error) { res, err := r.DB.NamedExec(NamedNodeInsert, node) if err != nil { - log.Errorf("Error while adding node '%v' to database", node.Hostname) + cclog.Errorf("Error while adding node '%v' to database", node.Hostname) return 0, err } node.ID, err = res.LastInsertId() if err != nil { - log.Errorf("Error while getting last insert id for node '%v' from database", node.Hostname) + cclog.Errorf("Error while getting last insert id for node '%v' from database", node.Hostname) return 0, err } @@ -166,7 +166,7 @@ func (r *NodeRepository) UpdateNodeState(hostname string, cluster string, nodeSt if err == sql.ErrNoRows { subcluster, err := archive.GetSubClusterByNode(cluster, hostname) if err != nil { - log.Errorf("Error while getting subcluster for node '%s' in cluster '%s': %v", hostname, cluster, err) + cclog.Errorf("Error while getting subcluster for node '%s' in cluster '%s': %v", hostname, cluster, err) return err } node := schema.Node{ @@ -175,29 +175,29 @@ func (r *NodeRepository) UpdateNodeState(hostname string, cluster string, nodeSt } _, err = r.AddNode(&node) if err != nil { - log.Errorf("Error while adding node '%s' to database: %v", hostname, err) + cclog.Errorf("Error while adding node '%s' to database: %v", hostname, err) return err } - log.Infof("Added node '%s' to database", hostname) + cclog.Infof("Added node '%s' to database", hostname) return nil } else { - log.Warnf("Error while querying node '%v' from database", id) + cclog.Warnf("Error while querying node '%v' from database", id) return err } } if _, err := sq.Update("node").Set("node_state", nodeState).Where("node.id = ?", id).RunWith(r.DB).Exec(); err != nil { - log.Errorf("error while updating node '%s'", hostname) + cclog.Errorf("error while updating node '%s'", hostname) return err } - log.Infof("Updated node '%s' in database", hostname) + cclog.Infof("Updated node '%s' in database", hostname) return nil } // func (r *NodeRepository) UpdateHealthState(hostname string, healthState *schema.MonitoringState) error { // if _, err := sq.Update("node").Set("health_state", healthState).Where("node.id = ?", id).RunWith(r.DB).Exec(); err != nil { -// log.Errorf("error while updating node '%d'", id) +// cclog.Errorf("error while updating node '%d'", id) // return err // } // @@ -207,10 +207,10 @@ func (r *NodeRepository) UpdateNodeState(hostname string, cluster string, nodeSt func (r *NodeRepository) DeleteNode(id int64) error { _, err := r.DB.Exec(`DELETE FROM node WHERE node.id = ?`, id) if err != nil { - log.Errorf("Error while deleting node '%d' from DB", id) + cclog.Errorf("Error while deleting node '%d' from DB", id) return err } - log.Infof("deleted node '%d' from DB", id) + cclog.Infof("deleted node '%d' from DB", id) return nil } @@ -243,7 +243,7 @@ func (r *NodeRepository) QueryNodes( rows, err := 
query.RunWith(r.stmtCache).Query() if err != nil { queryString, queryVars, _ := query.ToSql() - log.Errorf("Error while running query '%s' %v: %v", queryString, queryVars, err) + cclog.Errorf("Error while running query '%s' %v: %v", queryString, queryVars, err) return nil, err } @@ -254,7 +254,7 @@ func (r *NodeRepository) QueryNodes( if err := rows.Scan(&node.Hostname, &node.Cluster, &node.SubCluster, &node.NodeState, &node.HealthState); err != nil { rows.Close() - log.Warn("Error while scanning rows (Nodes)") + cclog.Warn("Error while scanning rows (Nodes)") return nil, err } nodes = append(nodes, &node) @@ -269,7 +269,7 @@ func (r *NodeRepository) ListNodes(cluster string) ([]*schema.Node, error) { rows, err := q.RunWith(r.DB).Query() if err != nil { - log.Warn("Error while querying user list") + cclog.Warn("Error while querying node list") return nil, err } nodeList := make([]*schema.Node, 0, 100) @@ -278,7 +278,7 @@ func (r *NodeRepository) ListNodes(cluster string) ([]*schema.Node, error) { node := &schema.Node{} if err := rows.Scan(&node.Hostname, &node.Cluster, &node.SubCluster, &node.NodeState, &node.HealthState); err != nil { - log.Warn("Error while scanning node list") + cclog.Warn("Error while scanning node list") return nil, err } diff --git a/internal/repository/repository_test.go b/internal/repository/repository_test.go index 1ca9ec5..5603c31 100644 --- a/internal/repository/repository_test.go +++ b/internal/repository/repository_test.go @@ -1,5 +1,5 @@ // Copyright (C) NHR@FAU, University Erlangen-Nuremberg. -// All rights reserved. +// All rights reserved. This file is part of cc-backend. // Use of this source code is governed by a MIT-style // license that can be found in the LICENSE file. package repository @@ -9,8 +9,8 @@ import ( "testing" "github.com/ClusterCockpit/cc-backend/internal/graph/model" - "github.com/ClusterCockpit/cc-backend/pkg/log" - "github.com/ClusterCockpit/cc-backend/pkg/schema" + cclog "github.com/ClusterCockpit/cc-lib/ccLogger" + "github.com/ClusterCockpit/cc-lib/schema" _ "github.com/mattn/go-sqlite3" ) @@ -65,7 +65,7 @@ func BenchmarkDB_FindJobById(b *testing.B) { func BenchmarkDB_FindJob(b *testing.B) { var jobId int64 = 107266 var startTime int64 = 1657557241 - var cluster = "fritz" + cluster := "fritz" b.Run("FindJob", func(b *testing.B) { db := setup(b) @@ -147,7 +147,7 @@ func getContext(tb testing.TB) context.Context { func setup(tb testing.TB) *JobRepository { tb.Helper() - log.Init("warn", true) + cclog.Init("warn", true) dbfile := "testdata/job.db" err := MigrateDB("sqlite3", dbfile) noErr(tb, err) diff --git a/internal/repository/stats.go b/internal/repository/stats.go index 69aa284..7beb674 100644 --- a/internal/repository/stats.go +++ b/internal/repository/stats.go @@ -1,5 +1,5 @@ // Copyright (C) NHR@FAU, University Erlangen-Nuremberg. -// All rights reserved. +// All rights reserved. This file is part of cc-backend. // Use of this source code is governed by a MIT-style // license that can be found in the LICENSE file.
package repository @@ -14,8 +14,8 @@ import ( "github.com/ClusterCockpit/cc-backend/internal/graph/model" "github.com/ClusterCockpit/cc-backend/internal/metricDataDispatcher" "github.com/ClusterCockpit/cc-backend/pkg/archive" - "github.com/ClusterCockpit/cc-backend/pkg/log" - "github.com/ClusterCockpit/cc-backend/pkg/schema" + cclog "github.com/ClusterCockpit/cc-lib/ccLogger" + "github.com/ClusterCockpit/cc-lib/schema" sq "github.com/Masterminds/squirrel" ) @@ -158,7 +158,7 @@ func (r *JobRepository) JobsStatsGrouped( rows, err := query.RunWith(r.DB).Query() if err != nil { - log.Warn("Error while querying DB for job statistics") + cclog.Warn("Error while querying DB for job statistics") return nil, err } @@ -169,7 +169,7 @@ func (r *JobRepository) JobsStatsGrouped( var name sql.NullString var jobs, walltime, nodes, nodeHours, cores, coreHours, accs, accHours sql.NullInt64 if err := rows.Scan(&id, &jobs, &name, &walltime, &nodes, &nodeHours, &cores, &coreHours, &accs, &accHours); err != nil { - log.Warn("Error while scanning rows") + cclog.Warn("Error while scanning rows") return nil, err } @@ -241,7 +241,7 @@ func (r *JobRepository) JobsStatsGrouped( } } - log.Debugf("Timer JobsStatsGrouped %s", time.Since(start)) + cclog.Debugf("Timer JobsStatsGrouped %s", time.Since(start)) return stats, nil } @@ -261,7 +261,7 @@ func (r *JobRepository) JobsStats( var jobs, walltime, nodes, nodeHours, cores, coreHours, accs, accHours sql.NullInt64 if err := row.Scan(&jobs, &walltime, &nodes, &nodeHours, &cores, &coreHours, &accs, &accHours); err != nil { - log.Warn("Error while scanning rows") + cclog.Warn("Error while scanning rows") return nil, err } @@ -287,7 +287,7 @@ func (r *JobRepository) JobsStats( }) } - log.Debugf("Timer JobStats %s", time.Since(start)) + cclog.Debugf("Timer JobStats %s", time.Since(start)) return stats, nil } @@ -301,7 +301,7 @@ func LoadJobStat(job *schema.Job, metric string, statType string) float64 { case "min": return stats.Min default: - log.Errorf("Unknown stat type %s", statType) + cclog.Errorf("Unknown stat type %s", statType) } } @@ -322,7 +322,7 @@ func (r *JobRepository) JobCountGrouped( } rows, err := query.RunWith(r.DB).Query() if err != nil { - log.Warn("Error while querying DB for job statistics") + cclog.Warn("Error while querying DB for job statistics") return nil, err } @@ -332,7 +332,7 @@ func (r *JobRepository) JobCountGrouped( var id sql.NullString var cnt sql.NullInt64 if err := rows.Scan(&id, &cnt); err != nil { - log.Warn("Error while scanning rows") + cclog.Warn("Error while scanning rows") return nil, err } if id.Valid { @@ -344,7 +344,7 @@ func (r *JobRepository) JobCountGrouped( } } - log.Debugf("Timer JobCountGrouped %s", time.Since(start)) + cclog.Debugf("Timer JobCountGrouped %s", time.Since(start)) return stats, nil } @@ -364,7 +364,7 @@ func (r *JobRepository) AddJobCountGrouped( } rows, err := query.RunWith(r.DB).Query() if err != nil { - log.Warn("Error while querying DB for job statistics") + cclog.Warn("Error while querying DB for job statistics") return nil, err } @@ -374,7 +374,7 @@ func (r *JobRepository) AddJobCountGrouped( var id sql.NullString var cnt sql.NullInt64 if err := rows.Scan(&id, &cnt); err != nil { - log.Warn("Error while scanning rows") + cclog.Warn("Error while scanning rows") return nil, err } if id.Valid { @@ -393,7 +393,7 @@ func (r *JobRepository) AddJobCountGrouped( } } - log.Debugf("Timer AddJobCountGrouped %s", time.Since(start)) + cclog.Debugf("Timer AddJobCountGrouped %s", time.Since(start)) return stats, nil } 
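// Aside on the grouped-count pattern used by JobCountGrouped and
// AddJobCountGrouped above: build SELECT <key>, COUNT(*) ... GROUP BY <key>
// with squirrel, run it through the shared DB handle, and scan into sql.Null*
// wrappers so a NULL group key cannot abort the loop. What follows is a
// minimal, self-contained sketch of that pattern only; the helper name, the
// running-state filter, and the plain *sql.DB handle are illustrative
// assumptions, not part of this patch.

package sketch

import (
	"database/sql"

	sq "github.com/Masterminds/squirrel"
)

// countJobsByCluster is a hypothetical helper mirroring the query shape used
// by the repository's grouped statistics functions. Equivalent SQL:
//
//	SELECT job.cluster, COUNT(*) FROM job
//	WHERE job.job_state = ? GROUP BY job.cluster
func countJobsByCluster(db *sql.DB) (map[string]int64, error) {
	query := sq.Select("job.cluster", "COUNT(*) as count").
		From("job").
		Where(sq.Eq{"job.job_state": "running"}).
		GroupBy("job.cluster")

	rows, err := query.RunWith(db).Query()
	if err != nil {
		return nil, err
	}
	defer rows.Close()

	counts := make(map[string]int64)
	for rows.Next() {
		var id sql.NullString // group key may be NULL, e.g. after outer joins
		var cnt sql.NullInt64
		if err := rows.Scan(&id, &cnt); err != nil {
			return nil, err
		}
		if id.Valid && cnt.Valid {
			counts[id.String] = cnt.Int64
		}
	}
	return counts, rows.Err()
}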
@@ -411,7 +411,7 @@ func (r *JobRepository) AddJobCount( } rows, err := query.RunWith(r.DB).Query() if err != nil { - log.Warn("Error while querying DB for job statistics") + cclog.Warn("Error while querying DB for job statistics") return nil, err } @@ -420,7 +420,7 @@ func (r *JobRepository) AddJobCount( for rows.Next() { var cnt sql.NullInt64 if err := rows.Scan(&cnt); err != nil { - log.Warn("Error while scanning rows") + cclog.Warn("Error while scanning rows") return nil, err } @@ -438,7 +438,7 @@ func (r *JobRepository) AddJobCount( } } - log.Debugf("Timer AddJobCount %s", time.Since(start)) + cclog.Debugf("Timer AddJobCount %s", time.Since(start)) return stats, nil } @@ -479,29 +479,29 @@ func (r *JobRepository) AddHistograms( value := fmt.Sprintf(`CAST(ROUND(((CASE WHEN job.job_state = "running" THEN %d - job.start_time ELSE job.duration END) / %d) + 1) as %s) as value`, time.Now().Unix(), targetBinSize, castType) stat.HistDuration, err = r.jobsDurationStatisticsHistogram(ctx, value, filter, targetBinSize, &targetBinCount) if err != nil { - log.Warn("Error while loading job statistics histogram: job duration") + cclog.Warn("Error while loading job statistics histogram: job duration") return nil, err } stat.HistNumNodes, err = r.jobsStatisticsHistogram(ctx, "job.num_nodes as value", filter) if err != nil { - log.Warn("Error while loading job statistics histogram: num nodes") + cclog.Warn("Error while loading job statistics histogram: num nodes") return nil, err } stat.HistNumCores, err = r.jobsStatisticsHistogram(ctx, "job.num_hwthreads as value", filter) if err != nil { - log.Warn("Error while loading job statistics histogram: num hwthreads") + cclog.Warn("Error while loading job statistics histogram: num hwthreads") return nil, err } stat.HistNumAccs, err = r.jobsStatisticsHistogram(ctx, "job.num_acc as value", filter) if err != nil { - log.Warn("Error while loading job statistics histogram: num acc") + cclog.Warn("Error while loading job statistics histogram: num acc") return nil, err } - log.Debugf("Timer AddHistograms %s", time.Since(start)) + cclog.Debugf("Timer AddHistograms %s", time.Since(start)) return stat, nil } @@ -520,7 +520,7 @@ func (r *JobRepository) AddMetricHistograms( if f.State != nil { if len(f.State) == 1 && f.State[0] == "running" { stat.HistMetrics = r.runningJobsMetricStatisticsHistogram(ctx, metrics, filter, targetBinCount) - log.Debugf("Timer AddMetricHistograms %s", time.Since(start)) + cclog.Debugf("Timer AddMetricHistograms %s", time.Since(start)) return stat, nil } } @@ -530,13 +530,13 @@ func (r *JobRepository) AddMetricHistograms( for _, m := range metrics { metricHisto, err := r.jobsMetricStatisticsHistogram(ctx, m, filter, targetBinCount) if err != nil { - log.Warnf("Error while loading job metric statistics histogram: %s", m) + cclog.Warnf("Error while loading job metric statistics histogram: %s", m) continue } stat.HistMetrics = append(stat.HistMetrics, metricHisto) } - log.Debugf("Timer AddMetricHistograms %s", time.Since(start)) + cclog.Debugf("Timer AddMetricHistograms %s", time.Since(start)) return stat, nil } @@ -560,7 +560,7 @@ func (r *JobRepository) jobsStatisticsHistogram( rows, err := query.GroupBy("value").RunWith(r.DB).Query() if err != nil { - log.Error("Error while running query") + cclog.Error("Error while running query") return nil, err } @@ -569,13 +569,13 @@ func (r *JobRepository) jobsStatisticsHistogram( for rows.Next() { point := model.HistoPoint{} if err := rows.Scan(&point.Value, &point.Count); err != nil { - 
log.Warn("Error while scanning rows") + cclog.Warn("Error while scanning rows") return nil, err } points = append(points, &point) } - log.Debugf("Timer jobsStatisticsHistogram %s", time.Since(start)) + cclog.Debugf("Timer jobsStatisticsHistogram %s", time.Since(start)) return points, nil } @@ -607,7 +607,7 @@ func (r *JobRepository) jobsDurationStatisticsHistogram( rows, err := query.GroupBy("value").RunWith(r.DB).Query() if err != nil { - log.Error("Error while running query") + cclog.Error("Error while running query") return nil, err } @@ -615,7 +615,7 @@ func (r *JobRepository) jobsDurationStatisticsHistogram( for rows.Next() { point := model.HistoPoint{} if err := rows.Scan(&point.Value, &point.Count); err != nil { - log.Warn("Error while scanning rows") + cclog.Warn("Error while scanning rows") return nil, err } @@ -630,7 +630,7 @@ func (r *JobRepository) jobsDurationStatisticsHistogram( } } - log.Debugf("Timer jobsStatisticsHistogram %s", time.Since(start)) + cclog.Debugf("Timer jobsStatisticsHistogram %s", time.Since(start)) return points, nil } @@ -652,7 +652,7 @@ func (r *JobRepository) jobsMetricStatisticsHistogram( peak = metricConfig.Peak unit = metricConfig.Unit.Prefix + metricConfig.Unit.Base footprintStat = metricConfig.Footprint - log.Debugf("Cluster %s filter found with peak %f for %s", *f.Cluster.Eq, peak, metric) + cclog.Debugf("Cluster %s filter found with peak %f for %s", *f.Cluster.Eq, peak, metric) } } @@ -674,7 +674,7 @@ func (r *JobRepository) jobsMetricStatisticsHistogram( } } - // log.Debugf("Metric %s, Peak %f, Unit %s", metric, peak, unit) + // cclog.Debugf("Metric %s, Peak %f, Unit %s", metric, peak, unit) // Make bins, see https://jereze.com/code/sql-histogram/ (Modified here) start := time.Now() @@ -709,7 +709,7 @@ func (r *JobRepository) jobsMetricStatisticsHistogram( rows, err := mainQuery.RunWith(r.DB).Query() if err != nil { - log.Errorf("Error while running mainQuery: %s", err) + cclog.Errorf("Error while running mainQuery: %s", err) return nil, err } @@ -726,7 +726,7 @@ func (r *JobRepository) jobsMetricStatisticsHistogram( for rows.Next() { // Fill Count if Bin-No. Matches (Not every Bin exists in DB!) rpoint := model.MetricHistoPoint{} if err := rows.Scan(&rpoint.Bin, &rpoint.Count); err != nil { // Required for Debug: &rpoint.Min, &rpoint.Max - log.Warnf("Error while scanning rows for %s", metric) + cclog.Warnf("Error while scanning rows for %s", metric) return nil, err // FIXME: Totally bricks cc-backend if returned and if all metrics requested? 
} @@ -736,10 +736,10 @@ func (r *JobRepository) jobsMetricStatisticsHistogram( e.Count = rpoint.Count // Only Required For Debug: Check DB returned Min/Max against Backend Init above // if rpoint.Min != nil { - // log.Warnf(">>>> Bin %d Min Set For %s to %d (Init'd with: %d)", *e.Bin, metric, *rpoint.Min, *e.Min) + // cclog.Warnf(">>>> Bin %d Min Set For %s to %d (Init'd with: %d)", *e.Bin, metric, *rpoint.Min, *e.Min) // } // if rpoint.Max != nil { - // log.Warnf(">>>> Bin %d Max Set For %s to %d (Init'd with: %d)", *e.Bin, metric, *rpoint.Max, *e.Max) + // cclog.Warnf(">>>> Bin %d Max Set For %s to %d (Init'd with: %d)", *e.Bin, metric, *rpoint.Max, *e.Max) // } break } @@ -749,7 +749,7 @@ func (r *JobRepository) jobsMetricStatisticsHistogram( result := model.MetricHistoPoints{Metric: metric, Unit: unit, Stat: &footprintStat, Data: points} - log.Debugf("Timer jobsStatisticsHistogram %s", time.Since(start)) + cclog.Debugf("Timer jobsStatisticsHistogram %s", time.Since(start)) return &result, nil } @@ -762,11 +762,11 @@ func (r *JobRepository) runningJobsMetricStatisticsHistogram( // Get Jobs jobs, err := r.QueryJobs(ctx, filters, &model.PageRequest{Page: 1, ItemsPerPage: 500 + 1}, nil) if err != nil { - log.Errorf("Error while querying jobs for footprint: %s", err) + cclog.Errorf("Error while querying jobs for footprint: %s", err) return nil } if len(jobs) > 500 { - log.Errorf("too many jobs matched (max: %d)", 500) + cclog.Errorf("too many jobs matched (max: %d)", 500) return nil } @@ -782,7 +782,7 @@ func (r *JobRepository) runningJobsMetricStatisticsHistogram( } if err := metricDataDispatcher.LoadAverages(job, metrics, avgs, ctx); err != nil { - log.Errorf("Error while loading averages for histogram: %s", err) + cclog.Errorf("Error while loading averages for histogram: %s", err) return nil } } diff --git a/internal/repository/stats_test.go b/internal/repository/stats_test.go index 2cc377c..bc4ac04 100644 --- a/internal/repository/stats_test.go +++ b/internal/repository/stats_test.go @@ -1,5 +1,5 @@ // Copyright (C) NHR@FAU, University Erlangen-Nuremberg. -// All rights reserved. +// All rights reserved. This file is part of cc-backend. // Use of this source code is governed by a MIT-style // license that can be found in the LICENSE file. package repository @@ -19,7 +19,6 @@ func TestBuildJobStatsQuery(t *testing.T) { noErr(t, err) fmt.Printf("SQL: %s\n", sql) - } func TestJobStats(t *testing.T) { diff --git a/internal/repository/tags.go b/internal/repository/tags.go index 18ce62f..87bf69d 100644 --- a/internal/repository/tags.go +++ b/internal/repository/tags.go @@ -1,5 +1,5 @@ // Copyright (C) NHR@FAU, University Erlangen-Nuremberg. -// All rights reserved. +// All rights reserved. This file is part of cc-backend. // Use of this source code is governed by a MIT-style // license that can be found in the LICENSE file. 
package repository @@ -9,8 +9,8 @@ import ( "strings" "github.com/ClusterCockpit/cc-backend/pkg/archive" - "github.com/ClusterCockpit/cc-backend/pkg/log" - "github.com/ClusterCockpit/cc-backend/pkg/schema" + cclog "github.com/ClusterCockpit/cc-lib/ccLogger" + "github.com/ClusterCockpit/cc-lib/schema" sq "github.com/Masterminds/squirrel" ) @@ -18,7 +18,7 @@ import ( func (r *JobRepository) AddTag(user *schema.User, job int64, tag int64) ([]*schema.Tag, error) { j, err := r.FindByIdWithUser(user, job) if err != nil { - log.Warn("Error while finding job by id") + cclog.Warn("Error while finding job by id") return nil, err } @@ -26,19 +26,19 @@ func (r *JobRepository) AddTag(user *schema.User, job int64, tag int64) ([]*sche if _, err := q.RunWith(r.stmtCache).Exec(); err != nil { s, _, _ := q.ToSql() - log.Errorf("Error adding tag with %s: %v", s, err) + cclog.Errorf("Error adding tag with %s: %v", s, err) return nil, err } tags, err := r.GetTags(user, &job) if err != nil { - log.Warn("Error while getting tags for job") + cclog.Warn("Error while getting tags for job") return nil, err } archiveTags, err := r.getArchiveTags(&job) if err != nil { - log.Warn("Error while getting tags for job") + cclog.Warn("Error while getting tags for job") return nil, err } @@ -48,7 +48,7 @@ func (r *JobRepository) AddTag(user *schema.User, job int64, tag int64) ([]*sche func (r *JobRepository) AddTagDirect(job int64, tag int64) ([]*schema.Tag, error) { j, err := r.FindByIdDirect(job) if err != nil { - log.Warn("Error while finding job by id") + cclog.Warn("Error while finding job by id") return nil, err } @@ -56,19 +56,19 @@ func (r *JobRepository) AddTagDirect(job int64, tag int64) ([]*schema.Tag, error if _, err := q.RunWith(r.stmtCache).Exec(); err != nil { s, _, _ := q.ToSql() - log.Errorf("Error adding tag with %s: %v", s, err) + cclog.Errorf("Error adding tag with %s: %v", s, err) return nil, err } tags, err := r.GetTagsDirect(&job) if err != nil { - log.Warn("Error while getting tags for job") + cclog.Warn("Error while getting tags for job") return nil, err } archiveTags, err := r.getArchiveTags(&job) if err != nil { - log.Warn("Error while getting tags for job") + cclog.Warn("Error while getting tags for job") return nil, err } @@ -80,7 +80,7 @@ func (r *JobRepository) AddTagDirect(job int64, tag int64) ([]*schema.Tag, error func (r *JobRepository) RemoveTag(user *schema.User, job, tag int64) ([]*schema.Tag, error) { j, err := r.FindByIdWithUser(user, job) if err != nil { - log.Warn("Error while finding job by id") + cclog.Warn("Error while finding job by id") return nil, err } @@ -88,19 +88,19 @@ func (r *JobRepository) RemoveTag(user *schema.User, job, tag int64) ([]*schema. 
if _, err := q.RunWith(r.stmtCache).Exec(); err != nil { s, _, _ := q.ToSql() - log.Errorf("Error removing tag with %s: %v", s, err) + cclog.Errorf("Error removing tag with %s: %v", s, err) return nil, err } tags, err := r.GetTags(user, &job) if err != nil { - log.Warn("Error while getting tags for job") + cclog.Warn("Error while getting tags for job") return nil, err } archiveTags, err := r.getArchiveTags(&job) if err != nil { - log.Warn("Error while getting tags for job") + cclog.Warn("Error while getting tags for job") return nil, err } @@ -113,14 +113,14 @@ func (r *JobRepository) RemoveJobTagByRequest(user *schema.User, job int64, tagT // Get Tag ID to delete tagID, exists := r.TagId(tagType, tagName, tagScope) if !exists { - log.Warnf("Tag does not exist (name, type, scope): %s, %s, %s", tagName, tagType, tagScope) + cclog.Warnf("Tag does not exist (name, type, scope): %s, %s, %s", tagName, tagType, tagScope) return nil, fmt.Errorf("tag does not exist (name, type, scope): %s, %s, %s", tagName, tagType, tagScope) } // Get Job j, err := r.FindByIdWithUser(user, job) if err != nil { - log.Warn("Error while finding job by id") + cclog.Warn("Error while finding job by id") return nil, err } @@ -129,19 +129,19 @@ func (r *JobRepository) RemoveJobTagByRequest(user *schema.User, job int64, tagT if _, err := q.RunWith(r.stmtCache).Exec(); err != nil { s, _, _ := q.ToSql() - log.Errorf("Error removing tag from table 'jobTag' with %s: %v", s, err) + cclog.Errorf("Error removing tag from table 'jobTag' with %s: %v", s, err) return nil, err } tags, err := r.GetTags(user, &job) if err != nil { - log.Warn("Error while getting tags for job") + cclog.Warn("Error while getting tags for job") return nil, err } archiveTags, err := r.getArchiveTags(&job) if err != nil { - log.Warn("Error while getting tags for job") + cclog.Warn("Error while getting tags for job") return nil, err } @@ -152,13 +152,13 @@ func (r *JobRepository) removeTagFromArchiveJobs(jobIds []int64) { for _, j := range jobIds { tags, err := r.getArchiveTags(&j) if err != nil { - log.Warnf("Error while getting tags for job %d", j) + cclog.Warnf("Error while getting tags for job %d", j) continue } job, err := r.FindByIdDirect(j) if err != nil { - log.Warnf("Error while getting job %d", j) + cclog.Warnf("Error while getting job %d", j) continue } @@ -172,7 +172,7 @@ func (r *JobRepository) RemoveTagByRequest(tagType string, tagName string, tagSc // Get Tag ID to delete tagID, exists := r.TagId(tagType, tagName, tagScope) if !exists { - log.Warnf("Tag does not exist (name, type, scope): %s, %s, %s", tagName, tagType, tagScope) + cclog.Warnf("Tag does not exist (name, type, scope): %s, %s, %s", tagName, tagType, tagScope) return fmt.Errorf("tag does not exist (name, type, scope): %s, %s, %s", tagName, tagType, tagScope) } @@ -192,7 +192,7 @@ func (r *JobRepository) RemoveTagById(tagID int64) error { if _, err := qJobTag.RunWith(r.stmtCache).Exec(); err != nil { s, _, _ := qJobTag.ToSql() - log.Errorf("Error removing tag from table 'jobTag' with %s: %v", s, err) + cclog.Errorf("Error removing tag from table 'jobTag' with %s: %v", s, err) return err } @@ -201,7 +201,7 @@ func (r *JobRepository) RemoveTagById(tagID int64) error { if _, err := qTag.RunWith(r.stmtCache).Exec(); err != nil { s, _, _ := qTag.ToSql() - log.Errorf("Error removing tag from table 'tag' with %s: %v", s, err) + cclog.Errorf("Error removing tag from table 'tag' with %s: %v", s, err) return err } @@ -223,7 +223,7 @@ func (r *JobRepository) CreateTag(tagType string, tagName 
string, tagScope strin res, err := q.RunWith(r.stmtCache).Exec() if err != nil { s, _, _ := q.ToSql() - log.Errorf("Error inserting tag with %s: %v", s, err) + cclog.Errorf("Error inserting tag with %s: %v", s, err) return 0, err } @@ -272,7 +272,7 @@ func (r *JobRepository) CountTags(user *schema.User) (tags []schema.Tag, counts // Handle Job Ownership if user != nil && user.HasAnyRole([]schema.Role{schema.RoleAdmin, schema.RoleSupport}) { // ADMIN || SUPPORT: Count all jobs - // log.Debug("CountTags: User Admin or Support -> Count all Jobs for Tags") + // cclog.Debug("CountTags: User Admin or Support -> Count all Jobs for Tags") // Unchanged: Needs to be own case still, due to UserRole/NoRole compatibility handling in else case } else if user != nil && user.HasRole(schema.RoleManager) { // MANAGER: Count own jobs plus project's jobs // Build ("project1", "project2", ...) list of variable length directly in SQL string @@ -396,7 +396,7 @@ func (r *JobRepository) GetTags(user *schema.User, job *int64) ([]*schema.Tag, e rows, err := q.RunWith(r.stmtCache).Query() if err != nil { s, _, _ := q.ToSql() - log.Errorf("Error get tags with %s: %v", s, err) + cclog.Errorf("Error get tags with %s: %v", s, err) return nil, err } @@ -404,7 +404,7 @@ func (r *JobRepository) GetTags(user *schema.User, job *int64) ([]*schema.Tag, e for rows.Next() { tag := &schema.Tag{} if err := rows.Scan(&tag.ID, &tag.Type, &tag.Name, &tag.Scope); err != nil { - log.Warn("Error while scanning rows") + cclog.Warn("Error while scanning rows") return nil, err } // Handle Scope Filtering: Tag Scope is Global, Private (== Username) or User is auth'd to view Admin Tags @@ -429,7 +429,7 @@ func (r *JobRepository) GetTagsDirect(job *int64) ([]*schema.Tag, error) { rows, err := q.RunWith(r.stmtCache).Query() if err != nil { s, _, _ := q.ToSql() - log.Errorf("Error get tags with %s: %v", s, err) + cclog.Errorf("Error get tags with %s: %v", s, err) return nil, err } @@ -437,7 +437,7 @@ func (r *JobRepository) GetTagsDirect(job *int64) ([]*schema.Tag, error) { for rows.Next() { tag := &schema.Tag{} if err := rows.Scan(&tag.ID, &tag.Type, &tag.Name, &tag.Scope); err != nil { - log.Warn("Error while scanning rows") + cclog.Warn("Error while scanning rows") return nil, err } tags = append(tags, tag) @@ -456,7 +456,7 @@ func (r *JobRepository) getArchiveTags(job *int64) ([]*schema.Tag, error) { rows, err := q.RunWith(r.stmtCache).Query() if err != nil { s, _, _ := q.ToSql() - log.Errorf("Error get tags with %s: %v", s, err) + cclog.Errorf("Error get tags with %s: %v", s, err) return nil, err } @@ -464,7 +464,7 @@ func (r *JobRepository) getArchiveTags(job *int64) ([]*schema.Tag, error) { for rows.Next() { tag := &schema.Tag{} if err := rows.Scan(&tag.ID, &tag.Type, &tag.Name, &tag.Scope); err != nil { - log.Warn("Error while scanning rows") + cclog.Warn("Error while scanning rows") return nil, err } tags = append(tags, tag) @@ -488,7 +488,7 @@ func (r *JobRepository) ImportTag(jobId int64, tagType string, tagName string, t if _, err := q.RunWith(r.stmtCache).Exec(); err != nil { s, _, _ := q.ToSql() - log.Errorf("Error adding tag on import with %s: %v", s, err) + cclog.Errorf("Error adding tag on import with %s: %v", s, err) return err } diff --git a/internal/repository/transaction.go b/internal/repository/transaction.go index 603d505..39941c1 100644 --- a/internal/repository/transaction.go +++ b/internal/repository/transaction.go @@ -1,11 +1,11 @@ // Copyright (C) NHR@FAU, University Erlangen-Nuremberg. -// All rights reserved. 
+// All rights reserved. This file is part of cc-backend. // Use of this source code is governed by a MIT-style // license that can be found in the LICENSE file. package repository import ( - "github.com/ClusterCockpit/cc-backend/pkg/log" + cclog "github.com/ClusterCockpit/cc-lib/ccLogger" "github.com/jmoiron/sqlx" ) @@ -20,7 +20,7 @@ func (r *JobRepository) TransactionInit() (*Transaction, error) { t.tx, err = r.DB.Beginx() if err != nil { - log.Warn("Error while bundling transactions") + cclog.Warn("Error while bundling transactions") return nil, err } return t, nil @@ -30,14 +30,14 @@ func (r *JobRepository) TransactionCommit(t *Transaction) error { var err error if t.tx != nil { if err = t.tx.Commit(); err != nil { - log.Warn("Error while committing transactions") + cclog.Warn("Error while committing transactions") return err } } t.tx, err = r.DB.Beginx() if err != nil { - log.Warn("Error while bundling transactions") + cclog.Warn("Error while bundling transactions") return err } @@ -46,7 +46,7 @@ func (r *JobRepository) TransactionCommit(t *Transaction) error { func (r *JobRepository) TransactionEnd(t *Transaction) error { if err := t.tx.Commit(); err != nil { - log.Warn("Error while committing SQL transactions") + cclog.Warn("Error while committing SQL transactions") return err } return nil @@ -59,13 +59,13 @@ func (r *JobRepository) TransactionAddNamed( ) (int64, error) { res, err := t.tx.NamedExec(query, args) if err != nil { - log.Errorf("Named Exec failed: %v", err) + cclog.Errorf("Named Exec failed: %v", err) return 0, err } id, err := res.LastInsertId() if err != nil { - log.Errorf("repository initDB(): %v", err) + cclog.Errorf("repository initDB(): %v", err) return 0, err } @@ -73,16 +73,15 @@ func (r *JobRepository) TransactionAddNamed( } func (r *JobRepository) TransactionAdd(t *Transaction, query string, args ...interface{}) (int64, error) { - res, err := t.tx.Exec(query, args...) if err != nil { - log.Errorf("TransactionAdd(), Exec() Error: %v", err) + cclog.Errorf("TransactionAdd(), Exec() Error: %v", err) return 0, err } id, err := res.LastInsertId() if err != nil { - log.Errorf("TransactionAdd(), LastInsertId() Error: %v", err) + cclog.Errorf("TransactionAdd(), LastInsertId() Error: %v", err) return 0, err } diff --git a/internal/repository/user.go b/internal/repository/user.go index c411c38..1dca7f4 100644 --- a/internal/repository/user.go +++ b/internal/repository/user.go @@ -1,5 +1,5 @@ // Copyright (C) NHR@FAU, University Erlangen-Nuremberg. -// All rights reserved. +// All rights reserved. This file is part of cc-backend. // Use of this source code is governed by a MIT-style // license that can be found in the LICENSE file. package repository @@ -13,13 +13,13 @@ import ( "strings" "sync" + "github.com/ClusterCockpit/cc-backend/internal/config" "github.com/ClusterCockpit/cc-backend/internal/graph/model" - "github.com/ClusterCockpit/cc-backend/pkg/log" - "github.com/ClusterCockpit/cc-backend/pkg/schema" + cclog "github.com/ClusterCockpit/cc-lib/ccLogger" + "github.com/ClusterCockpit/cc-lib/schema" sq "github.com/Masterminds/squirrel" "github.com/jmoiron/sqlx" "golang.org/x/crypto/bcrypt" - "github.com/ClusterCockpit/cc-backend/internal/config" ) var ( @@ -50,7 +50,7 @@ func (r *UserRepository) GetUser(username string) (*schema.User, error) { if err := sq.Select("password", "ldap", "name", "roles", "email", "projects").From("hpc_user"). Where("hpc_user.username = ?", username).RunWith(r.DB). 
QueryRow().Scan(&hashedPassword, &user.AuthSource, &name, &rawRoles, &email, &rawProjects); err != nil { - log.Warnf("Error while querying user '%v' from database", username) + cclog.Warnf("Error while querying user '%v' from database", username) return nil, err } @@ -59,7 +59,7 @@ func (r *UserRepository) GetUser(username string) (*schema.User, error) { user.Email = email.String if rawRoles.Valid { if err := json.Unmarshal([]byte(rawRoles.String), &user.Roles); err != nil { - log.Warn("Error while unmarshaling raw roles from DB") + cclog.Warn("Error while unmarshaling raw roles from DB") return nil, err } } @@ -76,14 +76,14 @@ func (r *UserRepository) GetLdapUsernames() ([]string, error) { var users []string rows, err := r.DB.Query(`SELECT username FROM hpc_user WHERE hpc_user.ldap = 1`) if err != nil { - log.Warn("Error while querying usernames") + cclog.Warn("Error while querying usernames") return nil, err } for rows.Next() { var username string if err := rows.Scan(&username); err != nil { - log.Warnf("Error while scanning for user '%s'", username) + cclog.Warnf("Error while scanning for user '%s'", username) return nil, err } @@ -111,7 +111,7 @@ func (r *UserRepository) AddUser(user *schema.User) error { if user.Password != "" { password, err := bcrypt.GenerateFromPassword([]byte(user.Password), bcrypt.DefaultCost) if err != nil { - log.Error("Error while encrypting new user password") + cclog.Error("Error while encrypting new user password") return err } cols = append(cols, "password") @@ -123,21 +123,21 @@ func (r *UserRepository) AddUser(user *schema.User) error { } if _, err := sq.Insert("hpc_user").Columns(cols...).Values(vals...).RunWith(r.DB).Exec(); err != nil { - log.Errorf("Error while inserting new user '%v' into DB", user.Username) + cclog.Errorf("Error while inserting new user '%v' into DB", user.Username) return err } - log.Infof("new user %#v created (roles: %s, auth-source: %d, projects: %s)", user.Username, rolesJson, user.AuthSource, projectsJson) + cclog.Infof("new user %#v created (roles: %s, auth-source: %d, projects: %s)", user.Username, rolesJson, user.AuthSource, projectsJson) defaultMetricsCfg, err := config.LoadDefaultMetricsConfig() if err != nil { - log.Errorf("Error loading default metrics config: %v", err) + cclog.Errorf("Error loading default metrics config: %v", err) } else if defaultMetricsCfg != nil { for _, cluster := range defaultMetricsCfg.Clusters { metricsArray := config.ParseMetricsString(cluster.DefaultMetrics) metricsJSON, err := json.Marshal(metricsArray) if err != nil { - log.Errorf("Error marshaling default metrics for cluster %s: %v", cluster.Name, err) + cclog.Errorf("Error marshaling default metrics for cluster %s: %v", cluster.Name, err) continue } confKey := "job_view_selectedMetrics:" + cluster.Name @@ -145,9 +145,9 @@ func (r *UserRepository) AddUser(user *schema.User) error { Columns("username", "confkey", "value"). Values(user.Username, confKey, string(metricsJSON)). 
RunWith(r.DB).Exec(); err != nil { - log.Errorf("Error inserting default job view metrics for user %s and cluster %s: %v", user.Username, cluster.Name, err) + cclog.Errorf("Error inserting default job view metrics for user %s and cluster %s: %v", user.Username, cluster.Name, err) } else { - log.Infof("Default job view metrics for user %s and cluster %s set to %s", user.Username, cluster.Name, string(metricsJSON)) + cclog.Infof("Default job view metrics for user %s and cluster %s set to %s", user.Username, cluster.Name, string(metricsJSON)) } } } @@ -160,7 +160,7 @@ func (r *UserRepository) UpdateUser(dbUser *schema.User, user *schema.User) erro // TODO: Discuss updatable fields if dbUser.Name != user.Name { if _, err := sq.Update("hpc_user").Set("name", user.Name).Where("hpc_user.username = ?", dbUser.Username).RunWith(r.DB).Exec(); err != nil { - log.Errorf("error while updating name of user '%s'", user.Username) + cclog.Errorf("error while updating name of user '%s'", user.Username) return err } } @@ -179,10 +179,10 @@ func (r *UserRepository) UpdateUser(dbUser *schema.User, user *schema.User) erro func (r *UserRepository) DelUser(username string) error { _, err := r.DB.Exec(`DELETE FROM hpc_user WHERE hpc_user.username = ?`, username) if err != nil { - log.Errorf("Error while deleting user '%s' from DB", username) + cclog.Errorf("Error while deleting user '%s' from DB", username) return err } - log.Infof("deleted user '%s' from DB", username) + cclog.Infof("deleted user '%s' from DB", username) return nil } @@ -194,7 +194,7 @@ func (r *UserRepository) ListUsers(specialsOnly bool) ([]*schema.User, error) { rows, err := q.RunWith(r.DB).Query() if err != nil { - log.Warn("Error while querying user list") + cclog.Warn("Error while querying user list") return nil, err } @@ -206,12 +206,12 @@ func (r *UserRepository) ListUsers(specialsOnly bool) ([]*schema.User, error) { user := &schema.User{} var name, email sql.NullString if err := rows.Scan(&user.Username, &name, &email, &rawroles, &rawprojects); err != nil { - log.Warn("Error while scanning user list") + cclog.Warn("Error while scanning user list") return nil, err } if err := json.Unmarshal([]byte(rawroles), &user.Roles); err != nil { - log.Warn("Error while unmarshaling raw role list") + cclog.Warn("Error while unmarshaling raw role list") return nil, err } @@ -234,7 +234,7 @@ func (r *UserRepository) AddRole( newRole := strings.ToLower(queryrole) user, err := r.GetUser(username) if err != nil { - log.Warnf("Could not load user '%s'", username) + cclog.Warnf("Could not load user '%s'", username) return err } @@ -249,7 +249,7 @@ func (r *UserRepository) AddRole( roles, _ := json.Marshal(append(user.Roles, newRole)) if _, err := sq.Update("hpc_user").Set("roles", roles).Where("hpc_user.username = ?", username).RunWith(r.DB).Exec(); err != nil { - log.Errorf("error while adding new role for user '%s'", user.Username) + cclog.Errorf("error while adding new role for user '%s'", user.Username) return err } return nil @@ -259,7 +259,7 @@ func (r *UserRepository) RemoveRole(ctx context.Context, username string, queryr oldRole := strings.ToLower(queryrole) user, err := r.GetUser(username) if err != nil { - log.Warnf("Could not load user '%s'", username) + cclog.Warnf("Could not load user '%s'", username) return err } @@ -285,7 +285,7 @@ func (r *UserRepository) RemoveRole(ctx context.Context, username string, queryr mroles, _ := json.Marshal(newroles) if _, err := sq.Update("hpc_user").Set("roles", mroles).Where("hpc_user.username = ?", 
username).RunWith(r.DB).Exec(); err != nil { - log.Errorf("Error while removing role for user '%s'", user.Username) + cclog.Errorf("Error while removing role for user '%s'", user.Username) return err } return nil @@ -364,10 +364,10 @@ const ContextUserKey ContextKey = "user" func GetUserFromContext(ctx context.Context) *schema.User { x := ctx.Value(ContextUserKey) if x == nil { - log.Warnf("no user retrieved from context") + cclog.Warnf("no user retrieved from context") return nil } - // log.Infof("user retrieved from context: %v", x.(*schema.User)) + // cclog.Infof("user retrieved from context: %v", x.(*schema.User)) return x.(*schema.User) } @@ -385,11 +385,11 @@ func (r *UserRepository) FetchUserInCtx(ctx context.Context, username string) (* if err == sql.ErrNoRows { /* This warning will be logged *often* for non-local users, i.e. users mentioned only in job-table or archive, */ /* since FetchUser will be called to retrieve full name and mail for every job in query/list */ - // log.Warnf("User '%s' Not found in DB", username) + // cclog.Warnf("User '%s' Not found in DB", username) return nil, nil } - log.Warnf("Error while fetching user '%s'", username) + cclog.Warnf("Error while fetching user '%s'", username) return nil, err } diff --git a/internal/repository/userConfig.go b/internal/repository/userConfig.go index 5d43071..2ef7164 100644 --- a/internal/repository/userConfig.go +++ b/internal/repository/userConfig.go @@ -1,5 +1,5 @@ // Copyright (C) NHR@FAU, University Erlangen-Nuremberg. -// All rights reserved. +// All rights reserved. This file is part of cc-backend. // Use of this source code is governed by a MIT-style // license that can be found in the LICENSE file. package repository @@ -10,9 +10,9 @@ import ( "time" "github.com/ClusterCockpit/cc-backend/internal/config" - "github.com/ClusterCockpit/cc-backend/pkg/log" - "github.com/ClusterCockpit/cc-backend/pkg/lrucache" - "github.com/ClusterCockpit/cc-backend/pkg/schema" + cclog "github.com/ClusterCockpit/cc-lib/ccLogger" + "github.com/ClusterCockpit/cc-lib/lrucache" + "github.com/ClusterCockpit/cc-lib/schema" "github.com/jmoiron/sqlx" ) @@ -35,7 +35,7 @@ func GetUserCfgRepo() *UserCfgRepo { lookupConfigStmt, err := db.DB.Preparex(`SELECT confkey, value FROM configuration WHERE configuration.username = ?`) if err != nil { - log.Fatalf("User Config: Call 'db.DB.Preparex()' failed.\nError: %s\n", err.Error()) + cclog.Fatalf("User Config: Call 'db.DB.Preparex()' failed.\nError: %s\n", err.Error()) } userCfgRepoInstance = &UserCfgRepo{ @@ -70,7 +70,7 @@ func (uCfg *UserCfgRepo) GetUIConfig(user *schema.User) (map[string]interface{}, rows, err := uCfg.Lookup.Query(user.Username) if err != nil { - log.Warnf("Error while looking up user uiconfig for user '%v'", user.Username) + cclog.Warnf("Error while looking up user uiconfig for user '%v'", user.Username) return err, 0, 0 } @@ -79,13 +79,13 @@ func (uCfg *UserCfgRepo) GetUIConfig(user *schema.User) (map[string]interface{}, for rows.Next() { var key, rawval string if err := rows.Scan(&key, &rawval); err != nil { - log.Warn("Error while scanning user uiconfig values") + cclog.Warn("Error while scanning user uiconfig values") return err, 0, 0 } var val interface{} if err := json.Unmarshal([]byte(rawval), &val); err != nil { - log.Warn("Error while unmarshaling raw user uiconfig json") + cclog.Warn("Error while unmarshaling raw user uiconfig json") return err, 0, 0 } @@ -100,7 +100,7 @@ func (uCfg *UserCfgRepo) GetUIConfig(user *schema.User) (map[string]interface{}, return uiconfig, 24 
* time.Hour, size }) if err, ok := data.(error); ok { - log.Error("Error in returned dataset") + cclog.Error("Error in returned dataset") return nil, err } @@ -117,7 +117,7 @@ func (uCfg *UserCfgRepo) UpdateConfig( if user == nil { var val interface{} if err := json.Unmarshal([]byte(value), &val); err != nil { - log.Warn("Error while unmarshaling raw user config json") + cclog.Warn("Error while unmarshaling raw user config json") return err } @@ -128,7 +128,7 @@ func (uCfg *UserCfgRepo) UpdateConfig( } if _, err := uCfg.DB.Exec(`REPLACE INTO configuration (username, confkey, value) VALUES (?, ?, ?)`, user.Username, key, value); err != nil { - log.Warnf("Error while replacing user config in DB for user '%v'", user.Username) + cclog.Warnf("Error while replacing user config in DB for user '%v'", user.Username) return err } diff --git a/internal/repository/userConfig_test.go b/internal/repository/userConfig_test.go index cd15c9d..d200763 100644 --- a/internal/repository/userConfig_test.go +++ b/internal/repository/userConfig_test.go @@ -1,5 +1,5 @@ // Copyright (C) NHR@FAU, University Erlangen-Nuremberg. -// All rights reserved. +// All rights reserved. This file is part of cc-backend. // Use of this source code is governed by a MIT-style // license that can be found in the LICENSE file. package repository @@ -10,8 +10,8 @@ import ( "testing" "github.com/ClusterCockpit/cc-backend/internal/config" - "github.com/ClusterCockpit/cc-backend/pkg/log" - "github.com/ClusterCockpit/cc-backend/pkg/schema" + cclog "github.com/ClusterCockpit/cc-lib/ccLogger" + "github.com/ClusterCockpit/cc-lib/schema" _ "github.com/mattn/go-sqlite3" ) @@ -39,7 +39,7 @@ func setupUserTest(t *testing.T) *UserCfgRepo { } } ] }` - log.Init("info", true) + cclog.Init("info", true) dbfilepath := "testdata/job.db" err := MigrateDB("sqlite3", dbfilepath) if err != nil { diff --git a/internal/routerConfig/routes.go b/internal/routerConfig/routes.go index 32e1c15..9c19de5 100644 --- a/internal/routerConfig/routes.go +++ b/internal/routerConfig/routes.go @@ -1,5 +1,5 @@ // Copyright (C) NHR@FAU, University Erlangen-Nuremberg. -// All rights reserved. +// All rights reserved. This file is part of cc-backend. // Use of this source code is governed by a MIT-style // license that can be found in the LICENSE file. 
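The GetUIConfig hunk above keeps its read-through caching; only the cache import moves from pkg/lrucache to cc-lib/lrucache. A minimal sketch of the Get contract that call site relies on, assuming cc-lib keeps the pkg/lrucache API (a compute closure returning value, time-to-live, and size); the key, capacity, and config entry below are illustrative:

package main

import (
	"fmt"
	"time"

	"github.com/ClusterCockpit/cc-lib/lrucache"
)

func main() {
	cache := lrucache.New(1024) // capacity in the cache's size units
	// On a miss, the closure computes the value; on a hit, Get returns the cached one.
	data := cache.Get("uiconfig:demo", func() (interface{}, time.Duration, int) {
		cfg := map[string]interface{}{"plot_general_colorscheme": "default"} // illustrative entry
		return cfg, 24 * time.Hour, 1 // cached for a day, as in the hunk above
	})
	fmt.Println(data)
}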
package routerConfig @@ -16,10 +16,10 @@ import ( "github.com/ClusterCockpit/cc-backend/internal/config" "github.com/ClusterCockpit/cc-backend/internal/graph/model" "github.com/ClusterCockpit/cc-backend/internal/repository" - "github.com/ClusterCockpit/cc-backend/internal/util" - "github.com/ClusterCockpit/cc-backend/pkg/log" - "github.com/ClusterCockpit/cc-backend/pkg/schema" "github.com/ClusterCockpit/cc-backend/web" + cclog "github.com/ClusterCockpit/cc-lib/ccLogger" + "github.com/ClusterCockpit/cc-lib/schema" + "github.com/ClusterCockpit/cc-lib/util" "github.com/gorilla/mux" ) @@ -57,23 +57,23 @@ func setupHomeRoute(i InfoType, r *http.Request) InfoType { // startJobCount := time.Now() stats, err := jobRepo.JobCountGrouped(r.Context(), nil, &groupBy) if err != nil { - log.Warnf("failed to count jobs: %s", err.Error()) + cclog.Warnf("failed to count jobs: %s", err.Error()) } - // log.Infof("Timer HOME ROUTE startJobCount: %s", time.Since(startJobCount)) + // cclog.Infof("Timer HOME ROUTE startJobCount: %s", time.Since(startJobCount)) // startRunningJobCount := time.Now() stats, err = jobRepo.AddJobCountGrouped(r.Context(), nil, &groupBy, stats, "running") if err != nil { - log.Warnf("failed to count running jobs: %s", err.Error()) + cclog.Warnf("failed to count running jobs: %s", err.Error()) } - // log.Infof("Timer HOME ROUTE startRunningJobCount: %s", time.Since(startRunningJobCount)) + // cclog.Infof("Timer HOME ROUTE startRunningJobCount: %s", time.Since(startRunningJobCount)) i["clusters"] = stats if util.CheckFileExists("./var/notice.txt") { msg, err := os.ReadFile("./var/notice.txt") if err != nil { - log.Warnf("failed to read notice.txt file: %s", err.Error()) + cclog.Warnf("failed to read notice.txt file: %s", err.Error()) } else { i["message"] = string(msg) } @@ -178,7 +178,7 @@ func setupTaglistRoute(i InfoType, r *http.Request) InfoType { tags, counts, err := jobRepo.CountTags(repository.GetUserFromContext(r.Context())) tagMap := make(map[string][]map[string]interface{}) if err != nil { - log.Warnf("GetTags failed: %s", err.Error()) + cclog.Warnf("GetTags failed: %s", err.Error()) i["tagmap"] = tagMap return i } diff --git a/internal/tagger/classifyJob.go b/internal/tagger/classifyJob.go index 0af7096..32063cd 100644 --- a/internal/tagger/classifyJob.go +++ b/internal/tagger/classifyJob.go @@ -15,10 +15,10 @@ import ( "text/template" "github.com/ClusterCockpit/cc-backend/internal/repository" - "github.com/ClusterCockpit/cc-backend/internal/util" "github.com/ClusterCockpit/cc-backend/pkg/archive" - "github.com/ClusterCockpit/cc-backend/pkg/log" - "github.com/ClusterCockpit/cc-backend/pkg/schema" + cclog "github.com/ClusterCockpit/cc-lib/ccLogger" + "github.com/ClusterCockpit/cc-lib/schema" + "github.com/ClusterCockpit/cc-lib/util" "github.com/expr-lang/expr" "github.com/expr-lang/expr/vm" ) @@ -66,7 +66,7 @@ type JobClassTagger struct { func (t *JobClassTagger) prepareRule(b []byte, fns string) { var rule RuleFormat if err := json.NewDecoder(bytes.NewReader(b)).Decode(&rule); err != nil { - log.Warn("Error while decoding raw job meta json") + cclog.Warn("Error while decoding raw job meta json") return } @@ -80,7 +80,7 @@ func (t *JobClassTagger) prepareRule(b []byte, fns string) { for _, p := range rule.Parameters { param, ok := t.parameters[p] if !ok { - log.Warnf("prepareRule() > missing parameter %s in rule %s", p, fns) + cclog.Warnf("prepareRule() > missing parameter %s in rule %s", p, fns) return } ri.env[p] = param @@ -93,7 +93,7 @@ func (t *JobClassTagger) 
prepareRule(b []byte, fns string) { for _, r := range rule.Requirements { req, err := expr.Compile(r, expr.AsBool()) if err != nil { - log.Errorf("error compiling requirement %s: %#v", r, err) + cclog.Errorf("error compiling requirement %s: %#v", r, err) return } ri.requirements = append(ri.requirements, req) @@ -103,7 +103,7 @@ func (t *JobClassTagger) prepareRule(b []byte, fns string) { for _, v := range rule.Variables { req, err := expr.Compile(v.Expr, expr.AsFloat64()) if err != nil { - log.Errorf("error compiling requirement %s: %#v", v.Name, err) + cclog.Errorf("error compiling variable %s: %#v", v.Name, err) return } ri.variables = append(ri.variables, ruleVariable{name: v.Name, expr: req}) @@ -112,7 +112,7 @@ func (t *JobClassTagger) prepareRule(b []byte, fns string) { // compile rule exp, err := expr.Compile(rule.Rule, expr.AsBool()) if err != nil { - log.Errorf("error compiling rule %s: %#v", fns, err) + cclog.Errorf("error compiling rule %s: %#v", fns, err) return } ri.rule = exp @@ -120,9 +120,9 @@ func (t *JobClassTagger) prepareRule(b []byte, fns string) { // prepare hint template ri.hint, err = template.New(fns).Parse(rule.Hint) if err != nil { - log.Errorf("error processing template %s: %#v", fns, err) + cclog.Errorf("error processing template %s: %#v", fns, err) } - log.Infof("prepareRule() > processing %s with %d requirements and %d variables", fns, len(ri.requirements), len(ri.variables)) + cclog.Infof("prepareRule() > processing %s with %d requirements and %d variables", fns, len(ri.requirements), len(ri.variables)) t.rules[rule.Tag] = ri } @@ -135,19 +135,19 @@ func (t *JobClassTagger) EventMatch(s string) bool { func (t *JobClassTagger) EventCallback() { files, err := os.ReadDir(t.cfgPath) if err != nil { - log.Fatal(err) + cclog.Fatal(err) } if util.CheckFileExists(t.cfgPath + "/parameters.json") { - log.Info("Merge parameters") + cclog.Info("Merge parameters") b, err := os.ReadFile(t.cfgPath + "/parameters.json") if err != nil { - log.Warnf("prepareRule() > open file error: %v", err) + cclog.Warnf("prepareRule() > open file error: %v", err) } var paramTmp map[string]any if err := json.NewDecoder(bytes.NewReader(b)).Decode(&paramTmp); err != nil { - log.Warn("Error while decoding parameters.json") + cclog.Warn("Error while decoding parameters.json") } maps.Copy(t.parameters, paramTmp) @@ -156,11 +156,11 @@ func (t *JobClassTagger) EventCallback() { for _, fn := range files { fns := fn.Name() if fns != "parameters.json" { - log.Debugf("Process: %s", fns) + cclog.Debugf("Process: %s", fns) filename := fmt.Sprintf("%s/%s", t.cfgPath, fns) b, err := os.ReadFile(filename) if err != nil { - log.Warnf("prepareRule() > open file error: %v", err) + cclog.Warnf("prepareRule() > open file error: %v", err) return } t.prepareRule(b, fns) @@ -169,15 +169,15 @@ func (t *JobClassTagger) EventCallback() { } func (t *JobClassTagger) initParameters() error { - log.Info("Initialize parameters") + cclog.Info("Initialize parameters") b, err := jobclassFiles.ReadFile("jobclasses/parameters.json") if err != nil { - log.Warnf("prepareRule() > open file error: %v", err) + cclog.Warnf("prepareRule() > open file error: %v", err) return err } if err := json.NewDecoder(bytes.NewReader(b)).Decode(&t.parameters); err != nil { - log.Warn("Error while decoding parameters.json") + cclog.Warn("Error while decoding parameters.json") return err } @@ -190,7 +190,7 @@ func (t *JobClassTagger) Register() error { err := t.initParameters() if err != nil { - log.Warnf("error reading parameters.json: %v", err) +
cclog.Warnf("error reading parameters.json: %v", err) return err } @@ -203,11 +203,11 @@ func (t *JobClassTagger) Register() error { fns := fn.Name() if fns != "parameters.json" { filename := fmt.Sprintf("jobclasses/%s", fns) - log.Infof("Process: %s", fns) + cclog.Infof("Process: %s", fns) b, err := jobclassFiles.ReadFile(filename) if err != nil { - log.Warnf("prepareRule() > open file error: %v", err) + cclog.Warnf("prepareRule() > open file error: %v", err) return err } t.prepareRule(b, fns) @@ -216,7 +216,7 @@ func (t *JobClassTagger) Register() error { if util.CheckFileExists(t.cfgPath) { t.EventCallback() - log.Infof("Setup file watch for %s", t.cfgPath) + cclog.Infof("Setup file watch for %s", t.cfgPath) util.AddListener(t.cfgPath, t) } @@ -227,16 +227,16 @@ func (t *JobClassTagger) Match(job *schema.Job) { r := repository.GetJobRepository() jobstats, err := archive.GetStatistics(job) metricsList := archive.GetMetricConfigSubCluster(job.Cluster, job.SubCluster) - log.Infof("Enter match rule with %d rules for job %d", len(t.rules), job.JobID) + cclog.Infof("Enter match rule with %d rules for job %d", len(t.rules), job.JobID) if err != nil { - log.Errorf("job classification failed for job %d: %#v", job.JobID, err) + cclog.Errorf("job classification failed for job %d: %#v", job.JobID, err) return } for tag, ri := range t.rules { env := make(map[string]any) maps.Copy(env, ri.env) - log.Infof("Try to match rule %s for job %d", tag, job.JobID) + cclog.Infof("Try to match rule %s for job %d", tag, job.JobID) // Initialize environment env["job"] = map[string]any{ @@ -253,7 +253,7 @@ func (t *JobClassTagger) Match(job *schema.Job) { for _, m := range ri.metrics { stats, ok := jobstats[m] if !ok { - log.Errorf("job classification failed for job %d: missing metric '%s'", job.JobID, m) + cclog.Errorf("job classification failed for job %d: missing metric '%s'", job.JobID, m) return } env[m] = map[string]any{ @@ -273,11 +273,11 @@ func (t *JobClassTagger) Match(job *schema.Job) { for _, r := range ri.requirements { ok, err := expr.Run(r, env) if err != nil { - log.Errorf("error running requirement for rule %s: %#v", tag, err) + cclog.Errorf("error running requirement for rule %s: %#v", tag, err) return } if !ok.(bool) { - log.Infof("requirement for rule %s not met", tag) + cclog.Infof("requirement for rule %s not met", tag) return } } @@ -286,7 +286,7 @@ func (t *JobClassTagger) Match(job *schema.Job) { for _, v := range ri.variables { value, err := expr.Run(v.expr, env) if err != nil { - log.Errorf("error running rule %s: %#v", tag, err) + cclog.Errorf("error running rule %s: %#v", tag, err) return } env[v.name] = value @@ -296,11 +296,11 @@ func (t *JobClassTagger) Match(job *schema.Job) { match, err := expr.Run(ri.rule, env) if err != nil { - log.Errorf("error running rule %s: %#v", tag, err) + cclog.Errorf("error running rule %s: %#v", tag, err) return } if match.(bool) { - log.Info("Rule matches!") + cclog.Info("Rule matches!") id := *job.ID if !r.HasTag(id, t.tagType, tag) { r.AddTagOrCreateDirect(id, t.tagType, tag) @@ -309,14 +309,14 @@ func (t *JobClassTagger) Match(job *schema.Job) { // process hint template var msg bytes.Buffer if err := ri.hint.Execute(&msg, env); err != nil { - log.Errorf("Template error: %s", err.Error()) + cclog.Errorf("Template error: %s", err.Error()) return } // FIXME: Handle case where multiple tags apply r.UpdateMetadata(job, "message", msg.String()) } else { - log.Info("Rule does not match!") + cclog.Info("Rule does not match!") } } } diff --git 
a/internal/tagger/detectApp.go b/internal/tagger/detectApp.go index d82db1a..c06fb72 100644 --- a/internal/tagger/detectApp.go +++ b/internal/tagger/detectApp.go @@ -15,9 +15,9 @@ import ( "strings" "github.com/ClusterCockpit/cc-backend/internal/repository" - "github.com/ClusterCockpit/cc-backend/internal/util" - "github.com/ClusterCockpit/cc-backend/pkg/log" - "github.com/ClusterCockpit/cc-backend/pkg/schema" + cclog "github.com/ClusterCockpit/cc-lib/ccLogger" + "github.com/ClusterCockpit/cc-lib/schema" + "github.com/ClusterCockpit/cc-lib/util" ) //go:embed apps/* @@ -53,15 +53,15 @@ func (t *AppTagger) EventMatch(s string) bool { func (t *AppTagger) EventCallback() { files, err := os.ReadDir(t.cfgPath) if err != nil { - log.Fatal(err) + cclog.Fatal(err) } for _, fn := range files { fns := fn.Name() - log.Debugf("Process: %s", fns) + cclog.Debugf("Process: %s", fns) f, err := os.Open(fmt.Sprintf("%s/%s", t.cfgPath, fns)) if err != nil { - log.Errorf("error opening app file %s: %#v", fns, err) + cclog.Errorf("error opening app file %s: %#v", fns, err) } t.scanApp(f, fns) } @@ -78,7 +78,7 @@ func (t *AppTagger) Register() error { t.apps = make(map[string]appInfo, 0) for _, fn := range files { fns := fn.Name() - log.Debugf("Process: %s", fns) + cclog.Debugf("Process: %s", fns) f, err := appFiles.Open(fmt.Sprintf("apps/%s", fns)) if err != nil { return fmt.Errorf("error opening app file %s: %#v", fns, err) @@ -89,7 +89,7 @@ func (t *AppTagger) Register() error { if util.CheckFileExists(t.cfgPath) { t.EventCallback() - log.Infof("Setup file watch for %s", t.cfgPath) + cclog.Infof("Setup file watch for %s", t.cfgPath) util.AddListener(t.cfgPath, t) } @@ -100,7 +100,7 @@ func (t *AppTagger) Match(job *schema.Job) { r := repository.GetJobRepository() metadata, err := r.FetchMetadata(job) if err != nil { - log.Infof("Cannot fetch metadata for job: %d on %s", job.JobID, job.Cluster) + cclog.Infof("Cannot fetch metadata for job: %d on %s", job.JobID, job.Cluster) return } @@ -122,6 +122,6 @@ func (t *AppTagger) Match(job *schema.Job) { } } } else { - log.Infof("Cannot extract job script for job: %d on %s", job.JobID, job.Cluster) + cclog.Infof("Cannot extract job script for job: %d on %s", job.JobID, job.Cluster) } } diff --git a/internal/tagger/detectApp_test.go b/internal/tagger/detectApp_test.go index 4fb52d9..78f5f76 100644 --- a/internal/tagger/detectApp_test.go +++ b/internal/tagger/detectApp_test.go @@ -1,5 +1,5 @@ -// Copyright (C) 2022 NHR@FAU, University Erlangen-Nuremberg. -// All rights reserved. +// Copyright (C) NHR@FAU, University Erlangen-Nuremberg. +// All rights reserved. This file is part of cc-backend. // Use of this source code is governed by a MIT-style // license that can be found in the LICENSE file. package tagger @@ -8,12 +8,12 @@ import ( "testing" "github.com/ClusterCockpit/cc-backend/internal/repository" - "github.com/ClusterCockpit/cc-backend/pkg/log" + cclog "github.com/ClusterCockpit/cc-lib/ccLogger" ) func setup(tb testing.TB) *repository.JobRepository { tb.Helper() - log.Init("warn", true) + cclog.Init("warn", true) dbfile := "../repository/testdata/job.db" err := repository.MigrateDB("sqlite3", dbfile) noErr(tb, err) diff --git a/internal/tagger/tagger.go b/internal/tagger/tagger.go index 04edd49..af0ba19 100644 --- a/internal/tagger/tagger.go +++ b/internal/tagger/tagger.go @@ -1,5 +1,5 @@ -// Copyright (C) 2023 NHR@FAU, University Erlangen-Nuremberg. -// All rights reserved. +// Copyright (C) NHR@FAU, University Erlangen-Nuremberg. +// All rights reserved. 
This file is part of cc-backend. // Use of this source code is governed by a MIT-style // license that can be found in the LICENSE file. package tagger @@ -8,8 +8,8 @@ import ( "sync" "github.com/ClusterCockpit/cc-backend/internal/repository" - "github.com/ClusterCockpit/cc-backend/pkg/log" - "github.com/ClusterCockpit/cc-backend/pkg/schema" + cclog "github.com/ClusterCockpit/cc-lib/ccLogger" + "github.com/ClusterCockpit/cc-lib/schema" ) type Tagger interface { @@ -66,21 +66,21 @@ func RunTaggers() error { r := repository.GetJobRepository() jl, err := r.GetJobList() if err != nil { - log.Errorf("Error while getting job list %s", err) + cclog.Errorf("Error while getting job list %s", err) return err } for _, id := range jl { job, err := r.FindByIdDirect(id) if err != nil { - log.Errorf("Error while getting job %s", err) + cclog.Errorf("Error while getting job %s", err) return err } for _, tagger := range jobTagger.startTaggers { tagger.Match(job) } for _, tagger := range jobTagger.stopTaggers { - log.Infof("Run stop tagger for job %d", job.ID) + cclog.Infof("Run stop tagger for job %d", job.ID) tagger.Match(job) } } diff --git a/internal/tagger/tagger_test.go b/internal/tagger/tagger_test.go index 057ca17..a94c20f 100644 --- a/internal/tagger/tagger_test.go +++ b/internal/tagger/tagger_test.go @@ -1,5 +1,5 @@ // Copyright (C) NHR@FAU, University Erlangen-Nuremberg. -// All rights reserved. +// All rights reserved. This file is part of cc-backend. // Use of this source code is governed by a MIT-style // license that can be found in the LICENSE file. package tagger @@ -8,7 +8,7 @@ import ( "testing" "github.com/ClusterCockpit/cc-backend/internal/repository" - "github.com/ClusterCockpit/cc-backend/pkg/schema" + "github.com/ClusterCockpit/cc-lib/schema" ) func TestInit(t *testing.T) { diff --git a/internal/taskManager/commitJobService.go b/internal/taskManager/commitJobService.go index c60acb3..5489007 100644 --- a/internal/taskManager/commitJobService.go +++ b/internal/taskManager/commitJobService.go @@ -1,5 +1,5 @@ // Copyright (C) NHR@FAU, University Erlangen-Nuremberg. -// All rights reserved. +// All rights reserved. This file is part of cc-backend. // Use of this source code is governed by a MIT-style // license that can be found in the LICENSE file. 
package taskManager @@ -9,7 +9,7 @@ import ( "github.com/ClusterCockpit/cc-backend/internal/config" "github.com/ClusterCockpit/cc-backend/internal/repository" - "github.com/ClusterCockpit/cc-backend/pkg/log" + cclog "github.com/ClusterCockpit/cc-lib/ccLogger" "github.com/go-co-op/gocron/v2" ) @@ -21,15 +21,15 @@ func RegisterCommitJobService() { frequency = "2m" } d, _ := time.ParseDuration(frequency) - log.Infof("Register commitJob service with %s interval", frequency) + cclog.Infof("Register commitJob service with %s interval", frequency) s.NewJob(gocron.DurationJob(d), gocron.NewTask( func() { start := time.Now() - log.Printf("Jobcache sync started at %s", start.Format(time.RFC3339)) + cclog.Printf("Jobcache sync started at %s", start.Format(time.RFC3339)) jobs, _ := jobRepo.SyncJobs() repository.CallJobStartHooks(jobs) - log.Printf("Jobcache sync and job callbacks are done and took %s", time.Since(start)) + cclog.Printf("Jobcache sync and job callbacks are done and took %s", time.Since(start)) })) } diff --git a/internal/taskManager/compressionService.go b/internal/taskManager/compressionService.go index 005a5bb..e96115f 100644 --- a/internal/taskManager/compressionService.go +++ b/internal/taskManager/compressionService.go @@ -1,5 +1,5 @@ // Copyright (C) NHR@FAU, University Erlangen-Nuremberg. -// All rights reserved. +// All rights reserved. This file is part of cc-backend. // Use of this source code is governed by a MIT-style // license that can be found in the LICENSE file. package taskManager @@ -8,13 +8,13 @@ import ( "time" "github.com/ClusterCockpit/cc-backend/pkg/archive" - "github.com/ClusterCockpit/cc-backend/pkg/log" - "github.com/ClusterCockpit/cc-backend/pkg/schema" + cclog "github.com/ClusterCockpit/cc-lib/ccLogger" + "github.com/ClusterCockpit/cc-lib/schema" "github.com/go-co-op/gocron/v2" ) func RegisterCompressionService(compressOlderThan int) { - log.Info("Register compression service") + cclog.Info("Register compression service") s.NewJob(gocron.DailyJob(1, gocron.NewAtTimes(gocron.NewAtTime(05, 0, 0))), gocron.NewTask( @@ -26,7 +26,7 @@ func RegisterCompressionService(compressOlderThan int) { startTime := time.Now().Unix() - int64(compressOlderThan*24*3600) lastTime := ar.CompressLast(startTime) if startTime == lastTime { - log.Info("Compression Service - Complete archive run") + cclog.Info("Compression Service - Complete archive run") jobs, err = jobRepo.FindJobsBetween(0, startTime) } else { @@ -34,7 +34,7 @@ func RegisterCompressionService(compressOlderThan int) { } if err != nil { - log.Warnf("Error while looking for compression jobs: %v", err) + cclog.Warnf("Error while looking for compression jobs: %v", err) } ar.Compress(jobs) })) diff --git a/internal/taskManager/ldapSyncService.go b/internal/taskManager/ldapSyncService.go index a998aa8..27212e8 100644 --- a/internal/taskManager/ldapSyncService.go +++ b/internal/taskManager/ldapSyncService.go @@ -1,5 +1,5 @@ // Copyright (C) NHR@FAU, University Erlangen-Nuremberg. -// All rights reserved. +// All rights reserved. This file is part of cc-backend. // Use of this source code is governed by a MIT-style // license that can be found in the LICENSE file. 
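All taskManager services in these hunks follow the patch's one mechanical rule: pkg/log is replaced by cc-lib's ccLogger, imported under the alias cclog so call sites only change their prefix. A minimal sketch, assuming ccLogger exports the same helpers the hunks use (Init, Infof, Warnf, and friends):

package main

import (
	cclog "github.com/ClusterCockpit/cc-lib/ccLogger"
)

func main() {
	// Two-argument Init(level, logdate), as in the test setup hunks above.
	cclog.Init("info", true)
	cclog.Infof("Register commitJob service with %s interval", "2m")
	cclog.Warnf("Could not parse duration for sync interval: %v", "not-a-duration")
}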
package taskManager @@ -8,29 +8,29 @@ import ( "time" "github.com/ClusterCockpit/cc-backend/internal/auth" - "github.com/ClusterCockpit/cc-backend/pkg/log" + cclog "github.com/ClusterCockpit/cc-lib/ccLogger" "github.com/go-co-op/gocron/v2" ) func RegisterLdapSyncService(ds string) { interval, err := parseDuration(ds) if err != nil { - log.Warnf("Could not parse duration for sync interval: %v", + cclog.Warnf("Could not parse duration for sync interval: %v", ds) return } auth := auth.GetAuthInstance() - log.Info("Register LDAP sync service") + cclog.Info("Register LDAP sync service") s.NewJob(gocron.DurationJob(interval), gocron.NewTask( func() { t := time.Now() - log.Printf("ldap sync started at %s", t.Format(time.RFC3339)) + cclog.Printf("ldap sync started at %s", t.Format(time.RFC3339)) if err := auth.LdapAuth.Sync(); err != nil { - log.Errorf("ldap sync failed: %s", err.Error()) + cclog.Errorf("ldap sync failed: %s", err.Error()) } - log.Print("ldap sync done") + cclog.Print("ldap sync done") })) } diff --git a/internal/taskManager/retentionService.go b/internal/taskManager/retentionService.go index 502f890..440c369 100644 --- a/internal/taskManager/retentionService.go +++ b/internal/taskManager/retentionService.go @@ -1,5 +1,5 @@ // Copyright (C) NHR@FAU, University Erlangen-Nuremberg. -// All rights reserved. +// All rights reserved. This file is part of cc-backend. // Use of this source code is governed by a MIT-style // license that can be found in the LICENSE file. package taskManager @@ -8,12 +8,12 @@ import ( "time" "github.com/ClusterCockpit/cc-backend/pkg/archive" - "github.com/ClusterCockpit/cc-backend/pkg/log" + cclog "github.com/ClusterCockpit/cc-lib/ccLogger" "github.com/go-co-op/gocron/v2" ) func RegisterRetentionDeleteService(age int, includeDB bool) { - log.Info("Register retention delete service") + cclog.Info("Register retention delete service") s.NewJob(gocron.DailyJob(1, gocron.NewAtTimes(gocron.NewAtTime(04, 0, 0))), gocron.NewTask( @@ -21,26 +21,26 @@ func RegisterRetentionDeleteService(age int, includeDB bool) { startTime := time.Now().Unix() - int64(age*24*3600) jobs, err := jobRepo.FindJobsBetween(0, startTime) if err != nil { - log.Warnf("Error while looking for retention jobs: %s", err.Error()) + cclog.Warnf("Error while looking for retention jobs: %s", err.Error()) } archive.GetHandle().CleanUp(jobs) if includeDB { cnt, err := jobRepo.DeleteJobsBefore(startTime) if err != nil { - log.Errorf("Error while deleting retention jobs from db: %s", err.Error()) + cclog.Errorf("Error while deleting retention jobs from db: %s", err.Error()) } else { - log.Infof("Retention: Removed %d jobs from db", cnt) + cclog.Infof("Retention: Removed %d jobs from db", cnt) } if err = jobRepo.Optimize(); err != nil { - log.Errorf("Error occured in db optimization: %s", err.Error()) + cclog.Errorf("Error occurred in db optimization: %s", err.Error()) } } })) } func RegisterRetentionMoveService(age int, includeDB bool, location string) { - log.Info("Register retention move service") + cclog.Info("Register retention move service") s.NewJob(gocron.DailyJob(1, gocron.NewAtTimes(gocron.NewAtTime(04, 0, 0))), gocron.NewTask( @@ -48,19 +48,19 @@ func RegisterRetentionMoveService(age int, includeDB bool, location string) { startTime := time.Now().Unix() - int64(age*24*3600) jobs, err := jobRepo.FindJobsBetween(0, startTime) if err != nil { - log.Warnf("Error while looking for retention jobs: %s", err.Error()) + cclog.Warnf("Error while looking for retention jobs: %s", err.Error()) }
archive.GetHandle().Move(jobs, location) if includeDB { cnt, err := jobRepo.DeleteJobsBefore(startTime) if err != nil { - log.Errorf("Error while deleting retention jobs from db: %v", err) + cclog.Errorf("Error while deleting retention jobs from db: %v", err) } else { - log.Infof("Retention: Removed %d jobs from db", cnt) + cclog.Infof("Retention: Removed %d jobs from db", cnt) } if err = jobRepo.Optimize(); err != nil { - log.Errorf("Error occured in db optimization: %v", err) + cclog.Errorf("Error occurred in db optimization: %v", err) } } })) diff --git a/internal/taskManager/stopJobsExceedTime.go b/internal/taskManager/stopJobsExceedTime.go index d97813a..a3743f6 100644 --- a/internal/taskManager/stopJobsExceedTime.go +++ b/internal/taskManager/stopJobsExceedTime.go @@ -1,5 +1,5 @@ // Copyright (C) NHR@FAU, University Erlangen-Nuremberg. -// All rights reserved. +// All rights reserved. This file is part of cc-backend. // Use of this source code is governed by a MIT-style // license that can be found in the LICENSE file. package taskManager @@ -8,19 +8,19 @@ import ( "runtime" "github.com/ClusterCockpit/cc-backend/internal/config" - "github.com/ClusterCockpit/cc-backend/pkg/log" + cclog "github.com/ClusterCockpit/cc-lib/ccLogger" "github.com/go-co-op/gocron/v2" ) func RegisterStopJobsExceedTime() { - log.Info("Register undead jobs service") + cclog.Info("Register undead jobs service") s.NewJob(gocron.DailyJob(1, gocron.NewAtTimes(gocron.NewAtTime(03, 0, 0))), gocron.NewTask( func() { err := jobRepo.StopJobsExceedingWalltimeBy(config.Keys.StopJobsExceedingWalltime) if err != nil { - log.Warnf("Error while looking for jobs exceeding their walltime: %s", err.Error()) + cclog.Warnf("Error while looking for jobs exceeding their walltime: %s", err.Error()) } runtime.GC() })) diff --git a/internal/taskManager/taskManager.go b/internal/taskManager/taskManager.go index 7d9a3a2..5f51040 100644 --- a/internal/taskManager/taskManager.go +++ b/internal/taskManager/taskManager.go @@ -1,5 +1,5 @@ // Copyright (C) NHR@FAU, University Erlangen-Nuremberg. -// All rights reserved. +// All rights reserved. This file is part of cc-backend. // Use of this source code is governed by a MIT-style // license that can be found in the LICENSE file.
package taskManager @@ -10,8 +10,8 @@ import ( "github.com/ClusterCockpit/cc-backend/internal/config" "github.com/ClusterCockpit/cc-backend/internal/repository" - "github.com/ClusterCockpit/cc-backend/pkg/log" - "github.com/ClusterCockpit/cc-backend/pkg/schema" + cclog "github.com/ClusterCockpit/cc-lib/ccLogger" + "github.com/ClusterCockpit/cc-lib/schema" "github.com/go-co-op/gocron/v2" ) @@ -23,13 +23,13 @@ var ( func parseDuration(s string) (time.Duration, error) { interval, err := time.ParseDuration(s) if err != nil { - log.Warnf("Could not parse duration for sync interval: %v", + cclog.Warnf("Could not parse duration for sync interval: %v", s) return 0, err } if interval == 0 { - log.Info("TaskManager: Sync interval is zero") + cclog.Info("TaskManager: Sync interval is zero") } return interval, nil @@ -40,7 +40,7 @@ func Start() { jobRepo = repository.GetJobRepository() s, err = gocron.NewScheduler() if err != nil { - log.Abortf("Taskmanager Start: Could not create gocron scheduler.\nError: %s\n", err.Error()) + cclog.Abortf("Taskmanager Start: Could not create gocron scheduler.\nError: %s\n", err.Error()) } if config.Keys.StopJobsExceedingWalltime > 0 { @@ -54,7 +54,7 @@ func Start() { cfg.Retention.IncludeDB = true if err := json.Unmarshal(config.Keys.Archive, &cfg); err != nil { - log.Warn("Error while unmarshaling raw config json") + cclog.Warn("Error while unmarshaling raw config json") } switch cfg.Retention.Policy { diff --git a/internal/taskManager/updateDurationService.go b/internal/taskManager/updateDurationService.go index 81d799e..70ec506 100644 --- a/internal/taskManager/updateDurationService.go +++ b/internal/taskManager/updateDurationService.go @@ -1,5 +1,5 @@ // Copyright (C) NHR@FAU, University Erlangen-Nuremberg. -// All rights reserved. +// All rights reserved. This file is part of cc-backend. // Use of this source code is governed by a MIT-style // license that can be found in the LICENSE file. package taskManager @@ -8,7 +8,7 @@ import ( "time" "github.com/ClusterCockpit/cc-backend/internal/config" - "github.com/ClusterCockpit/cc-backend/pkg/log" + cclog "github.com/ClusterCockpit/cc-lib/ccLogger" "github.com/go-co-op/gocron/v2" ) @@ -20,14 +20,14 @@ func RegisterUpdateDurationWorker() { frequency = "5m" } d, _ := time.ParseDuration(frequency) - log.Infof("Register Duration Update service with %s interval", frequency) + cclog.Infof("Register Duration Update service with %s interval", frequency) s.NewJob(gocron.DurationJob(d), gocron.NewTask( func() { start := time.Now() - log.Printf("Update duration started at %s", start.Format(time.RFC3339)) + cclog.Printf("Update duration started at %s", start.Format(time.RFC3339)) jobRepo.UpdateDuration() - log.Printf("Update duration is done and took %s", time.Since(start)) + cclog.Printf("Update duration is done and took %s", time.Since(start)) })) } diff --git a/internal/taskManager/updateFootprintService.go b/internal/taskManager/updateFootprintService.go index f417ad4..41c5837 100644 --- a/internal/taskManager/updateFootprintService.go +++ b/internal/taskManager/updateFootprintService.go @@ -1,5 +1,5 @@ // Copyright (C) NHR@FAU, University Erlangen-Nuremberg. -// All rights reserved. +// All rights reserved. This file is part of cc-backend. // Use of this source code is governed by a MIT-style // license that can be found in the LICENSE file. 
package taskManager @@ -12,8 +12,8 @@ import ( "github.com/ClusterCockpit/cc-backend/internal/config" "github.com/ClusterCockpit/cc-backend/internal/metricdata" "github.com/ClusterCockpit/cc-backend/pkg/archive" - "github.com/ClusterCockpit/cc-backend/pkg/log" - "github.com/ClusterCockpit/cc-backend/pkg/schema" + cclog "github.com/ClusterCockpit/cc-lib/ccLogger" + "github.com/ClusterCockpit/cc-lib/schema" sq "github.com/Masterminds/squirrel" "github.com/go-co-op/gocron/v2" ) @@ -26,7 +26,7 @@ func RegisterFootprintWorker() { frequency = "10m" } d, _ := time.ParseDuration(frequency) - log.Infof("Register Footprint Update service with %s interval", frequency) + cclog.Infof("Register Footprint Update service with %s interval", frequency) s.NewJob(gocron.DurationJob(d), gocron.NewTask( @@ -35,7 +35,7 @@ func RegisterFootprintWorker() { c := 0 ce := 0 cl := 0 - log.Printf("Update Footprints started at %s", s.Format(time.RFC3339)) + cclog.Printf("Update Footprints started at %s", s.Format(time.RFC3339)) for _, cluster := range archive.Clusters { s_cluster := time.Now() @@ -54,21 +54,21 @@ func RegisterFootprintWorker() { repo, err := metricdata.GetMetricDataRepo(cluster.Name) if err != nil { - log.Errorf("no metric data repository configured for '%s'", cluster.Name) + cclog.Errorf("no metric data repository configured for '%s'", cluster.Name) continue } pendingStatements := []sq.UpdateBuilder{} for _, job := range jobs { - log.Debugf("Prepare job %d", job.JobID) + cclog.Debugf("Prepare job %d", job.JobID) cl++ s_job := time.Now() jobStats, err := repo.LoadStats(job, allMetrics, context.Background()) if err != nil { - log.Errorf("error wile loading job data stats for footprint update: %v", err) + cclog.Errorf("error while loading job data stats for footprint update: %v", err) ce++ continue } @@ -106,26 +106,26 @@ func RegisterFootprintWorker() { stmt := sq.Update("job") stmt, err = jobRepo.UpdateFootprint(stmt, job) if err != nil { - log.Errorf("update job (dbid: %d) statement build failed at footprint step: %s", job.ID, err.Error()) + cclog.Errorf("update job (dbid: %d) statement build failed at footprint step: %s", job.ID, err.Error()) ce++ continue } stmt = stmt.Where("job.id = ?", job.ID) pendingStatements = append(pendingStatements, stmt) - log.Debugf("Job %d took %s", job.JobID, time.Since(s_job)) + cclog.Debugf("Job %d took %s", job.JobID, time.Since(s_job)) } t, err := jobRepo.TransactionInit() if err != nil { - log.Errorf("failed TransactionInit %v", err) - log.Errorf("skipped %d transactions for cluster %s", len(pendingStatements), cluster.Name) + cclog.Errorf("failed TransactionInit %v", err) + cclog.Errorf("skipped %d transactions for cluster %s", len(pendingStatements), cluster.Name) ce += len(pendingStatements) } else { for _, ps := range pendingStatements { query, args, err := ps.ToSql() if err != nil { - log.Errorf("failed in ToSQL conversion: %v", err) + cclog.Errorf("failed in ToSQL conversion: %v", err) ce++ } else { // args...: Footprint-JSON, Energyfootprint-JSON, TotalEnergy, JobID @@ -135,8 +135,8 @@ func RegisterFootprintWorker() { } jobRepo.TransactionEnd(t) } - log.Debugf("Finish Cluster %s, took %s", cluster.Name, time.Since(s_cluster)) + cclog.Debugf("Finish Cluster %s, took %s", cluster.Name, time.Since(s_cluster)) } - log.Printf("Updating %d (of %d; Skipped %d) Footprints is done and took %s", c, cl, ce, time.Since(s)) + cclog.Printf("Updating %d (of %d; Skipped %d) Footprints is done and took %s", c, cl, ce, time.Since(s)) })) } diff --git a/internal/util/array.go
b/internal/util/array.go deleted file mode 100644 index 19bdb53..0000000 --- a/internal/util/array.go +++ /dev/null @@ -1,14 +0,0 @@ -// Copyright (C) NHR@FAU, University Erlangen-Nuremberg. -// All rights reserved. -// Use of this source code is governed by a MIT-style -// license that can be found in the LICENSE file. -package util - -func Contains[T comparable](items []T, item T) bool { - for _, v := range items { - if v == item { - return true - } - } - return false -} diff --git a/internal/util/compress.go b/internal/util/compress.go deleted file mode 100644 index 4a901ae..0000000 --- a/internal/util/compress.go +++ /dev/null @@ -1,77 +0,0 @@ -// Copyright (C) NHR@FAU, University Erlangen-Nuremberg. -// All rights reserved. -// Use of this source code is governed by a MIT-style -// license that can be found in the LICENSE file. -package util - -import ( - "compress/gzip" - "io" - "os" - - "github.com/ClusterCockpit/cc-backend/pkg/log" -) - -func CompressFile(fileIn string, fileOut string) error { - originalFile, err := os.Open(fileIn) - if err != nil { - log.Errorf("CompressFile() error: %v", err) - return err - } - defer originalFile.Close() - - gzippedFile, err := os.Create(fileOut) - - if err != nil { - log.Errorf("CompressFile() error: %v", err) - return err - } - defer gzippedFile.Close() - - gzipWriter := gzip.NewWriter(gzippedFile) - defer gzipWriter.Close() - - _, err = io.Copy(gzipWriter, originalFile) - if err != nil { - log.Errorf("CompressFile() error: %v", err) - return err - } - gzipWriter.Flush() - if err := os.Remove(fileIn); err != nil { - log.Errorf("CompressFile() error: %v", err) - return err - } - - return nil -} - -func UncompressFile(fileIn string, fileOut string) error { - gzippedFile, err := os.Open(fileIn) - if err != nil { - log.Errorf("UncompressFile() error: %v", err) - return err - } - defer gzippedFile.Close() - - gzipReader, _ := gzip.NewReader(gzippedFile) - defer gzipReader.Close() - - uncompressedFile, err := os.Create(fileOut) - if err != nil { - log.Errorf("UncompressFile() error: %v", err) - return err - } - defer uncompressedFile.Close() - - _, err = io.Copy(uncompressedFile, gzipReader) - if err != nil { - log.Errorf("UncompressFile() error: %v", err) - return err - } - if err := os.Remove(fileIn); err != nil { - log.Errorf("UncompressFile() error: %v", err) - return err - } - - return nil -} diff --git a/internal/util/copy.go b/internal/util/copy.go deleted file mode 100644 index c6896c4..0000000 --- a/internal/util/copy.go +++ /dev/null @@ -1,107 +0,0 @@ -// Copyright (C) NHR@FAU, University Erlangen-Nuremberg. -// All rights reserved. -// Use of this source code is governed by a MIT-style -// license that can be found in the LICENSE file. 
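The generic and file helpers deleted from internal/util in the hunks around this point (Contains, CompressFile, CopyFile, CheckFileExists, Min/Max, and so on) are not dropped; the callers elsewhere in this patch import them from cc-lib instead. A minimal sketch, assuming cc-lib/util keeps the signatures those call sites use:

package main

import (
	"fmt"

	"github.com/ClusterCockpit/cc-lib/util"
)

func main() {
	// Same existence/size helpers the routes.go and fsBackend.go hunks call.
	if util.CheckFileExists("./var/notice.txt") {
		fmt.Println("notice.txt size:", util.GetFilesize("./var/notice.txt"))
	}
	// The same generic Min/Max the fsBackend Info() hunk uses for start times.
	fmt.Println(util.Min(int64(3), int64(7)), util.Max(int64(3), int64(7)))
}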
-package util - -import ( - "fmt" - "io" - "io/ioutil" - "os" - "path/filepath" -) - -func CopyFile(src, dst string) (err error) { - in, err := os.Open(src) - if err != nil { - return - } - defer in.Close() - - out, err := os.Create(dst) - if err != nil { - return - } - defer func() { - if e := out.Close(); e != nil { - err = e - } - }() - - _, err = io.Copy(out, in) - if err != nil { - return - } - - err = out.Sync() - if err != nil { - return - } - - si, err := os.Stat(src) - if err != nil { - return - } - err = os.Chmod(dst, si.Mode()) - if err != nil { - return - } - - return -} - -func CopyDir(src string, dst string) (err error) { - src = filepath.Clean(src) - dst = filepath.Clean(dst) - - si, err := os.Stat(src) - if err != nil { - return err - } - if !si.IsDir() { - return fmt.Errorf("source is not a directory") - } - - _, err = os.Stat(dst) - if err != nil && !os.IsNotExist(err) { - return - } - if err == nil { - return fmt.Errorf("destination already exists") - } - - err = os.MkdirAll(dst, si.Mode()) - if err != nil { - return - } - - entries, err := ioutil.ReadDir(src) - if err != nil { - return - } - - for _, entry := range entries { - srcPath := filepath.Join(src, entry.Name()) - dstPath := filepath.Join(dst, entry.Name()) - - if entry.IsDir() { - err = CopyDir(srcPath, dstPath) - if err != nil { - return - } - } else { - // Skip symlinks. - if entry.Mode()&os.ModeSymlink != 0 { - continue - } - - err = CopyFile(srcPath, dstPath) - if err != nil { - return - } - } - } - - return -} diff --git a/internal/util/diskUsage.go b/internal/util/diskUsage.go deleted file mode 100644 index 53665c5..0000000 --- a/internal/util/diskUsage.go +++ /dev/null @@ -1,34 +0,0 @@ -// Copyright (C) NHR@FAU, University Erlangen-Nuremberg. -// All rights reserved. -// Use of this source code is governed by a MIT-style -// license that can be found in the LICENSE file. -package util - -import ( - "os" - - "github.com/ClusterCockpit/cc-backend/pkg/log" -) - -func DiskUsage(dirpath string) float64 { - var size int64 - - dir, err := os.Open(dirpath) - if err != nil { - log.Errorf("DiskUsage() error: %v", err) - return 0 - } - defer dir.Close() - - files, err := dir.Readdir(-1) - if err != nil { - log.Errorf("DiskUsage() error: %v", err) - return 0 - } - - for _, file := range files { - size += file.Size() - } - - return float64(size) * 1e-6 -} diff --git a/internal/util/fstat.go b/internal/util/fstat.go deleted file mode 100644 index 54e1154..0000000 --- a/internal/util/fstat.go +++ /dev/null @@ -1,36 +0,0 @@ -// Copyright (C) NHR@FAU, University Erlangen-Nuremberg. -// All rights reserved. -// Use of this source code is governed by a MIT-style -// license that can be found in the LICENSE file. 
-package util - -import ( - "errors" - "os" - - "github.com/ClusterCockpit/cc-backend/pkg/log" -) - -func CheckFileExists(filePath string) bool { - _, err := os.Stat(filePath) - return !errors.Is(err, os.ErrNotExist) -} - -func GetFilesize(filePath string) int64 { - fileInfo, err := os.Stat(filePath) - if err != nil { - log.Errorf("Error on Stat %s: %v", filePath, err) - return 0 - } - return fileInfo.Size() -} - -func GetFilecount(path string) int { - files, err := os.ReadDir(path) - if err != nil { - log.Errorf("Error on ReadDir %s: %v", path, err) - return 0 - } - - return len(files) -} diff --git a/internal/util/fswatcher.go b/internal/util/fswatcher.go deleted file mode 100644 index 5d13462..0000000 --- a/internal/util/fswatcher.go +++ /dev/null @@ -1,75 +0,0 @@ -// Copyright (C) 2023 NHR@FAU, University Erlangen-Nuremberg. -// All rights reserved. -// Use of this source code is governed by a MIT-style -// license that can be found in the LICENSE file. -package util - -import ( - "sync" - - "github.com/ClusterCockpit/cc-backend/pkg/log" - "github.com/fsnotify/fsnotify" -) - -type Listener interface { - EventCallback() - EventMatch(event string) bool -} - -var ( - initOnce sync.Once - w *fsnotify.Watcher - listeners []Listener -) - -func AddListener(path string, l Listener) { - var err error - - initOnce.Do(func() { - var err error - w, err = fsnotify.NewWatcher() - if err != nil { - log.Error("creating a new watcher: %w", err) - } - listeners = make([]Listener, 0) - - go watchLoop(w) - }) - - listeners = append(listeners, l) - err = w.Add(path) - if err != nil { - log.Warnf("%q: %s", path, err) - } -} - -func FsWatcherShutdown() { - if w != nil { - w.Close() - } -} - -func watchLoop(w *fsnotify.Watcher) { - for { - select { - // Read from Errors. - case err, ok := <-w.Errors: - if !ok { // Channel was closed (i.e. Watcher.Close() was called). - return - } - log.Errorf("watch event loop: %s", err) - // Read from Events. - case e, ok := <-w.Events: - if !ok { // Channel was closed (i.e. Watcher.Close() was called). - return - } - - log.Infof("Event %s", e) - for _, l := range listeners { - if l.EventMatch(e.String()) { - l.EventCallback() - } - } - } - } -} diff --git a/internal/util/statistics.go b/internal/util/statistics.go deleted file mode 100644 index d75224f..0000000 --- a/internal/util/statistics.go +++ /dev/null @@ -1,60 +0,0 @@ -// Copyright (C) NHR@FAU, University Erlangen-Nuremberg. -// All rights reserved. -// Use of this source code is governed by a MIT-style -// license that can be found in the LICENSE file. 
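The fsnotify-based watcher deleted above moves to cc-lib as well: AppTagger and JobClassTagger now register themselves through util.AddListener from cc-lib/util. A sketch of a Listener implementation, under the assumption that cc-lib keeps the interface shown in the deleted fswatcher.go (EventMatch/EventCallback); the watch path is illustrative:

package main

import (
	cclog "github.com/ClusterCockpit/cc-lib/ccLogger"
	"github.com/ClusterCockpit/cc-lib/util"
)

type cfgWatcher struct{ path string }

// React to every event on the watched path.
func (c *cfgWatcher) EventMatch(event string) bool { return true }

// Re-read configuration whenever the watcher fires.
func (c *cfgWatcher) EventCallback() { cclog.Infof("%s changed, reloading", c.path) }

func main() {
	cclog.Init("info", true)
	w := &cfgWatcher{path: "./var/tagger/jobclasses"} // illustrative directory
	util.AddListener(w.path, w)
	select {} // keep the watcher goroutine alive
}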
-package util - -import ( - "golang.org/x/exp/constraints" - - "fmt" - "math" - "sort" -) - -func Min[T constraints.Ordered](a, b T) T { - if a < b { - return a - } - return b -} - -func Max[T constraints.Ordered](a, b T) T { - if a > b { - return a - } - return b -} - -func sortedCopy(input []float64) []float64 { - sorted := make([]float64, len(input)) - copy(sorted, input) - sort.Float64s(sorted) - return sorted -} - -func Mean(input []float64) (float64, error) { - if len(input) == 0 { - return math.NaN(), fmt.Errorf("input array is empty: %#v", input) - } - sum := 0.0 - for _, n := range input { - sum += n - } - return sum / float64(len(input)), nil -} - -func Median(input []float64) (median float64, err error) { - c := sortedCopy(input) - // Even numbers: add the two middle numbers, divide by two (use mean function) - // Odd numbers: Use the middle number - l := len(c) - if l == 0 { - return math.NaN(), fmt.Errorf("input array is empty: %#v", input) - } else if l%2 == 0 { - median, _ = Mean(c[l/2-1 : l/2+1]) - } else { - median = c[l/2] - } - return median, nil -} diff --git a/internal/util/util_test.go b/internal/util/util_test.go deleted file mode 100644 index d945c96..0000000 --- a/internal/util/util_test.go +++ /dev/null @@ -1,75 +0,0 @@ -// Copyright (C) NHR@FAU, University Erlangen-Nuremberg. -// All rights reserved. -// Use of this source code is governed by a MIT-style -// license that can be found in the LICENSE file. -package util_test - -import ( - "fmt" - "os" - "path/filepath" - "testing" - - "github.com/ClusterCockpit/cc-backend/internal/util" -) - -func TestCheckFileExists(t *testing.T) { - tmpdir := t.TempDir() - if !util.CheckFileExists(tmpdir) { - t.Fatal("expected true, got false") - } - - filePath := filepath.Join(tmpdir, "version.txt") - - if err := os.WriteFile(filePath, []byte(fmt.Sprintf("%d", 1)), 0666); err != nil { - t.Fatal(err) - } - if !util.CheckFileExists(filePath) { - t.Fatal("expected true, got false") - } - - filePath = filepath.Join(tmpdir, "version-test.txt") - if util.CheckFileExists(filePath) { - t.Fatal("expected false, got true") - } -} - -func TestGetFileSize(t *testing.T) { - tmpdir := t.TempDir() - filePath := filepath.Join(tmpdir, "data.json") - - if s := util.GetFilesize(filePath); s > 0 { - t.Fatalf("expected 0, got %d", s) - } - - if err := os.WriteFile(filePath, []byte(fmt.Sprintf("%d", 1)), 0666); err != nil { - t.Fatal(err) - } - if s := util.GetFilesize(filePath); s == 0 { - t.Fatal("expected not 0, got 0") - } -} - -func TestGetFileCount(t *testing.T) { - tmpdir := t.TempDir() - - if c := util.GetFilecount(tmpdir); c != 0 { - t.Fatalf("expected 0, got %d", c) - } - - filePath := filepath.Join(tmpdir, "data-1.json") - if err := os.WriteFile(filePath, []byte(fmt.Sprintf("%d", 1)), 0666); err != nil { - t.Fatal(err) - } - filePath = filepath.Join(tmpdir, "data-2.json") - if err := os.WriteFile(filePath, []byte(fmt.Sprintf("%d", 1)), 0666); err != nil { - t.Fatal(err) - } - if c := util.GetFilecount(tmpdir); c != 2 { - t.Fatalf("expected 2, got %d", c) - } - - if c := util.GetFilecount(filePath); c != 0 { - t.Fatalf("expected 0, got %d", c) - } -} diff --git a/pkg/archive/archive.go b/pkg/archive/archive.go index 318d6b4..f69cde3 100644 --- a/pkg/archive/archive.go +++ b/pkg/archive/archive.go @@ -1,5 +1,5 @@ // Copyright (C) NHR@FAU, University Erlangen-Nuremberg. -// All rights reserved. +// All rights reserved. This file is part of cc-backend. 
// Use of this source code is governed by a MIT-style // license that can be found in the LICENSE file. package archive @@ -10,9 +10,9 @@ import ( "maps" "sync" - "github.com/ClusterCockpit/cc-backend/pkg/log" - "github.com/ClusterCockpit/cc-backend/pkg/lrucache" - "github.com/ClusterCockpit/cc-backend/pkg/schema" + cclog "github.com/ClusterCockpit/cc-lib/ccLogger" + "github.com/ClusterCockpit/cc-lib/lrucache" + "github.com/ClusterCockpit/cc-lib/schema" ) const Version uint64 = 2 @@ -75,7 +75,7 @@ func Init(rawConfig json.RawMessage, disableArchive bool) error { } if err = json.Unmarshal(rawConfig, &cfg); err != nil { - log.Warn("Error while unmarshaling raw config json") + cclog.Warn("Error while unmarshaling raw config json") return } @@ -91,10 +91,10 @@ func Init(rawConfig json.RawMessage, disableArchive bool) error { var version uint64 version, err = ar.Init(rawConfig) if err != nil { - log.Errorf("Error while initializing archiveBackend: %s", err.Error()) + cclog.Errorf("Error while initializing archiveBackend: %s", err.Error()) return } - log.Infof("Load archive version %d", version) + cclog.Infof("Load archive version %d", version) err = initClusterConfig() }) @@ -114,7 +114,7 @@ func LoadAveragesFromArchive( ) error { metaFile, err := ar.LoadJobMeta(job) if err != nil { - log.Errorf("Error while loading job metadata from archiveBackend: %s", err.Error()) + cclog.Errorf("Error while loading job metadata from archiveBackend: %s", err.Error()) return err } @@ -137,7 +137,7 @@ func LoadStatsFromArchive( data := make(map[string]schema.MetricStatistics, len(metrics)) metaFile, err := ar.LoadJobMeta(job) if err != nil { - log.Errorf("Error while loading job metadata from archiveBackend: %s", err.Error()) + cclog.Errorf("Error while loading job metadata from archiveBackend: %s", err.Error()) return data, err } @@ -166,7 +166,7 @@ func LoadScopedStatsFromArchive( ) (schema.ScopedJobStats, error) { data, err := ar.LoadJobStats(job) if err != nil { - log.Errorf("Error while loading job stats from archiveBackend: %s", err.Error()) + cclog.Errorf("Error while loading job stats from archiveBackend: %s", err.Error()) return nil, err } @@ -176,7 +176,7 @@ func LoadScopedStatsFromArchive( func GetStatistics(job *schema.Job) (map[string]schema.JobStatistics, error) { metaFile, err := ar.LoadJobMeta(job) if err != nil { - log.Errorf("Error while loading job metadata from archiveBackend: %s", err.Error()) + cclog.Errorf("Error while loading job metadata from archiveBackend: %s", err.Error()) return nil, err } @@ -195,7 +195,7 @@ func UpdateMetadata(job *schema.Job, metadata map[string]string) error { jobMeta, err := ar.LoadJobMeta(job) if err != nil { - log.Errorf("Error while loading job metadata from archiveBackend: %s", err.Error()) + cclog.Errorf("Error while loading job metadata from archiveBackend: %s", err.Error()) return err } @@ -216,7 +216,7 @@ func UpdateTags(job *schema.Job, tags []*schema.Tag) error { jobMeta, err := ar.LoadJobMeta(job) if err != nil { - log.Errorf("Error while loading job metadata from archiveBackend: %s", err.Error()) + cclog.Errorf("Error while loading job metadata from archiveBackend: %s", err.Error()) return err } diff --git a/pkg/archive/archive_test.go b/pkg/archive/archive_test.go index ba53e38..715e6ea 100644 --- a/pkg/archive/archive_test.go +++ b/pkg/archive/archive_test.go @@ -1,5 +1,5 @@ // Copyright (C) NHR@FAU, University Erlangen-Nuremberg. -// All rights reserved. +// All rights reserved. This file is part of cc-backend. 
// Use of this source code is governed by a MIT-style // license that can be found in the LICENSE file. package archive_test @@ -10,9 +10,9 @@ import ( "path/filepath" "testing" - "github.com/ClusterCockpit/cc-backend/internal/util" "github.com/ClusterCockpit/cc-backend/pkg/archive" - "github.com/ClusterCockpit/cc-backend/pkg/schema" + "github.com/ClusterCockpit/cc-lib/schema" + "github.com/ClusterCockpit/cc-lib/util" ) var jobs []*schema.Job diff --git a/pkg/archive/clusterConfig.go b/pkg/archive/clusterConfig.go index 04d1349..51b89b1 100644 --- a/pkg/archive/clusterConfig.go +++ b/pkg/archive/clusterConfig.go @@ -1,5 +1,5 @@ // Copyright (C) NHR@FAU, University Erlangen-Nuremberg. -// All rights reserved. +// All rights reserved. This file is part of cc-backend. // Use of this source code is governed by a MIT-style // license that can be found in the LICENSE file. package archive @@ -8,8 +8,8 @@ import ( "errors" "fmt" - "github.com/ClusterCockpit/cc-backend/pkg/log" - "github.com/ClusterCockpit/cc-backend/pkg/schema" + cclog "github.com/ClusterCockpit/cc-lib/ccLogger" + "github.com/ClusterCockpit/cc-lib/schema" ) var ( @@ -27,7 +27,7 @@ func initClusterConfig() error { cluster, err := ar.LoadClusterCfg(c) if err != nil { - log.Warnf("Error while loading cluster config for cluster '%v'", c) + cclog.Warnf("Error while loading cluster config for cluster '%v'", c) return err } diff --git a/pkg/archive/clusterConfig_test.go b/pkg/archive/clusterConfig_test.go index a73f22f..3613017 100644 --- a/pkg/archive/clusterConfig_test.go +++ b/pkg/archive/clusterConfig_test.go @@ -1,5 +1,5 @@ // Copyright (C) NHR@FAU, University Erlangen-Nuremberg. -// All rights reserved. +// All rights reserved. This file is part of cc-backend. // Use of this source code is governed by a MIT-style // license that can be found in the LICENSE file. package archive_test diff --git a/pkg/archive/fsBackend.go b/pkg/archive/fsBackend.go index a90c092..beccdfa 100644 --- a/pkg/archive/fsBackend.go +++ b/pkg/archive/fsBackend.go @@ -1,5 +1,5 @@ // Copyright (C) NHR@FAU, University Erlangen-Nuremberg. -// All rights reserved. +// All rights reserved. This file is part of cc-backend. // Use of this source code is governed by a MIT-style // license that can be found in the LICENSE file. 
package archive @@ -21,9 +21,9 @@ import ( "time" "github.com/ClusterCockpit/cc-backend/internal/config" - "github.com/ClusterCockpit/cc-backend/internal/util" - "github.com/ClusterCockpit/cc-backend/pkg/log" - "github.com/ClusterCockpit/cc-backend/pkg/schema" + cclog "github.com/ClusterCockpit/cc-lib/ccLogger" + "github.com/ClusterCockpit/cc-lib/schema" + "github.com/ClusterCockpit/cc-lib/util" "github.com/santhosh-tekuri/jsonschema/v5" ) @@ -68,7 +68,7 @@ func getPath( func loadJobMeta(filename string) (*schema.Job, error) { b, err := os.ReadFile(filename) if err != nil { - log.Errorf("loadJobMeta() > open file error: %v", err) + cclog.Errorf("loadJobMeta() > open file error: %v", err) return nil, err } if config.Keys.Validate { @@ -83,7 +83,7 @@ func loadJobMeta(filename string) (*schema.Job, error) { func loadJobData(filename string, isCompressed bool) (schema.JobData, error) { f, err := os.Open(filename) if err != nil { - log.Errorf("fsBackend LoadJobData()- %v", err) + cclog.Errorf("fsBackend LoadJobData()- %v", err) return nil, err } defer f.Close() @@ -91,7 +91,7 @@ func loadJobData(filename string, isCompressed bool) (schema.JobData, error) { if isCompressed { r, err := gzip.NewReader(f) if err != nil { - log.Errorf(" %v", err) + cclog.Errorf(" %v", err) return nil, err } defer r.Close() @@ -116,7 +116,7 @@ func loadJobData(filename string, isCompressed bool) (schema.JobData, error) { func loadJobStats(filename string, isCompressed bool) (schema.ScopedJobStats, error) { f, err := os.Open(filename) if err != nil { - log.Errorf("fsBackend LoadJobStats()- %v", err) + cclog.Errorf("fsBackend LoadJobStats()- %v", err) return nil, err } defer f.Close() @@ -124,7 +124,7 @@ func loadJobStats(filename string, isCompressed bool) (schema.ScopedJobStats, er if isCompressed { r, err := gzip.NewReader(f) if err != nil { - log.Errorf(" %v", err) + cclog.Errorf(" %v", err) return nil, err } defer r.Close() @@ -149,25 +149,25 @@ func loadJobStats(filename string, isCompressed bool) (schema.ScopedJobStats, er func (fsa *FsArchive) Init(rawConfig json.RawMessage) (uint64, error) { var config FsArchiveConfig if err := json.Unmarshal(rawConfig, &config); err != nil { - log.Warnf("Init() > Unmarshal error: %#v", err) + cclog.Warnf("Init() > Unmarshal error: %#v", err) return 0, err } if config.Path == "" { err := fmt.Errorf("Init() : empty config.Path") - log.Errorf("Init() > config.Path error: %v", err) + cclog.Errorf("Init() > config.Path error: %v", err) return 0, err } fsa.path = config.Path b, err := os.ReadFile(filepath.Join(fsa.path, "version.txt")) if err != nil { - log.Warnf("fsBackend Init() - %v", err) + cclog.Warnf("fsBackend Init() - %v", err) return 0, err } version, err := strconv.ParseUint(strings.TrimSuffix(string(b), "\n"), 10, 64) if err != nil { - log.Errorf("fsBackend Init()- %v", err) + cclog.Errorf("fsBackend Init()- %v", err) return 0, err } @@ -177,7 +177,7 @@ func (fsa *FsArchive) Init(rawConfig json.RawMessage) (uint64, error) { entries, err := os.ReadDir(fsa.path) if err != nil { - log.Errorf("Init() > ReadDir() error: %v", err) + cclog.Errorf("Init() > ReadDir() error: %v", err) return 0, err } @@ -195,7 +195,7 @@ func (fsa *FsArchive) Info() { fmt.Printf("Job archive %s\n", fsa.path) clusters, err := os.ReadDir(fsa.path) if err != nil { - log.Fatalf("Reading clusters failed: %s", err.Error()) + cclog.Fatalf("Reading clusters failed: %s", err.Error()) } ci := make(map[string]*clusterInfo) @@ -209,7 +209,7 @@ func (fsa *FsArchive) Info() { ci[cc] = &clusterInfo{dateFirst: 
time.Now().Unix()} lvl1Dirs, err := os.ReadDir(filepath.Join(fsa.path, cluster.Name())) if err != nil { - log.Fatalf("Reading jobs failed @ lvl1 dirs: %s", err.Error()) + cclog.Fatalf("Reading jobs failed @ lvl1 dirs: %s", err.Error()) } for _, lvl1Dir := range lvl1Dirs { @@ -218,14 +218,14 @@ func (fsa *FsArchive) Info() { } lvl2Dirs, err := os.ReadDir(filepath.Join(fsa.path, cluster.Name(), lvl1Dir.Name())) if err != nil { - log.Fatalf("Reading jobs failed @ lvl2 dirs: %s", err.Error()) + cclog.Fatalf("Reading jobs failed @ lvl2 dirs: %s", err.Error()) } for _, lvl2Dir := range lvl2Dirs { dirpath := filepath.Join(fsa.path, cluster.Name(), lvl1Dir.Name(), lvl2Dir.Name()) startTimeDirs, err := os.ReadDir(dirpath) if err != nil { - log.Fatalf("Reading jobs failed @ starttime dirs: %s", err.Error()) + cclog.Fatalf("Reading jobs failed @ starttime dirs: %s", err.Error()) } for _, startTimeDir := range startTimeDirs { @@ -233,7 +233,7 @@ func (fsa *FsArchive) Info() { ci[cc].numJobs++ startTime, err := strconv.ParseInt(startTimeDir.Name(), 10, 64) if err != nil { - log.Fatalf("Cannot parse starttime: %s", err.Error()) + cclog.Fatalf("Cannot parse starttime: %s", err.Error()) } ci[cc].dateFirst = util.Min(ci[cc].dateFirst, startTime) ci[cc].dateLast = util.Max(ci[cc].dateLast, startTime) @@ -278,7 +278,7 @@ func (fsa *FsArchive) Clean(before int64, after int64) { clusters, err := os.ReadDir(fsa.path) if err != nil { - log.Fatalf("Reading clusters failed: %s", err.Error()) + cclog.Fatalf("Reading clusters failed: %s", err.Error()) } for _, cluster := range clusters { @@ -288,7 +288,7 @@ func (fsa *FsArchive) Clean(before int64, after int64) { lvl1Dirs, err := os.ReadDir(filepath.Join(fsa.path, cluster.Name())) if err != nil { - log.Fatalf("Reading jobs failed @ lvl1 dirs: %s", err.Error()) + cclog.Fatalf("Reading jobs failed @ lvl1 dirs: %s", err.Error()) } for _, lvl1Dir := range lvl1Dirs { @@ -297,33 +297,33 @@ func (fsa *FsArchive) Clean(before int64, after int64) { } lvl2Dirs, err := os.ReadDir(filepath.Join(fsa.path, cluster.Name(), lvl1Dir.Name())) if err != nil { - log.Fatalf("Reading jobs failed @ lvl2 dirs: %s", err.Error()) + cclog.Fatalf("Reading jobs failed @ lvl2 dirs: %s", err.Error()) } for _, lvl2Dir := range lvl2Dirs { dirpath := filepath.Join(fsa.path, cluster.Name(), lvl1Dir.Name(), lvl2Dir.Name()) startTimeDirs, err := os.ReadDir(dirpath) if err != nil { - log.Fatalf("Reading jobs failed @ starttime dirs: %s", err.Error()) + cclog.Fatalf("Reading jobs failed @ starttime dirs: %s", err.Error()) } for _, startTimeDir := range startTimeDirs { if startTimeDir.IsDir() { startTime, err := strconv.ParseInt(startTimeDir.Name(), 10, 64) if err != nil { - log.Fatalf("Cannot parse starttime: %s", err.Error()) + cclog.Fatalf("Cannot parse starttime: %s", err.Error()) } if startTime < before || startTime > after { if err := os.RemoveAll(filepath.Join(dirpath, startTimeDir.Name())); err != nil { - log.Errorf("JobArchive Cleanup() error: %v", err) + cclog.Errorf("JobArchive Cleanup() error: %v", err) } } } } if util.GetFilecount(dirpath) == 0 { if err := os.Remove(dirpath); err != nil { - log.Errorf("JobArchive Clean() error: %v", err) + cclog.Errorf("JobArchive Clean() error: %v", err) } } } @@ -337,16 +337,16 @@ func (fsa *FsArchive) Move(jobs []*schema.Job, path string) { target := getDirectory(job, path) if err := os.MkdirAll(filepath.Clean(filepath.Join(target, "..")), 0777); err != nil { - log.Errorf("JobArchive Move MkDir error: %v", err) + cclog.Errorf("JobArchive Move MkDir error: 
%v", err) } if err := os.Rename(source, target); err != nil { - log.Errorf("JobArchive Move() error: %v", err) + cclog.Errorf("JobArchive Move() error: %v", err) } parent := filepath.Clean(filepath.Join(source, "..")) if util.GetFilecount(parent) == 0 { if err := os.Remove(parent); err != nil { - log.Errorf("JobArchive Move() error: %v", err) + cclog.Errorf("JobArchive Move() error: %v", err) } } } @@ -357,18 +357,18 @@ func (fsa *FsArchive) CleanUp(jobs []*schema.Job) { for _, job := range jobs { dir := getDirectory(job, fsa.path) if err := os.RemoveAll(dir); err != nil { - log.Errorf("JobArchive Cleanup() error: %v", err) + cclog.Errorf("JobArchive Cleanup() error: %v", err) } parent := filepath.Clean(filepath.Join(dir, "..")) if util.GetFilecount(parent) == 0 { if err := os.Remove(parent); err != nil { - log.Errorf("JobArchive Cleanup() error: %v", err) + cclog.Errorf("JobArchive Cleanup() error: %v", err) } } } - log.Infof("Retention Service - Remove %d files in %s", len(jobs), time.Since(start)) + cclog.Infof("Retention Service - Remove %d files in %s", len(jobs), time.Since(start)) } func (fsa *FsArchive) Compress(jobs []*schema.Job) { @@ -383,24 +383,24 @@ func (fsa *FsArchive) Compress(jobs []*schema.Job) { } } - log.Infof("Compression Service - %d files took %s", cnt, time.Since(start)) + cclog.Infof("Compression Service - %d files took %s", cnt, time.Since(start)) } func (fsa *FsArchive) CompressLast(starttime int64) int64 { filename := filepath.Join(fsa.path, "compress.txt") b, err := os.ReadFile(filename) if err != nil { - log.Errorf("fsBackend Compress - %v", err) + cclog.Errorf("fsBackend Compress - %v", err) os.WriteFile(filename, []byte(fmt.Sprintf("%d", starttime)), 0644) return starttime } last, err := strconv.ParseInt(strings.TrimSuffix(string(b), "\n"), 10, 64) if err != nil { - log.Errorf("fsBackend Compress - %v", err) + cclog.Errorf("fsBackend Compress - %v", err) return starttime } - log.Infof("fsBackend Compress - start %d last %d", starttime, last) + cclog.Infof("fsBackend Compress - start %d last %d", starttime, last) os.WriteFile(filename, []byte(fmt.Sprintf("%d", starttime)), 0644) return last } @@ -437,10 +437,10 @@ func (fsa *FsArchive) LoadJobMeta(job *schema.Job) (*schema.Job, error) { func (fsa *FsArchive) LoadClusterCfg(name string) (*schema.Cluster, error) { b, err := os.ReadFile(filepath.Join(fsa.path, name, "cluster.json")) if err != nil { - log.Errorf("LoadClusterCfg() > open file error: %v", err) + cclog.Errorf("LoadClusterCfg() > open file error: %v", err) // if config.Keys.Validate { if err := schema.Validate(schema.ClusterCfg, bytes.NewReader(b)); err != nil { - log.Warnf("Validate cluster config: %v\n", err) + cclog.Warnf("Validate cluster config: %v\n", err) return &schema.Cluster{}, fmt.Errorf("validate cluster config: %v", err) } } @@ -453,7 +453,7 @@ func (fsa *FsArchive) Iter(loadMetricData bool) <-chan JobContainer { go func() { clustersDir, err := os.ReadDir(fsa.path) if err != nil { - log.Fatalf("Reading clusters failed @ cluster dirs: %s", err.Error()) + cclog.Fatalf("Reading clusters failed @ cluster dirs: %s", err.Error()) } for _, clusterDir := range clustersDir { @@ -462,7 +462,7 @@ func (fsa *FsArchive) Iter(loadMetricData bool) <-chan JobContainer { } lvl1Dirs, err := os.ReadDir(filepath.Join(fsa.path, clusterDir.Name())) if err != nil { - log.Fatalf("Reading jobs failed @ lvl1 dirs: %s", err.Error()) + cclog.Fatalf("Reading jobs failed @ lvl1 dirs: %s", err.Error()) } for _, lvl1Dir := range lvl1Dirs { @@ -473,21 +473,21 @@ func 
(fsa *FsArchive) Iter(loadMetricData bool) <-chan JobContainer { lvl2Dirs, err := os.ReadDir(filepath.Join(fsa.path, clusterDir.Name(), lvl1Dir.Name())) if err != nil { - log.Fatalf("Reading jobs failed @ lvl2 dirs: %s", err.Error()) + cclog.Fatalf("Reading jobs failed @ lvl2 dirs: %s", err.Error()) } for _, lvl2Dir := range lvl2Dirs { dirpath := filepath.Join(fsa.path, clusterDir.Name(), lvl1Dir.Name(), lvl2Dir.Name()) startTimeDirs, err := os.ReadDir(dirpath) if err != nil { - log.Fatalf("Reading jobs failed @ starttime dirs: %s", err.Error()) + cclog.Fatalf("Reading jobs failed @ starttime dirs: %s", err.Error()) } for _, startTimeDir := range startTimeDirs { if startTimeDir.IsDir() { job, err := loadJobMeta(filepath.Join(dirpath, startTimeDir.Name(), "meta.json")) if err != nil && !errors.Is(err, &jsonschema.ValidationError{}) { - log.Errorf("in %s: %s", filepath.Join(dirpath, startTimeDir.Name()), err.Error()) + cclog.Errorf("in %s: %s", filepath.Join(dirpath, startTimeDir.Name()), err.Error()) } if loadMetricData { @@ -501,10 +501,10 @@ func (fsa *FsArchive) Iter(loadMetricData bool) <-chan JobContainer { data, err := loadJobData(filename, isCompressed) if err != nil && !errors.Is(err, &jsonschema.ValidationError{}) { - log.Errorf("in %s: %s", filepath.Join(dirpath, startTimeDir.Name()), err.Error()) + cclog.Errorf("in %s: %s", filepath.Join(dirpath, startTimeDir.Name()), err.Error()) } ch <- JobContainer{Meta: job, Data: &data} - log.Errorf("in %s: %s", filepath.Join(dirpath, startTimeDir.Name()), err.Error()) + cclog.Errorf("in %s: %s", filepath.Join(dirpath, startTimeDir.Name()), err.Error()) } else { ch <- JobContainer{Meta: job, Data: nil} } @@ -521,15 +521,15 @@ func (fsa *FsArchive) Iter(loadMetricData bool) <-chan JobContainer { func (fsa *FsArchive) StoreJobMeta(job *schema.Job) error { f, err := os.Create(getPath(job, fsa.path, "meta.json")) if err != nil { - log.Error("Error while creating filepath for meta.json") + cclog.Error("Error while creating filepath for meta.json") return err } if err := EncodeJobMeta(f, job); err != nil { - log.Error("Error while encoding job metadata to meta.json file") + cclog.Error("Error while encoding job metadata to meta.json file") return err } if err := f.Close(); err != nil { - log.Warn("Error while closing meta.json file") + cclog.Warn("Error while closing meta.json file") return err } @@ -546,35 +546,35 @@ func (fsa *FsArchive) ImportJob( ) error { dir := getPath(jobMeta, fsa.path, "") if err := os.MkdirAll(dir, 0777); err != nil { - log.Error("Error while creating job archive path") + cclog.Error("Error while creating job archive path") return err } f, err := os.Create(path.Join(dir, "meta.json")) if err != nil { - log.Error("Error while creating filepath for meta.json") + cclog.Error("Error while creating filepath for meta.json") return err } if err := EncodeJobMeta(f, jobMeta); err != nil { - log.Error("Error while encoding job metadata to meta.json file") + cclog.Error("Error while encoding job metadata to meta.json file") return err } if err := f.Close(); err != nil { - log.Warn("Error while closing meta.json file") + cclog.Warn("Error while closing meta.json file") return err } f, err = os.Create(path.Join(dir, "data.json")) if err != nil { - log.Error("Error while creating filepath for data.json") + cclog.Error("Error while creating filepath for data.json") return err } if err := EncodeJobData(f, jobData); err != nil { - log.Error("Error while encoding job metricdata to data.json file") + cclog.Error("Error while encoding job 
metricdata to data.json file") return err } if err := f.Close(); err != nil { - log.Warn("Error while closing data.json file") + cclog.Warn("Error while closing data.json file") } return err } diff --git a/pkg/archive/fsBackend_test.go b/pkg/archive/fsBackend_test.go index ddb430a..7b1fe74 100644 --- a/pkg/archive/fsBackend_test.go +++ b/pkg/archive/fsBackend_test.go @@ -1,5 +1,5 @@ // Copyright (C) NHR@FAU, University Erlangen-Nuremberg. -// All rights reserved. +// All rights reserved. This file is part of cc-backend. // Use of this source code is governed by a MIT-style // license that can be found in the LICENSE file. package archive @@ -10,8 +10,8 @@ import ( "path/filepath" "testing" - "github.com/ClusterCockpit/cc-backend/internal/util" - "github.com/ClusterCockpit/cc-backend/pkg/schema" + "github.com/ClusterCockpit/cc-lib/schema" + "github.com/ClusterCockpit/cc-lib/util" ) func TestInitEmptyPath(t *testing.T) { diff --git a/pkg/archive/json.go b/pkg/archive/json.go index d3639f5..f0665d4 100644 --- a/pkg/archive/json.go +++ b/pkg/archive/json.go @@ -1,5 +1,5 @@ // Copyright (C) NHR@FAU, University Erlangen-Nuremberg. -// All rights reserved. +// All rights reserved. This file is part of cc-backend. // Use of this source code is governed by a MIT-style // license that can be found in the LICENSE file. package archive @@ -9,15 +9,15 @@ import ( "io" "time" - "github.com/ClusterCockpit/cc-backend/pkg/log" - "github.com/ClusterCockpit/cc-backend/pkg/schema" + cclog "github.com/ClusterCockpit/cc-lib/ccLogger" + "github.com/ClusterCockpit/cc-lib/schema" ) func DecodeJobData(r io.Reader, k string) (schema.JobData, error) { data := cache.Get(k, func() (value interface{}, ttl time.Duration, size int) { var d schema.JobData if err := json.NewDecoder(r).Decode(&d); err != nil { - log.Warn("Error while decoding raw job data json") + cclog.Warn("Error while decoding raw job data json") return err, 0, 1000 } @@ -25,7 +25,7 @@ func DecodeJobData(r io.Reader, k string) (schema.JobData, error) { }) if err, ok := data.(error); ok { - log.Warn("Error in decoded job data set") + cclog.Warn("Error in decoded job data set") return nil, err } @@ -72,7 +72,7 @@ func DecodeJobStats(r io.Reader, k string) (schema.ScopedJobStats, error) { func DecodeJobMeta(r io.Reader) (*schema.Job, error) { var d schema.Job if err := json.NewDecoder(r).Decode(&d); err != nil { - log.Warn("Error while decoding raw job meta json") + cclog.Warn("Error while decoding raw job meta json") return &d, err } @@ -84,7 +84,7 @@ func DecodeJobMeta(r io.Reader) (*schema.Job, error) { func DecodeCluster(r io.Reader) (*schema.Cluster, error) { var c schema.Cluster if err := json.NewDecoder(r).Decode(&c); err != nil { - log.Warn("Error while decoding raw cluster json") + cclog.Warn("Error while decoding raw cluster json") return &c, err } @@ -96,7 +96,7 @@ func DecodeCluster(r io.Reader) (*schema.Cluster, error) { func EncodeJobData(w io.Writer, d *schema.JobData) error { // Sanitize parameters if err := json.NewEncoder(w).Encode(d); err != nil { - log.Warn("Error while encoding new job data json") + cclog.Warn("Error while encoding new job data json") return err } @@ -106,7 +106,7 @@ func EncodeJobData(w io.Writer, d *schema.JobData) error { func EncodeJobMeta(w io.Writer, d *schema.Job) error { // Sanitize parameters if err := json.NewEncoder(w).Encode(d); err != nil { - log.Warn("Error while encoding new job meta json") + cclog.Warn("Error while encoding new job meta json") return err } diff --git a/pkg/archive/nodelist.go 
b/pkg/archive/nodelist.go index 26a15d2..23ce8a4 100644 --- a/pkg/archive/nodelist.go +++ b/pkg/archive/nodelist.go @@ -1,5 +1,5 @@ // Copyright (C) NHR@FAU, University Erlangen-Nuremberg. -// All rights reserved. +// All rights reserved. This file is part of cc-backend. // Use of this source code is governed by a MIT-style // license that can be found in the LICENSE file. package archive @@ -9,7 +9,7 @@ import ( "strconv" "strings" - "github.com/ClusterCockpit/cc-backend/pkg/log" + cclog "github.com/ClusterCockpit/cc-lib/ccLogger" ) type NodeList [][]interface { @@ -51,7 +51,7 @@ func (nl *NodeList) PrintList() []string { if inner["zeroPadded"] == 1 { out = append(out, fmt.Sprintf("%s%0*d", prefix, inner["digits"], i)) } else { - log.Error("node list: only zero-padded ranges are allowed") + cclog.Error("node list: only zero-padded ranges are allowed") } } } @@ -129,7 +129,7 @@ type NLExprIntRange struct { func (nle NLExprIntRange) consume(input string) (next string, ok bool) { if !nle.zeroPadded || nle.digits < 1 { - log.Error("only zero-padded ranges are allowed") + cclog.Error("only zero-padded ranges are allowed") return "", false } diff --git a/pkg/archive/nodelist_test.go b/pkg/archive/nodelist_test.go index 52aa812..f2747c8 100644 --- a/pkg/archive/nodelist_test.go +++ b/pkg/archive/nodelist_test.go @@ -1,5 +1,5 @@ // Copyright (C) NHR@FAU, University Erlangen-Nuremberg. -// All rights reserved. +// All rights reserved. This file is part of cc-backend. // Use of this source code is governed by a MIT-style // license that can be found in the LICENSE file. package archive diff --git a/pkg/archive/s3Backend.go b/pkg/archive/s3Backend.go index d8b06e7..8af644a 100644 --- a/pkg/archive/s3Backend.go +++ b/pkg/archive/s3Backend.go @@ -1,5 +1,5 @@ // Copyright (C) NHR@FAU, University Erlangen-Nuremberg. -// All rights reserved. +// All rights reserved. This file is part of cc-backend. // Use of this source code is governed by a MIT-style // license that can be found in the LICENSE file. package archive diff --git a/pkg/log/log.go b/pkg/log/log.go deleted file mode 100644 index ef14535..0000000 --- a/pkg/log/log.go +++ /dev/null @@ -1,220 +0,0 @@ -// Copyright (C) NHR@FAU, University Erlangen-Nuremberg. -// All rights reserved. -// Use of this source code is governed by a MIT-style -// license that can be found in the LICENSE file. -package log - -import ( - "fmt" - "io" - "log" - "os" -) - -// Provides a simple way of logging with different levels. -// Time/Date are not logged because systemd adds -// them for us (Default, can be changed by flag '--logdate true'). 
-//
-// Uses these prefixes: https://www.freedesktop.org/software/systemd/man/sd-daemon.html
-
-var (
-	DebugWriter io.Writer = os.Stderr
-	InfoWriter  io.Writer = os.Stderr
-	WarnWriter  io.Writer = os.Stderr
-	ErrWriter   io.Writer = os.Stderr
-	CritWriter  io.Writer = os.Stderr
-)
-
-var (
-	DebugPrefix string = "<7>[DEBUG] "
-	InfoPrefix  string = "<6>[INFO] "
-	WarnPrefix  string = "<4>[WARNING] "
-	ErrPrefix   string = "<3>[ERROR] "
-	CritPrefix  string = "<2>[CRITICAL] "
-)
-
-var (
-	DebugLog *log.Logger = log.New(DebugWriter, DebugPrefix, log.LstdFlags)
-	InfoLog  *log.Logger = log.New(InfoWriter, InfoPrefix, log.LstdFlags|log.Lshortfile)
-	WarnLog  *log.Logger = log.New(WarnWriter, WarnPrefix, log.LstdFlags|log.Lshortfile)
-	ErrLog   *log.Logger = log.New(ErrWriter, ErrPrefix, log.LstdFlags|log.Llongfile)
-	CritLog  *log.Logger = log.New(CritWriter, CritPrefix, log.LstdFlags|log.Llongfile)
-)
-
-var loglevel string = "info"
-
-/* CONFIG */
-
-func Init(lvl string, logdate bool) {
-	// Discard I/O for all writers below the selected loglevel; CRITICAL is always written.
-	switch lvl {
-	case "crit":
-		ErrWriter = io.Discard
-		fallthrough
-	case "err":
-		WarnWriter = io.Discard
-		fallthrough
-	case "warn":
-		InfoWriter = io.Discard
-		fallthrough
-	case "info":
-		DebugWriter = io.Discard
-	case "debug":
-		// Nothing to do...
-		break
-	default:
-		fmt.Printf("pkg/log: Flag 'loglevel' has invalid value %#v\npkg/log: Will use default loglevel '%s'\n", lvl, loglevel)
-	}
-
-	if !logdate {
-		DebugLog = log.New(DebugWriter, DebugPrefix, 0)
-		InfoLog = log.New(InfoWriter, InfoPrefix, log.Lshortfile)
-		WarnLog = log.New(WarnWriter, WarnPrefix, log.Lshortfile)
-		ErrLog = log.New(ErrWriter, ErrPrefix, log.Llongfile)
-		CritLog = log.New(CritWriter, CritPrefix, log.Llongfile)
-	} else {
-		DebugLog = log.New(DebugWriter, DebugPrefix, log.LstdFlags)
-		InfoLog = log.New(InfoWriter, InfoPrefix, log.LstdFlags|log.Lshortfile)
-		WarnLog = log.New(WarnWriter, WarnPrefix, log.LstdFlags|log.Lshortfile)
-		ErrLog = log.New(ErrWriter, ErrPrefix, log.LstdFlags|log.Llongfile)
-		CritLog = log.New(CritWriter, CritPrefix, log.LstdFlags|log.Llongfile)
-	}
-
-	loglevel = lvl
-}
-
-/* HELPER */
-
-func Loglevel() string {
-	return loglevel
-}
-
-/* PRIVATE HELPER */
-
-// Return unformatted string
-func printStr(v ...interface{}) string {
-	return fmt.Sprint(v...)
-}
-
-// Return formatted string
-func printfStr(format string, v ...interface{}) string {
-	return fmt.Sprintf(format, v...)
-}
-
-/* PRINT */
-
-// Prints to STDOUT without string formatting; application continues.
-// Used for special cases not requiring log information like date or location.
-func Print(v ...interface{}) {
-	fmt.Fprintln(os.Stdout, v...)
-}
-
-// Prints to STDOUT without string formatting; application exits with error code 0.
-// Used for exiting successfully with a message after an expected outcome, e.g. successful single-call application runs.
-func Exit(v ...interface{}) {
-	fmt.Fprintln(os.Stdout, v...)
-	os.Exit(0)
-}
-
-// Prints to STDOUT without string formatting; application exits with error code 1.
-// Used for terminating with a message after expected errors, e.g. wrong arguments or during init().
-func Abort(v ...interface{}) {
-	fmt.Fprintln(os.Stdout, v...)
-	os.Exit(1)
-}
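For reference, a hypothetical usage sketch of the `Init` level gating removed above (editorial, not part of the patch): writers below the chosen level are rebound to `io.Discard`, so the corresponding calls become no-ops.

```go
package main

import "github.com/ClusterCockpit/cc-backend/pkg/log"

func main() {
	// Severity order gated by Init: debug < info < warn < err < crit.
	log.Init("warn", false)

	log.Debug("discarded: below the 'warn' level")
	log.Info("discarded: below the 'warn' level")
	log.Warn("written to stderr with prefix <4>[WARNING] ")
	log.Error("written to stderr with prefix <3>[ERROR] ")
}
```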
-
-// Prints to DEBUG writer without string formatting; application continues.
-// Used for logging additional information, primarily for development.
-func Debug(v ...interface{}) {
-	DebugLog.Output(2, printStr(v...))
-}
-
-// Prints to INFO writer without string formatting; application continues.
-// Used for logging additional information, e.g. notable returns or common fail-cases.
-func Info(v ...interface{}) {
-	InfoLog.Output(2, printStr(v...))
-}
-
-// Prints to WARNING writer without string formatting; application continues.
-// Used for logging important information, e.g. uncommon edge-cases or administration-related information.
-func Warn(v ...interface{}) {
-	WarnLog.Output(2, printStr(v...))
-}
-
-// Prints to ERROR writer without string formatting; application continues.
-// Used for logging errors, but code can still return default(s) or nil.
-func Error(v ...interface{}) {
-	ErrLog.Output(2, printStr(v...))
-}
-
-// Prints to CRITICAL writer without string formatting; application exits with error code 1.
-// Used for terminating on unexpected errors with date and code location.
-func Fatal(v ...interface{}) {
-	CritLog.Output(2, printStr(v...))
-	os.Exit(1)
-}
-
-// Prints to PANIC function without string formatting; application exits with panic.
-// Used for terminating on unexpected errors with stacktrace.
-func Panic(v ...interface{}) {
-	panic(printStr(v...))
-}
-
-/* PRINT FORMAT */
-
-// Prints to STDOUT with string formatting; application continues.
-// Used for special cases not requiring log information like date or location.
-func Printf(format string, v ...interface{}) {
-	fmt.Fprintf(os.Stdout, format, v...)
-}
-
-// Prints to STDOUT with string formatting; application exits with error code 0.
-// Used for exiting successfully with a message after an expected outcome, e.g. successful single-call application runs.
-func Exitf(format string, v ...interface{}) {
-	fmt.Fprintf(os.Stdout, format, v...)
-	os.Exit(0)
-}
-
-// Prints to STDOUT with string formatting; application exits with error code 1.
-// Used for terminating with a message after expected errors, e.g. wrong arguments or during init().
-func Abortf(format string, v ...interface{}) {
-	fmt.Fprintf(os.Stdout, format, v...)
-	os.Exit(1)
-}
-
-// Prints to DEBUG writer with string formatting; application continues.
-// Used for logging additional information, primarily for development.
-func Debugf(format string, v ...interface{}) {
-	DebugLog.Output(2, printfStr(format, v...))
-}
-
-// Prints to INFO writer with string formatting; application continues.
-// Used for logging additional information, e.g. notable returns or common fail-cases.
-func Infof(format string, v ...interface{}) {
-	InfoLog.Output(2, printfStr(format, v...))
-}
-
-// Prints to WARNING writer with string formatting; application continues.
-// Used for logging important information, e.g. uncommon edge-cases or administration-related information.
-func Warnf(format string, v ...interface{}) {
-	WarnLog.Output(2, printfStr(format, v...))
-}
-
-// Prints to ERROR writer with string formatting; application continues.
-// Used for logging errors, but code can still return default(s) or nil.
-func Errorf(format string, v ...interface{}) {
-	ErrLog.Output(2, printfStr(format, v...))
-}
-
-// Prints to CRITICAL writer with string formatting; application exits with error code 1.
-// Used for terminating on unexpected errors with date and code location.
-func Fatalf(format string, v ...interface{}) {
-	CritLog.Output(2, printfStr(format, v...))
-	os.Exit(1)
-}
-
-// Prints to PANIC function with string formatting; application exits with panic.
-// Used for terminating on unexpected errors with stacktrace.
-func Panicf(format string, v ...interface{}) {
-	panic(printfStr(format, v...))
-}
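The whole patch follows one mechanical pattern: the in-tree logger deleted above is replaced by cc-lib's `ccLogger`, imported under the `cclog` alias, and every call site keeps its function name. A condensed before/after sketch (the log message is taken verbatim from the hunks above; the wrapper function is hypothetical):

```go
// before: in-tree logger
//   import "github.com/ClusterCockpit/cc-backend/pkg/log"
//   log.Errorf("JobArchive Move() error: %v", err)

// after: cc-lib logger, aliased so call sites stay short
import cclog "github.com/ClusterCockpit/cc-lib/ccLogger"

func onMoveError(err error) {
	cclog.Errorf("JobArchive Move() error: %v", err) // same name, new import
}
```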
diff --git a/pkg/lrucache/README.md b/pkg/lrucache/README.md
deleted file mode 100644
index 855a185..0000000
--- a/pkg/lrucache/README.md
+++ /dev/null
@@ -1,124 +0,0 @@
-# In-Memory LRU Cache for Golang Applications
-
-This library can be embedded into your existing go applications
-and play the role *Memcached* or *Redis* might play for others.
-It is inspired by [PHP Symfony's Cache Components](https://symfony.com/doc/current/components/cache/adapters/array_cache_adapter.html),
-having a similar API. This library cannot be used for persistence,
-is not yet properly tested, and is a bit special in a few ways described
-below (especially with regard to the memory usage/`size`).
-
-In addition to the interface described below, an `http.Handler` that can be used as middleware is provided as well.
-
-- Advantages:
-  - Anything (`interface{}`) can be stored as value
-  - As it lives in the application itself, no serialization or de-serialization is needed
-  - As it lives in the application itself, no memory moving/networking is needed
-  - The computation of a new value for a key does __not__ block the full cache (only the key)
-- Disadvantages:
-  - You have to provide a size estimate for every value
-  - __This size estimate should not change (i.e. values should not mutate)__
-  - The cache can only be accessed by one application
-
-## Example
-
-```go
-// Go look at the godocs and ./cache_test.go for more documentation and examples
-
-maxMemory := 1000
-cache := lrucache.New(maxMemory)
-
-bar = cache.Get("foo", func() (value interface{}, ttl time.Duration, size int) {
-	return "bar", 10 * time.Second, len("bar")
-}).(string)
-
-// bar == "bar"
-
-bar = cache.Get("foo", func() (value interface{}, ttl time.Duration, size int) {
-	panic("will not be called")
-}).(string)
-```
-
-## Why does `cache.Get` take a function as argument?
-
-*Using the mechanism described below is optional, the second argument to `Get` can be `nil` and there is a `Put` function as well.*
-
-Because this library is meant to be used by multi-threaded applications and the following would
-result in the same data being fetched twice if both goroutines run in parallel:
-
-```go
-// This code shows what could happen with other cache libraries
-c := lrucache.New(MAX_CACHE_ENTRIES)
-
-for i := 0; i < 2; i++ {
-	go func() {
-		// This code will run twice in different goroutines,
-		// it could overlap. As `fetchData` probably does some
-		// I/O and takes a long time, the probability of both
-		// goroutines calling `fetchData` is very high!
-		url := "http://example.com/foo"
-		contents := c.Get(url)
-		if contents == nil {
-			contents = fetchData(url)
-			c.Set(url, contents)
-		}
-
-		handleData(contents.([]byte))
-	}()
-}
-
-```
-
-Here, if one wanted to make sure that only one of the two goroutines fetches the data,
-the programmer would need to build their own synchronization. That would suck!
-
-```go
-c := lrucache.New(MAX_CACHE_SIZE)
-
-for i := 0; i < 2; i++ {
-	go func() {
-		url := "http://example.com/foo"
-		contents := c.Get(url, func() (interface{}, time.Duration, int) {
-			// This closure will only be called once!
-			// If another goroutine calls `c.Get` while this closure
-			// is still being executed, it will wait.
-			buf := fetchData(url)
-			return buf, 100 * time.Second, len(buf)
-		})
-
-		handleData(contents.([]byte))
-	}()
-}
-
-```
-
-This is much better, as fewer resources are wasted and synchronization is handled by
-the library. If it gets called, the call to the closure happens synchronously. While
-it is being executed, all other cache keys can still be accessed without having to wait
-for the execution to be done.
-
-## How `Get` works
-
-The closure passed to `Get` will be called if the value asked for is not cached or
-expired. It should return the following values:
-
-- The value corresponding to that key and to be stored in the cache
-- The time to live for that value (how long until it expires and needs to be recomputed)
-- A size estimate
-
-When `maxMemory` is reached, cache entries need to be evicted. Theoretically,
-it would be possible to use reflection on every value placed in the cache
-to get its exact size in bytes. This would be very expensive and slow though.
-Also, size can change. Instead of this library calculating the size in bytes, you, the user,
-have to provide a size for every value in whatever unit you like (as long as it is the same unit everywhere).
-
-Suggestions on what to use as size: `len(str)` for strings, `len(slice) * size_of_slice_type`, etc. It is possible
-to use `1` as size for every entry; in that case at most `maxMemory` entries will be in the cache at the same time.
-
-## Effects on GC
-
-Because of the way a garbage collector decides when to run ([explained in the
-runtime package](https://pkg.go.dev/runtime)), having large amounts of data
-sitting in your cache might increase the memory consumption of your process by
-two times the maximum size of the cache. You can decrease the *target
-percentage* to reduce the effect, but then you might have negative performance
-effects when your cache is not filled.
diff --git a/pkg/lrucache/cache.go b/pkg/lrucache/cache.go
deleted file mode 100644
index 220c53b..0000000
--- a/pkg/lrucache/cache.go
+++ /dev/null
@@ -1,292 +0,0 @@
-// Copyright (C) NHR@FAU, University Erlangen-Nuremberg.
-// All rights reserved.
-// Use of this source code is governed by a MIT-style
-// license that can be found in the LICENSE file.
-package lrucache
-
-import (
-	"sync"
-	"time"
-)
-
-// Type of the closure that must be passed to `Get` to
-// compute the value in case it is not cached.
-//
-// Returned values are the computed value to be stored in the cache,
-// the duration until this value will expire, and a size estimate.
-type ComputeValue func() (value interface{}, ttl time.Duration, size int)
-
-type cacheEntry struct {
-	key   string
-	value interface{}
-
-	expiration            time.Time
-	size                  int
-	waitingForComputation int
-
-	next, prev *cacheEntry
-}
-
-type Cache struct {
-	mutex                 sync.Mutex
-	cond                  *sync.Cond
-	maxmemory, usedmemory int
-	entries               map[string]*cacheEntry
-	head, tail            *cacheEntry
-}
-
-// Return a new instance of an LRU In-Memory Cache.
-// Read [the README](./README.md) for more information
-// on what is going on with `maxmemory`.
-func New(maxmemory int) *Cache {
-	cache := &Cache{
-		maxmemory: maxmemory,
-		entries:   map[string]*cacheEntry{},
-	}
-	cache.cond = sync.NewCond(&cache.mutex)
-	return cache
-}
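As a quick orientation before the method bodies, a hypothetical usage sketch of this API (editorial, mirroring the README above; not part of the patch):

```go
c := lrucache.New(100) // budget in caller-defined size units

// Get either returns the cached value or runs the closure exactly
// once, even when several goroutines ask for the same key.
v := c.Get("answer", func() (interface{}, time.Duration, int) {
	return 42, time.Minute, 1 // value, ttl, size estimate
}).(int)

// A nil compute function turns Get into a plain lookup; Put stores
// a precomputed value with an explicit size and TTL.
c.Put("greeting", "hello", len("hello"), time.Minute)
if cached := c.Get("greeting", nil); cached != nil {
	_ = cached.(string)
}
_ = v
```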
-
-// Return the cached value for key `key` or call `computeValue` and
-// store its return value in the cache. If called, the closure will be
-// called synchronously and __shall not call methods on the same cache__
-// or a deadlock might occur. If `computeValue` is nil, the cache is checked
-// and if no entry was found, nil is returned. If another goroutine is currently
-// computing that value, the result is waited for.
-func (c *Cache) Get(key string, computeValue ComputeValue) interface{} {
-	now := time.Now()
-
-	c.mutex.Lock()
-	if entry, ok := c.entries[key]; ok {
-		// The expiration not being set is what shows us that
-		// the computation of that value is still ongoing.
-		for entry.expiration.IsZero() {
-			entry.waitingForComputation += 1
-			c.cond.Wait()
-			entry.waitingForComputation -= 1
-		}
-
-		if now.After(entry.expiration) {
-			if !c.evictEntry(entry) {
-				if entry.expiration.IsZero() {
-					panic("LRUCACHE/CACHE > cache entry that should have been waited for could not be evicted.")
-				}
-				c.mutex.Unlock()
-				return entry.value
-			}
-		} else {
-			if entry != c.head {
-				c.unlinkEntry(entry)
-				c.insertFront(entry)
-			}
-			c.mutex.Unlock()
-			return entry.value
-		}
-	}
-
-	if computeValue == nil {
-		c.mutex.Unlock()
-		return nil
-	}
-
-	entry := &cacheEntry{
-		key:                   key,
-		waitingForComputation: 1,
-	}
-
-	c.entries[key] = entry
-
-	hasPanicked := true
-	defer func() {
-		if hasPanicked {
-			c.mutex.Lock()
-			delete(c.entries, key)
-			entry.expiration = now
-			entry.waitingForComputation -= 1
-		}
-		c.mutex.Unlock()
-	}()
-
-	c.mutex.Unlock()
-	value, ttl, size := computeValue()
-	c.mutex.Lock()
-	hasPanicked = false
-
-	entry.value = value
-	entry.expiration = now.Add(ttl)
-	entry.size = size
-	entry.waitingForComputation -= 1
-
-	// Only broadcast if other goroutines are actually waiting
-	// for a result.
-	if entry.waitingForComputation > 0 {
-		// TODO: Have more than one condition variable so that there are
-		// fewer unnecessary wakeups.
-		c.cond.Broadcast()
-	}
-
-	c.usedmemory += size
-	c.insertFront(entry)
-
-	// Evict only entries with a size of more than zero.
-	// This is the only loop in the implementation outside of the `Keys`
-	// method.
-	evictionCandidate := c.tail
-	for c.usedmemory > c.maxmemory && evictionCandidate != nil {
-		nextCandidate := evictionCandidate.prev
-		if (evictionCandidate.size > 0 || now.After(evictionCandidate.expiration)) &&
-			evictionCandidate.waitingForComputation == 0 {
-			c.evictEntry(evictionCandidate)
-		}
-		evictionCandidate = nextCandidate
-	}
-
-	return value
-}
-
-// Put a new value in the cache. If another goroutine is calling `Get` and
-// computing the value, this function waits for the computation to be done
-// before it overwrites the value.
-func (c *Cache) Put(key string, value interface{}, size int, ttl time.Duration) {
-	now := time.Now()
-	c.mutex.Lock()
-	defer c.mutex.Unlock()
-
-	if entry, ok := c.entries[key]; ok {
-		for entry.expiration.IsZero() {
-			entry.waitingForComputation += 1
-			c.cond.Wait()
-			entry.waitingForComputation -= 1
-		}
-
-		c.usedmemory -= entry.size
-		entry.expiration = now.Add(ttl)
-		entry.size = size
-		entry.value = value
-		c.usedmemory += entry.size
-
-		c.unlinkEntry(entry)
-		c.insertFront(entry)
-		return
-	}
-
-	entry := &cacheEntry{
-		key:        key,
-		value:      value,
-		expiration: now.Add(ttl),
-	}
-	c.entries[key] = entry
-	c.insertFront(entry)
-}
-
-// Remove the value at key `key` from the cache.
-// Return true if the key was in the cache and false
-// otherwise. It is possible that true is returned even
-// though the value already expired.
-// It is possible that false is returned even though the value
-// will show up in the cache if this function is called on a key
-// while that key is being computed.
-func (c *Cache) Del(key string) bool { - c.mutex.Lock() - defer c.mutex.Unlock() - - if entry, ok := c.entries[key]; ok { - return c.evictEntry(entry) - } - return false -} - -// Call f for every entry in the cache. Some sanity checks -// and eviction of expired keys are done as well. -// The cache is fully locked for the complete duration of this call! -func (c *Cache) Keys(f func(key string, val interface{})) { - c.mutex.Lock() - defer c.mutex.Unlock() - - now := time.Now() - - size := 0 - for key, e := range c.entries { - if key != e.key { - panic("LRUCACHE/CACHE > key mismatch") - } - - if now.After(e.expiration) { - if c.evictEntry(e) { - continue - } - } - - if e.prev != nil { - if e.prev.next != e { - panic("LRUCACHE/CACHE > list corrupted") - } - } - - if e.next != nil { - if e.next.prev != e { - panic("LRUCACHE/CACHE > list corrupted") - } - } - - size += e.size - f(key, e.value) - } - - if size != c.usedmemory { - panic("LRUCACHE/CACHE > size calculations failed") - } - - if c.head != nil { - if c.tail == nil || c.head.prev != nil { - panic("LRUCACHE/CACHE > head/tail corrupted") - } - } - - if c.tail != nil { - if c.head == nil || c.tail.next != nil { - panic("LRUCACHE/CACHE > head/tail corrupted") - } - } -} - -func (c *Cache) insertFront(e *cacheEntry) { - e.next = c.head - c.head = e - - e.prev = nil - if e.next != nil { - e.next.prev = e - } - - if c.tail == nil { - c.tail = e - } -} - -func (c *Cache) unlinkEntry(e *cacheEntry) { - if e == c.head { - c.head = e.next - } - if e.prev != nil { - e.prev.next = e.next - } - if e.next != nil { - e.next.prev = e.prev - } - if e == c.tail { - c.tail = e.prev - } -} - -func (c *Cache) evictEntry(e *cacheEntry) bool { - if e.waitingForComputation != 0 { - // panic("LRUCACHE/CACHE > cannot evict this entry as other goroutines need the value") - return false - } - - c.unlinkEntry(e) - c.usedmemory -= e.size - delete(c.entries, e.key) - return true -} diff --git a/pkg/lrucache/cache_test.go b/pkg/lrucache/cache_test.go deleted file mode 100644 index 8bff40e..0000000 --- a/pkg/lrucache/cache_test.go +++ /dev/null @@ -1,223 +0,0 @@ -// Copyright (C) NHR@FAU, University Erlangen-Nuremberg. -// All rights reserved. -// Use of this source code is governed by a MIT-style -// license that can be found in the LICENSE file. 
-package lrucache - -import ( - "sync" - "sync/atomic" - "testing" - "time" -) - -func TestBasics(t *testing.T) { - cache := New(123) - - value1 := cache.Get("foo", func() (interface{}, time.Duration, int) { - return "bar", 1 * time.Second, 0 - }) - - if value1.(string) != "bar" { - t.Error("cache returned wrong value") - } - - value2 := cache.Get("foo", func() (interface{}, time.Duration, int) { - t.Error("value should be cached") - return "", 0, 0 - }) - - if value2.(string) != "bar" { - t.Error("cache returned wrong value") - } - - existed := cache.Del("foo") - if !existed { - t.Error("delete did not work as expected") - } - - value3 := cache.Get("foo", func() (interface{}, time.Duration, int) { - return "baz", 1 * time.Second, 0 - }) - - if value3.(string) != "baz" { - t.Error("cache returned wrong value") - } - - cache.Keys(func(key string, value interface{}) { - if key != "foo" || value.(string) != "baz" { - t.Error("cache corrupted") - } - }) -} - -func TestExpiration(t *testing.T) { - cache := New(123) - - failIfCalled := func() (interface{}, time.Duration, int) { - t.Error("Value should be cached!") - return "", 0, 0 - } - - val1 := cache.Get("foo", func() (interface{}, time.Duration, int) { - return "bar", 5 * time.Millisecond, 0 - }) - val2 := cache.Get("bar", func() (interface{}, time.Duration, int) { - return "foo", 20 * time.Millisecond, 0 - }) - - val3 := cache.Get("foo", failIfCalled).(string) - val4 := cache.Get("bar", failIfCalled).(string) - - if val1 != val3 || val3 != "bar" || val2 != val4 || val4 != "foo" { - t.Error("Wrong values returned") - } - - time.Sleep(10 * time.Millisecond) - - val5 := cache.Get("foo", func() (interface{}, time.Duration, int) { - return "baz", 0, 0 - }) - val6 := cache.Get("bar", failIfCalled) - - if val5.(string) != "baz" || val6.(string) != "foo" { - t.Error("unexpected values") - } - - cache.Keys(func(key string, val interface{}) { - if key != "bar" || val.(string) != "foo" { - t.Error("wrong value expired") - } - }) - - time.Sleep(15 * time.Millisecond) - cache.Keys(func(key string, val interface{}) { - t.Error("cache should be empty now") - }) -} - -func TestEviction(t *testing.T) { - c := New(100) - failIfCalled := func() (interface{}, time.Duration, int) { - t.Error("Value should be cached!") - return "", 0, 0 - } - - v1 := c.Get("foo", func() (interface{}, time.Duration, int) { - return "bar", 1 * time.Second, 1000 - }) - - v2 := c.Get("foo", func() (interface{}, time.Duration, int) { - return "baz", 1 * time.Second, 1000 - }) - - if v1.(string) != "bar" || v2.(string) != "baz" { - t.Error("wrong values returned") - } - - c.Keys(func(key string, val interface{}) { - t.Error("cache should be empty now") - }) - - _ = c.Get("A", func() (interface{}, time.Duration, int) { - return "a", 1 * time.Second, 50 - }) - - _ = c.Get("B", func() (interface{}, time.Duration, int) { - return "b", 1 * time.Second, 50 - }) - - _ = c.Get("A", failIfCalled) - _ = c.Get("B", failIfCalled) - _ = c.Get("C", func() (interface{}, time.Duration, int) { - return "c", 1 * time.Second, 50 - }) - - _ = c.Get("B", failIfCalled) - _ = c.Get("C", failIfCalled) - - v4 := c.Get("A", func() (interface{}, time.Duration, int) { - return "evicted", 1 * time.Second, 25 - }) - - if v4.(string) != "evicted" { - t.Error("value should have been evicted") - } - - c.Keys(func(key string, val interface{}) { - if key != "A" && key != "C" { - t.Errorf("'%s' was not expected", key) - } - }) -} - -// I know that this is a shity test, -// time is relative and unreliable. 
-func TestConcurrency(t *testing.T) {
-	c := New(100)
-	var wg sync.WaitGroup
-
-	numActions := 20000
-	numThreads := 4
-	wg.Add(numThreads)
-
-	var concurrentModifications int32 = 0
-
-	for i := 0; i < numThreads; i++ {
-		go func() {
-			for j := 0; j < numActions; j++ {
-				_ = c.Get("key", func() (interface{}, time.Duration, int) {
-					m := atomic.AddInt32(&concurrentModifications, 1)
-					if m != 1 {
-						t.Error("only one goroutine at a time should calculate a value for the same key")
-					}
-
-					time.Sleep(1 * time.Millisecond)
-					atomic.AddInt32(&concurrentModifications, -1)
-					return "value", 3 * time.Millisecond, 1
-				})
-			}
-
-			wg.Done()
-		}()
-	}
-
-	wg.Wait()
-
-	c.Keys(func(key string, val interface{}) {})
-}
-
-func TestPanic(t *testing.T) {
-	c := New(100)
-
-	c.Put("bar", "baz", 3, 1*time.Minute)
-
-	testpanic := func() {
-		defer func() {
-			if r := recover(); r != nil {
-				if r.(string) != "oops" {
-					t.Fatal("unexpected panic value")
-				}
-			}
-		}()
-
-		_ = c.Get("foo", func() (value interface{}, ttl time.Duration, size int) {
-			panic("oops")
-		})
-
-		t.Fatal("should have panicked!")
-	}
-
-	testpanic()
-
-	v := c.Get("bar", func() (value interface{}, ttl time.Duration, size int) {
-		t.Fatal("should not be called!")
-		return nil, 0, 0
-	})
-
-	if v.(string) != "baz" {
-		t.Fatal("unexpected value")
-	}
-
-	testpanic()
-}
diff --git a/pkg/lrucache/handler.go b/pkg/lrucache/handler.go
deleted file mode 100644
index 90b7527..0000000
--- a/pkg/lrucache/handler.go
+++ /dev/null
@@ -1,124 +0,0 @@
-// Copyright (C) NHR@FAU, University Erlangen-Nuremberg.
-// All rights reserved.
-// Use of this source code is governed by a MIT-style
-// license that can be found in the LICENSE file.
-package lrucache
-
-import (
-	"bytes"
-	"net/http"
-	"strconv"
-	"time"
-)
-
-// HttpHandler can be used as HTTP middleware in order to cache requests,
-// for example static assets. By default, the request's raw URI is used as key and nothing else.
-// Results with a status code other than 200 are cached with a TTL of zero seconds,
-// so they are basically re-fetched as soon as the current fetch is done and a new request
-// for that URI comes in.
-type HttpHandler struct {
-	cache      *Cache
-	fetcher    http.Handler
-	defaultTTL time.Duration
-
-	// Allows overriding the way the cache key is extracted
-	// from the http request. The default is to use the RequestURI.
-	CacheKey func(*http.Request) string
-}
-
-var _ http.Handler = (*HttpHandler)(nil)
-
-type cachedResponseWriter struct {
-	w          http.ResponseWriter
-	statusCode int
-	buf        bytes.Buffer
-}
-
-type cachedResponse struct {
-	headers    http.Header
-	statusCode int
-	data       []byte
-	fetched    time.Time
-}
-
-var _ http.ResponseWriter = (*cachedResponseWriter)(nil)
-
-func (crw *cachedResponseWriter) Header() http.Header {
-	return crw.w.Header()
-}
-
-func (crw *cachedResponseWriter) Write(bytes []byte) (int, error) {
-	return crw.buf.Write(bytes)
-}
-
-func (crw *cachedResponseWriter) WriteHeader(statusCode int) {
-	crw.statusCode = statusCode
-}
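To show how the constructor and the gorilla/mux-style wrapper defined just below are meant to be wired up, a hypothetical sketch (paths, port, and budget are placeholders, not from the patch):

```go
mux := http.NewServeMux()
mux.Handle("/static/", http.FileServer(http.Dir("web/")))

// Wrap the whole mux; GET responses are cached for up to five
// minutes within a ~1 MiB budget (sizes are response byte counts).
cached := lrucache.NewMiddleware(1<<20, 5*time.Minute)(mux)
http.ListenAndServe("localhost:8080", cached)
```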
-
-// Returns a new caching HttpHandler. If no entry in the cache is found or it was too old, `fetcher` is called with
-// a modified http.ResponseWriter and the response is stored in the cache. If `fetcher` sets the "Expires" header,
-// the ttl is set appropriately (otherwise, the default ttl passed as argument here is used).
-// `maxmemory` should be in the unit bytes.
-func NewHttpHandler(maxmemory int, ttl time.Duration, fetcher http.Handler) *HttpHandler {
-	return &HttpHandler{
-		cache:      New(maxmemory),
-		defaultTTL: ttl,
-		fetcher:    fetcher,
-		CacheKey: func(r *http.Request) string {
-			return r.RequestURI
-		},
-	}
-}
-
-// gorilla/mux style middleware:
-func NewMiddleware(maxmemory int, ttl time.Duration) func(http.Handler) http.Handler {
-	return func(next http.Handler) http.Handler {
-		return NewHttpHandler(maxmemory, ttl, next)
-	}
-}
-
-// Tries to serve a response to r from cache, or calls the fetcher and stores the response in the cache for the next time.
-func (h *HttpHandler) ServeHTTP(rw http.ResponseWriter, r *http.Request) {
-	if r.Method != http.MethodGet {
-		h.fetcher.ServeHTTP(rw, r)
-		return
-	}
-
-	cr := h.cache.Get(h.CacheKey(r), func() (interface{}, time.Duration, int) {
-		crw := &cachedResponseWriter{
-			w:          rw,
-			statusCode: 200,
-			buf:        bytes.Buffer{},
-		}
-
-		h.fetcher.ServeHTTP(crw, r)
-
-		cr := &cachedResponse{
-			headers:    rw.Header().Clone(),
-			statusCode: crw.statusCode,
-			data:       crw.buf.Bytes(),
-			fetched:    time.Now(),
-		}
-		cr.headers.Set("Content-Length", strconv.Itoa(len(cr.data)))
-
-		ttl := h.defaultTTL
-		if cr.statusCode != http.StatusOK {
-			ttl = 0
-		} else if cr.headers.Get("Expires") != "" {
-			if expires, err := http.ParseTime(cr.headers.Get("Expires")); err == nil {
-				ttl = time.Until(expires)
-			}
-		}
-
-		return cr, ttl, len(cr.data)
-	}).(*cachedResponse)
-
-	for key, val := range cr.headers {
-		rw.Header()[key] = val
-	}
-
-	cr.headers.Set("Age", strconv.Itoa(int(time.Since(cr.fetched).Seconds())))
-
-	rw.WriteHeader(cr.statusCode)
-	rw.Write(cr.data)
-}
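One detail of `ServeHTTP` above worth illustrating: a fetcher that sets the "Expires" header overrides the default TTL, since the handler re-parses it with `http.ParseTime`. A hypothetical sketch:

```go
h := lrucache.NewHttpHandler(1000, time.Minute, http.HandlerFunc(
	func(rw http.ResponseWriter, r *http.Request) {
		// Cache this response for ~10s instead of the default minute.
		rw.Header().Set("Expires",
			time.Now().Add(10*time.Second).Format(http.TimeFormat))
		rw.Write([]byte("short-lived"))
	}))
```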
diff --git a/pkg/lrucache/handler_test.go b/pkg/lrucache/handler_test.go
deleted file mode 100644
index d908339..0000000
--- a/pkg/lrucache/handler_test.go
+++ /dev/null
@@ -1,75 +0,0 @@
-// Copyright (C) NHR@FAU, University Erlangen-Nuremberg.
-// All rights reserved.
-// Use of this source code is governed by a MIT-style
-// license that can be found in the LICENSE file.
-package lrucache
-
-import (
-	"bytes"
-	"net/http"
-	"net/http/httptest"
-	"testing"
-	"time"
-)
-
-func TestHandlerBasics(t *testing.T) {
-	r := httptest.NewRequest(http.MethodGet, "/test1", nil)
-	rw := httptest.NewRecorder()
-	shouldBeCalled := true
-
-	handler := NewHttpHandler(1000, time.Second, http.HandlerFunc(func(rw http.ResponseWriter, r *http.Request) {
-		rw.Write([]byte("Hello World!"))
-
-		if !shouldBeCalled {
-			t.Fatal("fetcher expected to be called")
-		}
-	}))
-
-	handler.ServeHTTP(rw, r)
-
-	if rw.Code != 200 {
-		t.Fatal("unexpected status code")
-	}
-
-	if !bytes.Equal(rw.Body.Bytes(), []byte("Hello World!")) {
-		t.Fatal("unexpected body")
-	}
-
-	rw = httptest.NewRecorder()
-	shouldBeCalled = false
-	handler.ServeHTTP(rw, r)
-
-	if rw.Code != 200 {
-		t.Fatal("unexpected status code")
-	}
-
-	if !bytes.Equal(rw.Body.Bytes(), []byte("Hello World!")) {
-		t.Fatal("unexpected body")
-	}
-}
-
-// func TestHandlerExpiration(t *testing.T) {
-// 	r := httptest.NewRequest(http.MethodGet, "/test1", nil)
-// 	rw := httptest.NewRecorder()
-// 	i := 1
-// 	now := time.Now()
-
-// 	handler := NewHttpHandler(1000, 1*time.Second, http.HandlerFunc(func(rw http.ResponseWriter, r *http.Request) {
-// 		rw.Header().Set("Expires", now.Add(10*time.Millisecond).Format(http.TimeFormat))
-// 		rw.Write([]byte(strconv.Itoa(i)))
-// 	}))
-
-// 	handler.ServeHTTP(rw, r)
-// 	if !(rw.Body.String() == strconv.Itoa(1)) {
-// 		t.Fatal("unexpected body")
-// 	}
-
-// 	i += 1
-
-// 	time.Sleep(11 * time.Millisecond)
-// 	rw = httptest.NewRecorder()
-// 	handler.ServeHTTP(rw, r)
-// 	if !(rw.Body.String() == strconv.Itoa(1)) {
-// 		t.Fatal("unexpected body")
-// 	}
-// }
diff --git a/pkg/resampler/resampler.go b/pkg/resampler/resampler.go
deleted file mode 100644
index ebc7e88..0000000
--- a/pkg/resampler/resampler.go
+++ /dev/null
@@ -1,123 +0,0 @@
-package resampler
-
-import (
-	"errors"
-	"fmt"
-	"math"
-
-	"github.com/ClusterCockpit/cc-backend/pkg/schema"
-)
-
-func SimpleResampler(data []schema.Float, old_frequency int64, new_frequency int64) ([]schema.Float, int64, error) {
-	if old_frequency == 0 || new_frequency == 0 || new_frequency <= old_frequency {
-		return data, old_frequency, nil
-	}
-
-	if new_frequency%old_frequency != 0 {
-		return nil, 0, errors.New("new sampling frequency should be a multiple of the old frequency")
-	}
-
-	var step int = int(new_frequency / old_frequency)
-	var new_data_length = len(data) / step
-
-	if new_data_length == 0 || len(data) < 100 || new_data_length >= len(data) {
-		return data, old_frequency, nil
-	}
-
-	new_data := make([]schema.Float, new_data_length)
-
-	for i := 0; i < new_data_length; i++ {
-		new_data[i] = data[i*step]
-	}
-
-	return new_data, new_frequency, nil
-}
-
-// Inspired by one of the algorithms from https://skemman.is/bitstream/1946/15343/3/SS_MSthesis.pdf
-// Adapted from https://github.com/haoel/downsampling/blob/master/core/lttb.go
-func LargestTriangleThreeBucket(data []schema.Float, old_frequency int, new_frequency int) ([]schema.Float, int, error) {
-	if old_frequency == 0 || new_frequency == 0 || new_frequency <= old_frequency {
-		return data, old_frequency, nil
-	}
-
-	if new_frequency%old_frequency != 0 {
-		return nil, 0, fmt.Errorf("new sampling frequency: %d should be a multiple of the old frequency: %d", new_frequency, old_frequency)
-	}
-
-	var step int = int(new_frequency / old_frequency)
-	var new_data_length = len(data) / step
-
-	if new_data_length == 0 || len(data) < 100 || new_data_length >= len(data) {
-		return data, old_frequency,
nil - } - - new_data := make([]schema.Float, 0, new_data_length) - - // Bucket size. Leave room for start and end data points - bucketSize := float64(len(data)-2) / float64(new_data_length-2) - - new_data = append(new_data, data[0]) // Always add the first point - - // We have 3 pointers represent for - // > bucketLow - the current bucket's beginning location - // > bucketMiddle - the current bucket's ending location, - // also the beginning location of next bucket - // > bucketHight - the next bucket's ending location. - bucketLow := 1 - bucketMiddle := int(math.Floor(bucketSize)) + 1 - - var prevMaxAreaPoint int - - for i := 0; i < new_data_length-2; i++ { - - bucketHigh := int(math.Floor(float64(i+2)*bucketSize)) + 1 - if bucketHigh >= len(data)-1 { - bucketHigh = len(data) - 2 - } - - // Calculate point average for next bucket (containing c) - avgPointX, avgPointY := calculateAverageDataPoint(data[bucketMiddle:bucketHigh+1], int64(bucketMiddle)) - - // Get the range for current bucket - currBucketStart := bucketLow - currBucketEnd := bucketMiddle - - // Point a - pointX := prevMaxAreaPoint - pointY := data[prevMaxAreaPoint] - - maxArea := -1.0 - - var maxAreaPoint int - flag_ := 0 - for ; currBucketStart < currBucketEnd; currBucketStart++ { - - area := calculateTriangleArea(schema.Float(pointX), pointY, avgPointX, avgPointY, schema.Float(currBucketStart), data[currBucketStart]) - if area > maxArea { - maxArea = area - maxAreaPoint = currBucketStart - } - if math.IsNaN(float64(avgPointY)) { - flag_ = 1 - - } - } - - if flag_ == 1 { - new_data = append(new_data, schema.NaN) // Pick this point from the bucket - - } else { - new_data = append(new_data, data[maxAreaPoint]) // Pick this point from the bucket - } - prevMaxAreaPoint = maxAreaPoint // This MaxArea point is the next's prevMAxAreaPoint - - //move to the next window - bucketLow = bucketMiddle - bucketMiddle = bucketHigh - } - - new_data = append(new_data, data[len(data)-1]) // Always add last - - return new_data, new_frequency, nil -} diff --git a/pkg/resampler/util.go b/pkg/resampler/util.go deleted file mode 100644 index 36d8bed..0000000 --- a/pkg/resampler/util.go +++ /dev/null @@ -1,35 +0,0 @@ -package resampler - -import ( - "math" - - "github.com/ClusterCockpit/cc-backend/pkg/schema" -) - -func calculateTriangleArea(paX, paY, pbX, pbY, pcX, pcY schema.Float) float64 { - area := ((paX-pcX)*(pbY-paY) - (paX-pbX)*(pcY-paY)) * 0.5 - return math.Abs(float64(area)) -} - -func calculateAverageDataPoint(points []schema.Float, xStart int64) (avgX schema.Float, avgY schema.Float) { - flag := 0 - for _, point := range points { - avgX += schema.Float(xStart) - avgY += point - xStart++ - if math.IsNaN(float64(point)) { - flag = 1 - } - } - - l := schema.Float(len(points)) - - avgX /= l - avgY /= l - - if flag == 1 { - return avgX, schema.NaN - } else { - return avgX, avgY - } -} diff --git a/pkg/runtimeEnv/setup.go b/pkg/runtimeEnv/setup.go index cb77b26..e23a004 100644 --- a/pkg/runtimeEnv/setup.go +++ b/pkg/runtimeEnv/setup.go @@ -1,5 +1,5 @@ // Copyright (C) NHR@FAU, University Erlangen-Nuremberg. -// All rights reserved. +// All rights reserved. This file is part of cc-backend. // Use of this source code is governed by a MIT-style // license that can be found in the LICENSE file. 
package runtimeEnv @@ -12,7 +12,7 @@ import ( "strconv" "syscall" - "github.com/ClusterCockpit/cc-backend/pkg/log" + cclog "github.com/ClusterCockpit/cc-lib/ccLogger" ) // Changes the processes user and group to that @@ -23,13 +23,13 @@ func DropPrivileges(username string, group string) error { if group != "" { g, err := user.LookupGroup(group) if err != nil { - log.Warn("Error while looking up group") + cclog.Warn("Error while looking up group") return err } gid, _ := strconv.Atoi(g.Gid) if err := syscall.Setgid(gid); err != nil { - log.Warn("Error while setting gid") + cclog.Warn("Error while setting gid") return err } } @@ -37,13 +37,13 @@ func DropPrivileges(username string, group string) error { if username != "" { u, err := user.Lookup(username) if err != nil { - log.Warn("Error while looking up user") + cclog.Warn("Error while looking up user") return err } uid, _ := strconv.Atoi(u.Uid) if err := syscall.Setuid(uid); err != nil { - log.Warn("Error while setting uid") + cclog.Warn("Error while setting uid") return err } } diff --git a/pkg/schema/cluster.go b/pkg/schema/cluster.go deleted file mode 100644 index 1b9f2cc..0000000 --- a/pkg/schema/cluster.go +++ /dev/null @@ -1,249 +0,0 @@ -// Copyright (C) NHR@FAU, University Erlangen-Nuremberg. -// All rights reserved. -// Use of this source code is governed by a MIT-style -// license that can be found in the LICENSE file. -package schema - -import ( - "fmt" - "strconv" -) - -type Accelerator struct { - ID string `json:"id"` - Type string `json:"type"` - Model string `json:"model"` -} - -type Topology struct { - Node []int `json:"node"` - Socket [][]int `json:"socket"` - MemoryDomain [][]int `json:"memoryDomain"` - Die [][]*int `json:"die,omitempty"` - Core [][]int `json:"core"` - Accelerators []*Accelerator `json:"accelerators,omitempty"` -} - -type MetricValue struct { - Unit Unit `json:"unit"` - Value float64 `json:"value"` -} - -type SubCluster struct { - Name string `json:"name"` - Nodes string `json:"nodes"` - ProcessorType string `json:"processorType"` - Topology Topology `json:"topology"` - FlopRateScalar MetricValue `json:"flopRateScalar"` - FlopRateSimd MetricValue `json:"flopRateSimd"` - MemoryBandwidth MetricValue `json:"memoryBandwidth"` - MetricConfig []MetricConfig `json:"metricConfig,omitempty"` - Footprint []string `json:"footprint,omitempty"` - EnergyFootprint []string `json:"energyFootprint,omitempty"` - SocketsPerNode int `json:"socketsPerNode"` - CoresPerSocket int `json:"coresPerSocket"` - ThreadsPerCore int `json:"threadsPerCore"` -} - -type Metric struct { - Name string `json:"name"` - Unit Unit `json:"unit"` - Peak float64 `json:"peak"` - Normal float64 `json:"normal"` - Caution float64 `json:"caution"` - Alert float64 `json:"alert"` -} - -type SubClusterConfig struct { - Metric - Footprint string `json:"footprint,omitempty"` - Energy string `json:"energy"` - Remove bool `json:"remove"` - LowerIsBetter bool `json:"lowerIsBetter"` -} - -type MetricConfig struct { - Metric - Energy string `json:"energy"` - Scope MetricScope `json:"scope"` - Aggregation string `json:"aggregation"` - Footprint string `json:"footprint,omitempty"` - SubClusters []*SubClusterConfig `json:"subClusters,omitempty"` - Timestep int `json:"timestep"` - LowerIsBetter bool `json:"lowerIsBetter"` -} - -type Cluster struct { - Name string `json:"name"` - MetricConfig []*MetricConfig `json:"metricConfig"` - SubClusters []*SubCluster `json:"subClusters"` -} - -type ClusterSupport struct { - Cluster string `json:"cluster"` - SubClusters []string 
`json:"subclusters"` -} - -type GlobalMetricListItem struct { - Name string `json:"name"` - Unit Unit `json:"unit"` - Scope MetricScope `json:"scope"` - Footprint string `json:"footprint,omitempty"` - Availability []ClusterSupport `json:"availability"` -} - -// Return a list of socket IDs given a list of hwthread IDs. Even if just one -// hwthread is in that socket, add it to the list. If no hwthreads other than -// those in the argument list are assigned to one of the sockets in the first -// return value, return true as the second value. TODO: Optimize this, there -// must be a more efficient way/algorithm. -func (topo *Topology) GetSocketsFromHWThreads( - hwthreads []int, -) (sockets []int, exclusive bool) { - socketsMap := map[int]int{} - for _, hwthread := range hwthreads { - for socket, hwthreadsInSocket := range topo.Socket { - for _, hwthreadInSocket := range hwthreadsInSocket { - if hwthread == hwthreadInSocket { - socketsMap[socket] += 1 - } - } - } - } - - exclusive = true - hwthreadsPerSocket := len(topo.Node) / len(topo.Socket) - sockets = make([]int, 0, len(socketsMap)) - for socket, count := range socketsMap { - sockets = append(sockets, socket) - exclusive = exclusive && count == hwthreadsPerSocket - } - - return sockets, exclusive -} - -// Return a list of socket IDs given a list of core IDs. Even if just one -// core is in that socket, add it to the list. If no cores other than -// those in the argument list are assigned to one of the sockets in the first -// return value, return true as the second value. TODO: Optimize this, there -// must be a more efficient way/algorithm. -func (topo *Topology) GetSocketsFromCores( - cores []int, -) (sockets []int, exclusive bool) { - socketsMap := map[int]int{} - for _, core := range cores { - for _, hwthreadInCore := range topo.Core[core] { - for socket, hwthreadsInSocket := range topo.Socket { - for _, hwthreadInSocket := range hwthreadsInSocket { - if hwthreadInCore == hwthreadInSocket { - socketsMap[socket] += 1 - } - } - } - } - } - - exclusive = true - hwthreadsPerSocket := len(topo.Node) / len(topo.Socket) - sockets = make([]int, 0, len(socketsMap)) - for socket, count := range socketsMap { - sockets = append(sockets, socket) - exclusive = exclusive && count == hwthreadsPerSocket - } - - return sockets, exclusive -} - -// Return a list of core IDs given a list of hwthread IDs. Even if just one -// hwthread is in that core, add it to the list. If no hwthreads other than -// those in the argument list are assigned to one of the cores in the first -// return value, return true as the second value. TODO: Optimize this, there -// must be a more efficient way/algorithm. -func (topo *Topology) GetCoresFromHWThreads( - hwthreads []int, -) (cores []int, exclusive bool) { - coresMap := map[int]int{} - for _, hwthread := range hwthreads { - for core, hwthreadsInCore := range topo.Core { - for _, hwthreadInCore := range hwthreadsInCore { - if hwthread == hwthreadInCore { - coresMap[core] += 1 - } - } - } - } - - exclusive = true - hwthreadsPerCore := len(topo.Node) / len(topo.Core) - cores = make([]int, 0, len(coresMap)) - for core, count := range coresMap { - cores = append(cores, core) - exclusive = exclusive && count == hwthreadsPerCore - } - - return cores, exclusive -} - -// Return a list of memory domain IDs given a list of hwthread IDs. Even if -// just one hwthread is in that memory domain, add it to the list. 
If no -// hwthreads other than those in the argument list are assigned to one of the -// memory domains in the first return value, return true as the second value. -// TODO: Optimize this, there must be a more efficient way/algorithm. -func (topo *Topology) GetMemoryDomainsFromHWThreads( - hwthreads []int, -) (memDoms []int, exclusive bool) { - memDomsMap := map[int]int{} - for _, hwthread := range hwthreads { - for memDom, hwthreadsInmemDom := range topo.MemoryDomain { - for _, hwthreadInmemDom := range hwthreadsInmemDom { - if hwthread == hwthreadInmemDom { - memDomsMap[memDom] += 1 - } - } - } - } - - exclusive = true - hwthreadsPermemDom := len(topo.Node) / len(topo.MemoryDomain) - memDoms = make([]int, 0, len(memDomsMap)) - for memDom, count := range memDomsMap { - memDoms = append(memDoms, memDom) - exclusive = exclusive && count == hwthreadsPermemDom - } - - return memDoms, exclusive -} - -// Temporary fix to convert back from int id to string id for accelerators -func (topo *Topology) GetAcceleratorID(id int) (string, error) { - if id < 0 { - fmt.Printf("ID smaller than 0!\n") - return topo.Accelerators[0].ID, nil - } else if id < len(topo.Accelerators) { - return topo.Accelerators[id].ID, nil - } else { - return "", fmt.Errorf("index %d out of range", id) - } -} - -// Return list of hardware (string) accelerator IDs -func (topo *Topology) GetAcceleratorIDs() []string { - accels := make([]string, 0) - for _, accel := range topo.Accelerators { - accels = append(accels, accel.ID) - } - return accels -} - -// Outdated? Or: Return indices of accelerators in parent array? -func (topo *Topology) GetAcceleratorIDsAsInt() ([]int, error) { - accels := make([]int, 0) - for _, accel := range topo.Accelerators { - id, err := strconv.Atoi(accel.ID) - if err != nil { - return nil, err - } - accels = append(accels, id) - } - return accels, nil -} diff --git a/pkg/schema/config.go b/pkg/schema/config.go deleted file mode 100644 index eda3d91..0000000 --- a/pkg/schema/config.go +++ /dev/null @@ -1,180 +0,0 @@ -// Copyright (C) NHR@FAU, University Erlangen-Nuremberg. -// All rights reserved. -// Use of this source code is governed by a MIT-style -// license that can be found in the LICENSE file. -package schema - -import ( - "encoding/json" - "time" -) - -type LdapConfig struct { - Url string `json:"url"` - UserBase string `json:"user_base"` - SearchDN string `json:"search_dn"` - UserBind string `json:"user_bind"` - UserFilter string `json:"user_filter"` - UserAttr string `json:"username_attr"` - SyncInterval string `json:"sync_interval"` // Parsed using time.ParseDuration. - SyncDelOldUsers bool `json:"sync_del_old_users"` - - // Should an non-existent user be added to the DB if user exists in ldap directory - SyncUserOnLogin bool `json:"syncUserOnLogin"` -} - -type OpenIDConfig struct { - Provider string `json:"provider"` - SyncUserOnLogin bool `json:"syncUserOnLogin"` - UpdateUserOnLogin bool `json:"updateUserOnLogin"` -} - -type JWTAuthConfig struct { - // Specifies for how long a JWT token shall be valid - // as a string parsable by time.ParseDuration(). - MaxAge string `json:"max-age"` - - // Specifies which cookie should be checked for a JWT token (if no authorization header is present) - CookieName string `json:"cookieName"` - - // Deny login for users not in database (but defined in JWT). - // Ignore user roles defined in JWTs ('roles' claim), get them from db. 
-	ValidateUser bool `json:"validateUser"`
-
-	// Specifies which issuer should be accepted when validating external JWTs ('iss' claim)
-	TrustedIssuer string `json:"trustedIssuer"`
-
-	// Should a non-existent user be added to the DB based on the information in the token
-	SyncUserOnLogin bool `json:"syncUserOnLogin"`
-
-	// Should an existing user be updated in the DB based on the information in the token
-	UpdateUserOnLogin bool `json:"updateUserOnLogin"`
-}
-
-type IntRange struct {
-	From int `json:"from"`
-	To   int `json:"to"`
-}
-
-type TimeRange struct {
-	From  *time.Time `json:"from"`
-	To    *time.Time `json:"to"`
-	Range string     `json:"range,omitempty"`
-}
-
-type FilterRanges struct {
-	Duration  *IntRange  `json:"duration"`
-	NumNodes  *IntRange  `json:"numNodes"`
-	StartTime *TimeRange `json:"startTime"`
-}
-
-type ClusterConfig struct {
-	Name                 string          `json:"name"`
-	FilterRanges         *FilterRanges   `json:"filterRanges"`
-	MetricDataRepository json.RawMessage `json:"metricDataRepository"`
-}
-
-type Retention struct {
-	Policy    string `json:"policy"`
-	Location  string `json:"location"`
-	Age       int    `json:"age"`
-	IncludeDB bool   `json:"includeDB"`
-}
-
-type ResampleConfig struct {
-	// Array of resampling target resolutions, in seconds; Example: [600,300,60]
-	Resolutions []int `json:"resolutions"`
-	// Trigger next zoom level at less than this many visible datapoints
-	Trigger int `json:"trigger"`
-}
-
-type CronFrequency struct {
-	// Commit Job Worker [Defaults to '2m']
-	CommitJobWorker string `json:"commit-job-worker"`
-	// Duration Update Worker [Defaults to '5m']
-	DurationWorker string `json:"duration-worker"`
-	// Metric-Footprint Update Worker [Defaults to '10m']
-	FootprintWorker string `json:"footprint-worker"`
-}
-
-// Format of the configuration (file). See below for the defaults.
-type ProgramConfig struct {
-	// Address where the http (or https) server will listen on (for example: 'localhost:80').
-	Addr string `json:"addr"`
-
-	// Addresses from which secured admin API endpoints can be reached, can be wildcard "*"
-	ApiAllowedIPs []string `json:"apiAllowedIPs"`
-
-	// Drop root permissions once .env was read and the port was taken.
-	User  string `json:"user"`
-	Group string `json:"group"`
-
-	// Disable authentication (for everything: API, Web-UI, ...)
-	DisableAuthentication bool `json:"disable-authentication"`
-
-	// If `embed-static-files` is true (default), the frontend files are embedded
-	// directly into the Go binary and expected to be in web/frontend. Only if it
-	// is false are the files in `static-files` served instead.
-	EmbedStaticFiles bool   `json:"embed-static-files"`
-	StaticFiles      string `json:"static-files"`
-
-	// 'sqlite3' or 'mysql' (mysql will work for mariadb as well)
-	DBDriver string `json:"db-driver"`
-
-	// For sqlite3 a filename, for mysql a DSN in this format: https://github.com/go-sql-driver/mysql#dsn-data-source-name (Without query parameters!).
-	DB string `json:"db"`
-
-	// Config for job archive
-	Archive json.RawMessage `json:"archive"`
-
-	// Keep all metric data in the metric data repositories,
-	// do not write to the job-archive.
-	DisableArchive bool `json:"disable-archive"`
-
-	EnableJobTaggers bool `json:"enable-job-taggers"`
-
-	// Validate JSON input against schema
-	Validate bool `json:"validate"`
-
-	// For LDAP Authentication and user synchronisation.
-	LdapConfig   *LdapConfig    `json:"ldap"`
-	JwtConfig    *JWTAuthConfig `json:"jwts"`
-	OpenIDConfig *OpenIDConfig  `json:"oidc"`
-
-	// If 0 or empty, the session does not expire!
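// Editorial sketch (assumed usage; cfg is a hypothetical *ProgramConfig):
// duration-style settings such as session-max-age or jwts.max-age are plain
// strings and are expected to parse with time.ParseDuration:
//
//	maxAge, err := time.ParseDuration(cfg.SessionMaxAge) // e.g. "24h"
//	if err != nil {
//		// report a configuration error or fall back to a default
//	}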
- SessionMaxAge string `json:"session-max-age"` - - // If both those options are not empty, use HTTPS using those certificates. - HttpsCertFile string `json:"https-cert-file"` - HttpsKeyFile string `json:"https-key-file"` - - // If not the empty string and `addr` does not end in ":80", - // redirect every request incoming at port 80 to that url. - RedirectHttpTo string `json:"redirect-http-to"` - - // If overwritten, at least all the options in the defaults below must - // be provided! Most options here can be overwritten by the user. - UiDefaults map[string]any `json:"ui-defaults"` - - // If exists, will enable dynamic zoom in frontend metric plots using the configured values - EnableResampling *ResampleConfig `json:"enable-resampling"` - - // Where to store MachineState files - MachineStateDir string `json:"machine-state-dir"` - - // If not zero, automatically mark jobs as stopped running X seconds longer than their walltime. - StopJobsExceedingWalltime int `json:"stop-jobs-exceeding-walltime"` - - // Defines time X in seconds in which jobs are considered to be "short" and will be filtered in specific views. - ShortRunningJobsDuration int `json:"short-running-jobs-duration"` - - // Energy Mix CO2 Emission Constant [g/kWh] - // If entered, displays estimated CO2 emission for job based on jobs totalEnergy - EmissionConstant int `json:"emission-constant"` - - // Frequency of cron job workers - CronFrequency *CronFrequency `json:"cron-frequency"` - - // Array of Clusters - Clusters []*ClusterConfig `json:"clusters"` -} diff --git a/pkg/schema/float.go b/pkg/schema/float.go deleted file mode 100644 index e7d9857..0000000 --- a/pkg/schema/float.go +++ /dev/null @@ -1,131 +0,0 @@ -// Copyright (C) NHR@FAU, University Erlangen-Nuremberg. -// All rights reserved. -// Use of this source code is governed by a MIT-style -// license that can be found in the LICENSE file. -package schema - -import ( - "errors" - "io" - "math" - "strconv" - - "github.com/ClusterCockpit/cc-backend/pkg/log" -) - -// A custom float type is used so that (Un)MarshalJSON and -// (Un)MarshalGQL can be overloaded and NaN/null can be used. -// The default behaviour of putting every nullable value behind -// a pointer has a bigger overhead. -type Float float64 - -var NaN Float = Float(math.NaN()) -var nullAsBytes []byte = []byte("null") - -func (f Float) IsNaN() bool { - return math.IsNaN(float64(f)) -} - -// NaN will be serialized to `null`. -func (f Float) MarshalJSON() ([]byte, error) { - if f.IsNaN() { - return nullAsBytes, nil - } - - return strconv.AppendFloat(make([]byte, 0, 10), float64(f), 'f', 2, 64), nil -} - -// `null` will be unserialized to NaN. -func (f *Float) UnmarshalJSON(input []byte) error { - s := string(input) - if s == "null" { - *f = NaN - return nil - } - - val, err := strconv.ParseFloat(s, 64) - if err != nil { - log.Warn("Error while parsing custom float") - return err - } - *f = Float(val) - return nil -} - -// UnmarshalGQL implements the graphql.Unmarshaler interface. -func (f *Float) UnmarshalGQL(v interface{}) error { - f64, ok := v.(float64) - if !ok { - return errors.New("invalid Float scalar") - } - - *f = Float(f64) - return nil -} - -// MarshalGQL implements the graphql.Marshaler interface. -// NaN will be serialized to `null`. -func (f Float) MarshalGQL(w io.Writer) { - if f.IsNaN() { - w.Write(nullAsBytes) - } else { - w.Write(strconv.AppendFloat(make([]byte, 0, 10), float64(f), 'f', 2, 64)) - } -} - -// Only used via REST-API, not via GraphQL. 
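// Editorial example (illustrative): the NaN<->null convention above lets a
// slice of Float round-trip through encoding/json without any pointers:
//
//	var vals []Float
//	_ = json.Unmarshal([]byte(`[1.0, null, 3.0]`), &vals)
//	// vals[1].IsNaN() == true
//	out, _ := json.Marshal(vals)
//	// string(out) == "[1.00,null,3.00]"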
-// This uses a lot less allocations per series, -// but it turns out that the performance increase -// from using this is not that big. -func (s *Series) MarshalJSON() ([]byte, error) { - buf := make([]byte, 0, 512+len(s.Data)*8) - buf = append(buf, `{"hostname":"`...) - buf = append(buf, s.Hostname...) - buf = append(buf, '"') - if s.Id != nil { - buf = append(buf, `,"id":"`...) - buf = append(buf, *s.Id...) - buf = append(buf, '"') - } - buf = append(buf, `,"statistics":{"min":`...) - buf = strconv.AppendFloat(buf, s.Statistics.Min, 'f', 2, 64) - buf = append(buf, `,"avg":`...) - buf = strconv.AppendFloat(buf, s.Statistics.Avg, 'f', 2, 64) - buf = append(buf, `,"max":`...) - buf = strconv.AppendFloat(buf, s.Statistics.Max, 'f', 2, 64) - buf = append(buf, '}') - buf = append(buf, `,"data":[`...) - for i := 0; i < len(s.Data); i++ { - if i != 0 { - buf = append(buf, ',') - } - - if s.Data[i].IsNaN() { - buf = append(buf, `null`...) - } else { - buf = strconv.AppendFloat(buf, float64(s.Data[i]), 'f', 2, 32) - } - } - buf = append(buf, ']', '}') - return buf, nil -} - -func ConvertFloatToFloat64(s []Float) []float64 { - fp := make([]float64, len(s)) - - for i, val := range s { - fp[i] = float64(val) - } - - return fp -} - -func GetFloat64ToFloat(s []float64) []Float { - fp := make([]Float, len(s)) - - for i, val := range s { - fp[i] = Float(val) - } - - return fp -} diff --git a/pkg/schema/job.go b/pkg/schema/job.go deleted file mode 100644 index 9ef51aa..0000000 --- a/pkg/schema/job.go +++ /dev/null @@ -1,153 +0,0 @@ -// Copyright (C) NHR@FAU, University Erlangen-Nuremberg. -// All rights reserved. -// Use of this source code is governed by a MIT-style -// license that can be found in the LICENSE file. -package schema - -import ( - "errors" - "fmt" - "io" -) - -// Job struct type -// -// This type contains all metadata of a HPC job. -// -// Job model -// @Description Information of a HPC job. 
- -type Job struct { - Cluster string `json:"cluster" db:"cluster" example:"fritz"` - SubCluster string `json:"subCluster" db:"subcluster" example:"main"` - Partition string `json:"partition,omitempty" db:"cluster_partition" example:"main"` - Project string `json:"project" db:"project" example:"abcd200"` - User string `json:"user" db:"hpc_user" example:"abcd100h"` - State JobState `json:"jobState" db:"job_state" example:"completed" enums:"completed,failed,cancelled,stopped,timeout,out_of_memory"` - Tags []*Tag `json:"tags,omitempty"` - RawEnergyFootprint []byte `json:"-" db:"energy_footprint"` - RawFootprint []byte `json:"-" db:"footprint"` - RawMetaData []byte `json:"-" db:"meta_data"` - RawResources []byte `json:"-" db:"resources"` - Resources []*Resource `json:"resources"` - EnergyFootprint map[string]float64 `json:"energyFootprint"` - Footprint map[string]float64 `json:"footprint"` - MetaData map[string]string `json:"metaData"` - ConcurrentJobs JobLinkResultList `json:"concurrentJobs"` - Energy float64 `json:"energy" db:"energy"` - ArrayJobId int64 `json:"arrayJobId,omitempty" db:"array_job_id" example:"123000"` - Walltime int64 `json:"walltime,omitempty" db:"walltime" example:"86400" minimum:"1"` - RequestedMemory int64 `json:"requestedMemory,omitempty" db:"requested_memory" example:"128000" minimum:"1"` // in MB - JobID int64 `json:"jobId" db:"job_id" example:"123000"` - Duration int32 `json:"duration" db:"duration" example:"43200" minimum:"1"` - SMT int32 `json:"smt,omitempty" db:"smt" example:"4"` - MonitoringStatus int32 `json:"monitoringStatus,omitempty" db:"monitoring_status" example:"1" minimum:"0" maximum:"3"` - Exclusive int32 `json:"exclusive" db:"exclusive" example:"1" minimum:"0" maximum:"2"` - NumAcc int32 `json:"numAcc,omitempty" db:"num_acc" example:"2" minimum:"1"` - NumHWThreads int32 `json:"numHwthreads,omitempty" db:"num_hwthreads" example:"20" minimum:"1"` - NumNodes int32 `json:"numNodes" db:"num_nodes" example:"2" minimum:"1"` - Statistics map[string]JobStatistics `json:"statistics"` - ID *int64 `json:"id,omitempty" db:"id"` - StartTime int64 `json:"startTime" db:"start_time" example:"1649723812"` -} - -type JobLink struct { - ID int64 `json:"id"` - JobID int64 `json:"jobId"` -} - -type JobLinkResultList struct { - Items []*JobLink `json:"items"` - Count int `json:"count"` -} - -const ( - MonitoringStatusDisabled int32 = 0 - MonitoringStatusRunningOrArchiving int32 = 1 - MonitoringStatusArchivingFailed int32 = 2 - MonitoringStatusArchivingSuccessful int32 = 3 -) - -// var JobDefaults Job = Job{ -// Exclusive: 1, -// MonitoringStatus: MonitoringStatusRunningOrArchiving, -// } - -type Unit struct { - Base string `json:"base"` - Prefix string `json:"prefix,omitempty"` -} - -// JobStatistics model -// @Description Specification for job metric statistics. -type JobStatistics struct { - Unit Unit `json:"unit"` - Avg float64 `json:"avg" example:"2500" minimum:"0"` // Job metric average - Min float64 `json:"min" example:"2000" minimum:"0"` // Job metric minimum - Max float64 `json:"max" example:"3000" minimum:"0"` // Job metric maximum -} - -// Tag model -// @Description Defines a tag using name and type. 
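// Editorial sketch (assumed usage, not shown in this file): the Raw* byte
// fields above hold the JSON blobs as stored in the database; the repository
// layer is expected to decode them into the typed maps after scanning, roughly:
//
//	if err := json.Unmarshal(job.RawFootprint, &job.Footprint); err != nil {
//		return err
//	}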
-type Tag struct { - Type string `json:"type" db:"tag_type" example:"Debug"` - Name string `json:"name" db:"tag_name" example:"Testjob"` - Scope string `json:"scope" db:"tag_scope" example:"global"` - ID int64 `json:"id" db:"id"` -} - -// Resource model -// @Description A resource used by a job -type Resource struct { - Hostname string `json:"hostname"` - Configuration string `json:"configuration,omitempty"` - HWThreads []int `json:"hwthreads,omitempty"` - Accelerators []string `json:"accelerators,omitempty"` -} - -type JobState string - -const ( - JobStateRunning JobState = "running" - JobStateCompleted JobState = "completed" - JobStateFailed JobState = "failed" - JobStateCancelled JobState = "cancelled" - JobStateStopped JobState = "stopped" - JobStateTimeout JobState = "timeout" - JobStatePreempted JobState = "preempted" - JobStateOutOfMemory JobState = "out_of_memory" -) - -func (j Job) GoString() string { - return fmt.Sprintf("Job{ID:%d, StartTime:%d, JobID:%v, BaseJob:%v}", - j.ID, j.StartTime, j.JobID, j) -} - -func (e *JobState) UnmarshalGQL(v any) error { - str, ok := v.(string) - if !ok { - return fmt.Errorf("SCHEMA/JOB > enums must be strings") - } - - *e = JobState(str) - if !e.Valid() { - return errors.New("SCHEMA/JOB > invalid job state") - } - - return nil -} - -func (e JobState) MarshalGQL(w io.Writer) { - fmt.Fprintf(w, "\"%s\"", e) -} - -func (e JobState) Valid() bool { - return e == JobStateRunning || - e == JobStateCompleted || - e == JobStateFailed || - e == JobStateCancelled || - e == JobStateStopped || - e == JobStateTimeout || - e == JobStatePreempted || - e == JobStateOutOfMemory -} diff --git a/pkg/schema/metrics.go b/pkg/schema/metrics.go deleted file mode 100644 index fbb85e4..0000000 --- a/pkg/schema/metrics.go +++ /dev/null @@ -1,368 +0,0 @@ -// Copyright (C) NHR@FAU, University Erlangen-Nuremberg. -// All rights reserved. -// Use of this source code is governed by a MIT-style -// license that can be found in the LICENSE file. 
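// Editorial example (illustrative): JobState implements the graphql
// (Un)Marshaler interface, so invalid enum values are rejected on input:
//
//	var s JobState
//	_ = s.UnmarshalGQL("completed") // s == JobStateCompleted, err == nil
//	err := s.UnmarshalGQL("paused") // err: "SCHEMA/JOB > invalid job state"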
-package schema - -import ( - "fmt" - "io" - "math" - "sort" - "unsafe" - - "github.com/ClusterCockpit/cc-backend/internal/util" -) - -type JobData map[string]map[MetricScope]*JobMetric -type ScopedJobStats map[string]map[MetricScope][]*ScopedStats - -type JobMetric struct { - StatisticsSeries *StatsSeries `json:"statisticsSeries,omitempty"` - Unit Unit `json:"unit"` - Series []Series `json:"series"` - Timestep int `json:"timestep"` -} - -type Series struct { - Id *string `json:"id,omitempty"` - Hostname string `json:"hostname"` - Data []Float `json:"data"` - Statistics MetricStatistics `json:"statistics"` -} - -type ScopedStats struct { - Hostname string `json:"hostname"` - Id *string `json:"id,omitempty"` - Data *MetricStatistics `json:"data"` -} - -type MetricStatistics struct { - Avg float64 `json:"avg"` - Min float64 `json:"min"` - Max float64 `json:"max"` -} - -type StatsSeries struct { - Percentiles map[int][]Float `json:"percentiles,omitempty"` - Mean []Float `json:"mean"` - Median []Float `json:"median"` - Min []Float `json:"min"` - Max []Float `json:"max"` -} - -type MetricScope string - -const ( - MetricScopeInvalid MetricScope = "invalid_scope" - - MetricScopeNode MetricScope = "node" - MetricScopeSocket MetricScope = "socket" - MetricScopeMemoryDomain MetricScope = "memoryDomain" - MetricScopeCore MetricScope = "core" - MetricScopeHWThread MetricScope = "hwthread" - - MetricScopeAccelerator MetricScope = "accelerator" -) - -var metricScopeGranularity map[MetricScope]int = map[MetricScope]int{ - MetricScopeNode: 10, - MetricScopeSocket: 5, - MetricScopeMemoryDomain: 4, - MetricScopeCore: 3, - MetricScopeHWThread: 2, - /* Special-Case Accelerator - * -> No conversion possible if native scope is HWTHREAD - * -> Therefore needs to be less than HWTREAD, else max() would return unhandled case - * -> If nativeScope is accelerator, accelerator metrics return correctly - */ - MetricScopeAccelerator: 1, - - MetricScopeInvalid: -1, -} - -func (e *MetricScope) LT(other MetricScope) bool { - a := metricScopeGranularity[*e] - b := metricScopeGranularity[other] - return a < b -} - -func (e *MetricScope) LTE(other MetricScope) bool { - a := metricScopeGranularity[*e] - b := metricScopeGranularity[other] - return a <= b -} - -func (e *MetricScope) Max(other MetricScope) MetricScope { - a := metricScopeGranularity[*e] - b := metricScopeGranularity[other] - if a > b { - return *e - } - return other -} - -func (e *MetricScope) UnmarshalGQL(v interface{}) error { - str, ok := v.(string) - if !ok { - return fmt.Errorf("SCHEMA/METRICS > enums must be strings") - } - - *e = MetricScope(str) - if !e.Valid() { - return fmt.Errorf("SCHEMA/METRICS > %s is not a valid MetricScope", str) - } - return nil -} - -func (e MetricScope) MarshalGQL(w io.Writer) { - fmt.Fprintf(w, "\"%s\"", e) -} - -func (e MetricScope) Valid() bool { - gran, ok := metricScopeGranularity[e] - return ok && gran > 0 -} - -func (jd *JobData) Size() int { - n := 128 - for _, scopes := range *jd { - for _, metric := range scopes { - if metric.StatisticsSeries != nil { - n += len(metric.StatisticsSeries.Max) - n += len(metric.StatisticsSeries.Mean) - n += len(metric.StatisticsSeries.Median) - n += len(metric.StatisticsSeries.Min) - } - - for _, series := range metric.Series { - n += len(series.Data) - } - } - } - return n * int(unsafe.Sizeof(Float(0))) -} - -const smooth bool = false - -func (jm *JobMetric) AddStatisticsSeries() { - if jm.StatisticsSeries != nil || len(jm.Series) < 4 { - return - } - - n, m := 0, len(jm.Series[0].Data) 
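// (Editorial comment: the loop below makes n the longest and m the shortest
// series length; indices below m exist in every series, while indices in
// [m, n) are padded with NaN further down.)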
-	for _, series := range jm.Series {
-		if len(series.Data) > n {
-			n = len(series.Data)
-		}
-		if len(series.Data) < m {
-			m = len(series.Data)
-		}
-	}
-
-	// mean := make([]Float, n)
-	min, median, max := make([]Float, n), make([]Float, n), make([]Float, n)
-	i := 0
-	for ; i < m; i++ {
-		seriesCount := len(jm.Series)
-		// ssum := 0.0
-		smin, smed, smax := math.MaxFloat32, make([]float64, seriesCount), -math.MaxFloat32
-		notnan := 0
-		for j := 0; j < seriesCount; j++ {
-			x := float64(jm.Series[j].Data[i])
-			if math.IsNaN(x) {
-				continue
-			}
-
-			notnan += 1
-			// ssum += x
-			smed[j] = x
-			smin = math.Min(smin, x)
-			smax = math.Max(smax, x)
-		}
-
-		if notnan < 3 {
-			min[i] = NaN
-			// mean[i] = NaN
-			median[i] = NaN
-			max[i] = NaN
-		} else {
-			min[i] = Float(smin)
-			// mean[i] = Float(ssum / float64(notnan))
-			max[i] = Float(smax)
-
-			medianRaw, err := util.Median(smed)
-			if err != nil {
-				median[i] = NaN
-			} else {
-				median[i] = Float(medianRaw)
-			}
-		}
-	}
-
-	for ; i < n; i++ {
-		min[i] = NaN
-		// mean[i] = NaN
-		median[i] = NaN
-		max[i] = NaN
-	}
-
-	if smooth {
-		for i := 2; i < len(median)-2; i++ {
-			if min[i].IsNaN() {
-				continue
-			}
-
-			min[i] = (min[i-2] + min[i-1] + min[i] + min[i+1] + min[i+2]) / 5
-			max[i] = (max[i-2] + max[i-1] + max[i] + max[i+1] + max[i+2]) / 5
-			// mean[i] = (mean[i-2] + mean[i-1] + mean[i] + mean[i+1] + mean[i+2]) / 5
-			// Reduce Median further
-			smoothRaw := []float64{float64(median[i-2]), float64(median[i-1]), float64(median[i]), float64(median[i+1]), float64(median[i+2])}
-			smoothMedian, err := util.Median(smoothRaw)
-			if err != nil {
-				median[i] = NaN
-			} else {
-				median[i] = Float(smoothMedian)
-			}
-		}
-	}
-
-	jm.StatisticsSeries = &StatsSeries{Median: median, Min: min, Max: max} // Mean: mean
-}
-
-func (jd *JobData) AddNodeScope(metric string) bool {
-	scopes, ok := (*jd)[metric]
-	if !ok {
-		return false
-	}
-
-	maxScope := MetricScopeInvalid
-	for scope := range scopes {
-		maxScope = maxScope.Max(scope)
-	}
-
-	if maxScope == MetricScopeInvalid || maxScope == MetricScopeNode {
-		return false
-	}
-
-	jm := scopes[maxScope]
-	hosts := make(map[string][]Series, 32)
-	for _, series := range jm.Series {
-		hosts[series.Hostname] = append(hosts[series.Hostname], series)
-	}
-
-	nodeJm := &JobMetric{
-		Unit:     jm.Unit,
-		Timestep: jm.Timestep,
-		Series:   make([]Series, 0, len(hosts)),
-	}
-	for hostname, series := range hosts {
-		min, sum, max := math.MaxFloat32, 0.0, -math.MaxFloat32
-		for _, series := range series {
-			sum += series.Statistics.Avg
-			min = math.Min(min, series.Statistics.Min)
-			max = math.Max(max, series.Statistics.Max)
-		}
-
-		// Bugfix: aggregate only this host's series. The original ranged over
-		// jm.Series here, which mixed data from all hosts into every node series
-		// and sized the result buffer independently of the NaN padding below.
-		n, m := 0, len(series[0].Data)
-		for _, s := range series {
-			if len(s.Data) > n {
-				n = len(s.Data)
-			}
-			if len(s.Data) < m {
-				m = len(s.Data)
-			}
-		}
-
-		i, data := 0, make([]Float, n)
-		for ; i < m; i++ {
-			x := Float(0.0)
-			for _, s := range series {
-				x += s.Data[i]
-			}
-			data[i] = x
-		}
-
-		for ; i < n; i++ {
-			data[i] = NaN
-		}
-
-		nodeJm.Series = append(nodeJm.Series, Series{
-			Hostname:   hostname,
-			Statistics: MetricStatistics{Min: min, Avg: sum / float64(len(series)), Max: max},
-			Data:       data,
-		})
-	}
-
-	scopes[MetricScopeNode] = nodeJm
-	return true
-}
-
-func (jd *JobData) RoundMetricStats() {
-	// TODO: Make Digit-Precision Configurable?
(Currently: Fixed to 2 Digits) - for _, scopes := range *jd { - for _, jm := range scopes { - for index := range jm.Series { - jm.Series[index].Statistics = MetricStatistics{ - Avg: (math.Round(jm.Series[index].Statistics.Avg*100) / 100), - Min: (math.Round(jm.Series[index].Statistics.Min*100) / 100), - Max: (math.Round(jm.Series[index].Statistics.Max*100) / 100), - } - } - } - } -} - -func (jm *JobMetric) AddPercentiles(ps []int) bool { - if jm.StatisticsSeries == nil { - jm.AddStatisticsSeries() - } - - if len(jm.Series) < 3 { - return false - } - - if jm.StatisticsSeries.Percentiles == nil { - jm.StatisticsSeries.Percentiles = make(map[int][]Float, len(ps)) - } - - n := 0 - for _, series := range jm.Series { - if len(series.Data) > n { - n = len(series.Data) - } - } - - data := make([][]float64, n) - for i := 0; i < n; i++ { - vals := make([]float64, 0, len(jm.Series)) - for _, series := range jm.Series { - if i < len(series.Data) { - vals = append(vals, float64(series.Data[i])) - } - } - - sort.Float64s(vals) - data[i] = vals - } - - for _, p := range ps { - if p < 1 || p > 99 { - panic("SCHEMA/METRICS > invalid percentile") - } - - if _, ok := jm.StatisticsSeries.Percentiles[p]; ok { - continue - } - - percentiles := make([]Float, n) - for i := 0; i < n; i++ { - sorted := data[i] - percentiles[i] = Float(sorted[(len(sorted)*p)/100]) - } - - jm.StatisticsSeries.Percentiles[p] = percentiles - } - - return true -} diff --git a/pkg/schema/node.go b/pkg/schema/node.go deleted file mode 100644 index 7fd4b70..0000000 --- a/pkg/schema/node.go +++ /dev/null @@ -1,41 +0,0 @@ -// Copyright (C) NHR@FAU, University Erlangen-Nuremberg. -// All rights reserved. -// Use of this source code is governed by a MIT-style -// license that can be found in the LICENSE file. 
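// Editorial sketch (illustrative): typical use of the two helpers above when
// preparing plot data; jd is a JobData value, jm a *JobMetric taken from it:
//
//	if jd.AddNodeScope("flops_any") {
//		// a synthesized node-level series now exists per host
//	}
//	if jm.AddPercentiles([]int{25, 75}) {
//		p75 := jm.StatisticsSeries.Percentiles[75] // one Float per timestep
//	}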
-package schema
-
-type NodeState string
-
-const (
-	NodeStateAllocated NodeState = "allocated"
-	NodeStateReserved  NodeState = "reserved"
-	NodeStateIdle      NodeState = "idle"
-	NodeStateMixed     NodeState = "mixed"
-	NodeStateDown      NodeState = "down"
-	NodeStateUnknown   NodeState = "unknown"
-)
-
-type MonitoringState string
-
-const (
-	MonitoringStateFull    MonitoringState = "full"
-	MonitoringStatePartial MonitoringState = "partial"
-	MonitoringStateFailed  MonitoringState = "failed"
-)
-
-type Node struct {
-	ID              int64             `json:"id" db:"id"`
-	Hostname        string            `json:"hostname" db:"hostname" example:"fritz"`
-	Cluster         string            `json:"cluster" db:"cluster" example:"fritz"`
-	SubCluster      string            `json:"subCluster" db:"subcluster" example:"main"`
-	NodeState       NodeState         `json:"nodeState" db:"node_state" example:"idle" enums:"allocated,reserved,idle,mixed,down,unknown"`
-	HealthState     MonitoringState   `json:"healthState" db:"health_state" example:"full" enums:"full,partial,failed"`
-	CpusAllocated   int               `json:"cpusAllocated" db:"cpus_allocated"`
-	CpusTotal       int               `json:"cpusTotal" db:"cpus_total"`
-	MemoryAllocated int               `json:"memoryAllocated" db:"memory_allocated"`
-	MemoryTotal     int               `json:"memoryTotal" db:"memory_total"`
-	GpusAllocated   int               `json:"gpusAllocated" db:"gpus_allocated"`
-	GpusTotal       int               `json:"gpusTotal" db:"gpus_total"`
-	RawMetaData     []byte            `json:"-" db:"meta_data"`
-	MetaData        map[string]string `json:"metaData"`
-}
diff --git a/pkg/schema/schemas/cluster.schema.json b/pkg/schema/schemas/cluster.schema.json
deleted file mode 100644
index c60c100..0000000
--- a/pkg/schema/schemas/cluster.schema.json
+++ /dev/null
@@ -1,339 +0,0 @@
-{
-    "$schema": "http://json-schema.org/draft/2020-12/schema",
-    "$id": "embedfs://cluster.schema.json",
-    "title": "HPC cluster description",
-    "description": "Meta data information of an HPC cluster",
-    "type": "object",
-    "properties": {
-        "name": {
-            "description": "The unique identifier of a cluster",
-            "type": "string"
-        },
-        "metricConfig": {
-            "description": "Metric specifications",
-            "type": "array",
-            "items": {
-                "type": "object",
-                "properties": {
-                    "name": {
-                        "description": "Metric name",
-                        "type": "string"
-                    },
-                    "unit": {
-                        "description": "Metric unit",
-                        "$ref": "embedfs://unit.schema.json"
-                    },
-                    "scope": {
-                        "description": "Native measurement resolution",
-                        "type": "string",
-                        "enum": [
-                            "node",
-                            "socket",
-                            "memoryDomain",
-                            "core",
-                            "hwthread",
-                            "accelerator"
-                        ]
-                    },
-                    "timestep": {
-                        "description": "Frequency of timeseries points in seconds",
-                        "type": "integer"
-                    },
-                    "aggregation": {
-                        "description": "How the metric is aggregated",
-                        "type": "string",
-                        "enum": [
-                            "sum",
-                            "avg"
-                        ]
-                    },
-                    "footprint": {
-                        "description": "Is it a footprint metric and what type",
-                        "type": "string",
-                        "enum": [
-                            "avg",
-                            "max",
-                            "min"
-                        ]
-                    },
-                    "energy": {
-                        "description": "Is it used to calculate job energy",
-                        "type": "string",
-                        "enum": [
-                            "power",
-                            "energy"
-                        ]
-                    },
-                    "lowerIsBetter": {
-                        "description": "Is lower better.",
-                        "type": "boolean"
-                    },
-                    "peak": {
-                        "description": "Metric peak threshold (Upper metric limit)",
-                        "type": "number"
-                    },
-                    "normal": {
-                        "description": "Metric normal threshold",
-                        "type": "number"
-                    },
-                    "caution": {
-                        "description": "Metric caution threshold (Suspicious but does not require immediate action)",
-                        "type": "number"
-                    },
-                    "alert": {
-                        "description": "Metric alert threshold (Requires immediate action)",
-                        "type": "number"
-                    },
-                    "subClusters": {
-                        "description":
"Array of cluster hardware partition metric thresholds", - "type": "array", - "items": { - "type": "object", - "properties": { - "name": { - "description": "Hardware partition name", - "type": "string" - }, - "footprint": { - "description": "Is it a footprint metric and what type. Overwrite global setting", - "type": "string", - "enum": [ - "avg", - "max", - "min" - ] - }, - "energy": { - "description": "Is it used to calculate job energy. Overwrite global", - "type": "string", - "enum": [ - "power", - "energy" - ] - }, - "lowerIsBetter": { - "description": "Is lower better. Overwrite global", - "type": "boolean" - }, - "peak": { - "description": "The maximum possible metric value", - "type": "number" - }, - "normal": { - "description": "A common metric value level", - "type": "number" - }, - "caution": { - "description": "Metric value requires attention", - "type": "number" - }, - "alert": { - "description": "Metric value requiring immediate attention", - "type": "number" - }, - "remove": { - "description": "Remove this metric for this subcluster", - "type": "boolean" - } - }, - "required": [ - "name" - ] - } - } - }, - "required": [ - "name", - "unit", - "scope", - "timestep", - "aggregation", - "peak", - "normal", - "caution", - "alert" - ] - }, - "minItems": 1 - }, - "subClusters": { - "description": "Array of cluster hardware partitions", - "type": "array", - "items": { - "type": "object", - "properties": { - "name": { - "description": "Hardware partition name", - "type": "string" - }, - "processorType": { - "description": "Processor type", - "type": "string" - }, - "socketsPerNode": { - "description": "Number of sockets per node", - "type": "integer" - }, - "coresPerSocket": { - "description": "Number of cores per socket", - "type": "integer" - }, - "threadsPerCore": { - "description": "Number of SMT threads per core", - "type": "integer" - }, - "flopRateScalar": { - "description": "Theoretical node peak flop rate for scalar code in GFlops/s", - "type": "object", - "properties": { - "unit": { - "description": "Metric unit", - "$ref": "embedfs://unit.schema.json" - }, - "value": { - "type": "number" - } - } - }, - "flopRateSimd": { - "description": "Theoretical node peak flop rate for SIMD code in GFlops/s", - "type": "object", - "properties": { - "unit": { - "description": "Metric unit", - "$ref": "embedfs://unit.schema.json" - }, - "value": { - "type": "number" - } - } - }, - "memoryBandwidth": { - "description": "Theoretical node peak memory bandwidth in GB/s", - "type": "object", - "properties": { - "unit": { - "description": "Metric unit", - "$ref": "embedfs://unit.schema.json" - }, - "value": { - "type": "number" - } - } - }, - "nodes": { - "description": "Node list expression", - "type": "string" - }, - "topology": { - "description": "Node topology", - "type": "object", - "properties": { - "node": { - "description": "HwTread lists of node", - "type": "array", - "items": { - "type": "integer" - } - }, - "socket": { - "description": "HwTread lists of sockets", - "type": "array", - "items": { - "type": "array", - "items": { - "type": "integer" - } - } - }, - "memoryDomain": { - "description": "HwTread lists of memory domains", - "type": "array", - "items": { - "type": "array", - "items": { - "type": "integer" - } - } - }, - "die": { - "description": "HwTread lists of dies", - "type": "array", - "items": { - "type": "array", - "items": { - "type": "integer" - } - } - }, - "core": { - "description": "HwTread lists of cores", - "type": "array", - "items": { - "type": "array", - 
"items": { - "type": "integer" - } - } - }, - "accelerators": { - "type": "array", - "description": "List of of accelerator devices", - "items": { - "type": "object", - "properties": { - "id": { - "type": "string", - "description": "The unique device id" - }, - "type": { - "type": "string", - "description": "The accelerator type", - "enum": [ - "Nvidia GPU", - "AMD GPU", - "Intel GPU" - ] - }, - "model": { - "type": "string", - "description": "The accelerator model" - } - }, - "required": [ - "id", - "type", - "model" - ] - } - } - }, - "required": [ - "node", - "socket", - "memoryDomain" - ] - } - }, - "required": [ - "name", - "nodes", - "topology", - "processorType", - "socketsPerNode", - "coresPerSocket", - "threadsPerCore", - "flopRateScalar", - "flopRateSimd", - "memoryBandwidth" - ] - }, - "minItems": 1 - } - }, - "required": [ - "name", - "metricConfig", - "subClusters" - ] -} diff --git a/pkg/schema/schemas/config.schema.json b/pkg/schema/schemas/config.schema.json deleted file mode 100644 index 2d22d6f..0000000 --- a/pkg/schema/schemas/config.schema.json +++ /dev/null @@ -1,446 +0,0 @@ -{ - "$schema": "http://json-schema.org/draft/2020-12/schema", - "$id": "embedfs://config.schema.json", - "title": "cc-backend configuration file schema", - "type": "object", - "properties": { - "addr": { - "description": "Address where the http (or https) server will listen on (for example: 'localhost:80').", - "type": "string" - }, - "apiAllowedIPs": { - "description": "Addresses from which secured API endpoints can be reached", - "type": "array", - "items": { - "type": "string" - } - }, - "user": { - "description": "Drop root permissions once .env was read and the port was taken. Only applicable if using privileged port.", - "type": "string" - }, - "group": { - "description": "Drop root permissions once .env was read and the port was taken. 
Only applicable if using privileged port.", - "type": "string" - }, - "disable-authentication": { - "description": "Disable authentication (for everything: API, Web-UI, ...).", - "type": "boolean" - }, - "embed-static-files": { - "description": "If all files in `web/frontend/public` should be served from within the binary itself (they are embedded) or not.", - "type": "boolean" - }, - "static-files": { - "description": "Folder where static assets can be found, if embed-static-files is false.", - "type": "string" - }, - "db-driver": { - "description": "sqlite3 or mysql (mysql will work for mariadb as well).", - "type": "string", - "enum": ["sqlite3", "mysql"] - }, - "db": { - "description": "For sqlite3 a filename, for mysql a DSN in this format: https://github.com/go-sql-driver/mysql#dsn-data-source-name (Without query parameters!).", - "type": "string" - }, - "archive": { - "description": "Configuration keys for job-archive", - "type": "object", - "properties": { - "kind": { - "description": "Backend type for job-archive", - "type": "string", - "enum": ["file", "s3"] - }, - "path": { - "description": "Path to job archive for file backend", - "type": "string" - }, - "compression": { - "description": "Setup automatic compression for jobs older than number of days", - "type": "integer" - }, - "retention": { - "description": "Configuration keys for retention", - "type": "object", - "properties": { - "policy": { - "description": "Retention policy", - "type": "string", - "enum": ["none", "delete", "move"] - }, - "includeDB": { - "description": "Also remove jobs from database", - "type": "boolean" - }, - "age": { - "description": "Act on jobs with startTime older than age (in days)", - "type": "integer" - }, - "location": { - "description": "The target directory for retention. Only applicable for retention move.", - "type": "string" - } - }, - "required": ["policy"] - } - }, - "required": ["kind"] - }, - "disable-archive": { - "description": "Keep all metric data in the metric data repositories, do not write to the job-archive.", - "type": "boolean" - }, - "enable-job-taggers": { - "description": "Turn on automatic application and jobclass taggers", - "type": "boolean" - }, - "validate": { - "description": "Validate all input json documents against json schema.", - "type": "boolean" - }, - "session-max-age": { - "description": "Specifies for how long a session shall be valid as a string parsable by time.ParseDuration(). If 0 or empty, the session/token does not expire!", - "type": "string" - }, - "https-cert-file": { - "description": "Filepath to SSL certificate. If also https-key-file is set use HTTPS using those certificates.", - "type": "string" - }, - "https-key-file": { - "description": "Filepath to SSL key file. If also https-cert-file is set use HTTPS using those certificates.", - "type": "string" - }, - "redirect-http-to": { - "description": "If not the empty string and addr does not end in :80, redirect every request incoming at port 80 to that url.", - "type": "string" - }, - "stop-jobs-exceeding-walltime": { - "description": "If not zero, automatically mark jobs as stopped running X seconds longer than their walltime. 
Only applies if walltime is set for job.",
-      "type": "integer"
-    },
-    "short-running-jobs-duration": {
-      "description": "Do not show running jobs shorter than X seconds.",
-      "type": "integer"
-    },
-    "emission-constant": {
-      "description": "Energy mix CO2 emission constant [g/kWh]. If set, an estimated CO2 emission is displayed per job, based on the job's total energy.",
-      "type": "integer"
-    },
-    "cron-frequency": {
-      "description": "Frequency of cron job workers.",
-      "type": "object",
-      "properties": {
-        "duration-worker": {
-          "description": "Duration Update Worker [Defaults to '5m']",
-          "type": "string"
-        },
-        "footprint-worker": {
-          "description": "Metric-Footprint Update Worker [Defaults to '10m']",
-          "type": "string"
-        }
-      }
-    },
-    "enable-resampling": {
-      "description": "Enable dynamic zoom in frontend metric plots.",
-      "type": "object",
-      "properties": {
-        "trigger": {
-          "description": "Trigger next zoom level at less than this many visible datapoints.",
-          "type": "integer"
-        },
-        "resolutions": {
-          "description": "Array of resampling target resolutions, in seconds.",
-          "type": "array",
-          "items": {
-            "type": "integer"
-          }
-        }
-      },
-      "required": ["trigger", "resolutions"]
-    },
-    "jwts": {
-      "description": "For JWT token authentication.",
-      "type": "object",
-      "properties": {
-        "max-age": {
-          "description": "Configure how long a token is valid. As string parsable by time.ParseDuration()",
-          "type": "string"
-        },
-        "cookieName": {
-          "description": "Cookie that should be checked for a JWT token.",
-          "type": "string"
-        },
-        "validateUser": {
-          "description": "Deny login for users not in database (but defined in JWT). Overwrite roles in JWT with database roles.",
-          "type": "boolean"
-        },
-        "trustedIssuer": {
-          "description": "Issuer that should be accepted when validating external JWTs",
-          "type": "string"
-        },
-        "syncUserOnLogin": {
-          "description": "Add non-existent user to DB at login attempt with values provided in JWT.",
-          "type": "boolean"
-        }
-      },
-      "required": ["max-age"]
-    },
-    "oidc": {
-      "description": "For OpenID Connect authentication.",
-      "type": "object",
-      "properties": {
-        "provider": {
-          "description": "OpenID Connect provider.",
-          "type": "string"
-        },
-        "syncUserOnLogin": {
-          "description": "Add non-existent user to DB at login attempt with values provided in the OIDC token.",
-          "type": "boolean"
-        },
-        "updateUserOnLogin": {
-          "description": "Update an existing user in the DB at login attempt with values provided in the OIDC token.",
-          "type": "boolean"
-        }
-      },
-      "required": ["provider"]
-    },
-    "ldap": {
-      "description": "For LDAP Authentication and user synchronisation.",
-      "type": "object",
-      "properties": {
-        "url": {
-          "description": "URL of LDAP directory server.",
-          "type": "string"
-        },
-        "user_base": {
-          "description": "Base DN of user tree root.",
-          "type": "string"
-        },
-        "search_dn": {
-          "description": "DN for authenticating LDAP admin account with general read rights.",
-          "type": "string"
-        },
-        "user_bind": {
-          "description": "Expression used to authenticate users via LDAP bind. Must contain uid={username}.",
-          "type": "string"
-        },
-        "user_filter": {
-          "description": "Filter to extract users for syncing.",
-          "type": "string"
-        },
-        "username_attr": {
-          "description": "Attribute with full username. Default: gecos",
-          "type": "string"
-        },
-        "sync_interval": {
-          "description": "Interval used for syncing local user table with LDAP directory.
Parsed using time.ParseDuration.", - "type": "string" - }, - "sync_del_old_users": { - "description": "Delete obsolete users in database.", - "type": "boolean" - }, - "syncUserOnLogin": { - "description": "Add non-existent user to DB at login attempt if user exists in Ldap directory", - "type": "boolean" - } - }, - "required": ["url", "user_base", "search_dn", "user_bind", "user_filter"] - }, - "clusters": { - "description": "Configuration for the clusters to be displayed.", - "type": "array", - "items": { - "type": "object", - "properties": { - "name": { - "description": "The name of the cluster.", - "type": "string" - }, - "metricDataRepository": { - "description": "Type of the metric data repository for this cluster", - "type": "object", - "properties": { - "kind": { - "type": "string", - "enum": ["influxdb", "prometheus", "cc-metric-store", "test"] - }, - "url": { - "type": "string" - }, - "token": { - "type": "string" - } - }, - "required": ["kind", "url"] - }, - "filterRanges": { - "description": "This option controls the slider ranges for the UI controls of numNodes, duration, and startTime.", - "type": "object", - "properties": { - "numNodes": { - "description": "UI slider range for number of nodes", - "type": "object", - "properties": { - "from": { - "type": "integer" - }, - "to": { - "type": "integer" - } - }, - "required": ["from", "to"] - }, - "duration": { - "description": "UI slider range for duration", - "type": "object", - "properties": { - "from": { - "type": "integer" - }, - "to": { - "type": "integer" - } - }, - "required": ["from", "to"] - }, - "startTime": { - "description": "UI slider range for start time", - "type": "object", - "properties": { - "from": { - "type": "string", - "format": "date-time" - }, - "to": { - "type": "null" - } - }, - "required": ["from", "to"] - } - }, - "required": ["numNodes", "duration", "startTime"] - } - }, - "required": ["name", "metricDataRepository", "filterRanges"], - "minItems": 1 - } - }, - "ui-defaults": { - "description": "Default configuration for web UI", - "type": "object", - "properties": { - "plot_general_colorBackground": { - "description": "Color plot background according to job average threshold limits", - "type": "boolean" - }, - "plot_general_lineWidth": { - "description": "Initial linewidth", - "type": "integer" - }, - "plot_list_jobsPerPage": { - "description": "Jobs shown per page in job lists", - "type": "integer" - }, - "plot_view_plotsPerRow": { - "description": "Number of plots per row in single job view", - "type": "integer" - }, - "plot_view_showPolarplot": { - "description": "Option to toggle polar plot in single job view", - "type": "boolean" - }, - "plot_view_showRoofline": { - "description": "Option to toggle roofline plot in single job view", - "type": "boolean" - }, - "plot_view_showStatTable": { - "description": "Option to toggle the node statistic table in single job view", - "type": "boolean" - }, - "system_view_selectedMetric": { - "description": "Initial metric shown in system view", - "type": "string" - }, - "job_view_showFootprint": { - "description": "Option to toggle footprint ui in single job view", - "type": "boolean" - }, - "job_list_usePaging": { - "description": "Option to switch from continous scroll to paging", - "type": "boolean" - }, - "analysis_view_histogramMetrics": { - "description": "Metrics to show as job count histograms in analysis view", - "type": "array", - "items": { - "type": "string", - "minItems": 1 - } - }, - "analysis_view_scatterPlotMetrics": { - "description": "Initial 
scatter plto configuration in analysis view", - "type": "array", - "items": { - "type": "array", - "items": { - "type": "string", - "minItems": 2, - "maxItems": 2 - }, - "minItems": 1 - } - }, - "job_view_nodestats_selectedMetrics": { - "description": "Initial metrics shown in node statistics table of single job view", - "type": "array", - "items": { - "type": "string", - "minItems": 1 - } - }, - "job_view_selectedMetrics": { - "description": "Initial metrics shown as plots in single job view", - "type": "array", - "items": { - "type": "string", - "minItems": 1 - } - }, - "plot_general_colorscheme": { - "description": "Initial color scheme", - "type": "array", - "items": { - "type": "string", - "minItems": 1 - } - }, - "plot_list_selectedMetrics": { - "description": "Initial metric plots shown in jobs lists", - "type": "array", - "items": { - "type": "string", - "minItems": 1 - } - } - }, - "required": [ - "plot_general_colorBackground", - "plot_general_lineWidth", - "plot_list_jobsPerPage", - "plot_view_plotsPerRow", - "plot_view_showPolarplot", - "plot_view_showRoofline", - "plot_view_showStatTable", - "system_view_selectedMetric", - "job_view_showFootprint", - "job_list_usePaging", - "analysis_view_histogramMetrics", - "analysis_view_scatterPlotMetrics", - "job_view_nodestats_selectedMetrics", - "job_view_selectedMetrics", - "plot_general_colorscheme", - "plot_list_selectedMetrics" - ] - } - }, - "required": ["jwts", "clusters", "apiAllowedIPs"] -} diff --git a/pkg/schema/schemas/job-data.schema.json b/pkg/schema/schemas/job-data.schema.json deleted file mode 100644 index c0c492b..0000000 --- a/pkg/schema/schemas/job-data.schema.json +++ /dev/null @@ -1,490 +0,0 @@ -{ - "$schema": "http://json-schema.org/draft/2020-12/schema", - "$id": "embedfs://job-data.schema.json", - "title": "Job metric data list", - "description": "Collection of metric data of a HPC job", - "type": "object", - "properties": { - "mem_used": { - "description": "Memory capacity used", - "type": "object", - "properties": { - "node": { - "$ref": "embedfs://job-metric-data.schema.json" - } - }, - "required": [ - "node" - ] - }, - "flops_any": { - "description": "Total flop rate with DP flops scaled up", - "properties": { - "node": { - "$ref": "embedfs://job-metric-data.schema.json" - }, - "socket": { - "$ref": "embedfs://job-metric-data.schema.json" - }, - "memoryDomain": { - "$ref": "embedfs://job-metric-data.schema.json" - }, - "core": { - "$ref": "embedfs://job-metric-data.schema.json" - }, - "hwthread": { - "$ref": "embedfs://job-metric-data.schema.json" - } - }, - "minProperties": 1 - }, - "mem_bw": { - "description": "Main memory bandwidth", - "properties": { - "node": { - "$ref": "embedfs://job-metric-data.schema.json" - }, - "socket": { - "$ref": "embedfs://job-metric-data.schema.json" - }, - "memoryDomain": { - "$ref": "embedfs://job-metric-data.schema.json" - } - }, - "minProperties": 1 - }, - "net_bw": { - "description": "Total fast interconnect network bandwidth", - "type": "object", - "properties": { - "node": { - "$ref": "embedfs://job-metric-data.schema.json" - } - }, - "required": [ - "node" - ] - }, - "ipc": { - "description": "Instructions executed per cycle", - "properties": { - "node": { - "$ref": "embedfs://job-metric-data.schema.json" - }, - "socket": { - "$ref": "embedfs://job-metric-data.schema.json" - }, - "memoryDomain": { - "$ref": "embedfs://job-metric-data.schema.json" - }, - "core": { - "$ref": "embedfs://job-metric-data.schema.json" - }, - "hwthread": { - "$ref": 
"embedfs://job-metric-data.schema.json" - } - }, - "minProperties": 1 - }, - "cpu_user": { - "description": "CPU user active core utilization", - "properties": { - "node": { - "$ref": "embedfs://job-metric-data.schema.json" - }, - "socket": { - "$ref": "embedfs://job-metric-data.schema.json" - }, - "memoryDomain": { - "$ref": "embedfs://job-metric-data.schema.json" - }, - "core": { - "$ref": "embedfs://job-metric-data.schema.json" - }, - "hwthread": { - "$ref": "embedfs://job-metric-data.schema.json" - } - }, - "minProperties": 1 - }, - "cpu_load": { - "description": "CPU requested core utilization (load 1m)", - "properties": { - "node": { - "$ref": "embedfs://job-metric-data.schema.json" - } - }, - "required": [ - "node" - ] - }, - "flops_dp": { - "description": "Double precision flop rate", - "properties": { - "node": { - "$ref": "embedfs://job-metric-data.schema.json" - }, - "socket": { - "$ref": "embedfs://job-metric-data.schema.json" - }, - "memoryDomain": { - "$ref": "embedfs://job-metric-data.schema.json" - }, - "core": { - "$ref": "embedfs://job-metric-data.schema.json" - }, - "hwthread": { - "$ref": "embedfs://job-metric-data.schema.json" - } - }, - "minProperties": 1 - }, - "flops_sp": { - "description": "Single precision flops rate", - "properties": { - "node": { - "$ref": "embedfs://job-metric-data.schema.json" - }, - "socket": { - "$ref": "embedfs://job-metric-data.schema.json" - }, - "memoryDomain": { - "$ref": "embedfs://job-metric-data.schema.json" - }, - "core": { - "$ref": "embedfs://job-metric-data.schema.json" - }, - "hwthread": { - "$ref": "embedfs://job-metric-data.schema.json" - } - }, - "minProperties": 1 - }, - "vectorization_ratio": { - "description": "Fraction of arithmetic instructions using SIMD instructions", - "properties": { - "node": { - "$ref": "embedfs://job-metric-data.schema.json" - }, - "socket": { - "$ref": "embedfs://job-metric-data.schema.json" - }, - "memoryDomain": { - "$ref": "embedfs://job-metric-data.schema.json" - }, - "core": { - "$ref": "embedfs://job-metric-data.schema.json" - }, - "hwthread": { - "$ref": "embedfs://job-metric-data.schema.json" - } - }, - "minProperties": 1 - }, - "cpu_power": { - "description": "CPU power consumption", - "properties": { - "node": { - "$ref": "embedfs://job-metric-data.schema.json" - }, - "socket": { - "$ref": "embedfs://job-metric-data.schema.json" - } - }, - "minProperties": 1 - }, - "mem_power": { - "description": "Memory power consumption", - "properties": { - "node": { - "$ref": "embedfs://job-metric-data.schema.json" - }, - "socket": { - "$ref": "embedfs://job-metric-data.schema.json" - } - }, - "minProperties": 1 - }, - "acc_utilization": { - "description": "GPU utilization", - "properties": { - "accelerator": { - "$ref": "embedfs://job-metric-data.schema.json" - } - }, - "required": [ - "accelerator" - ] - }, - "acc_mem_used": { - "description": "GPU memory capacity used", - "properties": { - "accelerator": { - "$ref": "embedfs://job-metric-data.schema.json" - } - }, - "required": [ - "accelerator" - ] - }, - "acc_power": { - "description": "GPU power consumption", - "properties": { - "accelerator": { - "$ref": "embedfs://job-metric-data.schema.json" - } - }, - "required": [ - "accelerator" - ] - }, - "clock": { - "description": "Average core frequency", - "properties": { - "node": { - "$ref": "embedfs://job-metric-data.schema.json" - }, - "socket": { - "$ref": "embedfs://job-metric-data.schema.json" - }, - "memoryDomain": { - "$ref": "embedfs://job-metric-data.schema.json" - }, - "core": { - 
"$ref": "embedfs://job-metric-data.schema.json" - }, - "hwthread": { - "$ref": "embedfs://job-metric-data.schema.json" - } - }, - "minProperties": 1 - }, - "eth_read_bw": { - "description": "Ethernet read bandwidth", - "properties": { - "node": { - "$ref": "embedfs://job-metric-data.schema.json" - } - }, - "required": [ - "node" - ] - }, - "eth_write_bw": { - "description": "Ethernet write bandwidth", - "properties": { - "node": { - "$ref": "embedfs://job-metric-data.schema.json" - } - }, - "required": [ - "node" - ] - }, - "filesystems": { - "description": "Array of filesystems", - "type": "array", - "items": { - "type": "object", - "properties": { - "name": { - "type": "string" - }, - "type": { - "type": "string", - "enum": [ - "nfs", - "lustre", - "gpfs", - "nvme", - "ssd", - "hdd", - "beegfs" - ] - }, - "read_bw": { - "description": "File system read bandwidth", - "properties": { - "node": { - "$ref": "embedfs://job-metric-data.schema.json" - } - }, - "required": [ - "node" - ] - }, - "write_bw": { - "description": "File system write bandwidth", - "properties": { - "node": { - "$ref": "embedfs://job-metric-data.schema.json" - } - }, - "required": [ - "node" - ] - }, - "read_req": { - "description": "File system read requests", - "properties": { - "node": { - "$ref": "embedfs://job-metric-data.schema.json" - } - }, - "required": [ - "node" - ] - }, - "write_req": { - "description": "File system write requests", - "properties": { - "node": { - "$ref": "embedfs://job-metric-data.schema.json" - } - }, - "required": [ - "node" - ] - }, - "inodes": { - "description": "File system write requests", - "properties": { - "node": { - "$ref": "embedfs://job-metric-data.schema.json" - } - }, - "required": [ - "node" - ] - }, - "accesses": { - "description": "File system open and close", - "properties": { - "node": { - "$ref": "embedfs://job-metric-data.schema.json" - } - }, - "required": [ - "node" - ] - }, - "fsync": { - "description": "File system fsync", - "properties": { - "node": { - "$ref": "embedfs://job-metric-data.schema.json" - } - }, - "required": [ - "node" - ] - }, - "create": { - "description": "File system create", - "properties": { - "node": { - "$ref": "embedfs://job-metric-data.schema.json" - } - }, - "required": [ - "node" - ] - }, - "open": { - "description": "File system open", - "properties": { - "node": { - "$ref": "embedfs://job-metric-data.schema.json" - } - }, - "required": [ - "node" - ] - }, - "close": { - "description": "File system close", - "properties": { - "node": { - "$ref": "embedfs://job-metric-data.schema.json" - } - }, - "required": [ - "node" - ] - }, - "seek": { - "description": "File system seek", - "properties": { - "node": { - "$ref": "embedfs://job-metric-data.schema.json" - } - }, - "required": [ - "node" - ] - } - }, - "required": [ - "name", - "type", - "read_bw", - "write_bw" - ] - }, - "minItems": 1 - } - }, - "ic_rcv_packets": { - "description": "Network interconnect read packets", - "properties": { - "node": { - "$ref": "embedfs://job-metric-data.schema.json" - } - }, - "required": [ - "node" - ] - }, - "ic_send_packets": { - "description": "Network interconnect send packet", - "properties": { - "node": { - "$ref": "embedfs://job-metric-data.schema.json" - } - }, - "required": [ - "node" - ] - }, - "ic_read_bw": { - "description": "Network interconnect read bandwidth", - "properties": { - "node": { - "$ref": "embedfs://job-metric-data.schema.json" - } - }, - "required": [ - "node" - ] - }, - "ic_write_bw": { - "description": "Network interconnect 
write bandwidth", - "properties": { - "node": { - "$ref": "embedfs://job-metric-data.schema.json" - } - }, - "required": [ - "node" - ] - }, - "required": [ - "cpu_user", - "cpu_load", - "mem_used", - "flops_any", - "mem_bw", - "net_bw", - "filesystems" - ] -} diff --git a/pkg/schema/schemas/job-meta.schema.json b/pkg/schema/schemas/job-meta.schema.json deleted file mode 100644 index db7475c..0000000 --- a/pkg/schema/schemas/job-meta.schema.json +++ /dev/null @@ -1,351 +0,0 @@ -{ - "$schema": "http://json-schema.org/draft/2020-12/schema", - "$id": "embedfs://job-meta.schema.json", - "title": "Job meta data", - "description": "Meta data information of a HPC job", - "type": "object", - "properties": { - "jobId": { - "description": "The unique identifier of a job", - "type": "integer" - }, - "user": { - "description": "The unique identifier of a user", - "type": "string" - }, - "project": { - "description": "The unique identifier of a project", - "type": "string" - }, - "cluster": { - "description": "The unique identifier of a cluster", - "type": "string" - }, - "subCluster": { - "description": "The unique identifier of a sub cluster", - "type": "string" - }, - "partition": { - "description": "The Slurm partition to which the job was submitted", - "type": "string" - }, - "arrayJobId": { - "description": "The unique identifier of an array job", - "type": "integer" - }, - "numNodes": { - "description": "Number of nodes used", - "type": "integer", - "exclusiveMinimum": 0 - }, - "numHwthreads": { - "description": "Number of HWThreads used", - "type": "integer", - "exclusiveMinimum": 0 - }, - "numAcc": { - "description": "Number of accelerators used", - "type": "integer", - "exclusiveMinimum": 0 - }, - "exclusive": { - "description": "Specifies how nodes are shared. 
0 - Shared among multiple jobs of multiple users, 1 - Job exclusive, 2 - Shared among multiple jobs of same user", - "type": "integer", - "minimum": 0, - "maximum": 2 - }, - "monitoringStatus": { - "description": "State of monitoring system during job run", - "type": "integer" - }, - "smt": { - "description": "SMT threads used by job", - "type": "integer" - }, - "walltime": { - "description": "Requested walltime of job in seconds", - "type": "integer", - "exclusiveMinimum": 0 - }, - "jobState": { - "description": "Final state of job", - "type": "string", - "enum": [ - "completed", - "failed", - "cancelled", - "stopped", - "out_of_memory", - "timeout" - ] - }, - "startTime": { - "description": "Start epoch time stamp in seconds", - "type": "integer", - "exclusiveMinimum": 0 - }, - "duration": { - "description": "Duration of job in seconds", - "type": "integer", - "exclusiveMinimum": 0 - }, - "resources": { - "description": "Resources used by job", - "type": "array", - "items": { - "type": "object", - "properties": { - "hostname": { - "type": "string" - }, - "hwthreads": { - "type": "array", - "description": "List of OS processor ids", - "items": { - "type": "integer" - } - }, - "accelerators": { - "type": "array", - "description": "List of of accelerator device ids", - "items": { - "type": "string" - } - }, - "configuration": { - "type": "string", - "description": "The configuration options of the node" - } - }, - "required": [ - "hostname" - ], - "minItems": 1 - } - }, - "metaData": { - "description": "Additional information about the job", - "type": "object", - "properties": { - "jobScript": { - "type": "string", - "description": "The batch script of the job" - }, - "jobName": { - "type": "string", - "description": "Slurm Job name" - }, - "slurmInfo": { - "type": "string", - "description": "Additional slurm infos as show by scontrol show job" - } - } - }, - "tags": { - "description": "List of tags", - "type": "array", - "items": { - "type": "object", - "properties": { - "name": { - "type": "string" - }, - "type": { - "type": "string" - } - }, - "required": [ - "name", - "type" - ] - }, - "uniqueItems": true - }, - "statistics": { - "description": "Job statistic data", - "type": "object", - "properties": { - "mem_used": { - "description": "Memory capacity used (required)", - "$ref": "embedfs://job-metric-statistics.schema.json" - }, - "cpu_load": { - "description": "CPU requested core utilization (load 1m) (required)", - "$ref": "embedfs://job-metric-statistics.schema.json" - }, - "flops_any": { - "description": "Total flop rate with DP flops scaled up (required)", - "$ref": "embedfs://job-metric-statistics.schema.json" - }, - "mem_bw": { - "description": "Main memory bandwidth (required)", - "$ref": "embedfs://job-metric-statistics.schema.json" - }, - "net_bw": { - "description": "Total fast interconnect network bandwidth (required)", - "$ref": "embedfs://job-metric-statistics.schema.json" - }, - "file_bw": { - "description": "Total file IO bandwidth (required)", - "$ref": "embedfs://job-metric-statistics.schema.json" - }, - "ipc": { - "description": "Instructions executed per cycle", - "$ref": "embedfs://job-metric-statistics.schema.json" - }, - "cpu_user": { - "description": "CPU user active core utilization", - "$ref": "embedfs://job-metric-statistics.schema.json" - }, - "flops_dp": { - "description": "Double precision flop rate", - "$ref": "embedfs://job-metric-statistics.schema.json" - }, - "flops_sp": { - "description": "Single precision flops rate", - "$ref": 
"embedfs://job-metric-statistics.schema.json" - }, - "rapl_power": { - "description": "CPU power consumption", - "$ref": "embedfs://job-metric-statistics.schema.json" - }, - "acc_used": { - "description": "GPU utilization", - "$ref": "embedfs://job-metric-statistics.schema.json" - }, - "acc_mem_used": { - "description": "GPU memory capacity used", - "$ref": "embedfs://job-metric-statistics.schema.json" - }, - "acc_power": { - "description": "GPU power consumption", - "$ref": "embedfs://job-metric-statistics.schema.json" - }, - "clock": { - "description": "Average core frequency", - "$ref": "embedfs://job-metric-statistics.schema.json" - }, - "eth_read_bw": { - "description": "Ethernet read bandwidth", - "$ref": "embedfs://job-metric-statistics.schema.json" - }, - "eth_write_bw": { - "description": "Ethernet write bandwidth", - "$ref": "embedfs://job-metric-statistics.schema.json" - }, - "ic_rcv_packets": { - "description": "Network interconnect read packets", - "$ref": "embedfs://job-metric-statistics.schema.json" - }, - "ic_send_packets": { - "description": "Network interconnect send packet", - "$ref": "embedfs://job-metric-statistics.schema.json" - }, - "ic_read_bw": { - "description": "Network interconnect read bandwidth", - "$ref": "embedfs://job-metric-statistics.schema.json" - }, - "ic_write_bw": { - "description": "Network interconnect write bandwidth", - "$ref": "embedfs://job-metric-statistics.schema.json" - }, - "filesystems": { - "description": "Array of filesystems", - "type": "array", - "items": { - "type": "object", - "properties": { - "name": { - "type": "string" - }, - "type": { - "type": "string", - "enum": [ - "nfs", - "lustre", - "gpfs", - "nvme", - "ssd", - "hdd", - "beegfs" - ] - }, - "read_bw": { - "description": "File system read bandwidth", - "$ref": "embedfs://job-metric-statistics.schema.json" - }, - "write_bw": { - "description": "File system write bandwidth", - "$ref": "embedfs://job-metric-statistics.schema.json" - }, - "read_req": { - "description": "File system read requests", - "$ref": "embedfs://job-metric-statistics.schema.json" - }, - "write_req": { - "description": "File system write requests", - "$ref": "embedfs://job-metric-statistics.schema.json" - }, - "inodes": { - "description": "File system write requests", - "$ref": "embedfs://job-metric-statistics.schema.json" - }, - "accesses": { - "description": "File system open and close", - "$ref": "embedfs://job-metric-statistics.schema.json" - }, - "fsync": { - "description": "File system fsync", - "$ref": "embedfs://job-metric-statistics.schema.json" - }, - "create": { - "description": "File system create", - "$ref": "embedfs://job-metric-statistics.schema.json" - }, - "open": { - "description": "File system open", - "$ref": "embedfs://job-metric-statistics.schema.json" - }, - "close": { - "description": "File system close", - "$ref": "embedfs://job-metric-statistics.schema.json" - }, - "seek": { - "description": "File system seek", - "$ref": "embedfs://job-metric-statistics.schema.json" - } - }, - "required": [ - "name", - "type", - "read_bw", - "write_bw" - ] - }, - "minItems": 1 - } - }, - "required": [ - "cpu_user", - "cpu_load", - "mem_used", - "flops_any", - "mem_bw" - ] - } - }, - "required": [ - "jobId", - "user", - "project", - "cluster", - "subCluster", - "numNodes", - "exclusive", - "startTime", - "jobState", - "duration", - "resources", - "statistics" - ] -} diff --git a/pkg/schema/schemas/job-metric-data.schema.json b/pkg/schema/schemas/job-metric-data.schema.json deleted file mode 100644 
index ad499bf..0000000 --- a/pkg/schema/schemas/job-metric-data.schema.json +++ /dev/null @@ -1,216 +0,0 @@ -{ - "$schema": "http://json-schema.org/draft/2020-12/schema", - "$id": "embedfs://job-metric-data.schema.json", - "title": "Job metric data", - "description": "Metric data of a HPC job", - "type": "object", - "properties": { - "unit": { - "description": "Metric unit", - "$ref": "embedfs://unit.schema.json" - }, - "timestep": { - "description": "Measurement interval in seconds", - "type": "integer" - }, - "thresholds": { - "description": "Metric thresholds for specific system", - "type": "object", - "properties": { - "peak": { - "type": "number" - }, - "normal": { - "type": "number" - }, - "caution": { - "type": "number" - }, - "alert": { - "type": "number" - } - } - }, - "statisticsSeries": { - "type": "object", - "description": "Statistics series across topology", - "properties": { - "min": { - "type": "array", - "items": { - "type": "number", - "minimum": 0 - }, - "minItems": 3 - }, - "max": { - "type": "array", - "items": { - "type": "number", - "minimum": 0 - }, - "minItems": 3 - }, - "mean": { - "type": "array", - "items": { - "type": "number", - "minimum": 0 - }, - "minItems": 3 - }, - "percentiles": { - "type": "object", - "properties": { - "10": { - "type": "array", - "items": { - "type": "number", - "minimum": 0 - }, - "minItems": 3 - }, - "20": { - "type": "array", - "items": { - "type": "number", - "minimum": 0 - }, - "minItems": 3 - }, - "30": { - "type": "array", - "items": { - "type": "number", - "minimum": 0 - }, - "minItems": 3 - }, - "40": { - "type": "array", - "items": { - "type": "number", - "minimum": 0 - }, - "minItems": 3 - }, - "50": { - "type": "array", - "items": { - "type": "number", - "minimum": 0 - }, - "minItems": 3 - }, - "60": { - "type": "array", - "items": { - "type": "number", - "minimum": 0 - }, - "minItems": 3 - }, - "70": { - "type": "array", - "items": { - "type": "number", - "minimum": 0 - }, - "minItems": 3 - }, - "80": { - "type": "array", - "items": { - "type": "number", - "minimum": 0 - }, - "minItems": 3 - }, - "90": { - "type": "array", - "items": { - "type": "number", - "minimum": 0 - }, - "minItems": 3 - }, - "25": { - "type": "array", - "items": { - "type": "number", - "minimum": 0 - }, - "minItems": 3 - }, - "75": { - "type": "array", - "items": { - "type": "number", - "minimum": 0 - }, - "minItems": 3 - } - } - } - } - }, - "series": { - "type": "array", - "items": { - "type": "object", - "properties": { - "hostname": { - "type": "string" - }, - "id": { - "type": "string" - }, - "statistics": { - "type": "object", - "description": "Statistics across time dimension", - "properties": { - "avg": { - "description": "Series average", - "type": "number", - "minimum": 0 - }, - "min": { - "description": "Series minimum", - "type": "number", - "minimum": 0 - }, - "max": { - "description": "Series maximum", - "type": "number", - "minimum": 0 - } - }, - "required": [ - "avg", - "min", - "max" - ] - }, - "data": { - "type": "array", - "contains": { - "type": "number", - "minimum": 0 - }, - "minItems": 1 - } - }, - "required": [ - "hostname", - "statistics", - "data" - ] - } - } - }, - "required": [ - "unit", - "timestep", - "series" - ] -} diff --git a/pkg/schema/schemas/job-metric-statistics.schema.json b/pkg/schema/schemas/job-metric-statistics.schema.json deleted file mode 100644 index f753ed3..0000000 --- a/pkg/schema/schemas/job-metric-statistics.schema.json +++ /dev/null @@ -1,34 +0,0 @@ -{ - "$schema": 
"http://json-schema.org/draft/2020-12/schema", - "$id": "embedfs://job-metric-statistics.schema.json", - "title": "Job statistics", - "description": "Format specification for job metric statistics", - "type": "object", - "properties": { - "unit": { - "description": "Metric unit", - "$ref": "embedfs://unit.schema.json" - }, - "avg": { - "description": "Job metric average", - "type": "number", - "minimum": 0 - }, - "min": { - "description": "Job metric minimum", - "type": "number", - "minimum": 0 - }, - "max": { - "description": "Job metric maximum", - "type": "number", - "minimum": 0 - } - }, - "required": [ - "unit", - "avg", - "min", - "max" - ] -} diff --git a/pkg/schema/schemas/unit.schema.json b/pkg/schema/schemas/unit.schema.json deleted file mode 100644 index a8a2b4d..0000000 --- a/pkg/schema/schemas/unit.schema.json +++ /dev/null @@ -1,41 +0,0 @@ -{ - "$schema": "http://json-schema.org/draft/2020-12/schema", - "$id": "embedfs://unit.schema.json", - "title": "Metric unit", - "description": "Format specification for job metric units", - "type": "object", - "properties": { - "base": { - "description": "Metric base unit", - "type": "string", - "enum": [ - "B", - "F", - "B/s", - "F/s", - "CPI", - "IPC", - "Hz", - "W", - "J", - "°C", - "" - ] - }, - "prefix": { - "description": "Unit prefix", - "type": "string", - "enum": [ - "K", - "M", - "G", - "T", - "P", - "E" - ] - } - }, - "required": [ - "base" - ] -} diff --git a/pkg/schema/user.go b/pkg/schema/user.go deleted file mode 100644 index 2fff453..0000000 --- a/pkg/schema/user.go +++ /dev/null @@ -1,200 +0,0 @@ -// Copyright (C) NHR@FAU, University Erlangen-Nuremberg. -// All rights reserved. -// Use of this source code is governed by a MIT-style -// license that can be found in the LICENSE file. 
-package schema - -import ( - "fmt" - "slices" - "strings" -) - -type Role int - -const ( - RoleAnonymous Role = iota - RoleApi - RoleUser - RoleManager - RoleSupport - RoleAdmin - RoleError -) - -type AuthSource int - -const ( - AuthViaLocalPassword AuthSource = iota - AuthViaLDAP - AuthViaToken - AuthViaOIDC - AuthViaAll -) - -type AuthType int - -const ( - AuthToken AuthType = iota - AuthSession -) - -type User struct { - Username string `json:"username"` - Password string `json:"-"` - Name string `json:"name"` - Email string `json:"email"` - Roles []string `json:"roles"` - Projects []string `json:"projects"` - AuthType AuthType `json:"authType"` - AuthSource AuthSource `json:"authSource"` -} - -func (u *User) HasProject(project string) bool { - return slices.Contains(u.Projects, project) -} - -func GetRoleString(roleInt Role) string { - return [6]string{"anonymous", "api", "user", "manager", "support", "admin"}[roleInt] -} - -func getRoleEnum(roleStr string) Role { - switch strings.ToLower(roleStr) { - case "admin": - return RoleAdmin - case "support": - return RoleSupport - case "manager": - return RoleManager - case "user": - return RoleUser - case "api": - return RoleApi - case "anonymous": - return RoleAnonymous - default: - return RoleError - } -} - -func IsValidRole(role string) bool { - return getRoleEnum(role) != RoleError -} - -// Check if User has SPECIFIED role AND role is VALID -func (u *User) HasValidRole(role string) (hasRole bool, isValid bool) { - if IsValidRole(role) { - for _, r := range u.Roles { - if r == role { - return true, true - } - } - return false, true - } - return false, false -} - -// Check if User has SPECIFIED role -func (u *User) HasRole(role Role) bool { - for _, r := range u.Roles { - if r == GetRoleString(role) { - return true - } - } - return false -} - -// Check if User has ANY of the listed roles -func (u *User) HasAnyRole(queryroles []Role) bool { - for _, ur := range u.Roles { - for _, qr := range queryroles { - if ur == GetRoleString(qr) { - return true - } - } - } - return false -} - -// Check if User has ALL of the listed roles -func (u *User) HasAllRoles(queryroles []Role) bool { - target := len(queryroles) - matches := 0 - for _, ur := range u.Roles { - for _, qr := range queryroles { - if ur == GetRoleString(qr) { - matches += 1 - break - } - } - } - - if matches == target { - return true - } else { - return false - } -} - -// Check if User has NONE of the listed roles -func (u *User) HasNotRoles(queryroles []Role) bool { - matches := 0 - for _, ur := range u.Roles { - for _, qr := range queryroles { - if ur == GetRoleString(qr) { - matches += 1 - break - } - } - } - - if matches == 0 { - return true - } else { - return false - } -} - -// Called by API endpoint '/roles/' from frontend: Only required for admin config -> Check Admin Role -func GetValidRoles(user *User) ([]string, error) { - var vals []string - if user.HasRole(RoleAdmin) { - for i := RoleApi; i < RoleError; i++ { - vals = append(vals, GetRoleString(i)) - } - return vals, nil - } - - return vals, fmt.Errorf("%s: only admins are allowed to fetch a list of roles", user.Username) -} - -// Called by routerConfig web.page setup in backend: Only requires known user -func GetValidRolesMap(user *User) (map[string]Role, error) { - named := make(map[string]Role) - if user.HasNotRoles([]Role{RoleAnonymous}) { - for i := RoleApi; i < RoleError; i++ { - named[GetRoleString(i)] = i - } - return named, nil - } - return named, fmt.Errorf("only known users are allowed to fetch a list of 
roles") -} - -// Find highest role -func (u *User) GetAuthLevel() Role { - if u.HasRole(RoleAdmin) { - return RoleAdmin - } else if u.HasRole(RoleSupport) { - return RoleSupport - } else if u.HasRole(RoleManager) { - return RoleManager - } else if u.HasRole(RoleUser) { - return RoleUser - } else if u.HasRole(RoleApi) { - return RoleApi - } else if u.HasRole(RoleAnonymous) { - return RoleAnonymous - } else { - return RoleError - } -} diff --git a/pkg/schema/user_test.go b/pkg/schema/user_test.go deleted file mode 100644 index ce3ab3b..0000000 --- a/pkg/schema/user_test.go +++ /dev/null @@ -1,129 +0,0 @@ -// Copyright (C) NHR@FAU, University Erlangen-Nuremberg. -// All rights reserved. -// Use of this source code is governed by a MIT-style -// license that can be found in the LICENSE file. -package schema - -import ( - "testing" -) - -func TestHasValidRole(t *testing.T) { - u := User{Username: "testuser", Roles: []string{"user"}} - - exists, _ := u.HasValidRole("user") - - if !exists { - t.Fatalf(`User{Roles: ["user"]} -> HasValidRole("user"): EXISTS = %v, expected 'true'.`, exists) - } -} - -func TestHasNotValidRole(t *testing.T) { - u := User{Username: "testuser", Roles: []string{"user"}} - - exists, _ := u.HasValidRole("manager") - - if exists { - t.Fatalf(`User{Roles: ["user"]} -> HasValidRole("manager"): EXISTS = %v, expected 'false'.`, exists) - } -} - -func TestHasInvalidRole(t *testing.T) { - u := User{Username: "testuser", Roles: []string{"user"}} - - _, valid := u.HasValidRole("invalid") - - if valid { - t.Fatalf(`User{Roles: ["user"]} -> HasValidRole("invalid"): VALID = %v, expected 'false'.`, valid) - } -} - -func TestHasNotInvalidRole(t *testing.T) { - u := User{Username: "testuser", Roles: []string{"user"}} - - _, valid := u.HasValidRole("user") - - if !valid { - t.Fatalf(`User{Roles: ["user"]} -> HasValidRole("user"): VALID = %v, expected 'true'.`, valid) - } -} - -func TestHasRole(t *testing.T) { - u := User{Username: "testuser", Roles: []string{"user"}} - - exists := u.HasRole(RoleUser) - - if !exists { - t.Fatalf(`User{Roles: ["user"]} -> HasRole(RoleUser): EXISTS = %v, expected 'true'.`, exists) - } -} - -func TestHasNotRole(t *testing.T) { - u := User{Username: "testuser", Roles: []string{"user"}} - - exists := u.HasRole(RoleManager) - - if exists { - t.Fatalf(`User{Roles: ["user"]} -> HasRole(RoleManager): EXISTS = %v, expected 'false'.`, exists) - } -} - -func TestHasAnyRole(t *testing.T) { - u := User{Username: "testuser", Roles: []string{"user", "manager"}} - - result := u.HasAnyRole([]Role{RoleManager, RoleSupport, RoleAdmin}) - - if !result { - t.Fatalf(`User{Roles: ["user", "manager"]} -> HasAnyRole([]Role{RoleManager, RoleSupport, RoleAdmin}): RESULT = %v, expected 'true'.`, result) - } -} - -func TestHasNotAnyRole(t *testing.T) { - u := User{Username: "testuser", Roles: []string{"user", "manager"}} - - result := u.HasAnyRole([]Role{RoleSupport, RoleAdmin}) - - if result { - t.Fatalf(`User{Roles: ["user", "manager"]} -> HasAllRoles([]Role{RoleSupport, RoleAdmin}): RESULT = %v, expected 'false'.`, result) - } -} - -func TestHasAllRoles(t *testing.T) { - u := User{Username: "testuser", Roles: []string{"user", "manager", "support"}} - - result := u.HasAllRoles([]Role{RoleUser, RoleManager, RoleSupport}) - - if !result { - t.Fatalf(`User{Roles: ["user", "manager", "support"]} -> HasAllRoles([]Role{RoleUser, RoleManager, RoleSupport}): RESULT = %v, expected 'true'.`, result) - } -} - -func TestHasNotAllRoles(t *testing.T) { - u := User{Username: "testuser", Roles: 
[]string{"user", "manager"}} - - result := u.HasAllRoles([]Role{RoleUser, RoleManager, RoleSupport}) - - if result { - t.Fatalf(`User{Roles: ["user", "manager"]} -> HasAllRoles([]Role{RoleUser, RoleManager, RoleSupport}): RESULT = %v, expected 'false'.`, result) - } -} - -func TestHasNotRoles(t *testing.T) { - u := User{Username: "testuser", Roles: []string{"user", "manager"}} - - result := u.HasNotRoles([]Role{RoleSupport, RoleAdmin}) - - if !result { - t.Fatalf(`User{Roles: ["user", "manager"]} -> HasNotRoles([]Role{RoleSupport, RoleAdmin}): RESULT = %v, expected 'true'.`, result) - } -} - -func TestHasAllNotRoles(t *testing.T) { - u := User{Username: "testuser", Roles: []string{"user", "manager"}} - - result := u.HasNotRoles([]Role{RoleUser, RoleManager}) - - if result { - t.Fatalf(`User{Roles: ["user", "manager"]} -> HasNotRoles([]Role{RoleUser, RoleManager}): RESULT = %v, expected 'false'.`, result) - } -} diff --git a/pkg/schema/validate.go b/pkg/schema/validate.go deleted file mode 100644 index d14adf5..0000000 --- a/pkg/schema/validate.go +++ /dev/null @@ -1,68 +0,0 @@ -// Copyright (C) NHR@FAU, University Erlangen-Nuremberg. -// All rights reserved. -// Use of this source code is governed by a MIT-style -// license that can be found in the LICENSE file. -package schema - -import ( - "embed" - "encoding/json" - "fmt" - "io" - "path/filepath" - "strings" - - "github.com/ClusterCockpit/cc-backend/pkg/log" - "github.com/santhosh-tekuri/jsonschema/v5" -) - -type Kind int - -const ( - Meta Kind = iota + 1 - Data - Config - ClusterCfg -) - -//go:embed schemas/* -var schemaFiles embed.FS - -func Validate(k Kind, r io.Reader) error { - jsonschema.Loaders["embedfs"] = func(s string) (io.ReadCloser, error) { - f := filepath.Join("schemas", strings.Split(s, "//")[1]) - return schemaFiles.Open(f) - } - var s *jsonschema.Schema - var err error - - switch k { - case Meta: - s, err = jsonschema.Compile("embedfs://job-meta.schema.json") - case Data: - s, err = jsonschema.Compile("embedfs://job-data.schema.json") - case ClusterCfg: - s, err = jsonschema.Compile("embedfs://cluster.schema.json") - case Config: - s, err = jsonschema.Compile("embedfs://config.schema.json") - default: - return fmt.Errorf("SCHEMA/VALIDATE > unkown schema kind: %#v", k) - } - - if err != nil { - log.Errorf("Error while compiling json schema for kind '%#v'", k) - return err - } - - var v interface{} - if err = json.NewDecoder(r).Decode(&v); err != nil { - log.Warnf("Error while decoding raw json schema: %#v", err) - return err - } - - if err = s.Validate(v); err != nil { - return fmt.Errorf("SCHEMA/VALIDATE > %#v", err) - } - - return nil -} diff --git a/pkg/schema/validate_test.go b/pkg/schema/validate_test.go deleted file mode 100644 index 9d97dbf..0000000 --- a/pkg/schema/validate_test.go +++ /dev/null @@ -1,105 +0,0 @@ -// Copyright (C) NHR@FAU, University Erlangen-Nuremberg. -// All rights reserved. -// Use of this source code is governed by a MIT-style -// license that can be found in the LICENSE file. 
-package schema - -import ( - "bytes" - "testing" -) - -func TestValidateConfig(t *testing.T) { - json := []byte(`{ - "jwts": { - "max-age": "2m" - }, - "apiAllowedIPs": [ - "*" - ], - "clusters": [ - { - "name": "testcluster", - "metricDataRepository": { - "kind": "cc-metric-store", - "url": "localhost:8082"}, - "filterRanges": { - "numNodes": { "from": 1, "to": 64 }, - "duration": { "from": 0, "to": 86400 }, - "startTime": { "from": "2022-01-01T00:00:00Z", "to": null } - }}] -}`) - - if err := Validate(Config, bytes.NewReader(json)); err != nil { - t.Errorf("Error is not nil! %v", err) - } -} - -func TestValidateCluster(t *testing.T) { - json := []byte(`{ - "name": "emmy", - "subClusters": [ - { - "name": "main", - "processorType": "Intel IvyBridge", - "socketsPerNode": 2, - "coresPerSocket": 10, - "threadsPerCore": 2, - "flopRateScalar": { - "unit": { - "prefix": "G", - "base": "F/s" - }, - "value": 14 - }, - "flopRateSimd": { - "unit": { - "prefix": "G", - "base": "F/s" - }, - "value": 112 - }, - "memoryBandwidth": { - "unit": { - "prefix": "G", - "base": "B/s" - }, - "value": 24 - }, - "numberOfNodes": 70, - "nodes": "w11[27-45,49-63,69-72]", - "topology": { - "node": [0,20,1,21,2,22,3,23,4,24,5,25,6,26,7,27,8,28,9,29,10,30,11,31,12,32,13,33,14,34,15,35,16,36,17,37,18,38,19,39], - "socket": [ - [0,20,1,21,2,22,3,23,4,24,5,25,6,26,7,27,8,28,9,29], - [10,30,11,31,12,32,13,33,14,34,15,35,16,36,17,37,18,38,19,39] - ], - "memoryDomain": [ - [0,20,1,21,2,22,3,23,4,24,5,25,6,26,7,27,8,28,9,29], - [10,30,11,31,12,32,13,33,14,34,15,35,16,36,17,37,18,38,19,39] - ], - "core": [ - [0,20],[1,21],[2,22],[3,23],[4,24],[5,25],[6,26],[7,27],[8,28],[9,29],[10,30],[11,31],[12,32],[13,33],[14,34],[15,35],[16,36],[17,37],[18,38],[19,39] - ] - } - } - ], - "metricConfig": [ - { - "name": "cpu_load", - "scope": "hwthread", - "unit": {"base": ""}, - "aggregation": "avg", - "timestep": 60, - "peak": 4, - "normal": 2, - "caution": 1, - "alert": 0.25 - } - ] -}`) - - if err := Validate(ClusterCfg, bytes.NewReader(json)); err != nil { - t.Errorf("Error is not nil! %v", err) - } -} diff --git a/tools/archive-manager/main.go b/tools/archive-manager/main.go index 7c842ff..0cf5f98 100644 --- a/tools/archive-manager/main.go +++ b/tools/archive-manager/main.go @@ -1,5 +1,5 @@ // Copyright (C) NHR@FAU, University Erlangen-Nuremberg. -// All rights reserved. +// All rights reserved. This file is part of cc-backend. // Use of this source code is governed by a MIT-style // license that can be found in the LICENSE file. 
package main @@ -13,7 +13,7 @@ import ( "github.com/ClusterCockpit/cc-backend/internal/config" "github.com/ClusterCockpit/cc-backend/pkg/archive" - "github.com/ClusterCockpit/cc-backend/pkg/log" + cclog "github.com/ClusterCockpit/cc-lib/ccLogger" ) func parseDate(in string) int64 { @@ -22,7 +22,7 @@ func parseDate(in string) int64 { if in != "" { t, err := time.ParseInLocation(shortForm, in, loc) if err != nil { - log.Abortf("Archive Manager Main: Date parse failed with input: '%s'\nError: %s\n", in, err.Error()) + cclog.Abortf("Archive Manager Main: Date parse failed with input: '%s'\nError: %s\n", in, err.Error()) } return t.Unix() } @@ -46,18 +46,18 @@ func main() { archiveCfg := fmt.Sprintf("{\"kind\": \"file\",\"path\": \"%s\"}", srcPath) - log.Init(flagLogLevel, flagLogDateTime) + cclog.Init(flagLogLevel, flagLogDateTime) config.Init(flagConfigFile) if err := archive.Init(json.RawMessage(archiveCfg), false); err != nil { - log.Fatal(err) + cclog.Fatal(err) } ar := archive.GetHandle() if flagValidate { config.Keys.Validate = true for job := range ar.Iter(true) { - log.Printf("Validate %s - %d\n", job.Meta.Cluster, job.Meta.JobID) + cclog.Printf("Validate %s - %d\n", job.Meta.Cluster, job.Meta.JobID) } os.Exit(0) } diff --git a/tools/gen-keypair/main.go b/tools/gen-keypair/main.go index ff9c5c3..6ed3e11 100644 --- a/tools/gen-keypair/main.go +++ b/tools/gen-keypair/main.go @@ -1,5 +1,5 @@ // Copyright (C) NHR@FAU, University Erlangen-Nuremberg. -// All rights reserved. +// All rights reserved. This file is part of cc-backend. // Use of this source code is governed by a MIT-style // license that can be found in the LICENSE file. package main diff --git a/web/web.go b/web/web.go index f543e53..7318284 100644 --- a/web/web.go +++ b/web/web.go @@ -12,10 +12,10 @@ import ( "strings" "github.com/ClusterCockpit/cc-backend/internal/config" - "github.com/ClusterCockpit/cc-backend/internal/util" "github.com/ClusterCockpit/cc-backend/pkg/archive" - "github.com/ClusterCockpit/cc-backend/pkg/log" - "github.com/ClusterCockpit/cc-backend/pkg/schema" + cclog "github.com/ClusterCockpit/cc-lib/ccLogger" + "github.com/ClusterCockpit/cc-lib/schema" + "github.com/ClusterCockpit/cc-lib/util" ) /// Go's embed is only allowed to embed files in a subdirectory of the embedding package ([see here](https://github.com/golang/go/issues/46056)). 
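The logging port applied throughout this patch is mechanical: the cc-backend pkg/log import is replaced by cc-lib's ccLogger under a cclog alias, and every call site is renamed one-to-one. A minimal sketch using only functions that appear in the hunks above; the level and date-flag values are illustrative:

package main

import (
	cclog "github.com/ClusterCockpit/cc-lib/ccLogger"
)

func main() {
	// Mirrors cclog.Init(flagLogLevel, flagLogDateTime) above:
	// log level as a string, plus a flag for date/time prefixes.
	cclog.Init("info", false)
	cclog.Info("ported to the cc-lib logger")
}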
@@ -26,7 +26,7 @@ var frontendFiles embed.FS func ServeFiles() http.Handler { publicFiles, err := fs.Sub(frontendFiles, "frontend/public") if err != nil { - log.Abortf("Serve Files: Could not find 'frontend/public' file directory.\nError: %s\n", err.Error()) + cclog.Abortf("Serve Files: Could not find 'frontend/public' file directory.\nError: %s\n", err.Error()) } return http.FileServer(http.FS(publicFiles)) } @@ -48,25 +48,22 @@ func init() { if path == "templates/login.tmpl" { if util.CheckFileExists("./var/login.tmpl") { - log.Info("overwrite login.tmpl with local file") - templates[strings.TrimPrefix(path, "templates/")] = - template.Must(template.Must(base.Clone()).ParseFiles("./var/login.tmpl")) + cclog.Info("overwrite login.tmpl with local file") + templates[strings.TrimPrefix(path, "templates/")] = template.Must(template.Must(base.Clone()).ParseFiles("./var/login.tmpl")) return nil } } if path == "templates/imprint.tmpl" { if util.CheckFileExists("./var/imprint.tmpl") { - log.Info("overwrite imprint.tmpl with local file") - templates[strings.TrimPrefix(path, "templates/")] = - template.Must(template.Must(base.Clone()).ParseFiles("./var/imprint.tmpl")) + cclog.Info("overwrite imprint.tmpl with local file") + templates[strings.TrimPrefix(path, "templates/")] = template.Must(template.Must(base.Clone()).ParseFiles("./var/imprint.tmpl")) return nil } } if path == "templates/privacy.tmpl" { if util.CheckFileExists("./var/privacy.tmpl") { - log.Info("overwrite privacy.tmpl with local file") - templates[strings.TrimPrefix(path, "templates/")] = - template.Must(template.Must(base.Clone()).ParseFiles("./var/privacy.tmpl")) + cclog.Info("overwrite privacy.tmpl with local file") + templates[strings.TrimPrefix(path, "templates/")] = template.Must(template.Must(base.Clone()).ParseFiles("./var/privacy.tmpl")) return nil } } @@ -74,7 +71,7 @@ func init() { templates[strings.TrimPrefix(path, "templates/")] = template.Must(template.Must(base.Clone()).ParseFS(templateFiles, path)) return nil }); err != nil { - log.Abortf("Web init(): Could not find frontend template files.\nError: %s\n", err.Error()) + cclog.Abortf("Web init(): Could not find frontend template files.\nError: %s\n", err.Error()) } _ = base @@ -105,7 +102,7 @@ type Page struct { func RenderTemplate(rw http.ResponseWriter, file string, page *Page) { t, ok := templates[file] if !ok { - log.Errorf("WEB/WEB > template '%s' not found", file) + cclog.Errorf("WEB/WEB > template '%s' not found", file) } if page.Clusters == nil { @@ -123,8 +120,8 @@ func RenderTemplate(rw http.ResponseWriter, file string, page *Page) { } } - log.Debugf("Page config : %v\n", page.Config) + cclog.Debugf("Page config : %v\n", page.Config) if err := t.Execute(rw, page); err != nil { - log.Errorf("Template error: %s", err.Error()) + cclog.Errorf("Template error: %s", err.Error()) } }
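As the web.go hunks above show, init() lets a deployment override the embedded login, imprint, and privacy templates by placing a file of the same name under ./var/; RenderTemplate then resolves pages from the resulting template map. A hedged handler sketch; the route, listen address, and the Page Title field are assumptions, not taken from this patch:

package main

import (
	"net/http"

	"github.com/ClusterCockpit/cc-backend/web"
)

func main() {
	http.HandleFunc("/login", func(rw http.ResponseWriter, r *http.Request) {
		// "login.tmpl" comes from the embedded frontend files, or from
		// ./var/login.tmpl if that override existed at startup.
		web.RenderTemplate(rw, "login.tmpl", &web.Page{Title: "Login"})
	})
	if err := http.ListenAndServe(":8080", nil); err != nil {
		panic(err)
	}
}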