Port to cc-lib. Extend legal header.

This commit is contained in:
2025-06-30 12:06:35 +02:00
parent 544fb35121
commit 639e1b9c6d
120 changed files with 1140 additions and 6410 deletions
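Every file below follows the same two-part pattern: the legal header gains the sentence "This file is part of cc-backend.", and the in-tree pkg/log and pkg/schema imports are swapped for their cc-lib counterparts, with all log.* call sites renamed to cclog.*. A minimal before/after sketch of that pattern follows; the example package and handler are hypothetical, only the import paths, the cclog alias, and the renamed calls are taken from the diffs.

// Copyright (C) NHR@FAU, University Erlangen-Nuremberg.
// All rights reserved. This file is part of cc-backend.
// Use of this source code is governed by a MIT-style
// license that can be found in the LICENSE file.
package example

import (
	"net/http"

	cclog "github.com/ClusterCockpit/cc-lib/ccLogger" // was: "github.com/ClusterCockpit/cc-backend/pkg/log"
	"github.com/ClusterCockpit/cc-lib/schema"         // was: "github.com/ClusterCockpit/cc-backend/pkg/schema"
)

// handleJob is a hypothetical handler, used only to illustrate the renamed logger calls.
func handleJob(rw http.ResponseWriter, job *schema.Job) {
	cclog.Debugf("serving job %d", job.JobID) // was: log.Debugf(...)
	rw.WriteHeader(http.StatusOK)
}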

View File

@@ -1,5 +1,5 @@
// Copyright (C) NHR@FAU, University Erlangen-Nuremberg.
// All rights reserved.
// All rights reserved. This file is part of cc-backend.
// Use of this source code is governed by a MIT-style
// license that can be found in the LICENSE file.
package api_test
@@ -27,8 +27,8 @@ import (
"github.com/ClusterCockpit/cc-backend/internal/metricdata"
"github.com/ClusterCockpit/cc-backend/internal/repository"
"github.com/ClusterCockpit/cc-backend/pkg/archive"
"github.com/ClusterCockpit/cc-backend/pkg/log"
"github.com/ClusterCockpit/cc-backend/pkg/schema"
cclog "github.com/ClusterCockpit/cc-lib/ccLogger"
"github.com/ClusterCockpit/cc-lib/schema"
"github.com/gorilla/mux"
_ "github.com/mattn/go-sqlite3"
@@ -116,7 +116,7 @@ func setup(t *testing.T) *api.RestApi {
]
}`
log.Init("info", true)
cclog.Init("info", true)
tmpdir := t.TempDir()
jobarchive := filepath.Join(tmpdir, "job-archive")
if err := os.Mkdir(jobarchive, 0777); err != nil {

View File

@@ -1,5 +1,5 @@
// Copyright (C) NHR@FAU, University Erlangen-Nuremberg.
// All rights reserved.
// All rights reserved. This file is part of cc-backend.
// Use of this source code is governed by a MIT-style
// license that can be found in the LICENSE file.
package api
@@ -12,7 +12,7 @@ import (
"github.com/ClusterCockpit/cc-backend/internal/repository"
"github.com/ClusterCockpit/cc-backend/pkg/archive"
"github.com/ClusterCockpit/cc-backend/pkg/schema"
"github.com/ClusterCockpit/cc-lib/schema"
)
// GetClustersApiResponse model

View File

@@ -1,5 +1,5 @@
// Copyright (C) NHR@FAU, University Erlangen-Nuremberg.
// All rights reserved.
// All rights reserved. This file is part of cc-backend.
// Use of this source code is governed by a MIT-style
// license that can be found in the LICENSE file.
package api
@@ -23,8 +23,8 @@ import (
"github.com/ClusterCockpit/cc-backend/internal/metricDataDispatcher"
"github.com/ClusterCockpit/cc-backend/internal/repository"
"github.com/ClusterCockpit/cc-backend/pkg/archive"
"github.com/ClusterCockpit/cc-backend/pkg/log"
"github.com/ClusterCockpit/cc-backend/pkg/schema"
cclog "github.com/ClusterCockpit/cc-lib/ccLogger"
"github.com/ClusterCockpit/cc-lib/schema"
"github.com/gorilla/mux"
)
@@ -198,7 +198,7 @@ func (api *RestApi) getJobs(rw http.ResponseWriter, r *http.Request) {
results = append(results, job)
}
log.Debugf("/api/jobs: %d jobs returned", len(results))
cclog.Debugf("/api/jobs: %d jobs returned", len(results))
rw.Header().Add("Content-Type", "application/json")
bw := bufio.NewWriter(rw)
defer bw.Flush()
@@ -286,12 +286,12 @@ func (api *RestApi) getCompleteJobById(rw http.ResponseWriter, r *http.Request)
if r.URL.Query().Get("all-metrics") == "true" {
data, err = metricDataDispatcher.LoadData(job, nil, scopes, r.Context(), resolution)
if err != nil {
log.Warnf("REST: error while loading all-metrics job data for JobID %d on %s", job.JobID, job.Cluster)
cclog.Warnf("REST: error while loading all-metrics job data for JobID %d on %s", job.JobID, job.Cluster)
return
}
}
log.Debugf("/api/job/%s: get job %d", id, job.JobID)
cclog.Debugf("/api/job/%s: get job %d", id, job.JobID)
rw.Header().Add("Content-Type", "application/json")
bw := bufio.NewWriter(rw)
defer bw.Flush()
@@ -382,7 +382,7 @@ func (api *RestApi) getJobById(rw http.ResponseWriter, r *http.Request) {
data, err := metricDataDispatcher.LoadData(job, metrics, scopes, r.Context(), resolution)
if err != nil {
log.Warnf("REST: error while loading job data for JobID %d on %s", job.JobID, job.Cluster)
cclog.Warnf("REST: error while loading job data for JobID %d on %s", job.JobID, job.Cluster)
return
}
@@ -397,7 +397,7 @@ func (api *RestApi) getJobById(rw http.ResponseWriter, r *http.Request) {
}
}
log.Debugf("/api/job/%s: get job %d", id, job.JobID)
cclog.Debugf("/api/job/%s: get job %d", id, job.JobID)
rw.Header().Add("Content-Type", "application/json")
bw := bufio.NewWriter(rw)
defer bw.Flush()
@@ -565,7 +565,7 @@ func (api *RestApi) removeTagJob(rw http.ResponseWriter, r *http.Request) {
for _, rtag := range req {
// Only Global and Admin Tags
if rtag.Scope != "global" && rtag.Scope != "admin" {
log.Warnf("Cannot delete private tag for job %d: Skip", job.JobID)
cclog.Warnf("Cannot delete private tag for job %d: Skip", job.JobID)
continue
}
@@ -611,7 +611,7 @@ func (api *RestApi) removeTags(rw http.ResponseWriter, r *http.Request) {
for _, rtag := range req {
// Only Global and Admin Tags
if rtag.Scope != "global" && rtag.Scope != "admin" {
log.Warn("Cannot delete private tag: Skip")
cclog.Warn("Cannot delete private tag: Skip")
continue
}
@@ -654,7 +654,7 @@ func (api *RestApi) startJob(rw http.ResponseWriter, r *http.Request) {
return
}
log.Printf("REST: %s\n", req.GoString())
cclog.Printf("REST: %s\n", req.GoString())
req.State = schema.JobStateRunning
if err := importer.SanityChecks(&req); err != nil {
@@ -697,7 +697,7 @@ func (api *RestApi) startJob(rw http.ResponseWriter, r *http.Request) {
}
}
log.Printf("new job (id: %d): cluster=%s, jobId=%d, user=%s, startTime=%d", id, req.Cluster, req.JobID, req.User, req.StartTime)
cclog.Printf("new job (id: %d): cluster=%s, jobId=%d, user=%s, startTime=%d", id, req.Cluster, req.JobID, req.User, req.StartTime)
rw.Header().Add("Content-Type", "application/json")
rw.WriteHeader(http.StatusCreated)
json.NewEncoder(rw).Encode(DefaultApiResponse{
@@ -737,7 +737,7 @@ func (api *RestApi) stopJobByRequest(rw http.ResponseWriter, r *http.Request) {
return
}
// log.Printf("loading db job for stopJobByRequest... : stopJobApiRequest=%v", req)
// cclog.Printf("loading db job for stopJobByRequest... : stopJobApiRequest=%v", req)
job, err = api.JobRepository.Find(req.JobId, req.Cluster, req.StartTime)
if err != nil {
job, err = api.JobRepository.FindCached(req.JobId, req.Cluster, req.StartTime)
@@ -920,7 +920,7 @@ func (api *RestApi) checkAndHandleStopJob(rw http.ResponseWriter, job *schema.Jo
}
api.JobRepository.Mutex.Unlock()
log.Printf("archiving job... (dbid: %d): cluster=%s, jobId=%d, user=%s, startTime=%d, duration=%d, state=%s", job.ID, job.Cluster, job.JobID, job.User, job.StartTime, job.Duration, job.State)
cclog.Printf("archiving job... (dbid: %d): cluster=%s, jobId=%d, user=%s, startTime=%d, duration=%d, state=%s", job.ID, job.Cluster, job.JobID, job.User, job.StartTime, job.Duration, job.State)
// Send a response (with status OK). This means that errors that happen from here on forward
// can *NOT* be communicated to the client. If reading from a MetricDataRepository or

View File

@@ -1,5 +1,5 @@
// Copyright (C) NHR@FAU, University Erlangen-Nuremberg.
// All rights reserved.
// All rights reserved. This file is part of cc-backend.
// Use of this source code is governed by a MIT-style
// license that can be found in the LICENSE file.
package api
@@ -10,7 +10,7 @@ import (
"strings"
"github.com/ClusterCockpit/cc-backend/internal/repository"
"github.com/ClusterCockpit/cc-backend/pkg/schema"
"github.com/ClusterCockpit/cc-lib/schema"
)
type Node struct {

View File

@@ -1,5 +1,5 @@
// Copyright (C) NHR@FAU, University Erlangen-Nuremberg.
// All rights reserved.
// All rights reserved. This file is part of cc-backend.
// Use of this source code is governed by a MIT-style
// license that can be found in the LICENSE file.
package api
@@ -16,9 +16,9 @@ import (
"github.com/ClusterCockpit/cc-backend/internal/auth"
"github.com/ClusterCockpit/cc-backend/internal/config"
"github.com/ClusterCockpit/cc-backend/internal/repository"
"github.com/ClusterCockpit/cc-backend/internal/util"
"github.com/ClusterCockpit/cc-backend/pkg/log"
"github.com/ClusterCockpit/cc-backend/pkg/schema"
cclog "github.com/ClusterCockpit/cc-lib/ccLogger"
"github.com/ClusterCockpit/cc-lib/schema"
"github.com/ClusterCockpit/cc-lib/util"
"github.com/gorilla/mux"
)
@@ -130,7 +130,7 @@ type DefaultApiResponse struct {
}
func handleError(err error, statusCode int, rw http.ResponseWriter) {
log.Warnf("REST ERROR : %s", err.Error())
cclog.Warnf("REST ERROR : %s", err.Error())
rw.Header().Add("Content-Type", "application/json")
rw.WriteHeader(statusCode)
json.NewEncoder(rw).Encode(ErrorResponse{
@@ -161,7 +161,7 @@ func (api *RestApi) editNotice(rw http.ResponseWriter, r *http.Request) {
if !noticeExists {
ntxt, err := os.Create("./var/notice.txt")
if err != nil {
log.Errorf("Creating ./var/notice.txt failed: %s", err.Error())
cclog.Errorf("Creating ./var/notice.txt failed: %s", err.Error())
http.Error(rw, err.Error(), http.StatusUnprocessableEntity)
return
}
@@ -170,7 +170,7 @@ func (api *RestApi) editNotice(rw http.ResponseWriter, r *http.Request) {
if newContent != "" {
if err := os.WriteFile("./var/notice.txt", []byte(newContent), 0o666); err != nil {
log.Errorf("Writing to ./var/notice.txt failed: %s", err.Error())
cclog.Errorf("Writing to ./var/notice.txt failed: %s", err.Error())
http.Error(rw, err.Error(), http.StatusUnprocessableEntity)
return
} else {
@@ -178,7 +178,7 @@ func (api *RestApi) editNotice(rw http.ResponseWriter, r *http.Request) {
}
} else {
if err := os.WriteFile("./var/notice.txt", []byte(""), 0o666); err != nil {
log.Errorf("Writing to ./var/notice.txt failed: %s", err.Error())
cclog.Errorf("Writing to ./var/notice.txt failed: %s", err.Error())
http.Error(rw, err.Error(), http.StatusUnprocessableEntity)
return
} else {

View File

@@ -1,5 +1,5 @@
// Copyright (C) NHR@FAU, University Erlangen-Nuremberg.
// All rights reserved.
// All rights reserved. This file is part of cc-backend.
// Use of this source code is governed by a MIT-style
// license that can be found in the LICENSE file.
package api
@@ -10,7 +10,7 @@ import (
"net/http"
"github.com/ClusterCockpit/cc-backend/internal/repository"
"github.com/ClusterCockpit/cc-backend/pkg/schema"
"github.com/ClusterCockpit/cc-lib/schema"
"github.com/gorilla/mux"
)

View File

@@ -1,5 +1,5 @@
// Copyright (C) NHR@FAU, University Erlangen-Nuremberg.
// All rights reserved.
// All rights reserved. This file is part of cc-backend.
// Use of this source code is governed by a MIT-style
// license that can be found in the LICENSE file.
package archiver
@@ -10,8 +10,8 @@ import (
"time"
"github.com/ClusterCockpit/cc-backend/internal/repository"
"github.com/ClusterCockpit/cc-backend/pkg/log"
"github.com/ClusterCockpit/cc-backend/pkg/schema"
cclog "github.com/ClusterCockpit/cc-lib/ccLogger"
"github.com/ClusterCockpit/cc-lib/schema"
sq "github.com/Masterminds/squirrel"
)
@@ -40,7 +40,7 @@ func archivingWorker() {
// not using meta data, called to load JobMeta into Cache?
// will fail if job meta not in repository
if _, err := jobRepo.FetchMetadata(job); err != nil {
log.Errorf("archiving job (dbid: %d) failed at check metadata step: %s", job.ID, err.Error())
cclog.Errorf("archiving job (dbid: %d) failed at check metadata step: %s", job.ID, err.Error())
jobRepo.UpdateMonitoringStatus(*job.ID, schema.MonitoringStatusArchivingFailed)
continue
}
@@ -49,7 +49,7 @@ func archivingWorker() {
// TODO: Maybe use context with cancel/timeout here
jobMeta, err := ArchiveJob(job, context.Background())
if err != nil {
log.Errorf("archiving job (dbid: %d) failed at archiving job step: %s", job.ID, err.Error())
cclog.Errorf("archiving job (dbid: %d) failed at archiving job step: %s", job.ID, err.Error())
jobRepo.UpdateMonitoringStatus(*job.ID, schema.MonitoringStatusArchivingFailed)
continue
}
@@ -57,21 +57,21 @@ func archivingWorker() {
stmt := sq.Update("job").Where("job.id = ?", job.ID)
if stmt, err = jobRepo.UpdateFootprint(stmt, jobMeta); err != nil {
log.Errorf("archiving job (dbid: %d) failed at update Footprint step: %s", job.ID, err.Error())
cclog.Errorf("archiving job (dbid: %d) failed at update Footprint step: %s", job.ID, err.Error())
continue
}
if stmt, err = jobRepo.UpdateEnergy(stmt, jobMeta); err != nil {
log.Errorf("archiving job (dbid: %d) failed at update Energy step: %s", job.ID, err.Error())
cclog.Errorf("archiving job (dbid: %d) failed at update Energy step: %s", job.ID, err.Error())
continue
}
// Update the jobs database entry one last time:
stmt = jobRepo.MarkArchived(stmt, schema.MonitoringStatusArchivingSuccessful)
if err := jobRepo.Execute(stmt); err != nil {
log.Errorf("archiving job (dbid: %d) failed at db execute: %s", job.ID, err.Error())
cclog.Errorf("archiving job (dbid: %d) failed at db execute: %s", job.ID, err.Error())
continue
}
log.Debugf("archiving job %d took %s", job.JobID, time.Since(start))
log.Printf("archiving job (dbid: %d) successful", job.ID)
cclog.Debugf("archiving job %d took %s", job.JobID, time.Since(start))
cclog.Printf("archiving job (dbid: %d) successful", job.ID)
repository.CallJobStopHooks(job)
archivePending.Done()
@@ -84,7 +84,7 @@ func archivingWorker() {
// Trigger async archiving
func TriggerArchiving(job *schema.Job) {
if archiveChannel == nil {
log.Fatal("Cannot archive without archiving channel. Did you Start the archiver?")
cclog.Fatal("Cannot archive without archiving channel. Did you Start the archiver?")
}
archivePending.Add(1)

View File

@@ -1,5 +1,5 @@
// Copyright (C) NHR@FAU, University Erlangen-Nuremberg.
// All rights reserved.
// All rights reserved. This file is part of cc-backend.
// Use of this source code is governed by a MIT-style
// license that can be found in the LICENSE file.
package archiver
@@ -11,8 +11,8 @@ import (
"github.com/ClusterCockpit/cc-backend/internal/config"
"github.com/ClusterCockpit/cc-backend/internal/metricDataDispatcher"
"github.com/ClusterCockpit/cc-backend/pkg/archive"
"github.com/ClusterCockpit/cc-backend/pkg/log"
"github.com/ClusterCockpit/cc-backend/pkg/schema"
cclog "github.com/ClusterCockpit/cc-lib/ccLogger"
"github.com/ClusterCockpit/cc-lib/schema"
)
// Writes a running job to the job-archive
@@ -36,7 +36,7 @@ func ArchiveJob(job *schema.Job, ctx context.Context) (*schema.Job, error) {
jobData, err := metricDataDispatcher.LoadData(job, allMetrics, scopes, ctx, 0) // 0 Resulotion-Value retrieves highest res (60s)
if err != nil {
log.Error("Error wile loading job data for archiving")
cclog.Error("Error wile loading job data for archiving")
return nil, err
}

View File

@@ -1,5 +1,5 @@
// Copyright (C) NHR@FAU, University Erlangen-Nuremberg.
// All rights reserved.
// All rights reserved. This file is part of cc-backend.
// Use of this source code is governed by a MIT-style
// license that can be found in the LICENSE file.
package auth
@@ -22,9 +22,9 @@ import (
"github.com/ClusterCockpit/cc-backend/internal/config"
"github.com/ClusterCockpit/cc-backend/internal/repository"
"github.com/ClusterCockpit/cc-backend/internal/util"
"github.com/ClusterCockpit/cc-backend/pkg/log"
"github.com/ClusterCockpit/cc-backend/pkg/schema"
cclog "github.com/ClusterCockpit/cc-lib/ccLogger"
"github.com/ClusterCockpit/cc-lib/schema"
"github.com/ClusterCockpit/cc-lib/util"
"github.com/gorilla/sessions"
)
@@ -66,7 +66,7 @@ func (auth *Authentication) AuthViaSession(
) (*schema.User, error) {
session, err := auth.sessionStore.Get(r, "session")
if err != nil {
log.Error("Error while getting session store")
cclog.Error("Error while getting session store")
return nil, err
}
@@ -93,16 +93,16 @@ func Init() {
sessKey := os.Getenv("SESSION_KEY")
if sessKey == "" {
log.Warn("environment variable 'SESSION_KEY' not set (will use non-persistent random key)")
cclog.Warn("environment variable 'SESSION_KEY' not set (will use non-persistent random key)")
bytes := make([]byte, 32)
if _, err := rand.Read(bytes); err != nil {
log.Fatal("Error while initializing authentication -> failed to generate random bytes for session key")
cclog.Fatal("Error while initializing authentication -> failed to generate random bytes for session key")
}
authInstance.sessionStore = sessions.NewCookieStore(bytes)
} else {
bytes, err := base64.StdEncoding.DecodeString(sessKey)
if err != nil {
log.Fatal("Error while initializing authentication -> decoding session key failed")
cclog.Fatal("Error while initializing authentication -> decoding session key failed")
}
authInstance.sessionStore = sessions.NewCookieStore(bytes)
}
@@ -114,41 +114,41 @@ func Init() {
if config.Keys.LdapConfig != nil {
ldapAuth := &LdapAuthenticator{}
if err := ldapAuth.Init(); err != nil {
log.Warn("Error while initializing authentication -> ldapAuth init failed")
cclog.Warn("Error while initializing authentication -> ldapAuth init failed")
} else {
authInstance.LdapAuth = ldapAuth
authInstance.authenticators = append(authInstance.authenticators, authInstance.LdapAuth)
}
} else {
log.Info("Missing LDAP configuration: No LDAP support!")
cclog.Info("Missing LDAP configuration: No LDAP support!")
}
if config.Keys.JwtConfig != nil {
authInstance.JwtAuth = &JWTAuthenticator{}
if err := authInstance.JwtAuth.Init(); err != nil {
log.Fatal("Error while initializing authentication -> jwtAuth init failed")
cclog.Fatal("Error while initializing authentication -> jwtAuth init failed")
}
jwtSessionAuth := &JWTSessionAuthenticator{}
if err := jwtSessionAuth.Init(); err != nil {
log.Info("jwtSessionAuth init failed: No JWT login support!")
cclog.Info("jwtSessionAuth init failed: No JWT login support!")
} else {
authInstance.authenticators = append(authInstance.authenticators, jwtSessionAuth)
}
jwtCookieSessionAuth := &JWTCookieSessionAuthenticator{}
if err := jwtCookieSessionAuth.Init(); err != nil {
log.Info("jwtCookieSessionAuth init failed: No JWT cookie login support!")
cclog.Info("jwtCookieSessionAuth init failed: No JWT cookie login support!")
} else {
authInstance.authenticators = append(authInstance.authenticators, jwtCookieSessionAuth)
}
} else {
log.Info("Missing JWT configuration: No JWT token support!")
cclog.Info("Missing JWT configuration: No JWT token support!")
}
authInstance.LocalAuth = &LocalAuthenticator{}
if err := authInstance.LocalAuth.Init(); err != nil {
log.Fatal("Error while initializing authentication -> localAuth init failed")
cclog.Fatal("Error while initializing authentication -> localAuth init failed")
}
authInstance.authenticators = append(authInstance.authenticators, authInstance.LocalAuth)
})
@@ -156,7 +156,7 @@ func Init() {
func GetAuthInstance() *Authentication {
if authInstance == nil {
log.Fatal("Authentication module not initialized!")
cclog.Fatal("Authentication module not initialized!")
}
return authInstance
@@ -167,14 +167,14 @@ func handleTokenUser(tokenUser *schema.User) {
dbUser, err := r.GetUser(tokenUser.Username)
if err != nil && err != sql.ErrNoRows {
log.Errorf("Error while loading user '%s': %v", tokenUser.Username, err)
cclog.Errorf("Error while loading user '%s': %v", tokenUser.Username, err)
} else if err == sql.ErrNoRows && config.Keys.JwtConfig.SyncUserOnLogin { // Adds New User
if err := r.AddUser(tokenUser); err != nil {
log.Errorf("Error while adding user '%s' to DB: %v", tokenUser.Username, err)
cclog.Errorf("Error while adding user '%s' to DB: %v", tokenUser.Username, err)
}
} else if err == nil && config.Keys.JwtConfig.UpdateUserOnLogin { // Update Existing User
if err := r.UpdateUser(dbUser, tokenUser); err != nil {
log.Errorf("Error while updating user '%s' to DB: %v", dbUser.Username, err)
cclog.Errorf("Error while updating user '%s' to DB: %v", dbUser.Username, err)
}
}
}
@@ -184,14 +184,14 @@ func handleOIDCUser(OIDCUser *schema.User) {
dbUser, err := r.GetUser(OIDCUser.Username)
if err != nil && err != sql.ErrNoRows {
log.Errorf("Error while loading user '%s': %v", OIDCUser.Username, err)
cclog.Errorf("Error while loading user '%s': %v", OIDCUser.Username, err)
} else if err == sql.ErrNoRows && config.Keys.OpenIDConfig.SyncUserOnLogin { // Adds New User
if err := r.AddUser(OIDCUser); err != nil {
log.Errorf("Error while adding user '%s' to DB: %v", OIDCUser.Username, err)
cclog.Errorf("Error while adding user '%s' to DB: %v", OIDCUser.Username, err)
}
} else if err == nil && config.Keys.OpenIDConfig.UpdateUserOnLogin { // Update Existing User
if err := r.UpdateUser(dbUser, OIDCUser); err != nil {
log.Errorf("Error while updating user '%s' to DB: %v", dbUser.Username, err)
cclog.Errorf("Error while updating user '%s' to DB: %v", dbUser.Username, err)
}
}
}
@@ -199,7 +199,7 @@ func handleOIDCUser(OIDCUser *schema.User) {
func (auth *Authentication) SaveSession(rw http.ResponseWriter, r *http.Request, user *schema.User) error {
session, err := auth.sessionStore.New(r, "session")
if err != nil {
log.Errorf("session creation failed: %s", err.Error())
cclog.Errorf("session creation failed: %s", err.Error())
http.Error(rw, err.Error(), http.StatusInternalServerError)
return err
}
@@ -215,7 +215,7 @@ func (auth *Authentication) SaveSession(rw http.ResponseWriter, r *http.Request,
session.Values["projects"] = user.Projects
session.Values["roles"] = user.Roles
if err := auth.sessionStore.Save(r, rw, session); err != nil {
log.Warnf("session save failed: %s", err.Error())
cclog.Warnf("session save failed: %s", err.Error())
http.Error(rw, err.Error(), http.StatusInternalServerError)
return err
}
@@ -236,7 +236,7 @@ func (auth *Authentication) Login(
limiter := getIPUserLimiter(ip, username)
if !limiter.Allow() {
log.Warnf("AUTH/RATE > Too many login attempts for combination IP: %s, Username: %s", ip, username)
cclog.Warnf("AUTH/RATE > Too many login attempts for combination IP: %s, Username: %s", ip, username)
onfailure(rw, r, errors.New("too many login attempts, try again in a few minutes"))
return
}
@@ -246,7 +246,7 @@ func (auth *Authentication) Login(
var err error
dbUser, err = repository.GetUserRepository().GetUser(username)
if err != nil && err != sql.ErrNoRows {
log.Errorf("Error while loading user '%v'", username)
cclog.Errorf("Error while loading user '%v'", username)
}
}
@@ -256,12 +256,12 @@ func (auth *Authentication) Login(
if user, ok = authenticator.CanLogin(dbUser, username, rw, r); !ok {
continue
} else {
log.Debugf("Can login with user %v", user)
cclog.Debugf("Can login with user %v", user)
}
user, err := authenticator.Login(user, rw, r)
if err != nil {
log.Warnf("user login failed: %s", err.Error())
cclog.Warnf("user login failed: %s", err.Error())
onfailure(rw, r, err)
return
}
@@ -270,7 +270,7 @@ func (auth *Authentication) Login(
return
}
log.Infof("login successfull: user: %#v (roles: %v, projects: %v)", user.Username, user.Roles, user.Projects)
cclog.Infof("login successfull: user: %#v (roles: %v, projects: %v)", user.Username, user.Roles, user.Projects)
ctx := context.WithValue(r.Context(), repository.ContextUserKey, user)
if r.FormValue("redirect") != "" {
@@ -282,7 +282,7 @@ func (auth *Authentication) Login(
return
}
log.Debugf("login failed: no authenticator applied")
cclog.Debugf("login failed: no authenticator applied")
onfailure(rw, r, errors.New("no authenticator applied"))
})
}
@@ -294,14 +294,14 @@ func (auth *Authentication) Auth(
return http.HandlerFunc(func(rw http.ResponseWriter, r *http.Request) {
user, err := auth.JwtAuth.AuthViaJWT(rw, r)
if err != nil {
log.Infof("auth -> authentication failed: %s", err.Error())
cclog.Infof("auth -> authentication failed: %s", err.Error())
http.Error(rw, err.Error(), http.StatusUnauthorized)
return
}
if user == nil {
user, err = auth.AuthViaSession(rw, r)
if err != nil {
log.Infof("auth -> authentication failed: %s", err.Error())
cclog.Infof("auth -> authentication failed: %s", err.Error())
http.Error(rw, err.Error(), http.StatusUnauthorized)
return
}
@@ -312,7 +312,7 @@ func (auth *Authentication) Auth(
return
}
log.Info("auth -> authentication failed")
cclog.Info("auth -> authentication failed")
onfailure(rw, r, errors.New("unauthorized (please login first)"))
})
}
@@ -324,14 +324,14 @@ func (auth *Authentication) AuthApi(
return http.HandlerFunc(func(rw http.ResponseWriter, r *http.Request) {
user, err := auth.JwtAuth.AuthViaJWT(rw, r)
if err != nil {
log.Infof("auth api -> authentication failed: %s", err.Error())
cclog.Infof("auth api -> authentication failed: %s", err.Error())
onfailure(rw, r, err)
return
}
ipErr := securedCheck(user, r)
if ipErr != nil {
log.Infof("auth api -> secured check failed: %s", ipErr.Error())
cclog.Infof("auth api -> secured check failed: %s", ipErr.Error())
onfailure(rw, r, ipErr)
return
}
@@ -351,11 +351,11 @@ func (auth *Authentication) AuthApi(
return
}
default:
log.Info("auth api -> authentication failed: missing role")
cclog.Info("auth api -> authentication failed: missing role")
onfailure(rw, r, errors.New("unauthorized"))
}
}
log.Info("auth api -> authentication failed: no auth")
cclog.Info("auth api -> authentication failed: no auth")
onfailure(rw, r, errors.New("unauthorized"))
})
}
@@ -367,7 +367,7 @@ func (auth *Authentication) AuthUserApi(
return http.HandlerFunc(func(rw http.ResponseWriter, r *http.Request) {
user, err := auth.JwtAuth.AuthViaJWT(rw, r)
if err != nil {
log.Infof("auth user api -> authentication failed: %s", err.Error())
cclog.Infof("auth user api -> authentication failed: %s", err.Error())
onfailure(rw, r, err)
return
}
@@ -387,11 +387,11 @@ func (auth *Authentication) AuthUserApi(
return
}
default:
log.Info("auth user api -> authentication failed: missing role")
cclog.Info("auth user api -> authentication failed: missing role")
onfailure(rw, r, errors.New("unauthorized"))
}
}
log.Info("auth user api -> authentication failed: no auth")
cclog.Info("auth user api -> authentication failed: no auth")
onfailure(rw, r, errors.New("unauthorized"))
})
}
@@ -403,7 +403,7 @@ func (auth *Authentication) AuthConfigApi(
return http.HandlerFunc(func(rw http.ResponseWriter, r *http.Request) {
user, err := auth.AuthViaSession(rw, r)
if err != nil {
log.Infof("auth config api -> authentication failed: %s", err.Error())
cclog.Infof("auth config api -> authentication failed: %s", err.Error())
onfailure(rw, r, err)
return
}
@@ -412,7 +412,7 @@ func (auth *Authentication) AuthConfigApi(
onsuccess.ServeHTTP(rw, r.WithContext(ctx))
return
}
log.Info("auth config api -> authentication failed: no auth")
cclog.Info("auth config api -> authentication failed: no auth")
onfailure(rw, r, errors.New("unauthorized"))
})
}
@@ -424,7 +424,7 @@ func (auth *Authentication) AuthFrontendApi(
return http.HandlerFunc(func(rw http.ResponseWriter, r *http.Request) {
user, err := auth.AuthViaSession(rw, r)
if err != nil {
log.Infof("auth frontend api -> authentication failed: %s", err.Error())
cclog.Infof("auth frontend api -> authentication failed: %s", err.Error())
onfailure(rw, r, err)
return
}
@@ -433,7 +433,7 @@ func (auth *Authentication) AuthFrontendApi(
onsuccess.ServeHTTP(rw, r.WithContext(ctx))
return
}
log.Info("auth frontend api -> authentication failed: no auth")
cclog.Info("auth frontend api -> authentication failed: no auth")
onfailure(rw, r, errors.New("unauthorized"))
})
}

View File

@@ -1,5 +1,5 @@
// Copyright (C) NHR@FAU, University Erlangen-Nuremberg.
// All rights reserved.
// All rights reserved. This file is part of cc-backend.
// Use of this source code is governed by a MIT-style
// license that can be found in the LICENSE file.
package auth
@@ -15,8 +15,8 @@ import (
"github.com/ClusterCockpit/cc-backend/internal/config"
"github.com/ClusterCockpit/cc-backend/internal/repository"
"github.com/ClusterCockpit/cc-backend/pkg/log"
"github.com/ClusterCockpit/cc-backend/pkg/schema"
cclog "github.com/ClusterCockpit/cc-lib/ccLogger"
"github.com/ClusterCockpit/cc-lib/schema"
"github.com/golang-jwt/jwt/v5"
)
@@ -28,17 +28,17 @@ type JWTAuthenticator struct {
func (ja *JWTAuthenticator) Init() error {
pubKey, privKey := os.Getenv("JWT_PUBLIC_KEY"), os.Getenv("JWT_PRIVATE_KEY")
if pubKey == "" || privKey == "" {
log.Warn("environment variables 'JWT_PUBLIC_KEY' or 'JWT_PRIVATE_KEY' not set (token based authentication will not work)")
cclog.Warn("environment variables 'JWT_PUBLIC_KEY' or 'JWT_PRIVATE_KEY' not set (token based authentication will not work)")
} else {
bytes, err := base64.StdEncoding.DecodeString(pubKey)
if err != nil {
log.Warn("Could not decode JWT public key")
cclog.Warn("Could not decode JWT public key")
return err
}
ja.publicKey = ed25519.PublicKey(bytes)
bytes, err = base64.StdEncoding.DecodeString(privKey)
if err != nil {
log.Warn("Could not decode JWT private key")
cclog.Warn("Could not decode JWT private key")
return err
}
ja.privateKey = ed25519.PrivateKey(bytes)
@@ -70,11 +70,11 @@ func (ja *JWTAuthenticator) AuthViaJWT(
return ja.publicKey, nil
})
if err != nil {
log.Warn("Error while parsing JWT token")
cclog.Warn("Error while parsing JWT token")
return nil, err
}
if !token.Valid {
log.Warn("jwt token claims are not valid")
cclog.Warn("jwt token claims are not valid")
return nil, errors.New("jwt token claims are not valid")
}
@@ -90,7 +90,7 @@ func (ja *JWTAuthenticator) AuthViaJWT(
user, err := ur.GetUser(sub)
// Deny any logins for unknown usernames
if err != nil {
log.Warn("Could not find user from JWT in internal database.")
cclog.Warn("Could not find user from JWT in internal database.")
return nil, errors.New("unknown user")
}
// Take user roles from database instead of trusting the JWT

View File

@@ -1,5 +1,5 @@
// Copyright (C) NHR@FAU, University Erlangen-Nuremberg.
// All rights reserved.
// All rights reserved. This file is part of cc-backend.
// Use of this source code is governed by a MIT-style
// license that can be found in the LICENSE file.
package auth
@@ -15,8 +15,8 @@ import (
"github.com/ClusterCockpit/cc-backend/internal/config"
"github.com/ClusterCockpit/cc-backend/internal/repository"
"github.com/ClusterCockpit/cc-backend/pkg/log"
"github.com/ClusterCockpit/cc-backend/pkg/schema"
cclog "github.com/ClusterCockpit/cc-lib/ccLogger"
"github.com/ClusterCockpit/cc-lib/schema"
"github.com/golang-jwt/jwt/v5"
)
@@ -31,18 +31,18 @@ var _ Authenticator = (*JWTCookieSessionAuthenticator)(nil)
func (ja *JWTCookieSessionAuthenticator) Init() error {
pubKey, privKey := os.Getenv("JWT_PUBLIC_KEY"), os.Getenv("JWT_PRIVATE_KEY")
if pubKey == "" || privKey == "" {
log.Warn("environment variables 'JWT_PUBLIC_KEY' or 'JWT_PRIVATE_KEY' not set (token based authentication will not work)")
cclog.Warn("environment variables 'JWT_PUBLIC_KEY' or 'JWT_PRIVATE_KEY' not set (token based authentication will not work)")
return errors.New("environment variables 'JWT_PUBLIC_KEY' or 'JWT_PRIVATE_KEY' not set (token based authentication will not work)")
} else {
bytes, err := base64.StdEncoding.DecodeString(pubKey)
if err != nil {
log.Warn("Could not decode JWT public key")
cclog.Warn("Could not decode JWT public key")
return err
}
ja.publicKey = ed25519.PublicKey(bytes)
bytes, err = base64.StdEncoding.DecodeString(privKey)
if err != nil {
log.Warn("Could not decode JWT private key")
cclog.Warn("Could not decode JWT private key")
return err
}
ja.privateKey = ed25519.PrivateKey(bytes)
@@ -53,13 +53,13 @@ func (ja *JWTCookieSessionAuthenticator) Init() error {
if keyFound && pubKeyCrossLogin != "" {
bytes, err := base64.StdEncoding.DecodeString(pubKeyCrossLogin)
if err != nil {
log.Warn("Could not decode cross login JWT public key")
cclog.Warn("Could not decode cross login JWT public key")
return err
}
ja.publicKeyCrossLogin = ed25519.PublicKey(bytes)
} else {
ja.publicKeyCrossLogin = nil
log.Debug("environment variable 'CROSS_LOGIN_JWT_PUBLIC_KEY' not set (cross login token based authentication will not work)")
cclog.Debug("environment variable 'CROSS_LOGIN_JWT_PUBLIC_KEY' not set (cross login token based authentication will not work)")
return errors.New("environment variable 'CROSS_LOGIN_JWT_PUBLIC_KEY' not set (cross login token based authentication will not work)")
}
@@ -67,22 +67,22 @@ func (ja *JWTCookieSessionAuthenticator) Init() error {
// Warn if other necessary settings are not configured
if jc != nil {
if jc.CookieName == "" {
log.Info("cookieName for JWTs not configured (cross login via JWT cookie will fail)")
cclog.Info("cookieName for JWTs not configured (cross login via JWT cookie will fail)")
return errors.New("cookieName for JWTs not configured (cross login via JWT cookie will fail)")
}
if !jc.ValidateUser {
log.Info("forceJWTValidationViaDatabase not set to true: CC will accept users and roles defined in JWTs regardless of its own database!")
cclog.Info("forceJWTValidationViaDatabase not set to true: CC will accept users and roles defined in JWTs regardless of its own database!")
}
if jc.TrustedIssuer == "" {
log.Info("trustedExternalIssuer for JWTs not configured (cross login via JWT cookie will fail)")
cclog.Info("trustedExternalIssuer for JWTs not configured (cross login via JWT cookie will fail)")
return errors.New("trustedExternalIssuer for JWTs not configured (cross login via JWT cookie will fail)")
}
} else {
log.Warn("config for JWTs not configured (cross login via JWT cookie will fail)")
cclog.Warn("config for JWTs not configured (cross login via JWT cookie will fail)")
return errors.New("config for JWTs not configured (cross login via JWT cookie will fail)")
}
log.Info("JWT Cookie Session authenticator successfully registered")
cclog.Info("JWT Cookie Session authenticator successfully registered")
return nil
}
@@ -140,12 +140,12 @@ func (ja *JWTCookieSessionAuthenticator) Login(
return ja.publicKey, nil
})
if err != nil {
log.Warn("JWT cookie session: error while parsing token")
cclog.Warn("JWT cookie session: error while parsing token")
return nil, err
}
if !token.Valid {
log.Warn("jwt token claims are not valid")
cclog.Warn("jwt token claims are not valid")
return nil, errors.New("jwt token claims are not valid")
}
@@ -159,12 +159,12 @@ func (ja *JWTCookieSessionAuthenticator) Login(
var err error
user, err = repository.GetUserRepository().GetUser(sub)
if err != nil && err != sql.ErrNoRows {
log.Errorf("Error while loading user '%v'", sub)
cclog.Errorf("Error while loading user '%v'", sub)
}
// Deny any logins for unknown usernames
if user == nil {
log.Warn("Could not find user from JWT in internal database.")
cclog.Warn("Could not find user from JWT in internal database.")
return nil, errors.New("unknown user")
}
} else {

View File

@@ -1,5 +1,5 @@
// Copyright (C) NHR@FAU, University Erlangen-Nuremberg.
// All rights reserved.
// All rights reserved. This file is part of cc-backend.
// Use of this source code is governed by a MIT-style
// license that can be found in the LICENSE file.
package auth
@@ -15,8 +15,8 @@ import (
"github.com/ClusterCockpit/cc-backend/internal/config"
"github.com/ClusterCockpit/cc-backend/internal/repository"
"github.com/ClusterCockpit/cc-backend/pkg/log"
"github.com/ClusterCockpit/cc-backend/pkg/schema"
cclog "github.com/ClusterCockpit/cc-lib/ccLogger"
"github.com/ClusterCockpit/cc-lib/schema"
"github.com/golang-jwt/jwt/v5"
)
@@ -30,13 +30,13 @@ func (ja *JWTSessionAuthenticator) Init() error {
if pubKey := os.Getenv("CROSS_LOGIN_JWT_HS512_KEY"); pubKey != "" {
bytes, err := base64.StdEncoding.DecodeString(pubKey)
if err != nil {
log.Warn("Could not decode cross login JWT HS512 key")
cclog.Warn("Could not decode cross login JWT HS512 key")
return err
}
ja.loginTokenKey = bytes
}
log.Info("JWT Session authenticator successfully registered")
cclog.Info("JWT Session authenticator successfully registered")
return nil
}
@@ -67,12 +67,12 @@ func (ja *JWTSessionAuthenticator) Login(
return nil, fmt.Errorf("unkown signing method for login token: %s (known: HS256, HS512, EdDSA)", t.Method.Alg())
})
if err != nil {
log.Warn("Error while parsing jwt token")
cclog.Warn("Error while parsing jwt token")
return nil, err
}
if !token.Valid {
log.Warn("jwt token claims are not valid")
cclog.Warn("jwt token claims are not valid")
return nil, errors.New("jwt token claims are not valid")
}
@@ -86,12 +86,12 @@ func (ja *JWTSessionAuthenticator) Login(
var err error
user, err = repository.GetUserRepository().GetUser(sub)
if err != nil && err != sql.ErrNoRows {
log.Errorf("Error while loading user '%v'", sub)
cclog.Errorf("Error while loading user '%v'", sub)
}
// Deny any logins for unknown usernames
if user == nil {
log.Warn("Could not find user from JWT in internal database.")
cclog.Warn("Could not find user from JWT in internal database.")
return nil, errors.New("unknown user")
}
} else {

View File

@@ -1,5 +1,5 @@
// Copyright (C) NHR@FAU, University Erlangen-Nuremberg.
// All rights reserved.
// All rights reserved. This file is part of cc-backend.
// Use of this source code is governed by a MIT-style
// license that can be found in the LICENSE file.
package auth
@@ -13,8 +13,8 @@ import (
"github.com/ClusterCockpit/cc-backend/internal/config"
"github.com/ClusterCockpit/cc-backend/internal/repository"
"github.com/ClusterCockpit/cc-backend/pkg/log"
"github.com/ClusterCockpit/cc-backend/pkg/schema"
cclog "github.com/ClusterCockpit/cc-lib/ccLogger"
"github.com/ClusterCockpit/cc-lib/schema"
"github.com/go-ldap/ldap/v3"
)
@@ -28,7 +28,7 @@ var _ Authenticator = (*LdapAuthenticator)(nil)
func (la *LdapAuthenticator) Init() error {
la.syncPassword = os.Getenv("LDAP_ADMIN_PASSWORD")
if la.syncPassword == "" {
log.Warn("environment variable 'LDAP_ADMIN_PASSWORD' not set (ldap sync will not work)")
cclog.Warn("environment variable 'LDAP_ADMIN_PASSWORD' not set (ldap sync will not work)")
}
lc := config.Keys.LdapConfig
@@ -58,7 +58,7 @@ func (la *LdapAuthenticator) CanLogin(
if lc.SyncUserOnLogin {
l, err := la.getLdapConnection(true)
if err != nil {
log.Error("LDAP connection error")
cclog.Error("LDAP connection error")
}
defer l.Close()
@@ -71,12 +71,12 @@ func (la *LdapAuthenticator) CanLogin(
sr, err := l.Search(searchRequest)
if err != nil {
log.Warn(err)
cclog.Warn(err)
return nil, false
}
if len(sr.Entries) != 1 {
log.Warn("LDAP: User does not exist or too many entries returned")
cclog.Warn("LDAP: User does not exist or too many entries returned")
return nil, false
}
@@ -96,7 +96,7 @@ func (la *LdapAuthenticator) CanLogin(
}
if err := repository.GetUserRepository().AddUser(user); err != nil {
log.Errorf("User '%s' LDAP: Insert into DB failed", username)
cclog.Errorf("User '%s' LDAP: Insert into DB failed", username)
return nil, false
}
@@ -114,14 +114,14 @@ func (la *LdapAuthenticator) Login(
) (*schema.User, error) {
l, err := la.getLdapConnection(false)
if err != nil {
log.Warn("Error while getting ldap connection")
cclog.Warn("Error while getting ldap connection")
return nil, err
}
defer l.Close()
userDn := strings.Replace(config.Keys.LdapConfig.UserBind, "{username}", user.Username, -1)
if err := l.Bind(userDn, r.FormValue("password")); err != nil {
log.Errorf("AUTH/LDAP > Authentication for user %s failed: %v",
cclog.Errorf("AUTH/LDAP > Authentication for user %s failed: %v",
user.Username, err)
return nil, fmt.Errorf("Authentication failed")
}
@@ -148,7 +148,7 @@ func (la *LdapAuthenticator) Sync() error {
l, err := la.getLdapConnection(true)
if err != nil {
log.Error("LDAP connection error")
cclog.Error("LDAP connection error")
return err
}
defer l.Close()
@@ -159,7 +159,7 @@ func (la *LdapAuthenticator) Sync() error {
lc.UserFilter,
[]string{"dn", "uid", la.UserAttr}, nil))
if err != nil {
log.Warn("LDAP search error")
cclog.Warn("LDAP search error")
return err
}
@@ -182,7 +182,7 @@ func (la *LdapAuthenticator) Sync() error {
for username, where := range users {
if where == IN_DB && lc.SyncDelOldUsers {
ur.DelUser(username)
log.Debugf("sync: remove %v (does not show up in LDAP anymore)", username)
cclog.Debugf("sync: remove %v (does not show up in LDAP anymore)", username)
} else if where == IN_LDAP {
name := newnames[username]
@@ -198,9 +198,9 @@ func (la *LdapAuthenticator) Sync() error {
AuthSource: schema.AuthViaLDAP,
}
log.Debugf("sync: add %v (name: %v, roles: [user], ldap: true)", username, name)
cclog.Debugf("sync: add %v (name: %v, roles: [user], ldap: true)", username, name)
if err := ur.AddUser(user); err != nil {
log.Errorf("User '%s' LDAP: Insert into DB failed", username)
cclog.Errorf("User '%s' LDAP: Insert into DB failed", username)
return err
}
}
@@ -213,14 +213,14 @@ func (la *LdapAuthenticator) getLdapConnection(admin bool) (*ldap.Conn, error) {
lc := config.Keys.LdapConfig
conn, err := ldap.DialURL(lc.Url)
if err != nil {
log.Warn("LDAP URL dial failed")
cclog.Warn("LDAP URL dial failed")
return nil, err
}
if admin {
if err := conn.Bind(lc.SearchDN, la.syncPassword); err != nil {
conn.Close()
log.Warn("LDAP connection bind failed")
cclog.Warn("LDAP connection bind failed")
return nil, err
}
}

View File

@@ -1,5 +1,5 @@
// Copyright (C) NHR@FAU, University Erlangen-Nuremberg.
// All rights reserved.
// All rights reserved. This file is part of cc-backend.
// Use of this source code is governed by a MIT-style
// license that can be found in the LICENSE file.
package auth
@@ -8,8 +8,8 @@ import (
"fmt"
"net/http"
"github.com/ClusterCockpit/cc-backend/pkg/log"
"github.com/ClusterCockpit/cc-backend/pkg/schema"
cclog "github.com/ClusterCockpit/cc-lib/ccLogger"
"github.com/ClusterCockpit/cc-lib/schema"
"golang.org/x/crypto/bcrypt"
)
@@ -27,19 +27,19 @@ func (la *LocalAuthenticator) CanLogin(
user *schema.User,
username string,
rw http.ResponseWriter,
r *http.Request) (*schema.User, bool) {
r *http.Request,
) (*schema.User, bool) {
return user, user != nil && user.AuthSource == schema.AuthViaLocalPassword
}
func (la *LocalAuthenticator) Login(
user *schema.User,
rw http.ResponseWriter,
r *http.Request) (*schema.User, error) {
r *http.Request,
) (*schema.User, error) {
if e := bcrypt.CompareHashAndPassword([]byte(user.Password),
[]byte(r.FormValue("password"))); e != nil {
log.Errorf("AUTH/LOCAL > Authentication for user %s failed!", user.Username)
cclog.Errorf("AUTH/LOCAL > Authentication for user %s failed!", user.Username)
return nil, fmt.Errorf("Authentication failed")
}

View File

@@ -1,5 +1,5 @@
// Copyright (C) NHR@FAU, University Erlangen-Nuremberg.
// All rights reserved.
// All rights reserved. This file is part of cc-backend.
// Use of this source code is governed by a MIT-style
// license that can be found in the LICENSE file.
package auth
@@ -15,8 +15,8 @@ import (
"github.com/ClusterCockpit/cc-backend/internal/config"
"github.com/ClusterCockpit/cc-backend/internal/repository"
"github.com/ClusterCockpit/cc-backend/pkg/log"
"github.com/ClusterCockpit/cc-backend/pkg/schema"
cclog "github.com/ClusterCockpit/cc-lib/ccLogger"
"github.com/ClusterCockpit/cc-lib/schema"
"github.com/coreos/go-oidc/v3/oidc"
"github.com/gorilla/mux"
"golang.org/x/oauth2"
@@ -51,15 +51,15 @@ func setCallbackCookie(w http.ResponseWriter, r *http.Request, name, value strin
func NewOIDC(a *Authentication) *OIDC {
provider, err := oidc.NewProvider(context.Background(), config.Keys.OpenIDConfig.Provider)
if err != nil {
log.Fatal(err)
cclog.Fatal(err)
}
clientID := os.Getenv("OID_CLIENT_ID")
if clientID == "" {
log.Warn("environment variable 'OID_CLIENT_ID' not set (Open ID connect auth will not work)")
cclog.Warn("environment variable 'OID_CLIENT_ID' not set (Open ID connect auth will not work)")
}
clientSecret := os.Getenv("OID_CLIENT_SECRET")
if clientSecret == "" {
log.Warn("environment variable 'OID_CLIENT_SECRET' not set (Open ID connect auth will not work)")
cclog.Warn("environment variable 'OID_CLIENT_SECRET' not set (Open ID connect auth will not work)")
}
client := &oauth2.Config{
@@ -173,7 +173,7 @@ func (oa *OIDC) OAuth2Callback(rw http.ResponseWriter, r *http.Request) {
}
oa.authentication.SaveSession(rw, r, user)
log.Infof("login successfull: user: %#v (roles: %v, projects: %v)", user.Username, user.Roles, user.Projects)
cclog.Infof("login successfull: user: %#v (roles: %v, projects: %v)", user.Username, user.Roles, user.Projects)
ctx := context.WithValue(r.Context(), repository.ContextUserKey, user)
http.RedirectHandler("/", http.StatusTemporaryRedirect).ServeHTTP(rw, r.WithContext(ctx))
}

View File

@@ -1,5 +1,5 @@
// Copyright (C) NHR@FAU, University Erlangen-Nuremberg.
// All rights reserved.
// All rights reserved. This file is part of cc-backend.
// Use of this source code is governed by a MIT-style
// license that can be found in the LICENSE file.
package config
@@ -9,8 +9,8 @@ import (
"encoding/json"
"os"
"github.com/ClusterCockpit/cc-backend/pkg/log"
"github.com/ClusterCockpit/cc-backend/pkg/schema"
cclog "github.com/ClusterCockpit/cc-lib/ccLogger"
"github.com/ClusterCockpit/cc-lib/schema"
)
var Keys schema.ProgramConfig = schema.ProgramConfig{
@@ -53,20 +53,20 @@ func Init(flagConfigFile string) {
raw, err := os.ReadFile(flagConfigFile)
if err != nil {
if !os.IsNotExist(err) {
log.Abortf("Config Init: Could not read config file '%s'.\nError: %s\n", flagConfigFile, err.Error())
cclog.Abortf("Config Init: Could not read config file '%s'.\nError: %s\n", flagConfigFile, err.Error())
}
} else {
if err := schema.Validate(schema.Config, bytes.NewReader(raw)); err != nil {
log.Abortf("Config Init: Could not validate config file '%s'.\nError: %s\n", flagConfigFile, err.Error())
cclog.Abortf("Config Init: Could not validate config file '%s'.\nError: %s\n", flagConfigFile, err.Error())
}
dec := json.NewDecoder(bytes.NewReader(raw))
dec.DisallowUnknownFields()
if err := dec.Decode(&Keys); err != nil {
log.Abortf("Config Init: Could not decode config file '%s'.\nError: %s\n", flagConfigFile, err.Error())
cclog.Abortf("Config Init: Could not decode config file '%s'.\nError: %s\n", flagConfigFile, err.Error())
}
if Keys.Clusters == nil || len(Keys.Clusters) < 1 {
log.Abort("Config Init: At least one cluster required in config. Exited with error.")
cclog.Abort("Config Init: At least one cluster required in config. Exited with error.")
}
}
}

View File

@@ -1,5 +1,5 @@
// Copyright (C) NHR@FAU, University Erlangen-Nuremberg.
// All rights reserved.
// All rights reserved. This file is part of cc-backend.
// Use of this source code is governed by a MIT-style
// license that can be found in the LICENSE file.
package config

View File

@@ -1,3 +1,7 @@
// Copyright (C) NHR@FAU, University Erlangen-Nuremberg.
// All rights reserved. This file is part of cc-backend.
// Use of this source code is governed by a MIT-style
// license that can be found in the LICENSE file.
package config
import (

File diff suppressed because it is too large.

View File

@@ -1,5 +1,5 @@
// Copyright (C) NHR@FAU, University Erlangen-Nuremberg.
// All rights reserved.
// All rights reserved. This file is part of cc-backend.
// Use of this source code is governed by a MIT-style
// license that can be found in the LICENSE file.
package model

View File

@@ -8,7 +8,7 @@ import (
"strconv"
"time"
"github.com/ClusterCockpit/cc-backend/pkg/schema"
"github.com/ClusterCockpit/cc-lib/schema"
)
type Count struct {
@@ -170,7 +170,6 @@ type NamedStatsWithScope struct {
type NodeFilter struct {
Hostname *StringInput `json:"hostname,omitempty"`
Cluster *StringInput `json:"cluster,omitempty"`
SubCluster *StringInput `json:"subCluster,omitempty"`
NodeState *string `json:"nodeState,omitempty"`
HealthState *schema.NodeState `json:"healthState,omitempty"`
}

View File

@@ -4,7 +4,7 @@ import (
"sync"
"github.com/ClusterCockpit/cc-backend/internal/repository"
"github.com/ClusterCockpit/cc-backend/pkg/log"
cclog "github.com/ClusterCockpit/cc-lib/ccLogger"
"github.com/jmoiron/sqlx"
)
@@ -32,7 +32,7 @@ func Init() {
func GetResolverInstance() *Resolver {
if resolverInstance == nil {
log.Fatal("Authentication module not initialized!")
cclog.Fatal("Authentication module not initialized!")
}
return resolverInstance

View File

@@ -20,8 +20,8 @@ import (
"github.com/ClusterCockpit/cc-backend/internal/metricDataDispatcher"
"github.com/ClusterCockpit/cc-backend/internal/repository"
"github.com/ClusterCockpit/cc-backend/pkg/archive"
"github.com/ClusterCockpit/cc-backend/pkg/log"
"github.com/ClusterCockpit/cc-backend/pkg/schema"
cclog "github.com/ClusterCockpit/cc-lib/ccLogger"
"github.com/ClusterCockpit/cc-lib/schema"
)
// Partitions is the resolver for the partitions field.
@@ -54,7 +54,7 @@ func (r *jobResolver) ConcurrentJobs(ctx context.Context, obj *schema.Job) (*mod
func (r *jobResolver) Footprint(ctx context.Context, obj *schema.Job) ([]*model.FootprintValue, error) {
rawFootprint, err := r.Repo.FetchFootprint(obj)
if err != nil {
log.Warn("Error while fetching job footprint data")
cclog.Warn("Error while fetching job footprint data")
return nil, err
}
@@ -79,7 +79,7 @@ func (r *jobResolver) Footprint(ctx context.Context, obj *schema.Job) ([]*model.
func (r *jobResolver) EnergyFootprint(ctx context.Context, obj *schema.Job) ([]*model.EnergyFootprintValue, error) {
rawEnergyFootprint, err := r.Repo.FetchEnergyFootprint(obj)
if err != nil {
log.Warn("Error while fetching job energy footprint data")
cclog.Warn("Error while fetching job energy footprint data")
return nil, err
}
@@ -143,12 +143,12 @@ func (r *mutationResolver) CreateTag(ctx context.Context, typeArg string, name s
// Create in DB
id, err := r.Repo.CreateTag(typeArg, name, scope)
if err != nil {
log.Warn("Error while creating tag")
cclog.Warn("Error while creating tag")
return nil, err
}
return &schema.Tag{ID: id, Type: typeArg, Name: name, Scope: scope}, nil
} else {
log.Warnf("Not authorized to create tag with scope: %s", scope)
cclog.Warnf("Not authorized to create tag with scope: %s", scope)
return nil, fmt.Errorf("not authorized to create tag with scope: %s", scope)
}
}
@@ -168,7 +168,7 @@ func (r *mutationResolver) AddTagsToJob(ctx context.Context, job string, tagIds
jid, err := strconv.ParseInt(job, 10, 64)
if err != nil {
log.Warn("Error while adding tag to job")
cclog.Warn("Error while adding tag to job")
return nil, err
}
@@ -177,14 +177,14 @@ func (r *mutationResolver) AddTagsToJob(ctx context.Context, job string, tagIds
// Get ID
tid, err := strconv.ParseInt(tagId, 10, 64)
if err != nil {
log.Warn("Error while parsing tag id")
cclog.Warn("Error while parsing tag id")
return nil, err
}
// Test Exists
_, _, tscope, exists := r.Repo.TagInfo(tid)
if !exists {
log.Warnf("Tag does not exist (ID): %d", tid)
cclog.Warnf("Tag does not exist (ID): %d", tid)
return nil, fmt.Errorf("tag does not exist (ID): %d", tid)
}
@@ -194,11 +194,11 @@ func (r *mutationResolver) AddTagsToJob(ctx context.Context, job string, tagIds
user.Username == tscope {
// Add to Job
if tags, err = r.Repo.AddTag(user, jid, tid); err != nil {
log.Warn("Error while adding tag")
cclog.Warn("Error while adding tag")
return nil, err
}
} else {
log.Warnf("Not authorized to add tag: %d", tid)
cclog.Warnf("Not authorized to add tag: %d", tid)
return nil, fmt.Errorf("not authorized to add tag: %d", tid)
}
}
@@ -215,7 +215,7 @@ func (r *mutationResolver) RemoveTagsFromJob(ctx context.Context, job string, ta
jid, err := strconv.ParseInt(job, 10, 64)
if err != nil {
log.Warn("Error while parsing job id")
cclog.Warn("Error while parsing job id")
return nil, err
}
@@ -224,14 +224,14 @@ func (r *mutationResolver) RemoveTagsFromJob(ctx context.Context, job string, ta
// Get ID
tid, err := strconv.ParseInt(tagId, 10, 64)
if err != nil {
log.Warn("Error while parsing tag id")
cclog.Warn("Error while parsing tag id")
return nil, err
}
// Test Exists
_, _, tscope, exists := r.Repo.TagInfo(tid)
if !exists {
log.Warnf("Tag does not exist (ID): %d", tid)
cclog.Warnf("Tag does not exist (ID): %d", tid)
return nil, fmt.Errorf("tag does not exist (ID): %d", tid)
}
@@ -241,11 +241,11 @@ func (r *mutationResolver) RemoveTagsFromJob(ctx context.Context, job string, ta
user.Username == tscope {
// Remove from Job
if tags, err = r.Repo.RemoveTag(user, jid, tid); err != nil {
log.Warn("Error while removing tag")
cclog.Warn("Error while removing tag")
return nil, err
}
} else {
log.Warnf("Not authorized to remove tag: %d", tid)
cclog.Warnf("Not authorized to remove tag: %d", tid)
return nil, fmt.Errorf("not authorized to remove tag: %d", tid)
}
@@ -267,14 +267,14 @@ func (r *mutationResolver) RemoveTagFromList(ctx context.Context, tagIds []strin
// Get ID
tid, err := strconv.ParseInt(tagId, 10, 64)
if err != nil {
log.Warn("Error while parsing tag id for removal")
cclog.Warn("Error while parsing tag id for removal")
return nil, err
}
// Test Exists
_, _, tscope, exists := r.Repo.TagInfo(tid)
if !exists {
log.Warnf("Tag does not exist (ID): %d", tid)
cclog.Warnf("Tag does not exist (ID): %d", tid)
return nil, fmt.Errorf("tag does not exist (ID): %d", tid)
}
@@ -282,13 +282,13 @@ func (r *mutationResolver) RemoveTagFromList(ctx context.Context, tagIds []strin
if user.HasRole(schema.RoleAdmin) && (tscope == "global" || tscope == "admin") || user.Username == tscope {
// Remove from DB
if err = r.Repo.RemoveTagById(tid); err != nil {
log.Warn("Error while removing tag")
cclog.Warn("Error while removing tag")
return nil, err
} else {
tags = append(tags, int(tid))
}
} else {
log.Warnf("Not authorized to remove tag: %d", tid)
cclog.Warnf("Not authorized to remove tag: %d", tid)
return nil, fmt.Errorf("not authorized to remove tag: %d", tid)
}
}
@@ -298,7 +298,7 @@ func (r *mutationResolver) RemoveTagFromList(ctx context.Context, tagIds []strin
// UpdateConfiguration is the resolver for the updateConfiguration field.
func (r *mutationResolver) UpdateConfiguration(ctx context.Context, name string, value string) (*string, error) {
if err := repository.GetUserCfgRepo().UpdateConfig(name, value, repository.GetUserFromContext(ctx)); err != nil {
log.Warn("Error while updating user config")
cclog.Warn("Error while updating user config")
return nil, err
}
@@ -344,7 +344,7 @@ func (r *queryResolver) User(ctx context.Context, username string) (*model.User,
func (r *queryResolver) AllocatedNodes(ctx context.Context, cluster string) ([]*model.Count, error) {
data, err := r.Repo.AllocatedNodes(cluster)
if err != nil {
log.Warn("Error while fetching allocated nodes")
cclog.Warn("Error while fetching allocated nodes")
return nil, err
}
@@ -364,7 +364,7 @@ func (r *queryResolver) Node(ctx context.Context, id string) (*schema.Node, erro
repo := repository.GetNodeRepository()
numericId, err := strconv.ParseInt(id, 10, 64)
if err != nil {
log.Warn("Error while parsing job id")
cclog.Warn("Error while parsing job id")
return nil, err
}
return repo.GetNode(numericId, false)
@@ -387,13 +387,13 @@ func (r *queryResolver) NodeStats(ctx context.Context, filter []*model.NodeFilte
func (r *queryResolver) Job(ctx context.Context, id string) (*schema.Job, error) {
numericId, err := strconv.ParseInt(id, 10, 64)
if err != nil {
log.Warn("Error while parsing job id")
cclog.Warn("Error while parsing job id")
return nil, err
}
job, err := r.Repo.FindById(ctx, numericId)
if err != nil {
log.Warn("Error while finding job by id")
cclog.Warn("Error while finding job by id")
return nil, err
}
@@ -420,13 +420,13 @@ func (r *queryResolver) JobMetrics(ctx context.Context, id string, metrics []str
job, err := r.Query().Job(ctx, id)
if err != nil {
log.Warn("Error while querying job for metrics")
cclog.Warn("Error while querying job for metrics")
return nil, err
}
data, err := metricDataDispatcher.LoadData(job, metrics, scopes, ctx, *resolution)
if err != nil {
log.Warn("Error while loading job data")
cclog.Warn("Error while loading job data")
return nil, err
}
@@ -448,13 +448,13 @@ func (r *queryResolver) JobMetrics(ctx context.Context, id string, metrics []str
func (r *queryResolver) JobStats(ctx context.Context, id string, metrics []string) ([]*model.NamedStats, error) {
job, err := r.Query().Job(ctx, id)
if err != nil {
log.Warnf("Error while querying job %s for metadata", id)
cclog.Warnf("Error while querying job %s for metadata", id)
return nil, err
}
data, err := metricDataDispatcher.LoadJobStats(job, metrics, ctx)
if err != nil {
log.Warnf("Error while loading jobStats data for job id %s", id)
cclog.Warnf("Error while loading jobStats data for job id %s", id)
return nil, err
}
@@ -473,13 +473,13 @@ func (r *queryResolver) JobStats(ctx context.Context, id string, metrics []strin
func (r *queryResolver) ScopedJobStats(ctx context.Context, id string, metrics []string, scopes []schema.MetricScope) ([]*model.NamedStatsWithScope, error) {
job, err := r.Query().Job(ctx, id)
if err != nil {
log.Warnf("Error while querying job %s for metadata", id)
cclog.Warnf("Error while querying job %s for metadata", id)
return nil, err
}
data, err := metricDataDispatcher.LoadScopedJobStats(job, metrics, scopes, ctx)
if err != nil {
log.Warnf("Error while loading scopedJobStats data for job id %s", id)
cclog.Warnf("Error while loading scopedJobStats data for job id %s", id)
return nil, err
}
@@ -518,13 +518,13 @@ func (r *queryResolver) Jobs(ctx context.Context, filter []*model.JobFilter, pag
jobs, err := r.Repo.QueryJobs(ctx, filter, page, order)
if err != nil {
log.Warn("Error while querying jobs")
cclog.Warn("Error while querying jobs")
return nil, err
}
count, err := r.Repo.CountJobs(ctx, filter)
if err != nil {
log.Warn("Error while counting jobs")
cclog.Warn("Error while counting jobs")
return nil, err
}
@@ -540,7 +540,7 @@ func (r *queryResolver) Jobs(ctx context.Context, filter []*model.JobFilter, pag
}
nextJobs, err := r.Repo.QueryJobs(ctx, filter, nextPage, order)
if err != nil {
log.Warn("Error while querying next jobs")
cclog.Warn("Error while querying next jobs")
return nil, err
}
@@ -636,7 +636,7 @@ func (r *queryResolver) JobsMetricStats(ctx context.Context, filter []*model.Job
jobs, err := r.Repo.QueryJobs(ctx, filter, nil, order)
if err != nil {
log.Warn("Error while querying jobs for comparison")
cclog.Warn("Error while querying jobs for comparison")
return nil, err
}
@@ -644,7 +644,7 @@ func (r *queryResolver) JobsMetricStats(ctx context.Context, filter []*model.Job
for _, job := range jobs {
data, err := metricDataDispatcher.LoadJobStats(job, metrics, ctx)
if err != nil {
log.Warnf("Error while loading comparison jobStats data for job id %d", job.JobID)
cclog.Warnf("Error while loading comparison jobStats data for job id %d", job.JobID)
continue
// return nil, err
}
@@ -701,7 +701,7 @@ func (r *queryResolver) NodeMetrics(ctx context.Context, cluster string, nodes [
data, err := metricDataDispatcher.LoadNodeData(cluster, metrics, nodes, scopes, from, to, ctx)
if err != nil {
log.Warn("error while loading node data")
cclog.Warn("error while loading node data")
return nil, err
}
@@ -713,7 +713,7 @@ func (r *queryResolver) NodeMetrics(ctx context.Context, cluster string, nodes [
}
host.SubCluster, err = archive.GetSubClusterByNode(cluster, hostname)
if err != nil {
log.Warnf("error in nodeMetrics resolver: %s", err)
cclog.Warnf("error in nodeMetrics resolver: %s", err)
}
for metric, scopedMetrics := range metrics {
@@ -757,7 +757,7 @@ func (r *queryResolver) NodeMetricsList(ctx context.Context, cluster string, sub
data, totalNodes, hasNextPage, err := metricDataDispatcher.LoadNodeListData(cluster, subCluster, nodeFilter, metrics, scopes, *resolution, from, to, page, ctx)
if err != nil {
log.Warn("error while loading node data")
cclog.Warn("error while loading node data")
return nil, err
}
@@ -769,7 +769,7 @@ func (r *queryResolver) NodeMetricsList(ctx context.Context, cluster string, sub
}
host.SubCluster, err = archive.GetSubClusterByNode(cluster, hostname)
if err != nil {
log.Warnf("error in nodeMetrics resolver: %s", err)
cclog.Warnf("error in nodeMetrics resolver: %s", err)
}
for metric, scopedMetrics := range metrics {
@@ -824,12 +824,10 @@ func (r *Resolver) Query() generated.QueryResolver { return &queryResolver{r} }
// SubCluster returns generated.SubClusterResolver implementation.
func (r *Resolver) SubCluster() generated.SubClusterResolver { return &subClusterResolver{r} }
type (
clusterResolver struct{ *Resolver }
jobResolver struct{ *Resolver }
metricValueResolver struct{ *Resolver }
mutationResolver struct{ *Resolver }
nodeResolver struct{ *Resolver }
queryResolver struct{ *Resolver }
subClusterResolver struct{ *Resolver }
)
type clusterResolver struct{ *Resolver }
type jobResolver struct{ *Resolver }
type metricValueResolver struct{ *Resolver }
type mutationResolver struct{ *Resolver }
type nodeResolver struct{ *Resolver }
type queryResolver struct{ *Resolver }
type subClusterResolver struct{ *Resolver }
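The resolver hunks above repeat the mechanical change applied throughout this commit: every call site keeps its message and arguments and only swaps the receiver from the old pkg/log package to cc-lib's ccLogger, imported under the alias cclog. A minimal sketch of the resulting call pattern, assuming ccLogger keeps the Init/Debugf/Warn/Errorf surface that the hunks call (this is not a complete list of the package's API):

package example

import (
	cclog "github.com/ClusterCockpit/cc-lib/ccLogger"
)

func run() {
	// Same signature the importer test setup further below uses.
	cclog.Init("info", true)

	cclog.Debugf("query returned %d rows", 42)
	cclog.Warn("plain message, mirrors the old log.Warn call sites")
	cclog.Errorf("request failed: %s", "timeout")
}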

View File

@@ -1,5 +1,5 @@
// Copyright (C) NHR@FAU, University Erlangen-Nuremberg.
// All rights reserved.
// All rights reserved. This file is part of cc-backend.
// Use of this source code is governed by a MIT-style
// license that can be found in the LICENSE file.
package graph
@@ -12,9 +12,8 @@ import (
"github.com/99designs/gqlgen/graphql"
"github.com/ClusterCockpit/cc-backend/internal/graph/model"
"github.com/ClusterCockpit/cc-backend/internal/metricDataDispatcher"
"github.com/ClusterCockpit/cc-backend/pkg/log"
"github.com/ClusterCockpit/cc-backend/pkg/schema"
// "github.com/ClusterCockpit/cc-backend/pkg/archive"
cclog "github.com/ClusterCockpit/cc-lib/ccLogger"
"github.com/ClusterCockpit/cc-lib/schema"
)
const MAX_JOBS_FOR_ANALYSIS = 500
@@ -28,7 +27,7 @@ func (r *queryResolver) rooflineHeatmap(
) ([][]float64, error) {
jobs, err := r.Repo.QueryJobs(ctx, filter, &model.PageRequest{Page: 1, ItemsPerPage: MAX_JOBS_FOR_ANALYSIS + 1}, nil)
if err != nil {
log.Error("Error while querying jobs for roofline")
cclog.Error("Error while querying jobs for roofline")
return nil, err
}
if len(jobs) > MAX_JOBS_FOR_ANALYSIS {
@@ -56,13 +55,13 @@ func (r *queryResolver) rooflineHeatmap(
jobdata, err := metricDataDispatcher.LoadData(job, []string{"flops_any", "mem_bw"}, []schema.MetricScope{schema.MetricScopeNode}, ctx, 0)
if err != nil {
log.Errorf("Error while loading roofline metrics for job %d", job.ID)
cclog.Errorf("Error while loading roofline metrics for job %d", job.ID)
return nil, err
}
flops_, membw_ := jobdata["flops_any"], jobdata["mem_bw"]
if flops_ == nil && membw_ == nil {
log.Infof("rooflineHeatmap(): 'flops_any' or 'mem_bw' missing for job %d", job.ID)
cclog.Infof("rooflineHeatmap(): 'flops_any' or 'mem_bw' missing for job %d", job.ID)
continue
// return nil, fmt.Errorf("GRAPH/UTIL > 'flops_any' or 'mem_bw' missing for job %d", job.ID)
}
@@ -70,7 +69,7 @@ func (r *queryResolver) rooflineHeatmap(
flops, ok1 := flops_["node"]
membw, ok2 := membw_["node"]
if !ok1 || !ok2 {
log.Info("rooflineHeatmap() query not implemented for where flops_any or mem_bw not available at 'node' level")
cclog.Info("rooflineHeatmap() query not implemented for where flops_any or mem_bw not available at 'node' level")
continue
// TODO/FIXME:
// return nil, errors.New("GRAPH/UTIL > todo: rooflineHeatmap() query not implemented for where flops_any or mem_bw not available at 'node' level")
@@ -105,7 +104,7 @@ func (r *queryResolver) rooflineHeatmap(
func (r *queryResolver) jobsFootprints(ctx context.Context, filter []*model.JobFilter, metrics []string) (*model.Footprints, error) {
jobs, err := r.Repo.QueryJobs(ctx, filter, &model.PageRequest{Page: 1, ItemsPerPage: MAX_JOBS_FOR_ANALYSIS + 1}, nil)
if err != nil {
log.Error("Error while querying jobs for footprint")
cclog.Error("Error while querying jobs for footprint")
return nil, err
}
if len(jobs) > MAX_JOBS_FOR_ANALYSIS {
@@ -128,7 +127,7 @@ func (r *queryResolver) jobsFootprints(ctx context.Context, filter []*model.JobF
}
if err := metricDataDispatcher.LoadAverages(job, metrics, avgs, ctx); err != nil {
log.Error("Error while loading averages for footprint")
cclog.Error("Error while loading averages for footprint")
return nil, err
}

View File

@@ -1,5 +1,5 @@
// Copyright (C) NHR@FAU, University Erlangen-Nuremberg.
// All rights reserved.
// All rights reserved. This file is part of cc-backend.
// Use of this source code is governed by a MIT-style
// license that can be found in the LICENSE file.
package importer
@@ -15,8 +15,8 @@ import (
"github.com/ClusterCockpit/cc-backend/internal/config"
"github.com/ClusterCockpit/cc-backend/internal/repository"
"github.com/ClusterCockpit/cc-backend/pkg/archive"
"github.com/ClusterCockpit/cc-backend/pkg/log"
"github.com/ClusterCockpit/cc-backend/pkg/schema"
cclog "github.com/ClusterCockpit/cc-lib/ccLogger"
"github.com/ClusterCockpit/cc-lib/schema"
)
// Import all jobs specified as `<path-to-meta.json>:<path-to-data.json>,...`
@@ -31,7 +31,7 @@ func HandleImportFlag(flag string) error {
raw, err := os.ReadFile(files[0])
if err != nil {
log.Warn("Error while reading metadata file for import")
cclog.Warn("Error while reading metadata file for import")
return err
}
@@ -47,13 +47,13 @@ func HandleImportFlag(flag string) error {
MonitoringStatus: schema.MonitoringStatusRunningOrArchiving,
}
if err = dec.Decode(&job); err != nil {
log.Warn("Error while decoding raw json metadata for import")
cclog.Warn("Error while decoding raw json metadata for import")
return err
}
raw, err = os.ReadFile(files[1])
if err != nil {
log.Warn("Error while reading jobdata file for import")
cclog.Warn("Error while reading jobdata file for import")
return err
}
@@ -66,7 +66,7 @@ func HandleImportFlag(flag string) error {
dec.DisallowUnknownFields()
jobData := schema.JobData{}
if err = dec.Decode(&jobData); err != nil {
log.Warn("Error while decoding raw json jobdata for import")
cclog.Warn("Error while decoding raw json jobdata for import")
return err
}
@@ -74,7 +74,7 @@ func HandleImportFlag(flag string) error {
sc, err := archive.GetSubCluster(job.Cluster, job.SubCluster)
if err != nil {
log.Errorf("cannot get subcluster: %s", err.Error())
cclog.Errorf("cannot get subcluster: %s", err.Error())
return err
}
@@ -94,7 +94,7 @@ func HandleImportFlag(flag string) error {
job.RawFootprint, err = json.Marshal(job.Footprint)
if err != nil {
log.Warn("Error while marshaling job footprint")
cclog.Warn("Error while marshaling job footprint")
return err
}
@@ -108,7 +108,7 @@ func HandleImportFlag(flag string) error {
if i, err := archive.MetricIndex(sc.MetricConfig, fp); err == nil {
// Note: For DB data, calculate and save as kWh
if sc.MetricConfig[i].Energy == "energy" { // this metric has energy as unit (Joules)
log.Warnf("Update EnergyFootprint for Job %d and Metric %s on cluster %s: Set to 'energy' in cluster.json: Not implemented, will return 0.0", job.JobID, job.Cluster, fp)
cclog.Warnf("Update EnergyFootprint for Job %d and Metric %s on cluster %s: Set to 'energy' in cluster.json: Not implemented, will return 0.0", job.JobID, job.Cluster, fp)
// FIXME: Needs sum as stats type
} else if sc.MetricConfig[i].Energy == "power" { // this metric has power as unit (Watt)
// Energy: Power (in Watts) * Time (in Seconds)
@@ -120,7 +120,7 @@ func HandleImportFlag(flag string) error {
metricEnergy = math.Round(rawEnergy*100.0) / 100.0
}
} else {
log.Warnf("Error while collecting energy metric %s for job, DB ID '%v', return '0.0'", fp, job.ID)
cclog.Warnf("Error while collecting energy metric %s for job, DB ID '%v', return '0.0'", fp, job.ID)
}
job.EnergyFootprint[fp] = metricEnergy
@@ -129,45 +129,45 @@ func HandleImportFlag(flag string) error {
job.Energy = (math.Round(totalEnergy*100.0) / 100.0)
if job.RawEnergyFootprint, err = json.Marshal(job.EnergyFootprint); err != nil {
log.Warnf("Error while marshaling energy footprint for job INTO BYTES, DB ID '%v'", job.ID)
cclog.Warnf("Error while marshaling energy footprint for job INTO BYTES, DB ID '%v'", job.ID)
return err
}
job.RawResources, err = json.Marshal(job.Resources)
if err != nil {
log.Warn("Error while marshaling job resources")
cclog.Warn("Error while marshaling job resources")
return err
}
job.RawMetaData, err = json.Marshal(job.MetaData)
if err != nil {
log.Warn("Error while marshaling job metadata")
cclog.Warn("Error while marshaling job metadata")
return err
}
if err = SanityChecks(&job); err != nil {
log.Warn("BaseJob SanityChecks failed")
cclog.Warn("BaseJob SanityChecks failed")
return err
}
if err = archive.GetHandle().ImportJob(&job, &jobData); err != nil {
log.Error("Error while importing job")
cclog.Error("Error while importing job")
return err
}
id, err := r.InsertJob(&job)
if err != nil {
log.Warn("Error while job db insert")
cclog.Warn("Error while job db insert")
return err
}
for _, tag := range job.Tags {
if err := r.ImportTag(id, tag.Type, tag.Name, tag.Scope); err != nil {
log.Error("Error while adding or creating tag on import")
cclog.Error("Error while adding or creating tag on import")
return err
}
}
log.Infof("successfully imported a new job (jobId: %d, cluster: %s, dbid: %d)", job.JobID, job.Cluster, id)
cclog.Infof("successfully imported a new job (jobId: %d, cluster: %s, dbid: %d)", job.JobID, job.Cluster, id)
}
return nil
}
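The EnergyFootprint hunks in this file (and the matching ones in InitDB and UpdateEnergy further below) only rename the logger; the surrounding logic takes a metric whose unit is 'power', turns average power over the job's duration into kWh, and rounds to two decimals before storing it. The exact expression for rawEnergy sits outside the shown hunk lines, so the helper below is a hypothetical sketch of that conversion, not the repository's code:

package example

import "math"

// metricEnergyKWh converts an average power draw (Watts) over a job duration
// (seconds) into kWh and rounds to two decimals, matching the
// math.Round(rawEnergy*100.0)/100.0 step visible in the hunks.
func metricEnergyKWh(avgPowerWatts float64, durationSeconds int64) float64 {
	// Watts * seconds = Joules; 3.6e6 Joules per kWh (assumed conversion,
	// the divisor itself is not shown in the diff).
	rawEnergy := (avgPowerWatts * float64(durationSeconds)) / 3.6e6
	return math.Round(rawEnergy*100.0) / 100.0
}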

View File

@@ -1,5 +1,5 @@
// Copyright (C) NHR@FAU, University Erlangen-Nuremberg.
// All rights reserved.
// All rights reserved. This file is part of cc-backend.
// Use of this source code is governed by a MIT-style
// license that can be found in the LICENSE file.
package importer_test
@@ -16,7 +16,7 @@ import (
"github.com/ClusterCockpit/cc-backend/internal/importer"
"github.com/ClusterCockpit/cc-backend/internal/repository"
"github.com/ClusterCockpit/cc-backend/pkg/archive"
"github.com/ClusterCockpit/cc-backend/pkg/log"
cclog "github.com/ClusterCockpit/cc-lib/ccLogger"
)
func copyFile(s string, d string) error {
@@ -78,7 +78,7 @@ func setup(t *testing.T) *repository.JobRepository {
}
]}`
log.Init("info", true)
cclog.Init("info", true)
tmpdir := t.TempDir()
jobarchive := filepath.Join(tmpdir, "job-archive")

View File

@@ -1,5 +1,5 @@
// Copyright (C) NHR@FAU, University Erlangen-Nuremberg.
// All rights reserved.
// All rights reserved. This file is part of cc-backend.
// Use of this source code is governed by a MIT-style
// license that can be found in the LICENSE file.
package importer
@@ -13,8 +13,8 @@ import (
"github.com/ClusterCockpit/cc-backend/internal/repository"
"github.com/ClusterCockpit/cc-backend/pkg/archive"
"github.com/ClusterCockpit/cc-backend/pkg/log"
"github.com/ClusterCockpit/cc-backend/pkg/schema"
cclog "github.com/ClusterCockpit/cc-lib/ccLogger"
"github.com/ClusterCockpit/cc-lib/schema"
)
const (
@@ -27,15 +27,15 @@ const (
func InitDB() error {
r := repository.GetJobRepository()
if err := r.Flush(); err != nil {
log.Errorf("repository initDB(): %v", err)
cclog.Errorf("repository initDB(): %v", err)
return err
}
starttime := time.Now()
log.Print("Building job table...")
cclog.Print("Building job table...")
t, err := r.TransactionInit()
if err != nil {
log.Warn("Error while initializing SQL transactions")
cclog.Warn("Error while initializing SQL transactions")
return err
}
tags := make(map[string]int64)
@@ -63,7 +63,7 @@ func InitDB() error {
sc, err := archive.GetSubCluster(jobMeta.Cluster, jobMeta.SubCluster)
if err != nil {
log.Errorf("cannot get subcluster: %s", err.Error())
cclog.Errorf("cannot get subcluster: %s", err.Error())
return err
}
@@ -83,7 +83,7 @@ func InitDB() error {
jobMeta.RawFootprint, err = json.Marshal(jobMeta.Footprint)
if err != nil {
log.Warn("Error while marshaling job footprint")
cclog.Warn("Error while marshaling job footprint")
return err
}
@@ -97,7 +97,7 @@ func InitDB() error {
if i, err := archive.MetricIndex(sc.MetricConfig, fp); err == nil {
// Note: For DB data, calculate and save as kWh
if sc.MetricConfig[i].Energy == "energy" { // this metric has energy as unit (Joules)
log.Warnf("Update EnergyFootprint for Job %d and Metric %s on cluster %s: Set to 'energy' in cluster.json: Not implemented, will return 0.0", jobMeta.JobID, jobMeta.Cluster, fp)
cclog.Warnf("Update EnergyFootprint for Job %d and Metric %s on cluster %s: Set to 'energy' in cluster.json: Not implemented, will return 0.0", jobMeta.JobID, jobMeta.Cluster, fp)
// FIXME: Needs sum as stats type
} else if sc.MetricConfig[i].Energy == "power" { // this metric has power as unit (Watt)
// Energy: Power (in Watts) * Time (in Seconds)
@@ -109,7 +109,7 @@ func InitDB() error {
metricEnergy = math.Round(rawEnergy*100.0) / 100.0
}
} else {
log.Warnf("Error while collecting energy metric %s for job, DB ID '%v', return '0.0'", fp, jobMeta.ID)
cclog.Warnf("Error while collecting energy metric %s for job, DB ID '%v', return '0.0'", fp, jobMeta.ID)
}
jobMeta.EnergyFootprint[fp] = metricEnergy
@@ -118,26 +118,26 @@ func InitDB() error {
jobMeta.Energy = (math.Round(totalEnergy*100.0) / 100.0)
if jobMeta.RawEnergyFootprint, err = json.Marshal(jobMeta.EnergyFootprint); err != nil {
log.Warnf("Error while marshaling energy footprint for job INTO BYTES, DB ID '%v'", jobMeta.ID)
cclog.Warnf("Error while marshaling energy footprint for job INTO BYTES, DB ID '%v'", jobMeta.ID)
return err
}
jobMeta.RawResources, err = json.Marshal(jobMeta.Resources)
if err != nil {
log.Errorf("repository initDB(): %v", err)
cclog.Errorf("repository initDB(): %v", err)
errorOccured++
continue
}
jobMeta.RawMetaData, err = json.Marshal(jobMeta.MetaData)
if err != nil {
log.Errorf("repository initDB(): %v", err)
cclog.Errorf("repository initDB(): %v", err)
errorOccured++
continue
}
if err := SanityChecks(jobMeta); err != nil {
log.Errorf("repository initDB(): %v", err)
cclog.Errorf("repository initDB(): %v", err)
errorOccured++
continue
}
@@ -145,7 +145,7 @@ func InitDB() error {
id, err := r.TransactionAddNamed(t,
repository.NamedJobInsert, jobMeta)
if err != nil {
log.Errorf("repository initDB(): %v", err)
cclog.Errorf("repository initDB(): %v", err)
errorOccured++
continue
}
@@ -158,7 +158,7 @@ func InitDB() error {
addTagQuery,
tag.Name, tag.Type)
if err != nil {
log.Errorf("Error adding tag: %v", err)
cclog.Errorf("Error adding tag: %v", err)
errorOccured++
continue
}
@@ -176,11 +176,11 @@ func InitDB() error {
}
if errorOccured > 0 {
log.Warnf("Error in import of %d jobs!", errorOccured)
cclog.Warnf("Error in import of %d jobs!", errorOccured)
}
r.TransactionEnd(t)
log.Printf("A total of %d jobs have been registered in %.3f seconds.\n", i, time.Since(starttime).Seconds())
cclog.Printf("A total of %d jobs have been registered in %.3f seconds.\n", i, time.Since(starttime).Seconds())
return nil
}
@@ -190,7 +190,7 @@ func SanityChecks(job *schema.Job) error {
return fmt.Errorf("no such cluster: %v", job.Cluster)
}
if err := archive.AssignSubCluster(job); err != nil {
log.Warn("Error while assigning subcluster to job")
cclog.Warn("Error while assigning subcluster to job")
return err
}
if !job.State.Valid() {

View File

@@ -1,5 +1,5 @@
// Copyright (C) NHR@FAU, University Erlangen-Nuremberg.
// All rights reserved.
// All rights reserved. This file is part of cc-backend.
// Use of this source code is governed by a MIT-style
// license that can be found in the LICENSE file.
package importer
@@ -7,7 +7,7 @@ package importer
import (
"math"
ccunits "github.com/ClusterCockpit/cc-units"
ccunits "github.com/ClusterCockpit/cc-lib/ccUnits"
)
func getNormalizationFactor(v float64) (float64, int) {

View File

@@ -1,5 +1,5 @@
// Copyright (C) NHR@FAU, University Erlangen-Nuremberg.
// All rights reserved.
// All rights reserved. This file is part of cc-backend.
// Use of this source code is governed by a MIT-style
// license that can be found in the LICENSE file.
package importer
@@ -8,7 +8,7 @@ import (
"fmt"
"testing"
ccunits "github.com/ClusterCockpit/cc-units"
ccunits "github.com/ClusterCockpit/cc-lib/ccUnits"
)
func TestNormalizeFactor(t *testing.T) {

View File

@@ -1,5 +1,5 @@
// Copyright (C) NHR@FAU, University Erlangen-Nuremberg.
// All rights reserved.
// All rights reserved. This file is part of cc-backend.
// Use of this source code is governed by a MIT-style
// license that can be found in the LICENSE file.
package metricDataDispatcher
@@ -14,10 +14,10 @@ import (
"github.com/ClusterCockpit/cc-backend/internal/graph/model"
"github.com/ClusterCockpit/cc-backend/internal/metricdata"
"github.com/ClusterCockpit/cc-backend/pkg/archive"
"github.com/ClusterCockpit/cc-backend/pkg/log"
"github.com/ClusterCockpit/cc-backend/pkg/lrucache"
"github.com/ClusterCockpit/cc-backend/pkg/resampler"
"github.com/ClusterCockpit/cc-backend/pkg/schema"
cclog "github.com/ClusterCockpit/cc-lib/ccLogger"
"github.com/ClusterCockpit/cc-lib/lrucache"
"github.com/ClusterCockpit/cc-lib/resampler"
"github.com/ClusterCockpit/cc-lib/schema"
)
var cache *lrucache.Cache = lrucache.New(128 * 1024 * 1024)
@@ -68,10 +68,10 @@ func LoadData(job *schema.Job,
jd, err = repo.LoadData(job, metrics, scopes, ctx, resolution)
if err != nil {
if len(jd) != 0 {
log.Warnf("partial error: %s", err.Error())
cclog.Warnf("partial error: %s", err.Error())
// return err, 0, 0 // Reactivating will block archiving on one partial error
} else {
log.Error("Error while loading job data from metric repository")
cclog.Error("Error while loading job data from metric repository")
return err, 0, 0
}
}
@@ -80,15 +80,15 @@ func LoadData(job *schema.Job,
var jd_temp schema.JobData
jd_temp, err = archive.GetHandle().LoadJobData(job)
if err != nil {
log.Error("Error while loading job data from archive")
cclog.Error("Error while loading job data from archive")
return err, 0, 0
}
//Deep copy the cached archive hashmap
// Deep copy the cached archive hashmap
jd = metricdata.DeepCopy(jd_temp)
//Resampling for archived data.
//Pass the resolution from frontend here.
// Resampling for archived data.
// Pass the resolution from frontend here.
for _, v := range jd {
for _, v_ := range v {
timestep := 0
@@ -178,7 +178,7 @@ func LoadData(job *schema.Job,
})
if err, ok := data.(error); ok {
log.Error("Error in returned dataset")
cclog.Error("Error in returned dataset")
return nil, err
}
@@ -203,7 +203,7 @@ func LoadAverages(
stats, err := repo.LoadStats(job, metrics, ctx) // #166 how to handle stats for acc normalization?
if err != nil {
log.Errorf("Error while loading statistics for job %v (User %v, Project %v)", job.JobID, job.User, job.Project)
cclog.Errorf("Error while loading statistics for job %v (User %v, Project %v)", job.JobID, job.User, job.Project)
return err
}
@@ -231,7 +231,6 @@ func LoadScopedJobStats(
scopes []schema.MetricScope,
ctx context.Context,
) (schema.ScopedJobStats, error) {
if job.State != schema.JobStateRunning && !config.Keys.DisableArchive {
return archive.LoadScopedStatsFromArchive(job, metrics, scopes)
}
@@ -243,7 +242,7 @@ func LoadScopedJobStats(
scopedStats, err := repo.LoadScopedStats(job, metrics, scopes, ctx)
if err != nil {
log.Errorf("error while loading scoped statistics for job %d (User %s, Project %s)", job.JobID, job.User, job.Project)
cclog.Errorf("error while loading scoped statistics for job %d (User %s, Project %s)", job.JobID, job.User, job.Project)
return nil, err
}
@@ -268,7 +267,7 @@ func LoadJobStats(
stats, err := repo.LoadStats(job, metrics, ctx)
if err != nil {
log.Errorf("error while loading statistics for job %d (User %s, Project %s)", job.JobID, job.User, job.Project)
cclog.Errorf("error while loading statistics for job %d (User %s, Project %s)", job.JobID, job.User, job.Project)
return data, err
}
@@ -318,9 +317,9 @@ func LoadNodeData(
data, err := repo.LoadNodeData(cluster, metrics, nodes, scopes, from, to, ctx)
if err != nil {
if len(data) != 0 {
log.Warnf("partial error: %s", err.Error())
cclog.Warnf("partial error: %s", err.Error())
} else {
log.Error("Error while loading node data from metric repository")
cclog.Error("Error while loading node data from metric repository")
return nil, err
}
}
@@ -355,9 +354,9 @@ func LoadNodeListData(
data, totalNodes, hasNextPage, err := repo.LoadNodeListData(cluster, subCluster, nodeFilter, metrics, scopes, resolution, from, to, page, ctx)
if err != nil {
if len(data) != 0 {
log.Warnf("partial error: %s", err.Error())
cclog.Warnf("partial error: %s", err.Error())
} else {
log.Error("Error while loading node data from metric repository")
cclog.Error("Error while loading node data from metric repository")
return nil, totalNodes, hasNextPage, err
}
}
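Across the dispatcher hunks above, the logger rename is the only change, but the context lines expose the routing rule used everywhere in this file: jobs that are no longer running (with the archive enabled) are served from the job archive, while running jobs are read from the cluster's live metric data repository. A compressed sketch of that decision, with the repository passed in explicitly so the lookup code stays out of the way; this is an illustration, not the dispatcher's actual function:

package example

import (
	"context"

	"github.com/ClusterCockpit/cc-backend/internal/config"
	"github.com/ClusterCockpit/cc-backend/internal/metricdata"
	"github.com/ClusterCockpit/cc-backend/pkg/archive"
	"github.com/ClusterCockpit/cc-lib/schema"
)

// loadScopedStats routes finished jobs to the archive and running jobs to the
// live metric backend, mirroring the LoadScopedJobStats context lines above.
func loadScopedStats(
	job *schema.Job,
	metrics []string,
	scopes []schema.MetricScope,
	repo metricdata.MetricDataRepository,
	ctx context.Context,
) (schema.ScopedJobStats, error) {
	if job.State != schema.JobStateRunning && !config.Keys.DisableArchive {
		return archive.LoadScopedStatsFromArchive(job, metrics, scopes)
	}
	return repo.LoadScopedStats(job, metrics, scopes, ctx)
}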

View File

@@ -1,5 +1,5 @@
// Copyright (C) NHR@FAU, University Erlangen-Nuremberg.
// All rights reserved.
// All rights reserved. This file is part of cc-backend.
// Use of this source code is governed by a MIT-style
// license that can be found in the LICENSE file.
package metricdata
@@ -18,8 +18,8 @@ import (
"github.com/ClusterCockpit/cc-backend/internal/graph/model"
"github.com/ClusterCockpit/cc-backend/pkg/archive"
"github.com/ClusterCockpit/cc-backend/pkg/log"
"github.com/ClusterCockpit/cc-backend/pkg/schema"
cclog "github.com/ClusterCockpit/cc-lib/ccLogger"
"github.com/ClusterCockpit/cc-lib/schema"
)
type CCMetricStoreConfig struct {
@@ -82,7 +82,7 @@ type ApiMetricData struct {
func (ccms *CCMetricStore) Init(rawConfig json.RawMessage) error {
var config CCMetricStoreConfig
if err := json.Unmarshal(rawConfig, &config); err != nil {
log.Warn("Error while unmarshaling raw json config")
cclog.Warn("Error while unmarshaling raw json config")
return err
}
@@ -129,13 +129,13 @@ func (ccms *CCMetricStore) doRequest(
) (*ApiQueryResponse, error) {
buf := &bytes.Buffer{}
if err := json.NewEncoder(buf).Encode(body); err != nil {
log.Errorf("Error while encoding request body: %s", err.Error())
cclog.Errorf("Error while encoding request body: %s", err.Error())
return nil, err
}
req, err := http.NewRequestWithContext(ctx, http.MethodGet, ccms.queryEndpoint, buf)
if err != nil {
log.Errorf("Error while building request body: %s", err.Error())
cclog.Errorf("Error while building request body: %s", err.Error())
return nil, err
}
if ccms.jwt != "" {
@@ -151,7 +151,7 @@ func (ccms *CCMetricStore) doRequest(
res, err := ccms.client.Do(req)
if err != nil {
log.Errorf("Error while performing request: %s", err.Error())
cclog.Errorf("Error while performing request: %s", err.Error())
return nil, err
}
@@ -161,7 +161,7 @@ func (ccms *CCMetricStore) doRequest(
var resBody ApiQueryResponse
if err := json.NewDecoder(bufio.NewReader(res.Body)).Decode(&resBody); err != nil {
log.Errorf("Error while decoding result body: %s", err.Error())
cclog.Errorf("Error while decoding result body: %s", err.Error())
return nil, err
}
@@ -177,7 +177,7 @@ func (ccms *CCMetricStore) LoadData(
) (schema.JobData, error) {
queries, assignedScope, err := ccms.buildQueries(job, metrics, scopes, resolution)
if err != nil {
log.Errorf("Error while building queries for jobId %d, Metrics %v, Scopes %v: %s", job.JobID, metrics, scopes, err.Error())
cclog.Errorf("Error while building queries for jobId %d, Metrics %v, Scopes %v: %s", job.JobID, metrics, scopes, err.Error())
return nil, err
}
@@ -192,7 +192,7 @@ func (ccms *CCMetricStore) LoadData(
resBody, err := ccms.doRequest(ctx, &req)
if err != nil {
log.Errorf("Error while performing request: %s", err.Error())
cclog.Errorf("Error while performing request: %s", err.Error())
return nil, err
}
@@ -298,7 +298,7 @@ func (ccms *CCMetricStore) buildQueries(
mc := archive.GetMetricConfig(job.Cluster, metric)
if mc == nil {
// return nil, fmt.Errorf("METRICDATA/CCMS > metric '%s' is not specified for cluster '%s'", metric, job.Cluster)
log.Infof("metric '%s' is not specified for cluster '%s'", metric, job.Cluster)
cclog.Infof("metric '%s' is not specified for cluster '%s'", metric, job.Cluster)
continue
}
@@ -572,7 +572,7 @@ func (ccms *CCMetricStore) LoadStats(
) (map[string]map[string]schema.MetricStatistics, error) {
queries, _, err := ccms.buildQueries(job, metrics, []schema.MetricScope{schema.MetricScopeNode}, 0) // #166 Add scope here for analysis view accelerator normalization?
if err != nil {
log.Errorf("Error while building queries for jobId %d, Metrics %v: %s", job.JobID, metrics, err.Error())
cclog.Errorf("Error while building queries for jobId %d, Metrics %v: %s", job.JobID, metrics, err.Error())
return nil, err
}
@@ -587,7 +587,7 @@ func (ccms *CCMetricStore) LoadStats(
resBody, err := ccms.doRequest(ctx, &req)
if err != nil {
log.Errorf("Error while performing request: %s", err.Error())
cclog.Errorf("Error while performing request: %s", err.Error())
return nil, err
}
@@ -597,7 +597,7 @@ func (ccms *CCMetricStore) LoadStats(
metric := ccms.toLocalName(query.Metric)
data := res[0]
if data.Error != nil {
log.Errorf("fetching %s for node %s failed: %s", metric, query.Hostname, *data.Error)
cclog.Errorf("fetching %s for node %s failed: %s", metric, query.Hostname, *data.Error)
continue
}
@@ -608,7 +608,7 @@ func (ccms *CCMetricStore) LoadStats(
}
if data.Avg.IsNaN() || data.Min.IsNaN() || data.Max.IsNaN() {
log.Warnf("fetching %s for node %s failed: one of avg/min/max is NaN", metric, query.Hostname)
cclog.Warnf("fetching %s for node %s failed: one of avg/min/max is NaN", metric, query.Hostname)
continue
}
@@ -631,7 +631,7 @@ func (ccms *CCMetricStore) LoadScopedStats(
) (schema.ScopedJobStats, error) {
queries, assignedScope, err := ccms.buildQueries(job, metrics, scopes, 0)
if err != nil {
log.Errorf("Error while building queries for jobId %d, Metrics %v, Scopes %v: %s", job.JobID, metrics, scopes, err.Error())
cclog.Errorf("Error while building queries for jobId %d, Metrics %v, Scopes %v: %s", job.JobID, metrics, scopes, err.Error())
return nil, err
}
@@ -646,7 +646,7 @@ func (ccms *CCMetricStore) LoadScopedStats(
resBody, err := ccms.doRequest(ctx, &req)
if err != nil {
log.Errorf("Error while performing request: %s", err.Error())
cclog.Errorf("Error while performing request: %s", err.Error())
return nil, err
}
@@ -747,7 +747,7 @@ func (ccms *CCMetricStore) LoadNodeData(
resBody, err := ccms.doRequest(ctx, &req)
if err != nil {
log.Errorf("Error while performing request: %s", err.Error())
cclog.Errorf("Error while performing request: %s", err.Error())
return nil, err
}
@@ -863,7 +863,7 @@ func (ccms *CCMetricStore) LoadNodeListData(
queries, assignedScope, err := ccms.buildNodeQueries(cluster, subCluster, nodes, metrics, scopes, resolution)
if err != nil {
log.Errorf("Error while building node queries for Cluster %s, SubCLuster %s, Metrics %v, Scopes %v: %s", cluster, subCluster, metrics, scopes, err.Error())
cclog.Errorf("Error while building node queries for Cluster %s, SubCLuster %s, Metrics %v, Scopes %v: %s", cluster, subCluster, metrics, scopes, err.Error())
return nil, totalNodes, hasNextPage, err
}
@@ -878,7 +878,7 @@ func (ccms *CCMetricStore) LoadNodeListData(
resBody, err := ccms.doRequest(ctx, &req)
if err != nil {
log.Errorf("Error while performing request: %s", err.Error())
cclog.Errorf("Error while performing request: %s", err.Error())
return nil, totalNodes, hasNextPage, err
}
@@ -982,7 +982,7 @@ func (ccms *CCMetricStore) buildNodeQueries(
if subCluster != "" {
subClusterTopol, scterr = archive.GetSubCluster(cluster, subCluster)
if scterr != nil {
log.Errorf("could not load cluster %s subCluster %s topology: %s", cluster, subCluster, scterr.Error())
cclog.Errorf("could not load cluster %s subCluster %s topology: %s", cluster, subCluster, scterr.Error())
return nil, nil, scterr
}
}
@@ -992,7 +992,7 @@ func (ccms *CCMetricStore) buildNodeQueries(
mc := archive.GetMetricConfig(cluster, metric)
if mc == nil {
// return nil, fmt.Errorf("METRICDATA/CCMS > metric '%s' is not specified for cluster '%s'", metric, cluster)
log.Warnf("metric '%s' is not specified for cluster '%s'", metric, cluster)
cclog.Warnf("metric '%s' is not specified for cluster '%s'", metric, cluster)
continue
}
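The doRequest hunks above show the shared round trip every CCMetricStore loader uses: JSON-encode the query document, build the HTTP request with the caller's context, send it, and stream-decode the response body. The generic helper below is a stripped-down, hypothetical sketch of that pattern; the JWT authorization header and non-200 status handling of the real method are omitted:

package example

import (
	"bufio"
	"bytes"
	"context"
	"encoding/json"
	"net/http"
)

// doQuery encodes the request body, sends it with the caller's context and
// decodes the response into the caller-supplied result type.
func doQuery[Req any, Res any](ctx context.Context, client *http.Client, endpoint string, body Req) (*Res, error) {
	buf := &bytes.Buffer{}
	if err := json.NewEncoder(buf).Encode(body); err != nil {
		return nil, err
	}
	// The store expects the query document even on GET, as in the hunk above.
	req, err := http.NewRequestWithContext(ctx, http.MethodGet, endpoint, buf)
	if err != nil {
		return nil, err
	}
	res, err := client.Do(req)
	if err != nil {
		return nil, err
	}
	defer res.Body.Close()

	var resBody Res
	if err := json.NewDecoder(bufio.NewReader(res.Body)).Decode(&resBody); err != nil {
		return nil, err
	}
	return &resBody, nil
}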

View File

@@ -1,5 +1,5 @@
// Copyright (C) NHR@FAU, University Erlangen-Nuremberg.
// All rights reserved.
// All rights reserved. This file is part of cc-backend.
// Use of this source code is governed by a MIT-style
// license that can be found in the LICENSE file.
package metricdata
@@ -12,8 +12,8 @@ import (
"github.com/ClusterCockpit/cc-backend/internal/config"
"github.com/ClusterCockpit/cc-backend/internal/graph/model"
"github.com/ClusterCockpit/cc-backend/pkg/log"
"github.com/ClusterCockpit/cc-backend/pkg/schema"
cclog "github.com/ClusterCockpit/cc-lib/ccLogger"
"github.com/ClusterCockpit/cc-lib/schema"
)
type MetricDataRepository interface {
@@ -46,7 +46,7 @@ func Init() error {
Kind string `json:"kind"`
}
if err := json.Unmarshal(cluster.MetricDataRepository, &kind); err != nil {
log.Warn("Error while unmarshaling raw json MetricDataRepository")
cclog.Warn("Error while unmarshaling raw json MetricDataRepository")
return err
}
@@ -63,7 +63,7 @@ func Init() error {
}
if err := mdr.Init(cluster.MetricDataRepository); err != nil {
log.Errorf("Error initializing MetricDataRepository %v for cluster %v", kind.Kind, cluster.Name)
cclog.Errorf("Error initializing MetricDataRepository %v for cluster %v", kind.Kind, cluster.Name)
return err
}
metricDataRepos[cluster.Name] = mdr
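The Init hunk above only touches logging, but it outlines the registry pattern: each cluster's MetricDataRepository JSON carries a kind field that is unmarshaled first, the matching backend is constructed, and the same raw config is handed to that backend's Init. The reduction below is hypothetical; the actual kind-to-backend mapping sits in elided lines, and the map-based lookup here is purely illustrative:

package example

import (
	"encoding/json"
	"fmt"
)

type metricBackend interface {
	Init(rawConfig json.RawMessage) error
}

// pickBackend reads the "kind" discriminator, constructs the matching backend
// and initializes it with the unchanged raw JSON config.
func pickBackend(raw json.RawMessage, backends map[string]func() metricBackend) (metricBackend, error) {
	var kind struct {
		Kind string `json:"kind"`
	}
	if err := json.Unmarshal(raw, &kind); err != nil {
		return nil, err
	}
	create, ok := backends[kind.Kind]
	if !ok {
		return nil, fmt.Errorf("unknown metric data repository kind %q", kind.Kind)
	}
	mdr := create()
	if err := mdr.Init(raw); err != nil {
		return nil, err
	}
	return mdr, nil
}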

View File

@@ -1,5 +1,5 @@
// Copyright (C) 2022 DKRZ
// All rights reserved.
// All rights reserved. This file is part of cc-backend.
// Use of this source code is governed by a MIT-style
// license that can be found in the LICENSE file.
package metricdata
@@ -22,8 +22,8 @@ import (
"github.com/ClusterCockpit/cc-backend/internal/graph/model"
"github.com/ClusterCockpit/cc-backend/pkg/archive"
"github.com/ClusterCockpit/cc-backend/pkg/log"
"github.com/ClusterCockpit/cc-backend/pkg/schema"
cclog "github.com/ClusterCockpit/cc-lib/ccLogger"
"github.com/ClusterCockpit/cc-lib/schema"
promapi "github.com/prometheus/client_golang/api"
promv1 "github.com/prometheus/client_golang/api/prometheus/v1"
promcfg "github.com/prometheus/common/config"
@@ -160,7 +160,7 @@ func (pdb *PrometheusDataRepository) Init(rawConfig json.RawMessage) error {
var config PrometheusDataRepositoryConfig
// parse config
if err := json.Unmarshal(rawConfig, &config); err != nil {
log.Warn("Error while unmarshaling raw json config")
cclog.Warn("Error while unmarshaling raw json config")
return err
}
// support basic authentication
@@ -179,7 +179,7 @@ func (pdb *PrometheusDataRepository) Init(rawConfig json.RawMessage) error {
RoundTripper: rt,
})
if err != nil {
log.Error("Error while initializing new prometheus client")
cclog.Error("Error while initializing new prometheus client")
return err
}
// init query client
@@ -192,9 +192,9 @@ func (pdb *PrometheusDataRepository) Init(rawConfig json.RawMessage) error {
for metric, templ := range config.Templates {
pdb.templates[metric], err = template.New(metric).Parse(templ)
if err == nil {
log.Debugf("Added PromQL template for %s: %s", metric, templ)
cclog.Debugf("Added PromQL template for %s: %s", metric, templ)
} else {
log.Warnf("Failed to parse PromQL template %s for metric %s", templ, metric)
cclog.Warnf("Failed to parse PromQL template %s for metric %s", templ, metric)
}
}
return nil
@@ -221,7 +221,7 @@ func (pdb *PrometheusDataRepository) FormatQuery(
return "", errors.New(fmt.Sprintf("METRICDATA/PROMETHEUS > Error compiling template %v", templ))
} else {
query := buf.String()
log.Debugf("PromQL: %s", query)
cclog.Debugf("PromQL: %s", query)
return query, nil
}
} else {
@@ -285,7 +285,7 @@ func (pdb *PrometheusDataRepository) LoadData(
for _, scope := range scopes {
if scope != schema.MetricScopeNode {
logOnce.Do(func() {
log.Infof("Scope '%s' requested, but not yet supported: Will return 'node' scope only.", scope)
cclog.Infof("Scope '%s' requested, but not yet supported: Will return 'node' scope only.", scope)
})
continue
}
@@ -293,12 +293,12 @@ func (pdb *PrometheusDataRepository) LoadData(
for _, metric := range metrics {
metricConfig := archive.GetMetricConfig(job.Cluster, metric)
if metricConfig == nil {
log.Warnf("Error in LoadData: Metric %s for cluster %s not configured", metric, job.Cluster)
cclog.Warnf("Error in LoadData: Metric %s for cluster %s not configured", metric, job.Cluster)
return nil, errors.New("Prometheus config error")
}
query, err := pdb.FormatQuery(metric, scope, nodes, job.Cluster)
if err != nil {
log.Warn("Error while formatting prometheus query")
cclog.Warn("Error while formatting prometheus query")
return nil, err
}
@@ -310,11 +310,11 @@ func (pdb *PrometheusDataRepository) LoadData(
}
result, warnings, err := pdb.queryClient.QueryRange(ctx, query, r)
if err != nil {
log.Errorf("Prometheus query error in LoadData: %v\nQuery: %s", err, query)
cclog.Errorf("Prometheus query error in LoadData: %v\nQuery: %s", err, query)
return nil, errors.New("Prometheus query error")
}
if len(warnings) > 0 {
log.Warnf("Warnings: %v\n", warnings)
cclog.Warnf("Warnings: %v\n", warnings)
}
// init data structures
@@ -360,7 +360,7 @@ func (pdb *PrometheusDataRepository) LoadStats(
data, err := pdb.LoadData(job, metrics, []schema.MetricScope{schema.MetricScopeNode}, ctx, 0 /*resolution here*/)
if err != nil {
log.Warn("Error while loading job for stats")
cclog.Warn("Error while loading job for stats")
return nil, err
}
for metric, metricData := range data {
@@ -391,19 +391,19 @@ func (pdb *PrometheusDataRepository) LoadNodeData(
for _, scope := range scopes {
if scope != schema.MetricScopeNode {
logOnce.Do(func() {
log.Infof("Note: Scope '%s' requested, but not yet supported: Will return 'node' scope only.", scope)
cclog.Infof("Note: Scope '%s' requested, but not yet supported: Will return 'node' scope only.", scope)
})
continue
}
for _, metric := range metrics {
metricConfig := archive.GetMetricConfig(cluster, metric)
if metricConfig == nil {
log.Warnf("Error in LoadNodeData: Metric %s for cluster %s not configured", metric, cluster)
cclog.Warnf("Error in LoadNodeData: Metric %s for cluster %s not configured", metric, cluster)
return nil, errors.New("Prometheus config error")
}
query, err := pdb.FormatQuery(metric, scope, nodes, cluster)
if err != nil {
log.Warn("Error while formatting prometheus query")
cclog.Warn("Error while formatting prometheus query")
return nil, err
}
@@ -415,11 +415,11 @@ func (pdb *PrometheusDataRepository) LoadNodeData(
}
result, warnings, err := pdb.queryClient.QueryRange(ctx, query, r)
if err != nil {
log.Errorf("Prometheus query error in LoadNodeData: %v\n", err)
cclog.Errorf("Prometheus query error in LoadNodeData: %v\n", err)
return nil, errors.New("Prometheus query error")
}
if len(warnings) > 0 {
log.Warnf("Warnings: %v\n", warnings)
cclog.Warnf("Warnings: %v\n", warnings)
}
step := int64(metricConfig.Timestep)
@@ -444,7 +444,7 @@ func (pdb *PrometheusDataRepository) LoadNodeData(
}
}
t1 := time.Since(t0)
log.Debugf("LoadNodeData of %v nodes took %s", len(data), t1)
cclog.Debugf("LoadNodeData of %v nodes took %s", len(data), t1)
return data, nil
}
@@ -459,7 +459,7 @@ func (pdb *PrometheusDataRepository) LoadScopedStats(
scopedJobStats := make(schema.ScopedJobStats)
data, err := pdb.LoadData(job, metrics, []schema.MetricScope{schema.MetricScopeNode}, ctx, 0 /*resolution here*/)
if err != nil {
log.Warn("Error while loading job for scopedJobStats")
cclog.Warn("Error while loading job for scopedJobStats")
return nil, err
}
@@ -467,7 +467,7 @@ func (pdb *PrometheusDataRepository) LoadScopedStats(
for _, scope := range scopes {
if scope != schema.MetricScopeNode {
logOnce.Do(func() {
log.Infof("Note: Scope '%s' requested, but not yet supported: Will return 'node' scope only.", scope)
cclog.Infof("Note: Scope '%s' requested, but not yet supported: Will return 'node' scope only.", scope)
})
continue
}
@@ -563,7 +563,7 @@ func (pdb *PrometheusDataRepository) LoadNodeListData(
for _, scope := range scopes {
if scope != schema.MetricScopeNode {
logOnce.Do(func() {
log.Infof("Note: Scope '%s' requested, but not yet supported: Will return 'node' scope only.", scope)
cclog.Infof("Note: Scope '%s' requested, but not yet supported: Will return 'node' scope only.", scope)
})
continue
}
@@ -571,12 +571,12 @@ func (pdb *PrometheusDataRepository) LoadNodeListData(
for _, metric := range metrics {
metricConfig := archive.GetMetricConfig(cluster, metric)
if metricConfig == nil {
log.Warnf("Error in LoadNodeListData: Metric %s for cluster %s not configured", metric, cluster)
cclog.Warnf("Error in LoadNodeListData: Metric %s for cluster %s not configured", metric, cluster)
return nil, totalNodes, hasNextPage, errors.New("Prometheus config error")
}
query, err := pdb.FormatQuery(metric, scope, nodes, cluster)
if err != nil {
log.Warn("Error while formatting prometheus query")
cclog.Warn("Error while formatting prometheus query")
return nil, totalNodes, hasNextPage, err
}
@@ -588,11 +588,11 @@ func (pdb *PrometheusDataRepository) LoadNodeListData(
}
result, warnings, err := pdb.queryClient.QueryRange(ctx, query, r)
if err != nil {
log.Errorf("Prometheus query error in LoadNodeData: %v\n", err)
cclog.Errorf("Prometheus query error in LoadNodeData: %v\n", err)
return nil, totalNodes, hasNextPage, errors.New("Prometheus query error")
}
if len(warnings) > 0 {
log.Warnf("Warnings: %v\n", warnings)
cclog.Warnf("Warnings: %v\n", warnings)
}
step := int64(metricConfig.Timestep)
@@ -628,6 +628,6 @@ func (pdb *PrometheusDataRepository) LoadNodeListData(
}
}
t1 := time.Since(t0)
log.Debugf("LoadNodeListData of %v nodes took %s", len(data), t1)
cclog.Debugf("LoadNodeListData of %v nodes took %s", len(data), t1)
return data, totalNodes, hasNextPage, nil
}
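The FormatQuery hunks above rely on one parsed template per metric, built in Init and executed against the node list to yield the final PromQL string. A hypothetical, trimmed-down version of that lookup-and-execute step; the real method handles the no-template case separately, which is not shown in the hunk:

package example

import (
	"bytes"
	"fmt"
	"text/template" // assumption: the hunk only shows template.New(...).Parse(...)
)

// formatQuery executes the pre-parsed per-metric template against the query
// data (node list and similar fields) and returns the resulting PromQL string.
func formatQuery(templates map[string]*template.Template, metric string, data any) (string, error) {
	templ, ok := templates[metric]
	if !ok {
		return "", fmt.Errorf("no PromQL template configured for metric %s", metric)
	}
	buf := &bytes.Buffer{}
	if err := templ.Execute(buf, data); err != nil {
		return "", err
	}
	return buf.String(), nil
}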

View File

@@ -1,5 +1,5 @@
// Copyright (C) NHR@FAU, University Erlangen-Nuremberg.
// All rights reserved.
// All rights reserved. This file is part of cc-backend.
// Use of this source code is governed by a MIT-style
// license that can be found in the LICENSE file.
package metricdata
@@ -10,7 +10,7 @@ import (
"time"
"github.com/ClusterCockpit/cc-backend/internal/graph/model"
"github.com/ClusterCockpit/cc-backend/pkg/schema"
"github.com/ClusterCockpit/cc-lib/schema"
)
var TestLoadDataCallback func(job *schema.Job, metrics []string, scopes []schema.MetricScope, ctx context.Context, resolution int) (schema.JobData, error) = func(job *schema.Job, metrics []string, scopes []schema.MetricScope, ctx context.Context, resolution int) (schema.JobData, error) {
@@ -29,16 +29,16 @@ func (tmdr *TestMetricDataRepository) LoadData(
metrics []string,
scopes []schema.MetricScope,
ctx context.Context,
resolution int) (schema.JobData, error) {
resolution int,
) (schema.JobData, error) {
return TestLoadDataCallback(job, metrics, scopes, ctx, resolution)
}
func (tmdr *TestMetricDataRepository) LoadStats(
job *schema.Job,
metrics []string,
ctx context.Context) (map[string]map[string]schema.MetricStatistics, error) {
ctx context.Context,
) (map[string]map[string]schema.MetricStatistics, error) {
panic("TODO")
}
@@ -46,8 +46,8 @@ func (tmdr *TestMetricDataRepository) LoadScopedStats(
job *schema.Job,
metrics []string,
scopes []schema.MetricScope,
ctx context.Context) (schema.ScopedJobStats, error) {
ctx context.Context,
) (schema.ScopedJobStats, error) {
panic("TODO")
}
@@ -56,8 +56,8 @@ func (tmdr *TestMetricDataRepository) LoadNodeData(
metrics, nodes []string,
scopes []schema.MetricScope,
from, to time.Time,
ctx context.Context) (map[string]map[string][]*schema.JobMetric, error) {
ctx context.Context,
) (map[string]map[string][]*schema.JobMetric, error) {
panic("TODO")
}
@@ -70,7 +70,6 @@ func (tmdr *TestMetricDataRepository) LoadNodeListData(
page *model.PageRequest,
ctx context.Context,
) (map[string]schema.JobData, int, bool, error) {
panic("TODO")
}

View File

@@ -1,5 +1,5 @@
// Copyright (C) NHR@FAU, University Erlangen-Nuremberg.
// All rights reserved.
// All rights reserved. This file is part of cc-backend.
// Use of this source code is governed by a MIT-style
// license that can be found in the LICENSE file.
package repository
@@ -9,7 +9,7 @@ import (
"sync"
"time"
"github.com/ClusterCockpit/cc-backend/pkg/log"
cclog "github.com/ClusterCockpit/cc-lib/ccLogger"
"github.com/jmoiron/sqlx"
"github.com/mattn/go-sqlite3"
"github.com/qustavo/sqlhooks/v2"
@@ -53,7 +53,7 @@ func Connect(driver string, db string) {
// - Enable foreign key checks
opts.URL += "?_journal=WAL&_timeout=5000&_fk=true"
if log.Loglevel() == "debug" {
if cclog.Loglevel() == "debug" {
sql.Register("sqlite3WithHooks", sqlhooks.Wrap(&sqlite3.SQLiteDriver{}, &Hooks{}))
dbHandle, err = sqlx.Open("sqlite3WithHooks", opts.URL)
} else {
@@ -63,11 +63,11 @@ func Connect(driver string, db string) {
opts.URL += "?multiStatements=true"
dbHandle, err = sqlx.Open("mysql", opts.URL)
default:
log.Abortf("DB Connection: Unsupported database driver '%s'.\n", driver)
cclog.Abortf("DB Connection: Unsupported database driver '%s'.\n", driver)
}
if err != nil {
log.Abortf("DB Connection: Could not connect to '%s' database with sqlx.Open().\nError: %s\n", driver, err.Error())
cclog.Abortf("DB Connection: Could not connect to '%s' database with sqlx.Open().\nError: %s\n", driver, err.Error())
}
dbHandle.SetMaxOpenConns(opts.MaxOpenConnections)
@@ -78,14 +78,14 @@ func Connect(driver string, db string) {
dbConnInstance = &DBConnection{DB: dbHandle, Driver: driver}
err = checkDBVersion(driver, dbHandle.DB)
if err != nil {
log.Abortf("DB Connection: Failed DB version check.\nError: %s\n", err.Error())
cclog.Abortf("DB Connection: Failed DB version check.\nError: %s\n", err.Error())
}
})
}
func GetConnection() *DBConnection {
if dbConnInstance == nil {
log.Fatalf("Database connection not initialized!")
cclog.Fatalf("Database connection not initialized!")
}
return dbConnInstance

View File

@@ -1,5 +1,5 @@
// Copyright (C) NHR@FAU, University Erlangen-Nuremberg.
// All rights reserved.
// All rights reserved. This file is part of cc-backend.
// Use of this source code is governed by a MIT-style
// license that can be found in the LICENSE file.
package repository
@@ -8,7 +8,7 @@ import (
"context"
"time"
"github.com/ClusterCockpit/cc-backend/pkg/log"
cclog "github.com/ClusterCockpit/cc-lib/ccLogger"
)
// Hooks satisfies the sqlhook.Hooks interface
@@ -16,13 +16,13 @@ type Hooks struct{}
// Before hook will print the query with its args and return the context with the timestamp
func (h *Hooks) Before(ctx context.Context, query string, args ...any) (context.Context, error) {
log.Debugf("SQL query %s %q", query, args)
cclog.Debugf("SQL query %s %q", query, args)
return context.WithValue(ctx, "begin", time.Now()), nil
}
// After hook will get the timestamp registered on the Before hook and print the elapsed time
func (h *Hooks) After(ctx context.Context, query string, args ...any) (context.Context, error) {
begin := ctx.Value("begin").(time.Time)
log.Debugf("Took: %s\n", time.Since(begin))
cclog.Debugf("Took: %s\n", time.Since(begin))
return ctx, nil
}
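The Before/After hooks above only take effect once they are wrapped around the SQLite driver, which the Connect hunk earlier in this commit does exclusively at debug log level. A small sketch of that wiring, assuming it sits next to the Hooks type in the repository package; in the real code the registration happens inside Connect, not in a separate helper:

package repository

import (
	"database/sql"

	"github.com/jmoiron/sqlx"
	"github.com/mattn/go-sqlite3"
	"github.com/qustavo/sqlhooks/v2"
)

// openWithHooks registers the hook-wrapped SQLite driver only when debug
// logging is active, so the per-query timing output stays out of normal runs.
func openWithHooks(url string, debug bool) (*sqlx.DB, error) {
	if debug {
		sql.Register("sqlite3WithHooks", sqlhooks.Wrap(&sqlite3.SQLiteDriver{}, &Hooks{}))
		return sqlx.Open("sqlite3WithHooks", url)
	}
	return sqlx.Open("sqlite3", url)
}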

View File

@@ -1,5 +1,5 @@
// Copyright (C) NHR@FAU, University Erlangen-Nuremberg.
// All rights reserved.
// All rights reserved. This file is part of cc-backend.
// Use of this source code is governed by a MIT-style
// license that can be found in the LICENSE file.
package repository
@@ -16,9 +16,9 @@ import (
"time"
"github.com/ClusterCockpit/cc-backend/pkg/archive"
"github.com/ClusterCockpit/cc-backend/pkg/log"
"github.com/ClusterCockpit/cc-backend/pkg/lrucache"
"github.com/ClusterCockpit/cc-backend/pkg/schema"
cclog "github.com/ClusterCockpit/cc-lib/ccLogger"
"github.com/ClusterCockpit/cc-lib/lrucache"
"github.com/ClusterCockpit/cc-lib/schema"
sq "github.com/Masterminds/squirrel"
"github.com/jmoiron/sqlx"
)
@@ -76,18 +76,18 @@ func scanJob(row interface{ Scan(...any) error }) (*schema.Job, error) {
&job.StartTime, &job.Partition, &job.ArrayJobId, &job.NumNodes, &job.NumHWThreads,
&job.NumAcc, &job.Exclusive, &job.MonitoringStatus, &job.SMT, &job.State,
&job.Duration, &job.Walltime, &job.RawResources, &job.RawFootprint, &job.Energy); err != nil {
log.Warnf("Error while scanning rows (Job): %v", err)
cclog.Warnf("Error while scanning rows (Job): %v", err)
return nil, err
}
if err := json.Unmarshal(job.RawResources, &job.Resources); err != nil {
log.Warn("Error while unmarshaling raw resources json")
cclog.Warn("Error while unmarshaling raw resources json")
return nil, err
}
job.RawResources = nil
if err := json.Unmarshal(job.RawFootprint, &job.Footprint); err != nil {
log.Warnf("Error while unmarshaling raw footprint json: %v", err)
cclog.Warnf("Error while unmarshaling raw footprint json: %v", err)
return nil, err
}
job.RawFootprint = nil
@@ -109,7 +109,7 @@ func (r *JobRepository) Optimize() error {
return err
}
case "mysql":
log.Info("Optimize currently not supported for mysql driver")
cclog.Info("Optimize currently not supported for mysql driver")
}
return nil
@@ -160,7 +160,7 @@ func (r *JobRepository) FetchMetadata(job *schema.Job) (map[string]string, error
if err := sq.Select("job.meta_data").From("job").Where("job.id = ?", job.ID).
RunWith(r.stmtCache).QueryRow().Scan(&job.RawMetaData); err != nil {
log.Warn("Error while scanning for job metadata")
cclog.Warn("Error while scanning for job metadata")
return nil, err
}
@@ -169,12 +169,12 @@ func (r *JobRepository) FetchMetadata(job *schema.Job) (map[string]string, error
}
if err := json.Unmarshal(job.RawMetaData, &job.MetaData); err != nil {
log.Warn("Error while unmarshaling raw metadata json")
cclog.Warn("Error while unmarshaling raw metadata json")
return nil, err
}
r.cache.Put(cachekey, job.MetaData, len(job.RawMetaData), 24*time.Hour)
log.Debugf("Timer FetchMetadata %s", time.Since(start))
cclog.Debugf("Timer FetchMetadata %s", time.Since(start))
return job.MetaData, nil
}
@@ -183,7 +183,7 @@ func (r *JobRepository) UpdateMetadata(job *schema.Job, key, val string) (err er
r.cache.Del(cachekey)
if job.MetaData == nil {
if _, err = r.FetchMetadata(job); err != nil {
log.Warnf("Error while fetching metadata for job, DB ID '%v'", job.ID)
cclog.Warnf("Error while fetching metadata for job, DB ID '%v'", job.ID)
return err
}
}
@@ -198,7 +198,7 @@ func (r *JobRepository) UpdateMetadata(job *schema.Job, key, val string) (err er
}
if job.RawMetaData, err = json.Marshal(job.MetaData); err != nil {
log.Warnf("Error while marshaling metadata for job, DB ID '%v'", job.ID)
cclog.Warnf("Error while marshaling metadata for job, DB ID '%v'", job.ID)
return err
}
@@ -206,7 +206,7 @@ func (r *JobRepository) UpdateMetadata(job *schema.Job, key, val string) (err er
Set("meta_data", job.RawMetaData).
Where("job.id = ?", job.ID).
RunWith(r.stmtCache).Exec(); err != nil {
log.Warnf("Error while updating metadata for job, DB ID '%v'", job.ID)
cclog.Warnf("Error while updating metadata for job, DB ID '%v'", job.ID)
return err
}
@@ -219,7 +219,7 @@ func (r *JobRepository) FetchFootprint(job *schema.Job) (map[string]float64, err
if err := sq.Select("job.footprint").From("job").Where("job.id = ?", job.ID).
RunWith(r.stmtCache).QueryRow().Scan(&job.RawFootprint); err != nil {
log.Warn("Error while scanning for job footprint")
cclog.Warn("Error while scanning for job footprint")
return nil, err
}
@@ -228,11 +228,11 @@ func (r *JobRepository) FetchFootprint(job *schema.Job) (map[string]float64, err
}
if err := json.Unmarshal(job.RawFootprint, &job.Footprint); err != nil {
log.Warn("Error while unmarshaling raw footprint json")
cclog.Warn("Error while unmarshaling raw footprint json")
return nil, err
}
log.Debugf("Timer FetchFootprint %s", time.Since(start))
cclog.Debugf("Timer FetchFootprint %s", time.Since(start))
return job.Footprint, nil
}
@@ -246,7 +246,7 @@ func (r *JobRepository) FetchEnergyFootprint(job *schema.Job) (map[string]float6
if err := sq.Select("job.energy_footprint").From("job").Where("job.id = ?", job.ID).
RunWith(r.stmtCache).QueryRow().Scan(&job.RawEnergyFootprint); err != nil {
log.Warn("Error while scanning for job energy_footprint")
cclog.Warn("Error while scanning for job energy_footprint")
return nil, err
}
@@ -255,12 +255,12 @@ func (r *JobRepository) FetchEnergyFootprint(job *schema.Job) (map[string]float6
}
if err := json.Unmarshal(job.RawEnergyFootprint, &job.EnergyFootprint); err != nil {
log.Warn("Error while unmarshaling raw energy footprint json")
cclog.Warn("Error while unmarshaling raw energy footprint json")
return nil, err
}
r.cache.Put(cachekey, job.EnergyFootprint, len(job.EnergyFootprint), 24*time.Hour)
log.Debugf("Timer FetchEnergyFootprint %s", time.Since(start))
cclog.Debugf("Timer FetchEnergyFootprint %s", time.Since(start))
return job.EnergyFootprint, nil
}
@@ -273,9 +273,9 @@ func (r *JobRepository) DeleteJobsBefore(startTime int64) (int, error) {
if err != nil {
s, _, _ := qd.ToSql()
log.Errorf(" DeleteJobsBefore(%d) with %s: error %#v", startTime, s, err)
cclog.Errorf(" DeleteJobsBefore(%d) with %s: error %#v", startTime, s, err)
} else {
log.Debugf("DeleteJobsBefore(%d): Deleted %d jobs", startTime, cnt)
cclog.Debugf("DeleteJobsBefore(%d): Deleted %d jobs", startTime, cnt)
}
return cnt, err
}
@@ -286,9 +286,9 @@ func (r *JobRepository) DeleteJobById(id int64) error {
if err != nil {
s, _, _ := qd.ToSql()
log.Errorf("DeleteJobById(%d) with %s : error %#v", id, s, err)
cclog.Errorf("DeleteJobById(%d) with %s : error %#v", id, s, err)
} else {
log.Debugf("DeleteJobById(%d): Success", id)
cclog.Debugf("DeleteJobById(%d): Success", id)
}
return err
}
@@ -351,7 +351,7 @@ func (r *JobRepository) FindColumnValue(user *schema.User, searchterm string, ta
}
return "", ErrNotFound
} else {
log.Infof("Non-Admin User %s : Requested Query '%s' on table '%s' : Forbidden", user.Name, query, table)
cclog.Infof("Non-Admin User %s : Requested Query '%s' on table '%s' : Forbidden", user.Name, query, table)
return "", ErrForbidden
}
}
@@ -370,7 +370,7 @@ func (r *JobRepository) FindColumnValues(user *schema.User, query string, table
err := rows.Scan(&result)
if err != nil {
rows.Close()
log.Warnf("Error while scanning rows: %v", err)
cclog.Warnf("Error while scanning rows: %v", err)
return emptyResult, err
}
results = append(results, result)
@@ -380,7 +380,7 @@ func (r *JobRepository) FindColumnValues(user *schema.User, query string, table
return emptyResult, ErrNotFound
} else {
log.Infof("Non-Admin User %s : Requested Query '%s' on table '%s' : Forbidden", user.Name, query, table)
cclog.Infof("Non-Admin User %s : Requested Query '%s' on table '%s' : Forbidden", user.Name, query, table)
return emptyResult, ErrForbidden
}
}
@@ -399,7 +399,7 @@ func (r *JobRepository) Partitions(cluster string) ([]string, error) {
if err != nil {
return nil, err
}
log.Debugf("Timer Partitions %s", time.Since(start))
cclog.Debugf("Timer Partitions %s", time.Since(start))
return partitions.([]string), nil
}
@@ -413,7 +413,7 @@ func (r *JobRepository) AllocatedNodes(cluster string) (map[string]map[string]in
Where("job.cluster = ?", cluster).
RunWith(r.stmtCache).Query()
if err != nil {
log.Error("Error while running query")
cclog.Error("Error while running query")
return nil, err
}
@@ -424,11 +424,11 @@ func (r *JobRepository) AllocatedNodes(cluster string) (map[string]map[string]in
var resources []*schema.Resource
var subcluster string
if err := rows.Scan(&raw, &subcluster); err != nil {
log.Warn("Error while scanning rows")
cclog.Warn("Error while scanning rows")
return nil, err
}
if err := json.Unmarshal(raw, &resources); err != nil {
log.Warn("Error while unmarshaling raw resources json")
cclog.Warn("Error while unmarshaling raw resources json")
return nil, err
}
@@ -443,7 +443,7 @@ func (r *JobRepository) AllocatedNodes(cluster string) (map[string]map[string]in
}
}
log.Debugf("Timer AllocatedNodes %s", time.Since(start))
cclog.Debugf("Timer AllocatedNodes %s", time.Since(start))
return subclusters, nil
}
@@ -459,20 +459,20 @@ func (r *JobRepository) StopJobsExceedingWalltimeBy(seconds int) error {
Where(fmt.Sprintf("(%d - job.start_time) > (job.walltime + %d)", time.Now().Unix(), seconds)).
RunWith(r.DB).Exec()
if err != nil {
log.Warn("Error while stopping jobs exceeding walltime")
cclog.Warn("Error while stopping jobs exceeding walltime")
return err
}
rowsAffected, err := res.RowsAffected()
if err != nil {
log.Warn("Error while fetching affected rows after stopping due to exceeded walltime")
cclog.Warn("Error while fetching affected rows after stopping due to exceeded walltime")
return err
}
if rowsAffected > 0 {
log.Infof("%d jobs have been marked as failed due to running too long", rowsAffected)
cclog.Infof("%d jobs have been marked as failed due to running too long", rowsAffected)
}
log.Debugf("Timer StopJobsExceedingWalltimeBy %s", time.Since(start))
cclog.Debugf("Timer StopJobsExceedingWalltimeBy %s", time.Since(start))
return nil
}
@@ -482,7 +482,7 @@ func (r *JobRepository) FindJobIdsByTag(tagId int64) ([]int64, error) {
Where(sq.Eq{"jobtag.tag_id": tagId}).Distinct()
rows, err := query.RunWith(r.stmtCache).Query()
if err != nil {
log.Error("Error while running query")
cclog.Error("Error while running query")
return nil, err
}
jobIds := make([]int64, 0, 100)
@@ -492,7 +492,7 @@ func (r *JobRepository) FindJobIdsByTag(tagId int64) ([]int64, error) {
if err := rows.Scan(&jobId); err != nil {
rows.Close()
log.Warn("Error while scanning rows")
cclog.Warn("Error while scanning rows")
return nil, err
}
@@ -511,7 +511,7 @@ func (r *JobRepository) FindRunningJobs(cluster string) ([]*schema.Job, error) {
rows, err := query.RunWith(r.stmtCache).Query()
if err != nil {
log.Error("Error while running query")
cclog.Error("Error while running query")
return nil, err
}
@@ -520,13 +520,13 @@ func (r *JobRepository) FindRunningJobs(cluster string) ([]*schema.Job, error) {
job, err := scanJob(rows)
if err != nil {
rows.Close()
log.Warn("Error while scanning rows")
cclog.Warn("Error while scanning rows")
return nil, err
}
jobs = append(jobs, job)
}
log.Infof("Return job count %d", len(jobs))
cclog.Infof("Return job count %d", len(jobs))
return jobs, nil
}
@@ -551,18 +551,18 @@ func (r *JobRepository) FindJobsBetween(startTimeBegin int64, startTimeEnd int64
}
if startTimeBegin == 0 {
log.Infof("Find jobs before %d", startTimeEnd)
cclog.Infof("Find jobs before %d", startTimeEnd)
query = sq.Select(jobColumns...).From("job").Where(fmt.Sprintf(
"job.start_time < %d", startTimeEnd))
} else {
log.Infof("Find jobs between %d and %d", startTimeBegin, startTimeEnd)
cclog.Infof("Find jobs between %d and %d", startTimeBegin, startTimeEnd)
query = sq.Select(jobColumns...).From("job").Where(fmt.Sprintf(
"job.start_time BETWEEN %d AND %d", startTimeBegin, startTimeEnd))
}
rows, err := query.RunWith(r.stmtCache).Query()
if err != nil {
log.Error("Error while running query")
cclog.Error("Error while running query")
return nil, err
}
@@ -571,13 +571,13 @@ func (r *JobRepository) FindJobsBetween(startTimeBegin int64, startTimeEnd int64
job, err := scanJob(rows)
if err != nil {
rows.Close()
log.Warn("Error while scanning rows")
cclog.Warn("Error while scanning rows")
return nil, err
}
jobs = append(jobs, job)
}
log.Infof("Return job count %d", len(jobs))
cclog.Infof("Return job count %d", len(jobs))
return jobs, nil
}
@@ -612,7 +612,7 @@ func (r *JobRepository) UpdateEnergy(
/* Note: Only Called for Running Jobs during Intermediate Update or on Archiving */
sc, err := archive.GetSubCluster(jobMeta.Cluster, jobMeta.SubCluster)
if err != nil {
log.Errorf("cannot get subcluster: %s", err.Error())
cclog.Errorf("cannot get subcluster: %s", err.Error())
return stmt, err
}
energyFootprint := make(map[string]float64)
@@ -625,7 +625,7 @@ func (r *JobRepository) UpdateEnergy(
if i, err := archive.MetricIndex(sc.MetricConfig, fp); err == nil {
// Note: For DB data, calculate and save as kWh
if sc.MetricConfig[i].Energy == "energy" { // this metric has energy as unit (Joules or Wh)
log.Warnf("Update EnergyFootprint for Job %d and Metric %s on cluster %s: Set to 'energy' in cluster.json: Not implemented, will return 0.0", jobMeta.JobID, jobMeta.Cluster, fp)
cclog.Warnf("Update EnergyFootprint for Job %d and Metric %s on cluster %s: Set to 'energy' in cluster.json: Not implemented, will return 0.0", jobMeta.JobID, jobMeta.Cluster, fp)
// FIXME: Needs sum as stats type
} else if sc.MetricConfig[i].Energy == "power" { // this metric has power as unit (Watt)
// Energy: Power (in Watts) * Time (in Seconds)
@@ -637,18 +637,18 @@ func (r *JobRepository) UpdateEnergy(
metricEnergy = math.Round(rawEnergy*100.0) / 100.0
}
} else {
log.Warnf("Error while collecting energy metric %s for job, DB ID '%v', return '0.0'", fp, jobMeta.ID)
cclog.Warnf("Error while collecting energy metric %s for job, DB ID '%v', return '0.0'", fp, jobMeta.ID)
}
energyFootprint[fp] = metricEnergy
totalEnergy += metricEnergy
// log.Infof("Metric %s Average %f -> %f kWh | Job %d Total -> %f kWh", fp, LoadJobStat(jobMeta, fp, "avg"), energy, jobMeta.JobID, totalEnergy)
// cclog.Infof("Metric %s Average %f -> %f kWh | Job %d Total -> %f kWh", fp, LoadJobStat(jobMeta, fp, "avg"), energy, jobMeta.JobID, totalEnergy)
}
var rawFootprint []byte
if rawFootprint, err = json.Marshal(energyFootprint); err != nil {
log.Warnf("Error while marshaling energy footprint for job INTO BYTES, DB ID '%v'", jobMeta.ID)
cclog.Warnf("Error while marshaling energy footprint for job INTO BYTES, DB ID '%v'", jobMeta.ID)
return stmt, err
}
@@ -662,7 +662,7 @@ func (r *JobRepository) UpdateFootprint(
/* Note: Only Called for Running Jobs during Intermediate Update or on Archiving */
sc, err := archive.GetSubCluster(jobMeta.Cluster, jobMeta.SubCluster)
if err != nil {
log.Errorf("cannot get subcluster: %s", err.Error())
cclog.Errorf("cannot get subcluster: %s", err.Error())
return stmt, err
}
footprint := make(map[string]float64)
@@ -676,7 +676,7 @@ func (r *JobRepository) UpdateFootprint(
}
if statType != "avg" && statType != "min" && statType != "max" {
log.Warnf("unknown statType for footprint update: %s", statType)
cclog.Warnf("unknown statType for footprint update: %s", statType)
return stmt, fmt.Errorf("unknown statType for footprint update: %s", statType)
}
@@ -690,7 +690,7 @@ func (r *JobRepository) UpdateFootprint(
var rawFootprint []byte
if rawFootprint, err = json.Marshal(footprint); err != nil {
log.Warnf("Error while marshaling footprint for job INTO BYTES, DB ID '%v'", jobMeta.ID)
cclog.Warnf("Error while marshaling footprint for job INTO BYTES, DB ID '%v'", jobMeta.ID)
return stmt, err
}

View File

@@ -1,5 +1,5 @@
// Copyright (C) NHR@FAU, University Erlangen-Nuremberg.
// All rights reserved.
// All rights reserved. This file is part of cc-backend.
// Use of this source code is governed by a MIT-style
// license that can be found in the LICENSE file.
package repository
@@ -8,8 +8,8 @@ import (
"encoding/json"
"fmt"
"github.com/ClusterCockpit/cc-backend/pkg/log"
"github.com/ClusterCockpit/cc-backend/pkg/schema"
cclog "github.com/ClusterCockpit/cc-lib/ccLogger"
"github.com/ClusterCockpit/cc-lib/schema"
sq "github.com/Masterminds/squirrel"
)
@@ -34,12 +34,12 @@ func (r *JobRepository) InsertJob(job *schema.Job) (int64, error) {
res, err := r.DB.NamedExec(NamedJobCacheInsert, job)
r.Mutex.Unlock()
if err != nil {
log.Warn("Error while NamedJobInsert")
cclog.Warn("Error while NamedJobInsert")
return 0, err
}
id, err := res.LastInsertId()
if err != nil {
log.Warn("Error while getting last insert ID")
cclog.Warn("Error while getting last insert ID")
return 0, err
}
@@ -54,7 +54,7 @@ func (r *JobRepository) SyncJobs() ([]*schema.Job, error) {
rows, err := query.RunWith(r.stmtCache).Query()
if err != nil {
log.Errorf("Error while running query %v", err)
cclog.Errorf("Error while running query %v", err)
return nil, err
}
@@ -63,7 +63,7 @@ func (r *JobRepository) SyncJobs() ([]*schema.Job, error) {
job, err := scanJob(rows)
if err != nil {
rows.Close()
log.Warn("Error while scanning rows")
cclog.Warn("Error while scanning rows")
return nil, err
}
jobs = append(jobs, job)
@@ -72,13 +72,13 @@ func (r *JobRepository) SyncJobs() ([]*schema.Job, error) {
_, err = r.DB.Exec(
"INSERT INTO job (job_id, cluster, subcluster, start_time, hpc_user, project, cluster_partition, array_job_id, num_nodes, num_hwthreads, num_acc, exclusive, monitoring_status, smt, job_state, duration, walltime, footprint, energy, energy_footprint, resources, meta_data) SELECT job_id, cluster, subcluster, start_time, hpc_user, project, cluster_partition, array_job_id, num_nodes, num_hwthreads, num_acc, exclusive, monitoring_status, smt, job_state, duration, walltime, footprint, energy, energy_footprint, resources, meta_data FROM job_cache")
if err != nil {
log.Warnf("Error while Job sync: %v", err)
cclog.Warnf("Error while Job sync: %v", err)
return nil, err
}
_, err = r.DB.Exec("DELETE FROM job_cache")
if err != nil {
log.Warnf("Error while Job cache clean: %v", err)
cclog.Warnf("Error while Job cache clean: %v", err)
return nil, err
}

View File

@@ -1,5 +1,5 @@
// Copyright (C) NHR@FAU, University Erlangen-Nuremberg.
// All rights reserved.
// All rights reserved. This file is part of cc-backend.
// Use of this source code is governed by a MIT-style
// license that can be found in the LICENSE file.
package repository
@@ -11,8 +11,8 @@ import (
"time"
"github.com/ClusterCockpit/cc-backend/internal/graph/model"
"github.com/ClusterCockpit/cc-backend/pkg/log"
"github.com/ClusterCockpit/cc-backend/pkg/schema"
cclog "github.com/ClusterCockpit/cc-lib/ccLogger"
"github.com/ClusterCockpit/cc-lib/schema"
sq "github.com/Masterminds/squirrel"
)
@@ -39,7 +39,7 @@ func (r *JobRepository) Find(
q = q.OrderBy("job.id DESC") // always use newest matching job by db id if more than one match
log.Debugf("Timer Find %s", time.Since(start))
cclog.Debugf("Timer Find %s", time.Since(start))
return scanJob(q.RunWith(r.stmtCache).QueryRow())
}
@@ -86,7 +86,7 @@ func (r *JobRepository) FindAll(
rows, err := q.RunWith(r.stmtCache).Query()
if err != nil {
log.Error("Error while running query")
cclog.Error("Error while running query")
return nil, err
}
@@ -94,12 +94,12 @@ func (r *JobRepository) FindAll(
for rows.Next() {
job, err := scanJob(rows)
if err != nil {
log.Warn("Error while scanning rows")
cclog.Warn("Error while scanning rows")
return nil, err
}
jobs = append(jobs, job)
}
log.Debugf("Timer FindAll %s", time.Since(start))
cclog.Debugf("Timer FindAll %s", time.Since(start))
return jobs, nil
}
@@ -112,7 +112,7 @@ func (r *JobRepository) GetJobList() ([]int64, error) {
rows, err := query.RunWith(r.stmtCache).Query()
if err != nil {
log.Error("Error while running query")
cclog.Error("Error while running query")
return nil, err
}
@@ -122,13 +122,13 @@ func (r *JobRepository) GetJobList() ([]int64, error) {
err := rows.Scan(&id)
if err != nil {
rows.Close()
log.Warn("Error while scanning rows")
cclog.Warn("Error while scanning rows")
return nil, err
}
jl = append(jl, id)
}
log.Infof("Return job count %d", len(jl))
cclog.Infof("Return job count %d", len(jl))
return jl, nil
}
@@ -253,7 +253,7 @@ func (r *JobRepository) FindConcurrentJobs(
rows, err := query.RunWith(r.stmtCache).Query()
if err != nil {
log.Errorf("Error while running query: %v", err)
cclog.Errorf("Error while running query: %v", err)
return nil, err
}
@@ -264,7 +264,7 @@ func (r *JobRepository) FindConcurrentJobs(
var id, jobId, startTime sql.NullInt64
if err = rows.Scan(&id, &jobId, &startTime); err != nil {
log.Warn("Error while scanning rows")
cclog.Warn("Error while scanning rows")
return nil, err
}
@@ -280,7 +280,7 @@ func (r *JobRepository) FindConcurrentJobs(
rows, err = queryRunning.RunWith(r.stmtCache).Query()
if err != nil {
log.Errorf("Error while running query: %v", err)
cclog.Errorf("Error while running query: %v", err)
return nil, err
}
@@ -288,7 +288,7 @@ func (r *JobRepository) FindConcurrentJobs(
var id, jobId, startTime sql.NullInt64
if err := rows.Scan(&id, &jobId, &startTime); err != nil {
log.Warn("Error while scanning rows")
cclog.Warn("Error while scanning rows")
return nil, err
}

View File

@@ -1,5 +1,5 @@
// Copyright (C) NHR@FAU, University Erlangen-Nuremberg.
// All rights reserved.
// All rights reserved. This file is part of cc-backend.
// Use of this source code is governed by a MIT-style
// license that can be found in the LICENSE file.
package repository
@@ -7,7 +7,7 @@ package repository
import (
"sync"
"github.com/ClusterCockpit/cc-backend/pkg/schema"
"github.com/ClusterCockpit/cc-lib/schema"
)
type JobHook interface {

View File

@@ -1,5 +1,5 @@
// Copyright (C) NHR@FAU, University Erlangen-Nuremberg.
// All rights reserved.
// All rights reserved. This file is part of cc-backend.
// Use of this source code is governed by a MIT-style
// license that can be found in the LICENSE file.
package repository
@@ -13,8 +13,8 @@ import (
"time"
"github.com/ClusterCockpit/cc-backend/internal/graph/model"
"github.com/ClusterCockpit/cc-backend/pkg/log"
"github.com/ClusterCockpit/cc-backend/pkg/schema"
cclog "github.com/ClusterCockpit/cc-lib/ccLogger"
"github.com/ClusterCockpit/cc-lib/schema"
sq "github.com/Masterminds/squirrel"
)
@@ -68,7 +68,7 @@ func (r *JobRepository) QueryJobs(
rows, err := query.RunWith(r.stmtCache).Query()
if err != nil {
queryString, queryVars, _ := query.ToSql()
log.Errorf("Error while running query '%s' %v: %v", queryString, queryVars, err)
cclog.Errorf("Error while running query '%s' %v: %v", queryString, queryVars, err)
return nil, err
}
@@ -77,7 +77,7 @@ func (r *JobRepository) QueryJobs(
job, err := scanJob(rows)
if err != nil {
rows.Close()
log.Warn("Error while scanning rows (Jobs)")
cclog.Warn("Error while scanning rows (Jobs)")
return nil, err
}
jobs = append(jobs, job)
@@ -123,7 +123,7 @@ func SecurityCheckWithUser(user *schema.User, query sq.SelectBuilder) (sq.Select
if len(user.Projects) != 0 {
return query.Where(sq.Or{sq.Eq{"job.project": user.Projects}, sq.Eq{"job.hpc_user": user.Username}}), nil
} else {
log.Debugf("Manager-User '%s' has no defined projects to lookup! Query only personal jobs ...", user.Username)
cclog.Debugf("Manager-User '%s' has no defined projects to lookup! Query only personal jobs ...", user.Username)
return query.Where("job.hpc_user = ?", user.Username), nil
}
case user.HasRole(schema.RoleUser): // User : Only personal jobs
@@ -244,7 +244,7 @@ func buildTimeCondition(field string, cond *schema.TimeRange, query sq.SelectBui
case "last30d":
then = now - (60 * 60 * 24 * 30)
default:
log.Debugf("No known named timeRange: startTime.range = %s", cond.Range)
cclog.Debugf("No known named timeRange: startTime.range = %s", cond.Range)
return query
}
return query.Where(field+" BETWEEN ? AND ?", then, now)
@@ -335,7 +335,7 @@ var (
func toSnakeCase(str string) string {
for _, c := range str {
if c == '\'' || c == '\\' {
log.Panic("toSnakeCase() attack vector!")
cclog.Panic("toSnakeCase() attack vector!")
}
}

View File

@@ -1,5 +1,5 @@
// Copyright (C) NHR@FAU, University Erlangen-Nuremberg.
// All rights reserved.
// All rights reserved. This file is part of cc-backend.
// Use of this source code is governed by a MIT-style
// license that can be found in the LICENSE file.
package repository
@@ -9,7 +9,7 @@ import (
"fmt"
"testing"
"github.com/ClusterCockpit/cc-backend/pkg/schema"
"github.com/ClusterCockpit/cc-lib/schema"
_ "github.com/mattn/go-sqlite3"
)

View File

@@ -1,5 +1,5 @@
// Copyright (C) NHR@FAU, University Erlangen-Nuremberg.
// All rights reserved.
// All rights reserved. This file is part of cc-backend.
// Use of this source code is governed by a MIT-style
// license that can be found in the LICENSE file.
package repository
@@ -9,7 +9,7 @@ import (
"embed"
"fmt"
"github.com/ClusterCockpit/cc-backend/pkg/log"
cclog "github.com/ClusterCockpit/cc-lib/ccLogger"
"github.com/golang-migrate/migrate/v4"
"github.com/golang-migrate/migrate/v4/database/mysql"
"github.com/golang-migrate/migrate/v4/database/sqlite3"
@@ -54,13 +54,13 @@ func checkDBVersion(backend string, db *sql.DB) error {
return err
}
default:
log.Abortf("Migration: Unsupported database backend '%s'.\n", backend)
cclog.Abortf("Migration: Unsupported database backend '%s'.\n", backend)
}
v, dirty, err := m.Version()
if err != nil {
if err == migrate.ErrNilVersion {
log.Warn("Legacy database without version or missing database file!")
cclog.Warn("Legacy database without version or missing database file!")
} else {
return err
}
@@ -84,7 +84,7 @@ func getMigrateInstance(backend string, db string) (m *migrate.Migrate, err erro
case "sqlite3":
d, err := iofs.New(migrationFiles, "migrations/sqlite3")
if err != nil {
log.Fatal(err)
cclog.Fatal(err)
}
m, err = migrate.NewWithSourceInstance("iofs", d, fmt.Sprintf("sqlite3://%s?_foreign_keys=on", db))
@@ -102,7 +102,7 @@ func getMigrateInstance(backend string, db string) (m *migrate.Migrate, err erro
return m, err
}
default:
log.Abortf("Migration: Unsupported database backend '%s'.\n", backend)
cclog.Abortf("Migration: Unsupported database backend '%s'.\n", backend)
}
return m, nil
@@ -117,14 +117,14 @@ func MigrateDB(backend string, db string) error {
v, dirty, err := m.Version()
if err != nil {
if err == migrate.ErrNilVersion {
log.Warn("Legacy database without version or missing database file!")
cclog.Warn("Legacy database without version or missing database file!")
} else {
return err
}
}
if v < Version {
log.Infof("unsupported database version %d, need %d.\nPlease backup your database file and run cc-backend -migrate-db", v, Version)
cclog.Infof("unsupported database version %d, need %d.\nPlease backup your database file and run cc-backend -migrate-db", v, Version)
}
if dirty {
@@ -133,7 +133,7 @@ func MigrateDB(backend string, db string) error {
if err := m.Up(); err != nil {
if err == migrate.ErrNoChange {
log.Info("DB already up to date!")
cclog.Info("DB already up to date!")
} else {
return err
}
@@ -151,7 +151,7 @@ func RevertDB(backend string, db string) error {
if err := m.Migrate(Version - 1); err != nil {
if err == migrate.ErrNoChange {
log.Info("DB already up to date!")
cclog.Info("DB already up to date!")
} else {
return err
}

View File

@@ -1,5 +1,5 @@
// Copyright (C) NHR@FAU, University Erlangen-Nuremberg.
// All rights reserved.
// All rights reserved. This file is part of cc-backend.
// Use of this source code is governed by a MIT-style
// license that can be found in the LICENSE file.
package repository
@@ -15,9 +15,9 @@ import (
"github.com/ClusterCockpit/cc-backend/internal/graph/model"
"github.com/ClusterCockpit/cc-backend/pkg/archive"
"github.com/ClusterCockpit/cc-backend/pkg/log"
"github.com/ClusterCockpit/cc-backend/pkg/lrucache"
"github.com/ClusterCockpit/cc-backend/pkg/schema"
cclog "github.com/ClusterCockpit/cc-lib/ccLogger"
"github.com/ClusterCockpit/cc-lib/lrucache"
"github.com/ClusterCockpit/cc-lib/schema"
sq "github.com/Masterminds/squirrel"
"github.com/jmoiron/sqlx"
)
@@ -59,7 +59,7 @@ func (r *NodeRepository) FetchMetadata(node *schema.Node) (map[string]string, er
if err := sq.Select("node.meta_data").From("node").Where("node.id = ?", node.ID).
RunWith(r.stmtCache).QueryRow().Scan(&node.RawMetaData); err != nil {
log.Warn("Error while scanning for node metadata")
cclog.Warn("Error while scanning for node metadata")
return nil, err
}
@@ -68,12 +68,12 @@ func (r *NodeRepository) FetchMetadata(node *schema.Node) (map[string]string, er
}
if err := json.Unmarshal(node.RawMetaData, &node.MetaData); err != nil {
log.Warn("Error while unmarshaling raw metadata json")
cclog.Warn("Error while unmarshaling raw metadata json")
return nil, err
}
r.cache.Put(cachekey, node.MetaData, len(node.RawMetaData), 24*time.Hour)
log.Debugf("Timer FetchMetadata %s", time.Since(start))
cclog.Debugf("Timer FetchMetadata %s", time.Since(start))
return node.MetaData, nil
}
@@ -82,7 +82,7 @@ func (r *NodeRepository) UpdateMetadata(node *schema.Node, key, val string) (err
r.cache.Del(cachekey)
if node.MetaData == nil {
if _, err = r.FetchMetadata(node); err != nil {
log.Warnf("Error while fetching metadata for node, DB ID '%v'", node.ID)
cclog.Warnf("Error while fetching metadata for node, DB ID '%v'", node.ID)
return err
}
}
@@ -97,7 +97,7 @@ func (r *NodeRepository) UpdateMetadata(node *schema.Node, key, val string) (err
}
if node.RawMetaData, err = json.Marshal(node.MetaData); err != nil {
log.Warnf("Error while marshaling metadata for node, DB ID '%v'", node.ID)
cclog.Warnf("Error while marshaling metadata for node, DB ID '%v'", node.ID)
return err
}
@@ -105,7 +105,7 @@ func (r *NodeRepository) UpdateMetadata(node *schema.Node, key, val string) (err
Set("meta_data", node.RawMetaData).
Where("node.id = ?", node.ID).
RunWith(r.stmtCache).Exec(); err != nil {
log.Warnf("Error while updating metadata for node, DB ID '%v'", node.ID)
cclog.Warnf("Error while updating metadata for node, DB ID '%v'", node.ID)
return err
}
@@ -120,7 +120,7 @@ func (r *NodeRepository) GetNode(id int64, withMeta bool) (*schema.Node, error)
Where("node.id = ?", id).RunWith(r.DB).
QueryRow().Scan(&node.ID, &node.Hostname, &node.Cluster, &node.SubCluster, &node.NodeState,
&node.HealthState); err != nil {
log.Warnf("Error while querying node '%v' from database", id)
cclog.Warnf("Error while querying node '%v' from database", id)
return nil, err
}
@@ -128,7 +128,7 @@ func (r *NodeRepository) GetNode(id int64, withMeta bool) (*schema.Node, error)
var err error
var meta map[string]string
if meta, err = r.FetchMetadata(node); err != nil {
log.Warnf("Error while fetching metadata for node '%v'", id)
cclog.Warnf("Error while fetching metadata for node '%v'", id)
return nil, err
}
node.MetaData = meta
@@ -146,12 +146,12 @@ func (r *NodeRepository) AddNode(node *schema.Node) (int64, error) {
res, err := r.DB.NamedExec(NamedNodeInsert, node)
if err != nil {
log.Errorf("Error while adding node '%v' to database", node.Hostname)
cclog.Errorf("Error while adding node '%v' to database", node.Hostname)
return 0, err
}
node.ID, err = res.LastInsertId()
if err != nil {
log.Errorf("Error while getting last insert id for node '%v' from database", node.Hostname)
cclog.Errorf("Error while getting last insert id for node '%v' from database", node.Hostname)
return 0, err
}
@@ -166,7 +166,7 @@ func (r *NodeRepository) UpdateNodeState(hostname string, cluster string, nodeSt
if err == sql.ErrNoRows {
subcluster, err := archive.GetSubClusterByNode(cluster, hostname)
if err != nil {
log.Errorf("Error while getting subcluster for node '%s' in cluster '%s': %v", hostname, cluster, err)
cclog.Errorf("Error while getting subcluster for node '%s' in cluster '%s': %v", hostname, cluster, err)
return err
}
node := schema.Node{
@@ -175,29 +175,29 @@ func (r *NodeRepository) UpdateNodeState(hostname string, cluster string, nodeSt
}
_, err = r.AddNode(&node)
if err != nil {
log.Errorf("Error while adding node '%s' to database: %v", hostname, err)
cclog.Errorf("Error while adding node '%s' to database: %v", hostname, err)
return err
}
log.Infof("Added node '%s' to database", hostname)
cclog.Infof("Added node '%s' to database", hostname)
return nil
} else {
log.Warnf("Error while querying node '%v' from database", id)
cclog.Warnf("Error while querying node '%v' from database", id)
return err
}
}
if _, err := sq.Update("node").Set("node_state", nodeState).Where("node.id = ?", id).RunWith(r.DB).Exec(); err != nil {
log.Errorf("error while updating node '%s'", hostname)
cclog.Errorf("error while updating node '%s'", hostname)
return err
}
log.Infof("Updated node '%s' in database", hostname)
cclog.Infof("Updated node '%s' in database", hostname)
return nil
}
// func (r *NodeRepository) UpdateHealthState(hostname string, healthState *schema.MonitoringState) error {
// if _, err := sq.Update("node").Set("health_state", healthState).Where("node.id = ?", id).RunWith(r.DB).Exec(); err != nil {
// log.Errorf("error while updating node '%d'", id)
// cclog.Errorf("error while updating node '%d'", id)
// return err
// }
//
@@ -207,10 +207,10 @@ func (r *NodeRepository) UpdateNodeState(hostname string, cluster string, nodeSt
func (r *NodeRepository) DeleteNode(id int64) error {
_, err := r.DB.Exec(`DELETE FROM node WHERE node.id = ?`, id)
if err != nil {
log.Errorf("Error while deleting node '%d' from DB", id)
cclog.Errorf("Error while deleting node '%d' from DB", id)
return err
}
log.Infof("deleted node '%d' from DB", id)
cclog.Infof("deleted node '%d' from DB", id)
return nil
}
@@ -243,7 +243,7 @@ func (r *NodeRepository) QueryNodes(
rows, err := query.RunWith(r.stmtCache).Query()
if err != nil {
queryString, queryVars, _ := query.ToSql()
log.Errorf("Error while running query '%s' %v: %v", queryString, queryVars, err)
cclog.Errorf("Error while running query '%s' %v: %v", queryString, queryVars, err)
return nil, err
}
@@ -254,7 +254,7 @@ func (r *NodeRepository) QueryNodes(
if err := rows.Scan(&node.Hostname, &node.Cluster, &node.SubCluster,
&node.NodeState, &node.HealthState); err != nil {
rows.Close()
log.Warn("Error while scanning rows (Nodes)")
cclog.Warn("Error while scanning rows (Nodes)")
return nil, err
}
nodes = append(nodes, &node)
@@ -269,7 +269,7 @@ func (r *NodeRepository) ListNodes(cluster string) ([]*schema.Node, error) {
rows, err := q.RunWith(r.DB).Query()
if err != nil {
log.Warn("Error while querying user list")
cclog.Warn("Error while querying user list")
return nil, err
}
nodeList := make([]*schema.Node, 0, 100)
@@ -278,7 +278,7 @@ func (r *NodeRepository) ListNodes(cluster string) ([]*schema.Node, error) {
node := &schema.Node{}
if err := rows.Scan(&node.Hostname, &node.Cluster,
&node.SubCluster, &node.NodeState, &node.HealthState); err != nil {
log.Warn("Error while scanning node list")
cclog.Warn("Error while scanning node list")
return nil, err
}

View File

@@ -1,5 +1,5 @@
// Copyright (C) NHR@FAU, University Erlangen-Nuremberg.
// All rights reserved.
// All rights reserved. This file is part of cc-backend.
// Use of this source code is governed by a MIT-style
// license that can be found in the LICENSE file.
package repository
@@ -9,8 +9,8 @@ import (
"testing"
"github.com/ClusterCockpit/cc-backend/internal/graph/model"
"github.com/ClusterCockpit/cc-backend/pkg/log"
"github.com/ClusterCockpit/cc-backend/pkg/schema"
cclog "github.com/ClusterCockpit/cc-lib/ccLogger"
"github.com/ClusterCockpit/cc-lib/schema"
_ "github.com/mattn/go-sqlite3"
)
@@ -65,7 +65,7 @@ func BenchmarkDB_FindJobById(b *testing.B) {
func BenchmarkDB_FindJob(b *testing.B) {
var jobId int64 = 107266
var startTime int64 = 1657557241
var cluster = "fritz"
cluster := "fritz"
b.Run("FindJob", func(b *testing.B) {
db := setup(b)
@@ -147,7 +147,7 @@ func getContext(tb testing.TB) context.Context {
func setup(tb testing.TB) *JobRepository {
tb.Helper()
log.Init("warn", true)
cclog.Init("warn", true)
dbfile := "testdata/job.db"
err := MigrateDB("sqlite3", dbfile)
noErr(tb, err)

View File

@@ -1,5 +1,5 @@
// Copyright (C) NHR@FAU, University Erlangen-Nuremberg.
// All rights reserved.
// All rights reserved. This file is part of cc-backend.
// Use of this source code is governed by a MIT-style
// license that can be found in the LICENSE file.
package repository
@@ -14,8 +14,8 @@ import (
"github.com/ClusterCockpit/cc-backend/internal/graph/model"
"github.com/ClusterCockpit/cc-backend/internal/metricDataDispatcher"
"github.com/ClusterCockpit/cc-backend/pkg/archive"
"github.com/ClusterCockpit/cc-backend/pkg/log"
"github.com/ClusterCockpit/cc-backend/pkg/schema"
cclog "github.com/ClusterCockpit/cc-lib/ccLogger"
"github.com/ClusterCockpit/cc-lib/schema"
sq "github.com/Masterminds/squirrel"
)
@@ -158,7 +158,7 @@ func (r *JobRepository) JobsStatsGrouped(
rows, err := query.RunWith(r.DB).Query()
if err != nil {
log.Warn("Error while querying DB for job statistics")
cclog.Warn("Error while querying DB for job statistics")
return nil, err
}
@@ -169,7 +169,7 @@ func (r *JobRepository) JobsStatsGrouped(
var name sql.NullString
var jobs, walltime, nodes, nodeHours, cores, coreHours, accs, accHours sql.NullInt64
if err := rows.Scan(&id, &jobs, &name, &walltime, &nodes, &nodeHours, &cores, &coreHours, &accs, &accHours); err != nil {
log.Warn("Error while scanning rows")
cclog.Warn("Error while scanning rows")
return nil, err
}
@@ -241,7 +241,7 @@ func (r *JobRepository) JobsStatsGrouped(
}
}
log.Debugf("Timer JobsStatsGrouped %s", time.Since(start))
cclog.Debugf("Timer JobsStatsGrouped %s", time.Since(start))
return stats, nil
}
@@ -261,7 +261,7 @@ func (r *JobRepository) JobsStats(
var jobs, walltime, nodes, nodeHours, cores, coreHours, accs, accHours sql.NullInt64
if err := row.Scan(&jobs, &walltime, &nodes, &nodeHours, &cores, &coreHours, &accs, &accHours); err != nil {
log.Warn("Error while scanning rows")
cclog.Warn("Error while scanning rows")
return nil, err
}
@@ -287,7 +287,7 @@ func (r *JobRepository) JobsStats(
})
}
log.Debugf("Timer JobStats %s", time.Since(start))
cclog.Debugf("Timer JobStats %s", time.Since(start))
return stats, nil
}
@@ -301,7 +301,7 @@ func LoadJobStat(job *schema.Job, metric string, statType string) float64 {
case "min":
return stats.Min
default:
log.Errorf("Unknown stat type %s", statType)
cclog.Errorf("Unknown stat type %s", statType)
}
}
@@ -322,7 +322,7 @@ func (r *JobRepository) JobCountGrouped(
}
rows, err := query.RunWith(r.DB).Query()
if err != nil {
log.Warn("Error while querying DB for job statistics")
cclog.Warn("Error while querying DB for job statistics")
return nil, err
}
@@ -332,7 +332,7 @@ func (r *JobRepository) JobCountGrouped(
var id sql.NullString
var cnt sql.NullInt64
if err := rows.Scan(&id, &cnt); err != nil {
log.Warn("Error while scanning rows")
cclog.Warn("Error while scanning rows")
return nil, err
}
if id.Valid {
@@ -344,7 +344,7 @@ func (r *JobRepository) JobCountGrouped(
}
}
log.Debugf("Timer JobCountGrouped %s", time.Since(start))
cclog.Debugf("Timer JobCountGrouped %s", time.Since(start))
return stats, nil
}
@@ -364,7 +364,7 @@ func (r *JobRepository) AddJobCountGrouped(
}
rows, err := query.RunWith(r.DB).Query()
if err != nil {
log.Warn("Error while querying DB for job statistics")
cclog.Warn("Error while querying DB for job statistics")
return nil, err
}
@@ -374,7 +374,7 @@ func (r *JobRepository) AddJobCountGrouped(
var id sql.NullString
var cnt sql.NullInt64
if err := rows.Scan(&id, &cnt); err != nil {
log.Warn("Error while scanning rows")
cclog.Warn("Error while scanning rows")
return nil, err
}
if id.Valid {
@@ -393,7 +393,7 @@ func (r *JobRepository) AddJobCountGrouped(
}
}
log.Debugf("Timer AddJobCountGrouped %s", time.Since(start))
cclog.Debugf("Timer AddJobCountGrouped %s", time.Since(start))
return stats, nil
}
@@ -411,7 +411,7 @@ func (r *JobRepository) AddJobCount(
}
rows, err := query.RunWith(r.DB).Query()
if err != nil {
log.Warn("Error while querying DB for job statistics")
cclog.Warn("Error while querying DB for job statistics")
return nil, err
}
@@ -420,7 +420,7 @@ func (r *JobRepository) AddJobCount(
for rows.Next() {
var cnt sql.NullInt64
if err := rows.Scan(&cnt); err != nil {
log.Warn("Error while scanning rows")
cclog.Warn("Error while scanning rows")
return nil, err
}
@@ -438,7 +438,7 @@ func (r *JobRepository) AddJobCount(
}
}
log.Debugf("Timer AddJobCount %s", time.Since(start))
cclog.Debugf("Timer AddJobCount %s", time.Since(start))
return stats, nil
}
@@ -479,29 +479,29 @@ func (r *JobRepository) AddHistograms(
value := fmt.Sprintf(`CAST(ROUND(((CASE WHEN job.job_state = "running" THEN %d - job.start_time ELSE job.duration END) / %d) + 1) as %s) as value`, time.Now().Unix(), targetBinSize, castType)
stat.HistDuration, err = r.jobsDurationStatisticsHistogram(ctx, value, filter, targetBinSize, &targetBinCount)
if err != nil {
log.Warn("Error while loading job statistics histogram: job duration")
cclog.Warn("Error while loading job statistics histogram: job duration")
return nil, err
}
stat.HistNumNodes, err = r.jobsStatisticsHistogram(ctx, "job.num_nodes as value", filter)
if err != nil {
log.Warn("Error while loading job statistics histogram: num nodes")
cclog.Warn("Error while loading job statistics histogram: num nodes")
return nil, err
}
stat.HistNumCores, err = r.jobsStatisticsHistogram(ctx, "job.num_hwthreads as value", filter)
if err != nil {
log.Warn("Error while loading job statistics histogram: num hwthreads")
cclog.Warn("Error while loading job statistics histogram: num hwthreads")
return nil, err
}
stat.HistNumAccs, err = r.jobsStatisticsHistogram(ctx, "job.num_acc as value", filter)
if err != nil {
log.Warn("Error while loading job statistics histogram: num acc")
cclog.Warn("Error while loading job statistics histogram: num acc")
return nil, err
}
log.Debugf("Timer AddHistograms %s", time.Since(start))
cclog.Debugf("Timer AddHistograms %s", time.Since(start))
return stat, nil
}
@@ -520,7 +520,7 @@ func (r *JobRepository) AddMetricHistograms(
if f.State != nil {
if len(f.State) == 1 && f.State[0] == "running" {
stat.HistMetrics = r.runningJobsMetricStatisticsHistogram(ctx, metrics, filter, targetBinCount)
log.Debugf("Timer AddMetricHistograms %s", time.Since(start))
cclog.Debugf("Timer AddMetricHistograms %s", time.Since(start))
return stat, nil
}
}
@@ -530,13 +530,13 @@ func (r *JobRepository) AddMetricHistograms(
for _, m := range metrics {
metricHisto, err := r.jobsMetricStatisticsHistogram(ctx, m, filter, targetBinCount)
if err != nil {
log.Warnf("Error while loading job metric statistics histogram: %s", m)
cclog.Warnf("Error while loading job metric statistics histogram: %s", m)
continue
}
stat.HistMetrics = append(stat.HistMetrics, metricHisto)
}
log.Debugf("Timer AddMetricHistograms %s", time.Since(start))
cclog.Debugf("Timer AddMetricHistograms %s", time.Since(start))
return stat, nil
}
@@ -560,7 +560,7 @@ func (r *JobRepository) jobsStatisticsHistogram(
rows, err := query.GroupBy("value").RunWith(r.DB).Query()
if err != nil {
log.Error("Error while running query")
cclog.Error("Error while running query")
return nil, err
}
@@ -569,13 +569,13 @@ func (r *JobRepository) jobsStatisticsHistogram(
for rows.Next() {
point := model.HistoPoint{}
if err := rows.Scan(&point.Value, &point.Count); err != nil {
log.Warn("Error while scanning rows")
cclog.Warn("Error while scanning rows")
return nil, err
}
points = append(points, &point)
}
log.Debugf("Timer jobsStatisticsHistogram %s", time.Since(start))
cclog.Debugf("Timer jobsStatisticsHistogram %s", time.Since(start))
return points, nil
}
@@ -607,7 +607,7 @@ func (r *JobRepository) jobsDurationStatisticsHistogram(
rows, err := query.GroupBy("value").RunWith(r.DB).Query()
if err != nil {
log.Error("Error while running query")
cclog.Error("Error while running query")
return nil, err
}
@@ -615,7 +615,7 @@ func (r *JobRepository) jobsDurationStatisticsHistogram(
for rows.Next() {
point := model.HistoPoint{}
if err := rows.Scan(&point.Value, &point.Count); err != nil {
log.Warn("Error while scanning rows")
cclog.Warn("Error while scanning rows")
return nil, err
}
@@ -630,7 +630,7 @@ func (r *JobRepository) jobsDurationStatisticsHistogram(
}
}
log.Debugf("Timer jobsStatisticsHistogram %s", time.Since(start))
cclog.Debugf("Timer jobsStatisticsHistogram %s", time.Since(start))
return points, nil
}
@@ -652,7 +652,7 @@ func (r *JobRepository) jobsMetricStatisticsHistogram(
peak = metricConfig.Peak
unit = metricConfig.Unit.Prefix + metricConfig.Unit.Base
footprintStat = metricConfig.Footprint
log.Debugf("Cluster %s filter found with peak %f for %s", *f.Cluster.Eq, peak, metric)
cclog.Debugf("Cluster %s filter found with peak %f for %s", *f.Cluster.Eq, peak, metric)
}
}
@@ -674,7 +674,7 @@ func (r *JobRepository) jobsMetricStatisticsHistogram(
}
}
// log.Debugf("Metric %s, Peak %f, Unit %s", metric, peak, unit)
// cclog.Debugf("Metric %s, Peak %f, Unit %s", metric, peak, unit)
// Make bins, see https://jereze.com/code/sql-histogram/ (Modified here)
start := time.Now()
@@ -709,7 +709,7 @@ func (r *JobRepository) jobsMetricStatisticsHistogram(
rows, err := mainQuery.RunWith(r.DB).Query()
if err != nil {
log.Errorf("Error while running mainQuery: %s", err)
cclog.Errorf("Error while running mainQuery: %s", err)
return nil, err
}
@@ -726,7 +726,7 @@ func (r *JobRepository) jobsMetricStatisticsHistogram(
for rows.Next() { // Fill Count if Bin-No. Matches (Not every Bin exists in DB!)
rpoint := model.MetricHistoPoint{}
if err := rows.Scan(&rpoint.Bin, &rpoint.Count); err != nil { // Required for Debug: &rpoint.Min, &rpoint.Max
log.Warnf("Error while scanning rows for %s", metric)
cclog.Warnf("Error while scanning rows for %s", metric)
return nil, err // FIXME: Totally bricks cc-backend if returned and if all metrics requested?
}
@@ -736,10 +736,10 @@ func (r *JobRepository) jobsMetricStatisticsHistogram(
e.Count = rpoint.Count
// Only Required For Debug: Check DB returned Min/Max against Backend Init above
// if rpoint.Min != nil {
// log.Warnf(">>>> Bin %d Min Set For %s to %d (Init'd with: %d)", *e.Bin, metric, *rpoint.Min, *e.Min)
// cclog.Warnf(">>>> Bin %d Min Set For %s to %d (Init'd with: %d)", *e.Bin, metric, *rpoint.Min, *e.Min)
// }
// if rpoint.Max != nil {
// log.Warnf(">>>> Bin %d Max Set For %s to %d (Init'd with: %d)", *e.Bin, metric, *rpoint.Max, *e.Max)
// cclog.Warnf(">>>> Bin %d Max Set For %s to %d (Init'd with: %d)", *e.Bin, metric, *rpoint.Max, *e.Max)
// }
break
}
@@ -749,7 +749,7 @@ func (r *JobRepository) jobsMetricStatisticsHistogram(
result := model.MetricHistoPoints{Metric: metric, Unit: unit, Stat: &footprintStat, Data: points}
log.Debugf("Timer jobsStatisticsHistogram %s", time.Since(start))
cclog.Debugf("Timer jobsStatisticsHistogram %s", time.Since(start))
return &result, nil
}
@@ -762,11 +762,11 @@ func (r *JobRepository) runningJobsMetricStatisticsHistogram(
// Get Jobs
jobs, err := r.QueryJobs(ctx, filters, &model.PageRequest{Page: 1, ItemsPerPage: 500 + 1}, nil)
if err != nil {
log.Errorf("Error while querying jobs for footprint: %s", err)
cclog.Errorf("Error while querying jobs for footprint: %s", err)
return nil
}
if len(jobs) > 500 {
log.Errorf("too many jobs matched (max: %d)", 500)
cclog.Errorf("too many jobs matched (max: %d)", 500)
return nil
}
@@ -782,7 +782,7 @@ func (r *JobRepository) runningJobsMetricStatisticsHistogram(
}
if err := metricDataDispatcher.LoadAverages(job, metrics, avgs, ctx); err != nil {
log.Errorf("Error while loading averages for histogram: %s", err)
cclog.Errorf("Error while loading averages for histogram: %s", err)
return nil
}
}

View File

@@ -1,5 +1,5 @@
// Copyright (C) NHR@FAU, University Erlangen-Nuremberg.
// All rights reserved.
// All rights reserved. This file is part of cc-backend.
// Use of this source code is governed by a MIT-style
// license that can be found in the LICENSE file.
package repository
@@ -19,7 +19,6 @@ func TestBuildJobStatsQuery(t *testing.T) {
noErr(t, err)
fmt.Printf("SQL: %s\n", sql)
}
func TestJobStats(t *testing.T) {

View File

@@ -1,5 +1,5 @@
// Copyright (C) NHR@FAU, University Erlangen-Nuremberg.
// All rights reserved.
// All rights reserved. This file is part of cc-backend.
// Use of this source code is governed by a MIT-style
// license that can be found in the LICENSE file.
package repository
@@ -9,8 +9,8 @@ import (
"strings"
"github.com/ClusterCockpit/cc-backend/pkg/archive"
"github.com/ClusterCockpit/cc-backend/pkg/log"
"github.com/ClusterCockpit/cc-backend/pkg/schema"
cclog "github.com/ClusterCockpit/cc-lib/ccLogger"
"github.com/ClusterCockpit/cc-lib/schema"
sq "github.com/Masterminds/squirrel"
)
@@ -18,7 +18,7 @@ import (
func (r *JobRepository) AddTag(user *schema.User, job int64, tag int64) ([]*schema.Tag, error) {
j, err := r.FindByIdWithUser(user, job)
if err != nil {
log.Warn("Error while finding job by id")
cclog.Warn("Error while finding job by id")
return nil, err
}
@@ -26,19 +26,19 @@ func (r *JobRepository) AddTag(user *schema.User, job int64, tag int64) ([]*sche
if _, err := q.RunWith(r.stmtCache).Exec(); err != nil {
s, _, _ := q.ToSql()
log.Errorf("Error adding tag with %s: %v", s, err)
cclog.Errorf("Error adding tag with %s: %v", s, err)
return nil, err
}
tags, err := r.GetTags(user, &job)
if err != nil {
log.Warn("Error while getting tags for job")
cclog.Warn("Error while getting tags for job")
return nil, err
}
archiveTags, err := r.getArchiveTags(&job)
if err != nil {
log.Warn("Error while getting tags for job")
cclog.Warn("Error while getting tags for job")
return nil, err
}
@@ -48,7 +48,7 @@ func (r *JobRepository) AddTag(user *schema.User, job int64, tag int64) ([]*sche
func (r *JobRepository) AddTagDirect(job int64, tag int64) ([]*schema.Tag, error) {
j, err := r.FindByIdDirect(job)
if err != nil {
log.Warn("Error while finding job by id")
cclog.Warn("Error while finding job by id")
return nil, err
}
@@ -56,19 +56,19 @@ func (r *JobRepository) AddTagDirect(job int64, tag int64) ([]*schema.Tag, error
if _, err := q.RunWith(r.stmtCache).Exec(); err != nil {
s, _, _ := q.ToSql()
log.Errorf("Error adding tag with %s: %v", s, err)
cclog.Errorf("Error adding tag with %s: %v", s, err)
return nil, err
}
tags, err := r.GetTagsDirect(&job)
if err != nil {
log.Warn("Error while getting tags for job")
cclog.Warn("Error while getting tags for job")
return nil, err
}
archiveTags, err := r.getArchiveTags(&job)
if err != nil {
log.Warn("Error while getting tags for job")
cclog.Warn("Error while getting tags for job")
return nil, err
}
@@ -80,7 +80,7 @@ func (r *JobRepository) AddTagDirect(job int64, tag int64) ([]*schema.Tag, error
func (r *JobRepository) RemoveTag(user *schema.User, job, tag int64) ([]*schema.Tag, error) {
j, err := r.FindByIdWithUser(user, job)
if err != nil {
log.Warn("Error while finding job by id")
cclog.Warn("Error while finding job by id")
return nil, err
}
@@ -88,19 +88,19 @@ func (r *JobRepository) RemoveTag(user *schema.User, job, tag int64) ([]*schema.
if _, err := q.RunWith(r.stmtCache).Exec(); err != nil {
s, _, _ := q.ToSql()
log.Errorf("Error removing tag with %s: %v", s, err)
cclog.Errorf("Error removing tag with %s: %v", s, err)
return nil, err
}
tags, err := r.GetTags(user, &job)
if err != nil {
log.Warn("Error while getting tags for job")
cclog.Warn("Error while getting tags for job")
return nil, err
}
archiveTags, err := r.getArchiveTags(&job)
if err != nil {
log.Warn("Error while getting tags for job")
cclog.Warn("Error while getting tags for job")
return nil, err
}
@@ -113,14 +113,14 @@ func (r *JobRepository) RemoveJobTagByRequest(user *schema.User, job int64, tagT
// Get Tag ID to delete
tagID, exists := r.TagId(tagType, tagName, tagScope)
if !exists {
log.Warnf("Tag does not exist (name, type, scope): %s, %s, %s", tagName, tagType, tagScope)
cclog.Warnf("Tag does not exist (name, type, scope): %s, %s, %s", tagName, tagType, tagScope)
return nil, fmt.Errorf("tag does not exist (name, type, scope): %s, %s, %s", tagName, tagType, tagScope)
}
// Get Job
j, err := r.FindByIdWithUser(user, job)
if err != nil {
log.Warn("Error while finding job by id")
cclog.Warn("Error while finding job by id")
return nil, err
}
@@ -129,19 +129,19 @@ func (r *JobRepository) RemoveJobTagByRequest(user *schema.User, job int64, tagT
if _, err := q.RunWith(r.stmtCache).Exec(); err != nil {
s, _, _ := q.ToSql()
log.Errorf("Error removing tag from table 'jobTag' with %s: %v", s, err)
cclog.Errorf("Error removing tag from table 'jobTag' with %s: %v", s, err)
return nil, err
}
tags, err := r.GetTags(user, &job)
if err != nil {
log.Warn("Error while getting tags for job")
cclog.Warn("Error while getting tags for job")
return nil, err
}
archiveTags, err := r.getArchiveTags(&job)
if err != nil {
log.Warn("Error while getting tags for job")
cclog.Warn("Error while getting tags for job")
return nil, err
}
@@ -152,13 +152,13 @@ func (r *JobRepository) removeTagFromArchiveJobs(jobIds []int64) {
for _, j := range jobIds {
tags, err := r.getArchiveTags(&j)
if err != nil {
log.Warnf("Error while getting tags for job %d", j)
cclog.Warnf("Error while getting tags for job %d", j)
continue
}
job, err := r.FindByIdDirect(j)
if err != nil {
log.Warnf("Error while getting job %d", j)
cclog.Warnf("Error while getting job %d", j)
continue
}
@@ -172,7 +172,7 @@ func (r *JobRepository) RemoveTagByRequest(tagType string, tagName string, tagSc
// Get Tag ID to delete
tagID, exists := r.TagId(tagType, tagName, tagScope)
if !exists {
log.Warnf("Tag does not exist (name, type, scope): %s, %s, %s", tagName, tagType, tagScope)
cclog.Warnf("Tag does not exist (name, type, scope): %s, %s, %s", tagName, tagType, tagScope)
return fmt.Errorf("tag does not exist (name, type, scope): %s, %s, %s", tagName, tagType, tagScope)
}
@@ -192,7 +192,7 @@ func (r *JobRepository) RemoveTagById(tagID int64) error {
if _, err := qJobTag.RunWith(r.stmtCache).Exec(); err != nil {
s, _, _ := qJobTag.ToSql()
log.Errorf("Error removing tag from table 'jobTag' with %s: %v", s, err)
cclog.Errorf("Error removing tag from table 'jobTag' with %s: %v", s, err)
return err
}
@@ -201,7 +201,7 @@ func (r *JobRepository) RemoveTagById(tagID int64) error {
if _, err := qTag.RunWith(r.stmtCache).Exec(); err != nil {
s, _, _ := qTag.ToSql()
log.Errorf("Error removing tag from table 'tag' with %s: %v", s, err)
cclog.Errorf("Error removing tag from table 'tag' with %s: %v", s, err)
return err
}
@@ -223,7 +223,7 @@ func (r *JobRepository) CreateTag(tagType string, tagName string, tagScope strin
res, err := q.RunWith(r.stmtCache).Exec()
if err != nil {
s, _, _ := q.ToSql()
log.Errorf("Error inserting tag with %s: %v", s, err)
cclog.Errorf("Error inserting tag with %s: %v", s, err)
return 0, err
}
@@ -272,7 +272,7 @@ func (r *JobRepository) CountTags(user *schema.User) (tags []schema.Tag, counts
// Handle Job Ownership
if user != nil && user.HasAnyRole([]schema.Role{schema.RoleAdmin, schema.RoleSupport}) { // ADMIN || SUPPORT: Count all jobs
// log.Debug("CountTags: User Admin or Support -> Count all Jobs for Tags")
// cclog.Debug("CountTags: User Admin or Support -> Count all Jobs for Tags")
// Unchanged: Needs to be own case still, due to UserRole/NoRole compatibility handling in else case
} else if user != nil && user.HasRole(schema.RoleManager) { // MANAGER: Count own jobs plus project's jobs
// Build ("project1", "project2", ...) list of variable length directly in SQL string
@@ -396,7 +396,7 @@ func (r *JobRepository) GetTags(user *schema.User, job *int64) ([]*schema.Tag, e
rows, err := q.RunWith(r.stmtCache).Query()
if err != nil {
s, _, _ := q.ToSql()
log.Errorf("Error get tags with %s: %v", s, err)
cclog.Errorf("Error get tags with %s: %v", s, err)
return nil, err
}
@@ -404,7 +404,7 @@ func (r *JobRepository) GetTags(user *schema.User, job *int64) ([]*schema.Tag, e
for rows.Next() {
tag := &schema.Tag{}
if err := rows.Scan(&tag.ID, &tag.Type, &tag.Name, &tag.Scope); err != nil {
log.Warn("Error while scanning rows")
cclog.Warn("Error while scanning rows")
return nil, err
}
// Handle Scope Filtering: Tag Scope is Global, Private (== Username) or User is auth'd to view Admin Tags
@@ -429,7 +429,7 @@ func (r *JobRepository) GetTagsDirect(job *int64) ([]*schema.Tag, error) {
rows, err := q.RunWith(r.stmtCache).Query()
if err != nil {
s, _, _ := q.ToSql()
log.Errorf("Error get tags with %s: %v", s, err)
cclog.Errorf("Error get tags with %s: %v", s, err)
return nil, err
}
@@ -437,7 +437,7 @@ func (r *JobRepository) GetTagsDirect(job *int64) ([]*schema.Tag, error) {
for rows.Next() {
tag := &schema.Tag{}
if err := rows.Scan(&tag.ID, &tag.Type, &tag.Name, &tag.Scope); err != nil {
log.Warn("Error while scanning rows")
cclog.Warn("Error while scanning rows")
return nil, err
}
tags = append(tags, tag)
@@ -456,7 +456,7 @@ func (r *JobRepository) getArchiveTags(job *int64) ([]*schema.Tag, error) {
rows, err := q.RunWith(r.stmtCache).Query()
if err != nil {
s, _, _ := q.ToSql()
log.Errorf("Error get tags with %s: %v", s, err)
cclog.Errorf("Error get tags with %s: %v", s, err)
return nil, err
}
@@ -464,7 +464,7 @@ func (r *JobRepository) getArchiveTags(job *int64) ([]*schema.Tag, error) {
for rows.Next() {
tag := &schema.Tag{}
if err := rows.Scan(&tag.ID, &tag.Type, &tag.Name, &tag.Scope); err != nil {
log.Warn("Error while scanning rows")
cclog.Warn("Error while scanning rows")
return nil, err
}
tags = append(tags, tag)
@@ -488,7 +488,7 @@ func (r *JobRepository) ImportTag(jobId int64, tagType string, tagName string, t
if _, err := q.RunWith(r.stmtCache).Exec(); err != nil {
s, _, _ := q.ToSql()
log.Errorf("Error adding tag on import with %s: %v", s, err)
cclog.Errorf("Error adding tag on import with %s: %v", s, err)
return err
}

View File

@@ -1,11 +1,11 @@
// Copyright (C) NHR@FAU, University Erlangen-Nuremberg.
// All rights reserved.
// All rights reserved. This file is part of cc-backend.
// Use of this source code is governed by a MIT-style
// license that can be found in the LICENSE file.
package repository
import (
"github.com/ClusterCockpit/cc-backend/pkg/log"
cclog "github.com/ClusterCockpit/cc-lib/ccLogger"
"github.com/jmoiron/sqlx"
)
@@ -20,7 +20,7 @@ func (r *JobRepository) TransactionInit() (*Transaction, error) {
t.tx, err = r.DB.Beginx()
if err != nil {
log.Warn("Error while bundling transactions")
cclog.Warn("Error while bundling transactions")
return nil, err
}
return t, nil
@@ -30,14 +30,14 @@ func (r *JobRepository) TransactionCommit(t *Transaction) error {
var err error
if t.tx != nil {
if err = t.tx.Commit(); err != nil {
log.Warn("Error while committing transactions")
cclog.Warn("Error while committing transactions")
return err
}
}
t.tx, err = r.DB.Beginx()
if err != nil {
log.Warn("Error while bundling transactions")
cclog.Warn("Error while bundling transactions")
return err
}
@@ -46,7 +46,7 @@ func (r *JobRepository) TransactionCommit(t *Transaction) error {
func (r *JobRepository) TransactionEnd(t *Transaction) error {
if err := t.tx.Commit(); err != nil {
log.Warn("Error while committing SQL transactions")
cclog.Warn("Error while committing SQL transactions")
return err
}
return nil
@@ -59,13 +59,13 @@ func (r *JobRepository) TransactionAddNamed(
) (int64, error) {
res, err := t.tx.NamedExec(query, args)
if err != nil {
log.Errorf("Named Exec failed: %v", err)
cclog.Errorf("Named Exec failed: %v", err)
return 0, err
}
id, err := res.LastInsertId()
if err != nil {
log.Errorf("repository initDB(): %v", err)
cclog.Errorf("repository initDB(): %v", err)
return 0, err
}
@@ -73,16 +73,15 @@ func (r *JobRepository) TransactionAddNamed(
}
func (r *JobRepository) TransactionAdd(t *Transaction, query string, args ...interface{}) (int64, error) {
res, err := t.tx.Exec(query, args...)
if err != nil {
log.Errorf("TransactionAdd(), Exec() Error: %v", err)
cclog.Errorf("TransactionAdd(), Exec() Error: %v", err)
return 0, err
}
id, err := res.LastInsertId()
if err != nil {
log.Errorf("TransactionAdd(), LastInsertId() Error: %v", err)
cclog.Errorf("TransactionAdd(), LastInsertId() Error: %v", err)
return 0, err
}

View File

@@ -1,5 +1,5 @@
// Copyright (C) NHR@FAU, University Erlangen-Nuremberg.
// All rights reserved.
// All rights reserved. This file is part of cc-backend.
// Use of this source code is governed by a MIT-style
// license that can be found in the LICENSE file.
package repository
@@ -13,13 +13,13 @@ import (
"strings"
"sync"
"github.com/ClusterCockpit/cc-backend/internal/config"
"github.com/ClusterCockpit/cc-backend/internal/graph/model"
"github.com/ClusterCockpit/cc-backend/pkg/log"
"github.com/ClusterCockpit/cc-backend/pkg/schema"
cclog "github.com/ClusterCockpit/cc-lib/ccLogger"
"github.com/ClusterCockpit/cc-lib/schema"
sq "github.com/Masterminds/squirrel"
"github.com/jmoiron/sqlx"
"golang.org/x/crypto/bcrypt"
"github.com/ClusterCockpit/cc-backend/internal/config"
)
var (
@@ -50,7 +50,7 @@ func (r *UserRepository) GetUser(username string) (*schema.User, error) {
if err := sq.Select("password", "ldap", "name", "roles", "email", "projects").From("hpc_user").
Where("hpc_user.username = ?", username).RunWith(r.DB).
QueryRow().Scan(&hashedPassword, &user.AuthSource, &name, &rawRoles, &email, &rawProjects); err != nil {
log.Warnf("Error while querying user '%v' from database", username)
cclog.Warnf("Error while querying user '%v' from database", username)
return nil, err
}
@@ -59,7 +59,7 @@ func (r *UserRepository) GetUser(username string) (*schema.User, error) {
user.Email = email.String
if rawRoles.Valid {
if err := json.Unmarshal([]byte(rawRoles.String), &user.Roles); err != nil {
log.Warn("Error while unmarshaling raw roles from DB")
cclog.Warn("Error while unmarshaling raw roles from DB")
return nil, err
}
}
@@ -76,14 +76,14 @@ func (r *UserRepository) GetLdapUsernames() ([]string, error) {
var users []string
rows, err := r.DB.Query(`SELECT username FROM hpc_user WHERE hpc_user.ldap = 1`)
if err != nil {
log.Warn("Error while querying usernames")
cclog.Warn("Error while querying usernames")
return nil, err
}
for rows.Next() {
var username string
if err := rows.Scan(&username); err != nil {
log.Warnf("Error while scanning for user '%s'", username)
cclog.Warnf("Error while scanning for user '%s'", username)
return nil, err
}
@@ -111,7 +111,7 @@ func (r *UserRepository) AddUser(user *schema.User) error {
if user.Password != "" {
password, err := bcrypt.GenerateFromPassword([]byte(user.Password), bcrypt.DefaultCost)
if err != nil {
log.Error("Error while encrypting new user password")
cclog.Error("Error while encrypting new user password")
return err
}
cols = append(cols, "password")
@@ -123,21 +123,21 @@ func (r *UserRepository) AddUser(user *schema.User) error {
}
if _, err := sq.Insert("hpc_user").Columns(cols...).Values(vals...).RunWith(r.DB).Exec(); err != nil {
log.Errorf("Error while inserting new user '%v' into DB", user.Username)
cclog.Errorf("Error while inserting new user '%v' into DB", user.Username)
return err
}
log.Infof("new user %#v created (roles: %s, auth-source: %d, projects: %s)", user.Username, rolesJson, user.AuthSource, projectsJson)
cclog.Infof("new user %#v created (roles: %s, auth-source: %d, projects: %s)", user.Username, rolesJson, user.AuthSource, projectsJson)
defaultMetricsCfg, err := config.LoadDefaultMetricsConfig()
if err != nil {
log.Errorf("Error loading default metrics config: %v", err)
cclog.Errorf("Error loading default metrics config: %v", err)
} else if defaultMetricsCfg != nil {
for _, cluster := range defaultMetricsCfg.Clusters {
metricsArray := config.ParseMetricsString(cluster.DefaultMetrics)
metricsJSON, err := json.Marshal(metricsArray)
if err != nil {
log.Errorf("Error marshaling default metrics for cluster %s: %v", cluster.Name, err)
cclog.Errorf("Error marshaling default metrics for cluster %s: %v", cluster.Name, err)
continue
}
confKey := "job_view_selectedMetrics:" + cluster.Name
@@ -145,9 +145,9 @@ func (r *UserRepository) AddUser(user *schema.User) error {
Columns("username", "confkey", "value").
Values(user.Username, confKey, string(metricsJSON)).
RunWith(r.DB).Exec(); err != nil {
log.Errorf("Error inserting default job view metrics for user %s and cluster %s: %v", user.Username, cluster.Name, err)
cclog.Errorf("Error inserting default job view metrics for user %s and cluster %s: %v", user.Username, cluster.Name, err)
} else {
log.Infof("Default job view metrics for user %s and cluster %s set to %s", user.Username, cluster.Name, string(metricsJSON))
cclog.Infof("Default job view metrics for user %s and cluster %s set to %s", user.Username, cluster.Name, string(metricsJSON))
}
}
}
@@ -160,7 +160,7 @@ func (r *UserRepository) UpdateUser(dbUser *schema.User, user *schema.User) erro
// TODO: Discuss updatable fields
if dbUser.Name != user.Name {
if _, err := sq.Update("hpc_user").Set("name", user.Name).Where("hpc_user.username = ?", dbUser.Username).RunWith(r.DB).Exec(); err != nil {
log.Errorf("error while updating name of user '%s'", user.Username)
cclog.Errorf("error while updating name of user '%s'", user.Username)
return err
}
}
@@ -179,10 +179,10 @@ func (r *UserRepository) UpdateUser(dbUser *schema.User, user *schema.User) erro
func (r *UserRepository) DelUser(username string) error {
_, err := r.DB.Exec(`DELETE FROM hpc_user WHERE hpc_user.username = ?`, username)
if err != nil {
log.Errorf("Error while deleting user '%s' from DB", username)
cclog.Errorf("Error while deleting user '%s' from DB", username)
return err
}
log.Infof("deleted user '%s' from DB", username)
cclog.Infof("deleted user '%s' from DB", username)
return nil
}
@@ -194,7 +194,7 @@ func (r *UserRepository) ListUsers(specialsOnly bool) ([]*schema.User, error) {
rows, err := q.RunWith(r.DB).Query()
if err != nil {
log.Warn("Error while querying user list")
cclog.Warn("Error while querying user list")
return nil, err
}
@@ -206,12 +206,12 @@ func (r *UserRepository) ListUsers(specialsOnly bool) ([]*schema.User, error) {
user := &schema.User{}
var name, email sql.NullString
if err := rows.Scan(&user.Username, &name, &email, &rawroles, &rawprojects); err != nil {
log.Warn("Error while scanning user list")
cclog.Warn("Error while scanning user list")
return nil, err
}
if err := json.Unmarshal([]byte(rawroles), &user.Roles); err != nil {
log.Warn("Error while unmarshaling raw role list")
cclog.Warn("Error while unmarshaling raw role list")
return nil, err
}
@@ -234,7 +234,7 @@ func (r *UserRepository) AddRole(
newRole := strings.ToLower(queryrole)
user, err := r.GetUser(username)
if err != nil {
log.Warnf("Could not load user '%s'", username)
cclog.Warnf("Could not load user '%s'", username)
return err
}
@@ -249,7 +249,7 @@ func (r *UserRepository) AddRole(
roles, _ := json.Marshal(append(user.Roles, newRole))
if _, err := sq.Update("hpc_user").Set("roles", roles).Where("hpc_user.username = ?", username).RunWith(r.DB).Exec(); err != nil {
log.Errorf("error while adding new role for user '%s'", user.Username)
cclog.Errorf("error while adding new role for user '%s'", user.Username)
return err
}
return nil
@@ -259,7 +259,7 @@ func (r *UserRepository) RemoveRole(ctx context.Context, username string, queryr
oldRole := strings.ToLower(queryrole)
user, err := r.GetUser(username)
if err != nil {
log.Warnf("Could not load user '%s'", username)
cclog.Warnf("Could not load user '%s'", username)
return err
}
@@ -285,7 +285,7 @@ func (r *UserRepository) RemoveRole(ctx context.Context, username string, queryr
mroles, _ := json.Marshal(newroles)
if _, err := sq.Update("hpc_user").Set("roles", mroles).Where("hpc_user.username = ?", username).RunWith(r.DB).Exec(); err != nil {
log.Errorf("Error while removing role for user '%s'", user.Username)
cclog.Errorf("Error while removing role for user '%s'", user.Username)
return err
}
return nil
@@ -364,10 +364,10 @@ const ContextUserKey ContextKey = "user"
func GetUserFromContext(ctx context.Context) *schema.User {
x := ctx.Value(ContextUserKey)
if x == nil {
log.Warnf("no user retrieved from context")
cclog.Warnf("no user retrieved from context")
return nil
}
// log.Infof("user retrieved from context: %v", x.(*schema.User))
// cclog.Infof("user retrieved from context: %v", x.(*schema.User))
return x.(*schema.User)
}
@@ -385,11 +385,11 @@ func (r *UserRepository) FetchUserInCtx(ctx context.Context, username string) (*
if err == sql.ErrNoRows {
/* This warning will be logged *often* for non-local users, i.e. users mentioned only in job-table or archive, */
/* since FetchUser will be called to retrieve full name and mail for every job in query/list */
// log.Warnf("User '%s' Not found in DB", username)
// cclog.Warnf("User '%s' Not found in DB", username)
return nil, nil
}
log.Warnf("Error while fetching user '%s'", username)
cclog.Warnf("Error while fetching user '%s'", username)
return nil, err
}

View File

@@ -1,5 +1,5 @@
// Copyright (C) NHR@FAU, University Erlangen-Nuremberg.
// All rights reserved.
// All rights reserved. This file is part of cc-backend.
// Use of this source code is governed by a MIT-style
// license that can be found in the LICENSE file.
package repository
@@ -10,9 +10,9 @@ import (
"time"
"github.com/ClusterCockpit/cc-backend/internal/config"
"github.com/ClusterCockpit/cc-backend/pkg/log"
"github.com/ClusterCockpit/cc-backend/pkg/lrucache"
"github.com/ClusterCockpit/cc-backend/pkg/schema"
cclog "github.com/ClusterCockpit/cc-lib/ccLogger"
"github.com/ClusterCockpit/cc-lib/lrucache"
"github.com/ClusterCockpit/cc-lib/schema"
"github.com/jmoiron/sqlx"
)
@@ -35,7 +35,7 @@ func GetUserCfgRepo() *UserCfgRepo {
lookupConfigStmt, err := db.DB.Preparex(`SELECT confkey, value FROM configuration WHERE configuration.username = ?`)
if err != nil {
log.Fatalf("User Config: Call 'db.DB.Preparex()' failed.\nError: %s\n", err.Error())
cclog.Fatalf("User Config: Call 'db.DB.Preparex()' failed.\nError: %s\n", err.Error())
}
userCfgRepoInstance = &UserCfgRepo{
@@ -70,7 +70,7 @@ func (uCfg *UserCfgRepo) GetUIConfig(user *schema.User) (map[string]interface{},
rows, err := uCfg.Lookup.Query(user.Username)
if err != nil {
log.Warnf("Error while looking up user uiconfig for user '%v'", user.Username)
cclog.Warnf("Error while looking up user uiconfig for user '%v'", user.Username)
return err, 0, 0
}
@@ -79,13 +79,13 @@ func (uCfg *UserCfgRepo) GetUIConfig(user *schema.User) (map[string]interface{},
for rows.Next() {
var key, rawval string
if err := rows.Scan(&key, &rawval); err != nil {
log.Warn("Error while scanning user uiconfig values")
cclog.Warn("Error while scanning user uiconfig values")
return err, 0, 0
}
var val interface{}
if err := json.Unmarshal([]byte(rawval), &val); err != nil {
log.Warn("Error while unmarshaling raw user uiconfig json")
cclog.Warn("Error while unmarshaling raw user uiconfig json")
return err, 0, 0
}
@@ -100,7 +100,7 @@ func (uCfg *UserCfgRepo) GetUIConfig(user *schema.User) (map[string]interface{},
return uiconfig, 24 * time.Hour, size
})
if err, ok := data.(error); ok {
log.Error("Error in returned dataset")
cclog.Error("Error in returned dataset")
return nil, err
}
@@ -117,7 +117,7 @@ func (uCfg *UserCfgRepo) UpdateConfig(
if user == nil {
var val interface{}
if err := json.Unmarshal([]byte(value), &val); err != nil {
log.Warn("Error while unmarshaling raw user config json")
cclog.Warn("Error while unmarshaling raw user config json")
return err
}
@@ -128,7 +128,7 @@ func (uCfg *UserCfgRepo) UpdateConfig(
}
if _, err := uCfg.DB.Exec(`REPLACE INTO configuration (username, confkey, value) VALUES (?, ?, ?)`, user.Username, key, value); err != nil {
log.Warnf("Error while replacing user config in DB for user '%v'", user.Username)
cclog.Warnf("Error while replacing user config in DB for user '%v'", user.Username)
return err
}

View File

@@ -1,5 +1,5 @@
// Copyright (C) NHR@FAU, University Erlangen-Nuremberg.
// All rights reserved.
// All rights reserved. This file is part of cc-backend.
// Use of this source code is governed by a MIT-style
// license that can be found in the LICENSE file.
package repository
@@ -10,8 +10,8 @@ import (
"testing"
"github.com/ClusterCockpit/cc-backend/internal/config"
"github.com/ClusterCockpit/cc-backend/pkg/log"
"github.com/ClusterCockpit/cc-backend/pkg/schema"
cclog "github.com/ClusterCockpit/cc-lib/ccLogger"
"github.com/ClusterCockpit/cc-lib/schema"
_ "github.com/mattn/go-sqlite3"
)
@@ -39,7 +39,7 @@ func setupUserTest(t *testing.T) *UserCfgRepo {
} } ]
}`
log.Init("info", true)
cclog.Init("info", true)
dbfilepath := "testdata/job.db"
err := MigrateDB("sqlite3", dbfilepath)
if err != nil {

View File

@@ -1,5 +1,5 @@
// Copyright (C) NHR@FAU, University Erlangen-Nuremberg.
// All rights reserved.
// All rights reserved. This file is part of cc-backend.
// Use of this source code is governed by a MIT-style
// license that can be found in the LICENSE file.
package routerConfig
@@ -16,10 +16,10 @@ import (
"github.com/ClusterCockpit/cc-backend/internal/config"
"github.com/ClusterCockpit/cc-backend/internal/graph/model"
"github.com/ClusterCockpit/cc-backend/internal/repository"
"github.com/ClusterCockpit/cc-backend/internal/util"
"github.com/ClusterCockpit/cc-backend/pkg/log"
"github.com/ClusterCockpit/cc-backend/pkg/schema"
"github.com/ClusterCockpit/cc-backend/web"
cclog "github.com/ClusterCockpit/cc-lib/ccLogger"
"github.com/ClusterCockpit/cc-lib/schema"
"github.com/ClusterCockpit/cc-lib/util"
"github.com/gorilla/mux"
)
@@ -57,23 +57,23 @@ func setupHomeRoute(i InfoType, r *http.Request) InfoType {
// startJobCount := time.Now()
stats, err := jobRepo.JobCountGrouped(r.Context(), nil, &groupBy)
if err != nil {
log.Warnf("failed to count jobs: %s", err.Error())
cclog.Warnf("failed to count jobs: %s", err.Error())
}
// log.Infof("Timer HOME ROUTE startJobCount: %s", time.Since(startJobCount))
// cclog.Infof("Timer HOME ROUTE startJobCount: %s", time.Since(startJobCount))
// startRunningJobCount := time.Now()
stats, err = jobRepo.AddJobCountGrouped(r.Context(), nil, &groupBy, stats, "running")
if err != nil {
log.Warnf("failed to count running jobs: %s", err.Error())
cclog.Warnf("failed to count running jobs: %s", err.Error())
}
// log.Infof("Timer HOME ROUTE startRunningJobCount: %s", time.Since(startRunningJobCount))
// cclog.Infof("Timer HOME ROUTE startRunningJobCount: %s", time.Since(startRunningJobCount))
i["clusters"] = stats
if util.CheckFileExists("./var/notice.txt") {
msg, err := os.ReadFile("./var/notice.txt")
if err != nil {
log.Warnf("failed to read notice.txt file: %s", err.Error())
cclog.Warnf("failed to read notice.txt file: %s", err.Error())
} else {
i["message"] = string(msg)
}
@@ -178,7 +178,7 @@ func setupTaglistRoute(i InfoType, r *http.Request) InfoType {
tags, counts, err := jobRepo.CountTags(repository.GetUserFromContext(r.Context()))
tagMap := make(map[string][]map[string]interface{})
if err != nil {
log.Warnf("GetTags failed: %s", err.Error())
cclog.Warnf("GetTags failed: %s", err.Error())
i["tagmap"] = tagMap
return i
}

View File

@@ -15,10 +15,10 @@ import (
"text/template"
"github.com/ClusterCockpit/cc-backend/internal/repository"
"github.com/ClusterCockpit/cc-backend/internal/util"
"github.com/ClusterCockpit/cc-backend/pkg/archive"
"github.com/ClusterCockpit/cc-backend/pkg/log"
"github.com/ClusterCockpit/cc-backend/pkg/schema"
cclog "github.com/ClusterCockpit/cc-lib/ccLogger"
"github.com/ClusterCockpit/cc-lib/schema"
"github.com/ClusterCockpit/cc-lib/util"
"github.com/expr-lang/expr"
"github.com/expr-lang/expr/vm"
)
@@ -66,7 +66,7 @@ type JobClassTagger struct {
func (t *JobClassTagger) prepareRule(b []byte, fns string) {
var rule RuleFormat
if err := json.NewDecoder(bytes.NewReader(b)).Decode(&rule); err != nil {
log.Warn("Error while decoding raw job meta json")
cclog.Warn("Error while decoding raw job meta json")
return
}
@@ -80,7 +80,7 @@ func (t *JobClassTagger) prepareRule(b []byte, fns string) {
for _, p := range rule.Parameters {
param, ok := t.parameters[p]
if !ok {
log.Warnf("prepareRule() > missing parameter %s in rule %s", p, fns)
cclog.Warnf("prepareRule() > missing parameter %s in rule %s", p, fns)
return
}
ri.env[p] = param
@@ -93,7 +93,7 @@ func (t *JobClassTagger) prepareRule(b []byte, fns string) {
for _, r := range rule.Requirements {
req, err := expr.Compile(r, expr.AsBool())
if err != nil {
log.Errorf("error compiling requirement %s: %#v", r, err)
cclog.Errorf("error compiling requirement %s: %#v", r, err)
return
}
ri.requirements = append(ri.requirements, req)
@@ -103,7 +103,7 @@ func (t *JobClassTagger) prepareRule(b []byte, fns string) {
for _, v := range rule.Variables {
req, err := expr.Compile(v.Expr, expr.AsFloat64())
if err != nil {
log.Errorf("error compiling requirement %s: %#v", v.Name, err)
cclog.Errorf("error compiling requirement %s: %#v", v.Name, err)
return
}
ri.variables = append(ri.variables, ruleVariable{name: v.Name, expr: req})
@@ -112,7 +112,7 @@ func (t *JobClassTagger) prepareRule(b []byte, fns string) {
// compile rule
exp, err := expr.Compile(rule.Rule, expr.AsBool())
if err != nil {
log.Errorf("error compiling rule %s: %#v", fns, err)
cclog.Errorf("error compiling rule %s: %#v", fns, err)
return
}
ri.rule = exp
@@ -120,9 +120,9 @@ func (t *JobClassTagger) prepareRule(b []byte, fns string) {
// prepare hint template
ri.hint, err = template.New(fns).Parse(rule.Hint)
if err != nil {
log.Errorf("error processing template %s: %#v", fns, err)
cclog.Errorf("error processing template %s: %#v", fns, err)
}
log.Infof("prepareRule() > processing %s with %d requirements and %d variables", fns, len(ri.requirements), len(ri.variables))
cclog.Infof("prepareRule() > processing %s with %d requirements and %d variables", fns, len(ri.requirements), len(ri.variables))
t.rules[rule.Tag] = ri
}
@@ -135,19 +135,19 @@ func (t *JobClassTagger) EventMatch(s string) bool {
func (t *JobClassTagger) EventCallback() {
files, err := os.ReadDir(t.cfgPath)
if err != nil {
log.Fatal(err)
cclog.Fatal(err)
}
if util.CheckFileExists(t.cfgPath + "/parameters.json") {
log.Info("Merge parameters")
cclog.Info("Merge parameters")
b, err := os.ReadFile(t.cfgPath + "/parameters.json")
if err != nil {
log.Warnf("prepareRule() > open file error: %v", err)
cclog.Warnf("prepareRule() > open file error: %v", err)
}
var paramTmp map[string]any
if err := json.NewDecoder(bytes.NewReader(b)).Decode(&paramTmp); err != nil {
log.Warn("Error while decoding parameters.json")
cclog.Warn("Error while decoding parameters.json")
}
maps.Copy(t.parameters, paramTmp)
@@ -156,11 +156,11 @@ func (t *JobClassTagger) EventCallback() {
for _, fn := range files {
fns := fn.Name()
if fns != "parameters.json" {
log.Debugf("Process: %s", fns)
cclog.Debugf("Process: %s", fns)
filename := fmt.Sprintf("%s/%s", t.cfgPath, fns)
b, err := os.ReadFile(filename)
if err != nil {
log.Warnf("prepareRule() > open file error: %v", err)
cclog.Warnf("prepareRule() > open file error: %v", err)
return
}
t.prepareRule(b, fns)
@@ -169,15 +169,15 @@ func (t *JobClassTagger) EventCallback() {
}
func (t *JobClassTagger) initParameters() error {
log.Info("Initialize parameters")
cclog.Info("Initialize parameters")
b, err := jobclassFiles.ReadFile("jobclasses/parameters.json")
if err != nil {
log.Warnf("prepareRule() > open file error: %v", err)
cclog.Warnf("prepareRule() > open file error: %v", err)
return err
}
if err := json.NewDecoder(bytes.NewReader(b)).Decode(&t.parameters); err != nil {
log.Warn("Error while decoding parameters.json")
cclog.Warn("Error while decoding parameters.json")
return err
}
@@ -190,7 +190,7 @@ func (t *JobClassTagger) Register() error {
err := t.initParameters()
if err != nil {
log.Warnf("error reading parameters.json: %v", err)
cclog.Warnf("error reading parameters.json: %v", err)
return err
}
@@ -203,11 +203,11 @@ func (t *JobClassTagger) Register() error {
fns := fn.Name()
if fns != "parameters.json" {
filename := fmt.Sprintf("jobclasses/%s", fns)
log.Infof("Process: %s", fns)
cclog.Infof("Process: %s", fns)
b, err := jobclassFiles.ReadFile(filename)
if err != nil {
log.Warnf("prepareRule() > open file error: %v", err)
cclog.Warnf("prepareRule() > open file error: %v", err)
return err
}
t.prepareRule(b, fns)
@@ -216,7 +216,7 @@ func (t *JobClassTagger) Register() error {
if util.CheckFileExists(t.cfgPath) {
t.EventCallback()
log.Infof("Setup file watch for %s", t.cfgPath)
cclog.Infof("Setup file watch for %s", t.cfgPath)
util.AddListener(t.cfgPath, t)
}
@@ -227,16 +227,16 @@ func (t *JobClassTagger) Match(job *schema.Job) {
r := repository.GetJobRepository()
jobstats, err := archive.GetStatistics(job)
metricsList := archive.GetMetricConfigSubCluster(job.Cluster, job.SubCluster)
log.Infof("Enter match rule with %d rules for job %d", len(t.rules), job.JobID)
cclog.Infof("Enter match rule with %d rules for job %d", len(t.rules), job.JobID)
if err != nil {
log.Errorf("job classification failed for job %d: %#v", job.JobID, err)
cclog.Errorf("job classification failed for job %d: %#v", job.JobID, err)
return
}
for tag, ri := range t.rules {
env := make(map[string]any)
maps.Copy(env, ri.env)
log.Infof("Try to match rule %s for job %d", tag, job.JobID)
cclog.Infof("Try to match rule %s for job %d", tag, job.JobID)
// Initialize environment
env["job"] = map[string]any{
@@ -253,7 +253,7 @@ func (t *JobClassTagger) Match(job *schema.Job) {
for _, m := range ri.metrics {
stats, ok := jobstats[m]
if !ok {
log.Errorf("job classification failed for job %d: missing metric '%s'", job.JobID, m)
cclog.Errorf("job classification failed for job %d: missing metric '%s'", job.JobID, m)
return
}
env[m] = map[string]any{
@@ -273,11 +273,11 @@ func (t *JobClassTagger) Match(job *schema.Job) {
for _, r := range ri.requirements {
ok, err := expr.Run(r, env)
if err != nil {
log.Errorf("error running requirement for rule %s: %#v", tag, err)
cclog.Errorf("error running requirement for rule %s: %#v", tag, err)
return
}
if !ok.(bool) {
log.Infof("requirement for rule %s not met", tag)
cclog.Infof("requirement for rule %s not met", tag)
return
}
}
@@ -286,7 +286,7 @@ func (t *JobClassTagger) Match(job *schema.Job) {
for _, v := range ri.variables {
value, err := expr.Run(v.expr, env)
if err != nil {
log.Errorf("error running rule %s: %#v", tag, err)
cclog.Errorf("error running rule %s: %#v", tag, err)
return
}
env[v.name] = value
@@ -296,11 +296,11 @@ func (t *JobClassTagger) Match(job *schema.Job) {
match, err := expr.Run(ri.rule, env)
if err != nil {
log.Errorf("error running rule %s: %#v", tag, err)
cclog.Errorf("error running rule %s: %#v", tag, err)
return
}
if match.(bool) {
log.Info("Rule matches!")
cclog.Info("Rule matches!")
id := *job.ID
if !r.HasTag(id, t.tagType, tag) {
r.AddTagOrCreateDirect(id, t.tagType, tag)
@@ -309,14 +309,14 @@ func (t *JobClassTagger) Match(job *schema.Job) {
// process hint template
var msg bytes.Buffer
if err := ri.hint.Execute(&msg, env); err != nil {
log.Errorf("Template error: %s", err.Error())
cclog.Errorf("Template error: %s", err.Error())
return
}
// FIXME: Handle case where multiple tags apply
r.UpdateMetadata(job, "message", msg.String())
} else {
log.Info("Rule does not match!")
cclog.Info("Rule does not match!")
}
}
}
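
The JobClassTagger above leans on github.com/expr-lang/expr: prepareRule() compiles the rule, requirement and variable expressions once, and Match() evaluates them against a per-job environment built from metadata and metric statistics. A minimal, self-contained sketch of that compile-and-evaluate flow; the expression text and environment keys here are made up for illustration:

package example

import (
	"fmt"

	"github.com/expr-lang/expr"
)

func matchSketch() error {
	// Compiled once, as in prepareRule().
	program, err := expr.Compile("duration > 3600 && load_mean < 0.5", expr.AsBool())
	if err != nil {
		return fmt.Errorf("compiling rule: %w", err)
	}

	// Built per job, as in Match().
	env := map[string]any{
		"duration":  7200.0,
		"load_mean": 0.2,
	}

	match, err := expr.Run(program, env)
	if err != nil {
		return fmt.Errorf("running rule: %w", err)
	}
	fmt.Println("rule matched:", match.(bool))
	return nil
}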

View File

@@ -15,9 +15,9 @@ import (
"strings"
"github.com/ClusterCockpit/cc-backend/internal/repository"
"github.com/ClusterCockpit/cc-backend/internal/util"
"github.com/ClusterCockpit/cc-backend/pkg/log"
"github.com/ClusterCockpit/cc-backend/pkg/schema"
cclog "github.com/ClusterCockpit/cc-lib/ccLogger"
"github.com/ClusterCockpit/cc-lib/schema"
"github.com/ClusterCockpit/cc-lib/util"
)
//go:embed apps/*
@@ -53,15 +53,15 @@ func (t *AppTagger) EventMatch(s string) bool {
func (t *AppTagger) EventCallback() {
files, err := os.ReadDir(t.cfgPath)
if err != nil {
log.Fatal(err)
cclog.Fatal(err)
}
for _, fn := range files {
fns := fn.Name()
log.Debugf("Process: %s", fns)
cclog.Debugf("Process: %s", fns)
f, err := os.Open(fmt.Sprintf("%s/%s", t.cfgPath, fns))
if err != nil {
log.Errorf("error opening app file %s: %#v", fns, err)
cclog.Errorf("error opening app file %s: %#v", fns, err)
}
t.scanApp(f, fns)
}
@@ -78,7 +78,7 @@ func (t *AppTagger) Register() error {
t.apps = make(map[string]appInfo, 0)
for _, fn := range files {
fns := fn.Name()
log.Debugf("Process: %s", fns)
cclog.Debugf("Process: %s", fns)
f, err := appFiles.Open(fmt.Sprintf("apps/%s", fns))
if err != nil {
return fmt.Errorf("error opening app file %s: %#v", fns, err)
@@ -89,7 +89,7 @@ func (t *AppTagger) Register() error {
if util.CheckFileExists(t.cfgPath) {
t.EventCallback()
log.Infof("Setup file watch for %s", t.cfgPath)
cclog.Infof("Setup file watch for %s", t.cfgPath)
util.AddListener(t.cfgPath, t)
}
@@ -100,7 +100,7 @@ func (t *AppTagger) Match(job *schema.Job) {
r := repository.GetJobRepository()
metadata, err := r.FetchMetadata(job)
if err != nil {
log.Infof("Cannot fetch metadata for job: %d on %s", job.JobID, job.Cluster)
cclog.Infof("Cannot fetch metadata for job: %d on %s", job.JobID, job.Cluster)
return
}
@@ -122,6 +122,6 @@ func (t *AppTagger) Match(job *schema.Job) {
}
}
} else {
log.Infof("Cannot extract job script for job: %d on %s", job.JobID, job.Cluster)
cclog.Infof("Cannot extract job script for job: %d on %s", job.JobID, job.Cluster)
}
}

View File

@@ -1,5 +1,5 @@
// Copyright (C) 2022 NHR@FAU, University Erlangen-Nuremberg.
// All rights reserved.
// Copyright (C) NHR@FAU, University Erlangen-Nuremberg.
// All rights reserved. This file is part of cc-backend.
// Use of this source code is governed by a MIT-style
// license that can be found in the LICENSE file.
package tagger
@@ -8,12 +8,12 @@ import (
"testing"
"github.com/ClusterCockpit/cc-backend/internal/repository"
"github.com/ClusterCockpit/cc-backend/pkg/log"
cclog "github.com/ClusterCockpit/cc-lib/ccLogger"
)
func setup(tb testing.TB) *repository.JobRepository {
tb.Helper()
log.Init("warn", true)
cclog.Init("warn", true)
dbfile := "../repository/testdata/job.db"
err := repository.MigrateDB("sqlite3", dbfile)
noErr(tb, err)

View File

@@ -1,5 +1,5 @@
// Copyright (C) 2023 NHR@FAU, University Erlangen-Nuremberg.
// All rights reserved.
// Copyright (C) NHR@FAU, University Erlangen-Nuremberg.
// All rights reserved. This file is part of cc-backend.
// Use of this source code is governed by a MIT-style
// license that can be found in the LICENSE file.
package tagger
@@ -8,8 +8,8 @@ import (
"sync"
"github.com/ClusterCockpit/cc-backend/internal/repository"
"github.com/ClusterCockpit/cc-backend/pkg/log"
"github.com/ClusterCockpit/cc-backend/pkg/schema"
cclog "github.com/ClusterCockpit/cc-lib/ccLogger"
"github.com/ClusterCockpit/cc-lib/schema"
)
type Tagger interface {
@@ -66,21 +66,21 @@ func RunTaggers() error {
r := repository.GetJobRepository()
jl, err := r.GetJobList()
if err != nil {
log.Errorf("Error while getting job list %s", err)
cclog.Errorf("Error while getting job list %s", err)
return err
}
for _, id := range jl {
job, err := r.FindByIdDirect(id)
if err != nil {
log.Errorf("Error while getting job %s", err)
cclog.Errorf("Error while getting job %s", err)
return err
}
for _, tagger := range jobTagger.startTaggers {
tagger.Match(job)
}
for _, tagger := range jobTagger.stopTaggers {
log.Infof("Run stop tagger for job %d", job.ID)
cclog.Infof("Run stop tagger for job %d", job.ID)
tagger.Match(job)
}
}

View File

@@ -1,5 +1,5 @@
// Copyright (C) NHR@FAU, University Erlangen-Nuremberg.
// All rights reserved.
// All rights reserved. This file is part of cc-backend.
// Use of this source code is governed by a MIT-style
// license that can be found in the LICENSE file.
package tagger
@@ -8,7 +8,7 @@ import (
"testing"
"github.com/ClusterCockpit/cc-backend/internal/repository"
"github.com/ClusterCockpit/cc-backend/pkg/schema"
"github.com/ClusterCockpit/cc-lib/schema"
)
func TestInit(t *testing.T) {

View File

@@ -1,5 +1,5 @@
// Copyright (C) NHR@FAU, University Erlangen-Nuremberg.
// All rights reserved.
// All rights reserved. This file is part of cc-backend.
// Use of this source code is governed by a MIT-style
// license that can be found in the LICENSE file.
package taskManager
@@ -9,7 +9,7 @@ import (
"github.com/ClusterCockpit/cc-backend/internal/config"
"github.com/ClusterCockpit/cc-backend/internal/repository"
"github.com/ClusterCockpit/cc-backend/pkg/log"
cclog "github.com/ClusterCockpit/cc-lib/ccLogger"
"github.com/go-co-op/gocron/v2"
)
@@ -21,15 +21,15 @@ func RegisterCommitJobService() {
frequency = "2m"
}
d, _ := time.ParseDuration(frequency)
log.Infof("Register commitJob service with %s interval", frequency)
cclog.Infof("Register commitJob service with %s interval", frequency)
s.NewJob(gocron.DurationJob(d),
gocron.NewTask(
func() {
start := time.Now()
log.Printf("Jobcache sync started at %s", start.Format(time.RFC3339))
cclog.Printf("Jobcache sync started at %s", start.Format(time.RFC3339))
jobs, _ := jobRepo.SyncJobs()
repository.CallJobStartHooks(jobs)
log.Printf("Jobcache sync and job callbacks are done and took %s", time.Since(start))
cclog.Printf("Jobcache sync and job callbacks are done and took %s", time.Since(start))
}))
}
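
All taskManager services in this commit share the same gocron v2 registration shape: one scheduler created in Start(), then either interval-based jobs (commit, duration and footprint workers) or daily jobs at a fixed time (retention, compression, stop-jobs). A minimal sketch of both variants with placeholder task bodies; the names are taken from the hunks in this commit:

package example

import (
	"time"

	cclog "github.com/ClusterCockpit/cc-lib/ccLogger"
	"github.com/go-co-op/gocron/v2"
)

func registerSketch() {
	s, err := gocron.NewScheduler()
	if err != nil {
		cclog.Abortf("could not create gocron scheduler: %s", err.Error())
	}

	// Interval job, as used by the commit/duration/footprint workers.
	d, _ := time.ParseDuration("2m")
	s.NewJob(gocron.DurationJob(d), gocron.NewTask(func() {
		cclog.Print("periodic task ran")
	}))

	// Daily job at a fixed time, as used by retention and compression.
	s.NewJob(gocron.DailyJob(1, gocron.NewAtTimes(gocron.NewAtTime(4, 0, 0))),
		gocron.NewTask(func() {
			cclog.Print("daily task ran")
		}))

	s.Start()
}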

View File

@@ -1,5 +1,5 @@
// Copyright (C) NHR@FAU, University Erlangen-Nuremberg.
// All rights reserved.
// All rights reserved. This file is part of cc-backend.
// Use of this source code is governed by a MIT-style
// license that can be found in the LICENSE file.
package taskManager
@@ -8,13 +8,13 @@ import (
"time"
"github.com/ClusterCockpit/cc-backend/pkg/archive"
"github.com/ClusterCockpit/cc-backend/pkg/log"
"github.com/ClusterCockpit/cc-backend/pkg/schema"
cclog "github.com/ClusterCockpit/cc-lib/ccLogger"
"github.com/ClusterCockpit/cc-lib/schema"
"github.com/go-co-op/gocron/v2"
)
func RegisterCompressionService(compressOlderThan int) {
log.Info("Register compression service")
cclog.Info("Register compression service")
s.NewJob(gocron.DailyJob(1, gocron.NewAtTimes(gocron.NewAtTime(05, 0, 0))),
gocron.NewTask(
@@ -26,7 +26,7 @@ func RegisterCompressionService(compressOlderThan int) {
startTime := time.Now().Unix() - int64(compressOlderThan*24*3600)
lastTime := ar.CompressLast(startTime)
if startTime == lastTime {
log.Info("Compression Service - Complete archive run")
cclog.Info("Compression Service - Complete archive run")
jobs, err = jobRepo.FindJobsBetween(0, startTime)
} else {
@@ -34,7 +34,7 @@ func RegisterCompressionService(compressOlderThan int) {
}
if err != nil {
log.Warnf("Error while looking for compression jobs: %v", err)
cclog.Warnf("Error while looking for compression jobs: %v", err)
}
ar.Compress(jobs)
}))

View File

@@ -1,5 +1,5 @@
// Copyright (C) NHR@FAU, University Erlangen-Nuremberg.
// All rights reserved.
// All rights reserved. This file is part of cc-backend.
// Use of this source code is governed by a MIT-style
// license that can be found in the LICENSE file.
package taskManager
@@ -8,29 +8,29 @@ import (
"time"
"github.com/ClusterCockpit/cc-backend/internal/auth"
"github.com/ClusterCockpit/cc-backend/pkg/log"
cclog "github.com/ClusterCockpit/cc-lib/ccLogger"
"github.com/go-co-op/gocron/v2"
)
func RegisterLdapSyncService(ds string) {
interval, err := parseDuration(ds)
if err != nil {
log.Warnf("Could not parse duration for sync interval: %v",
cclog.Warnf("Could not parse duration for sync interval: %v",
ds)
return
}
auth := auth.GetAuthInstance()
log.Info("Register LDAP sync service")
cclog.Info("Register LDAP sync service")
s.NewJob(gocron.DurationJob(interval),
gocron.NewTask(
func() {
t := time.Now()
log.Printf("ldap sync started at %s", t.Format(time.RFC3339))
cclog.Printf("ldap sync started at %s", t.Format(time.RFC3339))
if err := auth.LdapAuth.Sync(); err != nil {
log.Errorf("ldap sync failed: %s", err.Error())
cclog.Errorf("ldap sync failed: %s", err.Error())
}
log.Print("ldap sync done")
cclog.Print("ldap sync done")
}))
}

View File

@@ -1,5 +1,5 @@
// Copyright (C) NHR@FAU, University Erlangen-Nuremberg.
// All rights reserved.
// All rights reserved. This file is part of cc-backend.
// Use of this source code is governed by a MIT-style
// license that can be found in the LICENSE file.
package taskManager
@@ -8,12 +8,12 @@ import (
"time"
"github.com/ClusterCockpit/cc-backend/pkg/archive"
"github.com/ClusterCockpit/cc-backend/pkg/log"
cclog "github.com/ClusterCockpit/cc-lib/ccLogger"
"github.com/go-co-op/gocron/v2"
)
func RegisterRetentionDeleteService(age int, includeDB bool) {
log.Info("Register retention delete service")
cclog.Info("Register retention delete service")
s.NewJob(gocron.DailyJob(1, gocron.NewAtTimes(gocron.NewAtTime(04, 0, 0))),
gocron.NewTask(
@@ -21,26 +21,26 @@ func RegisterRetentionDeleteService(age int, includeDB bool) {
startTime := time.Now().Unix() - int64(age*24*3600)
jobs, err := jobRepo.FindJobsBetween(0, startTime)
if err != nil {
log.Warnf("Error while looking for retention jobs: %s", err.Error())
cclog.Warnf("Error while looking for retention jobs: %s", err.Error())
}
archive.GetHandle().CleanUp(jobs)
if includeDB {
cnt, err := jobRepo.DeleteJobsBefore(startTime)
if err != nil {
log.Errorf("Error while deleting retention jobs from db: %s", err.Error())
cclog.Errorf("Error while deleting retention jobs from db: %s", err.Error())
} else {
log.Infof("Retention: Removed %d jobs from db", cnt)
cclog.Infof("Retention: Removed %d jobs from db", cnt)
}
if err = jobRepo.Optimize(); err != nil {
log.Errorf("Error occured in db optimization: %s", err.Error())
cclog.Errorf("Error occured in db optimization: %s", err.Error())
}
}
}))
}
func RegisterRetentionMoveService(age int, includeDB bool, location string) {
log.Info("Register retention move service")
cclog.Info("Register retention move service")
s.NewJob(gocron.DailyJob(1, gocron.NewAtTimes(gocron.NewAtTime(04, 0, 0))),
gocron.NewTask(
@@ -48,19 +48,19 @@ func RegisterRetentionMoveService(age int, includeDB bool, location string) {
startTime := time.Now().Unix() - int64(age*24*3600)
jobs, err := jobRepo.FindJobsBetween(0, startTime)
if err != nil {
log.Warnf("Error while looking for retention jobs: %s", err.Error())
cclog.Warnf("Error while looking for retention jobs: %s", err.Error())
}
archive.GetHandle().Move(jobs, location)
if includeDB {
cnt, err := jobRepo.DeleteJobsBefore(startTime)
if err != nil {
log.Errorf("Error while deleting retention jobs from db: %v", err)
cclog.Errorf("Error while deleting retention jobs from db: %v", err)
} else {
log.Infof("Retention: Removed %d jobs from db", cnt)
cclog.Infof("Retention: Removed %d jobs from db", cnt)
}
if err = jobRepo.Optimize(); err != nil {
log.Errorf("Error occured in db optimization: %v", err)
cclog.Errorf("Error occured in db optimization: %v", err)
}
}
}))

View File

@@ -1,5 +1,5 @@
// Copyright (C) NHR@FAU, University Erlangen-Nuremberg.
// All rights reserved.
// All rights reserved. This file is part of cc-backend.
// Use of this source code is governed by a MIT-style
// license that can be found in the LICENSE file.
package taskManager
@@ -8,19 +8,19 @@ import (
"runtime"
"github.com/ClusterCockpit/cc-backend/internal/config"
"github.com/ClusterCockpit/cc-backend/pkg/log"
cclog "github.com/ClusterCockpit/cc-lib/ccLogger"
"github.com/go-co-op/gocron/v2"
)
func RegisterStopJobsExceedTime() {
log.Info("Register undead jobs service")
cclog.Info("Register undead jobs service")
s.NewJob(gocron.DailyJob(1, gocron.NewAtTimes(gocron.NewAtTime(03, 0, 0))),
gocron.NewTask(
func() {
err := jobRepo.StopJobsExceedingWalltimeBy(config.Keys.StopJobsExceedingWalltime)
if err != nil {
log.Warnf("Error while looking for jobs exceeding their walltime: %s", err.Error())
cclog.Warnf("Error while looking for jobs exceeding their walltime: %s", err.Error())
}
runtime.GC()
}))

View File

@@ -1,5 +1,5 @@
// Copyright (C) NHR@FAU, University Erlangen-Nuremberg.
// All rights reserved.
// All rights reserved. This file is part of cc-backend.
// Use of this source code is governed by a MIT-style
// license that can be found in the LICENSE file.
package taskManager
@@ -10,8 +10,8 @@ import (
"github.com/ClusterCockpit/cc-backend/internal/config"
"github.com/ClusterCockpit/cc-backend/internal/repository"
"github.com/ClusterCockpit/cc-backend/pkg/log"
"github.com/ClusterCockpit/cc-backend/pkg/schema"
cclog "github.com/ClusterCockpit/cc-lib/ccLogger"
"github.com/ClusterCockpit/cc-lib/schema"
"github.com/go-co-op/gocron/v2"
)
@@ -23,13 +23,13 @@ var (
func parseDuration(s string) (time.Duration, error) {
interval, err := time.ParseDuration(s)
if err != nil {
log.Warnf("Could not parse duration for sync interval: %v",
cclog.Warnf("Could not parse duration for sync interval: %v",
s)
return 0, err
}
if interval == 0 {
log.Info("TaskManager: Sync interval is zero")
cclog.Info("TaskManager: Sync interval is zero")
}
return interval, nil
@@ -40,7 +40,7 @@ func Start() {
jobRepo = repository.GetJobRepository()
s, err = gocron.NewScheduler()
if err != nil {
log.Abortf("Taskmanager Start: Could not create gocron scheduler.\nError: %s\n", err.Error())
cclog.Abortf("Taskmanager Start: Could not create gocron scheduler.\nError: %s\n", err.Error())
}
if config.Keys.StopJobsExceedingWalltime > 0 {
@@ -54,7 +54,7 @@ func Start() {
cfg.Retention.IncludeDB = true
if err := json.Unmarshal(config.Keys.Archive, &cfg); err != nil {
log.Warn("Error while unmarshaling raw config json")
cclog.Warn("Error while unmarshaling raw config json")
}
switch cfg.Retention.Policy {

View File

@@ -1,5 +1,5 @@
// Copyright (C) NHR@FAU, University Erlangen-Nuremberg.
// All rights reserved.
// All rights reserved. This file is part of cc-backend.
// Use of this source code is governed by a MIT-style
// license that can be found in the LICENSE file.
package taskManager
@@ -8,7 +8,7 @@ import (
"time"
"github.com/ClusterCockpit/cc-backend/internal/config"
"github.com/ClusterCockpit/cc-backend/pkg/log"
cclog "github.com/ClusterCockpit/cc-lib/ccLogger"
"github.com/go-co-op/gocron/v2"
)
@@ -20,14 +20,14 @@ func RegisterUpdateDurationWorker() {
frequency = "5m"
}
d, _ := time.ParseDuration(frequency)
log.Infof("Register Duration Update service with %s interval", frequency)
cclog.Infof("Register Duration Update service with %s interval", frequency)
s.NewJob(gocron.DurationJob(d),
gocron.NewTask(
func() {
start := time.Now()
log.Printf("Update duration started at %s", start.Format(time.RFC3339))
cclog.Printf("Update duration started at %s", start.Format(time.RFC3339))
jobRepo.UpdateDuration()
log.Printf("Update duration is done and took %s", time.Since(start))
cclog.Printf("Update duration is done and took %s", time.Since(start))
}))
}

View File

@@ -1,5 +1,5 @@
// Copyright (C) NHR@FAU, University Erlangen-Nuremberg.
// All rights reserved.
// All rights reserved. This file is part of cc-backend.
// Use of this source code is governed by a MIT-style
// license that can be found in the LICENSE file.
package taskManager
@@ -12,8 +12,8 @@ import (
"github.com/ClusterCockpit/cc-backend/internal/config"
"github.com/ClusterCockpit/cc-backend/internal/metricdata"
"github.com/ClusterCockpit/cc-backend/pkg/archive"
"github.com/ClusterCockpit/cc-backend/pkg/log"
"github.com/ClusterCockpit/cc-backend/pkg/schema"
cclog "github.com/ClusterCockpit/cc-lib/ccLogger"
"github.com/ClusterCockpit/cc-lib/schema"
sq "github.com/Masterminds/squirrel"
"github.com/go-co-op/gocron/v2"
)
@@ -26,7 +26,7 @@ func RegisterFootprintWorker() {
frequency = "10m"
}
d, _ := time.ParseDuration(frequency)
log.Infof("Register Footprint Update service with %s interval", frequency)
cclog.Infof("Register Footprint Update service with %s interval", frequency)
s.NewJob(gocron.DurationJob(d),
gocron.NewTask(
@@ -35,7 +35,7 @@ func RegisterFootprintWorker() {
c := 0
ce := 0
cl := 0
log.Printf("Update Footprints started at %s", s.Format(time.RFC3339))
cclog.Printf("Update Footprints started at %s", s.Format(time.RFC3339))
for _, cluster := range archive.Clusters {
s_cluster := time.Now()
@@ -54,21 +54,21 @@ func RegisterFootprintWorker() {
repo, err := metricdata.GetMetricDataRepo(cluster.Name)
if err != nil {
log.Errorf("no metric data repository configured for '%s'", cluster.Name)
cclog.Errorf("no metric data repository configured for '%s'", cluster.Name)
continue
}
pendingStatements := []sq.UpdateBuilder{}
for _, job := range jobs {
log.Debugf("Prepare job %d", job.JobID)
cclog.Debugf("Prepare job %d", job.JobID)
cl++
s_job := time.Now()
jobStats, err := repo.LoadStats(job, allMetrics, context.Background())
if err != nil {
log.Errorf("error wile loading job data stats for footprint update: %v", err)
cclog.Errorf("error wile loading job data stats for footprint update: %v", err)
ce++
continue
}
@@ -106,26 +106,26 @@ func RegisterFootprintWorker() {
stmt := sq.Update("job")
stmt, err = jobRepo.UpdateFootprint(stmt, job)
if err != nil {
log.Errorf("update job (dbid: %d) statement build failed at footprint step: %s", job.ID, err.Error())
cclog.Errorf("update job (dbid: %d) statement build failed at footprint step: %s", job.ID, err.Error())
ce++
continue
}
stmt = stmt.Where("job.id = ?", job.ID)
pendingStatements = append(pendingStatements, stmt)
log.Debugf("Job %d took %s", job.JobID, time.Since(s_job))
cclog.Debugf("Job %d took %s", job.JobID, time.Since(s_job))
}
t, err := jobRepo.TransactionInit()
if err != nil {
log.Errorf("failed TransactionInit %v", err)
log.Errorf("skipped %d transactions for cluster %s", len(pendingStatements), cluster.Name)
cclog.Errorf("failed TransactionInit %v", err)
cclog.Errorf("skipped %d transactions for cluster %s", len(pendingStatements), cluster.Name)
ce += len(pendingStatements)
} else {
for _, ps := range pendingStatements {
query, args, err := ps.ToSql()
if err != nil {
log.Errorf("failed in ToSQL conversion: %v", err)
cclog.Errorf("failed in ToSQL conversion: %v", err)
ce++
} else {
// args...: Footprint-JSON, Energyfootprint-JSON, TotalEnergy, JobID
@@ -135,8 +135,8 @@ func RegisterFootprintWorker() {
}
jobRepo.TransactionEnd(t)
}
log.Debugf("Finish Cluster %s, took %s", cluster.Name, time.Since(s_cluster))
cclog.Debugf("Finish Cluster %s, took %s", cluster.Name, time.Since(s_cluster))
}
log.Printf("Updating %d (of %d; Skipped %d) Footprints is done and took %s", c, cl, ce, time.Since(s))
cclog.Printf("Updating %d (of %d; Skipped %d) Footprints is done and took %s", c, cl, ce, time.Since(s))
}))
}

View File

@@ -1,14 +0,0 @@
// Copyright (C) NHR@FAU, University Erlangen-Nuremberg.
// All rights reserved.
// Use of this source code is governed by a MIT-style
// license that can be found in the LICENSE file.
package util
func Contains[T comparable](items []T, item T) bool {
for _, v := range items {
if v == item {
return true
}
}
return false
}

View File

@@ -1,77 +0,0 @@
// Copyright (C) NHR@FAU, University Erlangen-Nuremberg.
// All rights reserved.
// Use of this source code is governed by a MIT-style
// license that can be found in the LICENSE file.
package util
import (
"compress/gzip"
"io"
"os"
"github.com/ClusterCockpit/cc-backend/pkg/log"
)
func CompressFile(fileIn string, fileOut string) error {
originalFile, err := os.Open(fileIn)
if err != nil {
log.Errorf("CompressFile() error: %v", err)
return err
}
defer originalFile.Close()
gzippedFile, err := os.Create(fileOut)
if err != nil {
log.Errorf("CompressFile() error: %v", err)
return err
}
defer gzippedFile.Close()
gzipWriter := gzip.NewWriter(gzippedFile)
defer gzipWriter.Close()
_, err = io.Copy(gzipWriter, originalFile)
if err != nil {
log.Errorf("CompressFile() error: %v", err)
return err
}
gzipWriter.Flush()
if err := os.Remove(fileIn); err != nil {
log.Errorf("CompressFile() error: %v", err)
return err
}
return nil
}
func UncompressFile(fileIn string, fileOut string) error {
gzippedFile, err := os.Open(fileIn)
if err != nil {
log.Errorf("UncompressFile() error: %v", err)
return err
}
defer gzippedFile.Close()
gzipReader, _ := gzip.NewReader(gzippedFile)
defer gzipReader.Close()
uncompressedFile, err := os.Create(fileOut)
if err != nil {
log.Errorf("UncompressFile() error: %v", err)
return err
}
defer uncompressedFile.Close()
_, err = io.Copy(uncompressedFile, gzipReader)
if err != nil {
log.Errorf("UncompressFile() error: %v", err)
return err
}
if err := os.Remove(fileIn); err != nil {
log.Errorf("UncompressFile() error: %v", err)
return err
}
return nil
}

View File

@@ -1,107 +0,0 @@
// Copyright (C) NHR@FAU, University Erlangen-Nuremberg.
// All rights reserved.
// Use of this source code is governed by a MIT-style
// license that can be found in the LICENSE file.
package util
import (
"fmt"
"io"
"io/ioutil"
"os"
"path/filepath"
)
func CopyFile(src, dst string) (err error) {
in, err := os.Open(src)
if err != nil {
return
}
defer in.Close()
out, err := os.Create(dst)
if err != nil {
return
}
defer func() {
if e := out.Close(); e != nil {
err = e
}
}()
_, err = io.Copy(out, in)
if err != nil {
return
}
err = out.Sync()
if err != nil {
return
}
si, err := os.Stat(src)
if err != nil {
return
}
err = os.Chmod(dst, si.Mode())
if err != nil {
return
}
return
}
func CopyDir(src string, dst string) (err error) {
src = filepath.Clean(src)
dst = filepath.Clean(dst)
si, err := os.Stat(src)
if err != nil {
return err
}
if !si.IsDir() {
return fmt.Errorf("source is not a directory")
}
_, err = os.Stat(dst)
if err != nil && !os.IsNotExist(err) {
return
}
if err == nil {
return fmt.Errorf("destination already exists")
}
err = os.MkdirAll(dst, si.Mode())
if err != nil {
return
}
entries, err := ioutil.ReadDir(src)
if err != nil {
return
}
for _, entry := range entries {
srcPath := filepath.Join(src, entry.Name())
dstPath := filepath.Join(dst, entry.Name())
if entry.IsDir() {
err = CopyDir(srcPath, dstPath)
if err != nil {
return
}
} else {
// Skip symlinks.
if entry.Mode()&os.ModeSymlink != 0 {
continue
}
err = CopyFile(srcPath, dstPath)
if err != nil {
return
}
}
}
return
}

View File

@@ -1,34 +0,0 @@
// Copyright (C) NHR@FAU, University Erlangen-Nuremberg.
// All rights reserved.
// Use of this source code is governed by a MIT-style
// license that can be found in the LICENSE file.
package util
import (
"os"
"github.com/ClusterCockpit/cc-backend/pkg/log"
)
func DiskUsage(dirpath string) float64 {
var size int64
dir, err := os.Open(dirpath)
if err != nil {
log.Errorf("DiskUsage() error: %v", err)
return 0
}
defer dir.Close()
files, err := dir.Readdir(-1)
if err != nil {
log.Errorf("DiskUsage() error: %v", err)
return 0
}
for _, file := range files {
size += file.Size()
}
return float64(size) * 1e-6
}

View File

@@ -1,36 +0,0 @@
// Copyright (C) NHR@FAU, University Erlangen-Nuremberg.
// All rights reserved.
// Use of this source code is governed by a MIT-style
// license that can be found in the LICENSE file.
package util
import (
"errors"
"os"
"github.com/ClusterCockpit/cc-backend/pkg/log"
)
func CheckFileExists(filePath string) bool {
_, err := os.Stat(filePath)
return !errors.Is(err, os.ErrNotExist)
}
func GetFilesize(filePath string) int64 {
fileInfo, err := os.Stat(filePath)
if err != nil {
log.Errorf("Error on Stat %s: %v", filePath, err)
return 0
}
return fileInfo.Size()
}
func GetFilecount(path string) int {
files, err := os.ReadDir(path)
if err != nil {
log.Errorf("Error on ReadDir %s: %v", path, err)
return 0
}
return len(files)
}

View File

@@ -1,75 +0,0 @@
// Copyright (C) 2023 NHR@FAU, University Erlangen-Nuremberg.
// All rights reserved.
// Use of this source code is governed by a MIT-style
// license that can be found in the LICENSE file.
package util
import (
"sync"
"github.com/ClusterCockpit/cc-backend/pkg/log"
"github.com/fsnotify/fsnotify"
)
type Listener interface {
EventCallback()
EventMatch(event string) bool
}
var (
initOnce sync.Once
w *fsnotify.Watcher
listeners []Listener
)
func AddListener(path string, l Listener) {
var err error
initOnce.Do(func() {
var err error
w, err = fsnotify.NewWatcher()
if err != nil {
log.Error("creating a new watcher: %w", err)
}
listeners = make([]Listener, 0)
go watchLoop(w)
})
listeners = append(listeners, l)
err = w.Add(path)
if err != nil {
log.Warnf("%q: %s", path, err)
}
}
func FsWatcherShutdown() {
if w != nil {
w.Close()
}
}
func watchLoop(w *fsnotify.Watcher) {
for {
select {
// Read from Errors.
case err, ok := <-w.Errors:
if !ok { // Channel was closed (i.e. Watcher.Close() was called).
return
}
log.Errorf("watch event loop: %s", err)
// Read from Events.
case e, ok := <-w.Events:
if !ok { // Channel was closed (i.e. Watcher.Close() was called).
return
}
log.Infof("Event %s", e)
for _, l := range listeners {
if l.EventMatch(e.String()) {
l.EventCallback()
}
}
}
}
}
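
This fsnotify-based watcher leaves cc-backend with this commit; the taggers above already import github.com/ClusterCockpit/cc-lib/util and call util.AddListener, so the assumption here is that cc-lib exposes an equivalent Listener/AddListener API. A minimal sketch of a listener under that assumption:

package example

import (
	"strings"

	cclog "github.com/ClusterCockpit/cc-lib/ccLogger"
	"github.com/ClusterCockpit/cc-lib/util"
)

type reloadWatcher struct {
	cfgPath string
}

// EventMatch filters fsnotify event strings to the watched path.
func (w *reloadWatcher) EventMatch(event string) bool {
	return strings.Contains(event, w.cfgPath)
}

// EventCallback re-reads the configuration when the watched path changes.
func (w *reloadWatcher) EventCallback() {
	cclog.Infof("reloading configuration from %s", w.cfgPath)
	// ... re-read config files here ...
}

func registerWatcher(path string) {
	util.AddListener(path, &reloadWatcher{cfgPath: path})
}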

View File

@@ -1,60 +0,0 @@
// Copyright (C) NHR@FAU, University Erlangen-Nuremberg.
// All rights reserved.
// Use of this source code is governed by a MIT-style
// license that can be found in the LICENSE file.
package util
import (
"golang.org/x/exp/constraints"
"fmt"
"math"
"sort"
)
func Min[T constraints.Ordered](a, b T) T {
if a < b {
return a
}
return b
}
func Max[T constraints.Ordered](a, b T) T {
if a > b {
return a
}
return b
}
func sortedCopy(input []float64) []float64 {
sorted := make([]float64, len(input))
copy(sorted, input)
sort.Float64s(sorted)
return sorted
}
func Mean(input []float64) (float64, error) {
if len(input) == 0 {
return math.NaN(), fmt.Errorf("input array is empty: %#v", input)
}
sum := 0.0
for _, n := range input {
sum += n
}
return sum / float64(len(input)), nil
}
func Median(input []float64) (median float64, err error) {
c := sortedCopy(input)
// Even numbers: add the two middle numbers, divide by two (use mean function)
// Odd numbers: Use the middle number
l := len(c)
if l == 0 {
return math.NaN(), fmt.Errorf("input array is empty: %#v", input)
} else if l%2 == 0 {
median, _ = Mean(c[l/2-1 : l/2+1])
} else {
median = c[l/2]
}
return median, nil
}
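
These numeric helpers are removed from cc-backend here; whether they resurface under cc-lib is not visible in this hunk. For reference, a self-contained sketch of the median rule the deleted code implements (mean of the two middle values for even lengths, middle value otherwise):

package main

import (
	"fmt"
	"sort"
)

func median(input []float64) (float64, error) {
	if len(input) == 0 {
		return 0, fmt.Errorf("input array is empty")
	}
	// Sort a copy so the caller's slice is left untouched.
	c := make([]float64, len(input))
	copy(c, input)
	sort.Float64s(c)

	l := len(c)
	if l%2 == 0 {
		return (c[l/2-1] + c[l/2]) / 2, nil
	}
	return c[l/2], nil
}

func main() {
	m, _ := median([]float64{4, 1, 3, 2}) // even length: (2+3)/2
	fmt.Println(m)                        // 2.5
}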

View File

@@ -1,75 +0,0 @@
// Copyright (C) NHR@FAU, University Erlangen-Nuremberg.
// All rights reserved.
// Use of this source code is governed by a MIT-style
// license that can be found in the LICENSE file.
package util_test
import (
"fmt"
"os"
"path/filepath"
"testing"
"github.com/ClusterCockpit/cc-backend/internal/util"
)
func TestCheckFileExists(t *testing.T) {
tmpdir := t.TempDir()
if !util.CheckFileExists(tmpdir) {
t.Fatal("expected true, got false")
}
filePath := filepath.Join(tmpdir, "version.txt")
if err := os.WriteFile(filePath, []byte(fmt.Sprintf("%d", 1)), 0666); err != nil {
t.Fatal(err)
}
if !util.CheckFileExists(filePath) {
t.Fatal("expected true, got false")
}
filePath = filepath.Join(tmpdir, "version-test.txt")
if util.CheckFileExists(filePath) {
t.Fatal("expected false, got true")
}
}
func TestGetFileSize(t *testing.T) {
tmpdir := t.TempDir()
filePath := filepath.Join(tmpdir, "data.json")
if s := util.GetFilesize(filePath); s > 0 {
t.Fatalf("expected 0, got %d", s)
}
if err := os.WriteFile(filePath, []byte(fmt.Sprintf("%d", 1)), 0666); err != nil {
t.Fatal(err)
}
if s := util.GetFilesize(filePath); s == 0 {
t.Fatal("expected not 0, got 0")
}
}
func TestGetFileCount(t *testing.T) {
tmpdir := t.TempDir()
if c := util.GetFilecount(tmpdir); c != 0 {
t.Fatalf("expected 0, got %d", c)
}
filePath := filepath.Join(tmpdir, "data-1.json")
if err := os.WriteFile(filePath, []byte(fmt.Sprintf("%d", 1)), 0666); err != nil {
t.Fatal(err)
}
filePath = filepath.Join(tmpdir, "data-2.json")
if err := os.WriteFile(filePath, []byte(fmt.Sprintf("%d", 1)), 0666); err != nil {
t.Fatal(err)
}
if c := util.GetFilecount(tmpdir); c != 2 {
t.Fatalf("expected 2, got %d", c)
}
if c := util.GetFilecount(filePath); c != 0 {
t.Fatalf("expected 0, got %d", c)
}
}