Adapt loglevel for logs, shorten strings, fix formats, streamline

- Switched to Warn for most errors, reduces bloat, improves log control
This commit is contained in:
Christoph Kluge 2023-02-01 11:58:27 +01:00
parent b77bd078e5
commit a885e69125
26 changed files with 193 additions and 186 deletions

View File

@ -430,7 +430,7 @@ func main() {
for range time.Tick(30 * time.Minute) { for range time.Tick(30 * time.Minute) {
err := jobRepo.StopJobsExceedingWalltimeBy(config.Keys.StopJobsExceedingWalltime) err := jobRepo.StopJobsExceedingWalltimeBy(config.Keys.StopJobsExceedingWalltime)
if err != nil { if err != nil {
log.Errorf("MAIN > error while looking for jobs exceeding their walltime: %s", err.Error()) log.Warnf("Error while looking for jobs exceeding their walltime: %s", err.Error())
} }
runtime.GC() runtime.GC()
} }

View File

@ -169,7 +169,7 @@ func decode(r io.Reader, val interface{}) error {
// @router /jobs/ [get] // @router /jobs/ [get]
func (api *RestApi) getJobs(rw http.ResponseWriter, r *http.Request) { func (api *RestApi) getJobs(rw http.ResponseWriter, r *http.Request) {
if user := auth.GetUser(r.Context()); user != nil && !user.HasRole(auth.RoleApi) { if user := auth.GetUser(r.Context()); user != nil && !user.HasRole(auth.RoleApi) {
handleError(fmt.Errorf("missing role: %#v", auth.RoleApi), http.StatusForbidden, rw) handleError(fmt.Errorf("missing role: %v", auth.RoleApi), http.StatusForbidden, rw)
return return
} }
@ -184,7 +184,7 @@ func (api *RestApi) getJobs(rw http.ResponseWriter, r *http.Request) {
for _, s := range vals { for _, s := range vals {
state := schema.JobState(s) state := schema.JobState(s)
if !state.Valid() { if !state.Valid() {
http.Error(rw, "REST > invalid query parameter value: state", http.StatusBadRequest) http.Error(rw, "invalid query parameter value: state", http.StatusBadRequest)
return return
} }
filter.State = append(filter.State, state) filter.State = append(filter.State, state)
@ -194,7 +194,7 @@ func (api *RestApi) getJobs(rw http.ResponseWriter, r *http.Request) {
case "start-time": case "start-time":
st := strings.Split(vals[0], "-") st := strings.Split(vals[0], "-")
if len(st) != 2 { if len(st) != 2 {
http.Error(rw, "REST > invalid query parameter value: startTime", http.StatusBadRequest) http.Error(rw, "invalid query parameter value: startTime", http.StatusBadRequest)
return return
} }
from, err := strconv.ParseInt(st[0], 10, 64) from, err := strconv.ParseInt(st[0], 10, 64)
@ -226,7 +226,7 @@ func (api *RestApi) getJobs(rw http.ResponseWriter, r *http.Request) {
case "with-metadata": case "with-metadata":
withMetadata = true withMetadata = true
default: default:
http.Error(rw, "REST > invalid query parameter: "+key, http.StatusBadRequest) http.Error(rw, "invalid query parameter: "+key, http.StatusBadRequest)
return return
} }
} }
@ -300,7 +300,7 @@ func (api *RestApi) getJobs(rw http.ResponseWriter, r *http.Request) {
// @router /jobs/tag_job/{id} [post] // @router /jobs/tag_job/{id} [post]
func (api *RestApi) tagJob(rw http.ResponseWriter, r *http.Request) { func (api *RestApi) tagJob(rw http.ResponseWriter, r *http.Request) {
if user := auth.GetUser(r.Context()); user != nil && !user.HasRole(auth.RoleApi) { if user := auth.GetUser(r.Context()); user != nil && !user.HasRole(auth.RoleApi) {
handleError(fmt.Errorf("missing role: %#v", auth.RoleApi), http.StatusForbidden, rw) handleError(fmt.Errorf("missing role: %v", auth.RoleApi), http.StatusForbidden, rw)
return return
} }
@ -365,7 +365,7 @@ func (api *RestApi) tagJob(rw http.ResponseWriter, r *http.Request) {
// @router /jobs/start_job/ [post] // @router /jobs/start_job/ [post]
func (api *RestApi) startJob(rw http.ResponseWriter, r *http.Request) { func (api *RestApi) startJob(rw http.ResponseWriter, r *http.Request) {
if user := auth.GetUser(r.Context()); user != nil && !user.HasRole(auth.RoleApi) { if user := auth.GetUser(r.Context()); user != nil && !user.HasRole(auth.RoleApi) {
handleError(fmt.Errorf("missing role: %#v", auth.RoleApi), http.StatusForbidden, rw) handleError(fmt.Errorf("missing role: %v", auth.RoleApi), http.StatusForbidden, rw)
return return
} }
@ -446,7 +446,7 @@ func (api *RestApi) startJob(rw http.ResponseWriter, r *http.Request) {
// @router /jobs/stop_job/{id} [post] // @router /jobs/stop_job/{id} [post]
func (api *RestApi) stopJobById(rw http.ResponseWriter, r *http.Request) { func (api *RestApi) stopJobById(rw http.ResponseWriter, r *http.Request) {
if user := auth.GetUser(r.Context()); user != nil && !user.HasRole(auth.RoleApi) { if user := auth.GetUser(r.Context()); user != nil && !user.HasRole(auth.RoleApi) {
handleError(fmt.Errorf("missing role: %#v", auth.RoleApi), http.StatusForbidden, rw) handleError(fmt.Errorf("missing role: %v", auth.RoleApi), http.StatusForbidden, rw)
return return
} }
@ -499,7 +499,7 @@ func (api *RestApi) stopJobById(rw http.ResponseWriter, r *http.Request) {
// @router /jobs/stop_job/ [post] // @router /jobs/stop_job/ [post]
func (api *RestApi) stopJobByRequest(rw http.ResponseWriter, r *http.Request) { func (api *RestApi) stopJobByRequest(rw http.ResponseWriter, r *http.Request) {
if user := auth.GetUser(r.Context()); user != nil && !user.HasRole(auth.RoleApi) { if user := auth.GetUser(r.Context()); user != nil && !user.HasRole(auth.RoleApi) {
handleError(fmt.Errorf("missing role: %#v", auth.RoleApi), http.StatusForbidden, rw) handleError(fmt.Errorf("missing role: %v", auth.RoleApi), http.StatusForbidden, rw)
return return
} }
@ -545,7 +545,7 @@ func (api *RestApi) stopJobByRequest(rw http.ResponseWriter, r *http.Request) {
// @router /jobs/delete_job/{id} [delete] // @router /jobs/delete_job/{id} [delete]
func (api *RestApi) deleteJobById(rw http.ResponseWriter, r *http.Request) { func (api *RestApi) deleteJobById(rw http.ResponseWriter, r *http.Request) {
if user := auth.GetUser(r.Context()); user != nil && !user.HasRole(auth.RoleApi) { if user := auth.GetUser(r.Context()); user != nil && !user.HasRole(auth.RoleApi) {
handleError(fmt.Errorf("missing role: %#v", auth.RoleApi), http.StatusForbidden, rw) handleError(fmt.Errorf("missing role: %v", auth.RoleApi), http.StatusForbidden, rw)
return return
} }
@ -593,7 +593,7 @@ func (api *RestApi) deleteJobById(rw http.ResponseWriter, r *http.Request) {
// @router /jobs/delete_job/ [delete] // @router /jobs/delete_job/ [delete]
func (api *RestApi) deleteJobByRequest(rw http.ResponseWriter, r *http.Request) { func (api *RestApi) deleteJobByRequest(rw http.ResponseWriter, r *http.Request) {
if user := auth.GetUser(r.Context()); user != nil && !user.HasRole(auth.RoleApi) { if user := auth.GetUser(r.Context()); user != nil && !user.HasRole(auth.RoleApi) {
handleError(fmt.Errorf("missing role: %#v", auth.RoleApi), http.StatusForbidden, rw) handleError(fmt.Errorf("missing role: %v", auth.RoleApi), http.StatusForbidden, rw)
return return
} }
@ -649,7 +649,7 @@ func (api *RestApi) deleteJobByRequest(rw http.ResponseWriter, r *http.Request)
// @router /jobs/delete_job_before/{ts} [delete] // @router /jobs/delete_job_before/{ts} [delete]
func (api *RestApi) deleteJobBefore(rw http.ResponseWriter, r *http.Request) { func (api *RestApi) deleteJobBefore(rw http.ResponseWriter, r *http.Request) {
if user := auth.GetUser(r.Context()); user != nil && !user.HasRole(auth.RoleApi) { if user := auth.GetUser(r.Context()); user != nil && !user.HasRole(auth.RoleApi) {
handleError(fmt.Errorf("missing role: %#v", auth.RoleApi), http.StatusForbidden, rw) handleError(fmt.Errorf("missing role: %v", auth.RoleApi), http.StatusForbidden, rw)
return return
} }
@ -724,7 +724,7 @@ func (api *RestApi) checkAndHandleStopJob(rw http.ResponseWriter, job *schema.Jo
// func (api *RestApi) importJob(rw http.ResponseWriter, r *http.Request) { // func (api *RestApi) importJob(rw http.ResponseWriter, r *http.Request) {
// if user := auth.GetUser(r.Context()); user != nil && !user.HasRole(auth.RoleApi) { // if user := auth.GetUser(r.Context()); user != nil && !user.HasRole(auth.RoleApi) {
// handleError(fmt.Errorf("missing role: %#v", auth.RoleApi), http.StatusForbidden, rw) // handleError(fmt.Errorf("missing role: %v", auth.RoleApi), http.StatusForbidden, rw)
// return // return
// } // }
@ -793,7 +793,7 @@ func (api *RestApi) getJWT(rw http.ResponseWriter, r *http.Request) {
me := auth.GetUser(r.Context()) me := auth.GetUser(r.Context())
if !me.HasRole(auth.RoleAdmin) { if !me.HasRole(auth.RoleAdmin) {
if username != me.Username { if username != me.Username {
http.Error(rw, "REST > only admins are allowed to sign JWTs not for themselves", http.StatusForbidden) http.Error(rw, "Only admins are allowed to sign JWTs not for themselves", http.StatusForbidden)
return return
} }
} }
@ -818,13 +818,13 @@ func (api *RestApi) createUser(rw http.ResponseWriter, r *http.Request) {
rw.Header().Set("Content-Type", "text/plain") rw.Header().Set("Content-Type", "text/plain")
me := auth.GetUser(r.Context()) me := auth.GetUser(r.Context())
if !me.HasRole(auth.RoleAdmin) { if !me.HasRole(auth.RoleAdmin) {
http.Error(rw, "REST > only admins are allowed to create new users", http.StatusForbidden) http.Error(rw, "Only admins are allowed to create new users", http.StatusForbidden)
return return
} }
username, password, role, name, email := r.FormValue("username"), r.FormValue("password"), r.FormValue("role"), r.FormValue("name"), r.FormValue("email") username, password, role, name, email := r.FormValue("username"), r.FormValue("password"), r.FormValue("role"), r.FormValue("name"), r.FormValue("email")
if len(password) == 0 && role != auth.RoleApi { if len(password) == 0 && role != auth.RoleApi {
http.Error(rw, "REST > only API users are allowed to have a blank password (login will be impossible)", http.StatusBadRequest) http.Error(rw, "Only API users are allowed to have a blank password (login will be impossible)", http.StatusBadRequest)
return return
} }
@ -838,12 +838,12 @@ func (api *RestApi) createUser(rw http.ResponseWriter, r *http.Request) {
return return
} }
rw.Write([]byte(fmt.Sprintf("User %#v successfully created!\n", username))) rw.Write([]byte(fmt.Sprintf("User %v successfully created!\n", username)))
} }
func (api *RestApi) deleteUser(rw http.ResponseWriter, r *http.Request) { func (api *RestApi) deleteUser(rw http.ResponseWriter, r *http.Request) {
if user := auth.GetUser(r.Context()); !user.HasRole(auth.RoleAdmin) { if user := auth.GetUser(r.Context()); !user.HasRole(auth.RoleAdmin) {
http.Error(rw, "REST > only admins are allowed to delete a user", http.StatusForbidden) http.Error(rw, "Only admins are allowed to delete a user", http.StatusForbidden)
return return
} }
@ -858,7 +858,7 @@ func (api *RestApi) deleteUser(rw http.ResponseWriter, r *http.Request) {
func (api *RestApi) getUsers(rw http.ResponseWriter, r *http.Request) { func (api *RestApi) getUsers(rw http.ResponseWriter, r *http.Request) {
if user := auth.GetUser(r.Context()); !user.HasRole(auth.RoleAdmin) { if user := auth.GetUser(r.Context()); !user.HasRole(auth.RoleAdmin) {
http.Error(rw, "REST > only admins are allowed to fetch a list of users", http.StatusForbidden) http.Error(rw, "Only admins are allowed to fetch a list of users", http.StatusForbidden)
return return
} }
@ -873,7 +873,7 @@ func (api *RestApi) getUsers(rw http.ResponseWriter, r *http.Request) {
func (api *RestApi) updateUser(rw http.ResponseWriter, r *http.Request) { func (api *RestApi) updateUser(rw http.ResponseWriter, r *http.Request) {
if user := auth.GetUser(r.Context()); !user.HasRole(auth.RoleAdmin) { if user := auth.GetUser(r.Context()); !user.HasRole(auth.RoleAdmin) {
http.Error(rw, "REST > only admins are allowed to update a user", http.StatusForbidden) http.Error(rw, "Only admins are allowed to update a user", http.StatusForbidden)
return return
} }
@ -893,9 +893,9 @@ func (api *RestApi) updateUser(rw http.ResponseWriter, r *http.Request) {
http.Error(rw, err.Error(), http.StatusUnprocessableEntity) http.Error(rw, err.Error(), http.StatusUnprocessableEntity)
return return
} }
rw.Write([]byte("REST > Remove Role Success")) rw.Write([]byte("Remove Role Success"))
} else { } else {
http.Error(rw, "REST > Not Add or Del?", http.StatusInternalServerError) http.Error(rw, "Not Add or Del?", http.StatusInternalServerError)
} }
} }

View File

@ -176,7 +176,7 @@ func (auth *Authentication) Login(
user := (*User)(nil) user := (*User)(nil)
if username != "" { if username != "" {
if user, _ = auth.GetUser(username); err != nil { if user, _ = auth.GetUser(username); err != nil {
// log.Warnf("login of unknown user %#v", username) // log.Warnf("login of unknown user %v", username)
_ = err _ = err
} }
} }
@ -206,12 +206,12 @@ func (auth *Authentication) Login(
session.Values["username"] = user.Username session.Values["username"] = user.Username
session.Values["roles"] = user.Roles session.Values["roles"] = user.Roles
if err := auth.sessionStore.Save(r, rw, session); err != nil { if err := auth.sessionStore.Save(r, rw, session); err != nil {
log.Errorf("session save failed: %s", err.Error()) log.Warnf("session save failed: %s", err.Error())
http.Error(rw, err.Error(), http.StatusInternalServerError) http.Error(rw, err.Error(), http.StatusInternalServerError)
return return
} }
log.Infof("login successful: user: %#v (roles: %v)", user.Username, user.Roles) log.Infof("login successful: user: %v (roles: %v)", user.Username, user.Roles)
ctx := context.WithValue(r.Context(), ContextUserKey, user) ctx := context.WithValue(r.Context(), ContextUserKey, user)
onsuccess.ServeHTTP(rw, r.WithContext(ctx)) onsuccess.ServeHTTP(rw, r.WithContext(ctx))
return return

View File

@ -45,13 +45,13 @@ func (ja *JWTAuthenticator) Init(auth *Authentication, conf interface{}) error {
} else { } else {
bytes, err := base64.StdEncoding.DecodeString(pubKey) bytes, err := base64.StdEncoding.DecodeString(pubKey)
if err != nil { if err != nil {
log.Error("Could not decode JWT public key") log.Warn("Could not decode JWT public key")
return err return err
} }
ja.publicKey = ed25519.PublicKey(bytes) ja.publicKey = ed25519.PublicKey(bytes)
bytes, err = base64.StdEncoding.DecodeString(privKey) bytes, err = base64.StdEncoding.DecodeString(privKey)
if err != nil { if err != nil {
log.Error("Could not decode JWT private key") log.Warn("Could not decode JWT private key")
return err return err
} }
ja.privateKey = ed25519.PrivateKey(bytes) ja.privateKey = ed25519.PrivateKey(bytes)
@ -60,7 +60,7 @@ func (ja *JWTAuthenticator) Init(auth *Authentication, conf interface{}) error {
if pubKey = os.Getenv("CROSS_LOGIN_JWT_HS512_KEY"); pubKey != "" { if pubKey = os.Getenv("CROSS_LOGIN_JWT_HS512_KEY"); pubKey != "" {
bytes, err := base64.StdEncoding.DecodeString(pubKey) bytes, err := base64.StdEncoding.DecodeString(pubKey)
if err != nil { if err != nil {
log.Error("Could not decode cross login JWT HS512 key") log.Warn("Could not decode cross login JWT HS512 key")
return err return err
} }
ja.loginTokenKey = bytes ja.loginTokenKey = bytes
@ -71,7 +71,7 @@ func (ja *JWTAuthenticator) Init(auth *Authentication, conf interface{}) error {
if keyFound && pubKeyCrossLogin != "" { if keyFound && pubKeyCrossLogin != "" {
bytes, err := base64.StdEncoding.DecodeString(pubKeyCrossLogin) bytes, err := base64.StdEncoding.DecodeString(pubKeyCrossLogin)
if err != nil { if err != nil {
log.Error("Could not decode cross login JWT public key") log.Warn("Could not decode cross login JWT public key")
return err return err
} }
ja.publicKeyCrossLogin = ed25519.PublicKey(bytes) ja.publicKeyCrossLogin = ed25519.PublicKey(bytes)
@ -130,7 +130,7 @@ func (ja *JWTAuthenticator) Login(
return nil, fmt.Errorf("AUTH/JWT > unknown signing method for login token: %s (known: HS256, HS512, EdDSA)", t.Method.Alg()) return nil, fmt.Errorf("AUTH/JWT > unknown signing method for login token: %s (known: HS256, HS512, EdDSA)", t.Method.Alg())
}) })
if err != nil { if err != nil {
log.Error("Error while parsing jwt token") log.Warn("Error while parsing jwt token")
return nil, err return nil, err
} }
@ -157,7 +157,7 @@ func (ja *JWTAuthenticator) Login(
if user == nil { if user == nil {
user, err = ja.auth.GetUser(sub) user, err = ja.auth.GetUser(sub)
if err != nil && err != sql.ErrNoRows { if err != nil && err != sql.ErrNoRows {
log.Errorf("Error while loading user '%#v'", sub) log.Errorf("Error while loading user '%v'", sub)
return nil, err return nil, err
} else if user == nil { } else if user == nil {
user = &User{ user = &User{
@ -166,7 +166,7 @@ func (ja *JWTAuthenticator) Login(
AuthSource: AuthViaToken, AuthSource: AuthViaToken,
} }
if err := ja.auth.AddUser(user); err != nil { if err := ja.auth.AddUser(user); err != nil {
log.Errorf("Error while adding user '%#v' to auth from token", user.Username) log.Errorf("Error while adding user '%v' to auth from token", user.Username)
return nil, err return nil, err
} }
} }
@ -231,7 +231,7 @@ func (ja *JWTAuthenticator) Auth(
return ja.publicKey, nil return ja.publicKey, nil
}) })
if err != nil { if err != nil {
log.Error("Error while parsing token") log.Warn("Error while parsing token")
return nil, err return nil, err
} }
@ -286,7 +286,7 @@ func (ja *JWTAuthenticator) Auth(
session.Values["roles"] = roles session.Values["roles"] = roles
if err := ja.auth.sessionStore.Save(r, rw, session); err != nil { if err := ja.auth.sessionStore.Save(r, rw, session); err != nil {
log.Errorf("session save failed: %s", err.Error()) log.Warnf("session save failed: %s", err.Error())
http.Error(rw, err.Error(), http.StatusInternalServerError) http.Error(rw, err.Error(), http.StatusInternalServerError)
return nil, err return nil, err
} }

View File

@ -39,7 +39,7 @@ func (la *LdapAuthenticator) Init(
if la.config != nil && la.config.SyncInterval != "" { if la.config != nil && la.config.SyncInterval != "" {
interval, err := time.ParseDuration(la.config.SyncInterval) interval, err := time.ParseDuration(la.config.SyncInterval)
if err != nil { if err != nil {
log.Errorf("Could not parse duration for sync interval: %#v", la.config.SyncInterval) log.Warnf("Could not parse duration for sync interval: %v", la.config.SyncInterval)
return err return err
} }
@ -78,7 +78,7 @@ func (la *LdapAuthenticator) Login(
l, err := la.getLdapConnection(false) l, err := la.getLdapConnection(false)
if err != nil { if err != nil {
log.Error("Error while getting ldap connection") log.Warn("Error while getting ldap connection")
return nil, err return nil, err
} }
defer l.Close() defer l.Close()
@ -108,14 +108,14 @@ func (la *LdapAuthenticator) Sync() error {
users := map[string]int{} users := map[string]int{}
rows, err := la.auth.db.Query(`SELECT username FROM user WHERE user.ldap = 1`) rows, err := la.auth.db.Query(`SELECT username FROM user WHERE user.ldap = 1`)
if err != nil { if err != nil {
log.Error("Error while querying LDAP users") log.Warn("Error while querying LDAP users")
return err return err
} }
for rows.Next() { for rows.Next() {
var username string var username string
if err := rows.Scan(&username); err != nil { if err := rows.Scan(&username); err != nil {
log.Errorf("Error while scanning for user '%s'", username) log.Warnf("Error while scanning for user '%s'", username)
return err return err
} }
@ -133,7 +133,7 @@ func (la *LdapAuthenticator) Sync() error {
la.config.UserBase, ldap.ScopeWholeSubtree, ldap.NeverDerefAliases, 0, 0, false, la.config.UserBase, ldap.ScopeWholeSubtree, ldap.NeverDerefAliases, 0, 0, false,
la.config.UserFilter, []string{"dn", "uid", "gecos"}, nil)) la.config.UserFilter, []string{"dn", "uid", "gecos"}, nil))
if err != nil { if err != nil {
log.Error("LDAP search error") log.Warn("LDAP search error")
return err return err
} }
@ -155,14 +155,14 @@ func (la *LdapAuthenticator) Sync() error {
for username, where := range users { for username, where := range users {
if where == IN_DB && la.config.SyncDelOldUsers { if where == IN_DB && la.config.SyncDelOldUsers {
log.Debugf("sync: remove %#v (does not show up in LDAP anymore)", username) log.Debugf("sync: remove %v (does not show up in LDAP anymore)", username)
if _, err := la.auth.db.Exec(`DELETE FROM user WHERE user.username = ?`, username); err != nil { if _, err := la.auth.db.Exec(`DELETE FROM user WHERE user.username = ?`, username); err != nil {
log.Errorf("User '%s' not in LDAP anymore: Delete from DB failed", username) log.Errorf("User '%s' not in LDAP anymore: Delete from DB failed", username)
return err return err
} }
} else if where == IN_LDAP { } else if where == IN_LDAP {
name := newnames[username] name := newnames[username]
log.Debugf("sync: add %#v (name: %#v, roles: [user], ldap: true)", username, name) log.Debugf("sync: add %v (name: %v, roles: [user], ldap: true)", username, name)
if _, err := la.auth.db.Exec(`INSERT INTO user (username, ldap, name, roles) VALUES (?, ?, ?, ?)`, if _, err := la.auth.db.Exec(`INSERT INTO user (username, ldap, name, roles) VALUES (?, ?, ?, ?)`,
username, 1, name, "[\""+RoleUser+"\"]"); err != nil { username, 1, name, "[\""+RoleUser+"\"]"); err != nil {
log.Errorf("User '%s' new in LDAP: Insert into DB failed", username) log.Errorf("User '%s' new in LDAP: Insert into DB failed", username)
@ -180,14 +180,14 @@ func (la *LdapAuthenticator) getLdapConnection(admin bool) (*ldap.Conn, error) {
conn, err := ldap.DialURL(la.config.Url) conn, err := ldap.DialURL(la.config.Url)
if err != nil { if err != nil {
log.Error("LDAP URL dial failed") log.Warn("LDAP URL dial failed")
return nil, err return nil, err
} }
if admin { if admin {
if err := conn.Bind(la.config.SearchDN, la.syncPassword); err != nil { if err := conn.Bind(la.config.SearchDN, la.syncPassword); err != nil {
conn.Close() conn.Close()
log.Error("LDAP connection bind failed") log.Warn("LDAP connection bind failed")
return nil, err return nil, err
} }
} }

View File

@ -25,7 +25,7 @@ func (auth *Authentication) GetUser(username string) (*User, error) {
if err := sq.Select("password", "ldap", "name", "roles", "email").From("user"). if err := sq.Select("password", "ldap", "name", "roles", "email").From("user").
Where("user.username = ?", username).RunWith(auth.db). Where("user.username = ?", username).RunWith(auth.db).
QueryRow().Scan(&hashedPassword, &user.AuthSource, &name, &rawRoles, &email); err != nil { QueryRow().Scan(&hashedPassword, &user.AuthSource, &name, &rawRoles, &email); err != nil {
log.Errorf("Error while querying user '%#v' from database", username) log.Warnf("Error while querying user '%v' from database", username)
return nil, err return nil, err
} }
@ -34,7 +34,7 @@ func (auth *Authentication) GetUser(username string) (*User, error) {
user.Email = email.String user.Email = email.String
if rawRoles.Valid { if rawRoles.Valid {
if err := json.Unmarshal([]byte(rawRoles.String), &user.Roles); err != nil { if err := json.Unmarshal([]byte(rawRoles.String), &user.Roles); err != nil {
log.Error("Error while unmarshaling raw roles from DB") log.Warn("Error while unmarshaling raw roles from DB")
return nil, err return nil, err
} }
} }
@ -67,11 +67,11 @@ func (auth *Authentication) AddUser(user *User) error {
} }
if _, err := sq.Insert("user").Columns(cols...).Values(vals...).RunWith(auth.db).Exec(); err != nil { if _, err := sq.Insert("user").Columns(cols...).Values(vals...).RunWith(auth.db).Exec(); err != nil {
log.Errorf("Error while inserting new user '%#v' into DB", user.Username) log.Errorf("Error while inserting new user '%v' into DB", user.Username)
return err return err
} }
log.Infof("new user %#v created (roles: %s, auth-source: %d)", user.Username, rolesJson, user.AuthSource) log.Infof("new user %v created (roles: %s, auth-source: %d)", user.Username, rolesJson, user.AuthSource)
return nil return nil
} }
@ -91,7 +91,7 @@ func (auth *Authentication) ListUsers(specialsOnly bool) ([]*User, error) {
rows, err := q.RunWith(auth.db).Query() rows, err := q.RunWith(auth.db).Query()
if err != nil { if err != nil {
log.Error("Error while querying user list") log.Warn("Error while querying user list")
return nil, err return nil, err
} }
@ -102,12 +102,12 @@ func (auth *Authentication) ListUsers(specialsOnly bool) ([]*User, error) {
user := &User{} user := &User{}
var name, email sql.NullString var name, email sql.NullString
if err := rows.Scan(&user.Username, &name, &email, &rawroles); err != nil { if err := rows.Scan(&user.Username, &name, &email, &rawroles); err != nil {
log.Error("Error while scanning user list") log.Warn("Error while scanning user list")
return nil, err return nil, err
} }
if err := json.Unmarshal([]byte(rawroles), &user.Roles); err != nil { if err := json.Unmarshal([]byte(rawroles), &user.Roles); err != nil {
log.Error("Error while unmarshaling raw role list") log.Warn("Error while unmarshaling raw role list")
return nil, err return nil, err
} }
@ -125,17 +125,17 @@ func (auth *Authentication) AddRole(
user, err := auth.GetUser(username) user, err := auth.GetUser(username)
if err != nil { if err != nil {
log.Errorf("Could not load user '%s'", username) log.Warnf("Could not load user '%s'", username)
return err return err
} }
if role != RoleAdmin && role != RoleApi && role != RoleUser && role != RoleSupport { if role != RoleAdmin && role != RoleApi && role != RoleUser && role != RoleSupport {
return fmt.Errorf("AUTH/USERS > invalid user role: %#v", role) return fmt.Errorf("Invalid user role: %v", role)
} }
for _, r := range user.Roles { for _, r := range user.Roles {
if r == role { if r == role {
return fmt.Errorf("AUTH/USERS > user %#v already has role %#v", username, role) return fmt.Errorf("User %v already has role %v", username, role)
} }
} }
@ -150,12 +150,12 @@ func (auth *Authentication) AddRole(
func (auth *Authentication) RemoveRole(ctx context.Context, username string, role string) error { func (auth *Authentication) RemoveRole(ctx context.Context, username string, role string) error {
user, err := auth.GetUser(username) user, err := auth.GetUser(username)
if err != nil { if err != nil {
log.Errorf("Could not load user '%s'", username) log.Warnf("Could not load user '%s'", username)
return err return err
} }
if role != RoleAdmin && role != RoleApi && role != RoleUser && role != RoleSupport { if role != RoleAdmin && role != RoleApi && role != RoleUser && role != RoleSupport {
return fmt.Errorf("AUTH/USERS > invalid user role: %#v", role) return fmt.Errorf("Invalid user role: %v", role)
} }
var exists bool var exists bool
@ -176,7 +176,7 @@ func (auth *Authentication) RemoveRole(ctx context.Context, username string, rol
} }
return nil return nil
} else { } else {
return fmt.Errorf("AUTH/USERS > user %#v already does not have role %#v", username, role) return fmt.Errorf("User '%v' already does not have role: %v", username, role)
} }
} }
@ -191,11 +191,13 @@ func FetchUser(ctx context.Context, db *sqlx.DB, username string) (*model.User,
if err := sq.Select("name", "email").From("user").Where("user.username = ?", username). if err := sq.Select("name", "email").From("user").Where("user.username = ?", username).
RunWith(db).QueryRow().Scan(&name, &email); err != nil { RunWith(db).QueryRow().Scan(&name, &email); err != nil {
if err == sql.ErrNoRows { if err == sql.ErrNoRows {
log.Errorf("User '%s' Not found in DB", username) /* This warning will be logged *often* for non-local users, i.e. users mentioned only in job-table or archive, */
/* since FetchUser will be called to retrieve full name and mail for every job in query/list */
// log.Warnf("User '%s' Not found in DB", username)
return nil, nil return nil, nil
} }
log.Errorf("Error while fetching user '%s'", username) log.Warnf("Error while fetching user '%s'", username)
return nil, err return nil, err
} }

View File

@ -69,7 +69,7 @@ func (r *queryResolver) jobsStatistics(ctx context.Context, filter []*model.JobF
rows, err := query.RunWith(r.DB).Query() rows, err := query.RunWith(r.DB).Query()
if err != nil { if err != nil {
log.Error("Error while querying DB for job statistics") log.Warn("Error while querying DB for job statistics")
return nil, err return nil, err
} }
@ -77,7 +77,7 @@ func (r *queryResolver) jobsStatistics(ctx context.Context, filter []*model.JobF
var id sql.NullString var id sql.NullString
var jobs, walltime, corehours sql.NullInt64 var jobs, walltime, corehours sql.NullInt64
if err := rows.Scan(&id, &jobs, &walltime, &corehours); err != nil { if err := rows.Scan(&id, &jobs, &walltime, &corehours); err != nil {
log.Error("Error while scanning rows") log.Warn("Error while scanning rows")
return nil, err return nil, err
} }
@ -106,7 +106,7 @@ func (r *queryResolver) jobsStatistics(ctx context.Context, filter []*model.JobF
query = repository.BuildWhereClause(f, query) query = repository.BuildWhereClause(f, query)
} }
if err := query.RunWith(r.DB).QueryRow().Scan(&(stats[""].ShortJobs)); err != nil { if err := query.RunWith(r.DB).QueryRow().Scan(&(stats[""].ShortJobs)); err != nil {
log.Error("Error while scanning rows for short job stats") log.Warn("Error while scanning rows for short job stats")
return nil, err return nil, err
} }
} else { } else {
@ -118,7 +118,7 @@ func (r *queryResolver) jobsStatistics(ctx context.Context, filter []*model.JobF
} }
rows, err := query.RunWith(r.DB).Query() rows, err := query.RunWith(r.DB).Query()
if err != nil { if err != nil {
log.Error("Error while querying jobs for short jobs") log.Warn("Error while querying jobs for short jobs")
return nil, err return nil, err
} }
@ -126,7 +126,7 @@ func (r *queryResolver) jobsStatistics(ctx context.Context, filter []*model.JobF
var id sql.NullString var id sql.NullString
var shortJobs sql.NullInt64 var shortJobs sql.NullInt64
if err := rows.Scan(&id, &shortJobs); err != nil { if err := rows.Scan(&id, &shortJobs); err != nil {
log.Error("Error while scanning rows for short jobs") log.Warn("Error while scanning rows for short jobs")
return nil, err return nil, err
} }
@ -160,13 +160,13 @@ func (r *queryResolver) jobsStatistics(ctx context.Context, filter []*model.JobF
value := fmt.Sprintf(`CAST(ROUND((CASE WHEN job.job_state = "running" THEN %d - job.start_time ELSE job.duration END) / 3600) as int) as value`, time.Now().Unix()) value := fmt.Sprintf(`CAST(ROUND((CASE WHEN job.job_state = "running" THEN %d - job.start_time ELSE job.duration END) / 3600) as int) as value`, time.Now().Unix())
stat.HistDuration, err = r.jobsStatisticsHistogram(ctx, value, filter, id, col) stat.HistDuration, err = r.jobsStatisticsHistogram(ctx, value, filter, id, col)
if err != nil { if err != nil {
log.Error("Error while loading job statistics histogram: running jobs") log.Warn("Error while loading job statistics histogram: running jobs")
return nil, err return nil, err
} }
stat.HistNumNodes, err = r.jobsStatisticsHistogram(ctx, "job.num_nodes as value", filter, id, col) stat.HistNumNodes, err = r.jobsStatisticsHistogram(ctx, "job.num_nodes as value", filter, id, col)
if err != nil { if err != nil {
log.Error("Error while loading job statistics histogram: num nodes") log.Warn("Error while loading job statistics histogram: num nodes")
return nil, err return nil, err
} }
} }
@ -198,7 +198,7 @@ func (r *queryResolver) jobsStatisticsHistogram(ctx context.Context, value strin
for rows.Next() { for rows.Next() {
point := model.HistoPoint{} point := model.HistoPoint{}
if err := rows.Scan(&point.Value, &point.Count); err != nil { if err := rows.Scan(&point.Value, &point.Count); err != nil {
log.Error("Error while scanning rows") log.Warn("Error while scanning rows")
return nil, err return nil, err
} }

View File

@ -79,7 +79,7 @@ func (ccms *CCMetricStore) Init(rawConfig json.RawMessage) error {
var config CCMetricStoreConfig var config CCMetricStoreConfig
if err := json.Unmarshal(rawConfig, &config); err != nil { if err := json.Unmarshal(rawConfig, &config); err != nil {
log.Error("Error while unmarshaling raw json config") log.Warn("Error while unmarshaling raw json config")
return err return err
} }
@ -126,13 +126,13 @@ func (ccms *CCMetricStore) doRequest(
buf := &bytes.Buffer{} buf := &bytes.Buffer{}
if err := json.NewEncoder(buf).Encode(body); err != nil { if err := json.NewEncoder(buf).Encode(body); err != nil {
log.Error("Error while encoding request body") log.Warn("Error while encoding request body")
return nil, err return nil, err
} }
req, err := http.NewRequestWithContext(ctx, http.MethodPost, ccms.queryEndpoint, buf) req, err := http.NewRequestWithContext(ctx, http.MethodPost, ccms.queryEndpoint, buf)
if err != nil { if err != nil {
log.Error("Error while building request body") log.Warn("Error while building request body")
return nil, err return nil, err
} }
if ccms.jwt != "" { if ccms.jwt != "" {
@ -151,7 +151,7 @@ func (ccms *CCMetricStore) doRequest(
var resBody ApiQueryResponse var resBody ApiQueryResponse
if err := json.NewDecoder(bufio.NewReader(res.Body)).Decode(&resBody); err != nil { if err := json.NewDecoder(bufio.NewReader(res.Body)).Decode(&resBody); err != nil {
log.Error("Error while decoding result body") log.Warn("Error while decoding result body")
return nil, err return nil, err
} }
@ -167,7 +167,7 @@ func (ccms *CCMetricStore) LoadData(
topology := archive.GetSubCluster(job.Cluster, job.SubCluster).Topology topology := archive.GetSubCluster(job.Cluster, job.SubCluster).Topology
queries, assignedScope, err := ccms.buildQueries(job, metrics, scopes) queries, assignedScope, err := ccms.buildQueries(job, metrics, scopes)
if err != nil { if err != nil {
log.Error("Error while building queries") log.Warn("Error while building queries")
return nil, err return nil, err
} }
@ -210,7 +210,8 @@ func (ccms *CCMetricStore) LoadData(
for _, res := range row { for _, res := range row {
if res.Error != nil { if res.Error != nil {
errors = append(errors, fmt.Sprintf("METRICDATA/CCMS > failed to fetch '%s' from host '%s': %s", query.Metric, query.Hostname, *res.Error)) /* Build list for "partial errors", if any */
errors = append(errors, fmt.Sprintf("failed to fetch '%s' from host '%s': %s", query.Metric, query.Hostname, *res.Error))
continue continue
} }
@ -253,6 +254,7 @@ func (ccms *CCMetricStore) LoadData(
} }
if len(errors) != 0 { if len(errors) != 0 {
/* Returns list for "partial errors" */
return jobData, fmt.Errorf("METRICDATA/CCMS > Errors: %s", strings.Join(errors, ", ")) return jobData, fmt.Errorf("METRICDATA/CCMS > Errors: %s", strings.Join(errors, ", "))
} }
@ -506,7 +508,7 @@ func (ccms *CCMetricStore) LoadStats(
queries, _, err := ccms.buildQueries(job, metrics, []schema.MetricScope{schema.MetricScopeNode}) queries, _, err := ccms.buildQueries(job, metrics, []schema.MetricScope{schema.MetricScopeNode})
if err != nil { if err != nil {
log.Error("Error while building query") log.Warn("Error while building query")
return nil, err return nil, err
} }
@ -604,7 +606,8 @@ func (ccms *CCMetricStore) LoadNodeData(
metric := ccms.toLocalName(query.Metric) metric := ccms.toLocalName(query.Metric)
qdata := res[0] qdata := res[0]
if qdata.Error != nil { if qdata.Error != nil {
errors = append(errors, fmt.Sprintf("METRICDATA/CCMS > fetching %s for node %s failed: %s", metric, query.Hostname, *qdata.Error)) /* Build list for "partial errors", if any */
errors = append(errors, fmt.Sprintf("fetching %s for node %s failed: %s", metric, query.Hostname, *qdata.Error))
} }
if qdata.Avg.IsNaN() || qdata.Min.IsNaN() || qdata.Max.IsNaN() { if qdata.Avg.IsNaN() || qdata.Min.IsNaN() || qdata.Max.IsNaN() {
@ -638,7 +641,8 @@ func (ccms *CCMetricStore) LoadNodeData(
} }
if len(errors) != 0 { if len(errors) != 0 {
return data, fmt.Errorf("METRICDATA/CCMS > errors: %s", strings.Join(errors, ", ")) /* Returns list of "partial errors" */
return data, fmt.Errorf("METRICDATA/CCMS > Errors: %s", strings.Join(errors, ", "))
} }
return data, nil return data, nil

View File

@ -37,7 +37,7 @@ type InfluxDBv2DataRepository struct {
func (idb *InfluxDBv2DataRepository) Init(rawConfig json.RawMessage) error { func (idb *InfluxDBv2DataRepository) Init(rawConfig json.RawMessage) error {
var config InfluxDBv2DataRepositoryConfig var config InfluxDBv2DataRepositoryConfig
if err := json.Unmarshal(rawConfig, &config); err != nil { if err := json.Unmarshal(rawConfig, &config); err != nil {
log.Error("Error while unmarshaling raw json config") log.Warn("Error while unmarshaling raw json config")
return err return err
} }
@ -205,7 +205,7 @@ func (idb *InfluxDBv2DataRepository) LoadData(
// Get Stats // Get Stats
stats, err := idb.LoadStats(job, metrics, ctx) stats, err := idb.LoadStats(job, metrics, ctx)
if err != nil { if err != nil {
log.Error("Error while loading statistics") log.Warn("Error while loading statistics")
return nil, err return nil, err
} }

View File

@ -46,7 +46,7 @@ func Init(disableArchive bool) error {
Kind string `json:"kind"` Kind string `json:"kind"`
} }
if err := json.Unmarshal(cluster.MetricDataRepository, &kind); err != nil { if err := json.Unmarshal(cluster.MetricDataRepository, &kind); err != nil {
log.Error("Error while unmarshaling raw json MetricDataRepository") log.Warn("Error while unmarshaling raw json MetricDataRepository")
return err return err
} }
@ -61,11 +61,11 @@ func Init(disableArchive bool) error {
case "test": case "test":
mdr = &TestMetricDataRepository{} mdr = &TestMetricDataRepository{}
default: default:
return fmt.Errorf("METRICDATA/METRICDATA > unkown metric data repository '%s' for cluster '%s'", kind.Kind, cluster.Name) return fmt.Errorf("METRICDATA/METRICDATA > Unknown MetricDataRepository %v for cluster %v", kind.Kind, cluster.Name)
} }
if err := mdr.Init(cluster.MetricDataRepository); err != nil { if err := mdr.Init(cluster.MetricDataRepository); err != nil {
log.Error("Error initializing the MetricDataRepository") log.Errorf("Error initializing MetricDataRepository %v for cluster %v", kind.Kind, cluster.Name)
return err return err
} }
metricDataRepos[cluster.Name] = mdr metricDataRepos[cluster.Name] = mdr
@ -109,7 +109,7 @@ func LoadData(job *schema.Job,
jd, err = repo.LoadData(job, metrics, scopes, ctx) jd, err = repo.LoadData(job, metrics, scopes, ctx)
if err != nil { if err != nil {
if len(jd) != 0 { if len(jd) != 0 {
log.Errorf("partial error: %s", err.Error()) log.Warnf("partial error: %s", err.Error())
} else { } else {
log.Error("Error while loading job data from metric repository") log.Error("Error while loading job data from metric repository")
return err, 0, 0 return err, 0, 0
@ -192,7 +192,7 @@ func LoadAverages(
stats, err := repo.LoadStats(job, metrics, ctx) stats, err := repo.LoadStats(job, metrics, ctx)
if err != nil { if err != nil {
log.Errorf("Error while loading statistics for job %#v (User %#v, Project %#v)", job.JobID, job.User, job.Project) log.Errorf("Error while loading statistics for job %v (User %v, Project %v)", job.JobID, job.User, job.Project)
return err return err
} }
@ -235,7 +235,7 @@ func LoadNodeData(
data, err := repo.LoadNodeData(cluster, metrics, nodes, scopes, from, to, ctx) data, err := repo.LoadNodeData(cluster, metrics, nodes, scopes, from, to, ctx)
if err != nil { if err != nil {
if len(data) != 0 { if len(data) != 0 {
log.Errorf("partial error: %s", err.Error()) log.Warnf("partial error: %s", err.Error())
} else { } else {
log.Error("Error while loading node data from metric repository") log.Error("Error while loading node data from metric repository")
return nil, err return nil, err

View File

@ -154,7 +154,7 @@ func (pdb *PrometheusDataRepository) Init(rawConfig json.RawMessage) error {
var config PrometheusDataRepositoryConfig var config PrometheusDataRepositoryConfig
// parse config // parse config
if err := json.Unmarshal(rawConfig, &config); err != nil { if err := json.Unmarshal(rawConfig, &config); err != nil {
log.Error("Error while unmarshaling raw json config") log.Warn("Error while unmarshaling raw json config")
return err return err
} }
// support basic authentication // support basic authentication
@ -188,7 +188,7 @@ func (pdb *PrometheusDataRepository) Init(rawConfig json.RawMessage) error {
if err == nil { if err == nil {
log.Debugf("Added PromQL template for %s: %s", metric, templ) log.Debugf("Added PromQL template for %s: %s", metric, templ)
} else { } else {
log.Errorf("Failed to parse PromQL template %s for metric %s", templ, metric) log.Warnf("Failed to parse PromQL template %s for metric %s", templ, metric)
} }
} }
return nil return nil
@ -292,12 +292,12 @@ func (pdb *PrometheusDataRepository) LoadData(
for _, metric := range metrics { for _, metric := range metrics {
metricConfig := archive.GetMetricConfig(job.Cluster, metric) metricConfig := archive.GetMetricConfig(job.Cluster, metric)
if metricConfig == nil { if metricConfig == nil {
log.Errorf("Error in LoadData: Metric %s for cluster %s not configured", metric, job.Cluster) log.Warnf("Error in LoadData: Metric %s for cluster %s not configured", metric, job.Cluster)
return nil, errors.New("METRICDATA/PROMETHEUS > Prometheus query error") return nil, errors.New("Prometheus config error")
} }
query, err := pdb.FormatQuery(metric, scope, nodes, job.Cluster) query, err := pdb.FormatQuery(metric, scope, nodes, job.Cluster)
if err != nil { if err != nil {
log.Error("Error while formatting prometheus query") log.Warn("Error while formatting prometheus query")
return nil, err return nil, err
} }
@ -311,7 +311,7 @@ func (pdb *PrometheusDataRepository) LoadData(
if err != nil { if err != nil {
log.Errorf("Prometheus query error in LoadData: %v\nQuery: %s", err, query) log.Errorf("Prometheus query error in LoadData: %v\nQuery: %s", err, query)
return nil, errors.New("METRICDATA/PROMETHEUS > Prometheus query error") return nil, errors.New("Prometheus query error")
} }
if len(warnings) > 0 { if len(warnings) > 0 {
log.Warnf("Warnings: %v\n", warnings) log.Warnf("Warnings: %v\n", warnings)
@ -361,7 +361,7 @@ func (pdb *PrometheusDataRepository) LoadStats(
data, err := pdb.LoadData(job, metrics, []schema.MetricScope{schema.MetricScopeNode}, ctx) data, err := pdb.LoadData(job, metrics, []schema.MetricScope{schema.MetricScopeNode}, ctx)
if err != nil { if err != nil {
log.Error("Error while loading job for stats") log.Warn("Error while loading job for stats")
return nil, err return nil, err
} }
for metric, metricData := range data { for metric, metricData := range data {
@ -399,12 +399,12 @@ func (pdb *PrometheusDataRepository) LoadNodeData(
for _, metric := range metrics { for _, metric := range metrics {
metricConfig := archive.GetMetricConfig(cluster, metric) metricConfig := archive.GetMetricConfig(cluster, metric)
if metricConfig == nil { if metricConfig == nil {
log.Errorf("Error in LoadNodeData: Metric %s for cluster %s not configured", metric, cluster) log.Warnf("Error in LoadNodeData: Metric %s for cluster %s not configured", metric, cluster)
return nil, errors.New("METRICDATA/PROMETHEUS > Prometheus querry error") return nil, errors.New("Prometheus config error")
} }
query, err := pdb.FormatQuery(metric, scope, nodes, cluster) query, err := pdb.FormatQuery(metric, scope, nodes, cluster)
if err != nil { if err != nil {
log.Error("Error while formatting prometheus query") log.Warn("Error while formatting prometheus query")
return nil, err return nil, err
} }
@ -418,7 +418,7 @@ func (pdb *PrometheusDataRepository) LoadNodeData(
if err != nil { if err != nil {
log.Errorf("Prometheus query error in LoadNodeData: %v\n", err) log.Errorf("Prometheus query error in LoadNodeData: %v\n", err)
return nil, errors.New("METRICDATA/PROMETHEUS > Prometheus querry error") return nil, errors.New("Prometheus query error")
} }
if len(warnings) > 0 { if len(warnings) > 0 {
log.Warnf("Warnings: %v\n", warnings) log.Warnf("Warnings: %v\n", warnings)

View File

@ -100,7 +100,7 @@ func HandleImportFlag(flag string) error {
raw, err := os.ReadFile(files[0]) raw, err := os.ReadFile(files[0])
if err != nil { if err != nil {
log.Error("Error while reading metadata file for import") log.Warn("Error while reading metadata file for import")
return err return err
} }
@ -113,13 +113,13 @@ func HandleImportFlag(flag string) error {
dec.DisallowUnknownFields() dec.DisallowUnknownFields()
jobMeta := schema.JobMeta{BaseJob: schema.JobDefaults} jobMeta := schema.JobMeta{BaseJob: schema.JobDefaults}
if err := dec.Decode(&jobMeta); err != nil { if err := dec.Decode(&jobMeta); err != nil {
log.Error("Error while decoding raw json metadata for import") log.Warn("Error while decoding raw json metadata for import")
return err return err
} }
raw, err = os.ReadFile(files[1]) raw, err = os.ReadFile(files[1])
if err != nil { if err != nil {
log.Error("Error while reading jobdata file for import") log.Warn("Error while reading jobdata file for import")
return err return err
} }
@ -132,7 +132,7 @@ func HandleImportFlag(flag string) error {
dec.DisallowUnknownFields() dec.DisallowUnknownFields()
jobData := schema.JobData{} jobData := schema.JobData{}
if err := dec.Decode(&jobData); err != nil { if err := dec.Decode(&jobData); err != nil {
log.Error("Error while decoding raw json jobdata for import") log.Warn("Error while decoding raw json jobdata for import")
return err return err
} }
@ -140,7 +140,7 @@ func HandleImportFlag(flag string) error {
jobMeta.MonitoringStatus = schema.MonitoringStatusArchivingSuccessful jobMeta.MonitoringStatus = schema.MonitoringStatusArchivingSuccessful
if job, err := GetJobRepository().Find(&jobMeta.JobID, &jobMeta.Cluster, &jobMeta.StartTime); err != sql.ErrNoRows { if job, err := GetJobRepository().Find(&jobMeta.JobID, &jobMeta.Cluster, &jobMeta.StartTime); err != sql.ErrNoRows {
if err != nil { if err != nil {
log.Error("Error while finding job in jobRepository") log.Warn("Error while finding job in jobRepository")
return err return err
} }
@ -160,17 +160,17 @@ func HandleImportFlag(flag string) error {
job.FileBwAvg = loadJobStat(&jobMeta, "file_bw") job.FileBwAvg = loadJobStat(&jobMeta, "file_bw")
job.RawResources, err = json.Marshal(job.Resources) job.RawResources, err = json.Marshal(job.Resources)
if err != nil { if err != nil {
log.Error("Error while marshaling job resources") log.Warn("Error while marshaling job resources")
return err return err
} }
job.RawMetaData, err = json.Marshal(job.MetaData) job.RawMetaData, err = json.Marshal(job.MetaData)
if err != nil { if err != nil {
log.Error("Error while marshaling job metadata") log.Warn("Error while marshaling job metadata")
return err return err
} }
if err := SanityChecks(&job.BaseJob); err != nil { if err := SanityChecks(&job.BaseJob); err != nil {
log.Error("BaseJob SanityChecks failed") log.Warn("BaseJob SanityChecks failed")
return err return err
} }
@ -181,13 +181,13 @@ func HandleImportFlag(flag string) error {
res, err := GetConnection().DB.NamedExec(NamedJobInsert, job) res, err := GetConnection().DB.NamedExec(NamedJobInsert, job)
if err != nil { if err != nil {
log.Error("Error while NamedJobInsert") log.Warn("Error while NamedJobInsert")
return err return err
} }
id, err := res.LastInsertId() id, err := res.LastInsertId()
if err != nil { if err != nil {
log.Error("Error while getting last insert ID") log.Warn("Error while getting last insert ID")
return err return err
} }
@ -221,13 +221,13 @@ func InitDB() error {
// that speeds up inserts A LOT. // that speeds up inserts A LOT.
tx, err := db.DB.Beginx() tx, err := db.DB.Beginx()
if err != nil { if err != nil {
log.Error("Error while bundling transactions") log.Warn("Error while bundling transactions")
return err return err
} }
stmt, err := tx.PrepareNamed(NamedJobInsert) stmt, err := tx.PrepareNamed(NamedJobInsert)
if err != nil { if err != nil {
log.Error("Error while preparing namedJobInsert") log.Warn("Error while preparing namedJobInsert")
return err return err
} }
tags := make(map[string]int64) tags := make(map[string]int64)
@ -247,14 +247,14 @@ func InitDB() error {
if i%10 == 0 { if i%10 == 0 {
if tx != nil { if tx != nil {
if err := tx.Commit(); err != nil { if err := tx.Commit(); err != nil {
log.Error("Error while committing transactions for jobMeta") log.Warn("Error while committing transactions for jobMeta")
return err return err
} }
} }
tx, err = db.DB.Beginx() tx, err = db.DB.Beginx()
if err != nil { if err != nil {
log.Error("Error while bundling transactions for jobMeta") log.Warn("Error while bundling transactions for jobMeta")
return err return err
} }
@ -315,19 +315,19 @@ func InitDB() error {
if !ok { if !ok {
res, err := tx.Exec(`INSERT INTO tag (tag_name, tag_type) VALUES (?, ?)`, tag.Name, tag.Type) res, err := tx.Exec(`INSERT INTO tag (tag_name, tag_type) VALUES (?, ?)`, tag.Name, tag.Type)
if err != nil { if err != nil {
log.Errorf("Error while inserting tag into tag table: %#v %#v", tag.Name, tag.Type) log.Errorf("Error while inserting tag into tag table: %v (Type %v)", tag.Name, tag.Type)
return err return err
} }
tagId, err = res.LastInsertId() tagId, err = res.LastInsertId()
if err != nil { if err != nil {
log.Error("Error while getting last insert ID") log.Warn("Error while getting last insert ID")
return err return err
} }
tags[tagstr] = tagId tags[tagstr] = tagId
} }
if _, err := tx.Exec(`INSERT INTO jobtag (job_id, tag_id) VALUES (?, ?)`, id, tagId); err != nil { if _, err := tx.Exec(`INSERT INTO jobtag (job_id, tag_id) VALUES (?, ?)`, id, tagId); err != nil {
log.Errorf("Error while inserting jobtag into jobtag table: %#v %#v", id, tagId) log.Errorf("Error while inserting jobtag into jobtag table: %v (TagID %v)", id, tagId)
return err return err
} }
} }
@ -338,18 +338,18 @@ func InitDB() error {
} }
if errorOccured > 0 { if errorOccured > 0 {
log.Errorf("Error in import of %d jobs!", errorOccured) log.Warnf("Error in import of %d jobs!", errorOccured)
} }
if err := tx.Commit(); err != nil { if err := tx.Commit(); err != nil {
log.Error("Error while committing SQL transactions") log.Warn("Error while committing SQL transactions")
return err return err
} }
// Create indexes after inserts so that they do not // Create indexes after inserts so that they do not
// need to be continually updated. // need to be continually updated.
if _, err := db.DB.Exec(JobsDbIndexes); err != nil { if _, err := db.DB.Exec(JobsDbIndexes); err != nil {
log.Error("Error while creating indices after inserts") log.Warn("Error while creating indices after inserts")
return err return err
} }
@ -360,14 +360,14 @@ func InitDB() error {
// This function also sets the subcluster if necessary! // This function also sets the subcluster if necessary!
func SanityChecks(job *schema.BaseJob) error { func SanityChecks(job *schema.BaseJob) error {
if c := archive.GetCluster(job.Cluster); c == nil { if c := archive.GetCluster(job.Cluster); c == nil {
return fmt.Errorf("no such cluster: %#v", job.Cluster) return fmt.Errorf("no such cluster: %v", job.Cluster)
} }
if err := archive.AssignSubCluster(job); err != nil { if err := archive.AssignSubCluster(job); err != nil {
log.Error("Error while assigning subcluster to job") log.Warn("Error while assigning subcluster to job")
return err return err
} }
if !job.State.Valid() { if !job.State.Valid() {
return fmt.Errorf("not a valid job state: %#v", job.State) return fmt.Errorf("not a valid job state: %v", job.State)
} }
if len(job.Resources) == 0 || len(job.User) == 0 { if len(job.Resources) == 0 || len(job.User) == 0 {
return fmt.Errorf("'resources' and 'user' should not be empty") return fmt.Errorf("'resources' and 'user' should not be empty")

View File

@ -68,12 +68,12 @@ func scanJob(row interface{ Scan(...interface{}) error }) (*schema.Job, error) {
&job.ID, &job.JobID, &job.User, &job.Project, &job.Cluster, &job.SubCluster, &job.StartTimeUnix, &job.Partition, &job.ArrayJobId, &job.ID, &job.JobID, &job.User, &job.Project, &job.Cluster, &job.SubCluster, &job.StartTimeUnix, &job.Partition, &job.ArrayJobId,
&job.NumNodes, &job.NumHWThreads, &job.NumAcc, &job.Exclusive, &job.MonitoringStatus, &job.SMT, &job.State, &job.NumNodes, &job.NumHWThreads, &job.NumAcc, &job.Exclusive, &job.MonitoringStatus, &job.SMT, &job.State,
&job.Duration, &job.Walltime, &job.RawResources /*&job.MetaData*/); err != nil { &job.Duration, &job.Walltime, &job.RawResources /*&job.MetaData*/); err != nil {
log.Error("Error while scanning rows") log.Warn("Error while scanning rows")
return nil, err return nil, err
} }
if err := json.Unmarshal(job.RawResources, &job.Resources); err != nil { if err := json.Unmarshal(job.RawResources, &job.Resources); err != nil {
log.Error("Error while unmarhsaling raw resources json") log.Warn("Error while unmarhsaling raw resources json")
return nil, err return nil, err
} }
@ -95,7 +95,7 @@ func (r *JobRepository) FetchMetadata(job *schema.Job) (map[string]string, error
if err := sq.Select("job.meta_data").From("job").Where("job.id = ?", job.ID). if err := sq.Select("job.meta_data").From("job").Where("job.id = ?", job.ID).
RunWith(r.stmtCache).QueryRow().Scan(&job.RawMetaData); err != nil { RunWith(r.stmtCache).QueryRow().Scan(&job.RawMetaData); err != nil {
log.Error("Error while scanning for job metadata") log.Warn("Error while scanning for job metadata")
return nil, err return nil, err
} }
@ -104,7 +104,7 @@ func (r *JobRepository) FetchMetadata(job *schema.Job) (map[string]string, error
} }
if err := json.Unmarshal(job.RawMetaData, &job.MetaData); err != nil { if err := json.Unmarshal(job.RawMetaData, &job.MetaData); err != nil {
log.Error("Error while unmarshaling raw metadata json") log.Warn("Error while unmarshaling raw metadata json")
return nil, err return nil, err
} }
@ -117,7 +117,7 @@ func (r *JobRepository) UpdateMetadata(job *schema.Job, key, val string) (err er
r.cache.Del(cachekey) r.cache.Del(cachekey)
if job.MetaData == nil { if job.MetaData == nil {
if _, err = r.FetchMetadata(job); err != nil { if _, err = r.FetchMetadata(job); err != nil {
log.Errorf("Error while fetching metadata for job, DB ID '%#v'", job.ID) log.Warnf("Error while fetching metadata for job, DB ID '%v'", job.ID)
return err return err
} }
} }
@ -134,12 +134,12 @@ func (r *JobRepository) UpdateMetadata(job *schema.Job, key, val string) (err er
} }
if job.RawMetaData, err = json.Marshal(job.MetaData); err != nil { if job.RawMetaData, err = json.Marshal(job.MetaData); err != nil {
log.Errorf("Error while marshaling metadata for job, DB ID '%#v'", job.ID) log.Warnf("Error while marshaling metadata for job, DB ID '%v'", job.ID)
return err return err
} }
if _, err = sq.Update("job").Set("meta_data", job.RawMetaData).Where("job.id = ?", job.ID).RunWith(r.stmtCache).Exec(); err != nil { if _, err = sq.Update("job").Set("meta_data", job.RawMetaData).Where("job.id = ?", job.ID).RunWith(r.stmtCache).Exec(); err != nil {
log.Errorf("Error while updating metadata for job, DB ID '%#v'", job.ID) log.Warnf("Error while updating metadata for job, DB ID '%v'", job.ID)
return err return err
} }
@ -200,7 +200,7 @@ func (r *JobRepository) FindAll(
for rows.Next() { for rows.Next() {
job, err := scanJob(rows) job, err := scanJob(rows)
if err != nil { if err != nil {
log.Error("Error while scanning rows") log.Warn("Error while scanning rows")
return nil, err return nil, err
} }
jobs = append(jobs, job) jobs = append(jobs, job)
@ -302,7 +302,7 @@ func (r *JobRepository) CountGroupedJobs(ctx context.Context, aggreg model.Aggre
count = fmt.Sprintf(`sum(job.num_nodes * (CASE WHEN job.job_state = "running" THEN %d - job.start_time ELSE job.duration END)) as count`, now) count = fmt.Sprintf(`sum(job.num_nodes * (CASE WHEN job.job_state = "running" THEN %d - job.start_time ELSE job.duration END)) as count`, now)
runner = r.DB runner = r.DB
default: default:
log.Notef("CountGroupedJobs() Weight %#v unknown.", *weight) log.Notef("CountGroupedJobs() Weight %v unknown.", *weight)
} }
} }
@ -326,7 +326,7 @@ func (r *JobRepository) CountGroupedJobs(ctx context.Context, aggreg model.Aggre
var group string var group string
var count int var count int
if err := rows.Scan(&group, &count); err != nil { if err := rows.Scan(&group, &count); err != nil {
log.Error("Error while scanning rows") log.Warn("Error while scanning rows")
return nil, err return nil, err
} }
@ -370,12 +370,12 @@ func (r *JobRepository) MarkArchived(
case "file_bw": case "file_bw":
stmt = stmt.Set("file_bw_avg", stats.Avg) stmt = stmt.Set("file_bw_avg", stats.Avg)
default: default:
log.Notef("MarkArchived() Metric '%#v' unknown", metric) log.Notef("MarkArchived() Metric '%v' unknown", metric)
} }
} }
if _, err := stmt.RunWith(r.stmtCache).Exec(); err != nil { if _, err := stmt.RunWith(r.stmtCache).Exec(); err != nil {
log.Error("Error while marking job as archived") log.Warn("Error while marking job as archived")
return err return err
} }
return nil return nil
@ -501,11 +501,11 @@ func (r *JobRepository) AllocatedNodes(cluster string) (map[string]map[string]in
var resources []*schema.Resource var resources []*schema.Resource
var subcluster string var subcluster string
if err := rows.Scan(&raw, &subcluster); err != nil { if err := rows.Scan(&raw, &subcluster); err != nil {
log.Error("Error while scanning rows") log.Warn("Error while scanning rows")
return nil, err return nil, err
} }
if err := json.Unmarshal(raw, &resources); err != nil { if err := json.Unmarshal(raw, &resources); err != nil {
log.Error("Error while unmarshaling raw resources json") log.Warn("Error while unmarshaling raw resources json")
return nil, err return nil, err
} }
@ -533,13 +533,13 @@ func (r *JobRepository) StopJobsExceedingWalltimeBy(seconds int) error {
Where(fmt.Sprintf("(%d - job.start_time) > (job.walltime + %d)", time.Now().Unix(), seconds)). Where(fmt.Sprintf("(%d - job.start_time) > (job.walltime + %d)", time.Now().Unix(), seconds)).
RunWith(r.DB).Exec() RunWith(r.DB).Exec()
if err != nil { if err != nil {
log.Error("Error while stopping jobs exceeding walltime") log.Warn("Error while stopping jobs exceeding walltime")
return err return err
} }
rowsAffected, err := res.RowsAffected() rowsAffected, err := res.RowsAffected()
if err != nil { if err != nil {
log.Error("Error while fetching affected rows after stopping due to exceeded walltime") log.Warn("Error while fetching affected rows after stopping due to exceeded walltime")
return err return err
} }

View File

@ -51,7 +51,7 @@ func (r *JobRepository) QueryJobs(
sql, args, err := query.ToSql() sql, args, err := query.ToSql()
if err != nil { if err != nil {
log.Error("Error while converting query to sql") log.Warn("Error while converting query to sql")
return nil, err return nil, err
} }
@ -67,7 +67,7 @@ func (r *JobRepository) QueryJobs(
job, err := scanJob(rows) job, err := scanJob(rows)
if err != nil { if err != nil {
rows.Close() rows.Close()
log.Error("Error while scanning rows") log.Warn("Error while scanning rows")
return nil, err return nil, err
} }
jobs = append(jobs, job) jobs = append(jobs, job)

View File

@ -20,13 +20,13 @@ func (r *JobRepository) AddTag(job int64, tag int64) ([]*schema.Tag, error) {
j, err := r.FindById(job) j, err := r.FindById(job)
if err != nil { if err != nil {
log.Error("Error while finding job by id") log.Warn("Error while finding job by id")
return nil, err return nil, err
} }
tags, err := r.GetTags(&job) tags, err := r.GetTags(&job)
if err != nil { if err != nil {
log.Error("Error while getting tags for job") log.Warn("Error while getting tags for job")
return nil, err return nil, err
} }
@ -42,13 +42,13 @@ func (r *JobRepository) RemoveTag(job, tag int64) ([]*schema.Tag, error) {
j, err := r.FindById(job) j, err := r.FindById(job)
if err != nil { if err != nil {
log.Error("Error while finding job by id") log.Warn("Error while finding job by id")
return nil, err return nil, err
} }
tags, err := r.GetTags(&job) tags, err := r.GetTags(&job)
if err != nil { if err != nil {
log.Error("Error while getting tags for job") log.Warn("Error while getting tags for job")
return nil, err return nil, err
} }
@ -153,7 +153,7 @@ func (r *JobRepository) GetTags(job *int64) ([]*schema.Tag, error) {
for rows.Next() { for rows.Next() {
tag := &schema.Tag{} tag := &schema.Tag{}
if err := rows.Scan(&tag.ID, &tag.Type, &tag.Name); err != nil { if err := rows.Scan(&tag.ID, &tag.Type, &tag.Name); err != nil {
log.Error("Error while scanning rows") log.Warn("Error while scanning rows")
return nil, err return nil, err
} }
tags = append(tags, tag) tags = append(tags, tag)

View File

@ -82,7 +82,7 @@ func (uCfg *UserCfgRepo) GetUIConfig(user *auth.User) (map[string]interface{}, e
rows, err := uCfg.Lookup.Query(user.Username) rows, err := uCfg.Lookup.Query(user.Username)
if err != nil { if err != nil {
log.Errorf("Error while looking up user config for user '%#v'", user.Username) log.Warnf("Error while looking up user config for user '%v'", user.Username)
return err, 0, 0 return err, 0, 0
} }
@ -91,13 +91,13 @@ func (uCfg *UserCfgRepo) GetUIConfig(user *auth.User) (map[string]interface{}, e
for rows.Next() { for rows.Next() {
var key, rawval string var key, rawval string
if err := rows.Scan(&key, &rawval); err != nil { if err := rows.Scan(&key, &rawval); err != nil {
log.Error("Error while scanning user config values") log.Warn("Error while scanning user config values")
return err, 0, 0 return err, 0, 0
} }
var val interface{} var val interface{}
if err := json.Unmarshal([]byte(rawval), &val); err != nil { if err := json.Unmarshal([]byte(rawval), &val); err != nil {
log.Error("Error while unmarshaling raw user config json") log.Warn("Error while unmarshaling raw user config json")
return err, 0, 0 return err, 0, 0
} }
@ -109,7 +109,7 @@ func (uCfg *UserCfgRepo) GetUIConfig(user *auth.User) (map[string]interface{}, e
return config, 24 * time.Hour, size return config, 24 * time.Hour, size
}) })
if err, ok := data.(error); ok { if err, ok := data.(error); ok {
log.Error("Error in data set") log.Error("Error in returned dataset")
return nil, err return nil, err
} }
@ -126,7 +126,7 @@ func (uCfg *UserCfgRepo) UpdateConfig(
if user == nil { if user == nil {
var val interface{} var val interface{}
if err := json.Unmarshal([]byte(value), &val); err != nil { if err := json.Unmarshal([]byte(value), &val); err != nil {
log.Error("Error while unmarshaling raw user config json") log.Warn("Error while unmarshaling raw user config json")
return err return err
} }
@ -138,7 +138,7 @@ func (uCfg *UserCfgRepo) UpdateConfig(
if _, err := uCfg.DB.Exec(`REPLACE INTO configuration (username, confkey, value) VALUES (?, ?, ?)`, if _, err := uCfg.DB.Exec(`REPLACE INTO configuration (username, confkey, value) VALUES (?, ?, ?)`,
user, key, value); err != nil { user, key, value); err != nil {
log.Errorf("Error while replacing user config in DB for user '$#v'", user) log.Warnf("Error while replacing user config in DB for user '$#v'", user)
return err return err
} }

View File

@ -61,12 +61,12 @@ func setupHomeRoute(i InfoType, r *http.Request) InfoType {
State: []schema.JobState{schema.JobStateRunning}, State: []schema.JobState{schema.JobStateRunning},
}}, nil, nil) }}, nil, nil)
if err != nil { if err != nil {
log.Errorf("failed to count jobs: %s", err.Error()) log.Warnf("failed to count jobs: %s", err.Error())
runningJobs = map[string]int{} runningJobs = map[string]int{}
} }
totalJobs, err := jobRepo.CountGroupedJobs(r.Context(), model.AggregateCluster, nil, nil, nil) totalJobs, err := jobRepo.CountGroupedJobs(r.Context(), model.AggregateCluster, nil, nil, nil)
if err != nil { if err != nil {
log.Errorf("failed to count jobs: %s", err.Error()) log.Warnf("failed to count jobs: %s", err.Error())
totalJobs = map[string]int{} totalJobs = map[string]int{}
} }
from := time.Now().Add(-24 * time.Hour) from := time.Now().Add(-24 * time.Hour)
@ -75,7 +75,7 @@ func setupHomeRoute(i InfoType, r *http.Request) InfoType {
Duration: &schema.IntRange{From: 0, To: graph.ShortJobDuration}, Duration: &schema.IntRange{From: 0, To: graph.ShortJobDuration},
}}, nil, nil) }}, nil, nil)
if err != nil { if err != nil {
log.Errorf("failed to count jobs: %s", err.Error()) log.Warnf("failed to count jobs: %s", err.Error())
recentShortJobs = map[string]int{} recentShortJobs = map[string]int{}
} }
@ -150,7 +150,7 @@ func setupTaglistRoute(i InfoType, r *http.Request) InfoType {
tags, counts, err := jobRepo.CountTags(username) tags, counts, err := jobRepo.CountTags(username)
tagMap := make(map[string][]map[string]interface{}) tagMap := make(map[string][]map[string]interface{})
if err != nil { if err != nil {
log.Errorf("GetTags failed: %s", err.Error()) log.Warnf("GetTags failed: %s", err.Error())
i["tagmap"] = tagMap i["tagmap"] = tagMap
return i return i
} }

View File

@ -92,13 +92,13 @@ func DropPrivileges(username string, group string) error {
if group != "" { if group != "" {
g, err := user.LookupGroup(group) g, err := user.LookupGroup(group)
if err != nil { if err != nil {
log.Error("Error while looking up group") log.Warn("Error while looking up group")
return err return err
} }
gid, _ := strconv.Atoi(g.Gid) gid, _ := strconv.Atoi(g.Gid)
if err := syscall.Setgid(gid); err != nil { if err := syscall.Setgid(gid); err != nil {
log.Error("Error while setting gid") log.Warn("Error while setting gid")
return err return err
} }
} }
@ -106,13 +106,13 @@ func DropPrivileges(username string, group string) error {
if username != "" { if username != "" {
u, err := user.Lookup(username) u, err := user.Lookup(username)
if err != nil { if err != nil {
log.Error("Error while looking up user") log.Warn("Error while looking up user")
return err return err
} }
uid, _ := strconv.Atoi(u.Uid) uid, _ := strconv.Atoi(u.Uid)
if err := syscall.Setuid(uid); err != nil { if err := syscall.Setuid(uid); err != nil {
log.Error("Error while setting uid") log.Warn("Error while setting uid")
return err return err
} }
} }

View File

@ -41,7 +41,7 @@ func Init(rawConfig json.RawMessage, disableArchive bool) error {
Kind string `json:"kind"` Kind string `json:"kind"`
} }
if err := json.Unmarshal(rawConfig, &kind); err != nil { if err := json.Unmarshal(rawConfig, &kind); err != nil {
log.Error("Error while unmarshaling raw config json") log.Warn("Error while unmarshaling raw config json")
return err return err
} }
@ -73,7 +73,7 @@ func LoadAveragesFromArchive(
metaFile, err := ar.LoadJobMeta(job) metaFile, err := ar.LoadJobMeta(job)
if err != nil { if err != nil {
log.Error("Error while loading job metadata from archiveBackend") log.Warn("Error while loading job metadata from archiveBackend")
return err return err
} }
@ -92,7 +92,7 @@ func GetStatistics(job *schema.Job) (map[string]schema.JobStatistics, error) {
metaFile, err := ar.LoadJobMeta(job) metaFile, err := ar.LoadJobMeta(job)
if err != nil { if err != nil {
log.Error("Error while loading job metadata from archiveBackend") log.Warn("Error while loading job metadata from archiveBackend")
return nil, err return nil, err
} }
@ -109,7 +109,7 @@ func UpdateTags(job *schema.Job, tags []*schema.Tag) error {
jobMeta, err := ar.LoadJobMeta(job) jobMeta, err := ar.LoadJobMeta(job)
if err != nil { if err != nil {
log.Error("Error while loading job metadata from archiveBackend") log.Warn("Error while loading job metadata from archiveBackend")
return err return err
} }

View File

@ -24,7 +24,7 @@ func initClusterConfig() error {
cluster, err := ar.LoadClusterCfg(c) cluster, err := ar.LoadClusterCfg(c)
if err != nil { if err != nil {
log.Errorf("Error while loading cluster config for cluster '%#v'", c) log.Warnf("Error while loading cluster config for cluster '%v'", c)
return err return err
} }
@ -114,7 +114,7 @@ func AssignSubCluster(job *schema.BaseJob) error {
cluster := GetCluster(job.Cluster) cluster := GetCluster(job.Cluster)
if cluster == nil { if cluster == nil {
return fmt.Errorf("ARCHIVE/CLUSTERCONFIG > unkown cluster: %#v", job.Cluster) return fmt.Errorf("ARCHIVE/CLUSTERCONFIG > unkown cluster: %v", job.Cluster)
} }
if job.SubCluster != "" { if job.SubCluster != "" {
@ -123,7 +123,7 @@ func AssignSubCluster(job *schema.BaseJob) error {
return nil return nil
} }
} }
return fmt.Errorf("ARCHIVE/CLUSTERCONFIG > already assigned subcluster %#v unkown (cluster: %#v)", job.SubCluster, job.Cluster) return fmt.Errorf("ARCHIVE/CLUSTERCONFIG > already assigned subcluster %v unkown (cluster: %v)", job.SubCluster, job.Cluster)
} }
if len(job.Resources) == 0 { if len(job.Resources) == 0 {
@ -143,7 +143,7 @@ func AssignSubCluster(job *schema.BaseJob) error {
return nil return nil
} }
return fmt.Errorf("ARCHIVE/CLUSTERCONFIG > no subcluster found for cluster %#v and host %#v", job.Cluster, host0) return fmt.Errorf("ARCHIVE/CLUSTERCONFIG > no subcluster found for cluster %v and host %v", job.Cluster, host0)
} }
func GetSubClusterByNode(cluster, hostname string) (string, error) { func GetSubClusterByNode(cluster, hostname string) (string, error) {
@ -156,12 +156,12 @@ func GetSubClusterByNode(cluster, hostname string) (string, error) {
c := GetCluster(cluster) c := GetCluster(cluster)
if c == nil { if c == nil {
return "", fmt.Errorf("ARCHIVE/CLUSTERCONFIG > unkown cluster: %#v", cluster) return "", fmt.Errorf("ARCHIVE/CLUSTERCONFIG > unkown cluster: %v", cluster)
} }
if c.SubClusters[0].Nodes == "" { if c.SubClusters[0].Nodes == "" {
return c.SubClusters[0].Name, nil return c.SubClusters[0].Name, nil
} }
return "", fmt.Errorf("ARCHIVE/CLUSTERCONFIG > no subcluster found for cluster %#v and host %#v", cluster, hostname) return "", fmt.Errorf("ARCHIVE/CLUSTERCONFIG > no subcluster found for cluster %v and host %v", cluster, hostname)
} }

View File

@ -46,7 +46,7 @@ func loadJobMeta(filename string) (*schema.JobMeta, error) {
f, err := os.Open(filename) f, err := os.Open(filename)
if err != nil { if err != nil {
log.Errorf("loadJobMeta() > open file error: %#v", err) log.Errorf("loadJobMeta() > open file error: %v", err)
return &schema.JobMeta{}, err return &schema.JobMeta{}, err
} }
defer f.Close() defer f.Close()
@ -58,19 +58,19 @@ func (fsa *FsArchive) Init(rawConfig json.RawMessage) error {
var config FsArchiveConfig var config FsArchiveConfig
if err := json.Unmarshal(rawConfig, &config); err != nil { if err := json.Unmarshal(rawConfig, &config); err != nil {
log.Errorf("Init() > Unmarshal error: %#v", err) log.Warnf("Init() > Unmarshal error: %#v", err)
return err return err
} }
if config.Path == "" { if config.Path == "" {
err := fmt.Errorf("ARCHIVE/FSBACKEND > Init() : empty config.Path") err := fmt.Errorf("Init() : empty config.Path")
log.Errorf("Init() > config.Path error: %#v", err) log.Errorf("Init() > config.Path error: %v", err)
return err return err
} }
fsa.path = config.Path fsa.path = config.Path
entries, err := os.ReadDir(fsa.path) entries, err := os.ReadDir(fsa.path)
if err != nil { if err != nil {
log.Errorf("Init() > ReadDir() error: %#v", err) log.Errorf("Init() > ReadDir() error: %v", err)
return err return err
} }
@ -86,7 +86,7 @@ func (fsa *FsArchive) LoadJobData(job *schema.Job) (schema.JobData, error) {
filename := getPath(job, fsa.path, "data.json") filename := getPath(job, fsa.path, "data.json")
f, err := os.Open(filename) f, err := os.Open(filename)
if err != nil { if err != nil {
log.Errorf("LoadJobData() > open file error: %#v", err) log.Errorf("LoadJobData() > open file error: %v", err)
return nil, err return nil, err
} }
defer f.Close() defer f.Close()
@ -109,7 +109,8 @@ func (fsa *FsArchive) LoadClusterCfg(name string) (*schema.Cluster, error) {
} }
if config.Keys.Validate { if config.Keys.Validate {
if err := schema.Validate(schema.ClusterCfg, bytes.NewReader(b)); err != nil { if err := schema.Validate(schema.ClusterCfg, bytes.NewReader(b)); err != nil {
return &schema.Cluster{}, fmt.Errorf("ARCHIVE/FSBACKEND > Validate cluster config: %v\n", err) log.Warnf("Validate cluster config: %v\n", err)
return &schema.Cluster{}, fmt.Errorf("Validate cluster config: %v\n", err)
} }
} }
return DecodeCluster(bytes.NewReader(b)) return DecodeCluster(bytes.NewReader(b))
@ -183,7 +184,7 @@ func (fsa *FsArchive) StoreJobMeta(jobMeta *schema.JobMeta) error {
return err return err
} }
if err := f.Close(); err != nil { if err := f.Close(); err != nil {
log.Error("Error while closing meta.json file") log.Warn("Error while closing meta.json file")
return err return err
} }
@ -220,7 +221,7 @@ func (fsa *FsArchive) ImportJob(
return err return err
} }
if err := f.Close(); err != nil { if err := f.Close(); err != nil {
log.Error("Error while closing meta.json file") log.Warn("Error while closing meta.json file")
return err return err
} }
@ -234,10 +235,10 @@ func (fsa *FsArchive) ImportJob(
return err return err
} }
if err := f.Close(); err != nil { if err := f.Close(); err != nil {
log.Error("Error while closing data.json file") log.Warn("Error while closing data.json file")
return err return err
} }
// no error: final return is nil // no error: final return is nil
return nil return nil
} }

View File

@ -17,7 +17,7 @@ func DecodeJobData(r io.Reader, k string) (schema.JobData, error) {
data := cache.Get(k, func() (value interface{}, ttl time.Duration, size int) { data := cache.Get(k, func() (value interface{}, ttl time.Duration, size int) {
var d schema.JobData var d schema.JobData
if err := json.NewDecoder(r).Decode(&d); err != nil { if err := json.NewDecoder(r).Decode(&d); err != nil {
log.Error("Error while decoding raw job data json") log.Warn("Error while decoding raw job data json")
return err, 0, 1000 return err, 0, 1000
} }
@ -25,7 +25,7 @@ func DecodeJobData(r io.Reader, k string) (schema.JobData, error) {
}) })
if err, ok := data.(error); ok { if err, ok := data.(error); ok {
log.Error("Error in decoded job data set") log.Warn("Error in decoded job data set")
return nil, err return nil, err
} }
@ -35,7 +35,7 @@ func DecodeJobData(r io.Reader, k string) (schema.JobData, error) {
func DecodeJobMeta(r io.Reader) (*schema.JobMeta, error) { func DecodeJobMeta(r io.Reader) (*schema.JobMeta, error) {
var d schema.JobMeta var d schema.JobMeta
if err := json.NewDecoder(r).Decode(&d); err != nil { if err := json.NewDecoder(r).Decode(&d); err != nil {
log.Error("Error while decoding raw job meta json") log.Warn("Error while decoding raw job meta json")
return &d, err return &d, err
} }
@ -47,7 +47,7 @@ func DecodeJobMeta(r io.Reader) (*schema.JobMeta, error) {
func DecodeCluster(r io.Reader) (*schema.Cluster, error) { func DecodeCluster(r io.Reader) (*schema.Cluster, error) {
var c schema.Cluster var c schema.Cluster
if err := json.NewDecoder(r).Decode(&c); err != nil { if err := json.NewDecoder(r).Decode(&c); err != nil {
log.Error("Error while decoding raw cluster json") log.Warn("Error while decoding raw cluster json")
return &c, err return &c, err
} }
@ -59,7 +59,7 @@ func DecodeCluster(r io.Reader) (*schema.Cluster, error) {
func EncodeJobData(w io.Writer, d *schema.JobData) error { func EncodeJobData(w io.Writer, d *schema.JobData) error {
// Sanitize parameters // Sanitize parameters
if err := json.NewEncoder(w).Encode(d); err != nil { if err := json.NewEncoder(w).Encode(d); err != nil {
log.Error("Error while encoding new job data json") log.Warn("Error while encoding new job data json")
return err return err
} }
@ -69,7 +69,7 @@ func EncodeJobData(w io.Writer, d *schema.JobData) error {
func EncodeJobMeta(w io.Writer, d *schema.JobMeta) error { func EncodeJobMeta(w io.Writer, d *schema.JobMeta) error {
// Sanitize parameters // Sanitize parameters
if err := json.NewEncoder(w).Encode(d); err != nil { if err := json.NewEncoder(w).Encode(d); err != nil {
log.Error("Error while encoding new job meta json") log.Warn("Error while encoding new job meta json")
return err return err
} }

View File

@ -149,7 +149,7 @@ func ParseNodeList(raw string) (NodeList, error) {
s1, s2 := part[0:minus], part[minus+1:] s1, s2 := part[0:minus], part[minus+1:]
if len(s1) != len(s2) || len(s1) == 0 { if len(s1) != len(s2) || len(s1) == 0 {
return nil, fmt.Errorf("ARCHIVE/NODELIST > %#v and %#v are not of equal length or of length zero", s1, s2) return nil, fmt.Errorf("ARCHIVE/NODELIST > %v and %v are not of equal length or of length zero", s1, s2)
} }
x1, err := strconv.ParseInt(s1, 10, 32) x1, err := strconv.ParseInt(s1, 10, 32)

View File

@ -45,7 +45,7 @@ func (f *Float) UnmarshalJSON(input []byte) error {
val, err := strconv.ParseFloat(s, 64) val, err := strconv.ParseFloat(s, 64)
if err != nil { if err != nil {
log.Error("Error while parsing custom float") log.Warn("Error while parsing custom float")
return err return err
} }
*f = Float(val) *f = Float(val)

View File

@ -55,7 +55,7 @@ func Validate(k Kind, r io.Reader) (err error) {
var v interface{} var v interface{}
if err := json.NewDecoder(r).Decode(&v); err != nil { if err := json.NewDecoder(r).Decode(&v); err != nil {
log.Errorf("Error while decoding raw json schema: %#v", err) log.Warnf("Error while decoding raw json schema: %#v", err)
return err return err
} }

View File

@ -92,8 +92,8 @@ func RenderTemplate(rw http.ResponseWriter, r *http.Request, file string, page *
} }
} }
log.Infof("WEB/WEB > page config : %v\n", page.Config) log.Infof("Page config : %v\n", page.Config)
if err := t.Execute(rw, page); err != nil { if err := t.Execute(rw, page); err != nil {
log.Errorf("WEB/WEB > template error: %s", err.Error()) log.Errorf("Template error: %s", err.Error())
} }
} }