add more information to existing error logs and panics

- prefix log messages with '$ROOT/$FILE >' for better localization in the code
- add text where none was given
- fix unnecessary sprintf nesting in influxv2 and prometheus metricrepo logging
Christoph Kluge 2023-01-19 16:59:14 +01:00
parent 5abd3641b2
commit 24a4244f19
31 changed files with 254 additions and 253 deletions
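The first two bullets describe one convention: every log call should say what failed and carry a '$ROOT/$FILE >' source prefix. A minimal sketch of the style, using the cc-backend log package that this diff imports elsewhere (the error value here is only illustrative):

```go
package main

import (
	"errors"

	"github.com/ClusterCockpit/cc-backend/pkg/log"
)

func main() {
	err := errors.New("connection refused")

	// Old style, as removed throughout this commit: no context at all.
	// log.Fatal(err)

	// New style: '$ROOT/$FILE > message' localizes the call site, and the
	// message names the failed operation before appending the error itself.
	log.Errorf("AUTH/AUTH > authentication failed: %s", err.Error())
}
```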

View File

@ -118,7 +118,7 @@ func main() {
"ldap": config.Keys.LdapConfig, "ldap": config.Keys.LdapConfig,
"jwt": config.Keys.JwtConfig, "jwt": config.Keys.JwtConfig,
}); err != nil { }); err != nil {
log.Fatal(err) log.Fatalf("auth initialization failed: %v", err)
} }
if d, err := time.ParseDuration(config.Keys.SessionMaxAge); err != nil { if d, err := time.ParseDuration(config.Keys.SessionMaxAge); err != nil {
@ -134,12 +134,12 @@ func main() {
if err := authentication.AddUser(&auth.User{ if err := authentication.AddUser(&auth.User{
Username: parts[0], Password: parts[2], Roles: strings.Split(parts[1], ","), Username: parts[0], Password: parts[2], Roles: strings.Split(parts[1], ","),
}); err != nil { }); err != nil {
log.Fatal(err) log.Fatalf("adding authentication for user '%s' failed: %v", parts[0], err)
} }
} }
if flagDelUser != "" { if flagDelUser != "" {
if err := authentication.DelUser(flagDelUser); err != nil { if err := authentication.DelUser(flagDelUser); err != nil {
log.Fatal(err) log.Fatalf("deleting user failed: %v", err)
} }
} }
@ -149,7 +149,7 @@ func main() {
} }
if err := authentication.LdapAuth.Sync(); err != nil { if err := authentication.LdapAuth.Sync(); err != nil {
log.Fatal(err) log.Fatalf("LDAP sync failed: %v", err)
} }
log.Info("LDAP sync successful") log.Info("LDAP sync successful")
} }
@ -157,16 +157,16 @@ func main() {
if flagGenJWT != "" { if flagGenJWT != "" {
user, err := authentication.GetUser(flagGenJWT) user, err := authentication.GetUser(flagGenJWT)
if err != nil { if err != nil {
log.Fatal(err) log.Fatalf("could not get user for JWT generation: %v", err)
} }
if !user.HasRole(auth.RoleApi) { if !user.HasRole(auth.RoleApi) {
log.Warn("that user does not have the API role") log.Warnf("user '%s' does not have the API role", user.Username)
} }
jwt, err := authentication.JwtAuth.ProvideJWT(user) jwt, err := authentication.JwtAuth.ProvideJWT(user)
if err != nil { if err != nil {
log.Fatal(err) log.Fatalf("failed to provide JWT to user '%s': %v", user.Username, err)
} }
fmt.Printf("JWT for '%s': %s\n", user.Username, jwt) fmt.Printf("JWT for '%s': %s\n", user.Username, jwt)
@ -176,16 +176,16 @@ func main() {
} }
if err := archive.Init(config.Keys.Archive, config.Keys.DisableArchive); err != nil { if err := archive.Init(config.Keys.Archive, config.Keys.DisableArchive); err != nil {
log.Fatal(err) log.Fatalf("failed to initialize archive: %s", err.Error())
} }
if err := metricdata.Init(config.Keys.DisableArchive); err != nil { if err := metricdata.Init(config.Keys.DisableArchive); err != nil {
log.Fatal(err) log.Fatalf("failed to initialize metricdata repository: %s", err.Error())
} }
if flagReinitDB { if flagReinitDB {
if err := repository.InitDB(); err != nil { if err := repository.InitDB(); err != nil {
log.Fatal(err) log.Fatalf("failed to re-initialize repository DB: %s", err.Error())
} }
} }
@ -361,7 +361,7 @@ func main() {
// Start http or https server // Start http or https server
listener, err := net.Listen("tcp", config.Keys.Addr) listener, err := net.Listen("tcp", config.Keys.Addr)
if err != nil { if err != nil {
log.Fatal(err) log.Fatalf("starting http listener failed: %v", err)
} }
if !strings.HasSuffix(config.Keys.Addr, ":80") && config.Keys.RedirectHttpTo != "" { if !strings.HasSuffix(config.Keys.Addr, ":80") && config.Keys.RedirectHttpTo != "" {
@ -373,7 +373,7 @@ func main() {
if config.Keys.HttpsCertFile != "" && config.Keys.HttpsKeyFile != "" { if config.Keys.HttpsCertFile != "" && config.Keys.HttpsKeyFile != "" {
cert, err := tls.LoadX509KeyPair(config.Keys.HttpsCertFile, config.Keys.HttpsKeyFile) cert, err := tls.LoadX509KeyPair(config.Keys.HttpsCertFile, config.Keys.HttpsKeyFile)
if err != nil { if err != nil {
log.Fatal(err) log.Fatalf("loading X509 keypair failed: %v", err)
} }
listener = tls.NewListener(listener, &tls.Config{ listener = tls.NewListener(listener, &tls.Config{
Certificates: []tls.Certificate{cert}, Certificates: []tls.Certificate{cert},
@ -400,7 +400,7 @@ func main() {
go func() { go func() {
defer wg.Done() defer wg.Done()
if err := server.Serve(listener); err != nil && err != http.ErrServerClosed { if err := server.Serve(listener); err != nil && err != http.ErrServerClosed {
log.Fatal(err) log.Fatalf("starting server failed: %v", err)
} }
}() }()
@ -424,7 +424,7 @@ func main() {
for range time.Tick(30 * time.Minute) { for range time.Tick(30 * time.Minute) {
err := jobRepo.StopJobsExceedingWalltimeBy(config.Keys.StopJobsExceedingWalltime) err := jobRepo.StopJobsExceedingWalltimeBy(config.Keys.StopJobsExceedingWalltime)
if err != nil { if err != nil {
log.Errorf("error while looking for jobs exceeding theire walltime: %s", err.Error()) log.Errorf("error while looking for jobs exceeding their walltime: %s", err.Error())
} }
runtime.GC() runtime.GC()
} }
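All of the main() changes above move from log.Fatal(err) to log.Fatalf(...). The distinction matters: only the f-suffixed functions run their arguments through fmt.Sprintf, so a format verb handed to plain log.Fatal is printed literally. A stdlib sketch of the pitfall, assuming cc-backend's pkg/log mirrors the standard Fatal/Fatalf split (which the pairs above suggest):

```go
package main

import (
	"errors"
	"log"
)

func main() {
	err := errors.New("no such table: job")

	// Fatal formats like fmt.Sprint: the "%s" is NOT substituted, so this
	// would print the literal verb followed by the error text.
	// log.Fatal("failed to re-initialize repository DB: %s", err.Error())

	// Fatalf formats like fmt.Sprintf and substitutes the verb as intended.
	log.Fatalf("failed to re-initialize repository DB: %s", err.Error())
}
```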

View File

@ -134,7 +134,7 @@ type ApiTag struct {
type TagJobApiRequest []*ApiTag type TagJobApiRequest []*ApiTag
func handleError(err error, statusCode int, rw http.ResponseWriter) { func handleError(err error, statusCode int, rw http.ResponseWriter) {
log.Warnf("REST API: %s", err.Error()) log.Warnf("API/REST > ERROR : %s", err.Error())
rw.Header().Add("Content-Type", "application/json") rw.Header().Add("Content-Type", "application/json")
rw.WriteHeader(statusCode) rw.WriteHeader(statusCode)
json.NewEncoder(rw).Encode(ErrorResponse{ json.NewEncoder(rw).Encode(ErrorResponse{
@ -169,7 +169,7 @@ func decode(r io.Reader, val interface{}) error {
// @router /jobs/ [get] // @router /jobs/ [get]
func (api *RestApi) getJobs(rw http.ResponseWriter, r *http.Request) { func (api *RestApi) getJobs(rw http.ResponseWriter, r *http.Request) {
if user := auth.GetUser(r.Context()); user != nil && !user.HasRole(auth.RoleApi) { if user := auth.GetUser(r.Context()); user != nil && !user.HasRole(auth.RoleApi) {
handleError(fmt.Errorf("missing role: %#v", auth.RoleApi), http.StatusForbidden, rw) handleError(fmt.Errorf("API/REST > missing role: %#v", auth.RoleApi), http.StatusForbidden, rw)
return return
} }
@ -271,7 +271,7 @@ func (api *RestApi) getJobs(rw http.ResponseWriter, r *http.Request) {
results = append(results, res) results = append(results, res)
} }
log.Debugf("/api/jobs: %d jobs returned", len(results)) log.Debugf("API/REST > /api/jobs: %d jobs returned", len(results))
bw := bufio.NewWriter(rw) bw := bufio.NewWriter(rw)
defer bw.Flush() defer bw.Flush()
if err := json.NewEncoder(bw).Encode(map[string]interface{}{ if err := json.NewEncoder(bw).Encode(map[string]interface{}{
@ -300,7 +300,7 @@ func (api *RestApi) getJobs(rw http.ResponseWriter, r *http.Request) {
// @router /jobs/tag_job/{id} [post] // @router /jobs/tag_job/{id} [post]
func (api *RestApi) tagJob(rw http.ResponseWriter, r *http.Request) { func (api *RestApi) tagJob(rw http.ResponseWriter, r *http.Request) {
if user := auth.GetUser(r.Context()); user != nil && !user.HasRole(auth.RoleApi) { if user := auth.GetUser(r.Context()); user != nil && !user.HasRole(auth.RoleApi) {
handleError(fmt.Errorf("missing role: %#v", auth.RoleApi), http.StatusForbidden, rw) handleError(fmt.Errorf("API/REST > missing role: %#v", auth.RoleApi), http.StatusForbidden, rw)
return return
} }
@ -365,13 +365,13 @@ func (api *RestApi) tagJob(rw http.ResponseWriter, r *http.Request) {
// @router /jobs/start_job/ [post] // @router /jobs/start_job/ [post]
func (api *RestApi) startJob(rw http.ResponseWriter, r *http.Request) { func (api *RestApi) startJob(rw http.ResponseWriter, r *http.Request) {
if user := auth.GetUser(r.Context()); user != nil && !user.HasRole(auth.RoleApi) { if user := auth.GetUser(r.Context()); user != nil && !user.HasRole(auth.RoleApi) {
handleError(fmt.Errorf("missing role: %#v", auth.RoleApi), http.StatusForbidden, rw) handleError(fmt.Errorf("API/REST > missing role: %#v", auth.RoleApi), http.StatusForbidden, rw)
return return
} }
req := schema.JobMeta{BaseJob: schema.JobDefaults} req := schema.JobMeta{BaseJob: schema.JobDefaults}
if err := decode(r.Body, &req); err != nil { if err := decode(r.Body, &req); err != nil {
handleError(fmt.Errorf("parsing request body failed: %w", err), http.StatusBadRequest, rw) handleError(fmt.Errorf("API/REST > parsing request body failed: %w", err), http.StatusBadRequest, rw)
return return
} }
@ -391,12 +391,12 @@ func (api *RestApi) startJob(rw http.ResponseWriter, r *http.Request) {
// Check if combination of (job_id, cluster_id, start_time) already exists: // Check if combination of (job_id, cluster_id, start_time) already exists:
jobs, err := api.JobRepository.FindAll(&req.JobID, &req.Cluster, nil) jobs, err := api.JobRepository.FindAll(&req.JobID, &req.Cluster, nil)
if err != nil && err != sql.ErrNoRows { if err != nil && err != sql.ErrNoRows {
handleError(fmt.Errorf("checking for duplicate failed: %w", err), http.StatusInternalServerError, rw) handleError(fmt.Errorf("API/REST > checking for duplicate failed: %w", err), http.StatusInternalServerError, rw)
return return
} else if err == nil { } else if err == nil {
for _, job := range jobs { for _, job := range jobs {
if (req.StartTime - job.StartTimeUnix) < 86400 { if (req.StartTime - job.StartTimeUnix) < 86400 {
handleError(fmt.Errorf("a job with that jobId, cluster and startTime already exists: dbid: %d", job.ID), http.StatusUnprocessableEntity, rw) handleError(fmt.Errorf("API/REST > a job with that jobId, cluster and startTime already exists: dbid: %d", job.ID), http.StatusUnprocessableEntity, rw)
return return
} }
} }
@ -404,7 +404,7 @@ func (api *RestApi) startJob(rw http.ResponseWriter, r *http.Request) {
id, err := api.JobRepository.Start(&req) id, err := api.JobRepository.Start(&req)
if err != nil { if err != nil {
handleError(fmt.Errorf("insert into database failed: %w", err), http.StatusInternalServerError, rw) handleError(fmt.Errorf("API/REST > insert into database failed: %w", err), http.StatusInternalServerError, rw)
return return
} }
// unlock here, adding Tags can be async // unlock here, adding Tags can be async
@ -413,12 +413,12 @@ func (api *RestApi) startJob(rw http.ResponseWriter, r *http.Request) {
for _, tag := range req.Tags { for _, tag := range req.Tags {
if _, err := api.JobRepository.AddTagOrCreate(id, tag.Type, tag.Name); err != nil { if _, err := api.JobRepository.AddTagOrCreate(id, tag.Type, tag.Name); err != nil {
http.Error(rw, err.Error(), http.StatusInternalServerError) http.Error(rw, err.Error(), http.StatusInternalServerError)
handleError(fmt.Errorf("adding tag to new job %d failed: %w", id, err), http.StatusInternalServerError, rw) handleError(fmt.Errorf("API/REST > adding tag to new job %d failed: %w", id, err), http.StatusInternalServerError, rw)
return return
} }
} }
log.Printf("new job (id: %d): cluster=%s, jobId=%d, user=%s, startTime=%d", id, req.Cluster, req.JobID, req.User, req.StartTime) log.Printf("API/REST > new job (id: %d): cluster=%s, jobId=%d, user=%s, startTime=%d", id, req.Cluster, req.JobID, req.User, req.StartTime)
rw.Header().Add("Content-Type", "application/json") rw.Header().Add("Content-Type", "application/json")
rw.WriteHeader(http.StatusCreated) rw.WriteHeader(http.StatusCreated)
json.NewEncoder(rw).Encode(StartJobApiResponse{ json.NewEncoder(rw).Encode(StartJobApiResponse{
@ -446,14 +446,14 @@ func (api *RestApi) startJob(rw http.ResponseWriter, r *http.Request) {
// @router /jobs/stop_job/{id} [post] // @router /jobs/stop_job/{id} [post]
func (api *RestApi) stopJobById(rw http.ResponseWriter, r *http.Request) { func (api *RestApi) stopJobById(rw http.ResponseWriter, r *http.Request) {
if user := auth.GetUser(r.Context()); user != nil && !user.HasRole(auth.RoleApi) { if user := auth.GetUser(r.Context()); user != nil && !user.HasRole(auth.RoleApi) {
handleError(fmt.Errorf("missing role: %#v", auth.RoleApi), http.StatusForbidden, rw) handleError(fmt.Errorf("API/REST > missing role: %#v", auth.RoleApi), http.StatusForbidden, rw)
return return
} }
// Parse request body: Only StopTime and State // Parse request body: Only StopTime and State
req := StopJobApiRequest{} req := StopJobApiRequest{}
if err := decode(r.Body, &req); err != nil { if err := decode(r.Body, &req); err != nil {
handleError(fmt.Errorf("parsing request body failed: %w", err), http.StatusBadRequest, rw) handleError(fmt.Errorf("API/REST > parsing request body failed: %w", err), http.StatusBadRequest, rw)
return return
} }
@ -464,17 +464,17 @@ func (api *RestApi) stopJobById(rw http.ResponseWriter, r *http.Request) {
if ok { if ok {
id, e := strconv.ParseInt(id, 10, 64) id, e := strconv.ParseInt(id, 10, 64)
if e != nil { if e != nil {
handleError(fmt.Errorf("integer expected in path for id: %w", e), http.StatusBadRequest, rw) handleError(fmt.Errorf("API/REST > integer expected in path for id: %w", e), http.StatusBadRequest, rw)
return return
} }
job, err = api.JobRepository.FindById(id) job, err = api.JobRepository.FindById(id)
} else { } else {
handleError(errors.New("the parameter 'id' is required"), http.StatusBadRequest, rw) handleError(errors.New("API/REST > the parameter 'id' is required"), http.StatusBadRequest, rw)
return return
} }
if err != nil { if err != nil {
handleError(fmt.Errorf("finding job failed: %w", err), http.StatusUnprocessableEntity, rw) handleError(fmt.Errorf("API/REST > finding job failed: %w", err), http.StatusUnprocessableEntity, rw)
return return
} }
@ -499,14 +499,14 @@ func (api *RestApi) stopJobById(rw http.ResponseWriter, r *http.Request) {
// @router /jobs/stop_job/ [post] // @router /jobs/stop_job/ [post]
func (api *RestApi) stopJobByRequest(rw http.ResponseWriter, r *http.Request) { func (api *RestApi) stopJobByRequest(rw http.ResponseWriter, r *http.Request) {
if user := auth.GetUser(r.Context()); user != nil && !user.HasRole(auth.RoleApi) { if user := auth.GetUser(r.Context()); user != nil && !user.HasRole(auth.RoleApi) {
handleError(fmt.Errorf("missing role: %#v", auth.RoleApi), http.StatusForbidden, rw) handleError(fmt.Errorf("API/REST > missing role: %#v", auth.RoleApi), http.StatusForbidden, rw)
return return
} }
// Parse request body // Parse request body
req := StopJobApiRequest{} req := StopJobApiRequest{}
if err := decode(r.Body, &req); err != nil { if err := decode(r.Body, &req); err != nil {
handleError(fmt.Errorf("parsing request body failed: %w", err), http.StatusBadRequest, rw) handleError(fmt.Errorf("API/REST > parsing request body failed: %w", err), http.StatusBadRequest, rw)
return return
} }
@ -514,14 +514,14 @@ func (api *RestApi) stopJobByRequest(rw http.ResponseWriter, r *http.Request) {
var job *schema.Job var job *schema.Job
var err error var err error
if req.JobId == nil { if req.JobId == nil {
handleError(errors.New("the field 'jobId' is required"), http.StatusBadRequest, rw) handleError(errors.New("API/REST > the field 'jobId' is required"), http.StatusBadRequest, rw)
return return
} }
job, err = api.JobRepository.Find(req.JobId, req.Cluster, req.StartTime) job, err = api.JobRepository.Find(req.JobId, req.Cluster, req.StartTime)
if err != nil { if err != nil {
handleError(fmt.Errorf("finding job failed: %w", err), http.StatusUnprocessableEntity, rw) handleError(fmt.Errorf("API/REST > finding job failed: %w", err), http.StatusUnprocessableEntity, rw)
return return
} }
@ -545,7 +545,7 @@ func (api *RestApi) stopJobByRequest(rw http.ResponseWriter, r *http.Request) {
// @router /jobs/delete_job/{id} [delete] // @router /jobs/delete_job/{id} [delete]
func (api *RestApi) deleteJobById(rw http.ResponseWriter, r *http.Request) { func (api *RestApi) deleteJobById(rw http.ResponseWriter, r *http.Request) {
if user := auth.GetUser(r.Context()); user != nil && !user.HasRole(auth.RoleApi) { if user := auth.GetUser(r.Context()); user != nil && !user.HasRole(auth.RoleApi) {
handleError(fmt.Errorf("missing role: %#v", auth.RoleApi), http.StatusForbidden, rw) handleError(fmt.Errorf("API/REST > missing role: %#v", auth.RoleApi), http.StatusForbidden, rw)
return return
} }
@ -555,23 +555,23 @@ func (api *RestApi) deleteJobById(rw http.ResponseWriter, r *http.Request) {
if ok { if ok {
id, e := strconv.ParseInt(id, 10, 64) id, e := strconv.ParseInt(id, 10, 64)
if e != nil { if e != nil {
handleError(fmt.Errorf("integer expected in path for id: %w", e), http.StatusBadRequest, rw) handleError(fmt.Errorf("API/REST > integer expected in path for id: %w", e), http.StatusBadRequest, rw)
return return
} }
err = api.JobRepository.DeleteJobById(id) err = api.JobRepository.DeleteJobById(id)
} else { } else {
handleError(errors.New("the parameter 'id' is required"), http.StatusBadRequest, rw) handleError(errors.New("API/REST > the parameter 'id' is required"), http.StatusBadRequest, rw)
return return
} }
if err != nil { if err != nil {
handleError(fmt.Errorf("deleting job failed: %w", err), http.StatusUnprocessableEntity, rw) handleError(fmt.Errorf("API/REST > deleting job failed: %w", err), http.StatusUnprocessableEntity, rw)
return return
} }
rw.Header().Add("Content-Type", "application/json") rw.Header().Add("Content-Type", "application/json")
rw.WriteHeader(http.StatusOK) rw.WriteHeader(http.StatusOK)
json.NewEncoder(rw).Encode(DeleteJobApiResponse{ json.NewEncoder(rw).Encode(DeleteJobApiResponse{
Message: fmt.Sprintf("Successfully deleted job %s", id), Message: fmt.Sprintf("API/REST > Successfully deleted job %s", id),
}) })
} }
@ -593,14 +593,14 @@ func (api *RestApi) deleteJobById(rw http.ResponseWriter, r *http.Request) {
// @router /jobs/delete_job/ [delete] // @router /jobs/delete_job/ [delete]
func (api *RestApi) deleteJobByRequest(rw http.ResponseWriter, r *http.Request) { func (api *RestApi) deleteJobByRequest(rw http.ResponseWriter, r *http.Request) {
if user := auth.GetUser(r.Context()); user != nil && !user.HasRole(auth.RoleApi) { if user := auth.GetUser(r.Context()); user != nil && !user.HasRole(auth.RoleApi) {
handleError(fmt.Errorf("missing role: %#v", auth.RoleApi), http.StatusForbidden, rw) handleError(fmt.Errorf("API/REST > missing role: %#v", auth.RoleApi), http.StatusForbidden, rw)
return return
} }
// Parse request body // Parse request body
req := DeleteJobApiRequest{} req := DeleteJobApiRequest{}
if err := decode(r.Body, &req); err != nil { if err := decode(r.Body, &req); err != nil {
handleError(fmt.Errorf("parsing request body failed: %w", err), http.StatusBadRequest, rw) handleError(fmt.Errorf("API/REST > parsing request body failed: %w", err), http.StatusBadRequest, rw)
return return
} }
@ -608,27 +608,27 @@ func (api *RestApi) deleteJobByRequest(rw http.ResponseWriter, r *http.Request)
var job *schema.Job var job *schema.Job
var err error var err error
if req.JobId == nil { if req.JobId == nil {
handleError(errors.New("the field 'jobId' is required"), http.StatusBadRequest, rw) handleError(errors.New("API/REST > the field 'jobId' is required"), http.StatusBadRequest, rw)
return return
} }
job, err = api.JobRepository.Find(req.JobId, req.Cluster, req.StartTime) job, err = api.JobRepository.Find(req.JobId, req.Cluster, req.StartTime)
if err != nil { if err != nil {
handleError(fmt.Errorf("finding job failed: %w", err), http.StatusUnprocessableEntity, rw) handleError(fmt.Errorf("API/REST > finding job failed: %w", err), http.StatusUnprocessableEntity, rw)
return return
} }
err = api.JobRepository.DeleteJobById(job.ID) err = api.JobRepository.DeleteJobById(job.ID)
if err != nil { if err != nil {
handleError(fmt.Errorf("deleting job failed: %w", err), http.StatusUnprocessableEntity, rw) handleError(fmt.Errorf("API/REST > deleting job failed: %w", err), http.StatusUnprocessableEntity, rw)
return return
} }
rw.Header().Add("Content-Type", "application/json") rw.Header().Add("Content-Type", "application/json")
rw.WriteHeader(http.StatusOK) rw.WriteHeader(http.StatusOK)
json.NewEncoder(rw).Encode(DeleteJobApiResponse{ json.NewEncoder(rw).Encode(DeleteJobApiResponse{
Message: fmt.Sprintf("Successfully deleted job %d", job.ID), Message: fmt.Sprintf("API/REST > Successfully deleted job %d", job.ID),
}) })
} }
@ -649,7 +649,7 @@ func (api *RestApi) deleteJobByRequest(rw http.ResponseWriter, r *http.Request)
// @router /jobs/delete_job_before/{ts} [delete] // @router /jobs/delete_job_before/{ts} [delete]
func (api *RestApi) deleteJobBefore(rw http.ResponseWriter, r *http.Request) { func (api *RestApi) deleteJobBefore(rw http.ResponseWriter, r *http.Request) {
if user := auth.GetUser(r.Context()); user != nil && !user.HasRole(auth.RoleApi) { if user := auth.GetUser(r.Context()); user != nil && !user.HasRole(auth.RoleApi) {
handleError(fmt.Errorf("missing role: %#v", auth.RoleApi), http.StatusForbidden, rw) handleError(fmt.Errorf("API/REST > missing role: %#v", auth.RoleApi), http.StatusForbidden, rw)
return return
} }
@ -660,24 +660,24 @@ func (api *RestApi) deleteJobBefore(rw http.ResponseWriter, r *http.Request) {
if ok { if ok {
ts, e := strconv.ParseInt(id, 10, 64) ts, e := strconv.ParseInt(id, 10, 64)
if e != nil { if e != nil {
handleError(fmt.Errorf("integer expected in path for ts: %w", e), http.StatusBadRequest, rw) handleError(fmt.Errorf("API/REST > integer expected in path for ts: %w", e), http.StatusBadRequest, rw)
return return
} }
cnt, err = api.JobRepository.DeleteJobsBefore(ts) cnt, err = api.JobRepository.DeleteJobsBefore(ts)
} else { } else {
handleError(errors.New("the parameter 'ts' is required"), http.StatusBadRequest, rw) handleError(errors.New("API/REST > the parameter 'ts' is required"), http.StatusBadRequest, rw)
return return
} }
if err != nil { if err != nil {
handleError(fmt.Errorf("deleting jobs failed: %w", err), http.StatusUnprocessableEntity, rw) handleError(fmt.Errorf("API/REST > deleting jobs failed: %w", err), http.StatusUnprocessableEntity, rw)
return return
} }
rw.Header().Add("Content-Type", "application/json") rw.Header().Add("Content-Type", "application/json")
rw.WriteHeader(http.StatusOK) rw.WriteHeader(http.StatusOK)
json.NewEncoder(rw).Encode(DeleteJobApiResponse{ json.NewEncoder(rw).Encode(DeleteJobApiResponse{
Message: fmt.Sprintf("Successfully deleted %d jobs", cnt), Message: fmt.Sprintf("API/REST > Successfully deleted %d jobs", cnt),
}) })
} }
@ -685,12 +685,12 @@ func (api *RestApi) checkAndHandleStopJob(rw http.ResponseWriter, job *schema.Jo
// Sanity checks // Sanity checks
if job == nil || job.StartTime.Unix() >= req.StopTime || job.State != schema.JobStateRunning { if job == nil || job.StartTime.Unix() >= req.StopTime || job.State != schema.JobStateRunning {
handleError(errors.New("stopTime must be larger than startTime and only running jobs can be stopped"), http.StatusBadRequest, rw) handleError(errors.New("API/REST > stopTime must be larger than startTime and only running jobs can be stopped"), http.StatusBadRequest, rw)
return return
} }
if req.State != "" && !req.State.Valid() { if req.State != "" && !req.State.Valid() {
handleError(fmt.Errorf("invalid job state: %#v", req.State), http.StatusBadRequest, rw) handleError(fmt.Errorf("API/REST > invalid job state: %#v", req.State), http.StatusBadRequest, rw)
return return
} else if req.State == "" { } else if req.State == "" {
req.State = schema.JobStateCompleted req.State = schema.JobStateCompleted
@ -700,11 +700,11 @@ func (api *RestApi) checkAndHandleStopJob(rw http.ResponseWriter, job *schema.Jo
job.Duration = int32(req.StopTime - job.StartTime.Unix()) job.Duration = int32(req.StopTime - job.StartTime.Unix())
job.State = req.State job.State = req.State
if err := api.JobRepository.Stop(job.ID, job.Duration, job.State, job.MonitoringStatus); err != nil { if err := api.JobRepository.Stop(job.ID, job.Duration, job.State, job.MonitoringStatus); err != nil {
handleError(fmt.Errorf("marking job as stopped failed: %w", err), http.StatusInternalServerError, rw) handleError(fmt.Errorf("API/REST > marking job as stopped failed: %w", err), http.StatusInternalServerError, rw)
return return
} }
log.Printf("archiving job... (dbid: %d): cluster=%s, jobId=%d, user=%s, startTime=%s", job.ID, job.Cluster, job.JobID, job.User, job.StartTime) log.Printf("API/REST > archiving job... (dbid: %d): cluster=%s, jobId=%d, user=%s, startTime=%s", job.ID, job.Cluster, job.JobID, job.User, job.StartTime)
// Send a response (with status OK). This means that errors that happen from here on forward // Send a response (with status OK). This means that errors that happen from here on forward
// can *NOT* be communicated to the client. If reading from a MetricDataRepository or // can *NOT* be communicated to the client. If reading from a MetricDataRepository or
@ -724,7 +724,7 @@ func (api *RestApi) checkAndHandleStopJob(rw http.ResponseWriter, job *schema.Jo
// func (api *RestApi) importJob(rw http.ResponseWriter, r *http.Request) { // func (api *RestApi) importJob(rw http.ResponseWriter, r *http.Request) {
// if user := auth.GetUser(r.Context()); user != nil && !user.HasRole(auth.RoleApi) { // if user := auth.GetUser(r.Context()); user != nil && !user.HasRole(auth.RoleApi) {
// handleError(fmt.Errorf("missing role: %#v", auth.RoleApi), http.StatusForbidden, rw) // handleError(fmt.Errorf("API/REST > missing role: %#v", auth.RoleApi), http.StatusForbidden, rw)
// return // return
// } // }
@ -733,12 +733,12 @@ func (api *RestApi) checkAndHandleStopJob(rw http.ResponseWriter, job *schema.Jo
// Data *schema.JobData `json:"data"` // Data *schema.JobData `json:"data"`
// } // }
// if err := decode(r.Body, &body); err != nil { // if err := decode(r.Body, &body); err != nil {
// handleError(fmt.Errorf("import failed: %s", err.Error()), http.StatusBadRequest, rw) // handleError(fmt.Errorf("API/REST > import failed: %s", err.Error()), http.StatusBadRequest, rw)
// return // return
// } // }
// if err := api.JobRepository.ImportJob(body.Meta, body.Data); err != nil { // if err := api.JobRepository.ImportJob(body.Meta, body.Data); err != nil {
// handleError(fmt.Errorf("import failed: %s", err.Error()), http.StatusUnprocessableEntity, rw) // handleError(fmt.Errorf("API/REST > import failed: %s", err.Error()), http.StatusUnprocessableEntity, rw)
// return // return
// } // }
@ -793,7 +793,7 @@ func (api *RestApi) getJWT(rw http.ResponseWriter, r *http.Request) {
me := auth.GetUser(r.Context()) me := auth.GetUser(r.Context())
if !me.HasRole(auth.RoleAdmin) { if !me.HasRole(auth.RoleAdmin) {
if username != me.Username { if username != me.Username {
http.Error(rw, "only admins are allowed to sign JWTs not for themselves", http.StatusForbidden) http.Error(rw, "API/REST > only admins are allowed to sign JWTs not for themselves", http.StatusForbidden)
return return
} }
} }
@ -818,13 +818,13 @@ func (api *RestApi) createUser(rw http.ResponseWriter, r *http.Request) {
rw.Header().Set("Content-Type", "text/plain") rw.Header().Set("Content-Type", "text/plain")
me := auth.GetUser(r.Context()) me := auth.GetUser(r.Context())
if !me.HasRole(auth.RoleAdmin) { if !me.HasRole(auth.RoleAdmin) {
http.Error(rw, "only admins are allowed to create new users", http.StatusForbidden) http.Error(rw, "API/REST > only admins are allowed to create new users", http.StatusForbidden)
return return
} }
username, password, role, name, email := r.FormValue("username"), r.FormValue("password"), r.FormValue("role"), r.FormValue("name"), r.FormValue("email") username, password, role, name, email := r.FormValue("username"), r.FormValue("password"), r.FormValue("role"), r.FormValue("name"), r.FormValue("email")
if len(password) == 0 && role != auth.RoleApi { if len(password) == 0 && role != auth.RoleApi {
http.Error(rw, "only API users are allowed to have a blank password (login will be impossible)", http.StatusBadRequest) http.Error(rw, "API/REST > only API users are allowed to have a blank password (login will be impossible)", http.StatusBadRequest)
return return
} }
@ -838,12 +838,12 @@ func (api *RestApi) createUser(rw http.ResponseWriter, r *http.Request) {
return return
} }
rw.Write([]byte(fmt.Sprintf("User %#v successfully created!\n", username))) rw.Write([]byte(fmt.Sprintf("API/REST > User %#v successfully created!\n", username)))
} }
func (api *RestApi) deleteUser(rw http.ResponseWriter, r *http.Request) { func (api *RestApi) deleteUser(rw http.ResponseWriter, r *http.Request) {
if user := auth.GetUser(r.Context()); !user.HasRole(auth.RoleAdmin) { if user := auth.GetUser(r.Context()); !user.HasRole(auth.RoleAdmin) {
http.Error(rw, "only admins are allowed to delete a user", http.StatusForbidden) http.Error(rw, "API/REST > only admins are allowed to delete a user", http.StatusForbidden)
return return
} }
@ -858,7 +858,7 @@ func (api *RestApi) deleteUser(rw http.ResponseWriter, r *http.Request) {
func (api *RestApi) getUsers(rw http.ResponseWriter, r *http.Request) { func (api *RestApi) getUsers(rw http.ResponseWriter, r *http.Request) {
if user := auth.GetUser(r.Context()); !user.HasRole(auth.RoleAdmin) { if user := auth.GetUser(r.Context()); !user.HasRole(auth.RoleAdmin) {
http.Error(rw, "only admins are allowed to fetch a list of users", http.StatusForbidden) http.Error(rw, "API/REST > only admins are allowed to fetch a list of users", http.StatusForbidden)
return return
} }
@ -873,7 +873,7 @@ func (api *RestApi) getUsers(rw http.ResponseWriter, r *http.Request) {
func (api *RestApi) updateUser(rw http.ResponseWriter, r *http.Request) { func (api *RestApi) updateUser(rw http.ResponseWriter, r *http.Request) {
if user := auth.GetUser(r.Context()); !user.HasRole(auth.RoleAdmin) { if user := auth.GetUser(r.Context()); !user.HasRole(auth.RoleAdmin) {
http.Error(rw, "only admins are allowed to update a user", http.StatusForbidden) http.Error(rw, "API/REST > only admins are allowed to update a user", http.StatusForbidden)
return return
} }
@ -903,7 +903,7 @@ func (api *RestApi) updateConfiguration(rw http.ResponseWriter, r *http.Request)
rw.Header().Set("Content-Type", "text/plain") rw.Header().Set("Content-Type", "text/plain")
key, value := r.FormValue("key"), r.FormValue("value") key, value := r.FormValue("key"), r.FormValue("value")
fmt.Printf("KEY: %#v\nVALUE: %#v\n", key, value) fmt.Printf("API/REST > KEY: %#v\nVALUE: %#v\n", key, value)
if err := repository.GetUserCfgRepo().UpdateConfig(key, value, auth.GetUser(r.Context())); err != nil { if err := repository.GetUserCfgRepo().UpdateConfig(key, value, auth.GetUser(r.Context())); err != nil {
http.Error(rw, err.Error(), http.StatusUnprocessableEntity) http.Error(rw, err.Error(), http.StatusUnprocessableEntity)
@ -915,7 +915,7 @@ func (api *RestApi) updateConfiguration(rw http.ResponseWriter, r *http.Request)
func (api *RestApi) putMachineState(rw http.ResponseWriter, r *http.Request) { func (api *RestApi) putMachineState(rw http.ResponseWriter, r *http.Request) {
if api.MachineStateDir == "" { if api.MachineStateDir == "" {
http.Error(rw, "not enabled", http.StatusNotFound) http.Error(rw, "API/REST > machine state not enabled", http.StatusNotFound)
return return
} }
@ -928,7 +928,7 @@ func (api *RestApi) putMachineState(rw http.ResponseWriter, r *http.Request) {
return return
} }
filename := filepath.Join(dir, fmt.Sprintf("%s.json", host)) filename := filepath.Join(dir, fmt.Sprintf("%s.json", host))
f, err := os.Create(filename) f, err := os.Create(filename)
if err != nil { if err != nil {
http.Error(rw, err.Error(), http.StatusInternalServerError) http.Error(rw, err.Error(), http.StatusInternalServerError)
@ -946,12 +946,12 @@ func (api *RestApi) putMachineState(rw http.ResponseWriter, r *http.Request) {
func (api *RestApi) getMachineState(rw http.ResponseWriter, r *http.Request) { func (api *RestApi) getMachineState(rw http.ResponseWriter, r *http.Request) {
if api.MachineStateDir == "" { if api.MachineStateDir == "" {
http.Error(rw, "not enabled", http.StatusNotFound) http.Error(rw, "API/REST > machine state not enabled", http.StatusNotFound)
return return
} }
vars := mux.Vars(r) vars := mux.Vars(r)
filename := filepath.Join(api.MachineStateDir, vars["cluster"], fmt.Sprintf("%s.json", vars["host"])) filename := filepath.Join(api.MachineStateDir, vars["cluster"], fmt.Sprintf("%s.json", vars["host"]))
// Sets the content-type and 'Last-Modified' Header and so on automatically // Sets the content-type and 'Last-Modified' Header and so on automatically
http.ServeFile(rw, r, filename) http.ServeFile(rw, r, filename)
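Throughout this file the pattern is the same: wrap the cause with %w, prepend the 'API/REST >' tag, and hand everything to handleError, which logs once and serializes the message for the client. A runnable reduction of that flow; the ErrorResponse field names are assumed, since the type definition is not part of this diff:

```go
package main

import (
	"encoding/json"
	"errors"
	"fmt"
	"log"
	"net/http"
)

// Stand-in for the ErrorResponse type used above (field names assumed).
type ErrorResponse struct {
	Status string `json:"status"`
	Error  string `json:"error"`
}

// Mirrors handleError above: log once with the source prefix, answer in JSON.
func handleError(err error, statusCode int, rw http.ResponseWriter) {
	log.Printf("API/REST > ERROR : %s", err.Error())
	rw.Header().Add("Content-Type", "application/json")
	rw.WriteHeader(statusCode)
	json.NewEncoder(rw).Encode(ErrorResponse{
		Status: http.StatusText(statusCode),
		Error:  err.Error(),
	})
}

func main() {
	http.HandleFunc("/api/jobs/", func(rw http.ResponseWriter, r *http.Request) {
		// %w wraps the cause, so errors.Is/errors.As still work upstream
		// while the client sees the full prefixed message.
		cause := errors.New("unexpected EOF")
		handleError(fmt.Errorf("API/REST > parsing request body failed: %w", cause),
			http.StatusBadRequest, rw)
	})
	log.Fatal(http.ListenAndServe(":8080", nil))
}
```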

View File

@ -99,7 +99,7 @@ func Init(db *sqlx.DB,
sessKey := os.Getenv("SESSION_KEY") sessKey := os.Getenv("SESSION_KEY")
if sessKey == "" { if sessKey == "" {
log.Warn("environment variable 'SESSION_KEY' not set (will use non-persistent random key)") log.Warn("AUTH/AUTH > environment variable 'SESSION_KEY' not set (will use non-persistent random key)")
bytes := make([]byte, 32) bytes := make([]byte, 32)
if _, err := rand.Read(bytes); err != nil { if _, err := rand.Read(bytes); err != nil {
return nil, err return nil, err
@ -169,7 +169,7 @@ func (auth *Authentication) Login(
user := (*User)(nil) user := (*User)(nil)
if username != "" { if username != "" {
if user, _ = auth.GetUser(username); err != nil { if user, _ = auth.GetUser(username); err != nil {
// log.Warnf("login of unkown user %#v", username) // log.Warnf("AUTH/AUTH > login of unknown user %#v", username)
_ = err _ = err
} }
} }
@ -181,14 +181,14 @@ func (auth *Authentication) Login(
user, err = authenticator.Login(user, rw, r) user, err = authenticator.Login(user, rw, r)
if err != nil { if err != nil {
log.Warnf("login failed: %s", err.Error()) log.Warnf("AUTH/AUTH > login of user '%s' failed: %s", username, err.Error())
onfailure(rw, r, err) onfailure(rw, r, err)
return return
} }
session, err := auth.sessionStore.New(r, "session") session, err := auth.sessionStore.New(r, "session")
if err != nil { if err != nil {
log.Errorf("session creation failed: %s", err.Error()) log.Errorf("AUTH/AUTH > session creation failed: %s", err.Error())
http.Error(rw, err.Error(), http.StatusInternalServerError) http.Error(rw, err.Error(), http.StatusInternalServerError)
return return
} }
@ -199,18 +199,18 @@ func (auth *Authentication) Login(
session.Values["username"] = user.Username session.Values["username"] = user.Username
session.Values["roles"] = user.Roles session.Values["roles"] = user.Roles
if err := auth.sessionStore.Save(r, rw, session); err != nil { if err := auth.sessionStore.Save(r, rw, session); err != nil {
log.Errorf("session save failed: %s", err.Error()) log.Errorf("AUTH/AUTH > session save failed: %s", err.Error())
http.Error(rw, err.Error(), http.StatusInternalServerError) http.Error(rw, err.Error(), http.StatusInternalServerError)
return return
} }
log.Infof("login successfull: user: %#v (roles: %v)", user.Username, user.Roles) log.Infof("AUTH/AUTH > login successful: user: %#v (roles: %v)", user.Username, user.Roles)
ctx := context.WithValue(r.Context(), ContextUserKey, user) ctx := context.WithValue(r.Context(), ContextUserKey, user)
onsuccess.ServeHTTP(rw, r.WithContext(ctx)) onsuccess.ServeHTTP(rw, r.WithContext(ctx))
return return
} }
log.Warn("login failed: no authenticator applied") log.Warn("AUTH/AUTH > login failed: no authenticator applied")
onfailure(rw, r, err) onfailure(rw, r, err)
}) })
} }
@ -226,7 +226,7 @@ func (auth *Authentication) Auth(
for _, authenticator := range auth.authenticators { for _, authenticator := range auth.authenticators {
user, err := authenticator.Auth(rw, r) user, err := authenticator.Auth(rw, r)
if err != nil { if err != nil {
log.Warnf("authentication failed: %s", err.Error()) log.Warnf("AUTH/AUTH > authentication failed: %s", err.Error())
http.Error(rw, err.Error(), http.StatusUnauthorized) http.Error(rw, err.Error(), http.StatusUnauthorized)
return return
} }
@ -239,7 +239,7 @@ func (auth *Authentication) Auth(
return return
} }
log.Warnf("authentication failed: %s", "no authenticator applied") log.Warnf("AUTH/AUTH > authentication failed: %s", "no authenticator applied")
// http.Error(rw, http.StatusText(http.StatusUnauthorized), http.StatusUnauthorized) // http.Error(rw, http.StatusText(http.StatusUnauthorized), http.StatusUnauthorized)
onfailure(rw, r, errors.New("unauthorized (login first or use a token)")) onfailure(rw, r, errors.New("unauthorized (login first or use a token)"))
}) })

View File

@ -41,7 +41,7 @@ func (ja *JWTAuthenticator) Init(auth *Authentication, conf interface{}) error {
pubKey, privKey := os.Getenv("JWT_PUBLIC_KEY"), os.Getenv("JWT_PRIVATE_KEY") pubKey, privKey := os.Getenv("JWT_PUBLIC_KEY"), os.Getenv("JWT_PRIVATE_KEY")
if pubKey == "" || privKey == "" { if pubKey == "" || privKey == "" {
log.Warn("environment variables 'JWT_PUBLIC_KEY' or 'JWT_PRIVATE_KEY' not set (token based authentication will not work)") log.Warn("AUTH/JWT > environment variables 'JWT_PUBLIC_KEY' or 'JWT_PRIVATE_KEY' not set (token based authentication will not work)")
} else { } else {
bytes, err := base64.StdEncoding.DecodeString(pubKey) bytes, err := base64.StdEncoding.DecodeString(pubKey)
if err != nil { if err != nil {
@ -75,20 +75,20 @@ func (ja *JWTAuthenticator) Init(auth *Authentication, conf interface{}) error {
// Warn if other necessary settings are not configured // Warn if other necessary settings are not configured
if ja.config != nil { if ja.config != nil {
if ja.config.CookieName == "" { if ja.config.CookieName == "" {
log.Warn("cookieName for JWTs not configured (cross login via JWT cookie will fail)") log.Warn("AUTH/JWT > cookieName for JWTs not configured (cross login via JWT cookie will fail)")
} }
if !ja.config.ForceJWTValidationViaDatabase { if !ja.config.ForceJWTValidationViaDatabase {
log.Warn("forceJWTValidationViaDatabase not set to true: CC will accept users and roles defined in JWTs regardless of its own database!") log.Warn("AUTH/JWT > forceJWTValidationViaDatabase not set to true: CC will accept users and roles defined in JWTs regardless of its own database!")
} }
if ja.config.TrustedExternalIssuer == "" { if ja.config.TrustedExternalIssuer == "" {
log.Warn("trustedExternalIssuer for JWTs not configured (cross login via JWT cookie will fail)") log.Warn("AUTH/JWT > trustedExternalIssuer for JWTs not configured (cross login via JWT cookie will fail)")
} }
} else { } else {
log.Warn("cookieName and trustedExternalIssuer for JWTs not configured (cross login via JWT cookie will fail)") log.Warn("AUTH/JWT > cookieName and trustedExternalIssuer for JWTs not configured (cross login via JWT cookie will fail)")
} }
} else { } else {
ja.publicKeyCrossLogin = nil ja.publicKeyCrossLogin = nil
log.Warn("environment variable 'CROSS_LOGIN_JWT_PUBLIC_KEY' not set (cross login token based authentication will not work)") log.Warn("AUTH/JWT > environment variable 'CROSS_LOGIN_JWT_PUBLIC_KEY' not set (cross login token based authentication will not work)")
} }
return nil return nil
@ -123,7 +123,7 @@ func (ja *JWTAuthenticator) Login(
if t.Method == jwt.SigningMethodHS256 || t.Method == jwt.SigningMethodHS512 { if t.Method == jwt.SigningMethodHS256 || t.Method == jwt.SigningMethodHS512 {
return ja.loginTokenKey, nil return ja.loginTokenKey, nil
} }
return nil, fmt.Errorf("unkown signing method for login token: %s (known: HS256, HS512, EdDSA)", t.Method.Alg()) return nil, fmt.Errorf("AUTH/JWT > unknown signing method for login token: %s (known: HS256, HS512, EdDSA)", t.Method.Alg())
}) })
if err != nil { if err != nil {
return nil, err return nil, err
@ -243,7 +243,7 @@ func (ja *JWTAuthenticator) Auth(
// Deny any logins for unknown usernames // Deny any logins for unknown usernames
if err != nil { if err != nil {
log.Warn("Could not find user from JWT in internal database.") log.Warn("AUTH/JWT > Could not find user from JWT in internal database.")
return nil, errors.New("unknown user") return nil, errors.New("unknown user")
} }
@ -264,7 +264,7 @@ func (ja *JWTAuthenticator) Auth(
// Create a session so that we no longer need the JWT Cookie // Create a session so that we no longer need the JWT Cookie
session, err := ja.auth.sessionStore.New(r, "session") session, err := ja.auth.sessionStore.New(r, "session")
if err != nil { if err != nil {
log.Errorf("session creation failed: %s", err.Error()) log.Errorf("AUTH/JWT > session creation failed: %s", err.Error())
http.Error(rw, err.Error(), http.StatusInternalServerError) http.Error(rw, err.Error(), http.StatusInternalServerError)
return nil, err return nil, err
} }
@ -276,7 +276,7 @@ func (ja *JWTAuthenticator) Auth(
session.Values["roles"] = roles session.Values["roles"] = roles
if err := ja.auth.sessionStore.Save(r, rw, session); err != nil { if err := ja.auth.sessionStore.Save(r, rw, session); err != nil {
log.Errorf("session save failed: %s", err.Error()) log.Errorf("AUTH/JWT > session save failed: %s", err.Error())
http.Error(rw, err.Error(), http.StatusInternalServerError) http.Error(rw, err.Error(), http.StatusInternalServerError)
return nil, err return nil, err
} }

View File

@ -33,7 +33,7 @@ func (la *LdapAuthenticator) Init(
la.syncPassword = os.Getenv("LDAP_ADMIN_PASSWORD") la.syncPassword = os.Getenv("LDAP_ADMIN_PASSWORD")
if la.syncPassword == "" { if la.syncPassword == "" {
log.Warn("environment variable 'LDAP_ADMIN_PASSWORD' not set (ldap sync will not work)") log.Warn("AUTH/LDAP > environment variable 'LDAP_ADMIN_PASSWORD' not set (ldap sync will not work)")
} }
if la.config != nil && la.config.SyncInterval != "" { if la.config != nil && la.config.SyncInterval != "" {
@ -49,11 +49,11 @@ func (la *LdapAuthenticator) Init(
go func() { go func() {
ticker := time.NewTicker(interval) ticker := time.NewTicker(interval)
for t := range ticker.C { for t := range ticker.C {
log.Printf("LDAP sync started at %s", t.Format(time.RFC3339)) log.Printf("AUTH/LDAP > sync started at %s", t.Format(time.RFC3339))
if err := la.Sync(); err != nil { if err := la.Sync(); err != nil {
log.Errorf("LDAP sync failed: %s", err.Error()) log.Errorf("AUTH/LDAP > sync failed: %s", err.Error())
} }
log.Print("LDAP sync done") log.Print("AUTH/LDAP > sync done")
} }
}() }()
} }
@ -147,13 +147,13 @@ func (la *LdapAuthenticator) Sync() error {
for username, where := range users { for username, where := range users {
if where == IN_DB && la.config.SyncDelOldUsers { if where == IN_DB && la.config.SyncDelOldUsers {
log.Debugf("ldap-sync: remove %#v (does not show up in LDAP anymore)", username) log.Debugf("AUTH/LDAP > sync: remove %#v (does not show up in LDAP anymore)", username)
if _, err := la.auth.db.Exec(`DELETE FROM user WHERE user.username = ?`, username); err != nil { if _, err := la.auth.db.Exec(`DELETE FROM user WHERE user.username = ?`, username); err != nil {
return err return err
} }
} else if where == IN_LDAP { } else if where == IN_LDAP {
name := newnames[username] name := newnames[username]
log.Debugf("ldap-sync: add %#v (name: %#v, roles: [user], ldap: true)", username, name) log.Debugf("AUTH/LDAP > sync: add %#v (name: %#v, roles: [user], ldap: true)", username, name)
if _, err := la.auth.db.Exec(`INSERT INTO user (username, ldap, name, roles) VALUES (?, ?, ?, ?)`, if _, err := la.auth.db.Exec(`INSERT INTO user (username, ldap, name, roles) VALUES (?, ?, ?, ?)`,
username, 1, name, "[\""+RoleUser+"\"]"); err != nil { username, 1, name, "[\""+RoleUser+"\"]"); err != nil {
return err return err

View File

@ -39,7 +39,7 @@ func (la *LocalAuthenticator) Login(
r *http.Request) (*User, error) { r *http.Request) (*User, error) {
if e := bcrypt.CompareHashAndPassword([]byte(user.Password), []byte(r.FormValue("password"))); e != nil { if e := bcrypt.CompareHashAndPassword([]byte(user.Password), []byte(r.FormValue("password"))); e != nil {
return nil, fmt.Errorf("user '%s' provided the wrong password (%w)", user.Username, e) return nil, fmt.Errorf("AUTH/LOCAL > user '%s' provided the wrong password (%w)", user.Username, e)
} }
return user, nil return user, nil

View File

@ -67,7 +67,7 @@ func (auth *Authentication) AddUser(user *User) error {
return err return err
} }
log.Infof("new user %#v created (roles: %s, auth-source: %d)", user.Username, rolesJson, user.AuthSource) log.Infof("AUTH/USERS > new user %#v created (roles: %s, auth-source: %d)", user.Username, rolesJson, user.AuthSource)
return nil return nil
} }
@ -121,12 +121,12 @@ func (auth *Authentication) AddRole(
} }
if role != RoleAdmin && role != RoleApi && role != RoleUser && role != RoleSupport { if role != RoleAdmin && role != RoleApi && role != RoleUser && role != RoleSupport {
return fmt.Errorf("invalid user role: %#v", role) return fmt.Errorf("AUTH/USERS > invalid user role: %#v", role)
} }
for _, r := range user.Roles { for _, r := range user.Roles {
if r == role { if r == role {
return fmt.Errorf("user %#v already has role %#v", username, role) return fmt.Errorf("AUTH/USERS > user %#v already has role %#v", username, role)
} }
} }
@ -144,7 +144,7 @@ func (auth *Authentication) RemoveRole(ctx context.Context, username string, rol
} }
if role != RoleAdmin && role != RoleApi && role != RoleUser && role != RoleSupport { if role != RoleAdmin && role != RoleApi && role != RoleUser && role != RoleSupport {
return fmt.Errorf("invalid user role: %#v", role) return fmt.Errorf("AUTH/USERS > invalid user role: %#v", role)
} }
var exists bool var exists bool
@ -164,7 +164,7 @@ func (auth *Authentication) RemoveRole(ctx context.Context, username string, rol
} }
return nil return nil
} else { } else {
return fmt.Errorf("user %#v already does not have role %#v", username, role) return fmt.Errorf("AUTH/USERS > user %#v already does not have role %#v", username, role)
} }
} }
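AddRole and RemoveRole above inline the same four-way role check with the same prefixed error text. A hypothetical helper, not part of this commit, that would keep both in one place (the role string values are assumed):

```go
package auth

import "fmt"

// Assumed values for the role constants referenced in the diff above.
const (
	RoleAdmin   = "admin"
	RoleApi     = "api"
	RoleUser    = "user"
	RoleSupport = "support"
)

// validRole is hypothetical: it centralizes the check that AddRole and
// RemoveRole currently repeat, including the 'AUTH/USERS >' error prefix.
func validRole(role string) error {
	switch role {
	case RoleAdmin, RoleApi, RoleUser, RoleSupport:
		return nil
	}
	return fmt.Errorf("AUTH/USERS > invalid user role: %#v", role)
}
```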

View File

@ -49,20 +49,20 @@ func Init(flagConfigFile string) {
raw, err := os.ReadFile(flagConfigFile) raw, err := os.ReadFile(flagConfigFile)
if err != nil { if err != nil {
if !os.IsNotExist(err) { if !os.IsNotExist(err) {
log.Fatal(err) log.Fatalf("CONFIG/CONFIG > ERROR: %v", err)
} }
} else { } else {
if err := schema.Validate(schema.Config, bytes.NewReader(raw)); err != nil { if err := schema.Validate(schema.Config, bytes.NewReader(raw)); err != nil {
log.Fatalf("Validate config: %v\n", err) log.Fatalf("CONFIG/CONFIG > Validate config: %v\n", err)
} }
dec := json.NewDecoder(bytes.NewReader(raw)) dec := json.NewDecoder(bytes.NewReader(raw))
dec.DisallowUnknownFields() dec.DisallowUnknownFields()
if err := dec.Decode(&Keys); err != nil { if err := dec.Decode(&Keys); err != nil {
log.Fatal(err) log.Fatalf("CONFIG/CONFIG > could not decode: %v", err)
} }
if Keys.Clusters == nil || len(Keys.Clusters) < 1 { if Keys.Clusters == nil || len(Keys.Clusters) < 1 {
log.Fatal("At least one cluster required in config!") log.Fatal("CONFIG/CONFIG > At least one cluster required in config!")
} }
} }
} }
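The config loader above pairs schema validation with a strict JSON decode: DisallowUnknownFields turns misspelled keys into hard errors instead of silent no-ops, and the cluster check guards the one setting nothing works without. A reduced, runnable sketch (struct fields and JSON tags are assumed; the schema.Validate step and the IsNotExist special case are omitted):

```go
package main

import (
	"bytes"
	"encoding/json"
	"log"
	"os"
)

// Minimal stand-in for the decoded config keys (fields assumed).
type Config struct {
	Addr     string        `json:"addr"`
	Clusters []interface{} `json:"clusters"`
}

func main() {
	raw, err := os.ReadFile("./config.json")
	if err != nil {
		log.Fatalf("CONFIG/CONFIG > ERROR: %v", err)
	}

	var keys Config
	dec := json.NewDecoder(bytes.NewReader(raw))
	dec.DisallowUnknownFields() // unknown keys fail the decode instead of being dropped
	if err := dec.Decode(&keys); err != nil {
		log.Fatalf("CONFIG/CONFIG > could not decode: %v", err)
	}
	if len(keys.Clusters) < 1 {
		log.Fatal("CONFIG/CONFIG > At least one cluster required in config!")
	}
}
```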

View File

@ -51,7 +51,7 @@ func (r *mutationResolver) CreateTag(ctx context.Context, typeArg string, name s
// DeleteTag is the resolver for the deleteTag field. // DeleteTag is the resolver for the deleteTag field.
func (r *mutationResolver) DeleteTag(ctx context.Context, id string) (string, error) { func (r *mutationResolver) DeleteTag(ctx context.Context, id string) (string, error) {
panic(fmt.Errorf("not implemented: DeleteTag - deleteTag")) panic(fmt.Errorf("GRAPH/RESOLVERS > not implemented: DeleteTag - deleteTag"))
} }
// AddTagsToJob is the resolver for the addTagsToJob field. // AddTagsToJob is the resolver for the addTagsToJob field.
@ -175,7 +175,7 @@ func (r *queryResolver) JobMetrics(ctx context.Context, id string, metrics []str
for name, md := range data { for name, md := range data {
for scope, metric := range md { for scope, metric := range md {
if metric.Scope != schema.MetricScope(scope) { if metric.Scope != schema.MetricScope(scope) {
panic("WTF?") panic("GRAPH/RESOLVERS > metric.Scope != schema.MetricScope(scope) : Should not happen!")
} }
res = append(res, &model.JobMetricWithName{ res = append(res, &model.JobMetricWithName{

View File

@ -211,7 +211,7 @@ func (r *queryResolver) rooflineHeatmap(
return nil, err return nil, err
} }
if len(jobs) > MAX_JOBS_FOR_ANALYSIS { if len(jobs) > MAX_JOBS_FOR_ANALYSIS {
return nil, fmt.Errorf("too many jobs matched (max: %d)", MAX_JOBS_FOR_ANALYSIS) return nil, fmt.Errorf("GRAPH/STATS > too many jobs matched (max: %d)", MAX_JOBS_FOR_ANALYSIS)
} }
fcols, frows := float64(cols), float64(rows) fcols, frows := float64(cols), float64(rows)
@ -233,14 +233,14 @@ func (r *queryResolver) rooflineHeatmap(
flops_, membw_ := jobdata["flops_any"], jobdata["mem_bw"] flops_, membw_ := jobdata["flops_any"], jobdata["mem_bw"]
if flops_ == nil && membw_ == nil { if flops_ == nil && membw_ == nil {
return nil, fmt.Errorf("'flops_any' or 'mem_bw' missing for job %d", job.ID) return nil, fmt.Errorf("GRAPH/STATS > 'flops_any' or 'mem_bw' missing for job %d", job.ID)
} }
flops, ok1 := flops_["node"] flops, ok1 := flops_["node"]
membw, ok2 := membw_["node"] membw, ok2 := membw_["node"]
if !ok1 || !ok2 { if !ok1 || !ok2 {
// TODO/FIXME: // TODO/FIXME:
return nil, errors.New("todo: rooflineHeatmap() query not implemented for where flops_any or mem_bw not available at 'node' level") return nil, errors.New("GRAPH/STATS > todo: rooflineHeatmap() query not implemented where flops_any or mem_bw are not available at 'node' level")
} }
for n := 0; n < len(flops.Series); n++ { for n := 0; n < len(flops.Series); n++ {
@ -275,7 +275,7 @@ func (r *queryResolver) jobsFootprints(ctx context.Context, filter []*model.JobF
return nil, err return nil, err
} }
if len(jobs) > MAX_JOBS_FOR_ANALYSIS { if len(jobs) > MAX_JOBS_FOR_ANALYSIS {
return nil, fmt.Errorf("too many jobs matched (max: %d)", MAX_JOBS_FOR_ANALYSIS) return nil, fmt.Errorf("GRAPH/STATS > too many jobs matched (max: %d)", MAX_JOBS_FOR_ANALYSIS)
} }
avgs := make([][]schema.Float, len(metrics)) avgs := make([][]schema.Float, len(metrics))

View File

@ -202,7 +202,7 @@ func (ccms *CCMetricStore) LoadData(
for _, res := range row { for _, res := range row {
if res.Error != nil { if res.Error != nil {
errors = append(errors, fmt.Sprintf("failed to fetch '%s' from host '%s': %s", query.Metric, query.Hostname, *res.Error)) errors = append(errors, fmt.Sprintf("METRICDATA/CCMS > failed to fetch '%s' from host '%s': %s", query.Metric, query.Hostname, *res.Error))
continue continue
} }
@ -245,7 +245,7 @@ func (ccms *CCMetricStore) LoadData(
} }
if len(errors) != 0 { if len(errors) != 0 {
return jobData, fmt.Errorf("cc-metric-store: %s", strings.Join(errors, ", ")) return jobData, fmt.Errorf("METRICDATA/CCMS > Errors: %s", strings.Join(errors, ", "))
} }
return jobData, nil return jobData, nil
@ -272,8 +272,8 @@ func (ccms *CCMetricStore) buildQueries(
remoteName := ccms.toRemoteName(metric) remoteName := ccms.toRemoteName(metric)
mc := archive.GetMetricConfig(job.Cluster, metric) mc := archive.GetMetricConfig(job.Cluster, metric)
if mc == nil { if mc == nil {
// return nil, fmt.Errorf("metric '%s' is not specified for cluster '%s'", metric, job.Cluster) // return nil, fmt.Errorf("METRICDATA/CCMS > metric '%s' is not specified for cluster '%s'", metric, job.Cluster)
// log.Printf("metric '%s' is not specified for cluster '%s'", metric, job.Cluster) // log.Printf("METRICDATA/CCMS > metric '%s' is not specified for cluster '%s'", metric, job.Cluster)
continue continue
} }
@ -483,7 +483,7 @@ func (ccms *CCMetricStore) buildQueries(
continue continue
} }
return nil, nil, fmt.Errorf("TODO: unhandled case: native-scope=%s, requested-scope=%s", nativeScope, requestedScope) return nil, nil, fmt.Errorf("METRICDATA/CCMS > TODO: unhandled case: native-scope=%s, requested-scope=%s", nativeScope, requestedScope)
} }
} }
} }
@ -521,7 +521,7 @@ func (ccms *CCMetricStore) LoadStats(
metric := ccms.toLocalName(query.Metric) metric := ccms.toLocalName(query.Metric)
data := res[0] data := res[0]
if data.Error != nil { if data.Error != nil {
return nil, fmt.Errorf("fetching %s for node %s failed: %s", metric, query.Hostname, *data.Error) return nil, fmt.Errorf("METRICDATA/CCMS > fetching %s for node %s failed: %s", metric, query.Hostname, *data.Error)
} }
metricdata, ok := stats[metric] metricdata, ok := stats[metric]
@ -531,7 +531,7 @@ func (ccms *CCMetricStore) LoadStats(
} }
if data.Avg.IsNaN() || data.Min.IsNaN() || data.Max.IsNaN() { if data.Avg.IsNaN() || data.Min.IsNaN() || data.Max.IsNaN() {
return nil, fmt.Errorf("fetching %s for node %s failed: %s", metric, query.Hostname, "avg/min/max is NaN") return nil, fmt.Errorf("METRICDATA/CCMS > fetching %s for node %s failed: %s", metric, query.Hostname, "avg/min/max is NaN")
} }
metricdata[query.Hostname] = schema.MetricStatistics{ metricdata[query.Hostname] = schema.MetricStatistics{
@ -593,11 +593,11 @@ func (ccms *CCMetricStore) LoadNodeData(
metric := ccms.toLocalName(query.Metric) metric := ccms.toLocalName(query.Metric)
qdata := res[0] qdata := res[0]
if qdata.Error != nil { if qdata.Error != nil {
errors = append(errors, fmt.Sprintf("fetching %s for node %s failed: %s", metric, query.Hostname, *qdata.Error)) errors = append(errors, fmt.Sprintf("METRICDATA/CCMS > fetching %s for node %s failed: %s", metric, query.Hostname, *qdata.Error))
} }
if qdata.Avg.IsNaN() || qdata.Min.IsNaN() || qdata.Max.IsNaN() { if qdata.Avg.IsNaN() || qdata.Min.IsNaN() || qdata.Max.IsNaN() {
// return nil, fmt.Errorf("fetching %s for node %s failed: %s", metric, query.Hostname, "avg/min/max is NaN") // return nil, fmt.Errorf("METRICDATA/CCMS > fetching %s for node %s failed: %s", metric, query.Hostname, "avg/min/max is NaN")
qdata.Avg, qdata.Min, qdata.Max = 0., 0., 0. qdata.Avg, qdata.Min, qdata.Max = 0., 0., 0.
} }
@ -627,7 +627,7 @@ func (ccms *CCMetricStore) LoadNodeData(
} }
if len(errors) != 0 { if len(errors) != 0 {
return data, fmt.Errorf("cc-metric-store: %s", strings.Join(errors, ", ")) return data, fmt.Errorf("METRICDATA/CCMS > errors: %s", strings.Join(errors, ", "))
} }
return data, nil return data, nil
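LoadData and LoadNodeData above deliberately keep going after a failed query: failures are collected as strings and joined into one error at the end, so partial data is still returned alongside a summary of what broke. The shape of that pattern, reduced to a runnable sketch (on Go 1.20+, errors.Join would be a typed alternative):

```go
package main

import (
	"fmt"
	"strings"
)

// Accumulate-then-join, as in LoadData above: keep the successful results,
// return them together with one combined error for the failures.
func loadAll(metrics []string) (map[string]float64, error) {
	data := make(map[string]float64)
	errs := []string{}
	for _, m := range metrics {
		if m == "" { // stand-in for a fetch that returned an error
			errs = append(errs, fmt.Sprintf("METRICDATA/CCMS > failed to fetch %q", m))
			continue
		}
		data[m] = 1.0
	}
	if len(errs) != 0 {
		return data, fmt.Errorf("METRICDATA/CCMS > errors: %s", strings.Join(errs, ", "))
	}
	return data, nil
}

func main() {
	d, err := loadAll([]string{"flops_any", "", "mem_bw"})
	fmt.Println(d, err)
}
```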

View File

@ -10,12 +10,12 @@ import (
"encoding/json" "encoding/json"
"errors" "errors"
"fmt" "fmt"
"log"
"strings" "strings"
"time" "time"
"github.com/ClusterCockpit/cc-backend/pkg/archive" "github.com/ClusterCockpit/cc-backend/pkg/archive"
"github.com/ClusterCockpit/cc-backend/pkg/schema" "github.com/ClusterCockpit/cc-backend/pkg/schema"
"github.com/ClusterCockpit/cc-backend/pkg/log"
influxdb2 "github.com/influxdata/influxdb-client-go/v2" influxdb2 "github.com/influxdata/influxdb-client-go/v2"
influxdb2Api "github.com/influxdata/influxdb-client-go/v2/api" influxdb2Api "github.com/influxdata/influxdb-client-go/v2/api"
) )
@ -71,7 +71,7 @@ func (idb *InfluxDBv2DataRepository) LoadData(
for _, h := range job.Resources { for _, h := range job.Resources {
if h.HWThreads != nil || h.Accelerators != nil { if h.HWThreads != nil || h.Accelerators != nil {
// TODO // TODO
return nil, errors.New("the InfluxDB metric data repository does not yet support HWThreads or Accelerators") return nil, errors.New("METRICDATA/INFLUXV2 > the InfluxDB metric data repository does not yet support HWThreads or Accelerators")
} }
hostsConds = append(hostsConds, fmt.Sprintf(`r["hostname"] == "%s"`, h.Hostname)) hostsConds = append(hostsConds, fmt.Sprintf(`r["hostname"] == "%s"`, h.Hostname))
} }
@ -84,7 +84,7 @@ func (idb *InfluxDBv2DataRepository) LoadData(
switch scope { switch scope {
case "node": case "node":
// Get Finest Granularity, Group By Measurement and Hostname (== Metric / Node), Calculate Mean for 60s windows // Get Finest Granularity, Group By Measurement and Hostname (== Metric / Node), Calculate Mean for 60s windows
// log.Println("Note: Scope 'node' requested. ") // log.Info("METRICDATA/INFLUXV2 > Scope 'node' requested. ")
query = fmt.Sprintf(` query = fmt.Sprintf(`
from(bucket: "%s") from(bucket: "%s")
|> range(start: %s, stop: %s) |> range(start: %s, stop: %s)
@ -97,10 +97,10 @@ func (idb *InfluxDBv2DataRepository) LoadData(
idb.formatTime(job.StartTime), idb.formatTime(idb.epochToTime(job.StartTimeUnix+int64(job.Duration)+int64(1))), idb.formatTime(job.StartTime), idb.formatTime(idb.epochToTime(job.StartTimeUnix+int64(job.Duration)+int64(1))),
measurementsCond, hostsCond) measurementsCond, hostsCond)
case "socket": case "socket":
log.Println("Note: Scope 'socket' requested, but not yet supported: Will return 'node' scope only. ") log.Info("METRICDATA/INFLUXV2 > Scope 'socket' requested, but not yet supported: Will return 'node' scope only. ")
continue continue
case "core": case "core":
log.Println("Note: Scope 'core' requested, but not yet supported: Will return 'node' scope only. ") log.Info("METRICDATA/INFLUXV2 > Scope 'core' requested, but not yet supported: Will return 'node' scope only. ")
continue continue
// Get Finest Granularity only, Set NULL to 0.0 // Get Finest Granularity only, Set NULL to 0.0
// query = fmt.Sprintf(` // query = fmt.Sprintf(`
@ -114,9 +114,9 @@ func (idb *InfluxDBv2DataRepository) LoadData(
// idb.formatTime(job.StartTime), idb.formatTime(idb.epochToTime(job.StartTimeUnix + int64(job.Duration) + int64(1) )), // idb.formatTime(job.StartTime), idb.formatTime(idb.epochToTime(job.StartTimeUnix + int64(job.Duration) + int64(1) )),
// measurementsCond, hostsCond) // measurementsCond, hostsCond)
default: default:
log.Println("Note: Unknown Scope requested: Will return 'node' scope. ") log.Info("METRICDATA/INFLUXV2 > Unknown Scope requested: Will return 'node' scope. ")
continue continue
// return nil, errors.New("the InfluxDB metric data repository does not yet support other scopes than 'node'") // return nil, errors.New("METRICDATA/INFLUXV2 > the InfluxDB metric data repository does not yet support other scopes than 'node'")
} }
rows, err := idb.queryClient.Query(ctx, query) rows, err := idb.queryClient.Query(ctx, query)
@ -208,15 +208,15 @@ func (idb *InfluxDBv2DataRepository) LoadData(
for _, scope := range scopes { for _, scope := range scopes {
if scope == "node" { // No 'socket/core' support yet if scope == "node" { // No 'socket/core' support yet
for metric, nodes := range stats { for metric, nodes := range stats {
// log.Println(fmt.Sprintf("<< Add Stats for : Field %s >>", metric)) // log.Debugf("<< Add Stats for : Field %s >>", metric)
for node, stats := range nodes { for node, stats := range nodes {
// log.Println(fmt.Sprintf("<< Add Stats for : Host %s : Min %.2f, Max %.2f, Avg %.2f >>", node, stats.Min, stats.Max, stats.Avg )) // log.Debugf("<< Add Stats for : Host %s : Min %.2f, Max %.2f, Avg %.2f >>", node, stats.Min, stats.Max, stats.Avg )
for index, _ := range jobData[metric][scope].Series { for index, _ := range jobData[metric][scope].Series {
// log.Println(fmt.Sprintf("<< Try to add Stats to Series in Position %d >>", index)) // log.Debugf("<< Try to add Stats to Series in Position %d >>", index)
if jobData[metric][scope].Series[index].Hostname == node { if jobData[metric][scope].Series[index].Hostname == node {
// log.Println(fmt.Sprintf("<< Match for Series in Position %d : Host %s >>", index, jobData[metric][scope].Series[index].Hostname)) // log.Debugf("<< Match for Series in Position %d : Host %s >>", index, jobData[metric][scope].Series[index].Hostname)
jobData[metric][scope].Series[index].Statistics = &schema.MetricStatistics{Avg: stats.Avg, Min: stats.Min, Max: stats.Max} jobData[metric][scope].Series[index].Statistics = &schema.MetricStatistics{Avg: stats.Avg, Min: stats.Min, Max: stats.Max}
// log.Println(fmt.Sprintf("<< Result Inner: Min %.2f, Max %.2f, Avg %.2f >>", jobData[metric][scope].Series[index].Statistics.Min, jobData[metric][scope].Series[index].Statistics.Max, jobData[metric][scope].Series[index].Statistics.Avg)) // log.Debugf("<< Result Inner: Min %.2f, Max %.2f, Avg %.2f >>", jobData[metric][scope].Series[index].Statistics.Min, jobData[metric][scope].Series[index].Statistics.Max, jobData[metric][scope].Series[index].Statistics.Avg)
} }
} }
} }
@ -228,9 +228,9 @@ func (idb *InfluxDBv2DataRepository) LoadData(
// for _, scope := range scopes { // for _, scope := range scopes {
// for _, met := range metrics { // for _, met := range metrics {
// for _, series := range jobData[met][scope].Series { // for _, series := range jobData[met][scope].Series {
// log.Println(fmt.Sprintf("<< Result: %d data points for metric %s on %s with scope %s, Stats: Min %.2f, Max %.2f, Avg %.2f >>", // log.Debugf("<< Result: %d data points for metric %s on %s with scope %s, Stats: Min %.2f, Max %.2f, Avg %.2f >>",
// len(series.Data), met, series.Hostname, scope, // len(series.Data), met, series.Hostname, scope,
// series.Statistics.Min, series.Statistics.Max, series.Statistics.Avg)) // series.Statistics.Min, series.Statistics.Max, series.Statistics.Avg)
// } // }
// } // }
// } // }
@ -249,7 +249,7 @@ func (idb *InfluxDBv2DataRepository) LoadStats(
for _, h := range job.Resources { for _, h := range job.Resources {
if h.HWThreads != nil || h.Accelerators != nil { if h.HWThreads != nil || h.Accelerators != nil {
// TODO // TODO
return nil, errors.New("the InfluxDB metric data repository does not yet support HWThreads or Accelerators") return nil, errors.New("METRICDATA/INFLUXV2 > the InfluxDB metric data repository does not yet support HWThreads or Accelerators")
} }
hostsConds = append(hostsConds, fmt.Sprintf(`r["hostname"] == "%s"`, h.Hostname)) hostsConds = append(hostsConds, fmt.Sprintf(`r["hostname"] == "%s"`, h.Hostname))
} }
@ -258,7 +258,7 @@ func (idb *InfluxDBv2DataRepository) LoadStats(
// lenMet := len(metrics) // lenMet := len(metrics)
for _, metric := range metrics { for _, metric := range metrics {
// log.Println(fmt.Sprintf("<< You are here: %s (Index %d of %d metrics)", metric, index, lenMet)) // log.Debugf("<< You are here: %s (Index %d of %d metrics)", metric, index, lenMet)
query := fmt.Sprintf(` query := fmt.Sprintf(`
data = from(bucket: "%s") data = from(bucket: "%s")
@ -285,17 +285,17 @@ func (idb *InfluxDBv2DataRepository) LoadStats(
avg, avgok := row.ValueByKey("avg").(float64) avg, avgok := row.ValueByKey("avg").(float64)
if !avgok { if !avgok {
// log.Println(fmt.Sprintf(">> Assertion error for metric %s, statistic AVG. Expected 'float64', got %v", metric, avg)) // log.Debugf(">> Assertion error for metric %s, statistic AVG. Expected 'float64', got %v", metric, avg)
avg = 0.0 avg = 0.0
} }
min, minok := row.ValueByKey("min").(float64) min, minok := row.ValueByKey("min").(float64)
if !minok { if !minok {
// log.Println(fmt.Sprintf(">> Assertion error for metric %s, statistic MIN. Expected 'float64', got %v", metric, min)) // log.Debugf(">> Assertion error for metric %s, statistic MIN. Expected 'float64', got %v", metric, min)
min = 0.0 min = 0.0
} }
max, maxok := row.ValueByKey("max").(float64) max, maxok := row.ValueByKey("max").(float64)
if !maxok { if !maxok {
// log.Println(fmt.Sprintf(">> Assertion error for metric %s, statistic MAX. Expected 'float64', got %v", metric, max)) // log.Debugf(">> Assertion error for metric %s, statistic MAX. Expected 'float64', got %v", metric, max)
max = 0.0 max = 0.0
} }
@ -319,7 +319,7 @@ func (idb *InfluxDBv2DataRepository) LoadNodeData(
ctx context.Context) (map[string]map[string][]*schema.JobMetric, error) { ctx context.Context) (map[string]map[string][]*schema.JobMetric, error) {
// TODO : Implement to be used in Analysis and System/Node views // TODO : Implement to be used in Analysis and System/Node views
log.Println(fmt.Sprintf("LoadNodeData unimplemented for InfluxDBv2DataRepository, Args: cluster %s, metrics %v, nodes %v, scopes %v", cluster, metrics, nodes, scopes)) log.Infof("METRICDATA/INFLUXV2 > LoadNodeData unimplemented for InfluxDBv2DataRepository, Args: cluster %s, metrics %v, nodes %v, scopes %v", cluster, metrics, nodes, scopes)
return nil, errors.New("unimplemented for InfluxDBv2DataRepository") return nil, errors.New("METRICDATA/INFLUXV2 > unimplemented for InfluxDBv2DataRepository")
} }

View File

@ -60,7 +60,7 @@ func Init(disableArchive bool) error {
case "test": case "test":
mdr = &TestMetricDataRepository{} mdr = &TestMetricDataRepository{}
default: default:
return fmt.Errorf("unkown metric data repository '%s' for cluster '%s'", kind.Kind, cluster.Name) return fmt.Errorf("METRICDATA/METRICDATA > unkown metric data repository '%s' for cluster '%s'", kind.Kind, cluster.Name)
} }
if err := mdr.Init(cluster.MetricDataRepository); err != nil { if err := mdr.Init(cluster.MetricDataRepository); err != nil {
@ -90,7 +90,7 @@ func LoadData(job *schema.Job,
repo, ok := metricDataRepos[job.Cluster] repo, ok := metricDataRepos[job.Cluster]
if !ok { if !ok {
return fmt.Errorf("no metric data repository configured for '%s'", job.Cluster), 0, 0 return fmt.Errorf("METRICDATA/METRICDATA > no metric data repository configured for '%s'", job.Cluster), 0, 0
} }
if scopes == nil { if scopes == nil {
@ -107,7 +107,7 @@ func LoadData(job *schema.Job,
jd, err = repo.LoadData(job, metrics, scopes, ctx) jd, err = repo.LoadData(job, metrics, scopes, ctx)
if err != nil { if err != nil {
if len(jd) != 0 { if len(jd) != 0 {
log.Errorf("partial error: %s", err.Error()) log.Errorf("METRICDATA/METRICDATA > partial error: %s", err.Error())
} else { } else {
return err, 0, 0 return err, 0, 0
} }
@ -182,7 +182,7 @@ func LoadAverages(
repo, ok := metricDataRepos[job.Cluster] repo, ok := metricDataRepos[job.Cluster]
if !ok { if !ok {
return fmt.Errorf("no metric data repository configured for '%s'", job.Cluster) return fmt.Errorf("METRICDATA/METRICDATA > no metric data repository configured for '%s'", job.Cluster)
} }
stats, err := repo.LoadStats(job, metrics, ctx) stats, err := repo.LoadStats(job, metrics, ctx)
@ -217,7 +217,7 @@ func LoadNodeData(
repo, ok := metricDataRepos[cluster] repo, ok := metricDataRepos[cluster]
if !ok { if !ok {
return nil, fmt.Errorf("no metric data repository configured for '%s'", cluster) return nil, fmt.Errorf("METRICDATA/METRICDATA > no metric data repository configured for '%s'", cluster)
} }
if metrics == nil { if metrics == nil {
@ -229,14 +229,14 @@ func LoadNodeData(
data, err := repo.LoadNodeData(cluster, metrics, nodes, scopes, from, to, ctx) data, err := repo.LoadNodeData(cluster, metrics, nodes, scopes, from, to, ctx)
if err != nil { if err != nil {
if len(data) != 0 { if len(data) != 0 {
log.Errorf("partial error: %s", err.Error()) log.Errorf("METRICDATA/METRICDATA > partial error: %s", err.Error())
} else { } else {
return nil, err return nil, err
} }
} }
if data == nil { if data == nil {
return nil, fmt.Errorf("the metric data repository for '%s' does not support this query", cluster) return nil, fmt.Errorf("METRICDATA/METRICDATA > the metric data repository for '%s' does not support this query", cluster)
} }
return data, nil return data, nil

View File

@ -163,7 +163,7 @@ func (pdb *PrometheusDataRepository) Init(rawConfig json.RawMessage) error {
rt = promcfg.NewBasicAuthRoundTripper(config.Username, prom_pw, "", promapi.DefaultRoundTripper) rt = promcfg.NewBasicAuthRoundTripper(config.Username, prom_pw, "", promapi.DefaultRoundTripper)
} else { } else {
if config.Username != "" { if config.Username != "" {
return errors.New("Prometheus username provided, but PROMETHEUS_PASSWORD not set.") return errors.New("METRICDATA/PROMETHEUS > Prometheus username provided, but PROMETHEUS_PASSWORD not set.")
} }
} }
// init client // init client
@ -184,9 +184,9 @@ func (pdb *PrometheusDataRepository) Init(rawConfig json.RawMessage) error {
for metric, templ := range config.Templates { for metric, templ := range config.Templates {
pdb.templates[metric], err = template.New(metric).Parse(templ) pdb.templates[metric], err = template.New(metric).Parse(templ)
if err == nil { if err == nil {
log.Debugf("Added PromQL template for %s: %s", metric, templ) log.Debugf("METRICDATA/PROMETHEUS > Added PromQL template for %s: %s", metric, templ)
} else { } else {
log.Errorf("Failed to parse PromQL template %s for metric %s", templ, metric) log.Errorf("METRICDATA/PROMETHEUS > Failed to parse PromQL template %s for metric %s", templ, metric)
} }
} }
return nil return nil
@ -213,14 +213,14 @@ func (pdb *PrometheusDataRepository) FormatQuery(
if templ, ok := pdb.templates[metric]; ok { if templ, ok := pdb.templates[metric]; ok {
err := templ.Execute(buf, args) err := templ.Execute(buf, args)
if err != nil { if err != nil {
return "", errors.New(fmt.Sprintf("Error compiling template %v", templ)) return "", errors.New(fmt.Sprintf("METRICDATA/PROMETHEUS > Error compiling template %v", templ))
} else { } else {
query := buf.String() query := buf.String()
log.Debugf(fmt.Sprintf("PromQL: %s", query)) log.Debugf("METRICDATA/PROMETHEUS > PromQL: %s", query)
return query, nil return query, nil
} }
} else { } else {
return "", errors.New(fmt.Sprintf("No PromQL for metric %s configured.", metric)) return "", errors.New(fmt.Sprintf("METRICDATA/PROMETHEUS > No PromQL for metric %s configured.", metric))
} }
} }
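errors.New(fmt.Sprintf(...)), as on the left-hand side above, does in two steps what fmt.Errorf does in one; that is the sprintf-nesting cleanup this commit applies throughout. A short sketch (illustrative names only):

package main

import (
	"errors"
	"fmt"
)

func main() {
	metric := "flops_any"

	// Redundant nesting: format a string, then wrap it in an error.
	err1 := errors.New(fmt.Sprintf("No PromQL for metric %s configured.", metric))

	// Equivalent and idiomatic: format and construct the error in one call.
	err2 := fmt.Errorf("No PromQL for metric %s configured.", metric)

	fmt.Println(err1, err2)
}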
@ -283,16 +283,15 @@ func (pdb *PrometheusDataRepository) LoadData(
for _, scope := range scopes { for _, scope := range scopes {
if scope != schema.MetricScopeNode { if scope != schema.MetricScopeNode {
logOnce.Do(func(){log.Infof(fmt.Sprintf("Note: Scope '%s' requested, but not yet supported: Will return 'node' scope only.", scope))}) logOnce.Do(func(){log.Infof("METRICDATA/PROMETHEUS > Scope '%s' requested, but not yet supported: Will return 'node' scope only.", scope)})
continue continue
} }
for _, metric := range metrics { for _, metric := range metrics {
metricConfig := archive.GetMetricConfig(job.Cluster, metric) metricConfig := archive.GetMetricConfig(job.Cluster, metric)
if metricConfig == nil { if metricConfig == nil {
log.Errorf(fmt.Sprintf("Error in LoadData: Metric %s for cluster %s not configured", log.Errorf("METRICDATA/PROMETHEUS > Error in LoadData: Metric %s for cluster %s not configured", metric, job.Cluster)
metric, job.Cluster)) return nil, errors.New("METRICDATA/PROMETHEUS > Prometheus query error")
return nil, errors.New("Prometheus querry error")
} }
query, err := pdb.FormatQuery(metric, scope, nodes, job.Cluster) query, err := pdb.FormatQuery(metric, scope, nodes, job.Cluster)
if err != nil { if err != nil {
@ -308,11 +307,11 @@ func (pdb *PrometheusDataRepository) LoadData(
result, warnings, err := pdb.queryClient.QueryRange(ctx, query, r) result, warnings, err := pdb.queryClient.QueryRange(ctx, query, r)
if err != nil { if err != nil {
log.Errorf(fmt.Sprintf("Prometheus query error in LoadData: %v\nQuery: %s", err, query)) log.Errorf("METRICDATA/PROMETHEUS > Prometheus query error in LoadData: %v\nQuery: %s", err, query)
return nil, errors.New("Prometheus querry error") return nil, errors.New("METRICDATA/PROMETHEUS > Prometheus query error")
} }
if len(warnings) > 0 { if len(warnings) > 0 {
log.Warnf(fmt.Sprintf("Warnings: %v\n", warnings)) log.Warnf("Warnings: %v\n", warnings)
} }
// init data structures // init data structures
@ -390,15 +389,14 @@ func (pdb *PrometheusDataRepository) LoadNodeData(
} }
for _, scope := range scopes { for _, scope := range scopes {
if scope != schema.MetricScopeNode { if scope != schema.MetricScopeNode {
logOnce.Do(func(){log.Infof(fmt.Sprintf("Note: Scope '%s' requested, but not yet supported: Will return 'node' scope only.", scope))}) logOnce.Do(func(){log.Infof("METRICDATA/PROMETHEUS > Note: Scope '%s' requested, but not yet supported: Will return 'node' scope only.", scope)})
continue continue
} }
for _, metric := range metrics { for _, metric := range metrics {
metricConfig := archive.GetMetricConfig(cluster, metric) metricConfig := archive.GetMetricConfig(cluster, metric)
if metricConfig == nil { if metricConfig == nil {
log.Errorf(fmt.Sprintf("Error in LoadNodeData: Metric %s for cluster %s not configured", log.Errorf("METRICDATA/PROMETHEUS > Error in LoadNodeData: Metric %s for cluster %s not configured", metric, cluster)
metric, cluster)) return nil, errors.New("METRICDATA/PROMETHEUS > Prometheus query error")
return nil, errors.New("Prometheus querry error")
} }
query, err := pdb.FormatQuery(metric, scope, nodes, cluster) query, err := pdb.FormatQuery(metric, scope, nodes, cluster)
if err != nil { if err != nil {
@ -414,11 +412,11 @@ func (pdb *PrometheusDataRepository) LoadNodeData(
result, warnings, err := pdb.queryClient.QueryRange(ctx, query, r) result, warnings, err := pdb.queryClient.QueryRange(ctx, query, r)
if err != nil { if err != nil {
log.Errorf(fmt.Sprintf("Prometheus query error in LoadNodeData: %v\n", err)) log.Errorf("METRICDATA/PROMETHEUS > Prometheus query error in LoadNodeData: %v\n", err)
return nil, errors.New("Prometheus querry error") return nil, errors.New("METRICDATA/PROMETHEUS > Prometheus querry error")
} }
if len(warnings) > 0 { if len(warnings) > 0 {
log.Warnf(fmt.Sprintf("Warnings: %v\n", warnings)) log.Warnf("METRICDATA/PROMETHEUS > Warnings: %v\n", warnings)
} }
step := int64(metricConfig.Timestep) step := int64(metricConfig.Timestep)
@ -444,6 +442,6 @@ func (pdb *PrometheusDataRepository) LoadNodeData(
} }
} }
t1 := time.Since(t0) t1 := time.Since(t0)
log.Debugf(fmt.Sprintf("LoadNodeData of %v nodes took %s", len(data), t1)) log.Debugf("METRICDATA/PROMETHEUS > LoadNodeData of %v nodes took %s", len(data), t1)
return data, nil return data, nil
} }

View File

@ -39,14 +39,14 @@ func Connect(driver string, db string) {
} else if driver == "mysql" { } else if driver == "mysql" {
dbHandle, err = sqlx.Open("mysql", fmt.Sprintf("%s?multiStatements=true", db)) dbHandle, err = sqlx.Open("mysql", fmt.Sprintf("%s?multiStatements=true", db))
if err != nil { if err != nil {
log.Fatal(err) log.Fatalf("REPOSITORY/DBCONNECTION > sqlx.Open() error: %v", err)
} }
dbHandle.SetConnMaxLifetime(time.Minute * 3) dbHandle.SetConnMaxLifetime(time.Minute * 3)
dbHandle.SetMaxOpenConns(10) dbHandle.SetMaxOpenConns(10)
dbHandle.SetMaxIdleConns(10) dbHandle.SetMaxIdleConns(10)
} else { } else {
log.Fatalf("unsupported database driver: %s", driver) log.Fatalf("REPOSITORY/DBCONNECTION > unsupported database driver: %s", driver)
} }
dbConnInstance = &DBConnection{DB: dbHandle} dbConnInstance = &DBConnection{DB: dbHandle}
@ -55,7 +55,7 @@ func Connect(driver string, db string) {
func GetConnection() *DBConnection { func GetConnection() *DBConnection {
if dbConnInstance == nil { if dbConnInstance == nil {
log.Fatalf("Database connection not initialized!") log.Fatalf("REPOSITORY/DBCONNECTION > Database connection not initialized!")
} }
return dbConnInstance return dbConnInstance

View File

@ -95,7 +95,7 @@ func HandleImportFlag(flag string) error {
for _, pair := range strings.Split(flag, ",") { for _, pair := range strings.Split(flag, ",") {
files := strings.Split(pair, ":") files := strings.Split(pair, ":")
if len(files) != 2 { if len(files) != 2 {
return fmt.Errorf("invalid import flag format") return fmt.Errorf("REPOSITORY/INIT > invalid import flag format")
} }
raw, err := os.ReadFile(files[0]) raw, err := os.ReadFile(files[0])
@ -105,7 +105,7 @@ func HandleImportFlag(flag string) error {
if config.Keys.Validate { if config.Keys.Validate {
if err := schema.Validate(schema.Meta, bytes.NewReader(raw)); err != nil { if err := schema.Validate(schema.Meta, bytes.NewReader(raw)); err != nil {
return fmt.Errorf("validate job meta: %v", err) return fmt.Errorf("REPOSITORY/INIT > validate job meta: %v", err)
} }
} }
dec := json.NewDecoder(bytes.NewReader(raw)) dec := json.NewDecoder(bytes.NewReader(raw))
@ -122,7 +122,7 @@ func HandleImportFlag(flag string) error {
if config.Keys.Validate { if config.Keys.Validate {
if err := schema.Validate(schema.Data, bytes.NewReader(raw)); err != nil { if err := schema.Validate(schema.Data, bytes.NewReader(raw)); err != nil {
return fmt.Errorf("validate job data: %v", err) return fmt.Errorf("REPOSITORY/INIT > validate job data: %v", err)
} }
} }
dec = json.NewDecoder(bytes.NewReader(raw)) dec = json.NewDecoder(bytes.NewReader(raw))
@ -139,7 +139,7 @@ func HandleImportFlag(flag string) error {
return err return err
} }
return fmt.Errorf("a job with that jobId, cluster and startTime does already exist (dbid: %d)", job.ID) return fmt.Errorf("REPOSITORY/INIT > a job with that jobId, cluster and startTime does already exist (dbid: %d)", job.ID)
} }
job := schema.Job{ job := schema.Job{
@ -186,7 +186,7 @@ func HandleImportFlag(flag string) error {
} }
} }
log.Infof("Successfully imported a new job (jobId: %d, cluster: %s, dbid: %d)", job.JobID, job.Cluster, id) log.Infof("REPOSITORY/INIT > successfully imported a new job (jobId: %d, cluster: %s, dbid: %d)", job.JobID, job.Cluster, id)
} }
return nil return nil
} }
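HandleImportFlag consumes comma-separated "metafile:datafile" pairs. A sketch of the exact splitting done above (file names illustrative):

package main

import (
	"fmt"
	"strings"
)

func main() {
	flag := "job1-meta.json:job1-data.json,job2-meta.json:job2-data.json"
	for _, pair := range strings.Split(flag, ",") {
		files := strings.Split(pair, ":")
		if len(files) != 2 {
			fmt.Println("REPOSITORY/INIT > invalid import flag format")
			return
		}
		fmt.Println("meta:", files[0], "data:", files[1])
	}
}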
@ -260,34 +260,34 @@ func InitDB() error {
job.RawResources, err = json.Marshal(job.Resources) job.RawResources, err = json.Marshal(job.Resources)
if err != nil { if err != nil {
log.Errorf("repository initDB()- %v", err) log.Errorf("REPOSITORY/INIT > repository initDB(): %v", err)
errorOccured++ errorOccured++
continue continue
} }
job.RawMetaData, err = json.Marshal(job.MetaData) job.RawMetaData, err = json.Marshal(job.MetaData)
if err != nil { if err != nil {
log.Errorf("repository initDB()- %v", err) log.Errorf("REPOSITORY/INIT > repository initDB(): %v", err)
errorOccured++ errorOccured++
continue continue
} }
if err := SanityChecks(&job.BaseJob); err != nil { if err := SanityChecks(&job.BaseJob); err != nil {
log.Errorf("repository initDB()- %v", err) log.Errorf("REPOSITORY/INIT > repository initDB(): %v", err)
errorOccured++ errorOccured++
continue continue
} }
res, err := stmt.Exec(job) res, err := stmt.Exec(job)
if err != nil { if err != nil {
log.Errorf("repository initDB()- %v", err) log.Errorf("REPOSITORY/INIT > repository initDB(): %v", err)
errorOccured++ errorOccured++
continue continue
} }
id, err := res.LastInsertId() id, err := res.LastInsertId()
if err != nil { if err != nil {
log.Errorf("repository initDB()- %v", err) log.Errorf("REPOSITORY/INIT > repository initDB(): %v", err)
errorOccured++ errorOccured++
continue continue
} }
@ -318,7 +318,7 @@ func InitDB() error {
} }
if errorOccured > 0 { if errorOccured > 0 {
log.Errorf("Error in import of %d jobs!", errorOccured) log.Errorf("REPOSITORY/INIT > Error in import of %d jobs!", errorOccured)
} }
if err := tx.Commit(); err != nil { if err := tx.Commit(); err != nil {

View File

@ -214,12 +214,12 @@ func (r *JobRepository) FindById(jobId int64) (*schema.Job, error) {
func (r *JobRepository) Start(job *schema.JobMeta) (id int64, err error) { func (r *JobRepository) Start(job *schema.JobMeta) (id int64, err error) {
job.RawResources, err = json.Marshal(job.Resources) job.RawResources, err = json.Marshal(job.Resources)
if err != nil { if err != nil {
return -1, fmt.Errorf("encoding resources field failed: %w", err) return -1, fmt.Errorf("REPOSITORY/JOB > encoding resources field failed: %w", err)
} }
job.RawMetaData, err = json.Marshal(job.MetaData) job.RawMetaData, err = json.Marshal(job.MetaData)
if err != nil { if err != nil {
return -1, fmt.Errorf("encoding metaData field failed: %w", err) return -1, fmt.Errorf("REPOSITORY/JOB > encoding metaData field failed: %w", err)
} }
res, err := r.DB.NamedExec(`INSERT INTO job ( res, err := r.DB.NamedExec(`INSERT INTO job (
@ -259,9 +259,9 @@ func (r *JobRepository) DeleteJobsBefore(startTime int64) (int, error) {
err := r.DB.Get(&cnt, qs) //ignore error as it will also occur in delete statement err := r.DB.Get(&cnt, qs) //ignore error as it will also occur in delete statement
_, err = r.DB.Exec(`DELETE FROM job WHERE job.start_time < ?`, startTime) _, err = r.DB.Exec(`DELETE FROM job WHERE job.start_time < ?`, startTime)
if err != nil { if err != nil {
log.Warnf(" DeleteJobsBefore(%d): error %v", startTime, err) log.Warnf("REPOSITORY/JOB > DeleteJobsBefore(%d): error %v", startTime, err)
} else { } else {
log.Infof("DeleteJobsBefore(%d): Deleted %d jobs", startTime, cnt) log.Infof("REPOSITORY/JOB > DeleteJobsBefore(%d): Deleted %d jobs", startTime, cnt)
} }
return cnt, err return cnt, err
} }
@ -269,9 +269,9 @@ func (r *JobRepository) DeleteJobsBefore(startTime int64) (int, error) {
func (r *JobRepository) DeleteJobById(id int64) error { func (r *JobRepository) DeleteJobById(id int64) error {
_, err := r.DB.Exec(`DELETE FROM job WHERE job.id = ?`, id) _, err := r.DB.Exec(`DELETE FROM job WHERE job.id = ?`, id)
if err != nil { if err != nil {
log.Warnf("DeleteJobById(%d): error %v", id, err) log.Warnf("REPOSITORY/JOB > DeleteJobById(%d): error %v", id, err)
} else { } else {
log.Infof("DeleteJobById(%d): Success", id) log.Infof("REPOSITORY/JOB > DeleteJobById(%d): Success", id)
} }
return err return err
} }
@ -376,7 +376,7 @@ func (r *JobRepository) archivingWorker(){
// not using meta data, called to load JobMeta into Cache? // not using meta data, called to load JobMeta into Cache?
// will fail if job meta not in repository // will fail if job meta not in repository
if _, err := r.FetchMetadata(job); err != nil { if _, err := r.FetchMetadata(job); err != nil {
log.Errorf("archiving job (dbid: %d) failed: %s", job.ID, err.Error()) log.Errorf("REPOSITORY/JOB > archiving job (dbid: %d) failed: %s", job.ID, err.Error())
r.UpdateMonitoringStatus(job.ID, schema.MonitoringStatusArchivingFailed) r.UpdateMonitoringStatus(job.ID, schema.MonitoringStatusArchivingFailed)
continue continue
} }
@ -385,18 +385,18 @@ func (r *JobRepository) archivingWorker(){
// TODO: Maybe use context with cancel/timeout here // TODO: Maybe use context with cancel/timeout here
jobMeta, err := metricdata.ArchiveJob(job, context.Background()) jobMeta, err := metricdata.ArchiveJob(job, context.Background())
if err != nil { if err != nil {
log.Errorf("archiving job (dbid: %d) failed: %s", job.ID, err.Error()) log.Errorf("REPOSITORY/JOB > archiving job (dbid: %d) failed: %s", job.ID, err.Error())
r.UpdateMonitoringStatus(job.ID, schema.MonitoringStatusArchivingFailed) r.UpdateMonitoringStatus(job.ID, schema.MonitoringStatusArchivingFailed)
continue continue
} }
// Update the jobs database entry one last time: // Update the jobs database entry one last time:
if err := r.MarkArchived(job.ID, schema.MonitoringStatusArchivingSuccessful, jobMeta.Statistics); err != nil { if err := r.MarkArchived(job.ID, schema.MonitoringStatusArchivingSuccessful, jobMeta.Statistics); err != nil {
log.Errorf("archiving job (dbid: %d) failed: %s", job.ID, err.Error()) log.Errorf("REPOSITORY/JOB > archiving job (dbid: %d) failed: %s", job.ID, err.Error())
continue continue
} }
log.Printf("archiving job (dbid: %d) successful", job.ID) log.Printf("REPOSITORY/JOB > archiving job (dbid: %d) successful", job.ID)
r.archivePending.Done() r.archivePending.Done()
} }
} }
@ -523,7 +523,7 @@ func (r *JobRepository) StopJobsExceedingWalltimeBy(seconds int) error {
} }
if rowsAffected > 0 { if rowsAffected > 0 {
log.Warnf("%d jobs have been marked as failed due to running too long", rowsAffected) log.Warnf("REPOSITORY/JOB > %d jobs have been marked as failed due to running too long", rowsAffected)
} }
return nil return nil
} }

View File

@ -36,7 +36,7 @@ func (r *JobRepository) QueryJobs(
} else if order.Order == model.SortDirectionEnumDesc { } else if order.Order == model.SortDirectionEnumDesc {
query = query.OrderBy(fmt.Sprintf("job.%s DESC", field)) query = query.OrderBy(fmt.Sprintf("job.%s DESC", field))
} else { } else {
return nil, errors.New("invalid sorting order") return nil, errors.New("REPOSITORY/QUERY > invalid sorting order")
} }
} }
@ -54,7 +54,7 @@ func (r *JobRepository) QueryJobs(
return nil, err return nil, err
} }
log.Debugf("SQL query: `%s`, args: %#v", sql, args) log.Debugf("REPOSITORY/QUERY > SQL query: `%s`, args: %#v", sql, args)
rows, err := query.RunWith(r.stmtCache).Query() rows, err := query.RunWith(r.stmtCache).Query()
if err != nil { if err != nil {
return nil, err return nil, err
@ -209,7 +209,7 @@ var matchAllCap = regexp.MustCompile("([a-z0-9])([A-Z])")
func toSnakeCase(str string) string { func toSnakeCase(str string) string {
for _, c := range str { for _, c := range str {
if c == '\'' || c == '\\' { if c == '\'' || c == '\\' {
panic("A hacker (probably not)!!!") panic("REPOSITORY/QUERY > toSnakeCase() attack vector!")
} }
} }

View File

@ -42,12 +42,12 @@ func GetUserCfgRepo() *UserCfgRepo {
FOREIGN KEY (username) REFERENCES user (username) ON DELETE CASCADE ON UPDATE NO ACTION);`) FOREIGN KEY (username) REFERENCES user (username) ON DELETE CASCADE ON UPDATE NO ACTION);`)
if err != nil { if err != nil {
log.Fatal(err) log.Fatalf("REPOSITORY/USER > db.DB.exec() error: %v", err)
} }
lookupConfigStmt, err := db.DB.Preparex(`SELECT confkey, value FROM configuration WHERE configuration.username = ?`) lookupConfigStmt, err := db.DB.Preparex(`SELECT confkey, value FROM configuration WHERE configuration.username = ?`)
if err != nil { if err != nil {
log.Fatal(err) log.Fatalf("REPOSITORY/USER > db.DB.Preparex() error: %v", err)
} }
userCfgRepoInstance = &UserCfgRepo{ userCfgRepoInstance = &UserCfgRepo{

View File

@ -61,12 +61,12 @@ func setupHomeRoute(i InfoType, r *http.Request) InfoType {
State: []schema.JobState{schema.JobStateRunning}, State: []schema.JobState{schema.JobStateRunning},
}}, nil, nil) }}, nil, nil)
if err != nil { if err != nil {
log.Errorf("failed to count jobs: %s", err.Error()) log.Errorf("ROUTERCONFIG/ROUTES > failed to count jobs: %s", err.Error())
runningJobs = map[string]int{} runningJobs = map[string]int{}
} }
totalJobs, err := jobRepo.CountGroupedJobs(r.Context(), model.AggregateCluster, nil, nil, nil) totalJobs, err := jobRepo.CountGroupedJobs(r.Context(), model.AggregateCluster, nil, nil, nil)
if err != nil { if err != nil {
log.Errorf("failed to count jobs: %s", err.Error()) log.Errorf("ROUTERCONFIG/ROUTES > failed to count jobs: %s", err.Error())
totalJobs = map[string]int{} totalJobs = map[string]int{}
} }
from := time.Now().Add(-24 * time.Hour) from := time.Now().Add(-24 * time.Hour)
@ -75,7 +75,7 @@ func setupHomeRoute(i InfoType, r *http.Request) InfoType {
Duration: &schema.IntRange{From: 0, To: graph.ShortJobDuration}, Duration: &schema.IntRange{From: 0, To: graph.ShortJobDuration},
}}, nil, nil) }}, nil, nil)
if err != nil { if err != nil {
log.Errorf("failed to count jobs: %s", err.Error()) log.Errorf("ROUTERCONFIG/ROUTES > failed to count jobs: %s", err.Error())
recentShortJobs = map[string]int{} recentShortJobs = map[string]int{}
} }
@ -150,7 +150,7 @@ func setupTaglistRoute(i InfoType, r *http.Request) InfoType {
tags, counts, err := jobRepo.CountTags(username) tags, counts, err := jobRepo.CountTags(username)
tagMap := make(map[string][]map[string]interface{}) tagMap := make(map[string][]map[string]interface{})
if err != nil { if err != nil {
log.Errorf("GetTags failed: %s", err.Error()) log.Errorf("ROUTERCONFIG/ROUTES > GetTags failed: %s", err.Error())
i["tagmap"] = tagMap i["tagmap"] = tagMap
return i return i
} }

View File

@ -40,14 +40,14 @@ func LoadEnv(file string) error {
line = strings.TrimPrefix(line, "export ") line = strings.TrimPrefix(line, "export ")
parts := strings.SplitN(line, "=", 2) parts := strings.SplitN(line, "=", 2)
if len(parts) != 2 { if len(parts) != 2 {
return fmt.Errorf("unsupported line: %#v", line) return fmt.Errorf("RUNTIME/SETUP > unsupported line: %#v", line)
} }
key := strings.TrimSpace(parts[0]) key := strings.TrimSpace(parts[0])
val := strings.TrimSpace(parts[1]) val := strings.TrimSpace(parts[1])
if strings.HasPrefix(val, "\"") { if strings.HasPrefix(val, "\"") {
if !strings.HasSuffix(val, "\"") { if !strings.HasSuffix(val, "\"") {
return fmt.Errorf("unsupported line: %#v", line) return fmt.Errorf("RUNTIME/SETUP > unsupported line: %#v", line)
} }
runes := []rune(val[1 : len(val)-1]) runes := []rune(val[1 : len(val)-1])
@ -65,7 +65,7 @@ func LoadEnv(file string) error {
case '"': case '"':
sb.WriteRune('"') sb.WriteRune('"')
default: default:
return fmt.Errorf("unsupprorted escape sequence in quoted string: backslash %#v", runes[i]) return fmt.Errorf("RUNTIME/SETUP > unsupprorted escape sequence in quoted string: backslash %#v", runes[i])
} }
continue continue
} }
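For reference, LoadEnv parses flat KEY=VALUE lines with an optional "export " prefix and double-quoted values using backslash escapes; which escapes exist beyond \" is not fully visible in this hunk. A sketch of accepted input:

package main

import "fmt"

func main() {
	// Each line must split into exactly one KEY=VALUE pair; quoted values
	// may contain escaped quotes (further escape cases are assumed from context).
	envFile := "export ADDR=127.0.0.1:8080\n" +
		"SESSION_KEY=\"quoted value with an escaped \\\" quote\"\n"
	fmt.Print(envFile)
}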

View File

@ -49,7 +49,7 @@ func Init(rawConfig json.RawMessage, disableArchive bool) error {
// case "s3": // case "s3":
// ar = &S3Archive{} // ar = &S3Archive{}
default: default:
return fmt.Errorf("unkown archive backend '%s''", kind.Kind) return fmt.Errorf("ARCHIVE/ARCHIVE > unkown archive backend '%s''", kind.Kind)
} }
if err := ar.Init(rawConfig); err != nil { if err := ar.Init(rawConfig); err != nil {

View File

@ -59,7 +59,7 @@ func initClusterConfig() error {
nl, err := ParseNodeList(sc.Nodes) nl, err := ParseNodeList(sc.Nodes)
if err != nil { if err != nil {
return fmt.Errorf("in %s/cluster.json: %w", cluster.Name, err) return fmt.Errorf("ARCHIVE/CLUSTERCONFIG > in %s/cluster.json: %w", cluster.Name, err)
} }
nodeLists[cluster.Name][sc.Name] = nl nodeLists[cluster.Name][sc.Name] = nl
} }
@ -112,7 +112,7 @@ func AssignSubCluster(job *schema.BaseJob) error {
cluster := GetCluster(job.Cluster) cluster := GetCluster(job.Cluster)
if cluster == nil { if cluster == nil {
return fmt.Errorf("unkown cluster: %#v", job.Cluster) return fmt.Errorf("ARCHIVE/CLUSTERCONFIG > unkown cluster: %#v", job.Cluster)
} }
if job.SubCluster != "" { if job.SubCluster != "" {
@ -121,11 +121,11 @@ func AssignSubCluster(job *schema.BaseJob) error {
return nil return nil
} }
} }
return fmt.Errorf("already assigned subcluster %#v unkown (cluster: %#v)", job.SubCluster, job.Cluster) return fmt.Errorf("ARCHIVE/CLUSTERCONFIG > already assigned subcluster %#v unkown (cluster: %#v)", job.SubCluster, job.Cluster)
} }
if len(job.Resources) == 0 { if len(job.Resources) == 0 {
return fmt.Errorf("job without any resources/hosts") return fmt.Errorf("ARCHIVE/CLUSTERCONFIG > job without any resources/hosts")
} }
host0 := job.Resources[0].Hostname host0 := job.Resources[0].Hostname
@ -141,7 +141,7 @@ func AssignSubCluster(job *schema.BaseJob) error {
return nil return nil
} }
return fmt.Errorf("no subcluster found for cluster %#v and host %#v", job.Cluster, host0) return fmt.Errorf("ARCHIVE/CLUSTERCONFIG > no subcluster found for cluster %#v and host %#v", job.Cluster, host0)
} }
func GetSubClusterByNode(cluster, hostname string) (string, error) { func GetSubClusterByNode(cluster, hostname string) (string, error) {
@ -154,12 +154,12 @@ func GetSubClusterByNode(cluster, hostname string) (string, error) {
c := GetCluster(cluster) c := GetCluster(cluster)
if c == nil { if c == nil {
return "", fmt.Errorf("unkown cluster: %#v", cluster) return "", fmt.Errorf("ARCHIVE/CLUSTERCONFIG > unkown cluster: %#v", cluster)
} }
if c.SubClusters[0].Nodes == "" { if c.SubClusters[0].Nodes == "" {
return c.SubClusters[0].Name, nil return c.SubClusters[0].Name, nil
} }
return "", fmt.Errorf("no subcluster found for cluster %#v and host %#v", cluster, hostname) return "", fmt.Errorf("ARCHIVE/CLUSTERCONFIG > no subcluster found for cluster %#v and host %#v", cluster, hostname)
} }

View File

@ -46,7 +46,7 @@ func loadJobMeta(filename string) (*schema.JobMeta, error) {
f, err := os.Open(filename) f, err := os.Open(filename)
if err != nil { if err != nil {
log.Errorf("fsBackend loadJobMeta()- %v", err) log.Errorf("ARCHIVE/FSBACKEND > loadJobMeta() > open file error: %v", err)
return &schema.JobMeta{}, err return &schema.JobMeta{}, err
} }
defer f.Close() defer f.Close()
@ -58,19 +58,19 @@ func (fsa *FsArchive) Init(rawConfig json.RawMessage) error {
var config FsArchiveConfig var config FsArchiveConfig
if err := json.Unmarshal(rawConfig, &config); err != nil { if err := json.Unmarshal(rawConfig, &config); err != nil {
log.Errorf("fsBackend Init()- %v", err) log.Errorf("ARCHIVE/FSBACKEND > Init() > Unmarshal error: %v", err)
return err return err
} }
if config.Path == "" { if config.Path == "" {
err := fmt.Errorf("fsBackend Init()- empty path") err := fmt.Errorf("ARCHIVE/FSBACKEND > Init() : empty config.Path")
log.Errorf("fsBackend Init()- %v", err) log.Errorf("ARCHIVE/FSBACKEND > Init() > config.Path error: %v", err)
return err return err
} }
fsa.path = config.Path fsa.path = config.Path
entries, err := os.ReadDir(fsa.path) entries, err := os.ReadDir(fsa.path)
if err != nil { if err != nil {
log.Errorf("fsBackend Init()- %v", err) log.Errorf("ARCHIVE/FSBACKEND > Init() > ReadDir() error: %v", err)
return err return err
} }
@ -86,7 +86,7 @@ func (fsa *FsArchive) LoadJobData(job *schema.Job) (schema.JobData, error) {
filename := getPath(job, fsa.path, "data.json") filename := getPath(job, fsa.path, "data.json")
f, err := os.Open(filename) f, err := os.Open(filename)
if err != nil { if err != nil {
log.Errorf("fsBackend LoadJobData()- %v", err) log.Errorf("ARCHIVE/FSBACKEND > LoadJobData() > open file error: %v", err)
return nil, err return nil, err
} }
defer f.Close() defer f.Close()
@ -104,12 +104,12 @@ func (fsa *FsArchive) LoadClusterCfg(name string) (*schema.Cluster, error) {
b, err := os.ReadFile(filepath.Join(fsa.path, name, "cluster.json")) b, err := os.ReadFile(filepath.Join(fsa.path, name, "cluster.json"))
if err != nil { if err != nil {
log.Errorf("fsBackend LoadClusterCfg()- %v", err) log.Errorf("ARCHIVE/FSBACKEND > LoadClusterCfg() > open file error: %v", err)
return &schema.Cluster{}, err return &schema.Cluster{}, err
} }
if config.Keys.Validate { if config.Keys.Validate {
if err := schema.Validate(schema.ClusterCfg, bytes.NewReader(b)); err != nil { if err := schema.Validate(schema.ClusterCfg, bytes.NewReader(b)); err != nil {
return &schema.Cluster{}, fmt.Errorf("Validate cluster config: %v\n", err) return &schema.Cluster{}, fmt.Errorf("ARCHIVE/FSBACKEND > Validate cluster config: %v\n", err)
} }
} }
return DecodeCluster(bytes.NewReader(b)) return DecodeCluster(bytes.NewReader(b))
@ -121,13 +121,13 @@ func (fsa *FsArchive) Iter() <-chan *schema.JobMeta {
go func() { go func() {
clustersDir, err := os.ReadDir(fsa.path) clustersDir, err := os.ReadDir(fsa.path)
if err != nil { if err != nil {
log.Fatalf("Reading clusters failed: %s", err.Error()) log.Fatalf("ARCHIVE/FSBACKEND > Reading clusters failed @ cluster dirs: %s", err.Error())
} }
for _, clusterDir := range clustersDir { for _, clusterDir := range clustersDir {
lvl1Dirs, err := os.ReadDir(filepath.Join(fsa.path, clusterDir.Name())) lvl1Dirs, err := os.ReadDir(filepath.Join(fsa.path, clusterDir.Name()))
if err != nil { if err != nil {
log.Fatalf("Reading jobs failed: %s", err.Error()) log.Fatalf("ARCHIVE/FSBACKEND > Reading jobs failed @ lvl1 dirs: %s", err.Error())
} }
for _, lvl1Dir := range lvl1Dirs { for _, lvl1Dir := range lvl1Dirs {
@ -138,21 +138,21 @@ func (fsa *FsArchive) Iter() <-chan *schema.JobMeta {
lvl2Dirs, err := os.ReadDir(filepath.Join(fsa.path, clusterDir.Name(), lvl1Dir.Name())) lvl2Dirs, err := os.ReadDir(filepath.Join(fsa.path, clusterDir.Name(), lvl1Dir.Name()))
if err != nil { if err != nil {
log.Fatalf("Reading jobs failed: %s", err.Error()) log.Fatalf("ARCHIVE/FSBACKEND > Reading jobs failed @ lvl2 dirs: %s", err.Error())
} }
for _, lvl2Dir := range lvl2Dirs { for _, lvl2Dir := range lvl2Dirs {
dirpath := filepath.Join(fsa.path, clusterDir.Name(), lvl1Dir.Name(), lvl2Dir.Name()) dirpath := filepath.Join(fsa.path, clusterDir.Name(), lvl1Dir.Name(), lvl2Dir.Name())
startTimeDirs, err := os.ReadDir(dirpath) startTimeDirs, err := os.ReadDir(dirpath)
if err != nil { if err != nil {
log.Fatalf("Reading jobs failed: %s", err.Error()) log.Fatalf("ARCHIVE/FSBACKEND > Reading jobs failed @ starttime dirs: %s", err.Error())
} }
for _, startTimeDir := range startTimeDirs { for _, startTimeDir := range startTimeDirs {
if startTimeDir.IsDir() { if startTimeDir.IsDir() {
job, err := loadJobMeta(filepath.Join(dirpath, startTimeDir.Name(), "meta.json")) job, err := loadJobMeta(filepath.Join(dirpath, startTimeDir.Name(), "meta.json"))
if err != nil { if err != nil {
log.Errorf("in %s: %s", filepath.Join(dirpath, startTimeDir.Name()), err.Error()) log.Errorf("ARCHIVE/FSBACKEND > error in %s: %s", filepath.Join(dirpath, startTimeDir.Name()), err.Error())
} else { } else {
ch <- job ch <- job
} }

View File

@ -64,7 +64,7 @@ type NLExprIntRange struct {
func (nle NLExprIntRange) consume(input string) (next string, ok bool) { func (nle NLExprIntRange) consume(input string) (next string, ok bool) {
if !nle.zeroPadded || nle.digits < 1 { if !nle.zeroPadded || nle.digits < 1 {
log.Error("node list: only zero-padded ranges are allowed") log.Error("ARCHIVE/NODELIST > only zero-padded ranges are allowed")
return "", false return "", false
} }
@ -102,7 +102,7 @@ func ParseNodeList(raw string) (NodeList, error) {
i++ i++
} }
if i == len(raw) { if i == len(raw) {
return nil, fmt.Errorf("node list: unclosed '['") return nil, fmt.Errorf("ARCHIVE/NODELIST > unclosed '['")
} }
} else if raw[i] == ',' { } else if raw[i] == ',' {
rawterms = append(rawterms, raw[prevterm:i]) rawterms = append(rawterms, raw[prevterm:i])
@ -135,7 +135,7 @@ func ParseNodeList(raw string) (NodeList, error) {
end := strings.Index(rawterm[i:], "]") end := strings.Index(rawterm[i:], "]")
if end == -1 { if end == -1 {
return nil, fmt.Errorf("node list: unclosed '['") return nil, fmt.Errorf("ARCHIVE/NODELIST > unclosed '['")
} }
parts := strings.Split(rawterm[i+1:i+end], ",") parts := strings.Split(rawterm[i+1:i+end], ",")
@ -144,21 +144,21 @@ func ParseNodeList(raw string) (NodeList, error) {
for _, part := range parts { for _, part := range parts {
minus := strings.Index(part, "-") minus := strings.Index(part, "-")
if minus == -1 { if minus == -1 {
return nil, fmt.Errorf("node list: no '-' found inside '[...]'") return nil, fmt.Errorf("ARCHIVE/NODELIST > no '-' found inside '[...]'")
} }
s1, s2 := part[0:minus], part[minus+1:] s1, s2 := part[0:minus], part[minus+1:]
if len(s1) != len(s2) || len(s1) == 0 { if len(s1) != len(s2) || len(s1) == 0 {
return nil, fmt.Errorf("node list: %#v and %#v are not of equal length or of length zero", s1, s2) return nil, fmt.Errorf("ARCHIVE/NODELIST > %#v and %#v are not of equal length or of length zero", s1, s2)
} }
x1, err := strconv.ParseInt(s1, 10, 32) x1, err := strconv.ParseInt(s1, 10, 32)
if err != nil { if err != nil {
return nil, fmt.Errorf("node list: %w", err) return nil, fmt.Errorf("ARCHIVE/NODELIST > could not parse int: %w", err)
} }
x2, err := strconv.ParseInt(s2, 10, 32) x2, err := strconv.ParseInt(s2, 10, 32)
if err != nil { if err != nil {
return nil, fmt.Errorf("node list: %w", err) return nil, fmt.Errorf("ARCHIVE/NODELIST > could not parse int: %w", err)
} }
nles = append(nles, NLExprIntRange{ nles = append(nles, NLExprIntRange{
@ -172,7 +172,7 @@ func ParseNodeList(raw string) (NodeList, error) {
exprs = append(exprs, nles) exprs = append(exprs, nles)
i += end i += end
} else { } else {
return nil, fmt.Errorf("node list: invalid character: %#v", rune(c)) return nil, fmt.Errorf("ARCHIVE/NODELIST > invalid character: %#v", rune(c))
} }
} }
nl = append(nl, exprs) nl = append(nl, exprs)
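The grammar enforced above allows comma-separated terms where "[a-b]" holds zero-padded ranges whose bounds have equal length. A hedged usage sketch (the Contains call is assumed from surrounding code, not shown in this diff):

package main

import (
	"log"

	"github.com/ClusterCockpit/cc-backend/pkg/archive"
)

func main() {
	// "cn01".."cn04" plus "gpu10".."gpu12"; bounds are zero-padded and equal length.
	nl, err := archive.ParseNodeList("cn[01-04],gpu[10-12]")
	if err != nil {
		log.Fatalf("ARCHIVE/NODELIST > %v", err)
	}
	log.Println(nl.Contains("cn03")) // assumed helper; expected: true
}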

View File

@ -69,7 +69,7 @@ func (c *Cache) Get(key string, computeValue ComputeValue) interface{} {
if now.After(entry.expiration) { if now.After(entry.expiration) {
if !c.evictEntry(entry) { if !c.evictEntry(entry) {
if entry.expiration.IsZero() { if entry.expiration.IsZero() {
panic("cache entry that shoud have been waited for could not be evicted.") panic("LRUCACHE/CACHE > cache entry that shoud have been waited for could not be evicted.")
} }
c.mutex.Unlock() c.mutex.Unlock()
return entry.value return entry.value
@ -208,7 +208,7 @@ func (c *Cache) Keys(f func(key string, val interface{})) {
size := 0 size := 0
for key, e := range c.entries { for key, e := range c.entries {
if key != e.key { if key != e.key {
panic("key mismatch") panic("LRUCACHE/CACHE > key mismatch")
} }
if now.After(e.expiration) { if now.After(e.expiration) {
@ -219,13 +219,13 @@ func (c *Cache) Keys(f func(key string, val interface{})) {
if e.prev != nil { if e.prev != nil {
if e.prev.next != e { if e.prev.next != e {
panic("list corrupted") panic("LRUCACHE/CACHE > list corrupted")
} }
} }
if e.next != nil { if e.next != nil {
if e.next.prev != e { if e.next.prev != e {
panic("list corrupted") panic("LRUCACHE/CACHE > list corrupted")
} }
} }
@ -234,18 +234,18 @@ func (c *Cache) Keys(f func(key string, val interface{})) {
} }
if size != c.usedmemory { if size != c.usedmemory {
panic("size calculations failed") panic("LRUCACHE/CACHE > size calculations failed")
} }
if c.head != nil { if c.head != nil {
if c.tail == nil || c.head.prev != nil { if c.tail == nil || c.head.prev != nil {
panic("head/tail corrupted") panic("LRUCACHE/CACHE > head/tail corrupted")
} }
} }
if c.tail != nil { if c.tail != nil {
if c.head == nil || c.tail.next != nil { if c.head == nil || c.tail.next != nil {
panic("head/tail corrupted") panic("LRUCACHE/CACHE > head/tail corrupted")
} }
} }
} }
@ -281,7 +281,7 @@ func (c *Cache) unlinkEntry(e *cacheEntry) {
func (c *Cache) evictEntry(e *cacheEntry) bool { func (c *Cache) evictEntry(e *cacheEntry) bool {
if e.waitingForComputation != 0 { if e.waitingForComputation != 0 {
// panic("cannot evict this entry as other goroutines need the value") // panic("LRUCACHE/CACHE > cannot evict this entry as other goroutines need the value")
return false return false
} }
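The consistency panics above guard the LRU cache behind Get. A hedged usage sketch; the import path, constructor, and the ComputeValue signature (value, time-to-live, size) are assumptions from context, not shown in this diff:

package main

import (
	"fmt"
	"time"

	"github.com/ClusterCockpit/cc-backend/pkg/lrucache"
)

func main() {
	c := lrucache.New(1024) // assumed constructor; capacity in bytes
	v := c.Get("answer", func() (interface{}, time.Duration, int) {
		return 42, time.Minute, 8 // value, ttl, approximate size in bytes
	})
	fmt.Println(v) // 42; concurrent callers wait for the same computation
}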

View File

@ -133,12 +133,12 @@ const (
func (e *JobState) UnmarshalGQL(v interface{}) error { func (e *JobState) UnmarshalGQL(v interface{}) error {
str, ok := v.(string) str, ok := v.(string)
if !ok { if !ok {
return fmt.Errorf("enums must be strings") return fmt.Errorf("SCHEMA/JOB > enums must be strings")
} }
*e = JobState(str) *e = JobState(str)
if !e.Valid() { if !e.Valid() {
return errors.New("invalid job state") return errors.New("SCHEMA/JOB > invalid job state")
} }
return nil return nil

View File

@ -92,12 +92,12 @@ func (e *MetricScope) Max(other MetricScope) MetricScope {
func (e *MetricScope) UnmarshalGQL(v interface{}) error { func (e *MetricScope) UnmarshalGQL(v interface{}) error {
str, ok := v.(string) str, ok := v.(string)
if !ok { if !ok {
return fmt.Errorf("enums must be strings") return fmt.Errorf("SCHEMA/METRICS > enums must be strings")
} }
*e = MetricScope(str) *e = MetricScope(str)
if !e.Valid() { if !e.Valid() {
return fmt.Errorf("%s is not a valid MetricScope", str) return fmt.Errorf("SCHEMA/METRICS > %s is not a valid MetricScope", str)
} }
return nil return nil
} }
@ -303,7 +303,7 @@ func (jm *JobMetric) AddPercentiles(ps []int) bool {
for _, p := range ps { for _, p := range ps {
if p < 1 || p > 99 { if p < 1 || p > 99 {
panic("invalid percentile") panic("SCHEMA/METRICS > invalid percentile")
} }
if _, ok := jm.StatisticsSeries.Percentiles[p]; ok { if _, ok := jm.StatisticsSeries.Percentiles[p]; ok {

View File

@ -45,7 +45,7 @@ func Validate(k Kind, r io.Reader) (err error) {
case Config: case Config:
s, err = jsonschema.Compile("embedfs://config.schema.json") s, err = jsonschema.Compile("embedfs://config.schema.json")
default: default:
return fmt.Errorf("unkown schema kind ") return fmt.Errorf("SCHEMA/VALIDATE > unkown schema kind: %v", k)
} }
if err != nil { if err != nil {
@ -54,12 +54,12 @@ func Validate(k Kind, r io.Reader) (err error) {
var v interface{} var v interface{}
if err := json.NewDecoder(r).Decode(&v); err != nil { if err := json.NewDecoder(r).Decode(&v); err != nil {
log.Errorf("schema.Validate() - Failed to decode %v", err) log.Errorf("SCHEMA/VALIDATE > Failed to decode %v", err)
return err return err
} }
if err = s.Validate(v); err != nil { if err = s.Validate(v); err != nil {
return fmt.Errorf("%#v", err) return fmt.Errorf("SCHEMA/VALIDATE > %#v", err)
} }
return nil return nil

View File

@ -192,7 +192,7 @@ func GetUnitUnitFactor(in Unit, out Unit) (func(value interface{}) interface{},
} else if in.getMeasure() == TemperatureF && out.getMeasure() == TemperatureC { } else if in.getMeasure() == TemperatureF && out.getMeasure() == TemperatureC {
return convertTempF2TempC, nil return convertTempF2TempC, nil
} else if in.getMeasure() != out.getMeasure() || in.getUnitDenominator() != out.getUnitDenominator() { } else if in.getMeasure() != out.getMeasure() || in.getUnitDenominator() != out.getUnitDenominator() {
return func(value interface{}) interface{} { return 1.0 }, fmt.Errorf("invalid measures in in and out Unit") return func(value interface{}) interface{} { return 1.0 }, fmt.Errorf("UNITS/UNITS > invalid measures in 'in' and 'out' Unit")
} }
return GetPrefixPrefixFactor(in.getPrefix(), out.getPrefix()), nil return GetPrefixPrefixFactor(in.getPrefix(), out.getPrefix()), nil
} }

View File

@ -24,6 +24,7 @@ var frontendFiles embed.FS
func ServeFiles() http.Handler { func ServeFiles() http.Handler {
publicFiles, err := fs.Sub(frontendFiles, "frontend/public") publicFiles, err := fs.Sub(frontendFiles, "frontend/public")
if err != nil { if err != nil {
log.Fatalf("WEB/WEB > cannot find frontend public files")
panic(err) panic(err)
} }
return http.FileServer(http.FS(publicFiles)) return http.FileServer(http.FS(publicFiles))
@ -47,6 +48,7 @@ func init() {
templates[strings.TrimPrefix(path, "templates/")] = template.Must(template.Must(base.Clone()).ParseFS(templateFiles, path)) templates[strings.TrimPrefix(path, "templates/")] = template.Must(template.Must(base.Clone()).ParseFS(templateFiles, path))
return nil return nil
}); err != nil { }); err != nil {
log.Fatalf("WEB/WEB > cannot find frontend template files")
panic(err) panic(err)
} }
@ -80,6 +82,7 @@ type Page struct {
func RenderTemplate(rw http.ResponseWriter, r *http.Request, file string, page *Page) { func RenderTemplate(rw http.ResponseWriter, r *http.Request, file string, page *Page) {
t, ok := templates[file] t, ok := templates[file]
if !ok { if !ok {
log.Fatalf("WEB/WEB > template '%s' not found", file)
panic("template not found") panic("template not found")
} }
@ -91,6 +94,6 @@ func RenderTemplate(rw http.ResponseWriter, r *http.Request, file string, page *
log.Infof("%v\n", page.Config) log.Infof("%v\n", page.Config)
if err := t.Execute(rw, page); err != nil { if err := t.Execute(rw, page); err != nil {
log.Errorf("template error: %s", err.Error()) log.Errorf("WEB/WEB > template error: %s", err.Error())
} }
} }