Rework pkg/log, add 'loglevel' and 'logdate' flags, streamline

- remove some previously added manual location strings: these are now handled by pkg/log depending on the loglevel
- keep manual location strings for fmt print functions
- add 'notice' and 'critical' loglevels
- add 'Panic' and 'Panicf' functions to log panics (see the usage sketch below)
- addresses issue #26
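
The bullets above describe the pkg/log rework only at a high level. The sketch below is a minimal, self-contained illustration of how such a leveled logger can be structured: the level chosen via 'loglevel' filters messages, 'logdate' toggles the timestamp prefix, the caller's file:line is attached automatically at debug level (which is what makes hand-written location prefixes redundant), and Panicf logs before panicking. The constant names, level ordering and the Init signature here are assumptions for illustration, not the actual pkg/log API.

// Minimal sketch of a leveled logger; names and ordering are illustrative,
// not the actual cc-backend pkg/log API.
package main

import (
	"fmt"
	"log"
	"os"
)

// Severity levels; 'notice' and 'critical' mirror the levels added by this
// commit, but the exact ordering is an assumption.
const (
	lvlDebug = iota
	lvlInfo
	lvlNotice
	lvlWarn
	lvlError
	lvlCritical
)

var (
	minLevel = lvlWarn
	loggers  = map[int]*log.Logger{}
)

// Init configures one stdlib logger per level. 'logdate' toggles the
// date/time prefix; at 'debug' level the caller's file:line is added
// automatically, so call sites no longer need manual "API/REST > "-style
// location strings.
func Init(loglevel string, logdate bool) {
	names := map[string]int{"debug": lvlDebug, "info": lvlInfo, "notice": lvlNotice,
		"warn": lvlWarn, "err": lvlError, "crit": lvlCritical}
	if l, ok := names[loglevel]; ok {
		minLevel = l
	}
	flags := 0
	if logdate {
		flags |= log.LstdFlags
	}
	if minLevel == lvlDebug {
		flags |= log.Lshortfile
	}
	for name, l := range names {
		loggers[l] = log.New(os.Stderr, "["+name+"] ", flags)
	}
}

// logf drops messages below the configured level and forwards the rest;
// calldepth 3 skips logf and the exported wrapper so file:line points at
// the original call site.
func logf(level int, format string, args ...interface{}) {
	if level < minLevel {
		return
	}
	if l, ok := loggers[level]; ok {
		_ = l.Output(3, fmt.Sprintf(format, args...))
	}
}

func Infof(format string, args ...interface{})  { logf(lvlInfo, format, args...) }
func Warnf(format string, args ...interface{})  { logf(lvlWarn, format, args...) }
func Errorf(format string, args ...interface{}) { logf(lvlError, format, args...) }

// Panicf logs at the highest severity and then panics, so panics inside
// HTTP handlers or GraphQL resolvers still show up in the log output.
func Panicf(format string, args ...interface{}) {
	logf(lvlCritical, format, args...)
	panic(fmt.Sprintf(format, args...))
}

func main() {
	Init("debug", true)
	Warnf("REST ERROR : %s", "example message")
	Infof("new job (id: %d): cluster=%s", 42, "fritz")
}

With a setup like this, a plain log.Warnf at the call site carries its own location when running at debug level, which is why the hunks below strip the hand-written "API/REST > " prefixes from log.* calls while the client-facing http.Error and fmt messages keep a short "REST > " marker.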
Christoph Kluge
2023-01-23 18:48:06 +01:00
parent 25286ff068
commit 79a949b55e
22 changed files with 368 additions and 209 deletions
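
Downstream, the new 'loglevel' and 'logdate' command-line flags would typically be parsed in the server's main() and handed to the logger before anything else runs. The wiring below is a hedged sketch: the flag names follow the commit title and log.Infof appears in the diff, but the default values and the log.Init signature are assumptions.

package main

import (
	"flag"

	"github.com/ClusterCockpit/cc-backend/pkg/log"
)

func main() {
	// Flag names follow the commit title; the defaults and the Init
	// signature are assumptions for illustration.
	flagLogLevel := flag.String("loglevel", "warn",
		"Sets the logging level: debug | info | notice | warn | err | crit")
	flagLogDate := flag.Bool("logdate", false,
		"Add date and time to every log line")
	flag.Parse()

	log.Init(*flagLogLevel, *flagLogDate)
	log.Infof("logging initialized: loglevel=%s, logdate=%v", *flagLogLevel, *flagLogDate)
}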


@@ -134,7 +134,7 @@ type ApiTag struct {
type TagJobApiRequest []*ApiTag
func handleError(err error, statusCode int, rw http.ResponseWriter) {
log.Warnf("API/REST > ERROR : %s", err.Error())
log.Warnf("REST ERROR : %s", err.Error())
rw.Header().Add("Content-Type", "application/json")
rw.WriteHeader(statusCode)
json.NewEncoder(rw).Encode(ErrorResponse{
@@ -169,7 +169,7 @@ func decode(r io.Reader, val interface{}) error {
// @router /jobs/ [get]
func (api *RestApi) getJobs(rw http.ResponseWriter, r *http.Request) {
if user := auth.GetUser(r.Context()); user != nil && !user.HasRole(auth.RoleApi) {
handleError(fmt.Errorf("API/REST > missing role: %#v", auth.RoleApi), http.StatusForbidden, rw)
handleError(fmt.Errorf("missing role: %#v", auth.RoleApi), http.StatusForbidden, rw)
return
}
@@ -184,7 +184,7 @@ func (api *RestApi) getJobs(rw http.ResponseWriter, r *http.Request) {
for _, s := range vals {
state := schema.JobState(s)
if !state.Valid() {
http.Error(rw, "invalid query parameter value: state", http.StatusBadRequest)
http.Error(rw, "REST > invalid query parameter value: state", http.StatusBadRequest)
return
}
filter.State = append(filter.State, state)
@@ -194,7 +194,7 @@ func (api *RestApi) getJobs(rw http.ResponseWriter, r *http.Request) {
case "start-time":
st := strings.Split(vals[0], "-")
if len(st) != 2 {
http.Error(rw, "invalid query parameter value: startTime", http.StatusBadRequest)
http.Error(rw, "REST > invalid query parameter value: startTime", http.StatusBadRequest)
return
}
from, err := strconv.ParseInt(st[0], 10, 64)
@@ -226,7 +226,7 @@ func (api *RestApi) getJobs(rw http.ResponseWriter, r *http.Request) {
case "with-metadata":
withMetadata = true
default:
http.Error(rw, "invalid query parameter: "+key, http.StatusBadRequest)
http.Error(rw, "REST > invalid query parameter: "+key, http.StatusBadRequest)
return
}
}
@@ -271,7 +271,7 @@ func (api *RestApi) getJobs(rw http.ResponseWriter, r *http.Request) {
results = append(results, res)
}
log.Debugf("API/REST > /api/jobs: %d jobs returned", len(results))
log.Debugf("/api/jobs: %d jobs returned", len(results))
bw := bufio.NewWriter(rw)
defer bw.Flush()
if err := json.NewEncoder(bw).Encode(map[string]interface{}{
@@ -300,7 +300,7 @@ func (api *RestApi) getJobs(rw http.ResponseWriter, r *http.Request) {
// @router /jobs/tag_job/{id} [post]
func (api *RestApi) tagJob(rw http.ResponseWriter, r *http.Request) {
if user := auth.GetUser(r.Context()); user != nil && !user.HasRole(auth.RoleApi) {
handleError(fmt.Errorf("API/REST > missing role: %#v", auth.RoleApi), http.StatusForbidden, rw)
handleError(fmt.Errorf("missing role: %#v", auth.RoleApi), http.StatusForbidden, rw)
return
}
@@ -365,13 +365,13 @@ func (api *RestApi) tagJob(rw http.ResponseWriter, r *http.Request) {
// @router /jobs/start_job/ [post]
func (api *RestApi) startJob(rw http.ResponseWriter, r *http.Request) {
if user := auth.GetUser(r.Context()); user != nil && !user.HasRole(auth.RoleApi) {
handleError(fmt.Errorf("API/REST > missing role: %#v", auth.RoleApi), http.StatusForbidden, rw)
handleError(fmt.Errorf("missing role: %#v", auth.RoleApi), http.StatusForbidden, rw)
return
}
req := schema.JobMeta{BaseJob: schema.JobDefaults}
if err := decode(r.Body, &req); err != nil {
handleError(fmt.Errorf("API/REST > parsing request body failed: %w", err), http.StatusBadRequest, rw)
handleError(fmt.Errorf("parsing request body failed: %w", err), http.StatusBadRequest, rw)
return
}
@@ -391,12 +391,12 @@ func (api *RestApi) startJob(rw http.ResponseWriter, r *http.Request) {
// Check if combination of (job_id, cluster_id, start_time) already exists:
jobs, err := api.JobRepository.FindAll(&req.JobID, &req.Cluster, nil)
if err != nil && err != sql.ErrNoRows {
handleError(fmt.Errorf("API/REST > checking for duplicate failed: %w", err), http.StatusInternalServerError, rw)
handleError(fmt.Errorf("checking for duplicate failed: %w", err), http.StatusInternalServerError, rw)
return
} else if err == nil {
for _, job := range jobs {
if (req.StartTime - job.StartTimeUnix) < 86400 {
handleError(fmt.Errorf("API/REST > a job with that jobId, cluster and startTime already exists: dbid: %d", job.ID), http.StatusUnprocessableEntity, rw)
handleError(fmt.Errorf("a job with that jobId, cluster and startTime already exists: dbid: %d", job.ID), http.StatusUnprocessableEntity, rw)
return
}
}
@@ -404,7 +404,7 @@ func (api *RestApi) startJob(rw http.ResponseWriter, r *http.Request) {
id, err := api.JobRepository.Start(&req)
if err != nil {
handleError(fmt.Errorf("API/REST > insert into database failed: %w", err), http.StatusInternalServerError, rw)
handleError(fmt.Errorf("insert into database failed: %w", err), http.StatusInternalServerError, rw)
return
}
// unlock here, adding Tags can be async
@@ -413,12 +413,12 @@ func (api *RestApi) startJob(rw http.ResponseWriter, r *http.Request) {
for _, tag := range req.Tags {
if _, err := api.JobRepository.AddTagOrCreate(id, tag.Type, tag.Name); err != nil {
http.Error(rw, err.Error(), http.StatusInternalServerError)
handleError(fmt.Errorf("API/REST > adding tag to new job %d failed: %w", id, err), http.StatusInternalServerError, rw)
handleError(fmt.Errorf("adding tag to new job %d failed: %w", id, err), http.StatusInternalServerError, rw)
return
}
}
log.Printf("API/REST > new job (id: %d): cluster=%s, jobId=%d, user=%s, startTime=%d", id, req.Cluster, req.JobID, req.User, req.StartTime)
log.Printf("new job (id: %d): cluster=%s, jobId=%d, user=%s, startTime=%d", id, req.Cluster, req.JobID, req.User, req.StartTime)
rw.Header().Add("Content-Type", "application/json")
rw.WriteHeader(http.StatusCreated)
json.NewEncoder(rw).Encode(StartJobApiResponse{
@@ -446,14 +446,14 @@ func (api *RestApi) startJob(rw http.ResponseWriter, r *http.Request) {
// @router /jobs/stop_job/{id} [post]
func (api *RestApi) stopJobById(rw http.ResponseWriter, r *http.Request) {
if user := auth.GetUser(r.Context()); user != nil && !user.HasRole(auth.RoleApi) {
handleError(fmt.Errorf("API/REST > missing role: %#v", auth.RoleApi), http.StatusForbidden, rw)
handleError(fmt.Errorf("missing role: %#v", auth.RoleApi), http.StatusForbidden, rw)
return
}
// Parse request body: Only StopTime and State
req := StopJobApiRequest{}
if err := decode(r.Body, &req); err != nil {
handleError(fmt.Errorf("API/REST > parsing request body failed: %w", err), http.StatusBadRequest, rw)
handleError(fmt.Errorf("parsing request body failed: %w", err), http.StatusBadRequest, rw)
return
}
@@ -464,17 +464,17 @@ func (api *RestApi) stopJobById(rw http.ResponseWriter, r *http.Request) {
if ok {
id, e := strconv.ParseInt(id, 10, 64)
if e != nil {
handleError(fmt.Errorf("API/REST > integer expected in path for id: %w", e), http.StatusBadRequest, rw)
handleError(fmt.Errorf("integer expected in path for id: %w", e), http.StatusBadRequest, rw)
return
}
job, err = api.JobRepository.FindById(id)
} else {
handleError(errors.New("API/REST > the parameter 'id' is required"), http.StatusBadRequest, rw)
handleError(errors.New("the parameter 'id' is required"), http.StatusBadRequest, rw)
return
}
if err != nil {
handleError(fmt.Errorf("API/REST > finding job failed: %w", err), http.StatusUnprocessableEntity, rw)
handleError(fmt.Errorf("finding job failed: %w", err), http.StatusUnprocessableEntity, rw)
return
}
@@ -499,14 +499,14 @@ func (api *RestApi) stopJobById(rw http.ResponseWriter, r *http.Request) {
// @router /jobs/stop_job/ [post]
func (api *RestApi) stopJobByRequest(rw http.ResponseWriter, r *http.Request) {
if user := auth.GetUser(r.Context()); user != nil && !user.HasRole(auth.RoleApi) {
handleError(fmt.Errorf("API/REST > missing role: %#v", auth.RoleApi), http.StatusForbidden, rw)
handleError(fmt.Errorf("missing role: %#v", auth.RoleApi), http.StatusForbidden, rw)
return
}
// Parse request body
req := StopJobApiRequest{}
if err := decode(r.Body, &req); err != nil {
handleError(fmt.Errorf("API/REST > parsing request body failed: %w", err), http.StatusBadRequest, rw)
handleError(fmt.Errorf("parsing request body failed: %w", err), http.StatusBadRequest, rw)
return
}
@@ -514,14 +514,14 @@ func (api *RestApi) stopJobByRequest(rw http.ResponseWriter, r *http.Request) {
var job *schema.Job
var err error
if req.JobId == nil {
handleError(errors.New("API/REST > the field 'jobId' is required"), http.StatusBadRequest, rw)
handleError(errors.New("the field 'jobId' is required"), http.StatusBadRequest, rw)
return
}
job, err = api.JobRepository.Find(req.JobId, req.Cluster, req.StartTime)
if err != nil {
handleError(fmt.Errorf("API/REST > finding job failed: %w", err), http.StatusUnprocessableEntity, rw)
handleError(fmt.Errorf("finding job failed: %w", err), http.StatusUnprocessableEntity, rw)
return
}
@@ -545,7 +545,7 @@ func (api *RestApi) stopJobByRequest(rw http.ResponseWriter, r *http.Request) {
// @router /jobs/delete_job/{id} [delete]
func (api *RestApi) deleteJobById(rw http.ResponseWriter, r *http.Request) {
if user := auth.GetUser(r.Context()); user != nil && !user.HasRole(auth.RoleApi) {
handleError(fmt.Errorf("API/REST > missing role: %#v", auth.RoleApi), http.StatusForbidden, rw)
handleError(fmt.Errorf("missing role: %#v", auth.RoleApi), http.StatusForbidden, rw)
return
}
@@ -555,23 +555,23 @@ func (api *RestApi) deleteJobById(rw http.ResponseWriter, r *http.Request) {
if ok {
id, e := strconv.ParseInt(id, 10, 64)
if e != nil {
handleError(fmt.Errorf("API/REST > integer expected in path for id: %w", e), http.StatusBadRequest, rw)
handleError(fmt.Errorf("integer expected in path for id: %w", e), http.StatusBadRequest, rw)
return
}
err = api.JobRepository.DeleteJobById(id)
} else {
handleError(errors.New("API/REST > the parameter 'id' is required"), http.StatusBadRequest, rw)
handleError(errors.New("the parameter 'id' is required"), http.StatusBadRequest, rw)
return
}
if err != nil {
handleError(fmt.Errorf("API/REST > deleting job failed: %w", err), http.StatusUnprocessableEntity, rw)
handleError(fmt.Errorf("deleting job failed: %w", err), http.StatusUnprocessableEntity, rw)
return
}
rw.Header().Add("Content-Type", "application/json")
rw.WriteHeader(http.StatusOK)
json.NewEncoder(rw).Encode(DeleteJobApiResponse{
Message: fmt.Sprintf("API/REST > Successfully deleted job %s", id),
Message: fmt.Sprintf("Successfully deleted job %s", id),
})
}
@@ -593,14 +593,14 @@ func (api *RestApi) deleteJobById(rw http.ResponseWriter, r *http.Request) {
// @router /jobs/delete_job/ [delete]
func (api *RestApi) deleteJobByRequest(rw http.ResponseWriter, r *http.Request) {
if user := auth.GetUser(r.Context()); user != nil && !user.HasRole(auth.RoleApi) {
handleError(fmt.Errorf("API/REST > missing role: %#v", auth.RoleApi), http.StatusForbidden, rw)
handleError(fmt.Errorf("missing role: %#v", auth.RoleApi), http.StatusForbidden, rw)
return
}
// Parse request body
req := DeleteJobApiRequest{}
if err := decode(r.Body, &req); err != nil {
handleError(fmt.Errorf("API/REST > parsing request body failed: %w", err), http.StatusBadRequest, rw)
handleError(fmt.Errorf("parsing request body failed: %w", err), http.StatusBadRequest, rw)
return
}
@@ -608,27 +608,27 @@ func (api *RestApi) deleteJobByRequest(rw http.ResponseWriter, r *http.Request)
var job *schema.Job
var err error
if req.JobId == nil {
handleError(errors.New("API/REST > the field 'jobId' is required"), http.StatusBadRequest, rw)
handleError(errors.New("the field 'jobId' is required"), http.StatusBadRequest, rw)
return
}
job, err = api.JobRepository.Find(req.JobId, req.Cluster, req.StartTime)
if err != nil {
handleError(fmt.Errorf("API/REST > finding job failed: %w", err), http.StatusUnprocessableEntity, rw)
handleError(fmt.Errorf("finding job failed: %w", err), http.StatusUnprocessableEntity, rw)
return
}
err = api.JobRepository.DeleteJobById(job.ID)
if err != nil {
handleError(fmt.Errorf("API/REST > deleting job failed: %w", err), http.StatusUnprocessableEntity, rw)
handleError(fmt.Errorf("deleting job failed: %w", err), http.StatusUnprocessableEntity, rw)
return
}
rw.Header().Add("Content-Type", "application/json")
rw.WriteHeader(http.StatusOK)
json.NewEncoder(rw).Encode(DeleteJobApiResponse{
Message: fmt.Sprintf("API/REST > Successfully deleted job %d", job.ID),
Message: fmt.Sprintf("Successfully deleted job %d", job.ID),
})
}
@@ -649,7 +649,7 @@ func (api *RestApi) deleteJobByRequest(rw http.ResponseWriter, r *http.Request)
// @router /jobs/delete_job_before/{ts} [delete]
func (api *RestApi) deleteJobBefore(rw http.ResponseWriter, r *http.Request) {
if user := auth.GetUser(r.Context()); user != nil && !user.HasRole(auth.RoleApi) {
handleError(fmt.Errorf("API/REST > missing role: %#v", auth.RoleApi), http.StatusForbidden, rw)
handleError(fmt.Errorf("missing role: %#v", auth.RoleApi), http.StatusForbidden, rw)
return
}
@@ -660,24 +660,24 @@ func (api *RestApi) deleteJobBefore(rw http.ResponseWriter, r *http.Request) {
if ok {
ts, e := strconv.ParseInt(id, 10, 64)
if e != nil {
handleError(fmt.Errorf("API/REST > integer expected in path for ts: %w", e), http.StatusBadRequest, rw)
handleError(fmt.Errorf("integer expected in path for ts: %w", e), http.StatusBadRequest, rw)
return
}
cnt, err = api.JobRepository.DeleteJobsBefore(ts)
} else {
handleError(errors.New("API/REST > the parameter 'ts' is required"), http.StatusBadRequest, rw)
handleError(errors.New("the parameter 'ts' is required"), http.StatusBadRequest, rw)
return
}
if err != nil {
handleError(fmt.Errorf("API/REST > deleting jobs failed: %w", err), http.StatusUnprocessableEntity, rw)
handleError(fmt.Errorf("deleting jobs failed: %w", err), http.StatusUnprocessableEntity, rw)
return
}
rw.Header().Add("Content-Type", "application/json")
rw.WriteHeader(http.StatusOK)
json.NewEncoder(rw).Encode(DeleteJobApiResponse{
Message: fmt.Sprintf("API/REST > Successfully deleted %d jobs", cnt),
Message: fmt.Sprintf("Successfully deleted %d jobs", cnt),
})
}
@@ -685,12 +685,12 @@ func (api *RestApi) checkAndHandleStopJob(rw http.ResponseWriter, job *schema.Jo
// Sanity checks
if job == nil || job.StartTime.Unix() >= req.StopTime || job.State != schema.JobStateRunning {
handleError(errors.New("API/REST > stopTime must be larger than startTime and only running jobs can be stopped"), http.StatusBadRequest, rw)
handleError(errors.New("stopTime must be larger than startTime and only running jobs can be stopped"), http.StatusBadRequest, rw)
return
}
if req.State != "" && !req.State.Valid() {
handleError(fmt.Errorf("API/REST > invalid job state: %#v", req.State), http.StatusBadRequest, rw)
handleError(fmt.Errorf("invalid job state: %#v", req.State), http.StatusBadRequest, rw)
return
} else if req.State == "" {
req.State = schema.JobStateCompleted
@@ -700,11 +700,11 @@ func (api *RestApi) checkAndHandleStopJob(rw http.ResponseWriter, job *schema.Jo
job.Duration = int32(req.StopTime - job.StartTime.Unix())
job.State = req.State
if err := api.JobRepository.Stop(job.ID, job.Duration, job.State, job.MonitoringStatus); err != nil {
handleError(fmt.Errorf("API/REST > marking job as stopped failed: %w", err), http.StatusInternalServerError, rw)
handleError(fmt.Errorf("marking job as stopped failed: %w", err), http.StatusInternalServerError, rw)
return
}
log.Printf("API/REST > archiving job... (dbid: %d): cluster=%s, jobId=%d, user=%s, startTime=%s", job.ID, job.Cluster, job.JobID, job.User, job.StartTime)
log.Printf("archiving job... (dbid: %d): cluster=%s, jobId=%d, user=%s, startTime=%s", job.ID, job.Cluster, job.JobID, job.User, job.StartTime)
// Send a response (with status OK). This means that erros that happen from here on forward
// can *NOT* be communicated to the client. If reading from a MetricDataRepository or
@@ -724,7 +724,7 @@ func (api *RestApi) checkAndHandleStopJob(rw http.ResponseWriter, job *schema.Jo
// func (api *RestApi) importJob(rw http.ResponseWriter, r *http.Request) {
// if user := auth.GetUser(r.Context()); user != nil && !user.HasRole(auth.RoleApi) {
// handleError(fmt.Errorf("API/REST > missing role: %#v", auth.RoleApi), http.StatusForbidden, rw)
// handleError(fmt.Errorf("missing role: %#v", auth.RoleApi), http.StatusForbidden, rw)
// return
// }
@@ -733,12 +733,12 @@ func (api *RestApi) checkAndHandleStopJob(rw http.ResponseWriter, job *schema.Jo
// Data *schema.JobData `json:"data"`
// }
// if err := decode(r.Body, &body); err != nil {
// handleError(fmt.Errorf("API/REST > import failed: %s", err.Error()), http.StatusBadRequest, rw)
// handleError(fmt.Errorf("import failed: %s", err.Error()), http.StatusBadRequest, rw)
// return
// }
// if err := api.JobRepository.ImportJob(body.Meta, body.Data); err != nil {
// handleError(fmt.Errorf("API/REST > import failed: %s", err.Error()), http.StatusUnprocessableEntity, rw)
// handleError(fmt.Errorf("import failed: %s", err.Error()), http.StatusUnprocessableEntity, rw)
// return
// }
@@ -793,7 +793,7 @@ func (api *RestApi) getJWT(rw http.ResponseWriter, r *http.Request) {
me := auth.GetUser(r.Context())
if !me.HasRole(auth.RoleAdmin) {
if username != me.Username {
http.Error(rw, "API/REST > only admins are allowed to sign JWTs not for themselves", http.StatusForbidden)
http.Error(rw, "REST > only admins are allowed to sign JWTs not for themselves", http.StatusForbidden)
return
}
}
@@ -818,13 +818,13 @@ func (api *RestApi) createUser(rw http.ResponseWriter, r *http.Request) {
rw.Header().Set("Content-Type", "text/plain")
me := auth.GetUser(r.Context())
if !me.HasRole(auth.RoleAdmin) {
http.Error(rw, "API/REST > only admins are allowed to create new users", http.StatusForbidden)
http.Error(rw, "REST > only admins are allowed to create new users", http.StatusForbidden)
return
}
username, password, role, name, email := r.FormValue("username"), r.FormValue("password"), r.FormValue("role"), r.FormValue("name"), r.FormValue("email")
if len(password) == 0 && role != auth.RoleApi {
http.Error(rw, "API/REST > only API users are allowed to have a blank password (login will be impossible)", http.StatusBadRequest)
http.Error(rw, "REST > only API users are allowed to have a blank password (login will be impossible)", http.StatusBadRequest)
return
}
@@ -838,12 +838,12 @@ func (api *RestApi) createUser(rw http.ResponseWriter, r *http.Request) {
return
}
rw.Write([]byte(fmt.Sprintf("API/REST > User %#v successfully created!\n", username)))
rw.Write([]byte(fmt.Sprintf("User %#v successfully created!\n", username)))
}
func (api *RestApi) deleteUser(rw http.ResponseWriter, r *http.Request) {
if user := auth.GetUser(r.Context()); !user.HasRole(auth.RoleAdmin) {
http.Error(rw, "API/REST > only admins are allowed to delete a user", http.StatusForbidden)
http.Error(rw, "REST > only admins are allowed to delete a user", http.StatusForbidden)
return
}
@@ -858,7 +858,7 @@ func (api *RestApi) deleteUser(rw http.ResponseWriter, r *http.Request) {
func (api *RestApi) getUsers(rw http.ResponseWriter, r *http.Request) {
if user := auth.GetUser(r.Context()); !user.HasRole(auth.RoleAdmin) {
http.Error(rw, "API/REST > only admins are allowed to fetch a list of users", http.StatusForbidden)
http.Error(rw, "REST > only admins are allowed to fetch a list of users", http.StatusForbidden)
return
}
@@ -873,7 +873,7 @@ func (api *RestApi) getUsers(rw http.ResponseWriter, r *http.Request) {
func (api *RestApi) updateUser(rw http.ResponseWriter, r *http.Request) {
if user := auth.GetUser(r.Context()); !user.HasRole(auth.RoleAdmin) {
http.Error(rw, "API/REST > only admins are allowed to update a user", http.StatusForbidden)
http.Error(rw, "REST > only admins are allowed to update a user", http.StatusForbidden)
return
}
@@ -893,9 +893,9 @@ func (api *RestApi) updateUser(rw http.ResponseWriter, r *http.Request) {
http.Error(rw, err.Error(), http.StatusUnprocessableEntity)
return
}
rw.Write([]byte("Remove Role Success"))
rw.Write([]byte("REST > Remove Role Success"))
} else {
http.Error(rw, "Not Add or Del?", http.StatusInternalServerError)
http.Error(rw, "REST > Not Add or Del?", http.StatusInternalServerError)
}
}
@@ -903,7 +903,7 @@ func (api *RestApi) updateConfiguration(rw http.ResponseWriter, r *http.Request)
rw.Header().Set("Content-Type", "text/plain")
key, value := r.FormValue("key"), r.FormValue("value")
fmt.Printf("API/REST > KEY: %#v\nVALUE: %#v\n", key, value)
fmt.Printf("REST > KEY: %#v\nVALUE: %#v\n", key, value)
if err := repository.GetUserCfgRepo().UpdateConfig(key, value, auth.GetUser(r.Context())); err != nil {
http.Error(rw, err.Error(), http.StatusUnprocessableEntity)
@@ -915,7 +915,7 @@ func (api *RestApi) updateConfiguration(rw http.ResponseWriter, r *http.Request)
func (api *RestApi) putMachineState(rw http.ResponseWriter, r *http.Request) {
if api.MachineStateDir == "" {
http.Error(rw, "API/REST > machine state not enabled", http.StatusNotFound)
http.Error(rw, "REST > machine state not enabled", http.StatusNotFound)
return
}
@@ -928,7 +928,7 @@ func (api *RestApi) putMachineState(rw http.ResponseWriter, r *http.Request) {
return
}
filename := filepath.Join(dir, fmt.Sprintf("API/REST > %s.json", host))
filename := filepath.Join(dir, fmt.Sprintf("%s.json", host))
f, err := os.Create(filename)
if err != nil {
http.Error(rw, err.Error(), http.StatusInternalServerError)
@@ -946,12 +946,12 @@ func (api *RestApi) putMachineState(rw http.ResponseWriter, r *http.Request) {
func (api *RestApi) getMachineState(rw http.ResponseWriter, r *http.Request) {
if api.MachineStateDir == "" {
http.Error(rw, "API/REST > machine state not enabled", http.StatusNotFound)
http.Error(rw, "REST > machine state not enabled", http.StatusNotFound)
return
}
vars := mux.Vars(r)
filename := filepath.Join(api.MachineStateDir, vars["cluster"], fmt.Sprintf("API/REST > %s.json", vars["host"]))
filename := filepath.Join(api.MachineStateDir, vars["cluster"], fmt.Sprintf("%s.json", vars["host"]))
// Sets the content-type and 'Last-Modified' Header and so on automatically
http.ServeFile(rw, r, filename)


@@ -99,7 +99,7 @@ func Init(db *sqlx.DB,
sessKey := os.Getenv("SESSION_KEY")
if sessKey == "" {
log.Warn("AUTH/AUTH > environment variable 'SESSION_KEY' not set (will use non-persistent random key)")
log.Warn("environment variable 'SESSION_KEY' not set (will use non-persistent random key)")
bytes := make([]byte, 32)
if _, err := rand.Read(bytes); err != nil {
return nil, err
@@ -169,7 +169,7 @@ func (auth *Authentication) Login(
user := (*User)(nil)
if username != "" {
if user, _ = auth.GetUser(username); err != nil {
// log.Warnf("AUTH/AUTH > login of unkown user %#v", username)
// log.Warnf("login of unkown user %#v", username)
_ = err
}
}
@@ -181,14 +181,14 @@ func (auth *Authentication) Login(
user, err = authenticator.Login(user, rw, r)
if err != nil {
log.Warnf("AUTH/AUTH > user '%s' login failed: %s", user.Username, err.Error())
log.Warnf("user '%s' login failed: %s", user.Username, err.Error())
onfailure(rw, r, err)
return
}
session, err := auth.sessionStore.New(r, "session")
if err != nil {
log.Errorf("AUTH/AUTH > session creation failed: %s", err.Error())
log.Errorf("session creation failed: %s", err.Error())
http.Error(rw, err.Error(), http.StatusInternalServerError)
return
}
@@ -199,18 +199,18 @@ func (auth *Authentication) Login(
session.Values["username"] = user.Username
session.Values["roles"] = user.Roles
if err := auth.sessionStore.Save(r, rw, session); err != nil {
log.Errorf("AUTH/AUTH > session save failed: %s", err.Error())
log.Errorf("session save failed: %s", err.Error())
http.Error(rw, err.Error(), http.StatusInternalServerError)
return
}
log.Infof("AUTH/AUTH > login successfull: user: %#v (roles: %v)", user.Username, user.Roles)
log.Infof("login successfull: user: %#v (roles: %v)", user.Username, user.Roles)
ctx := context.WithValue(r.Context(), ContextUserKey, user)
onsuccess.ServeHTTP(rw, r.WithContext(ctx))
return
}
log.Warn("AUTH/AUTH > login failed: no authenticator applied")
log.Warn("login failed: no authenticator applied")
onfailure(rw, r, err)
})
}
@@ -226,7 +226,7 @@ func (auth *Authentication) Auth(
for _, authenticator := range auth.authenticators {
user, err := authenticator.Auth(rw, r)
if err != nil {
log.Warnf("AUTH/AUTH > authentication failed: %s", err.Error())
log.Warnf("authentication failed: %s", err.Error())
http.Error(rw, err.Error(), http.StatusUnauthorized)
return
}
@@ -239,7 +239,7 @@ func (auth *Authentication) Auth(
return
}
log.Warnf("AUTH/AUTH > authentication failed: %s", "no authenticator applied")
log.Warnf("authentication failed: %s", "no authenticator applied")
// http.Error(rw, http.StatusText(http.StatusUnauthorized), http.StatusUnauthorized)
onfailure(rw, r, errors.New("unauthorized (login first or use a token)"))
})


@@ -41,7 +41,7 @@ func (ja *JWTAuthenticator) Init(auth *Authentication, conf interface{}) error {
pubKey, privKey := os.Getenv("JWT_PUBLIC_KEY"), os.Getenv("JWT_PRIVATE_KEY")
if pubKey == "" || privKey == "" {
log.Warn("AUTH/JWT > environment variables 'JWT_PUBLIC_KEY' or 'JWT_PRIVATE_KEY' not set (token based authentication will not work)")
log.Warn("environment variables 'JWT_PUBLIC_KEY' or 'JWT_PRIVATE_KEY' not set (token based authentication will not work)")
} else {
bytes, err := base64.StdEncoding.DecodeString(pubKey)
if err != nil {
@@ -75,20 +75,20 @@ func (ja *JWTAuthenticator) Init(auth *Authentication, conf interface{}) error {
// Warn if other necessary settings are not configured
if ja.config != nil {
if ja.config.CookieName == "" {
log.Warn("AUTH/JWT > cookieName for JWTs not configured (cross login via JWT cookie will fail)")
log.Warn("cookieName for JWTs not configured (cross login via JWT cookie will fail)")
}
if !ja.config.ForceJWTValidationViaDatabase {
log.Warn("AUTH/JWT > forceJWTValidationViaDatabase not set to true: CC will accept users and roles defined in JWTs regardless of its own database!")
log.Warn("forceJWTValidationViaDatabase not set to true: CC will accept users and roles defined in JWTs regardless of its own database!")
}
if ja.config.TrustedExternalIssuer == "" {
log.Warn("AUTH/JWT > trustedExternalIssuer for JWTs not configured (cross login via JWT cookie will fail)")
log.Warn("trustedExternalIssuer for JWTs not configured (cross login via JWT cookie will fail)")
}
} else {
log.Warn("AUTH/JWT > cookieName and trustedExternalIssuer for JWTs not configured (cross login via JWT cookie will fail)")
log.Warn("cookieName and trustedExternalIssuer for JWTs not configured (cross login via JWT cookie will fail)")
}
} else {
ja.publicKeyCrossLogin = nil
log.Warn("AUTH/JWT > environment variable 'CROSS_LOGIN_JWT_PUBLIC_KEY' not set (cross login token based authentication will not work)")
log.Warn("environment variable 'CROSS_LOGIN_JWT_PUBLIC_KEY' not set (cross login token based authentication will not work)")
}
return nil
@@ -243,7 +243,7 @@ func (ja *JWTAuthenticator) Auth(
// Deny any logins for unknown usernames
if err != nil {
log.Warn("AUTH/JWT > Could not find user from JWT in internal database.")
log.Warn("Could not find user from JWT in internal database.")
return nil, errors.New("unknown user")
}
@@ -264,7 +264,7 @@ func (ja *JWTAuthenticator) Auth(
// Create a session so that we no longer need the JTW Cookie
session, err := ja.auth.sessionStore.New(r, "session")
if err != nil {
log.Errorf("AUTH/JWT > session creation failed: %s", err.Error())
log.Errorf("session creation failed: %s", err.Error())
http.Error(rw, err.Error(), http.StatusInternalServerError)
return nil, err
}
@@ -276,7 +276,7 @@ func (ja *JWTAuthenticator) Auth(
session.Values["roles"] = roles
if err := ja.auth.sessionStore.Save(r, rw, session); err != nil {
log.Errorf("AUTH/JWT > session save failed: %s", err.Error())
log.Errorf("session save failed: %s", err.Error())
http.Error(rw, err.Error(), http.StatusInternalServerError)
return nil, err
}


@@ -33,7 +33,7 @@ func (la *LdapAuthenticator) Init(
la.syncPassword = os.Getenv("LDAP_ADMIN_PASSWORD")
if la.syncPassword == "" {
log.Warn("AUTH/LDAP > environment variable 'LDAP_ADMIN_PASSWORD' not set (ldap sync will not work)")
log.Warn("environment variable 'LDAP_ADMIN_PASSWORD' not set (ldap sync will not work)")
}
if la.config != nil && la.config.SyncInterval != "" {
@@ -49,11 +49,11 @@ func (la *LdapAuthenticator) Init(
go func() {
ticker := time.NewTicker(interval)
for t := range ticker.C {
log.Printf("AUTH/LDAP > sync started at %s", t.Format(time.RFC3339))
log.Printf("sync started at %s", t.Format(time.RFC3339))
if err := la.Sync(); err != nil {
log.Errorf("AUTH/LDAP > sync failed: %s", err.Error())
log.Errorf("sync failed: %s", err.Error())
}
log.Print("AUTH/LDAP > sync done")
log.Print("sync done")
}
}()
}
@@ -147,13 +147,13 @@ func (la *LdapAuthenticator) Sync() error {
for username, where := range users {
if where == IN_DB && la.config.SyncDelOldUsers {
log.Debugf("AUTH/LDAP > sync: remove %#v (does not show up in LDAP anymore)", username)
log.Debugf("sync: remove %#v (does not show up in LDAP anymore)", username)
if _, err := la.auth.db.Exec(`DELETE FROM user WHERE user.username = ?`, username); err != nil {
return err
}
} else if where == IN_LDAP {
name := newnames[username]
log.Debugf("AUTH/LDAP > sync: add %#v (name: %#v, roles: [user], ldap: true)", username, name)
log.Debugf("sync: add %#v (name: %#v, roles: [user], ldap: true)", username, name)
if _, err := la.auth.db.Exec(`INSERT INTO user (username, ldap, name, roles) VALUES (?, ?, ?, ?)`,
username, 1, name, "[\""+RoleUser+"\"]"); err != nil {
return err


@@ -67,7 +67,7 @@ func (auth *Authentication) AddUser(user *User) error {
return err
}
log.Infof("AUTH/USERS > new user %#v created (roles: %s, auth-source: %d)", user.Username, rolesJson, user.AuthSource)
log.Infof("new user %#v created (roles: %s, auth-source: %d)", user.Username, rolesJson, user.AuthSource)
return nil
}


@@ -49,20 +49,20 @@ func Init(flagConfigFile string) {
raw, err := os.ReadFile(flagConfigFile)
if err != nil {
if !os.IsNotExist(err) {
log.Fatalf("CONFIG/CONFIG > ERROR: %v", err)
log.Fatalf("CONFIG ERROR: %v", err)
}
} else {
if err := schema.Validate(schema.Config, bytes.NewReader(raw)); err != nil {
log.Fatalf("CONFIG/CONFIG > Validate config: %v\n", err)
log.Fatalf("Validate config: %v\n", err)
}
dec := json.NewDecoder(bytes.NewReader(raw))
dec.DisallowUnknownFields()
if err := dec.Decode(&Keys); err != nil {
log.Fatalf("CONFIG/CONFIG > could not decode: %v", err)
log.Fatalf("could not decode: %v", err)
}
if Keys.Clusters == nil || len(Keys.Clusters) < 1 {
log.Fatal("CONFIG/CONFIG > At least one cluster required in config!")
log.Fatal("At least one cluster required in config!")
}
}
}


@@ -17,6 +17,7 @@ import (
"github.com/ClusterCockpit/cc-backend/internal/repository"
"github.com/ClusterCockpit/cc-backend/pkg/archive"
"github.com/ClusterCockpit/cc-backend/pkg/schema"
"github.com/ClusterCockpit/cc-backend/pkg/log"
)
// Partitions is the resolver for the partitions field.
@@ -51,7 +52,7 @@ func (r *mutationResolver) CreateTag(ctx context.Context, typeArg string, name s
// DeleteTag is the resolver for the deleteTag field.
func (r *mutationResolver) DeleteTag(ctx context.Context, id string) (string, error) {
panic(fmt.Errorf("GRAPH/RESOLVERS > not implemented: DeleteTag - deleteTag"))
log.Panic(fmt.Errorf("not implemented: DeleteTag - deleteTag"))
}
// AddTagsToJob is the resolver for the addTagsToJob field.
@@ -175,7 +176,7 @@ func (r *queryResolver) JobMetrics(ctx context.Context, id string, metrics []str
for name, md := range data {
for scope, metric := range md {
if metric.Scope != schema.MetricScope(scope) {
panic("GRAPH/RESOLVERS > metric.Scope != schema.MetricScope(scope) : Should not happen!")
log.Panic("metric.Scope != schema.MetricScope(scope) : Should not happen!")
}
res = append(res, &model.JobMetricWithName{


@@ -84,7 +84,7 @@ func (idb *InfluxDBv2DataRepository) LoadData(
switch scope {
case "node":
// Get Finest Granularity, Groupy By Measurement and Hostname (== Metric / Node), Calculate Mean for 60s windows
// log.Info("METRICDATA/INFLUXV2 > Scope 'node' requested. ")
// log.Info("Scope 'node' requested. ")
query = fmt.Sprintf(`
from(bucket: "%s")
|> range(start: %s, stop: %s)
@@ -97,10 +97,10 @@ func (idb *InfluxDBv2DataRepository) LoadData(
idb.formatTime(job.StartTime), idb.formatTime(idb.epochToTime(job.StartTimeUnix+int64(job.Duration)+int64(1))),
measurementsCond, hostsCond)
case "socket":
log.Info("METRICDATA/INFLUXV2 > Scope 'socket' requested, but not yet supported: Will return 'node' scope only. ")
log.Info("Scope 'socket' requested, but not yet supported: Will return 'node' scope only. ")
continue
case "core":
log.Info("METRICDATA/INFLUXV2 > Scope 'core' requested, but not yet supported: Will return 'node' scope only. ")
log.Info(" Scope 'core' requested, but not yet supported: Will return 'node' scope only. ")
continue
// Get Finest Granularity only, Set NULL to 0.0
// query = fmt.Sprintf(`
@@ -114,7 +114,7 @@ func (idb *InfluxDBv2DataRepository) LoadData(
// idb.formatTime(job.StartTime), idb.formatTime(idb.epochToTime(job.StartTimeUnix + int64(job.Duration) + int64(1) )),
// measurementsCond, hostsCond)
default:
log.Info("METRICDATA/INFLUXV2 > Unknown Scope requested: Will return 'node' scope. ")
log.Info("Unknown Scope requested: Will return 'node' scope. ")
continue
// return nil, errors.New("METRICDATA/INFLUXV2 > the InfluxDB metric data repository does not yet support other scopes than 'node'")
}
@@ -319,7 +319,7 @@ func (idb *InfluxDBv2DataRepository) LoadNodeData(
ctx context.Context) (map[string]map[string][]*schema.JobMetric, error) {
// TODO : Implement to be used in Analysis- und System/Node-View
log.Infof("METRICDATA/INFLUXV2 > LoadNodeData unimplemented for InfluxDBv2DataRepository, Args: cluster %s, metrics %v, nodes %v, scopes %v", cluster, metrics, nodes, scopes)
log.Infof("LoadNodeData unimplemented for InfluxDBv2DataRepository, Args: cluster %s, metrics %v, nodes %v, scopes %v", cluster, metrics, nodes, scopes)
return nil, errors.New("METRICDATA/INFLUXV2 > unimplemented for InfluxDBv2DataRepository")
}


@@ -107,7 +107,7 @@ func LoadData(job *schema.Job,
jd, err = repo.LoadData(job, metrics, scopes, ctx)
if err != nil {
if len(jd) != 0 {
log.Errorf("METRICDATA/METRICDATA > partial error: %s", err.Error())
log.Errorf("partial error: %s", err.Error())
} else {
return err, 0, 0
}
@@ -229,7 +229,7 @@ func LoadNodeData(
data, err := repo.LoadNodeData(cluster, metrics, nodes, scopes, from, to, ctx)
if err != nil {
if len(data) != 0 {
log.Errorf("METRICDATA/METRICDATA > partial error: %s", err.Error())
log.Errorf("partial error: %s", err.Error())
} else {
return nil, err
}


@@ -184,9 +184,9 @@ func (pdb *PrometheusDataRepository) Init(rawConfig json.RawMessage) error {
for metric, templ := range config.Templates {
pdb.templates[metric], err = template.New(metric).Parse(templ)
if err == nil {
log.Debugf("METRICDATA/PROMETHEUS > Added PromQL template for %s: %s", metric, templ)
log.Debugf("Added PromQL template for %s: %s", metric, templ)
} else {
log.Errorf("METRICDATA/PROMETHEUS > Failed to parse PromQL template %s for metric %s", templ, metric)
log.Errorf("Failed to parse PromQL template %s for metric %s", templ, metric)
}
}
return nil
@@ -216,7 +216,7 @@ func (pdb *PrometheusDataRepository) FormatQuery(
return "", errors.New(fmt.Sprintf("METRICDATA/PROMETHEUS > Error compiling template %v", templ))
} else {
query := buf.String()
log.Debugf("METRICDATA/PROMETHEUS > PromQL: %s", query)
log.Debugf("PromQL: %s", query)
return query, nil
}
} else {
@@ -283,14 +283,14 @@ func (pdb *PrometheusDataRepository) LoadData(
for _, scope := range scopes {
if scope != schema.MetricScopeNode {
logOnce.Do(func(){log.Infof("METRICDATA/PROMETHEUS > Scope '%s' requested, but not yet supported: Will return 'node' scope only.", scope)})
logOnce.Do(func(){log.Infof("Scope '%s' requested, but not yet supported: Will return 'node' scope only.", scope)})
continue
}
for _, metric := range metrics {
metricConfig := archive.GetMetricConfig(job.Cluster, metric)
if metricConfig == nil {
log.Errorf("METRICDATA/PROMETHEUS > Error in LoadData: Metric %s for cluster %s not configured", metric, job.Cluster)
log.Errorf("Error in LoadData: Metric %s for cluster %s not configured", metric, job.Cluster)
return nil, errors.New("METRICDATA/PROMETHEUS > Prometheus query error")
}
query, err := pdb.FormatQuery(metric, scope, nodes, job.Cluster)
@@ -307,7 +307,7 @@ func (pdb *PrometheusDataRepository) LoadData(
result, warnings, err := pdb.queryClient.QueryRange(ctx, query, r)
if err != nil {
log.Errorf("METRICDATA/PROMETHEUS > Prometheus query error in LoadData: %v\nQuery: %s", err, query)
log.Errorf("Prometheus query error in LoadData: %v\nQuery: %s", err, query)
return nil, errors.New("METRICDATA/PROMETHEUS > Prometheus query error")
}
if len(warnings) > 0 {
@@ -389,13 +389,13 @@ func (pdb *PrometheusDataRepository) LoadNodeData(
}
for _, scope := range scopes {
if scope != schema.MetricScopeNode {
logOnce.Do(func(){log.Infof("METRICDATA/PROMETHEUS > Note: Scope '%s' requested, but not yet supported: Will return 'node' scope only.", scope)})
logOnce.Do(func(){log.Infof("Note: Scope '%s' requested, but not yet supported: Will return 'node' scope only.", scope)})
continue
}
for _, metric := range metrics {
metricConfig := archive.GetMetricConfig(cluster, metric)
if metricConfig == nil {
log.Errorf("METRICDATA/PROMETHEUS > Error in LoadNodeData: Metric %s for cluster %s not configured", metric, cluster)
log.Errorf("Error in LoadNodeData: Metric %s for cluster %s not configured", metric, cluster)
return nil, errors.New("METRICDATA/PROMETHEUS > Prometheus querry error")
}
query, err := pdb.FormatQuery(metric, scope, nodes, cluster)
@@ -412,11 +412,11 @@ func (pdb *PrometheusDataRepository) LoadNodeData(
result, warnings, err := pdb.queryClient.QueryRange(ctx, query, r)
if err != nil {
log.Errorf("METRICDATA/PROMETHEUS > Prometheus query error in LoadNodeData: %v\n", err)
log.Errorf("Prometheus query error in LoadNodeData: %v\n", err)
return nil, errors.New("METRICDATA/PROMETHEUS > Prometheus querry error")
}
if len(warnings) > 0 {
log.Warnf("METRICDATA/PROMETHEUS > Warnings: %v\n", warnings)
log.Warnf("Warnings: %v\n", warnings)
}
step := int64(metricConfig.Timestep)
@@ -442,6 +442,6 @@ func (pdb *PrometheusDataRepository) LoadNodeData(
}
}
t1 := time.Since(t0)
log.Debugf("METRICDATA/PROMETHEUS > LoadNodeData of %v nodes took %s", len(data), t1)
log.Debugf("LoadNodeData of %v nodes took %s", len(data), t1)
return data, nil
}


@@ -39,14 +39,14 @@ func Connect(driver string, db string) {
} else if driver == "mysql" {
dbHandle, err = sqlx.Open("mysql", fmt.Sprintf("%s?multiStatements=true", db))
if err != nil {
log.Fatalf("REPOSITORY/DBCONNECTION > sqlx.Open() error: %v", err)
log.Fatalf("sqlx.Open() error: %v", err)
}
dbHandle.SetConnMaxLifetime(time.Minute * 3)
dbHandle.SetMaxOpenConns(10)
dbHandle.SetMaxIdleConns(10)
} else {
log.Fatalf("REPOSITORY/DBCONNECTION > unsupported database driver: %s", driver)
log.Fatalf("unsupported database driver: %s", driver)
}
dbConnInstance = &DBConnection{DB: dbHandle}
@@ -55,7 +55,7 @@ func Connect(driver string, db string) {
func GetConnection() *DBConnection {
if dbConnInstance == nil {
log.Fatalf("REPOSITORY/DBCONNECTION > Database connection not initialized!")
log.Fatalf("Database connection not initialized!")
}
return dbConnInstance


@@ -186,7 +186,7 @@ func HandleImportFlag(flag string) error {
}
}
log.Infof("REPOSITORY/INIT > successfully imported a new job (jobId: %d, cluster: %s, dbid: %d)", job.JobID, job.Cluster, id)
log.Infof("successfully imported a new job (jobId: %d, cluster: %s, dbid: %d)", job.JobID, job.Cluster, id)
}
return nil
}
@@ -260,34 +260,34 @@ func InitDB() error {
job.RawResources, err = json.Marshal(job.Resources)
if err != nil {
log.Errorf("REPOSITORY/INIT > repository initDB(): %v", err)
log.Errorf("repository initDB(): %v", err)
errorOccured++
continue
}
job.RawMetaData, err = json.Marshal(job.MetaData)
if err != nil {
log.Errorf("REPOSITORY/INIT > repository initDB(): %v", err)
log.Errorf("repository initDB(): %v", err)
errorOccured++
continue
}
if err := SanityChecks(&job.BaseJob); err != nil {
log.Errorf("REPOSITORY/INIT > repository initDB(): %v", err)
log.Errorf("repository initDB(): %v", err)
errorOccured++
continue
}
res, err := stmt.Exec(job)
if err != nil {
log.Errorf("REPOSITORY/INIT > repository initDB(): %v", err)
log.Errorf("repository initDB(): %v", err)
errorOccured++
continue
}
id, err := res.LastInsertId()
if err != nil {
log.Errorf("REPOSITORY/INIT > repository initDB(): %v", err)
log.Errorf("repository initDB(): %v", err)
errorOccured++
continue
}
@@ -318,7 +318,7 @@ func InitDB() error {
}
if errorOccured > 0 {
log.Errorf("REPOSITORY/INIT > Error in import of %d jobs!", errorOccured)
log.Errorf("Error in import of %d jobs!", errorOccured)
}
if err := tx.Commit(); err != nil {


@@ -259,9 +259,9 @@ func (r *JobRepository) DeleteJobsBefore(startTime int64) (int, error) {
err := r.DB.Get(&cnt, qs) //ignore error as it will also occur in delete statement
_, err = r.DB.Exec(`DELETE FROM job WHERE job.start_time < ?`, startTime)
if err != nil {
log.Warnf("REPOSITORY/JOB > DeleteJobsBefore(%d): error %v", startTime, err)
log.Warnf(" DeleteJobsBefore(%d): error %v", startTime, err)
} else {
log.Infof("REPOSITORY/JOB > DeleteJobsBefore(%d): Deleted %d jobs", startTime, cnt)
log.Infof("DeleteJobsBefore(%d): Deleted %d jobs", startTime, cnt)
}
return cnt, err
}
@@ -269,9 +269,9 @@ func (r *JobRepository) DeleteJobsBefore(startTime int64) (int, error) {
func (r *JobRepository) DeleteJobById(id int64) error {
_, err := r.DB.Exec(`DELETE FROM job WHERE job.id = ?`, id)
if err != nil {
log.Warnf("REPOSITORY/JOB > DeleteJobById(%d): error %v", id, err)
log.Warnf("DeleteJobById(%d): error %v", id, err)
} else {
log.Infof("REPOSITORY/JOB > DeleteJobById(%d): Success", id)
log.Infof("DeleteJobById(%d): Success", id)
}
return err
}
@@ -376,7 +376,7 @@ func (r *JobRepository) archivingWorker(){
// not using meta data, called to load JobMeta into Cache?
// will fail if job meta not in repository
if _, err := r.FetchMetadata(job); err != nil {
log.Errorf("REPOSITORY/JOB > archiving job (dbid: %d) failed: %s", job.ID, err.Error())
log.Errorf("archiving job (dbid: %d) failed: %s", job.ID, err.Error())
r.UpdateMonitoringStatus(job.ID, schema.MonitoringStatusArchivingFailed)
continue
}
@@ -385,18 +385,18 @@ func (r *JobRepository) archivingWorker(){
// TODO: Maybe use context with cancel/timeout here
jobMeta, err := metricdata.ArchiveJob(job, context.Background())
if err != nil {
log.Errorf("REPOSITORY/JOB > archiving job (dbid: %d) failed: %s", job.ID, err.Error())
log.Errorf("archiving job (dbid: %d) failed: %s", job.ID, err.Error())
r.UpdateMonitoringStatus(job.ID, schema.MonitoringStatusArchivingFailed)
continue
}
// Update the jobs database entry one last time:
if err := r.MarkArchived(job.ID, schema.MonitoringStatusArchivingSuccessful, jobMeta.Statistics); err != nil {
log.Errorf("REPOSITORY/JOB > archiving job (dbid: %d) failed: %s", job.ID, err.Error())
log.Errorf("archiving job (dbid: %d) failed: %s", job.ID, err.Error())
continue
}
log.Printf("REPOSITORY/JOB > archiving job (dbid: %d) successful", job.ID)
log.Printf("archiving job (dbid: %d) successful", job.ID)
r.archivePending.Done()
}
}
@@ -523,7 +523,7 @@ func (r *JobRepository) StopJobsExceedingWalltimeBy(seconds int) error {
}
if rowsAffected > 0 {
log.Warnf("REPOSITORY/JOB > %d jobs have been marked as failed due to running too long", rowsAffected)
log.Warnf("%d jobs have been marked as failed due to running too long", rowsAffected)
}
return nil
}


@@ -54,7 +54,7 @@ func (r *JobRepository) QueryJobs(
return nil, err
}
log.Debugf("REPOSITORY/QUERY > SQL query: `%s`, args: %#v", sql, args)
log.Debugf("SQL query: `%s`, args: %#v", sql, args)
rows, err := query.RunWith(r.stmtCache).Query()
if err != nil {
return nil, err
@@ -209,7 +209,7 @@ var matchAllCap = regexp.MustCompile("([a-z0-9])([A-Z])")
func toSnakeCase(str string) string {
for _, c := range str {
if c == '\'' || c == '\\' {
panic("REPOSITORY/QUERY > toSnakeCase() attack vector!")
log.Panic("toSnakeCase() attack vector!")
}
}


@@ -42,12 +42,12 @@ func GetUserCfgRepo() *UserCfgRepo {
FOREIGN KEY (username) REFERENCES user (username) ON DELETE CASCADE ON UPDATE NO ACTION);`)
if err != nil {
log.Fatalf("REPOSITORY/USER > db.DB.exec() error: %v", err)
log.Fatalf("db.DB.exec() error: %v", err)
}
lookupConfigStmt, err := db.DB.Preparex(`SELECT confkey, value FROM configuration WHERE configuration.username = ?`)
if err != nil {
log.Fatalf("REPOSITORY/USER > db.DB.Preparex() error: %v", err)
log.Fatalf("db.DB.Preparex() error: %v", err)
}
userCfgRepoInstance = &UserCfgRepo{


@@ -61,12 +61,12 @@ func setupHomeRoute(i InfoType, r *http.Request) InfoType {
State: []schema.JobState{schema.JobStateRunning},
}}, nil, nil)
if err != nil {
log.Errorf("ROUTERCONFIG/ROUTES > failed to count jobs: %s", err.Error())
log.Errorf("failed to count jobs: %s", err.Error())
runningJobs = map[string]int{}
}
totalJobs, err := jobRepo.CountGroupedJobs(r.Context(), model.AggregateCluster, nil, nil, nil)
if err != nil {
log.Errorf("ROUTERCONFIG/ROUTES > failed to count jobs: %s", err.Error())
log.Errorf("failed to count jobs: %s", err.Error())
totalJobs = map[string]int{}
}
from := time.Now().Add(-24 * time.Hour)
@@ -75,7 +75,7 @@ func setupHomeRoute(i InfoType, r *http.Request) InfoType {
Duration: &schema.IntRange{From: 0, To: graph.ShortJobDuration},
}}, nil, nil)
if err != nil {
log.Errorf("ROUTERCONFIG/ROUTES > failed to count jobs: %s", err.Error())
log.Errorf("failed to count jobs: %s", err.Error())
recentShortJobs = map[string]int{}
}
@@ -150,7 +150,7 @@ func setupTaglistRoute(i InfoType, r *http.Request) InfoType {
tags, counts, err := jobRepo.CountTags(username)
tagMap := make(map[string][]map[string]interface{})
if err != nil {
log.Errorf("ROUTERCONFIG/ROUTES > GetTags failed: %s", err.Error())
log.Errorf("GetTags failed: %s", err.Error())
i["tagmap"] = tagMap
return i
}


@@ -65,7 +65,7 @@ func LoadEnv(file string) error {
case '"':
sb.WriteRune('"')
default:
return fmt.Errorf("RUNTIME/SETUP > unsupprorted escape sequence in quoted string: backslash %#v", runes[i])
return fmt.Errorf("RUNTIME/SETUP > unsupported escape sequence in quoted string: backslash %#v", runes[i])
}
continue
}