Merge branch 'master' into dev

Jan Eitzinger 2024-11-09 09:01:04 +01:00
commit ee3710c5ed
4 changed files with 19 additions and 11 deletions

View File

@@ -486,11 +486,11 @@ func (api *RestApi) getCompleteJobById(rw http.ResponseWriter, r *http.Request)
 		job, err = api.JobRepository.FindById(r.Context(), id) // Get Job from Repo by ID
 	} else {
-		handleError(errors.New("the parameter 'id' is required"), http.StatusBadRequest, rw)
+		handleError(fmt.Errorf("the parameter 'id' is required"), http.StatusBadRequest, rw)
 		return
 	}
 	if err != nil {
-		handleError(fmt.Errorf("finding job failed: %w", err), http.StatusUnprocessableEntity, rw)
+		handleError(fmt.Errorf("finding job with db id %s failed: %w", id, err), http.StatusUnprocessableEntity, rw)
 		return
 	}
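
The hunks in this file switch from errors.New to fmt.Errorf and fold the job's database id into the message while still wrapping the underlying error with %w. A minimal sketch of why the %w verb matters; the sql.ErrNoRows cause and the id value below are purely illustrative:

package main

import (
	"database/sql"
	"errors"
	"fmt"
)

func main() {
	// Pretend the repository lookup failed with sql.ErrNoRows.
	lookupErr := sql.ErrNoRows
	id := "1337"

	// Wrapping with %w keeps the original error in the chain ...
	err := fmt.Errorf("finding job with db id %s failed: %w", id, lookupErr)

	// ... so callers can still branch on the concrete cause.
	fmt.Println(err)                           // message now carries the db id
	fmt.Println(errors.Is(err, sql.ErrNoRows)) // true
}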
@@ -526,7 +526,7 @@ func (api *RestApi) getCompleteJobById(rw http.ResponseWriter, r *http.Request)
 	if r.URL.Query().Get("all-metrics") == "true" {
 		data, err = metricDataDispatcher.LoadData(job, nil, scopes, r.Context(), resolution)
 		if err != nil {
-			log.Warn("Error while loading job data")
+			log.Warnf("REST: error while loading all-metrics job data for JobID %d on %s", job.JobID, job.Cluster)
 			return
 		}
 	}
@ -583,7 +583,7 @@ func (api *RestApi) getJobById(rw http.ResponseWriter, r *http.Request) {
return return
} }
if err != nil { if err != nil {
handleError(fmt.Errorf("finding job failed: %w", err), http.StatusUnprocessableEntity, rw) handleError(fmt.Errorf("finding job with db id %s failed: %w", id, err), http.StatusUnprocessableEntity, rw)
return return
} }
@@ -622,7 +622,7 @@ func (api *RestApi) getJobById(rw http.ResponseWriter, r *http.Request) {
 	data, err := metricDataDispatcher.LoadData(job, metrics, scopes, r.Context(), resolution)
 	if err != nil {
-		log.Warn("Error while loading job data")
+		log.Warnf("REST: error while loading job data for JobID %d on %s", job.JobID, job.Cluster)
 		return
 	}
@@ -916,6 +916,7 @@ func (api *RestApi) stopJobByRequest(rw http.ResponseWriter, r *http.Request) {
 		return
 	}
+	// log.Printf("loading db job for stopJobByRequest... : stopJobApiRequest=%v", req)
 	job, err = api.JobRepository.Find(req.JobId, req.Cluster, req.StartTime)
 	if err != nil {
 		handleError(fmt.Errorf("finding job failed: %w", err), http.StatusUnprocessableEntity, rw)
@@ -1065,12 +1066,12 @@ func (api *RestApi) deleteJobBefore(rw http.ResponseWriter, r *http.Request) {
 func (api *RestApi) checkAndHandleStopJob(rw http.ResponseWriter, job *schema.Job, req StopJobApiRequest) {
 	// Sanity checks
 	if job == nil || job.StartTime.Unix() >= req.StopTime || job.State != schema.JobStateRunning {
-		handleError(errors.New("stopTime must be larger than startTime and only running jobs can be stopped"), http.StatusBadRequest, rw)
+		handleError(fmt.Errorf("jobId %d (id %d) on %s : stopTime %d must be larger than startTime %d and only running jobs can be stopped (state is: %s)", job.JobID, job.ID, job.Cluster, req.StopTime, job.StartTime.Unix(), job.State), http.StatusBadRequest, rw)
 		return
 	}
 	if req.State != "" && !req.State.Valid() {
-		handleError(fmt.Errorf("invalid job state: %#v", req.State), http.StatusBadRequest, rw)
+		handleError(fmt.Errorf("jobId %d (id %d) on %s : invalid requested job state: %#v", job.JobID, job.ID, job.Cluster, req.State), http.StatusBadRequest, rw)
 		return
 	} else if req.State == "" {
 		req.State = schema.JobStateCompleted
@@ -1080,11 +1081,11 @@ func (api *RestApi) checkAndHandleStopJob(rw http.ResponseWriter, job *schema.Jo
 	job.Duration = int32(req.StopTime - job.StartTime.Unix())
 	job.State = req.State
 	if err := api.JobRepository.Stop(job.ID, job.Duration, job.State, job.MonitoringStatus); err != nil {
-		handleError(fmt.Errorf("marking job as stopped failed: %w", err), http.StatusInternalServerError, rw)
+		handleError(fmt.Errorf("jobId %d (id %d) on %s : marking job as '%s' (duration: %d) in DB failed: %w", job.JobID, job.ID, job.Cluster, job.State, job.Duration, err), http.StatusInternalServerError, rw)
 		return
 	}
-	log.Printf("archiving job... (dbid: %d): cluster=%s, jobId=%d, user=%s, startTime=%s", job.ID, job.Cluster, job.JobID, job.User, job.StartTime)
+	log.Printf("archiving job... (dbid: %d): cluster=%s, jobId=%d, user=%s, startTime=%s, duration=%d, state=%s", job.ID, job.Cluster, job.JobID, job.User, job.StartTime, job.Duration, job.State)
 	// Send a response (with status OK). This means that erros that happen from here on forward
 	// can *NOT* be communicated to the client. If reading from a MetricDataRepository or
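
The two hunks above add the job identifiers, the requested stop time and the computed duration to the error and log output of checkAndHandleStopJob. A small self-contained sketch of the sanity check and the duration arithmetic, with hypothetical local types standing in for schema.Job and StopJobApiRequest:

package main

import (
	"fmt"
	"time"
)

// Hypothetical stand-ins for the fields of schema.Job and StopJobApiRequest.
type job struct {
	JobID     int64
	StartTime time.Time
	State     string
}

type stopRequest struct {
	StopTime int64 // Unix seconds
}

func main() {
	j := job{JobID: 12345, StartTime: time.Unix(1700000000, 0), State: "running"}
	req := stopRequest{StopTime: 1700003600}

	// Mirrors the sanity check: only running jobs with StopTime > StartTime pass.
	if j.StartTime.Unix() >= req.StopTime || j.State != "running" {
		fmt.Println("stopTime must be larger than startTime and only running jobs can be stopped")
		return
	}

	// Duration is the difference of the two Unix timestamps.
	duration := int32(req.StopTime - j.StartTime.Unix())
	fmt.Printf("jobId %d: duration=%d\n", j.JobID, duration) // duration=3600
}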

View File

@@ -37,8 +37,9 @@ func (r *JobRepository) Find(
 		q = q.Where("job.start_time = ?", *startTime)
 	}
-	log.Debugf("Timer Find %s", time.Since(start))
+	q = q.OrderBy("job.id DESC") // always use newest matching job by db id if more than one match
+	log.Debugf("Timer Find %s", time.Since(start))
 	return scanJob(q.RunWith(r.stmtCache).QueryRow())
 }
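
The added OrderBy makes the single-row scan deterministic when more than one job matches the jobId/cluster/startTime filter: the newest database row wins. Assuming the builder is Masterminds/squirrel, which the q.RunWith(...).QueryRow() call suggests, this is roughly the SQL that results; the column selection and cluster value below are illustrative only:

package main

import (
	"fmt"

	sq "github.com/Masterminds/squirrel"
)

func main() {
	q := sq.Select("job.id", "job.job_id", "job.cluster").
		From("job").
		Where("job.job_id = ?", 12345).
		Where("job.cluster = ?", "testcluster").
		OrderBy("job.id DESC") // newest db row first, so QueryRow picks it

	sqlStr, args, err := q.ToSql()
	if err != nil {
		panic(err)
	}
	fmt.Println(sqlStr) // SELECT job.id, job.job_id, job.cluster FROM job WHERE job.job_id = ? AND job.cluster = ? ORDER BY job.id DESC
	fmt.Println(args)   // [12345 testcluster]
}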

View File

@@ -339,9 +339,10 @@ const ContextUserKey ContextKey = "user"
 func GetUserFromContext(ctx context.Context) *schema.User {
 	x := ctx.Value(ContextUserKey)
 	if x == nil {
+		log.Warnf("no user retrieved from context")
 		return nil
 	}
+	// log.Infof("user retrieved from context: %v", x.(*schema.User))
 	return x.(*schema.User)
 }
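
GetUserFromContext now logs a warning when nothing was stored under ContextUserKey. A minimal, self-contained sketch of the context.WithValue round trip this function relies on; the key and user types here are local stand-ins for the project's ContextKey and schema.User:

package main

import (
	"context"
	"fmt"
	"log"
)

type contextKey string

const userKey contextKey = "user"

type user struct{ Username string }

// getUserFromContext mirrors the shape of repository.GetUserFromContext.
func getUserFromContext(ctx context.Context) *user {
	x := ctx.Value(userKey)
	if x == nil {
		log.Println("no user retrieved from context")
		return nil
	}
	return x.(*user)
}

func main() {
	// Auth middleware would normally attach the authenticated user like this:
	ctx := context.WithValue(context.Background(), userKey, &user{Username: "jan"})

	fmt.Println(getUserFromContext(ctx).Username)         // jan
	fmt.Println(getUserFromContext(context.Background())) // <nil>, plus a log warning
}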

View File

@@ -52,15 +52,19 @@ func setupHomeRoute(i InfoType, r *http.Request) InfoType {
 	jobRepo := repository.GetJobRepository()
 	groupBy := model.AggregateCluster

+	// startJobCount := time.Now()
 	stats, err := jobRepo.JobCountGrouped(r.Context(), nil, &groupBy)
 	if err != nil {
 		log.Warnf("failed to count jobs: %s", err.Error())
 	}
+	// log.Infof("Timer HOME ROUTE startJobCount: %s", time.Since(startJobCount))

+	// startRunningJobCount := time.Now()
 	stats, err = jobRepo.AddJobCountGrouped(r.Context(), nil, &groupBy, stats, "running")
 	if err != nil {
 		log.Warnf("failed to count running jobs: %s", err.Error())
 	}
+	// log.Infof("Timer HOME ROUTE startRunningJobCount: %s", time.Since(startRunningJobCount))

 	i["clusters"] = stats
@@ -293,6 +297,7 @@ func SetupRoutes(router *mux.Router, buildInfo web.Build) {
 		// Get User -> What if NIL?
 		user := repository.GetUserFromContext(r.Context())
+		// Get Roles
 		availableRoles, _ := schema.GetValidRolesMap(user)
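
The "// Get User -> What if NIL?" comment remains an open question: GetUserFromContext can return nil, and that nil pointer is then handed to schema.GetValidRolesMap. One possible guard, shown only as a suggestion with stand-in types rather than the project's actual handler code:

package main

import (
	"fmt"
	"net/http"
)

type user struct{ Username string }

// getUserFromContext stands in for repository.GetUserFromContext and may return nil.
func getUserFromContext(r *http.Request) *user { return nil }

func handler(rw http.ResponseWriter, r *http.Request) {
	u := getUserFromContext(r)
	if u == nil {
		// Guard before deriving roles instead of passing a nil pointer on.
		http.Error(rw, "no user in request context", http.StatusUnauthorized)
		return
	}
	fmt.Fprintf(rw, "hello %s", u.Username)
}

func main() {
	http.HandleFunc("/", handler)
	// http.ListenAndServe(":8080", nil) // sketch only; server not started here
}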