Merge pull request #156 from giesselmann/log_levels

lower log level for frequent messages
This commit is contained in:
Jan Eitzinger 2023-06-23 10:33:44 +02:00 committed by GitHub
commit 2f6c16a7d8
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
7 changed files with 28 additions and 28 deletions

View File

@@ -341,7 +341,7 @@ func main() {
handlers.AllowedOrigins([]string{"*"}))) handlers.AllowedOrigins([]string{"*"})))
handler := handlers.CustomLoggingHandler(io.Discard, r, func(_ io.Writer, params handlers.LogFormatterParams) { handler := handlers.CustomLoggingHandler(io.Discard, r, func(_ io.Writer, params handlers.LogFormatterParams) {
if strings.HasPrefix(params.Request.RequestURI, "/api/") { if strings.HasPrefix(params.Request.RequestURI, "/api/") {
log.Infof("%s %s (%d, %.02fkb, %dms)", log.Debugf("%s %s (%d, %.02fkb, %dms)",
params.Request.Method, params.URL.RequestURI(), params.Request.Method, params.URL.RequestURI(),
params.StatusCode, float32(params.Size)/1024, params.StatusCode, float32(params.Size)/1024,
time.Since(params.TimeStamp).Milliseconds()) time.Since(params.TimeStamp).Milliseconds())

View File

@@ -523,7 +523,7 @@ func (api *RestApi) startJob(rw http.ResponseWriter, r *http.Request) {
} else if err == nil { } else if err == nil {
for _, job := range jobs { for _, job := range jobs {
if (req.StartTime - job.StartTimeUnix) < 86400 { if (req.StartTime - job.StartTimeUnix) < 86400 {
handleError(fmt.Errorf("a job with that jobId, cluster and startTime already exists: dbid: %d", job.ID), http.StatusUnprocessableEntity, rw) handleError(fmt.Errorf("a job with that jobId, cluster and startTime already exists: dbid: %d, jobid: %d", job.ID, job.JobID), http.StatusUnprocessableEntity, rw)
return return
} }
} }

View File

@@ -364,7 +364,7 @@ func (auth *Authentication) Login(
return return
} }
log.Warn("login failed: no authenticator applied") log.Debugf("login failed: no authenticator applied")
onfailure(rw, r, err) onfailure(rw, r, err)
}) })
} }
@@ -380,7 +380,7 @@ func (auth *Authentication) Auth(
for _, authenticator := range auth.authenticators { for _, authenticator := range auth.authenticators {
user, err := authenticator.Auth(rw, r) user, err := authenticator.Auth(rw, r)
if err != nil { if err != nil {
log.Warnf("authentication failed: %s", err.Error()) log.Infof("authentication failed: %s", err.Error())
http.Error(rw, err.Error(), http.StatusUnauthorized) http.Error(rw, err.Error(), http.StatusUnauthorized)
return return
} }
@@ -393,7 +393,7 @@ func (auth *Authentication) Auth(
return return
} }
log.Warnf("authentication failed: %s", "no authenticator applied") log.Debugf("authentication failed: %s", "no authenticator applied")
// http.Error(rw, http.StatusText(http.StatusUnauthorized), http.StatusUnauthorized) // http.Error(rw, http.StatusText(http.StatusUnauthorized), http.StatusUnauthorized)
onfailure(rw, r, errors.New("unauthorized (login first or use a token)")) onfailure(rw, r, errors.New("unauthorized (login first or use a token)"))
}) })

View File

@@ -16,13 +16,13 @@ type Hooks struct{}
// Before hook will print the query with it's args and return the context with the timestamp // Before hook will print the query with it's args and return the context with the timestamp
func (h *Hooks) Before(ctx context.Context, query string, args ...interface{}) (context.Context, error) { func (h *Hooks) Before(ctx context.Context, query string, args ...interface{}) (context.Context, error) {
log.Infof("SQL query %s %q", query, args) log.Debugf("SQL query %s %q", query, args)
return context.WithValue(ctx, "begin", time.Now()), nil return context.WithValue(ctx, "begin", time.Now()), nil
} }
// After hook will get the timestamp registered on the Before hook and print the elapsed time // After hook will get the timestamp registered on the Before hook and print the elapsed time
func (h *Hooks) After(ctx context.Context, query string, args ...interface{}) (context.Context, error) { func (h *Hooks) After(ctx context.Context, query string, args ...interface{}) (context.Context, error) {
begin := ctx.Value("begin").(time.Time) begin := ctx.Value("begin").(time.Time)
log.Infof("Took: %s\n", time.Since(begin)) log.Debugf("Took: %s\n", time.Since(begin))
return ctx, nil return ctx, nil
} }

View File

@@ -55,7 +55,6 @@ func GetJobRepository() *JobRepository {
// start archiving worker // start archiving worker
go jobRepoInstance.archivingWorker() go jobRepoInstance.archivingWorker()
}) })
return jobRepoInstance return jobRepoInstance
} }
@@ -178,7 +177,7 @@ func (r *JobRepository) FetchMetadata(job *schema.Job) (map[string]string, error
} }
r.cache.Put(cachekey, job.MetaData, len(job.RawMetaData), 24*time.Hour) r.cache.Put(cachekey, job.MetaData, len(job.RawMetaData), 24*time.Hour)
log.Infof("Timer FetchMetadata %s", time.Since(start)) log.Debugf("Timer FetchMetadata %s", time.Since(start))
return job.MetaData, nil return job.MetaData, nil
} }
@@ -238,7 +237,7 @@ func (r *JobRepository) Find(
q = q.Where("job.start_time = ?", *startTime) q = q.Where("job.start_time = ?", *startTime)
} }
log.Infof("Timer Find %s", time.Since(start)) log.Debugf("Timer Find %s", time.Since(start))
return scanJob(q.RunWith(r.stmtCache).QueryRow()) return scanJob(q.RunWith(r.stmtCache).QueryRow())
} }
@@ -278,7 +277,7 @@ func (r *JobRepository) FindAll(
} }
jobs = append(jobs, job) jobs = append(jobs, job)
} }
log.Infof("Timer FindAll %s", time.Since(start)) log.Debugf("Timer FindAll %s", time.Since(start))
return jobs, nil return jobs, nil
} }
@@ -344,7 +343,7 @@ func (r *JobRepository) DeleteJobsBefore(startTime int64) (int, error) {
if err != nil { if err != nil {
log.Errorf(" DeleteJobsBefore(%d): error %#v", startTime, err) log.Errorf(" DeleteJobsBefore(%d): error %#v", startTime, err)
} else { } else {
log.Infof("DeleteJobsBefore(%d): Deleted %d jobs", startTime, cnt) log.Debugf("DeleteJobsBefore(%d): Deleted %d jobs", startTime, cnt)
} }
return cnt, err return cnt, err
} }
@@ -354,7 +353,7 @@ func (r *JobRepository) DeleteJobById(id int64) error {
if err != nil { if err != nil {
log.Errorf("DeleteJobById(%d): error %#v", id, err) log.Errorf("DeleteJobById(%d): error %#v", id, err)
} else { } else {
log.Infof("DeleteJobById(%d): Success", id) log.Debugf("DeleteJobById(%d): Success", id)
} }
return err return err
} }
@@ -383,7 +382,7 @@ func (r *JobRepository) CountGroupedJobs(
count = fmt.Sprintf(`sum(job.num_nodes * (CASE WHEN job.job_state = "running" THEN %d - job.start_time ELSE job.duration END)) as count`, now) count = fmt.Sprintf(`sum(job.num_nodes * (CASE WHEN job.job_state = "running" THEN %d - job.start_time ELSE job.duration END)) as count`, now)
runner = r.DB runner = r.DB
default: default:
log.Infof("CountGroupedJobs() Weight %v unknown.", *weight) log.Debugf("CountGroupedJobs() Weight %v unknown.", *weight)
} }
} }
@@ -418,7 +417,7 @@ func (r *JobRepository) CountGroupedJobs(
counts[group] = count counts[group] = count
} }
log.Infof("Timer CountGroupedJobs %s", time.Since(start)) log.Debugf("Timer CountGroupedJobs %s", time.Since(start))
return counts, nil return counts, nil
} }
@@ -457,7 +456,7 @@ func (r *JobRepository) MarkArchived(
case "file_bw": case "file_bw":
stmt = stmt.Set("file_bw_avg", stats.Avg) stmt = stmt.Set("file_bw_avg", stats.Avg)
default: default:
log.Infof("MarkArchived() Metric '%v' unknown", metric) log.Debugf("MarkArchived() Metric '%v' unknown", metric)
} }
} }
@@ -476,6 +475,7 @@ func (r *JobRepository) archivingWorker() {
if !ok { if !ok {
break break
} }
start := time.Now()
// not using meta data, called to load JobMeta into Cache? // not using meta data, called to load JobMeta into Cache?
// will fail if job meta not in repository // will fail if job meta not in repository
if _, err := r.FetchMetadata(job); err != nil { if _, err := r.FetchMetadata(job); err != nil {
@@ -498,7 +498,7 @@ func (r *JobRepository) archivingWorker() {
log.Errorf("archiving job (dbid: %d) failed: %s", job.ID, err.Error()) log.Errorf("archiving job (dbid: %d) failed: %s", job.ID, err.Error())
continue continue
} }
log.Debugf("archiving job %d took %s", job.JobID, time.Since(start))
log.Printf("archiving job (dbid: %d) successful", job.ID) log.Printf("archiving job (dbid: %d) successful", job.ID)
r.archivePending.Done() r.archivePending.Done()
} }
@@ -634,7 +634,7 @@ func (r *JobRepository) Partitions(cluster string) ([]string, error) {
if err != nil { if err != nil {
return nil, err return nil, err
} }
log.Infof("Timer Partitions %s", time.Since(start)) log.Debugf("Timer Partitions %s", time.Since(start))
return partitions.([]string), nil return partitions.([]string), nil
} }
@@ -679,7 +679,7 @@ func (r *JobRepository) AllocatedNodes(cluster string) (map[string]map[string]in
} }
} }
log.Infof("Timer AllocatedNodes %s", time.Since(start)) log.Debugf("Timer AllocatedNodes %s", time.Since(start))
return subclusters, nil return subclusters, nil
} }
@@ -708,7 +708,7 @@ func (r *JobRepository) StopJobsExceedingWalltimeBy(seconds int) error {
if rowsAffected > 0 { if rowsAffected > 0 {
log.Infof("%d jobs have been marked as failed due to running too long", rowsAffected) log.Infof("%d jobs have been marked as failed due to running too long", rowsAffected)
} }
log.Infof("Timer StopJobsExceedingWalltimeBy %s", time.Since(start)) log.Debugf("Timer StopJobsExceedingWalltimeBy %s", time.Since(start))
return nil return nil
} }

View File

@@ -211,7 +211,7 @@ func SecurityCheck(ctx context.Context, query sq.SelectBuilder) (sq.SelectBuilde
if len(user.Projects) != 0 { if len(user.Projects) != 0 {
return query.Where(sq.Or{sq.Eq{"job.project": user.Projects}, sq.Eq{"job.user": user.Username}}), nil return query.Where(sq.Or{sq.Eq{"job.project": user.Projects}, sq.Eq{"job.user": user.Username}}), nil
} else { } else {
log.Infof("Manager-User '%s' has no defined projects to lookup! Query only personal jobs ...", user.Username) log.Debugf("Manager-User '%s' has no defined projects to lookup! Query only personal jobs ...", user.Username)
return query.Where("job.user = ?", user.Username), nil return query.Where("job.user = ?", user.Username), nil
} }
} else if user.HasRole(auth.RoleUser) { // User : Only personal jobs } else if user.HasRole(auth.RoleUser) { // User : Only personal jobs

View File

@@ -171,7 +171,7 @@ func (r *JobRepository) JobsStatsGrouped(
} }
} }
log.Infof("Timer JobsStatsGrouped %s", time.Since(start)) log.Debugf("Timer JobsStatsGrouped %s", time.Since(start))
return stats, nil return stats, nil
} }
@@ -212,7 +212,7 @@ func (r *JobRepository) JobsStats(
TotalAccHours: totalAccHours}) TotalAccHours: totalAccHours})
} }
log.Infof("Timer JobStats %s", time.Since(start)) log.Debugf("Timer JobStats %s", time.Since(start))
return stats, nil return stats, nil
} }
@@ -251,7 +251,7 @@ func (r *JobRepository) JobCountGrouped(
} }
} }
log.Infof("Timer JobCountGrouped %s", time.Since(start)) log.Debugf("Timer JobCountGrouped %s", time.Since(start))
return stats, nil return stats, nil
} }
@@ -300,7 +300,7 @@ func (r *JobRepository) AddJobCountGrouped(
} }
} }
log.Infof("Timer AddJobCountGrouped %s", time.Since(start)) log.Debugf("Timer AddJobCountGrouped %s", time.Since(start))
return stats, nil return stats, nil
} }
@@ -343,7 +343,7 @@ func (r *JobRepository) AddJobCount(
} }
} }
log.Infof("Timer JobJobCount %s", time.Since(start)) log.Debugf("Timer JobJobCount %s", time.Since(start))
return stats, nil return stats, nil
} }
@@ -368,7 +368,7 @@ func (r *JobRepository) AddHistograms(
return nil, err return nil, err
} }
log.Infof("Timer AddHistograms %s", time.Since(start)) log.Debugf("Timer AddHistograms %s", time.Since(start))
return stat, nil return stat, nil
} }
@@ -406,6 +406,6 @@ func (r *JobRepository) jobsStatisticsHistogram(
points = append(points, &point) points = append(points, &point)
} }
log.Infof("Timer jobsStatisticsHistogram %s", time.Since(start)) log.Debugf("Timer jobsStatisticsHistogram %s", time.Since(start))
return points, nil return points, nil
} }