Rework pkg/log, add 'loglevel' and 'logdate' flags, streamline

- remove some previously added manual location strings: location is now handled by pkg/log depending on the loglevel
- keep manual location strings on the fmt print functions
- add 'notice' and 'critical' loglevels
- add 'Panic' and 'Panicf' functions to log panics
- addresses issue #26
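
For illustration, a minimal sketch of how call sites could look after this rework. The 'loglevel'/'logdate' flag names and the Warnf/Panicf functions are taken from this commit; the Init signature, the import path, and the flag defaults/help texts are assumptions, not the actual implementation.

package main

import (
	"errors"
	"flag"

	"github.com/ClusterCockpit/cc-backend/pkg/log" // import path assumed from the repository layout
)

func main() {
	// Flags named in the commit title; defaults and help texts are assumptions.
	logLevel := flag.String("loglevel", "warn", "minimum level to log ('notice' and 'critical' are added by this commit)")
	logDate := flag.Bool("logdate", false, "prefix log messages with date and time")
	flag.Parse()

	// Assumed initializer wiring both flags into pkg/log.
	log.Init(*logLevel, *logDate)

	err := errors.New("some repository error")

	// Call sites no longer hard-code a "REPOSITORY/JOB > " location prefix;
	// per the commit message, pkg/log adds location info depending on the loglevel.
	log.Warnf("DeleteJobsBefore(%d): error %v", 1673000000, err)

	// New in this commit: log the message and panic in one call.
	log.Panicf("unrecoverable: %v", err)
}
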
Christoph Kluge
2023-01-23 18:48:06 +01:00
parent 25286ff068
commit 79a949b55e
22 changed files with 368 additions and 209 deletions


@@ -259,9 +259,9 @@ func (r *JobRepository) DeleteJobsBefore(startTime int64) (int, error) {
err := r.DB.Get(&cnt, qs) //ignore error as it will also occur in delete statement
_, err = r.DB.Exec(`DELETE FROM job WHERE job.start_time < ?`, startTime)
if err != nil {
log.Warnf("REPOSITORY/JOB > DeleteJobsBefore(%d): error %v", startTime, err)
log.Warnf(" DeleteJobsBefore(%d): error %v", startTime, err)
} else {
log.Infof("REPOSITORY/JOB > DeleteJobsBefore(%d): Deleted %d jobs", startTime, cnt)
log.Infof("DeleteJobsBefore(%d): Deleted %d jobs", startTime, cnt)
}
return cnt, err
}
@@ -269,9 +269,9 @@ func (r *JobRepository) DeleteJobsBefore(startTime int64) (int, error) {
func (r *JobRepository) DeleteJobById(id int64) error {
_, err := r.DB.Exec(`DELETE FROM job WHERE job.id = ?`, id)
if err != nil {
log.Warnf("REPOSITORY/JOB > DeleteJobById(%d): error %v", id, err)
log.Warnf("DeleteJobById(%d): error %v", id, err)
} else {
log.Infof("REPOSITORY/JOB > DeleteJobById(%d): Success", id)
log.Infof("DeleteJobById(%d): Success", id)
}
return err
}
@@ -376,7 +376,7 @@ func (r *JobRepository) archivingWorker(){
// not using meta data, called to load JobMeta into Cache?
// will fail if job meta not in repository
if _, err := r.FetchMetadata(job); err != nil {
log.Errorf("REPOSITORY/JOB > archiving job (dbid: %d) failed: %s", job.ID, err.Error())
log.Errorf("archiving job (dbid: %d) failed: %s", job.ID, err.Error())
r.UpdateMonitoringStatus(job.ID, schema.MonitoringStatusArchivingFailed)
continue
}
@@ -385,18 +385,18 @@ func (r *JobRepository) archivingWorker(){
// TODO: Maybe use context with cancel/timeout here
jobMeta, err := metricdata.ArchiveJob(job, context.Background())
if err != nil {
log.Errorf("REPOSITORY/JOB > archiving job (dbid: %d) failed: %s", job.ID, err.Error())
log.Errorf("archiving job (dbid: %d) failed: %s", job.ID, err.Error())
r.UpdateMonitoringStatus(job.ID, schema.MonitoringStatusArchivingFailed)
continue
}
// Update the jobs database entry one last time:
if err := r.MarkArchived(job.ID, schema.MonitoringStatusArchivingSuccessful, jobMeta.Statistics); err != nil {
log.Errorf("REPOSITORY/JOB > archiving job (dbid: %d) failed: %s", job.ID, err.Error())
log.Errorf("archiving job (dbid: %d) failed: %s", job.ID, err.Error())
continue
}
log.Printf("REPOSITORY/JOB > archiving job (dbid: %d) successful", job.ID)
log.Printf("archiving job (dbid: %d) successful", job.ID)
r.archivePending.Done()
}
}
@@ -523,7 +523,7 @@ func (r *JobRepository) StopJobsExceedingWalltimeBy(seconds int) error {
}
if rowsAffected > 0 {
log.Warnf("REPOSITORY/JOB > %d jobs have been marked as failed due to running too long", rowsAffected)
log.Warnf("%d jobs have been marked as failed due to running too long", rowsAffected)
}
return nil
}
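
The diff above drops the hand-written "REPOSITORY/JOB > " prefixes because, per the commit message, pkg/log now derives location information itself depending on the loglevel. As a rough, hypothetical illustration of that technique (not the code from this commit), a logging package can obtain the caller location via runtime.Caller:

package log

import (
	"fmt"
	"os"
	"path/filepath"
	"runtime"
)

// callerPrefix returns "[file.go:123] " for the caller 'skip' frames up the
// stack. Illustrative only; the real pkg/log may derive location differently.
func callerPrefix(skip int) string {
	_, file, line, ok := runtime.Caller(skip)
	if !ok {
		return ""
	}
	return fmt.Sprintf("[%s:%d] ", filepath.Base(file), line)
}

// Warnf prepends the derived caller location instead of relying on the call
// site to hard-code a "REPOSITORY/JOB > " style prefix.
func Warnf(format string, args ...interface{}) {
	fmt.Fprintf(os.Stderr, "<warning> "+callerPrefix(2)+format+"\n", args...)
}
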