Adapt loglevel for logs, shorten strings, fix formats, streamline

- Switched to Warn for most errors, reduces bloat, improves log control
Christoph Kluge
2023-02-01 11:58:27 +01:00
parent b77bd078e5
commit a885e69125
26 changed files with 193 additions and 186 deletions
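
The pattern applied throughout the diff below is a common Go convention: a function that returns its error to the caller logs it at Warn level at most, so the same failure is not reported as an Error at every layer it passes through; only the call site that actually handles the error escalates it. The following minimal sketch (hypothetical names, standard-library logger instead of the project's own log.Warn/log.Error helpers seen in the diff) illustrates the idea.

// Sketch only: shows the "log a warning and return err" pattern with
// made-up helper names and the standard library logger; the project
// uses its own log package (log.Warn, log.Error, ...) as in the diff.
package main

import (
	"encoding/json"
	"log"
	"os"
)

// readConfig is a hypothetical helper. It notes the problem at a low
// severity and hands the error upward instead of logging it as an error.
func readConfig(path string) (map[string]interface{}, error) {
	raw, err := os.ReadFile(path)
	if err != nil {
		log.Printf("WARN: error while reading config file: %v", err)
		return nil, err
	}
	var cfg map[string]interface{}
	if err := json.Unmarshal(raw, &cfg); err != nil {
		log.Printf("WARN: error while decoding config json: %v", err)
		return nil, err
	}
	return cfg, nil
}

func main() {
	// Only the caller, which decides that the error is fatal here,
	// escalates beyond the warning level.
	if _, err := readConfig("config.json"); err != nil {
		log.Fatalf("ERROR: cannot continue without config: %v", err)
	}
}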


@@ -100,7 +100,7 @@ func HandleImportFlag(flag string) error {
raw, err := os.ReadFile(files[0])
if err != nil {
log.Error("Error while reading metadata file for import")
log.Warn("Error while reading metadata file for import")
return err
}
@@ -113,13 +113,13 @@ func HandleImportFlag(flag string) error {
dec.DisallowUnknownFields()
jobMeta := schema.JobMeta{BaseJob: schema.JobDefaults}
if err := dec.Decode(&jobMeta); err != nil {
log.Error("Error while decoding raw json metadata for import")
log.Warn("Error while decoding raw json metadata for import")
return err
}
raw, err = os.ReadFile(files[1])
if err != nil {
log.Error("Error while reading jobdata file for import")
log.Warn("Error while reading jobdata file for import")
return err
}
@@ -132,7 +132,7 @@ func HandleImportFlag(flag string) error {
dec.DisallowUnknownFields()
jobData := schema.JobData{}
if err := dec.Decode(&jobData); err != nil {
log.Error("Error while decoding raw json jobdata for import")
log.Warn("Error while decoding raw json jobdata for import")
return err
}
@@ -140,7 +140,7 @@ func HandleImportFlag(flag string) error {
jobMeta.MonitoringStatus = schema.MonitoringStatusArchivingSuccessful
if job, err := GetJobRepository().Find(&jobMeta.JobID, &jobMeta.Cluster, &jobMeta.StartTime); err != sql.ErrNoRows {
if err != nil {
log.Error("Error while finding job in jobRepository")
log.Warn("Error while finding job in jobRepository")
return err
}
@@ -160,17 +160,17 @@ func HandleImportFlag(flag string) error {
job.FileBwAvg = loadJobStat(&jobMeta, "file_bw")
job.RawResources, err = json.Marshal(job.Resources)
if err != nil {
log.Error("Error while marshaling job resources")
log.Warn("Error while marshaling job resources")
return err
}
job.RawMetaData, err = json.Marshal(job.MetaData)
if err != nil {
log.Error("Error while marshaling job metadata")
log.Warn("Error while marshaling job metadata")
return err
}
if err := SanityChecks(&job.BaseJob); err != nil {
log.Error("BaseJob SanityChecks failed")
log.Warn("BaseJob SanityChecks failed")
return err
}
@@ -181,13 +181,13 @@ func HandleImportFlag(flag string) error {
res, err := GetConnection().DB.NamedExec(NamedJobInsert, job)
if err != nil {
log.Error("Error while NamedJobInsert")
log.Warn("Error while NamedJobInsert")
return err
}
id, err := res.LastInsertId()
if err != nil {
log.Error("Error while getting last insert ID")
log.Warn("Error while getting last insert ID")
return err
}
@@ -221,13 +221,13 @@ func InitDB() error {
// that speeds up inserts A LOT.
tx, err := db.DB.Beginx()
if err != nil {
log.Error("Error while bundling transactions")
log.Warn("Error while bundling transactions")
return err
}
stmt, err := tx.PrepareNamed(NamedJobInsert)
if err != nil {
log.Error("Error while preparing namedJobInsert")
log.Warn("Error while preparing namedJobInsert")
return err
}
tags := make(map[string]int64)
@@ -247,14 +247,14 @@ func InitDB() error {
if i%10 == 0 {
if tx != nil {
if err := tx.Commit(); err != nil {
log.Error("Error while committing transactions for jobMeta")
log.Warn("Error while committing transactions for jobMeta")
return err
}
}
tx, err = db.DB.Beginx()
if err != nil {
log.Error("Error while bundling transactions for jobMeta")
log.Warn("Error while bundling transactions for jobMeta")
return err
}
@@ -315,19 +315,19 @@ func InitDB() error {
if !ok {
res, err := tx.Exec(`INSERT INTO tag (tag_name, tag_type) VALUES (?, ?)`, tag.Name, tag.Type)
if err != nil {
log.Errorf("Error while inserting tag into tag table: %#v %#v", tag.Name, tag.Type)
log.Errorf("Error while inserting tag into tag table: %v (Type %v)", tag.Name, tag.Type)
return err
}
tagId, err = res.LastInsertId()
if err != nil {
log.Error("Error while getting last insert ID")
log.Warn("Error while getting last insert ID")
return err
}
tags[tagstr] = tagId
}
if _, err := tx.Exec(`INSERT INTO jobtag (job_id, tag_id) VALUES (?, ?)`, id, tagId); err != nil {
log.Errorf("Error while inserting jobtag into jobtag table: %#v %#v", id, tagId)
log.Errorf("Error while inserting jobtag into jobtag table: %v (TagID %v)", id, tagId)
return err
}
}
@@ -338,18 +338,18 @@ func InitDB() error {
}
if errorOccured > 0 {
log.Errorf("Error in import of %d jobs!", errorOccured)
log.Warnf("Error in import of %d jobs!", errorOccured)
}
if err := tx.Commit(); err != nil {
log.Error("Error while committing SQL transactions")
log.Warn("Error while committing SQL transactions")
return err
}
// Create indexes after inserts so that they do not
// need to be continually updated.
if _, err := db.DB.Exec(JobsDbIndexes); err != nil {
log.Error("Error while creating indices after inserts")
log.Warn("Error while creating indices after inserts")
return err
}
@@ -360,14 +360,14 @@ func InitDB() error {
// This function also sets the subcluster if necessary!
func SanityChecks(job *schema.BaseJob) error {
if c := archive.GetCluster(job.Cluster); c == nil {
return fmt.Errorf("no such cluster: %#v", job.Cluster)
return fmt.Errorf("no such cluster: %v", job.Cluster)
}
if err := archive.AssignSubCluster(job); err != nil {
log.Error("Error while assigning subcluster to job")
log.Warn("Error while assigning subcluster to job")
return err
}
if !job.State.Valid() {
return fmt.Errorf("not a valid job state: %#v", job.State)
return fmt.Errorf("not a valid job state: %v", job.State)
}
if len(job.Resources) == 0 || len(job.User) == 0 {
return fmt.Errorf("'resources' and 'user' should not be empty")


@@ -68,12 +68,12 @@ func scanJob(row interface{ Scan(...interface{}) error }) (*schema.Job, error) {
&job.ID, &job.JobID, &job.User, &job.Project, &job.Cluster, &job.SubCluster, &job.StartTimeUnix, &job.Partition, &job.ArrayJobId,
&job.NumNodes, &job.NumHWThreads, &job.NumAcc, &job.Exclusive, &job.MonitoringStatus, &job.SMT, &job.State,
&job.Duration, &job.Walltime, &job.RawResources /*&job.MetaData*/); err != nil {
log.Error("Error while scanning rows")
log.Warn("Error while scanning rows")
return nil, err
}
if err := json.Unmarshal(job.RawResources, &job.Resources); err != nil {
log.Error("Error while unmarhsaling raw resources json")
log.Warn("Error while unmarhsaling raw resources json")
return nil, err
}
@@ -95,7 +95,7 @@ func (r *JobRepository) FetchMetadata(job *schema.Job) (map[string]string, error
if err := sq.Select("job.meta_data").From("job").Where("job.id = ?", job.ID).
RunWith(r.stmtCache).QueryRow().Scan(&job.RawMetaData); err != nil {
log.Error("Error while scanning for job metadata")
log.Warn("Error while scanning for job metadata")
return nil, err
}
@@ -104,7 +104,7 @@ func (r *JobRepository) FetchMetadata(job *schema.Job) (map[string]string, error
}
if err := json.Unmarshal(job.RawMetaData, &job.MetaData); err != nil {
log.Error("Error while unmarshaling raw metadata json")
log.Warn("Error while unmarshaling raw metadata json")
return nil, err
}
@@ -117,7 +117,7 @@ func (r *JobRepository) UpdateMetadata(job *schema.Job, key, val string) (err er
r.cache.Del(cachekey)
if job.MetaData == nil {
if _, err = r.FetchMetadata(job); err != nil {
log.Errorf("Error while fetching metadata for job, DB ID '%#v'", job.ID)
log.Warnf("Error while fetching metadata for job, DB ID '%v'", job.ID)
return err
}
}
@@ -134,12 +134,12 @@ func (r *JobRepository) UpdateMetadata(job *schema.Job, key, val string) (err er
}
if job.RawMetaData, err = json.Marshal(job.MetaData); err != nil {
log.Errorf("Error while marshaling metadata for job, DB ID '%#v'", job.ID)
log.Warnf("Error while marshaling metadata for job, DB ID '%v'", job.ID)
return err
}
if _, err = sq.Update("job").Set("meta_data", job.RawMetaData).Where("job.id = ?", job.ID).RunWith(r.stmtCache).Exec(); err != nil {
log.Errorf("Error while updating metadata for job, DB ID '%#v'", job.ID)
log.Warnf("Error while updating metadata for job, DB ID '%v'", job.ID)
return err
}
@@ -200,7 +200,7 @@ func (r *JobRepository) FindAll(
for rows.Next() {
job, err := scanJob(rows)
if err != nil {
log.Error("Error while scanning rows")
log.Warn("Error while scanning rows")
return nil, err
}
jobs = append(jobs, job)
@@ -302,7 +302,7 @@ func (r *JobRepository) CountGroupedJobs(ctx context.Context, aggreg model.Aggre
count = fmt.Sprintf(`sum(job.num_nodes * (CASE WHEN job.job_state = "running" THEN %d - job.start_time ELSE job.duration END)) as count`, now)
runner = r.DB
default:
log.Notef("CountGroupedJobs() Weight %#v unknown.", *weight)
log.Notef("CountGroupedJobs() Weight %v unknown.", *weight)
}
}
@@ -326,7 +326,7 @@ func (r *JobRepository) CountGroupedJobs(ctx context.Context, aggreg model.Aggre
var group string
var count int
if err := rows.Scan(&group, &count); err != nil {
log.Error("Error while scanning rows")
log.Warn("Error while scanning rows")
return nil, err
}
@@ -370,12 +370,12 @@ func (r *JobRepository) MarkArchived(
case "file_bw":
stmt = stmt.Set("file_bw_avg", stats.Avg)
default:
log.Notef("MarkArchived() Metric '%#v' unknown", metric)
log.Notef("MarkArchived() Metric '%v' unknown", metric)
}
}
if _, err := stmt.RunWith(r.stmtCache).Exec(); err != nil {
log.Error("Error while marking job as archived")
log.Warn("Error while marking job as archived")
return err
}
return nil
@@ -501,11 +501,11 @@ func (r *JobRepository) AllocatedNodes(cluster string) (map[string]map[string]in
var resources []*schema.Resource
var subcluster string
if err := rows.Scan(&raw, &subcluster); err != nil {
log.Error("Error while scanning rows")
log.Warn("Error while scanning rows")
return nil, err
}
if err := json.Unmarshal(raw, &resources); err != nil {
log.Error("Error while unmarshaling raw resources json")
log.Warn("Error while unmarshaling raw resources json")
return nil, err
}
@@ -533,13 +533,13 @@ func (r *JobRepository) StopJobsExceedingWalltimeBy(seconds int) error {
Where(fmt.Sprintf("(%d - job.start_time) > (job.walltime + %d)", time.Now().Unix(), seconds)).
RunWith(r.DB).Exec()
if err != nil {
log.Error("Error while stopping jobs exceeding walltime")
log.Warn("Error while stopping jobs exceeding walltime")
return err
}
rowsAffected, err := res.RowsAffected()
if err != nil {
log.Error("Error while fetching affected rows after stopping due to exceeded walltime")
log.Warn("Error while fetching affected rows after stopping due to exceeded walltime")
return err
}


@@ -51,7 +51,7 @@ func (r *JobRepository) QueryJobs(
sql, args, err := query.ToSql()
if err != nil {
log.Error("Error while converting query to sql")
log.Warn("Error while converting query to sql")
return nil, err
}
@@ -67,7 +67,7 @@ func (r *JobRepository) QueryJobs(
job, err := scanJob(rows)
if err != nil {
rows.Close()
log.Error("Error while scanning rows")
log.Warn("Error while scanning rows")
return nil, err
}
jobs = append(jobs, job)


@@ -20,13 +20,13 @@ func (r *JobRepository) AddTag(job int64, tag int64) ([]*schema.Tag, error) {
j, err := r.FindById(job)
if err != nil {
log.Error("Error while finding job by id")
log.Warn("Error while finding job by id")
return nil, err
}
tags, err := r.GetTags(&job)
if err != nil {
log.Error("Error while getting tags for job")
log.Warn("Error while getting tags for job")
return nil, err
}
@@ -42,13 +42,13 @@ func (r *JobRepository) RemoveTag(job, tag int64) ([]*schema.Tag, error) {
j, err := r.FindById(job)
if err != nil {
log.Error("Error while finding job by id")
log.Warn("Error while finding job by id")
return nil, err
}
tags, err := r.GetTags(&job)
if err != nil {
log.Error("Error while getting tags for job")
log.Warn("Error while getting tags for job")
return nil, err
}
@@ -153,7 +153,7 @@ func (r *JobRepository) GetTags(job *int64) ([]*schema.Tag, error) {
for rows.Next() {
tag := &schema.Tag{}
if err := rows.Scan(&tag.ID, &tag.Type, &tag.Name); err != nil {
log.Error("Error while scanning rows")
log.Warn("Error while scanning rows")
return nil, err
}
tags = append(tags, tag)


@@ -82,7 +82,7 @@ func (uCfg *UserCfgRepo) GetUIConfig(user *auth.User) (map[string]interface{}, e
rows, err := uCfg.Lookup.Query(user.Username)
if err != nil {
log.Errorf("Error while looking up user config for user '%#v'", user.Username)
log.Warnf("Error while looking up user config for user '%v'", user.Username)
return err, 0, 0
}
@@ -91,13 +91,13 @@ func (uCfg *UserCfgRepo) GetUIConfig(user *auth.User) (map[string]interface{}, e
for rows.Next() {
var key, rawval string
if err := rows.Scan(&key, &rawval); err != nil {
log.Error("Error while scanning user config values")
log.Warn("Error while scanning user config values")
return err, 0, 0
}
var val interface{}
if err := json.Unmarshal([]byte(rawval), &val); err != nil {
log.Error("Error while unmarshaling raw user config json")
log.Warn("Error while unmarshaling raw user config json")
return err, 0, 0
}
@@ -109,7 +109,7 @@ func (uCfg *UserCfgRepo) GetUIConfig(user *auth.User) (map[string]interface{}, e
return config, 24 * time.Hour, size
})
if err, ok := data.(error); ok {
log.Error("Error in data set")
log.Error("Error in returned dataset")
return nil, err
}
@@ -126,7 +126,7 @@ func (uCfg *UserCfgRepo) UpdateConfig(
if user == nil {
var val interface{}
if err := json.Unmarshal([]byte(value), &val); err != nil {
log.Error("Error while unmarshaling raw user config json")
log.Warn("Error while unmarshaling raw user config json")
return err
}
@@ -138,7 +138,7 @@ func (uCfg *UserCfgRepo) UpdateConfig(
if _, err := uCfg.DB.Exec(`REPLACE INTO configuration (username, confkey, value) VALUES (?, ?, ?)`,
user, key, value); err != nil {
log.Errorf("Error while replacing user config in DB for user '$#v'", user)
log.Warnf("Error while replacing user config in DB for user '$#v'", user)
return err
}