mirror of https://github.com/ClusterCockpit/cc-backend
add more information to existing error logs and panics

- prefix messages with '$ROOT/$FILE >' for better localization in the code
- add text where none was given
- fix unnecessary sprintf nesting in influxv2 and prometheus metricrepo logging
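The sketch below illustrates both points of the commit message under stated assumptions: it uses the standard library log package and an invented queryMetrics helper, and the METRICDATA/PROMETHEUS tag is only an example value; the actual cc-backend call sites and its internal logging package differ.

```go
package main

import (
	"fmt"
	"log"
)

// queryMetrics is a stand-in for a metric-repository query whose error path
// previously nested fmt.Sprintf inside a printf-style logging call.
func queryMetrics(cluster string) error {
	return fmt.Errorf("connection refused")
}

func main() {
	cluster := "testcluster"
	if err := queryMetrics(cluster); err != nil {
		// Before: unnecessary Sprintf nesting, and no hint where the message comes from:
		//   log.Printf(fmt.Sprintf("error while performing query: %s", err.Error()))
		//
		// After: the format string and arguments go straight to the logger, and the
		// message carries a "$ROOT/$FILE >" prefix (METRICDATA/PROMETHEUS is a
		// hypothetical tag) so the origin of the log line is visible at a glance.
		log.Printf("METRICDATA/PROMETHEUS > error while performing query for cluster %s: %v", cluster, err)
	}
}
```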
@@ -39,14 +39,14 @@ func Connect(driver string, db string) {
     } else if driver == "mysql" {
         dbHandle, err = sqlx.Open("mysql", fmt.Sprintf("%s?multiStatements=true", db))
         if err != nil {
-            log.Fatal(err)
+            log.Fatalf("REPOSITORY/DBCONNECTION > sqlx.Open() error: %v", err)
         }

         dbHandle.SetConnMaxLifetime(time.Minute * 3)
         dbHandle.SetMaxOpenConns(10)
         dbHandle.SetMaxIdleConns(10)
     } else {
-        log.Fatalf("unsupported database driver: %s", driver)
+        log.Fatalf("REPOSITORY/DBCONNECTION > unsupported database driver: %s", driver)
     }

     dbConnInstance = &DBConnection{DB: dbHandle}
@@ -55,7 +55,7 @@ func Connect(driver string, db string) {

 func GetConnection() *DBConnection {
     if dbConnInstance == nil {
-        log.Fatalf("Database connection not initialized!")
+        log.Fatalf("REPOSITORY/DBCONNECTION > Database connection not initialized!")
     }

     return dbConnInstance
@@ -95,7 +95,7 @@ func HandleImportFlag(flag string) error {
     for _, pair := range strings.Split(flag, ",") {
         files := strings.Split(pair, ":")
         if len(files) != 2 {
-            return fmt.Errorf("invalid import flag format")
+            return fmt.Errorf("REPOSITORY/INIT > invalid import flag format")
         }

         raw, err := os.ReadFile(files[0])
@@ -105,7 +105,7 @@ func HandleImportFlag(flag string) error {

         if config.Keys.Validate {
             if err := schema.Validate(schema.Meta, bytes.NewReader(raw)); err != nil {
-                return fmt.Errorf("validate job meta: %v", err)
+                return fmt.Errorf("REPOSITORY/INIT > validate job meta: %v", err)
             }
         }
         dec := json.NewDecoder(bytes.NewReader(raw))
@@ -122,7 +122,7 @@ func HandleImportFlag(flag string) error {

         if config.Keys.Validate {
             if err := schema.Validate(schema.Data, bytes.NewReader(raw)); err != nil {
-                return fmt.Errorf("validate job data: %v", err)
+                return fmt.Errorf("REPOSITORY/INIT > validate job data: %v", err)
             }
         }
         dec = json.NewDecoder(bytes.NewReader(raw))
@@ -139,7 +139,7 @@ func HandleImportFlag(flag string) error {
                 return err
             }

-            return fmt.Errorf("a job with that jobId, cluster and startTime does already exist (dbid: %d)", job.ID)
+            return fmt.Errorf("REPOSITORY/INIT > a job with that jobId, cluster and startTime does already exist (dbid: %d)", job.ID)
         }

         job := schema.Job{
@@ -186,7 +186,7 @@ func HandleImportFlag(flag string) error {
             }
         }

-        log.Infof("Successfully imported a new job (jobId: %d, cluster: %s, dbid: %d)", job.JobID, job.Cluster, id)
+        log.Infof("REPOSITORY/INIT > successfully imported a new job (jobId: %d, cluster: %s, dbid: %d)", job.JobID, job.Cluster, id)
     }
     return nil
 }
@@ -260,34 +260,34 @@ func InitDB() error {

         job.RawResources, err = json.Marshal(job.Resources)
         if err != nil {
-            log.Errorf("repository initDB()- %v", err)
+            log.Errorf("REPOSITORY/INIT > repository initDB(): %v", err)
             errorOccured++
             continue
         }

         job.RawMetaData, err = json.Marshal(job.MetaData)
         if err != nil {
-            log.Errorf("repository initDB()- %v", err)
+            log.Errorf("REPOSITORY/INIT > repository initDB(): %v", err)
             errorOccured++
             continue
         }

         if err := SanityChecks(&job.BaseJob); err != nil {
-            log.Errorf("repository initDB()- %v", err)
+            log.Errorf("REPOSITORY/INIT > repository initDB(): %v", err)
             errorOccured++
             continue
         }

         res, err := stmt.Exec(job)
         if err != nil {
-            log.Errorf("repository initDB()- %v", err)
+            log.Errorf("REPOSITORY/INIT > repository initDB(): %v", err)
             errorOccured++
             continue
         }

         id, err := res.LastInsertId()
         if err != nil {
-            log.Errorf("repository initDB()- %v", err)
+            log.Errorf("REPOSITORY/INIT > repository initDB(): %v", err)
             errorOccured++
             continue
         }
@@ -318,7 +318,7 @@ func InitDB() error {
     }

     if errorOccured > 0 {
-        log.Errorf("Error in import of %d jobs!", errorOccured)
+        log.Errorf("REPOSITORY/INIT > Error in import of %d jobs!", errorOccured)
     }

     if err := tx.Commit(); err != nil {
@@ -214,12 +214,12 @@ func (r *JobRepository) FindById(jobId int64) (*schema.Job, error) {
 func (r *JobRepository) Start(job *schema.JobMeta) (id int64, err error) {
     job.RawResources, err = json.Marshal(job.Resources)
     if err != nil {
-        return -1, fmt.Errorf("encoding resources field failed: %w", err)
+        return -1, fmt.Errorf("REPOSITORY/JOB > encoding resources field failed: %w", err)
     }

     job.RawMetaData, err = json.Marshal(job.MetaData)
     if err != nil {
-        return -1, fmt.Errorf("encoding metaData field failed: %w", err)
+        return -1, fmt.Errorf("REPOSITORY/JOB > encoding metaData field failed: %w", err)
     }

     res, err := r.DB.NamedExec(`INSERT INTO job (
@@ -259,9 +259,9 @@ func (r *JobRepository) DeleteJobsBefore(startTime int64) (int, error) {
     err := r.DB.Get(&cnt, qs) //ignore error as it will also occur in delete statement
     _, err = r.DB.Exec(`DELETE FROM job WHERE job.start_time < ?`, startTime)
     if err != nil {
-        log.Warnf(" DeleteJobsBefore(%d): error %v", startTime, err)
+        log.Warnf("REPOSITORY/JOB > DeleteJobsBefore(%d): error %v", startTime, err)
     } else {
-        log.Infof("DeleteJobsBefore(%d): Deleted %d jobs", startTime, cnt)
+        log.Infof("REPOSITORY/JOB > DeleteJobsBefore(%d): Deleted %d jobs", startTime, cnt)
     }
     return cnt, err
 }
@@ -269,9 +269,9 @@ func (r *JobRepository) DeleteJobsBefore(startTime int64) (int, error) {
 func (r *JobRepository) DeleteJobById(id int64) error {
     _, err := r.DB.Exec(`DELETE FROM job WHERE job.id = ?`, id)
     if err != nil {
-        log.Warnf("DeleteJobById(%d): error %v", id, err)
+        log.Warnf("REPOSITORY/JOB > DeleteJobById(%d): error %v", id, err)
     } else {
-        log.Infof("DeleteJobById(%d): Success", id)
+        log.Infof("REPOSITORY/JOB > DeleteJobById(%d): Success", id)
     }
     return err
 }
@@ -376,7 +376,7 @@ func (r *JobRepository) archivingWorker(){
             // not using meta data, called to load JobMeta into Cache?
             // will fail if job meta not in repository
             if _, err := r.FetchMetadata(job); err != nil {
-                log.Errorf("archiving job (dbid: %d) failed: %s", job.ID, err.Error())
+                log.Errorf("REPOSITORY/JOB > archiving job (dbid: %d) failed: %s", job.ID, err.Error())
                 r.UpdateMonitoringStatus(job.ID, schema.MonitoringStatusArchivingFailed)
                 continue
             }
@@ -385,18 +385,18 @@ func (r *JobRepository) archivingWorker(){
             // TODO: Maybe use context with cancel/timeout here
             jobMeta, err := metricdata.ArchiveJob(job, context.Background())
             if err != nil {
-                log.Errorf("archiving job (dbid: %d) failed: %s", job.ID, err.Error())
+                log.Errorf("REPOSITORY/JOB > archiving job (dbid: %d) failed: %s", job.ID, err.Error())
                 r.UpdateMonitoringStatus(job.ID, schema.MonitoringStatusArchivingFailed)
                 continue
             }

             // Update the jobs database entry one last time:
             if err := r.MarkArchived(job.ID, schema.MonitoringStatusArchivingSuccessful, jobMeta.Statistics); err != nil {
-                log.Errorf("archiving job (dbid: %d) failed: %s", job.ID, err.Error())
+                log.Errorf("REPOSITORY/JOB > archiving job (dbid: %d) failed: %s", job.ID, err.Error())
                 continue
             }

-            log.Printf("archiving job (dbid: %d) successful", job.ID)
+            log.Printf("REPOSITORY/JOB > archiving job (dbid: %d) successful", job.ID)
             r.archivePending.Done()
         }
     }
@@ -523,7 +523,7 @@ func (r *JobRepository) StopJobsExceedingWalltimeBy(seconds int) error {
     }

     if rowsAffected > 0 {
-        log.Warnf("%d jobs have been marked as failed due to running too long", rowsAffected)
+        log.Warnf("REPOSITORY/JOB > %d jobs have been marked as failed due to running too long", rowsAffected)
     }
     return nil
 }
@@ -36,7 +36,7 @@ func (r *JobRepository) QueryJobs(
         } else if order.Order == model.SortDirectionEnumDesc {
             query = query.OrderBy(fmt.Sprintf("job.%s DESC", field))
         } else {
-            return nil, errors.New("invalid sorting order")
+            return nil, errors.New("REPOSITORY/QUERY > invalid sorting order")
         }
     }

@@ -54,7 +54,7 @@ func (r *JobRepository) QueryJobs(
         return nil, err
     }

-    log.Debugf("SQL query: `%s`, args: %#v", sql, args)
+    log.Debugf("REPOSITORY/QUERY > SQL query: `%s`, args: %#v", sql, args)
     rows, err := query.RunWith(r.stmtCache).Query()
     if err != nil {
         return nil, err
@@ -209,7 +209,7 @@ var matchAllCap = regexp.MustCompile("([a-z0-9])([A-Z])")
 func toSnakeCase(str string) string {
     for _, c := range str {
         if c == '\'' || c == '\\' {
-            panic("A hacker (probably not)!!!")
+            panic("REPOSITORY/QUERY > toSnakeCase() attack vector!")
         }
     }

@@ -42,12 +42,12 @@ func GetUserCfgRepo() *UserCfgRepo {
             FOREIGN KEY (username) REFERENCES user (username) ON DELETE CASCADE ON UPDATE NO ACTION);`)

        if err != nil {
-            log.Fatal(err)
+            log.Fatalf("REPOSITORY/USER > db.DB.exec() error: %v", err)
        }

        lookupConfigStmt, err := db.DB.Preparex(`SELECT confkey, value FROM configuration WHERE configuration.username = ?`)
        if err != nil {
-            log.Fatal(err)
+            log.Fatalf("REPOSITORY/USER > db.DB.Preparex() error: %v", err)
        }

        userCfgRepoInstance = &UserCfgRepo{