add more information to existing error logs and panics

- prefix messages with '$ROOT/$FILE' for better localization in the code
- add message text where none was given
- fix unnecessary sprintf nesting in influxv2 and prometheus metricrepo logging (pattern sketched below)
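The influxv2 and prometheus metricrepo files are not part of this excerpt, so the following is only a minimal Go sketch of the sprintf-nesting pattern being removed; the message text and error value are hypothetical. It uses the standard library's printf-style logger, but the project's log.Errorf visible in the diff below takes the same format-string-plus-arguments shape.

package main

import (
	"errors"
	"fmt"
	"log"
)

func main() {
	// Hypothetical error; note the '%' inside its text.
	err := errors.New("query returned 0 series (100% empty)")

	// Before: fmt.Sprintf renders the message, then the logger re-reads the
	// result as a format string, so the '%' in err's text is misparsed as a
	// stray formatting directive.
	log.Printf(fmt.Sprintf("METRICDATA/INFLUXV2 > query error: %s", err.Error()))

	// After: a single formatting pass; err's text arrives as an argument,
	// never as a format string, so its '%' is printed verbatim.
	log.Printf("METRICDATA/INFLUXV2 > query error: %s", err.Error())
}

Dropping the nesting also matches how log.Errorf is called everywhere else in this commit, keeping the call sites uniform.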
Christoph Kluge
2023-01-19 16:59:14 +01:00
parent 5abd3641b2
commit 24a4244f19
31 changed files with 254 additions and 253 deletions


@@ -46,7 +46,7 @@ func loadJobMeta(filename string) (*schema.JobMeta, error) {
 	f, err := os.Open(filename)
 	if err != nil {
-		log.Errorf("fsBackend loadJobMeta()- %v", err)
+		log.Errorf("ARCHIVE/FSBACKEND > loadJobMeta() > open file error: %v", err)
 		return &schema.JobMeta{}, err
 	}
 	defer f.Close()
@@ -58,19 +58,19 @@ func (fsa *FsArchive) Init(rawConfig json.RawMessage) error {
 	var config FsArchiveConfig
 	if err := json.Unmarshal(rawConfig, &config); err != nil {
-		log.Errorf("fsBackend Init()- %v", err)
+		log.Errorf("ARCHIVE/FSBACKEND > Init() > Unmarshal error: %v", err)
 		return err
 	}
 	if config.Path == "" {
-		err := fmt.Errorf("fsBackend Init()- empty path")
-		log.Errorf("fsBackend Init()- %v", err)
+		err := fmt.Errorf("ARCHIVE/FSBACKEND > Init() : empty config.Path")
+		log.Errorf("ARCHIVE/FSBACKEND > Init() > config.Path error: %v", err)
 		return err
 	}
 	fsa.path = config.Path
 	entries, err := os.ReadDir(fsa.path)
 	if err != nil {
-		log.Errorf("fsBackend Init()- %v", err)
+		log.Errorf("ARCHIVE/FSBACKEND > Init() > ReadDir() error: %v", err)
 		return err
 	}
@@ -86,7 +86,7 @@ func (fsa *FsArchive) LoadJobData(job *schema.Job) (schema.JobData, error) {
 	filename := getPath(job, fsa.path, "data.json")
 	f, err := os.Open(filename)
 	if err != nil {
-		log.Errorf("fsBackend LoadJobData()- %v", err)
+		log.Errorf("ARCHIVE/FSBACKEND > LoadJobData() > open file error: %v", err)
 		return nil, err
 	}
 	defer f.Close()
@@ -104,12 +104,12 @@ func (fsa *FsArchive) LoadClusterCfg(name string) (*schema.Cluster, error) {
 	b, err := os.ReadFile(filepath.Join(fsa.path, name, "cluster.json"))
 	if err != nil {
-		log.Errorf("fsBackend LoadClusterCfg()- %v", err)
+		log.Errorf("ARCHIVE/FSBACKEND > LoadClusterCfg() > open file error: %v", err)
 		return &schema.Cluster{}, err
 	}
 	if config.Keys.Validate {
 		if err := schema.Validate(schema.ClusterCfg, bytes.NewReader(b)); err != nil {
-			return &schema.Cluster{}, fmt.Errorf("Validate cluster config: %v\n", err)
+			return &schema.Cluster{}, fmt.Errorf("ARCHIVE/FSBACKEND > Validate cluster config: %v\n", err)
 		}
 	}
 	return DecodeCluster(bytes.NewReader(b))
@@ -121,13 +121,13 @@ func (fsa *FsArchive) Iter() <-chan *schema.JobMeta {
 	go func() {
 		clustersDir, err := os.ReadDir(fsa.path)
 		if err != nil {
-			log.Fatalf("Reading clusters failed: %s", err.Error())
+			log.Fatalf("ARCHIVE/FSBACKEND > Reading clusters failed @ cluster dirs: %s", err.Error())
 		}
 		for _, clusterDir := range clustersDir {
 			lvl1Dirs, err := os.ReadDir(filepath.Join(fsa.path, clusterDir.Name()))
 			if err != nil {
-				log.Fatalf("Reading jobs failed: %s", err.Error())
+				log.Fatalf("ARCHIVE/FSBACKEND > Reading jobs failed @ lvl1 dirs: %s", err.Error())
 			}
 			for _, lvl1Dir := range lvl1Dirs {
@@ -138,21 +138,21 @@ func (fsa *FsArchive) Iter() <-chan *schema.JobMeta {
 				lvl2Dirs, err := os.ReadDir(filepath.Join(fsa.path, clusterDir.Name(), lvl1Dir.Name()))
 				if err != nil {
-					log.Fatalf("Reading jobs failed: %s", err.Error())
+					log.Fatalf("ARCHIVE/FSBACKEND > Reading jobs failed @ lvl2 dirs: %s", err.Error())
 				}
 				for _, lvl2Dir := range lvl2Dirs {
 					dirpath := filepath.Join(fsa.path, clusterDir.Name(), lvl1Dir.Name(), lvl2Dir.Name())
 					startTimeDirs, err := os.ReadDir(dirpath)
 					if err != nil {
-						log.Fatalf("Reading jobs failed: %s", err.Error())
+						log.Fatalf("ARCHIVE/FSBACKEND > Reading jobs failed @ starttime dirs: %s", err.Error())
 					}
 					for _, startTimeDir := range startTimeDirs {
 						if startTimeDir.IsDir() {
 							job, err := loadJobMeta(filepath.Join(dirpath, startTimeDir.Name(), "meta.json"))
 							if err != nil {
-								log.Errorf("in %s: %s", filepath.Join(dirpath, startTimeDir.Name()), err.Error())
+								log.Errorf("ARCHIVE/FSBACKEND > error in %s: %s", filepath.Join(dirpath, startTimeDir.Name()), err.Error())
 							} else {
 								ch <- job
 							}