Port to cc-lib. Extend legal header.

commit 639e1b9c6d
parent 544fb35121
Date: 2025-06-30 12:06:35 +02:00

120 changed files with 1140 additions and 6410 deletions
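
The change itself is mechanical across the touched files: the in-tree packages pkg/log, pkg/lrucache, pkg/schema, and internal/util are swapped for their counterparts in the shared cc-lib module, and the logger is imported under the cclog alias so every call site only changes its package qualifier. A minimal sketch of the resulting pattern (the helper function is illustrative, not from the commit):

```go
package archive

import (
	// The logger moves from cc-backend/pkg/log to cc-lib/ccLogger; the cclog
	// alias keeps each call site a one-token rename (log.Errorf -> cclog.Errorf).
	cclog "github.com/ClusterCockpit/cc-lib/ccLogger"
	"github.com/ClusterCockpit/cc-lib/schema"
)

// logLoadError is a hypothetical helper showing the post-port call shape.
func logLoadError(job *schema.Job, err error) {
	cclog.Errorf("Error while loading job metadata from archiveBackend: %s", err.Error())
}
```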

View File

@@ -1,5 +1,5 @@
 // Copyright (C) NHR@FAU, University Erlangen-Nuremberg.
-// All rights reserved.
+// All rights reserved. This file is part of cc-backend.
 // Use of this source code is governed by a MIT-style
 // license that can be found in the LICENSE file.
 package archive
@@ -10,9 +10,9 @@ import (
 	"maps"
 	"sync"
 
-	"github.com/ClusterCockpit/cc-backend/pkg/log"
-	"github.com/ClusterCockpit/cc-backend/pkg/lrucache"
-	"github.com/ClusterCockpit/cc-backend/pkg/schema"
+	cclog "github.com/ClusterCockpit/cc-lib/ccLogger"
+	"github.com/ClusterCockpit/cc-lib/lrucache"
+	"github.com/ClusterCockpit/cc-lib/schema"
 )
 
 const Version uint64 = 2
@@ -75,7 +75,7 @@ func Init(rawConfig json.RawMessage, disableArchive bool) error {
 		}
 
 		if err = json.Unmarshal(rawConfig, &cfg); err != nil {
-			log.Warn("Error while unmarshaling raw config json")
+			cclog.Warn("Error while unmarshaling raw config json")
 			return
 		}
@@ -91,10 +91,10 @@ func Init(rawConfig json.RawMessage, disableArchive bool) error {
 		var version uint64
 		version, err = ar.Init(rawConfig)
 		if err != nil {
-			log.Errorf("Error while initializing archiveBackend: %s", err.Error())
+			cclog.Errorf("Error while initializing archiveBackend: %s", err.Error())
 			return
 		}
-		log.Infof("Load archive version %d", version)
+		cclog.Infof("Load archive version %d", version)
 
 		err = initClusterConfig()
 	})
@@ -114,7 +114,7 @@ func LoadAveragesFromArchive(
 ) error {
 	metaFile, err := ar.LoadJobMeta(job)
 	if err != nil {
-		log.Errorf("Error while loading job metadata from archiveBackend: %s", err.Error())
+		cclog.Errorf("Error while loading job metadata from archiveBackend: %s", err.Error())
 		return err
 	}
@@ -137,7 +137,7 @@ func LoadStatsFromArchive(
 	data := make(map[string]schema.MetricStatistics, len(metrics))
 	metaFile, err := ar.LoadJobMeta(job)
 	if err != nil {
-		log.Errorf("Error while loading job metadata from archiveBackend: %s", err.Error())
+		cclog.Errorf("Error while loading job metadata from archiveBackend: %s", err.Error())
 		return data, err
 	}
@@ -166,7 +166,7 @@ func LoadScopedStatsFromArchive(
 ) (schema.ScopedJobStats, error) {
 	data, err := ar.LoadJobStats(job)
 	if err != nil {
-		log.Errorf("Error while loading job stats from archiveBackend: %s", err.Error())
+		cclog.Errorf("Error while loading job stats from archiveBackend: %s", err.Error())
 		return nil, err
 	}
@@ -176,7 +176,7 @@ func LoadScopedStatsFromArchive(
 func GetStatistics(job *schema.Job) (map[string]schema.JobStatistics, error) {
 	metaFile, err := ar.LoadJobMeta(job)
 	if err != nil {
-		log.Errorf("Error while loading job metadata from archiveBackend: %s", err.Error())
+		cclog.Errorf("Error while loading job metadata from archiveBackend: %s", err.Error())
 		return nil, err
 	}
@@ -195,7 +195,7 @@ func UpdateMetadata(job *schema.Job, metadata map[string]string) error {
 	jobMeta, err := ar.LoadJobMeta(job)
 	if err != nil {
-		log.Errorf("Error while loading job metadata from archiveBackend: %s", err.Error())
+		cclog.Errorf("Error while loading job metadata from archiveBackend: %s", err.Error())
 		return err
 	}
@@ -216,7 +216,7 @@ func UpdateTags(job *schema.Job, tags []*schema.Tag) error {
 	jobMeta, err := ar.LoadJobMeta(job)
 	if err != nil {
-		log.Errorf("Error while loading job metadata from archiveBackend: %s", err.Error())
+		cclog.Errorf("Error while loading job metadata from archiveBackend: %s", err.Error())
 		return err
 	}

View File

@@ -1,5 +1,5 @@
 // Copyright (C) NHR@FAU, University Erlangen-Nuremberg.
-// All rights reserved.
+// All rights reserved. This file is part of cc-backend.
 // Use of this source code is governed by a MIT-style
 // license that can be found in the LICENSE file.
 package archive_test
@@ -10,9 +10,9 @@ import (
 	"path/filepath"
 	"testing"
 
-	"github.com/ClusterCockpit/cc-backend/internal/util"
 	"github.com/ClusterCockpit/cc-backend/pkg/archive"
-	"github.com/ClusterCockpit/cc-backend/pkg/schema"
+	"github.com/ClusterCockpit/cc-lib/schema"
+	"github.com/ClusterCockpit/cc-lib/util"
 )
 
 var jobs []*schema.Job

View File

@@ -1,5 +1,5 @@
 // Copyright (C) NHR@FAU, University Erlangen-Nuremberg.
-// All rights reserved.
+// All rights reserved. This file is part of cc-backend.
 // Use of this source code is governed by a MIT-style
 // license that can be found in the LICENSE file.
 package archive
@@ -8,8 +8,8 @@ import (
 	"errors"
 	"fmt"
 
-	"github.com/ClusterCockpit/cc-backend/pkg/log"
-	"github.com/ClusterCockpit/cc-backend/pkg/schema"
+	cclog "github.com/ClusterCockpit/cc-lib/ccLogger"
+	"github.com/ClusterCockpit/cc-lib/schema"
 )
 
 var (
@@ -27,7 +27,7 @@ func initClusterConfig() error {
 		cluster, err := ar.LoadClusterCfg(c)
 		if err != nil {
-			log.Warnf("Error while loading cluster config for cluster '%v'", c)
+			cclog.Warnf("Error while loading cluster config for cluster '%v'", c)
 			return err
 		}

View File

@@ -1,5 +1,5 @@
 // Copyright (C) NHR@FAU, University Erlangen-Nuremberg.
-// All rights reserved.
+// All rights reserved. This file is part of cc-backend.
 // Use of this source code is governed by a MIT-style
 // license that can be found in the LICENSE file.
 package archive_test

View File

@@ -1,5 +1,5 @@
 // Copyright (C) NHR@FAU, University Erlangen-Nuremberg.
-// All rights reserved.
+// All rights reserved. This file is part of cc-backend.
 // Use of this source code is governed by a MIT-style
 // license that can be found in the LICENSE file.
 package archive
@@ -21,9 +21,9 @@ import (
 	"time"
 
 	"github.com/ClusterCockpit/cc-backend/internal/config"
-	"github.com/ClusterCockpit/cc-backend/internal/util"
-	"github.com/ClusterCockpit/cc-backend/pkg/log"
-	"github.com/ClusterCockpit/cc-backend/pkg/schema"
+	cclog "github.com/ClusterCockpit/cc-lib/ccLogger"
+	"github.com/ClusterCockpit/cc-lib/schema"
+	"github.com/ClusterCockpit/cc-lib/util"
 	"github.com/santhosh-tekuri/jsonschema/v5"
 )
@@ -68,7 +68,7 @@ func getPath(
 func loadJobMeta(filename string) (*schema.Job, error) {
 	b, err := os.ReadFile(filename)
 	if err != nil {
-		log.Errorf("loadJobMeta() > open file error: %v", err)
+		cclog.Errorf("loadJobMeta() > open file error: %v", err)
 		return nil, err
 	}
 	if config.Keys.Validate {
@@ -83,7 +83,7 @@ func loadJobMeta(filename string) (*schema.Job, error) {
 func loadJobData(filename string, isCompressed bool) (schema.JobData, error) {
 	f, err := os.Open(filename)
 	if err != nil {
-		log.Errorf("fsBackend LoadJobData()- %v", err)
+		cclog.Errorf("fsBackend LoadJobData()- %v", err)
 		return nil, err
 	}
 	defer f.Close()
@@ -91,7 +91,7 @@ func loadJobData(filename string, isCompressed bool) (schema.JobData, error) {
 	if isCompressed {
 		r, err := gzip.NewReader(f)
 		if err != nil {
-			log.Errorf(" %v", err)
+			cclog.Errorf(" %v", err)
 			return nil, err
 		}
 		defer r.Close()
@@ -116,7 +116,7 @@ func loadJobData(filename string, isCompressed bool) (schema.JobData, error) {
 func loadJobStats(filename string, isCompressed bool) (schema.ScopedJobStats, error) {
 	f, err := os.Open(filename)
 	if err != nil {
-		log.Errorf("fsBackend LoadJobStats()- %v", err)
+		cclog.Errorf("fsBackend LoadJobStats()- %v", err)
 		return nil, err
 	}
 	defer f.Close()
@@ -124,7 +124,7 @@ func loadJobStats(filename string, isCompressed bool) (schema.ScopedJobStats, er
 	if isCompressed {
 		r, err := gzip.NewReader(f)
 		if err != nil {
-			log.Errorf(" %v", err)
+			cclog.Errorf(" %v", err)
 			return nil, err
 		}
 		defer r.Close()
@@ -149,25 +149,25 @@ func loadJobStats(filename string, isCompressed bool) (schema.ScopedJobStats, er
 func (fsa *FsArchive) Init(rawConfig json.RawMessage) (uint64, error) {
 	var config FsArchiveConfig
 	if err := json.Unmarshal(rawConfig, &config); err != nil {
-		log.Warnf("Init() > Unmarshal error: %#v", err)
+		cclog.Warnf("Init() > Unmarshal error: %#v", err)
 		return 0, err
 	}
 
 	if config.Path == "" {
 		err := fmt.Errorf("Init() : empty config.Path")
-		log.Errorf("Init() > config.Path error: %v", err)
+		cclog.Errorf("Init() > config.Path error: %v", err)
 		return 0, err
 	}
 	fsa.path = config.Path
 
 	b, err := os.ReadFile(filepath.Join(fsa.path, "version.txt"))
 	if err != nil {
-		log.Warnf("fsBackend Init() - %v", err)
+		cclog.Warnf("fsBackend Init() - %v", err)
 		return 0, err
 	}
 
 	version, err := strconv.ParseUint(strings.TrimSuffix(string(b), "\n"), 10, 64)
 	if err != nil {
-		log.Errorf("fsBackend Init()- %v", err)
+		cclog.Errorf("fsBackend Init()- %v", err)
 		return 0, err
 	}
@@ -177,7 +177,7 @@ func (fsa *FsArchive) Init(rawConfig json.RawMessage) (uint64, error) {
 	entries, err := os.ReadDir(fsa.path)
 	if err != nil {
-		log.Errorf("Init() > ReadDir() error: %v", err)
+		cclog.Errorf("Init() > ReadDir() error: %v", err)
 		return 0, err
 	}
@@ -195,7 +195,7 @@ func (fsa *FsArchive) Info() {
 	fmt.Printf("Job archive %s\n", fsa.path)
 	clusters, err := os.ReadDir(fsa.path)
 	if err != nil {
-		log.Fatalf("Reading clusters failed: %s", err.Error())
+		cclog.Fatalf("Reading clusters failed: %s", err.Error())
 	}
 
 	ci := make(map[string]*clusterInfo)
@@ -209,7 +209,7 @@ func (fsa *FsArchive) Info() {
 		ci[cc] = &clusterInfo{dateFirst: time.Now().Unix()}
 		lvl1Dirs, err := os.ReadDir(filepath.Join(fsa.path, cluster.Name()))
 		if err != nil {
-			log.Fatalf("Reading jobs failed @ lvl1 dirs: %s", err.Error())
+			cclog.Fatalf("Reading jobs failed @ lvl1 dirs: %s", err.Error())
 		}
 
 		for _, lvl1Dir := range lvl1Dirs {
@@ -218,14 +218,14 @@ func (fsa *FsArchive) Info() {
 			}
 			lvl2Dirs, err := os.ReadDir(filepath.Join(fsa.path, cluster.Name(), lvl1Dir.Name()))
 			if err != nil {
-				log.Fatalf("Reading jobs failed @ lvl2 dirs: %s", err.Error())
+				cclog.Fatalf("Reading jobs failed @ lvl2 dirs: %s", err.Error())
 			}
 
 			for _, lvl2Dir := range lvl2Dirs {
 				dirpath := filepath.Join(fsa.path, cluster.Name(), lvl1Dir.Name(), lvl2Dir.Name())
 				startTimeDirs, err := os.ReadDir(dirpath)
 				if err != nil {
-					log.Fatalf("Reading jobs failed @ starttime dirs: %s", err.Error())
+					cclog.Fatalf("Reading jobs failed @ starttime dirs: %s", err.Error())
 				}
 
 				for _, startTimeDir := range startTimeDirs {
@@ -233,7 +233,7 @@ func (fsa *FsArchive) Info() {
 					ci[cc].numJobs++
 					startTime, err := strconv.ParseInt(startTimeDir.Name(), 10, 64)
 					if err != nil {
-						log.Fatalf("Cannot parse starttime: %s", err.Error())
+						cclog.Fatalf("Cannot parse starttime: %s", err.Error())
 					}
 					ci[cc].dateFirst = util.Min(ci[cc].dateFirst, startTime)
 					ci[cc].dateLast = util.Max(ci[cc].dateLast, startTime)
@@ -278,7 +278,7 @@ func (fsa *FsArchive) Clean(before int64, after int64) {
 	clusters, err := os.ReadDir(fsa.path)
 	if err != nil {
-		log.Fatalf("Reading clusters failed: %s", err.Error())
+		cclog.Fatalf("Reading clusters failed: %s", err.Error())
 	}
 
 	for _, cluster := range clusters {
@@ -288,7 +288,7 @@ func (fsa *FsArchive) Clean(before int64, after int64) {
 		lvl1Dirs, err := os.ReadDir(filepath.Join(fsa.path, cluster.Name()))
 		if err != nil {
-			log.Fatalf("Reading jobs failed @ lvl1 dirs: %s", err.Error())
+			cclog.Fatalf("Reading jobs failed @ lvl1 dirs: %s", err.Error())
 		}
 
 		for _, lvl1Dir := range lvl1Dirs {
@@ -297,33 +297,33 @@ func (fsa *FsArchive) Clean(before int64, after int64) {
 			}
 			lvl2Dirs, err := os.ReadDir(filepath.Join(fsa.path, cluster.Name(), lvl1Dir.Name()))
 			if err != nil {
-				log.Fatalf("Reading jobs failed @ lvl2 dirs: %s", err.Error())
+				cclog.Fatalf("Reading jobs failed @ lvl2 dirs: %s", err.Error())
 			}
 
 			for _, lvl2Dir := range lvl2Dirs {
 				dirpath := filepath.Join(fsa.path, cluster.Name(), lvl1Dir.Name(), lvl2Dir.Name())
 				startTimeDirs, err := os.ReadDir(dirpath)
 				if err != nil {
-					log.Fatalf("Reading jobs failed @ starttime dirs: %s", err.Error())
+					cclog.Fatalf("Reading jobs failed @ starttime dirs: %s", err.Error())
 				}
 
 				for _, startTimeDir := range startTimeDirs {
 					if startTimeDir.IsDir() {
 						startTime, err := strconv.ParseInt(startTimeDir.Name(), 10, 64)
 						if err != nil {
-							log.Fatalf("Cannot parse starttime: %s", err.Error())
+							cclog.Fatalf("Cannot parse starttime: %s", err.Error())
 						}
 						if startTime < before || startTime > after {
 							if err := os.RemoveAll(filepath.Join(dirpath, startTimeDir.Name())); err != nil {
-								log.Errorf("JobArchive Cleanup() error: %v", err)
+								cclog.Errorf("JobArchive Cleanup() error: %v", err)
 							}
 						}
 					}
 				}
 				if util.GetFilecount(dirpath) == 0 {
 					if err := os.Remove(dirpath); err != nil {
-						log.Errorf("JobArchive Clean() error: %v", err)
+						cclog.Errorf("JobArchive Clean() error: %v", err)
 					}
 				}
 			}
@@ -337,16 +337,16 @@ func (fsa *FsArchive) Move(jobs []*schema.Job, path string) {
 		target := getDirectory(job, path)
 		if err := os.MkdirAll(filepath.Clean(filepath.Join(target, "..")), 0777); err != nil {
-			log.Errorf("JobArchive Move MkDir error: %v", err)
+			cclog.Errorf("JobArchive Move MkDir error: %v", err)
 		}
 		if err := os.Rename(source, target); err != nil {
-			log.Errorf("JobArchive Move() error: %v", err)
+			cclog.Errorf("JobArchive Move() error: %v", err)
 		}
 
 		parent := filepath.Clean(filepath.Join(source, ".."))
 		if util.GetFilecount(parent) == 0 {
 			if err := os.Remove(parent); err != nil {
-				log.Errorf("JobArchive Move() error: %v", err)
+				cclog.Errorf("JobArchive Move() error: %v", err)
 			}
 		}
 	}
@@ -357,18 +357,18 @@ func (fsa *FsArchive) CleanUp(jobs []*schema.Job) {
 	for _, job := range jobs {
 		dir := getDirectory(job, fsa.path)
 		if err := os.RemoveAll(dir); err != nil {
-			log.Errorf("JobArchive Cleanup() error: %v", err)
+			cclog.Errorf("JobArchive Cleanup() error: %v", err)
 		}
 
 		parent := filepath.Clean(filepath.Join(dir, ".."))
 		if util.GetFilecount(parent) == 0 {
 			if err := os.Remove(parent); err != nil {
-				log.Errorf("JobArchive Cleanup() error: %v", err)
+				cclog.Errorf("JobArchive Cleanup() error: %v", err)
 			}
 		}
 	}
 
-	log.Infof("Retention Service - Remove %d files in %s", len(jobs), time.Since(start))
+	cclog.Infof("Retention Service - Remove %d files in %s", len(jobs), time.Since(start))
 }
 
 func (fsa *FsArchive) Compress(jobs []*schema.Job) {
@@ -383,24 +383,24 @@ func (fsa *FsArchive) Compress(jobs []*schema.Job) {
 		}
 	}
 
-	log.Infof("Compression Service - %d files took %s", cnt, time.Since(start))
+	cclog.Infof("Compression Service - %d files took %s", cnt, time.Since(start))
 }
 
 func (fsa *FsArchive) CompressLast(starttime int64) int64 {
 	filename := filepath.Join(fsa.path, "compress.txt")
 	b, err := os.ReadFile(filename)
 	if err != nil {
-		log.Errorf("fsBackend Compress - %v", err)
+		cclog.Errorf("fsBackend Compress - %v", err)
 		os.WriteFile(filename, []byte(fmt.Sprintf("%d", starttime)), 0644)
 		return starttime
 	}
 	last, err := strconv.ParseInt(strings.TrimSuffix(string(b), "\n"), 10, 64)
 	if err != nil {
-		log.Errorf("fsBackend Compress - %v", err)
+		cclog.Errorf("fsBackend Compress - %v", err)
 		return starttime
 	}
 
-	log.Infof("fsBackend Compress - start %d last %d", starttime, last)
+	cclog.Infof("fsBackend Compress - start %d last %d", starttime, last)
 	os.WriteFile(filename, []byte(fmt.Sprintf("%d", starttime)), 0644)
 	return last
 }
@@ -437,10 +437,10 @@ func (fsa *FsArchive) LoadJobMeta(job *schema.Job) (*schema.Job, error) {
 func (fsa *FsArchive) LoadClusterCfg(name string) (*schema.Cluster, error) {
 	b, err := os.ReadFile(filepath.Join(fsa.path, name, "cluster.json"))
 	if err != nil {
-		log.Errorf("LoadClusterCfg() > open file error: %v", err)
+		cclog.Errorf("LoadClusterCfg() > open file error: %v", err)
 		// if config.Keys.Validate {
 	}
 	if err := schema.Validate(schema.ClusterCfg, bytes.NewReader(b)); err != nil {
-		log.Warnf("Validate cluster config: %v\n", err)
+		cclog.Warnf("Validate cluster config: %v\n", err)
 		return &schema.Cluster{}, fmt.Errorf("validate cluster config: %v", err)
 	}
@@ -453,7 +453,7 @@ func (fsa *FsArchive) Iter(loadMetricData bool) <-chan JobContainer {
 	go func() {
 		clustersDir, err := os.ReadDir(fsa.path)
 		if err != nil {
-			log.Fatalf("Reading clusters failed @ cluster dirs: %s", err.Error())
+			cclog.Fatalf("Reading clusters failed @ cluster dirs: %s", err.Error())
 		}
 
 		for _, clusterDir := range clustersDir {
@@ -462,7 +462,7 @@ func (fsa *FsArchive) Iter(loadMetricData bool) <-chan JobContainer {
 			}
 			lvl1Dirs, err := os.ReadDir(filepath.Join(fsa.path, clusterDir.Name()))
 			if err != nil {
-				log.Fatalf("Reading jobs failed @ lvl1 dirs: %s", err.Error())
+				cclog.Fatalf("Reading jobs failed @ lvl1 dirs: %s", err.Error())
 			}
 
 			for _, lvl1Dir := range lvl1Dirs {
@@ -473,21 +473,21 @@ func (fsa *FsArchive) Iter(loadMetricData bool) <-chan JobContainer {
 				lvl2Dirs, err := os.ReadDir(filepath.Join(fsa.path, clusterDir.Name(), lvl1Dir.Name()))
 				if err != nil {
-					log.Fatalf("Reading jobs failed @ lvl2 dirs: %s", err.Error())
+					cclog.Fatalf("Reading jobs failed @ lvl2 dirs: %s", err.Error())
 				}
 
 				for _, lvl2Dir := range lvl2Dirs {
 					dirpath := filepath.Join(fsa.path, clusterDir.Name(), lvl1Dir.Name(), lvl2Dir.Name())
 					startTimeDirs, err := os.ReadDir(dirpath)
 					if err != nil {
-						log.Fatalf("Reading jobs failed @ starttime dirs: %s", err.Error())
+						cclog.Fatalf("Reading jobs failed @ starttime dirs: %s", err.Error())
 					}
 
 					for _, startTimeDir := range startTimeDirs {
 						if startTimeDir.IsDir() {
 							job, err := loadJobMeta(filepath.Join(dirpath, startTimeDir.Name(), "meta.json"))
 							if err != nil && !errors.Is(err, &jsonschema.ValidationError{}) {
-								log.Errorf("in %s: %s", filepath.Join(dirpath, startTimeDir.Name()), err.Error())
+								cclog.Errorf("in %s: %s", filepath.Join(dirpath, startTimeDir.Name()), err.Error())
 							}
 
 							if loadMetricData {
@@ -501,10 +501,10 @@ func (fsa *FsArchive) Iter(loadMetricData bool) <-chan JobContainer {
 								data, err := loadJobData(filename, isCompressed)
 								if err != nil && !errors.Is(err, &jsonschema.ValidationError{}) {
-									log.Errorf("in %s: %s", filepath.Join(dirpath, startTimeDir.Name()), err.Error())
+									cclog.Errorf("in %s: %s", filepath.Join(dirpath, startTimeDir.Name()), err.Error())
 								}
 								ch <- JobContainer{Meta: job, Data: &data}
-								log.Errorf("in %s: %s", filepath.Join(dirpath, startTimeDir.Name()), err.Error())
+								cclog.Errorf("in %s: %s", filepath.Join(dirpath, startTimeDir.Name()), err.Error())
 							} else {
 								ch <- JobContainer{Meta: job, Data: nil}
 							}
@@ -521,15 +521,15 @@ func (fsa *FsArchive) Iter(loadMetricData bool) <-chan JobContainer {
 func (fsa *FsArchive) StoreJobMeta(job *schema.Job) error {
 	f, err := os.Create(getPath(job, fsa.path, "meta.json"))
 	if err != nil {
-		log.Error("Error while creating filepath for meta.json")
+		cclog.Error("Error while creating filepath for meta.json")
 		return err
 	}
 	if err := EncodeJobMeta(f, job); err != nil {
-		log.Error("Error while encoding job metadata to meta.json file")
+		cclog.Error("Error while encoding job metadata to meta.json file")
 		return err
 	}
 	if err := f.Close(); err != nil {
-		log.Warn("Error while closing meta.json file")
+		cclog.Warn("Error while closing meta.json file")
 		return err
 	}
@@ -546,35 +546,35 @@ func (fsa *FsArchive) ImportJob(
 ) error {
 	dir := getPath(jobMeta, fsa.path, "")
 	if err := os.MkdirAll(dir, 0777); err != nil {
-		log.Error("Error while creating job archive path")
+		cclog.Error("Error while creating job archive path")
 		return err
 	}
 	f, err := os.Create(path.Join(dir, "meta.json"))
 	if err != nil {
-		log.Error("Error while creating filepath for meta.json")
+		cclog.Error("Error while creating filepath for meta.json")
 		return err
 	}
 	if err := EncodeJobMeta(f, jobMeta); err != nil {
-		log.Error("Error while encoding job metadata to meta.json file")
+		cclog.Error("Error while encoding job metadata to meta.json file")
 		return err
 	}
 	if err := f.Close(); err != nil {
-		log.Warn("Error while closing meta.json file")
+		cclog.Warn("Error while closing meta.json file")
 		return err
 	}
 
 	f, err = os.Create(path.Join(dir, "data.json"))
 	if err != nil {
-		log.Error("Error while creating filepath for data.json")
+		cclog.Error("Error while creating filepath for data.json")
 		return err
 	}
 	if err := EncodeJobData(f, jobData); err != nil {
-		log.Error("Error while encoding job metricdata to data.json file")
+		cclog.Error("Error while encoding job metricdata to data.json file")
 		return err
 	}
 	if err := f.Close(); err != nil {
-		log.Warn("Error while closing data.json file")
+		cclog.Warn("Error while closing data.json file")
 	}
 	return err
 }
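
The loadJobData/loadJobStats hunks in this file all share one shape: open the file, optionally route it through a gzip reader, then decode, logging through the new cclog package on failure. A self-contained sketch of that shape (stdlib only; names and error text are simplified, not taken verbatim from the file):

```go
package main

import (
	"compress/gzip"
	"encoding/json"
	"fmt"
	"io"
	"os"
)

// loadJSON mirrors the loader pattern from fsBackend: plain files are decoded
// directly, while compressed files are wrapped in a gzip.Reader first.
func loadJSON(filename string, isCompressed bool, v any) error {
	f, err := os.Open(filename)
	if err != nil {
		return fmt.Errorf("open %s: %w", filename, err)
	}
	defer f.Close()

	var r io.Reader = f
	if isCompressed {
		gz, err := gzip.NewReader(f)
		if err != nil {
			return fmt.Errorf("gzip %s: %w", filename, err)
		}
		defer gz.Close()
		r = gz
	}
	return json.NewDecoder(r).Decode(v)
}

func main() {
	var data map[string]any
	if err := loadJSON("data.json", false, &data); err != nil {
		fmt.Println("load failed:", err)
	}
}
```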

View File

@@ -1,5 +1,5 @@
 // Copyright (C) NHR@FAU, University Erlangen-Nuremberg.
-// All rights reserved.
+// All rights reserved. This file is part of cc-backend.
 // Use of this source code is governed by a MIT-style
 // license that can be found in the LICENSE file.
 package archive
@@ -10,8 +10,8 @@ import (
 	"path/filepath"
 	"testing"
 
-	"github.com/ClusterCockpit/cc-backend/internal/util"
-	"github.com/ClusterCockpit/cc-backend/pkg/schema"
+	"github.com/ClusterCockpit/cc-lib/schema"
+	"github.com/ClusterCockpit/cc-lib/util"
 )
 
 func TestInitEmptyPath(t *testing.T) {

View File

@@ -1,5 +1,5 @@
 // Copyright (C) NHR@FAU, University Erlangen-Nuremberg.
-// All rights reserved.
+// All rights reserved. This file is part of cc-backend.
 // Use of this source code is governed by a MIT-style
 // license that can be found in the LICENSE file.
 package archive
@@ -9,15 +9,15 @@ import (
 	"io"
 	"time"
 
-	"github.com/ClusterCockpit/cc-backend/pkg/log"
-	"github.com/ClusterCockpit/cc-backend/pkg/schema"
+	cclog "github.com/ClusterCockpit/cc-lib/ccLogger"
+	"github.com/ClusterCockpit/cc-lib/schema"
 )
 
 func DecodeJobData(r io.Reader, k string) (schema.JobData, error) {
 	data := cache.Get(k, func() (value interface{}, ttl time.Duration, size int) {
 		var d schema.JobData
 		if err := json.NewDecoder(r).Decode(&d); err != nil {
-			log.Warn("Error while decoding raw job data json")
+			cclog.Warn("Error while decoding raw job data json")
 			return err, 0, 1000
 		}
@@ -25,7 +25,7 @@ func DecodeJobData(r io.Reader, k string) (schema.JobData, error) {
 	})
 
 	if err, ok := data.(error); ok {
-		log.Warn("Error in decoded job data set")
+		cclog.Warn("Error in decoded job data set")
 		return nil, err
 	}
@@ -72,7 +72,7 @@ func DecodeJobStats(r io.Reader, k string) (schema.ScopedJobStats, error) {
 func DecodeJobMeta(r io.Reader) (*schema.Job, error) {
 	var d schema.Job
 	if err := json.NewDecoder(r).Decode(&d); err != nil {
-		log.Warn("Error while decoding raw job meta json")
+		cclog.Warn("Error while decoding raw job meta json")
 		return &d, err
 	}
@@ -84,7 +84,7 @@ func DecodeJobMeta(r io.Reader) (*schema.Job, error) {
 func DecodeCluster(r io.Reader) (*schema.Cluster, error) {
 	var c schema.Cluster
 	if err := json.NewDecoder(r).Decode(&c); err != nil {
-		log.Warn("Error while decoding raw cluster json")
+		cclog.Warn("Error while decoding raw cluster json")
 		return &c, err
 	}
@@ -96,7 +96,7 @@ func DecodeCluster(r io.Reader) (*schema.Cluster, error) {
 func EncodeJobData(w io.Writer, d *schema.JobData) error {
 	// Sanitize parameters
 	if err := json.NewEncoder(w).Encode(d); err != nil {
-		log.Warn("Error while encoding new job data json")
+		cclog.Warn("Error while encoding new job data json")
 		return err
 	}
@@ -106,7 +106,7 @@ func EncodeJobData(w io.Writer, d *schema.JobData) error {
 func EncodeJobMeta(w io.Writer, d *schema.Job) error {
 	// Sanitize parameters
 	if err := json.NewEncoder(w).Encode(d); err != nil {
-		log.Warn("Error while encoding new job meta json")
+		cclog.Warn("Error while encoding new job meta json")
 		return err
 	}
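
For orientation, the DecodeJobData hunks above layer an LRU cache over the JSON decode and store a failed decode as the cache value itself, so callers unwrap the result with a type assertion. A minimal standalone sketch of that idiom, assuming cc-lib's lrucache keeps the Get(key, computeValue) signature visible in the hunk:

```go
package main

import (
	"encoding/json"
	"fmt"
	"strings"
	"time"

	"github.com/ClusterCockpit/cc-lib/lrucache"
)

var cache = lrucache.New(1024 * 1024)

// decodeCached caches either the decoded document or the decode error; the
// caller distinguishes the two with a type assertion, as DecodeJobData does.
func decodeCached(key, raw string) (map[string]any, error) {
	v := cache.Get(key, func() (any, time.Duration, int) {
		var d map[string]any
		if err := json.NewDecoder(strings.NewReader(raw)).Decode(&d); err != nil {
			return err, 0, 1000 // cache the failure with a small cost estimate
		}
		return d, 5 * time.Minute, len(raw)
	})
	if err, ok := v.(error); ok {
		return nil, err
	}
	return v.(map[string]any), nil
}

func main() {
	doc, err := decodeCached("job:1", `{"flops_any": 42}`)
	fmt.Println(doc, err)
}
```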

View File

@@ -1,5 +1,5 @@
 // Copyright (C) NHR@FAU, University Erlangen-Nuremberg.
-// All rights reserved.
+// All rights reserved. This file is part of cc-backend.
 // Use of this source code is governed by a MIT-style
 // license that can be found in the LICENSE file.
 package archive
@@ -9,7 +9,7 @@ import (
 	"strconv"
 	"strings"
 
-	"github.com/ClusterCockpit/cc-backend/pkg/log"
+	cclog "github.com/ClusterCockpit/cc-lib/ccLogger"
 )
 
 type NodeList [][]interface {
@@ -51,7 +51,7 @@ func (nl *NodeList) PrintList() []string {
 				if inner["zeroPadded"] == 1 {
 					out = append(out, fmt.Sprintf("%s%0*d", prefix, inner["digits"], i))
 				} else {
-					log.Error("node list: only zero-padded ranges are allowed")
+					cclog.Error("node list: only zero-padded ranges are allowed")
 				}
 			}
 		}
@@ -129,7 +129,7 @@ type NLExprIntRange struct {
 func (nle NLExprIntRange) consume(input string) (next string, ok bool) {
 	if !nle.zeroPadded || nle.digits < 1 {
-		log.Error("only zero-padded ranges are allowed")
+		cclog.Error("only zero-padded ranges are allowed")
 		return "", false
 	}

View File

@@ -1,5 +1,5 @@
 // Copyright (C) NHR@FAU, University Erlangen-Nuremberg.
-// All rights reserved.
+// All rights reserved. This file is part of cc-backend.
 // Use of this source code is governed by a MIT-style
 // license that can be found in the LICENSE file.
 package archive

View File

@@ -1,5 +1,5 @@
 // Copyright (C) NHR@FAU, University Erlangen-Nuremberg.
-// All rights reserved.
+// All rights reserved. This file is part of cc-backend.
 // Use of this source code is governed by a MIT-style
 // license that can be found in the LICENSE file.
 package archive