Mirror of https://github.com/ClusterCockpit/cc-backend, synced 2025-07-23 04:51:39 +02:00
Merge branch 'master' into import-data-sanitation
pkg/archive/archive.go
@@ -40,12 +40,15 @@ type JobContainer struct {
 var cache *lrucache.Cache = lrucache.New(128 * 1024 * 1024)
 var ar ArchiveBackend
+var useArchive bool

-func Init(rawConfig json.RawMessage) error {
+func Init(rawConfig json.RawMessage, disableArchive bool) error {
+	useArchive = !disableArchive
 	var kind struct {
 		Kind string `json:"kind"`
 	}
 	if err := json.Unmarshal(rawConfig, &kind); err != nil {
+		log.Warn("Error while unmarshaling raw config json")
 		return err
 	}
@@ -55,11 +58,12 @@ func Init(rawConfig json.RawMessage) error {
 	// case "s3":
 	// 	ar = &S3Archive{}
 	default:
-		return fmt.Errorf("unkown archive backend '%s''", kind.Kind)
+		return fmt.Errorf("ARCHIVE/ARCHIVE > unkown archive backend '%s''", kind.Kind)
 	}

 	version, err := ar.Init(rawConfig)
 	if err != nil {
+		log.Error("Error while initializing archiveBackend")
 		return err
 	}
 	log.Infof("Load archive version %d", version)
@@ -78,6 +82,7 @@ func LoadAveragesFromArchive(
 	metaFile, err := ar.LoadJobMeta(job)
 	if err != nil {
+		log.Warn("Error while loading job metadata from archiveBackend")
 		return err
 	}
@@ -96,6 +101,7 @@ func GetStatistics(job *schema.Job) (map[string]schema.JobStatistics, error) {
 	metaFile, err := ar.LoadJobMeta(job)
 	if err != nil {
+		log.Warn("Error while loading job metadata from archiveBackend")
 		return nil, err
 	}
@@ -106,12 +112,13 @@ func GetStatistics(job *schema.Job) (map[string]schema.JobStatistics, error) {
 // in that JSON file. If the job is not archived, nothing is done.
 func UpdateTags(job *schema.Job, tags []*schema.Tag) error {

-	if job.State == schema.JobStateRunning {
+	if job.State == schema.JobStateRunning || !useArchive {
 		return nil
 	}

 	jobMeta, err := ar.LoadJobMeta(job)
 	if err != nil {
+		log.Warn("Error while loading job metadata from archiveBackend")
 		return err
 	}
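Note on the Init change above: the new disableArchive parameter is stored inverted in the package-level useArchive flag, which UpdateTags now consults before touching the archive. A minimal sketch of how a caller might wire a CLI flag into the new signature; the flag name and the config literal are illustrative assumptions, not part of this diff:

package main

import (
	"encoding/json"
	"flag"
	"log"

	"github.com/ClusterCockpit/cc-backend/pkg/archive"
)

func main() {
	// Hypothetical flag; cc-backend's own main wires this up elsewhere.
	disableArchive := flag.Bool("disable-archive", false, "do not use the job archive")
	flag.Parse()

	// "kind" selects the backend in archive.Init; "file" assumes the fs backend.
	rawCfg := json.RawMessage(`{"kind": "file", "path": "./var/job-archive"}`)

	if err := archive.Init(rawCfg, *disableArchive); err != nil {
		log.Fatalf("archive init failed: %v", err)
	}
}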
pkg/archive/clusterConfig.go
@@ -9,6 +9,7 @@ import (
 	"fmt"

 	"github.com/ClusterCockpit/cc-backend/pkg/schema"
+	"github.com/ClusterCockpit/cc-backend/pkg/log"
 )

 var Clusters []*schema.Cluster
@@ -23,6 +24,7 @@ func initClusterConfig() error {
 		cluster, err := ar.LoadClusterCfg(c)
 		if err != nil {
+			log.Warnf("Error while loading cluster config for cluster '%v'", c)
 			return err
 		}
@@ -59,7 +61,7 @@ func initClusterConfig() error {
 			nl, err := ParseNodeList(sc.Nodes)
 			if err != nil {
-				return fmt.Errorf("in %s/cluster.json: %w", cluster.Name, err)
+				return fmt.Errorf("ARCHIVE/CLUSTERCONFIG > in %s/cluster.json: %w", cluster.Name, err)
 			}
 			nodeLists[cluster.Name][sc.Name] = nl
 		}
@@ -111,7 +113,7 @@ func AssignSubCluster(job *schema.BaseJob) error {
 	cluster := GetCluster(job.Cluster)
 	if cluster == nil {
-		return fmt.Errorf("unkown cluster: %#v", job.Cluster)
+		return fmt.Errorf("ARCHIVE/CLUSTERCONFIG > unkown cluster: %v", job.Cluster)
 	}

 	if job.SubCluster != "" {
@@ -120,11 +122,11 @@ func AssignSubCluster(job *schema.BaseJob) error {
 				return nil
 			}
 		}
-		return fmt.Errorf("already assigned subcluster %#v unkown (cluster: %#v)", job.SubCluster, job.Cluster)
+		return fmt.Errorf("ARCHIVE/CLUSTERCONFIG > already assigned subcluster %v unkown (cluster: %v)", job.SubCluster, job.Cluster)
 	}

 	if len(job.Resources) == 0 {
-		return fmt.Errorf("job without any resources/hosts")
+		return fmt.Errorf("ARCHIVE/CLUSTERCONFIG > job without any resources/hosts")
 	}

 	host0 := job.Resources[0].Hostname
@@ -140,7 +142,7 @@ func AssignSubCluster(job *schema.BaseJob) error {
 		return nil
 	}

-	return fmt.Errorf("no subcluster found for cluster %#v and host %#v", job.Cluster, host0)
+	return fmt.Errorf("ARCHIVE/CLUSTERCONFIG > no subcluster found for cluster %v and host %v", job.Cluster, host0)
 }

 func GetSubClusterByNode(cluster, hostname string) (string, error) {
@@ -153,12 +155,12 @@ func GetSubClusterByNode(cluster, hostname string) (string, error) {
 	c := GetCluster(cluster)
 	if c == nil {
-		return "", fmt.Errorf("unkown cluster: %#v", cluster)
+		return "", fmt.Errorf("ARCHIVE/CLUSTERCONFIG > unkown cluster: %v", cluster)
 	}

 	if c.SubClusters[0].Nodes == "" {
 		return c.SubClusters[0].Name, nil
 	}

-	return "", fmt.Errorf("no subcluster found for cluster %#v and host %#v", cluster, hostname)
+	return "", fmt.Errorf("ARCHIVE/CLUSTERCONFIG > no subcluster found for cluster %v and host %v", cluster, hostname)
 }
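The recurring change in this file is purely about error strings: an ARCHIVE/CLUSTERCONFIG > prefix is added and the Go-syntax verb %#v is replaced by the plain %v. A small self-contained example of what that swap changes in the rendered message (values are made up):

package main

import "fmt"

func main() {
	cluster := "emmy"
	// %#v renders the Go-syntax form, quoting strings:
	fmt.Println(fmt.Errorf("unkown cluster: %#v", cluster))
	// unkown cluster: "emmy"

	// %v renders the plain value, matching the new prefixed style:
	fmt.Println(fmt.Errorf("ARCHIVE/CLUSTERCONFIG > unkown cluster: %v", cluster))
	// ARCHIVE/CLUSTERCONFIG > unkown cluster: emmy
}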
pkg/archive/fsBackend.go
@@ -55,7 +55,7 @@ func loadJobMeta(filename string) (*schema.JobMeta, error) {
 	b, err := os.ReadFile(filename)
 	if err != nil {
-		log.Errorf("fsBackend loadJobMeta()- %v", err)
+		log.Errorf("loadJobMeta() > open file error: %v", err)
 		return &schema.JobMeta{}, err
 	}
 	if config.Keys.Validate {
@@ -105,12 +105,12 @@ func (fsa *FsArchive) Init(rawConfig json.RawMessage) (int, error) {
 	var config FsArchiveConfig
 	if err := json.Unmarshal(rawConfig, &config); err != nil {
-		log.Errorf("fsBackend Init()- %v", err)
+		log.Warnf("Init() > Unmarshal error: %#v", err)
 		return 0, err
 	}
 	if config.Path == "" {
-		err := fmt.Errorf("fsBackend Init()- empty path")
-		log.Errorf("fsBackend Init()- %v", err)
+		err := fmt.Errorf("Init() : empty config.Path")
+		log.Errorf("Init() > config.Path error: %v", err)
 		return 0, err
 	}
 	fsa.path = config.Path
@@ -133,7 +133,7 @@ func (fsa *FsArchive) Init(rawConfig json.RawMessage) (int, error) {
 	entries, err := os.ReadDir(fsa.path)
 	if err != nil {
-		log.Errorf("fsBackend Init()- %v", err)
+		log.Errorf("Init() > ReadDir() error: %v", err)
 		return 0, err
 	}
@@ -150,10 +150,10 @@ func (fsa *FsArchive) Init(rawConfig json.RawMessage) (int, error) {
 func (fsa *FsArchive) LoadJobData(job *schema.Job) (schema.JobData, error) {
 	var isCompressed bool = true
 	filename := getPath(job, fsa.path, "data.json.gz")

 	if !checkFileExists(filename) {
 		filename = getPath(job, fsa.path, "data.json")
 		isCompressed = false
-		return nil, err
 	}

 	return loadJobData(filename, isCompressed)
@@ -169,12 +169,13 @@ func (fsa *FsArchive) LoadClusterCfg(name string) (*schema.Cluster, error) {
 	b, err := os.ReadFile(filepath.Join(fsa.path, name, "cluster.json"))
 	if err != nil {
-		log.Errorf("fsBackend LoadClusterCfg()- %v", err)
+		log.Errorf("LoadClusterCfg() > open file error: %v", err)
 		return &schema.Cluster{}, err
 	}
 	// if config.Keys.Validate {
 	if err := schema.Validate(schema.ClusterCfg, bytes.NewReader(b)); err != nil {
+		log.Warnf("Validate cluster config: %v\n", err)
 		return &schema.Cluster{}, fmt.Errorf("validate cluster config: %v", err)
 	}
 	// }
 	return DecodeCluster(bytes.NewReader(b))
@@ -186,7 +187,7 @@ func (fsa *FsArchive) Iter(loadMetricData bool) <-chan JobContainer {
 	go func() {
 		clustersDir, err := os.ReadDir(fsa.path)
 		if err != nil {
-			log.Fatalf("Reading clusters failed: %s", err.Error())
+			log.Fatalf("Reading clusters failed @ cluster dirs: %s", err.Error())
 		}

 		for _, clusterDir := range clustersDir {
@@ -195,7 +196,7 @@ func (fsa *FsArchive) Iter(loadMetricData bool) <-chan JobContainer {
 			}
 			lvl1Dirs, err := os.ReadDir(filepath.Join(fsa.path, clusterDir.Name()))
 			if err != nil {
-				log.Fatalf("Reading jobs failed: %s", err.Error())
+				log.Fatalf("Reading jobs failed @ lvl1 dirs: %s", err.Error())
 			}

 			for _, lvl1Dir := range lvl1Dirs {
@@ -206,19 +207,18 @@ func (fsa *FsArchive) Iter(loadMetricData bool) <-chan JobContainer {
 				lvl2Dirs, err := os.ReadDir(filepath.Join(fsa.path, clusterDir.Name(), lvl1Dir.Name()))
 				if err != nil {
-					log.Fatalf("Reading jobs failed: %s", err.Error())
+					log.Fatalf("Reading jobs failed @ lvl2 dirs: %s", err.Error())
 				}

 				for _, lvl2Dir := range lvl2Dirs {
 					dirpath := filepath.Join(fsa.path, clusterDir.Name(), lvl1Dir.Name(), lvl2Dir.Name())
 					startTimeDirs, err := os.ReadDir(dirpath)
 					if err != nil {
-						log.Fatalf("Reading jobs failed: %s", err.Error())
+						log.Fatalf("Reading jobs failed @ starttime dirs: %s", err.Error())
 					}

 					for _, startTimeDir := range startTimeDirs {
 						if startTimeDir.IsDir() {
 							job, err := loadJobMeta(filepath.Join(dirpath, startTimeDir.Name(), "meta.json"))
 							if err != nil && !errors.Is(err, &jsonschema.ValidationError{}) {
 								log.Errorf("in %s: %s", filepath.Join(dirpath, startTimeDir.Name()), err.Error())
 							}
@@ -237,6 +237,7 @@ func (fsa *FsArchive) Iter(loadMetricData bool) <-chan JobContainer {
 								log.Errorf("in %s: %s", filepath.Join(dirpath, startTimeDir.Name()), err.Error())
 							}
 							ch <- JobContainer{Meta: job, Data: &data}
 						} else {
 							ch <- JobContainer{Meta: job, Data: nil}
 						}
@@ -259,12 +260,15 @@ func (fsa *FsArchive) StoreJobMeta(jobMeta *schema.JobMeta) error {
 	}
 	f, err := os.Create(getPath(&job, fsa.path, "meta.json"))
 	if err != nil {
+		log.Error("Error while creating filepath for meta.json")
 		return err
 	}
 	if err := EncodeJobMeta(f, jobMeta); err != nil {
+		log.Error("Error while encoding job metadata to meta.json file")
 		return err
 	}
 	if err := f.Close(); err != nil {
+		log.Warn("Error while closing meta.json file")
 		return err
 	}
@@ -287,17 +291,21 @@ func (fsa *FsArchive) ImportJob(
 	}
 	dir := getPath(&job, fsa.path, "")
 	if err := os.MkdirAll(dir, 0777); err != nil {
+		log.Error("Error while creating job archive path")
 		return err
 	}

 	f, err := os.Create(path.Join(dir, "meta.json"))
 	if err != nil {
+		log.Error("Error while creating filepath for meta.json")
 		return err
 	}
 	if err := EncodeJobMeta(f, jobMeta); err != nil {
+		log.Error("Error while encoding job metadata to meta.json file")
 		return err
 	}
 	if err := f.Close(); err != nil {
+		log.Warn("Error while closing meta.json file")
 		return err
 	}
@@ -325,11 +333,17 @@ func (fsa *FsArchive) ImportJob(
 	f, err = os.Create(path.Join(dir, "data.json"))
 	if err != nil {
+		log.Error("Error while creating filepath for data.json")
 		return err
 	}
 	if err := EncodeJobData(f, jobData); err != nil {
+		log.Error("Error while encoding job metricdata to data.json file")
 		return err
 	}
+	if err := f.Close(); err != nil {
+		log.Warn("Error while closing data.json file")
+		return err
+	}
-	return f.Close()
 }
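The Iter changes above tag each log.Fatalf with the directory level at which reading failed, mirroring the archive layout <path>/<cluster>/<lvl1>/<lvl2>/<starttime>/ that the loop walks. A sketch of how such a job path is assembled; the jobID/1000 and zero-padded jobID%1000 split is an assumption about the file backend's getPath, kept here for illustration:

package main

import (
	"fmt"
	"path/filepath"
)

// jobPath mimics the two-level job-ID split the Iter loop walks;
// treat the exact scheme as an assumption, not a verbatim copy of getPath.
func jobPath(archive, cluster string, jobID, startTime int64) string {
	lvl1 := fmt.Sprintf("%d", jobID/1000)   // e.g. 1404397 -> "1404"
	lvl2 := fmt.Sprintf("%03d", jobID%1000) // e.g. 1404397 -> "397"
	return filepath.Join(archive, cluster, lvl1, lvl2,
		fmt.Sprintf("%d", startTime), "meta.json")
}

func main() {
	fmt.Println(jobPath("./var/job-archive", "emmy", 1404397, 1609300556))
	// ./var/job-archive/emmy/1404/397/1609300556/meta.json
}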
pkg/archive/fsBackend_test.go
@@ -10,9 +10,14 @@ import (
 	"testing"
 	"time"

+	"github.com/ClusterCockpit/cc-backend/pkg/log"
 	"github.com/ClusterCockpit/cc-backend/pkg/schema"
 )

+func init() {
+	log.Init("info", true)
+}
+
 func TestInitEmptyPath(t *testing.T) {
 	var fsa FsArchive
 	_, err := fsa.Init(json.RawMessage("{\"kind\":\"../../test/archive\"}"))
pkg/archive/json.go
@@ -10,12 +10,14 @@ import (
 	"time"

 	"github.com/ClusterCockpit/cc-backend/pkg/schema"
+	"github.com/ClusterCockpit/cc-backend/pkg/log"
 )

 func DecodeJobData(r io.Reader, k string) (schema.JobData, error) {
 	data := cache.Get(k, func() (value interface{}, ttl time.Duration, size int) {
 		var d schema.JobData
 		if err := json.NewDecoder(r).Decode(&d); err != nil {
+			log.Warn("Error while decoding raw job data json")
 			return err, 0, 1000
 		}
@@ -23,6 +25,7 @@ func DecodeJobData(r io.Reader, k string) (schema.JobData, error) {
 	})

 	if err, ok := data.(error); ok {
+		log.Warn("Error in decoded job data set")
 		return nil, err
 	}
@@ -32,6 +35,7 @@ func DecodeJobData(r io.Reader, k string) (schema.JobData, error) {
 func DecodeJobMeta(r io.Reader) (*schema.JobMeta, error) {
 	var d schema.JobMeta
 	if err := json.NewDecoder(r).Decode(&d); err != nil {
+		log.Warn("Error while decoding raw job meta json")
 		return &d, err
 	}
@@ -43,6 +47,7 @@ func DecodeJobMeta(r io.Reader) (*schema.JobMeta, error) {
 func DecodeCluster(r io.Reader) (*schema.Cluster, error) {
 	var c schema.Cluster
 	if err := json.NewDecoder(r).Decode(&c); err != nil {
+		log.Warn("Error while decoding raw cluster json")
 		return &c, err
 	}
@@ -54,6 +59,7 @@ func DecodeCluster(r io.Reader) (*schema.Cluster, error) {
 func EncodeJobData(w io.Writer, d *schema.JobData) error {
 	// Sanitize parameters
 	if err := json.NewEncoder(w).Encode(d); err != nil {
+		log.Warn("Error while encoding new job data json")
 		return err
 	}
@@ -63,6 +69,7 @@ func EncodeJobData(w io.Writer, d *schema.JobData) error {
 func EncodeJobMeta(w io.Writer, d *schema.JobMeta) error {
 	// Sanitize parameters
 	if err := json.NewEncoder(w).Encode(d); err != nil {
+		log.Warn("Error while encoding new job meta json")
 		return err
 	}
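Every codec in this file now logs before returning the error, but the signatures are unchanged, so a round trip still looks as before. A hedged sketch of such a round trip; it assumes schema.JobMeta embeds BaseJob with JobID and Cluster fields, as in pkg/schema, and the field values are made up:

package main

import (
	"bytes"
	"fmt"

	"github.com/ClusterCockpit/cc-backend/pkg/archive"
	"github.com/ClusterCockpit/cc-backend/pkg/schema"
)

func main() {
	in := &schema.JobMeta{}
	in.JobID = 42
	in.Cluster = "emmy"

	var buf bytes.Buffer
	if err := archive.EncodeJobMeta(&buf, in); err != nil {
		fmt.Println("encode:", err) // the package now also logs this
		return
	}
	out, err := archive.DecodeJobMeta(&buf)
	if err != nil {
		fmt.Println("decode:", err)
		return
	}
	fmt.Println(out.JobID, out.Cluster) // 42 emmy
}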
pkg/archive/nodelist.go
@@ -129,7 +129,7 @@ type NLExprIntRange struct {
 func (nle NLExprIntRange) consume(input string) (next string, ok bool) {
 	if !nle.zeroPadded || nle.digits < 1 {
-		log.Error("node list: only zero-padded ranges are allowed")
+		log.Error("only zero-padded ranges are allowed")
 		return "", false
 	}
@@ -178,6 +178,7 @@ func (nles NLExprIntRange) prefix() string {
 func ParseNodeList(raw string) (NodeList, error) {
 	isLetter := func(r byte) bool { return ('a' <= r && r <= 'z') || ('A' <= r && r <= 'Z') }
 	isDigit := func(r byte) bool { return '0' <= r && r <= '9' }
+	isDash := func(r byte) bool { return r == '-' }

 	rawterms := []string{}
 	prevterm := 0
@@ -187,7 +188,7 @@ func ParseNodeList(raw string) (NodeList, error) {
 				i++
 			}
 			if i == len(raw) {
-				return nil, fmt.Errorf("node list: unclosed '['")
+				return nil, fmt.Errorf("ARCHIVE/NODELIST > unclosed '['")
 			}
 		} else if raw[i] == ',' {
 			rawterms = append(rawterms, raw[prevterm:i])
@@ -205,41 +206,47 @@ func ParseNodeList(raw string) (NodeList, error) {
 			limits() []map[string]int
 			prefix() string
 		}{}

 		for i := 0; i < len(rawterm); i++ {
 			c := rawterm[i]
 			if isLetter(c) || isDigit(c) {
 				j := i
-				for j < len(rawterm) && (isLetter(rawterm[j]) || isDigit(rawterm[j])) {
+				for j < len(rawterm) &&
+					(isLetter(rawterm[j]) ||
+						isDigit(rawterm[j]) ||
+						isDash(rawterm[j])) {
 					j++
 				}
 				exprs = append(exprs, NLExprString(rawterm[i:j]))
 				i = j - 1
 			} else if c == '[' {
 				end := strings.Index(rawterm[i:], "]")

 				if end == -1 {
-					return nil, fmt.Errorf("node list: unclosed '['")
+					return nil, fmt.Errorf("ARCHIVE/NODELIST > unclosed '['")
 				}

 				parts := strings.Split(rawterm[i+1:i+end], ",")
 				nles := NLExprIntRanges{}

 				for _, part := range parts {
 					minus := strings.Index(part, "-")
 					if minus == -1 {
-						return nil, fmt.Errorf("node list: no '-' found inside '[...]'")
+						return nil, fmt.Errorf("ARCHIVE/NODELIST > no '-' found inside '[...]'")
 					}

 					s1, s2 := part[0:minus], part[minus+1:]
 					if len(s1) != len(s2) || len(s1) == 0 {
-						return nil, fmt.Errorf("node list: %#v and %#v are not of equal length or of length zero", s1, s2)
+						return nil, fmt.Errorf("ARCHIVE/NODELIST > %v and %v are not of equal length or of length zero", s1, s2)
 					}

 					x1, err := strconv.ParseInt(s1, 10, 32)
 					if err != nil {
-						return nil, fmt.Errorf("node list: %w", err)
+						return nil, fmt.Errorf("ARCHIVE/NODELIST > could not parse int: %w", err)
 					}
 					x2, err := strconv.ParseInt(s2, 10, 32)
 					if err != nil {
-						return nil, fmt.Errorf("node list: %w", err)
+						return nil, fmt.Errorf("ARCHIVE/NODELIST > could not parse int: %w", err)
 					}

 					nles = append(nles, NLExprIntRange{
@@ -253,7 +260,7 @@ func ParseNodeList(raw string) (NodeList, error) {
 				exprs = append(exprs, nles)
 				i += end
 			} else {
-				return nil, fmt.Errorf("node list: invalid character: %#v", rune(c))
+				return nil, fmt.Errorf("ARCHIVE/NODELIST > invalid character: %#v", rune(c))
 			}
 		}
 		nl = append(nl, exprs)
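With isDash folded into the token loop above, hostnames like cn-0010 now parse outside brackets; bracket ranges still demand zero-padded bounds of equal width (see NLExprIntRange.consume above and the new test below). A simplified re-implementation of that matching rule, for illustration only and not the package's own types:

package main

import (
	"fmt"
	"strconv"
)

// inRange sketches NLExprIntRange semantics: the candidate must be exactly
// `digits` characters wide (zero-padded) and parse into [lo, hi].
func inRange(s string, lo, hi int64, digits int) bool {
	if len(s) != digits {
		return false
	}
	x, err := strconv.ParseInt(s, 10, 32)
	return err == nil && lo <= x && x <= hi
}

func main() {
	// "cn-00[13-18]" = prefix "cn-00" plus a 2-digit number in 13..18.
	fmt.Println(inRange("15", 13, 18, 2)) // true
	fmt.Println(inRange("5", 13, 18, 2))  // false: not zero-padded to width 2
}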
pkg/archive/nodelist_test.go
@@ -57,3 +57,19 @@ func TestNodeListCommasInBrackets(t *testing.T) {
 		t.Fatal("4")
 	}
 }
+
+func TestNodeListCommasOutsideBrackets(t *testing.T) {
+	nl, err := ParseNodeList("cn-0010,cn0011,cn-00[13-18,22-24]")
+	if err != nil {
+		t.Fatal(err)
+	}
+	if !nl.Contains("cn-0010") || !nl.Contains("cn0011") {
+		t.Fatal("1")
+	}
+	if !nl.Contains("cn-0013") ||
+		!nl.Contains("cn-0015") ||
+		!nl.Contains("cn-0022") ||
+		!nl.Contains("cn-0018") {
+		t.Fatal("2")
+	}
+}