Merge branch 'master' into import-data-sanitation

This commit is contained in:
Jan Eitzinger
2023-04-07 08:57:42 +02:00
110 changed files with 8191 additions and 1464 deletions

View File

@@ -40,12 +40,15 @@ type JobContainer struct {
var cache *lrucache.Cache = lrucache.New(128 * 1024 * 1024)
var ar ArchiveBackend
var useArchive bool
func Init(rawConfig json.RawMessage) error {
func Init(rawConfig json.RawMessage, disableArchive bool) error {
useArchive = !disableArchive
var kind struct {
Kind string `json:"kind"`
}
if err := json.Unmarshal(rawConfig, &kind); err != nil {
log.Warn("Error while unmarshaling raw config json")
return err
}
@@ -55,11 +58,12 @@ func Init(rawConfig json.RawMessage) error {
// case "s3":
// ar = &S3Archive{}
default:
return fmt.Errorf("unkown archive backend '%s''", kind.Kind)
return fmt.Errorf("ARCHIVE/ARCHIVE > unkown archive backend '%s''", kind.Kind)
}
version, err := ar.Init(rawConfig)
if err != nil {
log.Error("Error while initializing archiveBackend")
return err
}
log.Infof("Load archive version %d", version)
@@ -78,6 +82,7 @@ func LoadAveragesFromArchive(
metaFile, err := ar.LoadJobMeta(job)
if err != nil {
log.Warn("Error while loading job metadata from archiveBackend")
return err
}
@@ -96,6 +101,7 @@ func GetStatistics(job *schema.Job) (map[string]schema.JobStatistics, error) {
metaFile, err := ar.LoadJobMeta(job)
if err != nil {
log.Warn("Error while loading job metadata from archiveBackend")
return nil, err
}
@@ -106,12 +112,13 @@ func GetStatistics(job *schema.Job) (map[string]schema.JobStatistics, error) {
// in that JSON file. If the job is not archived, nothing is done.
func UpdateTags(job *schema.Job, tags []*schema.Tag) error {
if job.State == schema.JobStateRunning {
if job.State == schema.JobStateRunning || !useArchive {
return nil
}
jobMeta, err := ar.LoadJobMeta(job)
if err != nil {
log.Warn("Error while loading job metadata from archiveBackend")
return err
}
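The added disableArchive parameter feeds the package-level useArchive flag, so write paths such as UpdateTags turn into no-ops when archiving is off. A minimal sketch of the changed call site, assuming the usual file-backend config (the JSON literal and the log call are illustrative, not from this diff):

// assumes: import "encoding/json" plus the pkg/archive and pkg/log packages
rawCfg := json.RawMessage(`{"kind": "file", "path": "./var/job-archive"}`)

// the second argument is the new disableArchive flag
if err := archive.Init(rawCfg, false); err != nil {
	log.Fatalf("archive init failed: %s", err.Error())
}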

View File

@@ -9,6 +9,7 @@ import (
"fmt"
"github.com/ClusterCockpit/cc-backend/pkg/schema"
"github.com/ClusterCockpit/cc-backend/pkg/log"
)
var Clusters []*schema.Cluster
@@ -23,6 +24,7 @@ func initClusterConfig() error {
cluster, err := ar.LoadClusterCfg(c)
if err != nil {
log.Warnf("Error while loading cluster config for cluster '%v'", c)
return err
}
@@ -59,7 +61,7 @@ func initClusterConfig() error {
nl, err := ParseNodeList(sc.Nodes)
if err != nil {
return fmt.Errorf("in %s/cluster.json: %w", cluster.Name, err)
return fmt.Errorf("ARCHIVE/CLUSTERCONFIG > in %s/cluster.json: %w", cluster.Name, err)
}
nodeLists[cluster.Name][sc.Name] = nl
}
@@ -111,7 +113,7 @@ func AssignSubCluster(job *schema.BaseJob) error {
cluster := GetCluster(job.Cluster)
if cluster == nil {
return fmt.Errorf("unkown cluster: %#v", job.Cluster)
return fmt.Errorf("ARCHIVE/CLUSTERCONFIG > unkown cluster: %v", job.Cluster)
}
if job.SubCluster != "" {
@@ -120,11 +122,11 @@ func AssignSubCluster(job *schema.BaseJob) error {
return nil
}
}
return fmt.Errorf("already assigned subcluster %#v unkown (cluster: %#v)", job.SubCluster, job.Cluster)
return fmt.Errorf("ARCHIVE/CLUSTERCONFIG > already assigned subcluster %v unkown (cluster: %v)", job.SubCluster, job.Cluster)
}
if len(job.Resources) == 0 {
return fmt.Errorf("job without any resources/hosts")
return fmt.Errorf("ARCHIVE/CLUSTERCONFIG > job without any resources/hosts")
}
host0 := job.Resources[0].Hostname
@@ -140,7 +142,7 @@ func AssignSubCluster(job *schema.BaseJob) error {
return nil
}
return fmt.Errorf("no subcluster found for cluster %#v and host %#v", job.Cluster, host0)
return fmt.Errorf("ARCHIVE/CLUSTERCONFIG > no subcluster found for cluster %v and host %v", job.Cluster, host0)
}
func GetSubClusterByNode(cluster, hostname string) (string, error) {
@@ -153,12 +155,12 @@ func GetSubClusterByNode(cluster, hostname string) (string, error) {
c := GetCluster(cluster)
if c == nil {
return "", fmt.Errorf("unkown cluster: %#v", cluster)
return "", fmt.Errorf("ARCHIVE/CLUSTERCONFIG > unkown cluster: %v", cluster)
}
if c.SubClusters[0].Nodes == "" {
return c.SubClusters[0].Name, nil
}
return "", fmt.Errorf("no subcluster found for cluster %#v and host %#v", cluster, hostname)
return "", fmt.Errorf("ARCHIVE/CLUSTERCONFIG > no subcluster found for cluster %v and host %v", cluster, hostname)
}
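The recurring ARCHIVE/CLUSTERCONFIG > strings introduce a PACKAGE/FILE > origin prefix for errors. A hedged sketch of what a caller now sees (cluster and host names are invented):

if _, err := archive.GetSubClusterByNode("fritz", "f0101"); err != nil {
	// err reads e.g.: "ARCHIVE/CLUSTERCONFIG > unknown cluster: fritz"
	log.Warnf("subcluster lookup failed: %v", err)
}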

View File

@@ -55,7 +55,7 @@ func loadJobMeta(filename string) (*schema.JobMeta, error) {
b, err := os.ReadFile(filename)
if err != nil {
log.Errorf("fsBackend loadJobMeta()- %v", err)
log.Errorf("loadJobMeta() > open file error: %v", err)
return &schema.JobMeta{}, err
}
if config.Keys.Validate {
@@ -105,12 +105,12 @@ func (fsa *FsArchive) Init(rawConfig json.RawMessage) (int, error) {
var config FsArchiveConfig
if err := json.Unmarshal(rawConfig, &config); err != nil {
log.Errorf("fsBackend Init()- %v", err)
log.Warnf("Init() > Unmarshal error: %#v", err)
return 0, err
}
if config.Path == "" {
err := fmt.Errorf("fsBackend Init()- empty path")
log.Errorf("fsBackend Init()- %v", err)
err := fmt.Errorf("Init() : empty config.Path")
log.Errorf("Init() > config.Path error: %v", err)
return 0, err
}
fsa.path = config.Path
@@ -133,7 +133,7 @@ func (fsa *FsArchive) Init(rawConfig json.RawMessage) (int, error) {
entries, err := os.ReadDir(fsa.path)
if err != nil {
log.Errorf("fsBackend Init()- %v", err)
log.Errorf("Init() > ReadDir() error: %v", err)
return 0, err
}
@@ -150,10 +150,10 @@ func (fsa *FsArchive) Init(rawConfig json.RawMessage) (int, error) {
func (fsa *FsArchive) LoadJobData(job *schema.Job) (schema.JobData, error) {
var isCompressed bool = true
filename := getPath(job, fsa.path, "data.json.gz")
if !checkFileExists(filename) {
filename = getPath(job, fsa.path, "data.json")
isCompressed = false
}
return loadJobData(filename, isCompressed)
@@ -169,12 +169,13 @@ func (fsa *FsArchive) LoadClusterCfg(name string) (*schema.Cluster, error) {
b, err := os.ReadFile(filepath.Join(fsa.path, name, "cluster.json"))
if err != nil {
log.Errorf("fsBackend LoadClusterCfg()- %v", err)
log.Errorf("LoadClusterCfg() > open file error: %v", err)
return &schema.Cluster{}, err
}
// if config.Keys.Validate {
if err := schema.Validate(schema.ClusterCfg, bytes.NewReader(b)); err != nil {
return &schema.Cluster{}, fmt.Errorf("validate cluster config: %v", err)
// if config.Keys.Validate {
if err := schema.Validate(schema.ClusterCfg, bytes.NewReader(b)); err != nil {
log.Warnf("Validate cluster config: %v\n", err)
return &schema.Cluster{}, fmt.Errorf("validate cluster config: %v", err)
}
}
// }
return DecodeCluster(bytes.NewReader(b))
@@ -186,7 +187,7 @@ func (fsa *FsArchive) Iter(loadMetricData bool) <-chan JobContainer {
go func() {
clustersDir, err := os.ReadDir(fsa.path)
if err != nil {
log.Fatalf("Reading clusters failed: %s", err.Error())
log.Fatalf("Reading clusters failed @ cluster dirs: %s", err.Error())
}
for _, clusterDir := range clustersDir {
@@ -195,7 +196,7 @@ func (fsa *FsArchive) Iter(loadMetricData bool) <-chan JobContainer {
}
lvl1Dirs, err := os.ReadDir(filepath.Join(fsa.path, clusterDir.Name()))
if err != nil {
log.Fatalf("Reading jobs failed: %s", err.Error())
log.Fatalf("Reading jobs failed @ lvl1 dirs: %s", err.Error())
}
for _, lvl1Dir := range lvl1Dirs {
@@ -206,19 +207,18 @@ func (fsa *FsArchive) Iter(loadMetricData bool) <-chan JobContainer {
lvl2Dirs, err := os.ReadDir(filepath.Join(fsa.path, clusterDir.Name(), lvl1Dir.Name()))
if err != nil {
log.Fatalf("Reading jobs failed: %s", err.Error())
log.Fatalf("Reading jobs failed @ lvl2 dirs: %s", err.Error())
}
for _, lvl2Dir := range lvl2Dirs {
dirpath := filepath.Join(fsa.path, clusterDir.Name(), lvl1Dir.Name(), lvl2Dir.Name())
startTimeDirs, err := os.ReadDir(dirpath)
if err != nil {
log.Fatalf("Reading jobs failed: %s", err.Error())
log.Fatalf("Reading jobs failed @ starttime dirs: %s", err.Error())
}
for _, startTimeDir := range startTimeDirs {
if startTimeDir.IsDir() {
job, err := loadJobMeta(filepath.Join(dirpath, startTimeDir.Name(), "meta.json"))
if err != nil && !errors.Is(err, &jsonschema.ValidationError{}) {
log.Errorf("in %s: %s", filepath.Join(dirpath, startTimeDir.Name()), err.Error())
}
@@ -237,6 +237,7 @@ func (fsa *FsArchive) Iter(loadMetricData bool) <-chan JobContainer {
log.Errorf("in %s: %s", filepath.Join(dirpath, startTimeDir.Name()), err.Error())
}
ch <- JobContainer{Meta: job, Data: &data}
log.Errorf("in %s: %s", filepath.Join(dirpath, startTimeDir.Name()), err.Error())
} else {
ch <- JobContainer{Meta: job, Data: nil}
}
@@ -259,12 +260,15 @@ func (fsa *FsArchive) StoreJobMeta(jobMeta *schema.JobMeta) error {
}
f, err := os.Create(getPath(&job, fsa.path, "meta.json"))
if err != nil {
log.Error("Error while creating filepath for meta.json")
return err
}
if err := EncodeJobMeta(f, jobMeta); err != nil {
log.Error("Error while encoding job metadata to meta.json file")
return err
}
if err := f.Close(); err != nil {
log.Warn("Error while closing meta.json file")
return err
}
@@ -287,17 +291,21 @@ func (fsa *FsArchive) ImportJob(
}
dir := getPath(&job, fsa.path, "")
if err := os.MkdirAll(dir, 0777); err != nil {
log.Error("Error while creating job archive path")
return err
}
f, err := os.Create(path.Join(dir, "meta.json"))
if err != nil {
log.Error("Error while creating filepath for meta.json")
return err
}
if err := EncodeJobMeta(f, jobMeta); err != nil {
log.Error("Error while encoding job metadata to meta.json file")
return err
}
if err := f.Close(); err != nil {
log.Warn("Error while closing meta.json file")
return err
}
@@ -325,11 +333,17 @@ func (fsa *FsArchive) ImportJob(
f, err = os.Create(path.Join(dir, "data.json"))
if err != nil {
log.Error("Error while creating filepath for data.json")
return err
}
if err := EncodeJobData(f, jobData); err != nil {
log.Error("Error while encoding job metricdata to data.json file")
return err
}
if err := f.Close(); err != nil {
log.Warn("Error while closing data.json file")
return err
}
return f.Close()
return nil
}
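LoadJobData and Iter now prefer data.json.gz and fall back to plain data.json. The shared loadJobData helper is not part of this diff; a hedged sketch of how such a dual-format open can be implemented:

// assumes: import ("compress/gzip"; "io"; "os")
func openJobDataFile(filename string, isCompressed bool) (io.ReadCloser, error) {
	f, err := os.Open(filename)
	if err != nil {
		return nil, err
	}
	if !isCompressed {
		return f, nil
	}
	gz, err := gzip.NewReader(f)
	if err != nil {
		f.Close()
		return nil, err
	}
	// note: closing the gzip.Reader does not close the underlying file
	return gz, nil
}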

View File

@@ -10,9 +10,14 @@ import (
"testing"
"time"
"github.com/ClusterCockpit/cc-backend/pkg/log"
"github.com/ClusterCockpit/cc-backend/pkg/schema"
)
func init() {
log.Init("info", true)
}
func TestInitEmptyPath(t *testing.T) {
var fsa FsArchive
_, err := fsa.Init(json.RawMessage("{\"kind\":\"../../test/archive\"}"))

View File

@@ -10,12 +10,14 @@ import (
"time"
"github.com/ClusterCockpit/cc-backend/pkg/schema"
"github.com/ClusterCockpit/cc-backend/pkg/log"
)
func DecodeJobData(r io.Reader, k string) (schema.JobData, error) {
data := cache.Get(k, func() (value interface{}, ttl time.Duration, size int) {
var d schema.JobData
if err := json.NewDecoder(r).Decode(&d); err != nil {
log.Warn("Error while decoding raw job data json")
return err, 0, 1000
}
@@ -23,6 +25,7 @@ func DecodeJobData(r io.Reader, k string) (schema.JobData, error) {
})
if err, ok := data.(error); ok {
log.Warn("Error in decoded job data set")
return nil, err
}
@@ -32,6 +35,7 @@ func DecodeJobData(r io.Reader, k string) (schema.JobData, error) {
func DecodeJobMeta(r io.Reader) (*schema.JobMeta, error) {
var d schema.JobMeta
if err := json.NewDecoder(r).Decode(&d); err != nil {
log.Warn("Error while decoding raw job meta json")
return &d, err
}
@@ -43,6 +47,7 @@ func DecodeJobMeta(r io.Reader) (*schema.JobMeta, error) {
func DecodeCluster(r io.Reader) (*schema.Cluster, error) {
var c schema.Cluster
if err := json.NewDecoder(r).Decode(&c); err != nil {
log.Warn("Error while decoding raw cluster json")
return &c, err
}
@@ -54,6 +59,7 @@ func DecodeCluster(r io.Reader) (*schema.Cluster, error) {
func EncodeJobData(w io.Writer, d *schema.JobData) error {
// Sanitize parameters
if err := json.NewEncoder(w).Encode(d); err != nil {
log.Warn("Error while encoding new job data json")
return err
}
@@ -63,6 +69,7 @@ func EncodeJobData(w io.Writer, d *schema.JobData) error {
func EncodeJobMeta(w io.Writer, d *schema.JobMeta) error {
// Sanitize parameters
if err := json.NewEncoder(w).Encode(d); err != nil {
log.Warn("Error while encoding new job meta json")
return err
}

View File

@@ -129,7 +129,7 @@ type NLExprIntRange struct {
func (nle NLExprIntRange) consume(input string) (next string, ok bool) {
if !nle.zeroPadded || nle.digits < 1 {
log.Error("node list: only zero-padded ranges are allowed")
log.Error("only zero-padded ranges are allowed")
return "", false
}
@@ -178,6 +178,7 @@ func (nles NLExprIntRange) prefix() string {
func ParseNodeList(raw string) (NodeList, error) {
isLetter := func(r byte) bool { return ('a' <= r && r <= 'z') || ('A' <= r && r <= 'Z') }
isDigit := func(r byte) bool { return '0' <= r && r <= '9' }
isDash := func(r byte) bool { return r == '-' }
rawterms := []string{}
prevterm := 0
@@ -187,7 +188,7 @@ func ParseNodeList(raw string) (NodeList, error) {
i++
}
if i == len(raw) {
return nil, fmt.Errorf("node list: unclosed '['")
return nil, fmt.Errorf("ARCHIVE/NODELIST > unclosed '['")
}
} else if raw[i] == ',' {
rawterms = append(rawterms, raw[prevterm:i])
@@ -205,41 +206,47 @@ func ParseNodeList(raw string) (NodeList, error) {
limits() []map[string]int
prefix() string
}{}
for i := 0; i < len(rawterm); i++ {
c := rawterm[i]
if isLetter(c) || isDigit(c) {
j := i
for j < len(rawterm) && (isLetter(rawterm[j]) || isDigit(rawterm[j])) {
for j < len(rawterm) &&
(isLetter(rawterm[j]) ||
isDigit(rawterm[j]) ||
isDash(rawterm[j])) {
j++
}
exprs = append(exprs, NLExprString(rawterm[i:j]))
i = j - 1
} else if c == '[' {
end := strings.Index(rawterm[i:], "]")
if end == -1 {
return nil, fmt.Errorf("node list: unclosed '['")
return nil, fmt.Errorf("ARCHIVE/NODELIST > unclosed '['")
}
parts := strings.Split(rawterm[i+1:i+end], ",")
nles := NLExprIntRanges{}
for _, part := range parts {
minus := strings.Index(part, "-")
if minus == -1 {
return nil, fmt.Errorf("node list: no '-' found inside '[...]'")
return nil, fmt.Errorf("ARCHIVE/NODELIST > no '-' found inside '[...]'")
}
s1, s2 := part[0:minus], part[minus+1:]
if len(s1) != len(s2) || len(s1) == 0 {
return nil, fmt.Errorf("node list: %#v and %#v are not of equal length or of length zero", s1, s2)
return nil, fmt.Errorf("ARCHIVE/NODELIST > %v and %v are not of equal length or of length zero", s1, s2)
}
x1, err := strconv.ParseInt(s1, 10, 32)
if err != nil {
return nil, fmt.Errorf("node list: %w", err)
return nil, fmt.Errorf("ARCHIVE/NODELIST > could not parse int: %w", err)
}
x2, err := strconv.ParseInt(s2, 10, 32)
if err != nil {
return nil, fmt.Errorf("node list: %w", err)
return nil, fmt.Errorf("ARCHIVE/NODELIST > could not parse int: %w", err)
}
nles = append(nles, NLExprIntRange{
@@ -253,7 +260,7 @@ func ParseNodeList(raw string) (NodeList, error) {
exprs = append(exprs, nles)
i += end
} else {
return nil, fmt.Errorf("node list: invalid character: %#v", rune(c))
return nil, fmt.Errorf("ARCHIVE/NODELIST > invalid character: %#v", rune(c))
}
}
nl = append(nl, exprs)

View File

@@ -57,3 +57,19 @@ func TestNodeListCommasInBrackets(t *testing.T) {
t.Fatal("4")
}
}
func TestNodeListCommasOutsideBrackets(t *testing.T) {
nl, err := ParseNodeList("cn-0010,cn0011,cn-00[13-18,22-24]")
if err != nil {
t.Fatal(err)
}
if !nl.Contains("cn-0010") || !nl.Contains("cn0011") {
t.Fatal("1")
}
if !nl.Contains("cn-0013") ||
!nl.Contains("cn-0015") ||
!nl.Contains("cn-0022") ||
!nl.Contains("cn-0018") {
t.Fatal("2")
}
}

View File

@@ -12,8 +12,8 @@ import (
)
// Provides a simple way of logging with different levels.
// Time/Data are not logged on purpose because systemd adds
// them for us.
// Time/Date are not logged because systemd adds
// them for us (Default, can be changed by flag '--logdate true').
//
// Uses these prefixes: https://www.freedesktop.org/software/systemd/man/sd-daemon.html
@@ -22,109 +22,162 @@ var (
InfoWriter io.Writer = os.Stderr
WarnWriter io.Writer = os.Stderr
ErrWriter io.Writer = os.Stderr
CritWriter io.Writer = os.Stderr
)
var (
DebugPrefix string = "<7>[DEBUG] "
InfoPrefix string = "<6>[INFO] "
WarnPrefix string = "<4>[WARNING] "
ErrPrefix string = "<3>[ERROR] "
DebugPrefix string = "<7>[DEBUG] "
InfoPrefix string = "<6>[INFO] "
WarnPrefix string = "<4>[WARNING] "
ErrPrefix string = "<3>[ERROR] "
CritPrefix string = "<2>[CRITICAL] "
)
var (
DebugLog *log.Logger = log.New(DebugWriter, DebugPrefix, 0)
InfoLog *log.Logger = log.New(InfoWriter, InfoPrefix, 0)
WarnLog *log.Logger = log.New(WarnWriter, WarnPrefix, 0)
ErrLog *log.Logger = log.New(ErrWriter, ErrPrefix, 0)
DebugLog *log.Logger
InfoLog *log.Logger
WarnLog *log.Logger
ErrLog *log.Logger
CritLog *log.Logger
)
func init() {
if lvl, ok := os.LookupEnv("LOGLEVEL"); ok {
switch lvl {
case "err", "fatal":
WarnWriter = io.Discard
fallthrough
case "warn":
InfoWriter = io.Discard
fallthrough
case "info":
DebugWriter = io.Discard
case "debug":
// Nothing to do...
default:
Warnf("environment variable LOGLEVEL has invalid value %#v", lvl)
}
/* CONFIG */
func Init(lvl string, logdate bool) {
switch lvl {
case "crit":
ErrWriter = io.Discard
fallthrough
case "err", "fatal":
WarnWriter = io.Discard
fallthrough
case "warn":
InfoWriter = io.Discard
fallthrough
case "info":
DebugWriter = io.Discard
case "debug":
// Nothing to do...
break
default:
fmt.Printf("pkg/log: Flag 'loglevel' has invalid value %#v\npkg/log: Will use default loglevel 'debug'\n", lvl)
//SetLogLevel("debug")
}
if !logdate {
DebugLog = log.New(DebugWriter, DebugPrefix, 0)
InfoLog = log.New(InfoWriter, InfoPrefix, log.Lshortfile)
WarnLog = log.New(WarnWriter, WarnPrefix, log.Lshortfile)
ErrLog = log.New(ErrWriter, ErrPrefix, log.Llongfile)
CritLog = log.New(CritWriter, CritPrefix, log.Llongfile)
} else {
DebugLog = log.New(DebugWriter, DebugPrefix, log.LstdFlags)
InfoLog = log.New(InfoWriter, InfoPrefix, log.LstdFlags|log.Lshortfile)
WarnLog = log.New(WarnWriter, WarnPrefix, log.LstdFlags|log.Lshortfile)
ErrLog = log.New(ErrWriter, ErrPrefix, log.LstdFlags|log.Llongfile)
CritLog = log.New(CritWriter, CritPrefix, log.LstdFlags|log.Llongfile)
}
}
func Debug(v ...interface{}) {
if DebugWriter != io.Discard {
DebugLog.Print(v...)
}
}
func Info(v ...interface{}) {
if InfoWriter != io.Discard {
InfoLog.Print(v...)
}
/* PRINT */
// Private helper
func printStr(v ...interface{}) string {
return fmt.Sprint(v...)
}
// Uses Info() -> If errorpath required at some point:
// Will need own writer with 'Output(2, out)' to correctly render path
func Print(v ...interface{}) {
Info(v...)
}
func Debug(v ...interface{}) {
DebugLog.Output(2, printStr(v...))
}
func Info(v ...interface{}) {
InfoLog.Output(2, printStr(v...))
}
func Warn(v ...interface{}) {
if WarnWriter != io.Discard {
WarnLog.Print(v...)
}
WarnLog.Output(2, printStr(v...))
}
func Error(v ...interface{}) {
if ErrWriter != io.Discard {
ErrLog.Print(v...)
}
ErrLog.Output(2, printStr(v...))
}
// Writes panic stacktrace, but keeps application alive
func Panic(v ...interface{}) {
ErrLog.Output(2, printStr(v...))
panic("Panic triggered ...")
}
func Crit(v ...interface{}) {
CritLog.Output(2, printStr(v...))
}
// Writes critical log, stops application
func Fatal(v ...interface{}) {
Error(v...)
CritLog.Output(2, printStr(v...))
os.Exit(1)
}
func Debugf(format string, v ...interface{}) {
if DebugWriter != io.Discard {
DebugLog.Printf(format, v...)
}
}
func Infof(format string, v ...interface{}) {
if InfoWriter != io.Discard {
InfoLog.Printf(format, v...)
}
/* PRINT FORMAT*/
// Private helper
func printfStr(format string, v ...interface{}) string {
return fmt.Sprintf(format, v...)
}
// Uses Infof() -> If errorpath required at some point:
// Will need own writer with 'Output(2, out)' to correctly render path
func Printf(format string, v ...interface{}) {
Infof(format, v...)
}
func Finfof(w io.Writer, format string, v ...interface{}) {
if w != io.Discard {
fmt.Fprintf(InfoWriter, InfoPrefix+format+"\n", v...)
}
func Debugf(format string, v ...interface{}) {
DebugLog.Output(2, printfStr(format, v...))
}
func Infof(format string, v ...interface{}) {
InfoLog.Output(2, printfStr(format, v...))
}
func Warnf(format string, v ...interface{}) {
if WarnWriter != io.Discard {
WarnLog.Printf(format, v...)
}
WarnLog.Output(2, printfStr(format, v...))
}
func Errorf(format string, v ...interface{}) {
if ErrWriter != io.Discard {
ErrLog.Printf(format, v...)
}
ErrLog.Output(2, printfStr(format, v...))
}
// Writes panic stacktrace, but keeps application alive
func Panicf(format string, v ...interface{}) {
ErrLog.Output(2, printfStr(format, v...))
panic("Panic triggered ...")
}
func Critf(format string, v ...interface{}) {
CritLog.Output(2, printfStr(format, v...))
}
// Writes crit log, stops application
func Fatalf(format string, v ...interface{}) {
Errorf(format, v...)
CritLog.Output(2, printfStr(format, v...))
os.Exit(1)
}
/* SPECIAL */
// func Finfof(w io.Writer, format string, v ...interface{}) {
// if w != io.Discard {
// if logDateTime {
// currentTime := time.Now()
// fmt.Fprintf(InfoWriter, currentTime.String()+InfoPrefix+format+"\n", v...)
// } else {
// fmt.Fprintf(InfoWriter, InfoPrefix+format+"\n", v...)
// }
// }
// }
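Taken together, initialization moves from the old init()-time LOGLEVEL environment lookup to an explicit Init(level, logdate) call, and every helper now logs via Output(2, ...) so the caller's file:line is rendered. A minimal usage sketch (the flag wiring mirrors the '--logdate' flag mentioned in the comment above, but is assumed, not part of this diff):

package main

import (
	"flag"

	"github.com/ClusterCockpit/cc-backend/pkg/log"
)

func main() {
	logLevel := flag.String("loglevel", "warn", "crit, err, warn, info or debug")
	logDate := flag.Bool("logdate", false, "prefix log lines with date/time")
	flag.Parse()

	log.Init(*logLevel, *logDate)

	log.Debug("suppressed: level 'warn' discards the debug writer")
	log.Warnf("printed with caller file:line, level is %s", *logLevel)
}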

View File

@@ -69,7 +69,7 @@ func (c *Cache) Get(key string, computeValue ComputeValue) interface{} {
if now.After(entry.expiration) {
if !c.evictEntry(entry) {
if entry.expiration.IsZero() {
panic("cache entry that shoud have been waited for could not be evicted.")
panic("LRUCACHE/CACHE > cache entry that shoud have been waited for could not be evicted.")
}
c.mutex.Unlock()
return entry.value
@@ -208,7 +208,7 @@ func (c *Cache) Keys(f func(key string, val interface{})) {
size := 0
for key, e := range c.entries {
if key != e.key {
panic("key mismatch")
panic("LRUCACHE/CACHE > key mismatch")
}
if now.After(e.expiration) {
@@ -219,13 +219,13 @@ func (c *Cache) Keys(f func(key string, val interface{})) {
if e.prev != nil {
if e.prev.next != e {
panic("list corrupted")
panic("LRUCACHE/CACHE > list corrupted")
}
}
if e.next != nil {
if e.next.prev != e {
panic("list corrupted")
panic("LRUCACHE/CACHE > list corrupted")
}
}
@@ -234,18 +234,18 @@ func (c *Cache) Keys(f func(key string, val interface{})) {
}
if size != c.usedmemory {
panic("size calculations failed")
panic("LRUCACHE/CACHE > size calculations failed")
}
if c.head != nil {
if c.tail == nil || c.head.prev != nil {
panic("head/tail corrupted")
panic("LRUCACHE/CACHE > head/tail corrupted")
}
}
if c.tail != nil {
if c.head == nil || c.tail.next != nil {
panic("head/tail corrupted")
panic("LRUCACHE/CACHE > head/tail corrupted")
}
}
}
@@ -281,7 +281,7 @@ func (c *Cache) unlinkEntry(e *cacheEntry) {
func (c *Cache) evictEntry(e *cacheEntry) bool {
if e.waitingForComputation != 0 {
// panic("cannot evict this entry as other goroutines need the value")
// panic("LRUCACHE/CACHE > cannot evict this entry as other goroutines need the value")
return false
}
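For context, the Get signature whose panic messages were re-prefixed here is the compute-on-miss pattern already visible in DecodeJobData above. A small hedged sketch:

// assumes: import ("fmt"; "time"; "github.com/ClusterCockpit/cc-backend/pkg/lrucache")
c := lrucache.New(1024 * 1024) // capacity in bytes, as in archive.go

v := c.Get("answer", func() (value interface{}, ttl time.Duration, size int) {
	return 42, time.Hour, 8 // computed once, cached for an hour, 8 bytes charged
})
fmt.Println(v.(int))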

View File

@@ -23,6 +23,16 @@ type JWTAuthConfig struct {
// Specifies for how long a session or JWT shall be valid
// as a string parsable by time.ParseDuration().
MaxAge int64 `json:"max-age"`
// Specifies which cookie should be checked for a JWT token (if no authorization header is present)
CookieName string `json:"cookieName"`
// Deny login for users not in database (but defined in JWT).
// Ignore user roles defined in JWTs ('roles' claim), get them from db.
ForceJWTValidationViaDatabase bool `json:"forceJWTValidationViaDatabase"`
// Specifies which issuer should be accepted when validating external JWTs ('iss' claim)
TrustedExternalIssuer string `json:"trustedExternalIssuer"`
}
type IntRange struct {
@@ -106,6 +116,9 @@ type ProgramConfig struct {
// If not zero, automatically mark jobs as stopped running X seconds longer than their walltime.
StopJobsExceedingWalltime int `json:"stop-jobs-exceeding-walltime"`
// Defines time X in seconds in which jobs are considered to be "short" and will be filtered in specific views.
ShortRunningJobsDuration int `json:"short-running-jobs-duration"`
// Array of Clusters
Clusters []*ClusterConfig `json:"clusters"`
}
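The struct tags above fix the JSON keys for the new JWT options. A hedged sketch of decoding a matching jwts config fragment (issuer URL and cookie name are invented values; the struct is assumed to live in the schema package):

raw := []byte(`{
	"max-age": 86400,
	"cookieName": "jwt-token",
	"forceJWTValidationViaDatabase": true,
	"trustedExternalIssuer": "https://idp.example.org"
}`)

var jwtCfg schema.JWTAuthConfig
if err := json.Unmarshal(raw, &jwtCfg); err != nil {
	log.Fatalf("decoding jwts config failed: %s", err.Error())
}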

View File

@@ -9,6 +9,8 @@ import (
"io"
"math"
"strconv"
"github.com/ClusterCockpit/cc-backend/pkg/log"
)
// A custom float type is used so that (Un)MarshalJSON and
@@ -43,6 +45,7 @@ func (f *Float) UnmarshalJSON(input []byte) error {
val, err := strconv.ParseFloat(s, 64)
if err != nil {
log.Warn("Error while parsing custom float")
return err
}
*f = Float(val)
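The custom Float exists so NaN can travel through JSON as null where encoding/json would otherwise fail. A hedged sketch of the expected round-trip (the null branch is assumed from the type's purpose; this hunk only shows the ParseFloat path):

// assumes: import ("encoding/json"; "fmt"; "math") plus the schema package
var f schema.Float
if err := json.Unmarshal([]byte(`null`), &f); err == nil {
	fmt.Println(math.IsNaN(float64(f))) // expected: true
}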

View File

@@ -58,6 +58,15 @@ type Job struct {
NetDataVolTotal float64 `json:"-" db:"net_data_vol_total"` // NetDataVolTotal as Float64
FileBwAvg float64 `json:"-" db:"file_bw_avg"` // FileBwAvg as Float64
FileDataVolTotal float64 `json:"-" db:"file_data_vol_total"` // FileDataVolTotal as Float64
StartTime time.Time `json:"startTime"` // Start time as 'time.Time' data type
MemUsedMax float64 `json:"-" db:"mem_used_max"` // MemUsedMax as Float64
FlopsAnyAvg float64 `json:"-" db:"flops_any_avg"` // FlopsAnyAvg as Float64
MemBwAvg float64 `json:"-" db:"mem_bw_avg"` // MemBwAvg as Float64
LoadAvg float64 `json:"-" db:"load_avg"` // LoadAvg as Float64
NetBwAvg float64 `json:"-" db:"net_bw_avg"` // NetBwAvg as Float64
NetDataVolTotal float64 `json:"-" db:"net_data_vol_total"` // NetDataVolTotal as Float64
FileBwAvg float64 `json:"-" db:"file_bw_avg"` // FileBwAvg as Float64
FileDataVolTotal float64 `json:"-" db:"file_data_vol_total"` // FileDataVolTotal as Float64
}
// Non-Swaggered Comment: JobMeta
@@ -75,6 +84,7 @@ type JobMeta struct {
BaseJob
StartTime int64 `json:"startTime" db:"start_time" example:"1649723812" minimum:"1"` // Start epoch time stamp in seconds (Min > 0)
Statistics map[string]JobStatistics `json:"statistics,omitempty"` // Metric statistics of job
Statistics map[string]JobStatistics `json:"statistics,omitempty"` // Metric statistics of job
}
const (
@@ -106,15 +116,20 @@ type JobStatistics struct {
// Tag model
// @Description Defines a tag using name and type.
type Tag struct {
// The unique DB identifier of a tag
// The unique DB identifier of a tag
ID int64 `json:"id" db:"id"`
Type string `json:"type" db:"tag_type" example:"Debug"` // Tag Type
Type string `json:"type" db:"tag_type" example:"Debug"` // Tag Type
Name string `json:"name" db:"tag_name" example:"Testjob"` // Tag Name
}
// Resource model
// @Description A resource used by a job
type Resource struct {
Hostname string `json:"hostname"` // Name of the host (= node)
HWThreads []int `json:"hwthreads,omitempty"` // List of OS processor ids
Accelerators []string `json:"accelerators,omitempty"` // List of accelerator device ids
Hostname string `json:"hostname"` // Name of the host (= node)
HWThreads []int `json:"hwthreads,omitempty"` // List of OS processor ids
Accelerators []string `json:"accelerators,omitempty"` // List of accelerator device ids
@@ -137,12 +152,12 @@ const (
func (e *JobState) UnmarshalGQL(v interface{}) error {
str, ok := v.(string)
if !ok {
return fmt.Errorf("enums must be strings")
return fmt.Errorf("SCHEMA/JOB > enums must be strings")
}
*e = JobState(str)
if !e.Valid() {
return errors.New("invalid job state")
return errors.New("SCHEMA/JOB > invalid job state")
}
return nil

View File

@@ -91,12 +91,12 @@ func (e *MetricScope) Max(other MetricScope) MetricScope {
func (e *MetricScope) UnmarshalGQL(v interface{}) error {
str, ok := v.(string)
if !ok {
return fmt.Errorf("enums must be strings")
return fmt.Errorf("SCHEMA/METRICS > enums must be strings")
}
*e = MetricScope(str)
if !e.Valid() {
return fmt.Errorf("%s is not a valid MetricScope", str)
return fmt.Errorf("SCHEMA/METRICS > %s is not a valid MetricScope", str)
}
return nil
}
@@ -297,7 +297,7 @@ func (jm *JobMetric) AddPercentiles(ps []int) bool {
for _, p := range ps {
if p < 1 || p > 99 {
panic("invalid percentile")
panic("SCHEMA/METRICS > invalid percentile")
}
if _, ok := jm.StatisticsSeries.Percentiles[p]; ok {

View File

@@ -76,6 +76,10 @@
"description": "If not zero, automatically mark jobs as stopped running X seconds longer than their walltime. Only applies if walltime is set for job.",
"type": "integer"
},
"short-running-jobs-duration": {
"description": "Do not show running jobs shorter than X seconds.",
"type": "integer"
},
"": {
"description": "",
"type": "string"
@@ -138,7 +142,7 @@
"kind": {
"type": "string",
"enum": [
"influxdb-v2",
"influxdb",
"prometheus",
"cc-metric-store",
"test"
@@ -241,10 +245,6 @@
"description": "Jobs shown per page in job lists",
"type": "integer"
},
"plot_list_hideShortRunningJobs": {
"description": "Do not show running jobs shorter than X seconds",
"type": "integer"
},
"plot_view_plotsPerRow": {
"description": "Number of plots per row in single job view",
"type": "integer"
@@ -342,8 +342,7 @@
"job_view_polarPlotMetrics",
"job_view_selectedMetrics",
"plot_general_colorscheme",
"plot_list_selectedMetrics",
"plot_list_hideShortRunningJobs"
"plot_list_selectedMetrics"
]
}
},

View File

@@ -45,21 +45,22 @@ func Validate(k Kind, r io.Reader) (err error) {
case Config:
s, err = jsonschema.Compile("embedfs://config.schema.json")
default:
return fmt.Errorf("unkown schema kind ")
return fmt.Errorf("SCHEMA/VALIDATE > unkown schema kind: %#v", k)
}
if err != nil {
log.Errorf("Error while compiling json schema for kind '%#v'", k)
return err
}
var v interface{}
if err := json.NewDecoder(r).Decode(&v); err != nil {
log.Errorf("schema.Validate() - Failed to decode %v", err)
log.Warnf("Error while decoding raw json schema: %#v", err)
return err
}
if err = s.Validate(v); err != nil {
return fmt.Errorf("%#v", err)
return fmt.Errorf("SCHEMA/VALIDATE > %#v", err)
}
return nil
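Validate takes a schema Kind plus any io.Reader, so callers can check raw JSON before decoding it. A minimal usage sketch (file name illustrative):

f, err := os.Open("config.json")
if err != nil {
	log.Fatalf("open config: %s", err.Error())
}
defer f.Close()

if err := schema.Validate(schema.Config, f); err != nil {
	// the error carries the "SCHEMA/VALIDATE > ..." origin prefix
	log.Fatalf("config validation failed: %s", err.Error())
}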

View File

@@ -281,7 +281,7 @@ func GetUnitUnitFactor(in Unit, out Unit) (func(value interface{}) interface{},
} else if in.getMeasure() == TemperatureF && out.getMeasure() == TemperatureC {
return convertTempF2TempC, nil
} else if in.getMeasure() != out.getMeasure() || in.getUnitDenominator() != out.getUnitDenominator() {
return func(value interface{}) interface{} { return 1.0 }, fmt.Errorf("invalid measures in in and out Unit")
return func(value interface{}) interface{} { return 1.0 }, fmt.Errorf("UNITS/UNITS > invalid measures in in and out Unit")
}
return GetPrefixPrefixFactor(in.getPrefix(), out.getPrefix()), nil
}
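GetUnitUnitFactor returns a conversion closure together with an error; on incompatible measures the closure degrades to the constant 1.0 factor shown above. A hedged usage sketch (the NewUnit constructor is an assumption, it does not appear in this hunk):

in := units.NewUnit("MB/s")  // assumed constructor
out := units.NewUnit("GB/s") // assumed constructor

conv, err := units.GetUnitUnitFactor(in, out)
if err != nil {
	log.Warnf("unit conversion: %v", err)
}
fmt.Println(conv(1000.0)) // value rescaled from MB/s to GB/s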