Mirror of https://github.com/ClusterCockpit/cc-backend (synced 2025-07-23 12:51:40 +02:00)

Merge branch 'master' into import-data-sanitation
@@ -23,6 +23,16 @@ type JWTAuthConfig struct {
	// Specifies for how long a session or JWT shall be valid
	// as a string parsable by time.ParseDuration().
	MaxAge int64 `json:"max-age"`

	// Specifies which cookie should be checked for a JWT token (if no authorization header is present)
	CookieName string `json:"cookieName"`

	// Deny login for users not in database (but defined in JWT).
	// Ignore user roles defined in JWTs ('roles' claim), get them from db.
	ForceJWTValidationViaDatabase bool `json:"forceJWTValidationViaDatabase"`

	// Specifies which issuer should be accepted when validating external JWTs ('iss' claim)
	TrustedExternalIssuer string `json:"trustedExternalIssuer"`
}

type IntRange struct {
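As a quick usage sketch (not part of this commit), the new fields map onto a JSON auth block shaped by the struct tags above. The standalone struct copy and all values below are illustrative assumptions, not taken from the repository:

package main

import (
	"encoding/json"
	"fmt"
)

// Illustrative copy of the JWT auth fields shown in the hunk above.
type JWTAuthConfig struct {
	MaxAge                        int64  `json:"max-age"`
	CookieName                    string `json:"cookieName"`
	ForceJWTValidationViaDatabase bool   `json:"forceJWTValidationViaDatabase"`
	TrustedExternalIssuer         string `json:"trustedExternalIssuer"`
}

func main() {
	// Hypothetical config snippet; only the key names come from the struct tags above.
	raw := []byte(`{
		"max-age": 86400,
		"cookieName": "ClusterCockpitAuth",
		"forceJWTValidationViaDatabase": true,
		"trustedExternalIssuer": "https://idp.example.org"
	}`)

	var cfg JWTAuthConfig
	if err := json.Unmarshal(raw, &cfg); err != nil {
		panic(err)
	}
	fmt.Printf("%+v\n", cfg)
}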
@@ -106,6 +116,9 @@ type ProgramConfig struct {
	// If not zero, automatically mark jobs as stopped running X seconds longer than their walltime.
	StopJobsExceedingWalltime int `json:"stop-jobs-exceeding-walltime"`

	// Defines time X in seconds in which jobs are considered to be "short" and will be filtered in specific views.
	ShortRunningJobsDuration int `json:"short-running-jobs-duration"`

	// Array of Clusters
	Clusters []*ClusterConfig `json:"clusters"`
}
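To make the walltime option concrete, the check it describes boils down to something like the sketch below. This is only an illustration of the commented behavior; shouldMarkStopped and every value in it are invented for this example and do not exist in cc-backend:

package main

import (
	"fmt"
	"time"
)

// shouldMarkStopped sketches the rule from the comment above: if
// stop-jobs-exceeding-walltime (X, in seconds) is non-zero and the job has a
// walltime, a still-running job is treated as stopped once it has been
// running more than X seconds beyond that walltime.
func shouldMarkStopped(startTime time.Time, walltimeSec, exceedBySec int64, now time.Time) bool {
	if exceedBySec == 0 || walltimeSec <= 0 {
		return false
	}
	runtimeSec := int64(now.Sub(startTime).Seconds())
	return runtimeSec > walltimeSec+exceedBySec
}

func main() {
	start := time.Now().Add(-26 * time.Hour)
	// 26h of runtime against a 24h walltime plus a 1h grace period -> true.
	fmt.Println(shouldMarkStopped(start, 24*3600, 3600, time.Now()))
}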
@@ -9,6 +9,8 @@ import (
	"io"
	"math"
	"strconv"

	"github.com/ClusterCockpit/cc-backend/pkg/log"
)

// A custom float type is used so that (Un)MarshalJSON and

@@ -43,6 +45,7 @@ func (f *Float) UnmarshalJSON(input []byte) error {
	val, err := strconv.ParseFloat(s, 64)
	if err != nil {
		log.Warn("Error while parsing custom float")
		return err
	}
	*f = Float(val)
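The change above only adds a warning log, but it sits in the package's custom Float type, which exists so that (Un)MarshalJSON can be overloaded (e.g. NaN travelling as JSON null). A minimal standalone sketch of that idea, assuming NaN/null round-tripping is the intent and not copying the package's exact implementation:

package main

import (
	"encoding/json"
	"fmt"
	"math"
	"strconv"
)

// Float sketches a float64 whose NaN values serialize as JSON null.
type Float float64

func (f Float) MarshalJSON() ([]byte, error) {
	if math.IsNaN(float64(f)) {
		return []byte("null"), nil
	}
	return []byte(strconv.FormatFloat(float64(f), 'f', -1, 64)), nil
}

func (f *Float) UnmarshalJSON(input []byte) error {
	s := string(input)
	if s == "null" {
		*f = Float(math.NaN())
		return nil
	}
	val, err := strconv.ParseFloat(s, 64)
	if err != nil {
		return err // a real implementation would log here, as in the hunk above
	}
	*f = Float(val)
	return nil
}

func main() {
	var xs []Float
	if err := json.Unmarshal([]byte(`[1.5, null, 3]`), &xs); err != nil {
		panic(err)
	}
	out, _ := json.Marshal(xs)
	fmt.Println(string(out)) // prints [1.5,null,3]
}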
@@ -58,6 +58,15 @@ type Job struct {
	StartTime time.Time `json:"startTime"` // Start time as 'time.Time' data type
	MemUsedMax float64 `json:"-" db:"mem_used_max"` // MemUsedMax as Float64
	FlopsAnyAvg float64 `json:"-" db:"flops_any_avg"` // FlopsAnyAvg as Float64
	MemBwAvg float64 `json:"-" db:"mem_bw_avg"` // MemBwAvg as Float64
	LoadAvg float64 `json:"-" db:"load_avg"` // LoadAvg as Float64
	NetBwAvg float64 `json:"-" db:"net_bw_avg"` // NetBwAvg as Float64
	NetDataVolTotal float64 `json:"-" db:"net_data_vol_total"` // NetDataVolTotal as Float64
	FileBwAvg float64 `json:"-" db:"file_bw_avg"` // FileBwAvg as Float64
	FileDataVolTotal float64 `json:"-" db:"file_data_vol_total"` // FileDataVolTotal as Float64
}

// Non-Swaggered Comment: JobMeta
@@ -75,6 +84,7 @@ type JobMeta struct {
	BaseJob
	StartTime int64 `json:"startTime" db:"start_time" example:"1649723812" minimum:"1"` // Start epoch time stamp in seconds (Min > 0)
	Statistics map[string]JobStatistics `json:"statistics,omitempty"` // Metric statistics of job
}

const (
@@ -106,15 +116,20 @@ type JobStatistics struct {
// Tag model
// @Description Defines a tag using name and type.
type Tag struct {
	// The unique DB identifier of a tag
	ID int64 `json:"id" db:"id"`
	Type string `json:"type" db:"tag_type" example:"Debug"` // Tag Type
	Name string `json:"name" db:"tag_name" example:"Testjob"` // Tag Name
}

// Resource model
// @Description A resource used by a job
type Resource struct {
	Hostname string `json:"hostname"` // Name of the host (= node)
	HWThreads []int `json:"hwthreads,omitempty"` // List of OS processor ids
	Accelerators []string `json:"accelerators,omitempty"` // List of of accelerator device ids
@@ -137,12 +152,12 @@ const (
func (e *JobState) UnmarshalGQL(v interface{}) error {
	str, ok := v.(string)
	if !ok {
-		return fmt.Errorf("enums must be strings")
+		return fmt.Errorf("SCHEMA/JOB > enums must be strings")
	}

	*e = JobState(str)
	if !e.Valid() {
-		return errors.New("invalid job state")
+		return errors.New("SCHEMA/JOB > invalid job state")
	}

	return nil
@@ -91,12 +91,12 @@ func (e *MetricScope) Max(other MetricScope) MetricScope {
func (e *MetricScope) UnmarshalGQL(v interface{}) error {
	str, ok := v.(string)
	if !ok {
-		return fmt.Errorf("enums must be strings")
+		return fmt.Errorf("SCHEMA/METRICS > enums must be strings")
	}

	*e = MetricScope(str)
	if !e.Valid() {
-		return fmt.Errorf("%s is not a valid MetricScope", str)
+		return fmt.Errorf("SCHEMA/METRICS > %s is not a valid MetricScope", str)
	}
	return nil
}
@@ -297,7 +297,7 @@ func (jm *JobMetric) AddPercentiles(ps []int) bool {
	for _, p := range ps {
		if p < 1 || p > 99 {
-			panic("invalid percentile")
+			panic("SCHEMA/METRICS > invalid percentile")
		}

		if _, ok := jm.StatisticsSeries.Percentiles[p]; ok {
@@ -76,6 +76,10 @@
    "description": "If not zero, automatically mark jobs as stopped running X seconds longer than their walltime. Only applies if walltime is set for job.",
    "type": "integer"
},
"short-running-jobs-duration": {
    "description": "Do not show running jobs shorter than X seconds.",
    "type": "integer"
},
"": {
    "description": "",
    "type": "string"
@@ -138,7 +142,7 @@
"kind": {
    "type": "string",
    "enum": [
-       "influxdb-v2",
+       "influxdb",
        "prometheus",
        "cc-metric-store",
        "test"
@@ -241,10 +245,6 @@
    "description": "Jobs shown per page in job lists",
    "type": "integer"
},
-"plot_list_hideShortRunningJobs": {
-    "description": "Do not show running jobs shorter than X seconds",
-    "type": "integer"
-},
"plot_view_plotsPerRow": {
    "description": "Number of plots per row in single job view",
    "type": "integer"
@@ -342,8 +342,7 @@
    "job_view_polarPlotMetrics",
    "job_view_selectedMetrics",
    "plot_general_colorscheme",
-   "plot_list_selectedMetrics",
-   "plot_list_hideShortRunningJobs"
+   "plot_list_selectedMetrics"
]
}
},
@@ -45,21 +45,22 @@ func Validate(k Kind, r io.Reader) (err error) {
	case Config:
		s, err = jsonschema.Compile("embedfs://config.schema.json")
	default:
-		return fmt.Errorf("unkown schema kind ")
+		return fmt.Errorf("SCHEMA/VALIDATE > unkown schema kind: %#v", k)
	}

	if err != nil {
+		log.Errorf("Error while compiling json schema for kind '%#v'", k)
		return err
	}

	var v interface{}
	if err := json.NewDecoder(r).Decode(&v); err != nil {
-		log.Errorf("schema.Validate() - Failed to decode %v", err)
+		log.Warnf("Error while decoding raw json schema: %#v", err)
		return err
	}

	if err = s.Validate(v); err != nil {
-		return fmt.Errorf("%#v", err)
+		return fmt.Errorf("SCHEMA/VALIDATE > %#v", err)
	}

	return nil
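Taken together, the function above compiles the embedded schema for the given kind, decodes the reader into a generic value, and validates it. A hedged usage sketch, assuming the pkg/schema import path and the Config kind shown in the hunk (the config.json path is hypothetical):

package main

import (
	"log"
	"os"

	"github.com/ClusterCockpit/cc-backend/pkg/schema"
)

func main() {
	f, err := os.Open("config.json") // hypothetical config file
	if err != nil {
		log.Fatal(err)
	}
	defer f.Close()

	// Validate the raw JSON against the embedded config.schema.json.
	if err := schema.Validate(schema.Config, f); err != nil {
		log.Fatalf("config does not match schema: %v", err)
	}
}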