Reformat and Refactor packages. Rebuild GraphQL.

Jan Eitzinger
2022-09-07 12:24:45 +02:00
parent b7907d33aa
commit 8856f26fb0
30 changed files with 5728 additions and 3200 deletions


@@ -8,7 +8,6 @@ import (
"encoding/json"
"fmt"
"github.com/ClusterCockpit/cc-backend/internal/graph/model"
"github.com/ClusterCockpit/cc-backend/pkg/schema"
)
@@ -21,7 +20,7 @@ type ArchiveBackend interface {
// replaces previous loadFromArchive
LoadJobData(job *schema.Job) (schema.JobData, error)
- LoadClusterCfg(name string) (model.Cluster, error)
+ LoadClusterCfg(name string) (schema.Cluster, error)
StoreMeta(jobMeta *schema.JobMeta) error
@@ -62,7 +61,11 @@ func GetHandle() ArchiveBackend {
}
// Helper to metricdata.LoadAverages().
- func LoadAveragesFromArchive(job *schema.Job, metrics []string, data [][]schema.Float) error {
+ func LoadAveragesFromArchive(
+     job *schema.Job,
+     metrics []string,
+     data [][]schema.Float) error {
metaFile, err := ar.LoadJobMeta(job)
if err != nil {
return err
@@ -80,6 +83,7 @@ func LoadAveragesFromArchive(job *schema.Job, metrics []string, data [][]schema.
}
func GetStatistics(job *schema.Job) (map[string]schema.JobStatistics, error) {
metaFile, err := ar.LoadJobMeta(job)
if err != nil {
return nil, err
@@ -89,12 +93,14 @@ func GetStatistics(job *schema.Job) (map[string]schema.JobStatistics, error) {
}
func Import(job *schema.JobMeta, jobData *schema.JobData) error {
return ar.Import(job, jobData)
}
// If the job is archived, find its `meta.json` file and override the tags list
// in that JSON file. If the job is not archived, nothing is done.
func UpdateTags(job *schema.Job, tags []*schema.Tag) error {
if job.State == schema.JobStateRunning {
return nil
}
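
A minimal usage sketch (not part of this commit) of the package-level helper above, pre-sizing one row per metric so the helper can append each job average; the metric names are invented:

    func loadAverages(job *schema.Job) ([][]schema.Float, error) {
        metrics := []string{"flops_any", "mem_bw"} // hypothetical metric names
        data := make([][]schema.Float, len(metrics))
        if err := archive.LoadAveragesFromArchive(job, metrics, data); err != nil {
            return nil, err
        }
        return data, nil
    }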


@@ -7,20 +7,18 @@ package archive
import (
"errors"
"fmt"
"time"
"github.com/ClusterCockpit/cc-backend/internal/graph/model"
"github.com/ClusterCockpit/cc-backend/pkg/lrucache"
"github.com/ClusterCockpit/cc-backend/pkg/schema"
)
var cache *lrucache.Cache = lrucache.New(1024)
- var Clusters []*model.Cluster
+ var Clusters []*schema.Cluster
var nodeLists map[string]map[string]NodeList
func initClusterConfig() error {
- Clusters = []*model.Cluster{}
+ Clusters = []*schema.Cluster{}
nodeLists = map[string]map[string]NodeList{}
for _, c := range ar.GetClusters() {
@@ -30,7 +28,9 @@ func initClusterConfig() error {
return err
}
- if len(cluster.Name) == 0 || len(cluster.MetricConfig) == 0 || len(cluster.SubClusters) == 0 {
+ if len(cluster.Name) == 0 ||
+     len(cluster.MetricConfig) == 0 ||
+     len(cluster.SubClusters) == 0 {
return errors.New("cluster.name, cluster.metricConfig and cluster.SubClusters should not be empty")
}
@@ -51,10 +51,6 @@ func initClusterConfig() error {
}
}
- if cluster.FilterRanges.StartTime.To.IsZero() {
-     cluster.FilterRanges.StartTime.To = time.Unix(0, 0)
- }
Clusters = append(Clusters, &cluster)
nodeLists[cluster.Name] = make(map[string]NodeList)
@@ -74,7 +70,8 @@ func initClusterConfig() error {
return nil
}
- func GetCluster(cluster string) *model.Cluster {
+ func GetCluster(cluster string) *schema.Cluster {
for _, c := range Clusters {
if c.Name == cluster {
return c
@@ -83,7 +80,8 @@ func GetCluster(cluster string) *model.Cluster {
return nil
}
- func GetSubCluster(cluster, subcluster string) *model.SubCluster {
+ func GetSubCluster(cluster, subcluster string) *schema.SubCluster {
for _, c := range Clusters {
if c.Name == cluster {
for _, p := range c.SubClusters {
@@ -96,7 +94,8 @@ func GetSubCluster(cluster, subcluster string) *model.SubCluster {
return nil
}
- func GetMetricConfig(cluster, metric string) *model.MetricConfig {
+ func GetMetricConfig(cluster, metric string) *schema.MetricConfig {
for _, c := range Clusters {
if c.Name == cluster {
for _, m := range c.MetricConfig {
@@ -112,6 +111,7 @@ func GetMetricConfig(cluster, metric string) *model.MetricConfig {
// AssignSubCluster sets the `job.subcluster` property of the job based
// on its cluster and resources.
func AssignSubCluster(job *schema.BaseJob) error {
cluster := GetCluster(job.Cluster)
if cluster == nil {
return fmt.Errorf("unkown cluster: %#v", job.Cluster)
@@ -147,6 +147,7 @@ func AssignSubCluster(job *schema.BaseJob) error {
}
func GetSubClusterByNode(cluster, hostname string) (string, error) {
for sc, nl := range nodeLists[cluster] {
if nl != nil && nl.Contains(hostname) {
return sc, nil
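
A hedged sketch of the schema-typed lookups above in combination; cluster, host, and metric names are placeholders:

    // Resolve a node's subcluster, then look up a metric's sampling interval.
    sc, err := archive.GetSubClusterByNode("emmy", "e0101")
    if err != nil {
        log.Fatal(err)
    }
    if mc := archive.GetMetricConfig("emmy", "flops_any"); mc != nil {
        fmt.Printf("subcluster %s: %s sampled every %d s\n", sc, mc.Name, mc.Timestep)
    }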


@@ -14,7 +14,6 @@ import (
"strconv"
"time"
"github.com/ClusterCockpit/cc-backend/internal/graph/model"
"github.com/ClusterCockpit/cc-backend/pkg/log"
"github.com/ClusterCockpit/cc-backend/pkg/schema"
)
@@ -30,7 +29,11 @@ type FsArchive struct {
// For a given job, return the path of the `data.json`/`meta.json` file.
// TODO: Implement Issue ClusterCockpit/ClusterCockpit#97
- func getPath(job *schema.Job, rootPath string, file string) string {
+ func getPath(
+     job *schema.Job,
+     rootPath string,
+     file string) string {
lvl1, lvl2 := fmt.Sprintf("%d", job.JobID/1000), fmt.Sprintf("%03d", job.JobID%1000)
return filepath.Join(
rootPath,
@@ -51,6 +54,7 @@ func loadJobMeta(filename string) (schema.JobMeta, error) {
}
func (fsa *FsArchive) Init(rawConfig json.RawMessage) error {
var config FsArchiveConfig
if err := json.Unmarshal(rawConfig, &config); err != nil {
fmt.Errorf("fsBackend Init()- %w", err)
@@ -72,6 +76,7 @@ func (fsa *FsArchive) Init(rawConfig json.RawMessage) error {
}
func (fsa *FsArchive) LoadJobData(job *schema.Job) (schema.JobData, error) {
filename := getPath(job, fsa.path, "data.json")
f, err := os.Open(filename)
@@ -84,6 +89,7 @@ func (fsa *FsArchive) LoadJobData(job *schema.Job) (schema.JobData, error) {
}
func (fsa *FsArchive) LoadJobMeta(job *schema.Job) (schema.JobMeta, error) {
filename := getPath(job, fsa.path, "meta.json")
f, err := os.Open(filename)
@@ -95,10 +101,11 @@ func (fsa *FsArchive) LoadJobMeta(job *schema.Job) (schema.JobMeta, error) {
return DecodeJobMeta(bufio.NewReader(f))
}
- func (fsa *FsArchive) LoadClusterCfg(name string) (model.Cluster, error) {
+ func (fsa *FsArchive) LoadClusterCfg(name string) (schema.Cluster, error) {
f, err := os.Open(filepath.Join(fsa.path, name, "cluster.json"))
if err != nil {
- return model.Cluster{}, fmt.Errorf("fsBackend LoadClusterCfg()- Cannot open %s: %w", name, err)
+ return schema.Cluster{}, fmt.Errorf("fsBackend LoadClusterCfg()- Cannot open %s: %w", name, err)
}
defer f.Close()
@@ -106,6 +113,7 @@ func (fsa *FsArchive) LoadClusterCfg(name string) (model.Cluster, error) {
}
func (fsa *FsArchive) Iter() <-chan *schema.JobMeta {
ch := make(chan *schema.JobMeta)
go func() {
clustersDir, err := os.ReadDir(fsa.path)
@@ -179,10 +187,13 @@ func (fsa *FsArchive) StoreMeta(jobMeta *schema.JobMeta) error {
}
func (fsa *FsArchive) GetClusters() []string {
return fsa.clusters
}
- func (fsa *FsArchive) Import(jobMeta *schema.JobMeta, jobData *schema.JobData) error {
+ func (fsa *FsArchive) Import(
+     jobMeta *schema.JobMeta,
+     jobData *schema.JobData) error {
job := schema.Job{
BaseJob: jobMeta.BaseJob,
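
A sketch of initializing this backend and reading one job's metadata; it assumes FsArchiveConfig carries a single `path` key (the config struct is not shown in this diff) and that `job` is a *schema.Job obtained elsewhere:

    var fsa FsArchive
    if err := fsa.Init(json.RawMessage(`{"path": "./var/job-archive"}`)); err != nil {
        log.Fatal(err)
    }
    // getPath splits the job ID in two: job 1234567 lands in .../1234/567/...
    // (jobID/1000, then jobID%1000 zero-padded to three digits).
    meta, err := fsa.LoadJobMeta(job)
    if err != nil {
        log.Fatal(err)
    }
    fmt.Println(meta.JobID)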


@@ -8,7 +8,6 @@ import (
"encoding/json"
"io"
"github.com/ClusterCockpit/cc-backend/internal/graph/model"
"github.com/ClusterCockpit/cc-backend/pkg/schema"
)
@@ -34,8 +33,8 @@ func DecodeJobMeta(r io.Reader) (schema.JobMeta, error) {
return d, nil
}
- func DecodeCluster(r io.Reader) (model.Cluster, error) {
-     var c model.Cluster
+ func DecodeCluster(r io.Reader) (schema.Cluster, error) {
+     var c schema.Cluster
if err := json.NewDecoder(r).Decode(&c); err != nil {
return c, err
}
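
The decoder in use, sketched from within the package; the archive path is made up:

    f, err := os.Open("./var/job-archive/emmy/cluster.json") // hypothetical path
    if err != nil {
        log.Fatal(err)
    }
    defer f.Close()
    cluster, err := DecodeCluster(bufio.NewReader(f))
    if err != nil {
        log.Fatal(err)
    }
    fmt.Println(cluster.Name, len(cluster.MetricConfig))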

pkg/schema/cluster.go (new file, +174 lines)

@@ -0,0 +1,174 @@
// Copyright (C) 2022 NHR@FAU, University Erlangen-Nuremberg.
// All rights reserved.
// Use of this source code is governed by a MIT-style
// license that can be found in the LICENSE file.
package schema
import "strconv"
type Accelerator struct {
ID string `json:"id"`
Type string `json:"type"`
Model string `json:"model"`
}
type Topology struct {
Node []int `json:"node"`
Socket [][]int `json:"socket"`
MemoryDomain [][]int `json:"memoryDomain"`
Die [][]int `json:"die"`
Core [][]int `json:"core"`
Accelerators []*Accelerator `json:"accelerators"`
}
type SubCluster struct {
Name string `json:"name"`
Nodes string `json:"nodes"`
NumberOfNodes int `json:"numberOfNodes"`
ProcessorType string `json:"processorType"`
SocketsPerNode int `json:"socketsPerNode"`
CoresPerSocket int `json:"coresPerSocket"`
ThreadsPerCore int `json:"threadsPerCore"`
FlopRateScalar int `json:"flopRateScalar"`
FlopRateSimd int `json:"flopRateSimd"`
MemoryBandwidth int `json:"memoryBandwidth"`
Topology *Topology `json:"topology"`
}
type SubClusterConfig struct {
Name string `json:"name"`
Peak float64 `json:"peak"`
Normal float64 `json:"normal"`
Caution float64 `json:"caution"`
Alert float64 `json:"alert"`
}
type MetricConfig struct {
Name string `json:"name"`
Unit string `json:"unit"`
Scope MetricScope `json:"scope"`
Aggregation *string `json:"aggregation"`
Timestep int `json:"timestep"`
Peak *float64 `json:"peak"`
Normal *float64 `json:"normal"`
Caution *float64 `json:"caution"`
Alert *float64 `json:"alert"`
SubClusters []*SubClusterConfig `json:"subClusters"`
}
type Cluster struct {
Name string `json:"name"`
MetricConfig []*MetricConfig `json:"metricConfig"`
SubClusters []*SubCluster `json:"subClusters"`
}
// Return a list of socket IDs given a list of hwthread IDs. Even if just one
// hwthread is in that socket, add it to the list. If no hwthreads other than
// those in the argument list are assigned to one of the sockets in the first
// return value, return true as the second value. TODO: Optimize this, there
// must be a more efficient way/algorithm.
func (topo *Topology) GetSocketsFromHWThreads(
hwthreads []int) (sockets []int, exclusive bool) {
socketsMap := map[int]int{}
for _, hwthread := range hwthreads {
for socket, hwthreadsInSocket := range topo.Socket {
for _, hwthreadInSocket := range hwthreadsInSocket {
if hwthread == hwthreadInSocket {
socketsMap[socket] += 1
}
}
}
}
exclusive = true
hwthreadsPerSocket := len(topo.Node) / len(topo.Socket)
sockets = make([]int, 0, len(socketsMap))
for socket, count := range socketsMap {
sockets = append(sockets, socket)
exclusive = exclusive && count == hwthreadsPerSocket
}
return sockets, exclusive
}
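
A worked example (not from the commit) on a toy topology; GetCoresFromHWThreads and GetMemoryDomainsFromHWThreads below follow the same pattern. Map iteration makes the order of the returned IDs unspecified:

    // Toy two-socket node: hwthreads 0-3 on socket 0, 4-7 on socket 1.
    topo := Topology{
        Node:   []int{0, 1, 2, 3, 4, 5, 6, 7},
        Socket: [][]int{{0, 1, 2, 3}, {4, 5, 6, 7}},
    }
    sockets, excl := topo.GetSocketsFromHWThreads([]int{0, 1, 2, 3})
    fmt.Println(sockets, excl) // [0] true: all 4 of socket 0's hwthreads used
    sockets, excl = topo.GetSocketsFromHWThreads([]int{0, 4})
    fmt.Println(sockets, excl) // e.g. [0 1] false: both sockets touched, neither fully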
// Return a list of core IDs given a list of hwthread IDs. Even if just one
// hwthread is in that core, add it to the list. If no hwthreads other than
// those in the argument list are assigned to one of the cores in the first
// return value, return true as the second value. TODO: Optimize this, there
// must be a more efficient way/algorithm.
func (topo *Topology) GetCoresFromHWThreads(
hwthreads []int) (cores []int, exclusive bool) {
coresMap := map[int]int{}
for _, hwthread := range hwthreads {
for core, hwthreadsInCore := range topo.Core {
for _, hwthreadInCore := range hwthreadsInCore {
if hwthread == hwthreadInCore {
coresMap[core] += 1
}
}
}
}
exclusive = true
hwthreadsPerCore := len(topo.Node) / len(topo.Core)
cores = make([]int, 0, len(coresMap))
for core, count := range coresMap {
cores = append(cores, core)
exclusive = exclusive && count == hwthreadsPerCore
}
return cores, exclusive
}
// Return a list of memory domain IDs given a list of hwthread IDs. Even if
// just one hwthread is in that memory domain, add it to the list. If no
// hwthreads other than those in the argument list are assigned to one of the
// memory domains in the first return value, return true as the second value.
// TODO: Optimize this, there must be a more efficient way/algorithm.
func (topo *Topology) GetMemoryDomainsFromHWThreads(
hwthreads []int) (memDoms []int, exclusive bool) {
memDomsMap := map[int]int{}
for _, hwthread := range hwthreads {
for memDom, hwthreadsInmemDom := range topo.MemoryDomain {
for _, hwthreadInmemDom := range hwthreadsInmemDom {
if hwthread == hwthreadInmemDom {
memDomsMap[memDom] += 1
}
}
}
}
exclusive = true
hwthreadsPermemDom := len(topo.Node) / len(topo.MemoryDomain)
memDoms = make([]int, 0, len(memDomsMap))
for memDom, count := range memDomsMap {
memDoms = append(memDoms, memDom)
exclusive = exclusive && count == hwthreadsPermemDom
}
return memDoms, exclusive
}
func (topo *Topology) GetAcceleratorIDs() ([]int, error) {
accels := make([]int, 0)
for _, accel := range topo.Accelerators {
id, err := strconv.Atoi(accel.ID)
if err != nil {
return nil, err
}
accels = append(accels, id)
}
return accels, nil
}
func (topo *Topology) GetAcceleratorIndex(id string) (int, bool) {
for idx, accel := range topo.Accelerators {
if accel.ID == id {
return idx, true
}
}
return -1, false
}
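
A brief sketch of the accelerator helpers; GetAcceleratorIDs only succeeds when every ID is a numeric string, since it parses them with strconv.Atoi:

    topo := Topology{Accelerators: []*Accelerator{
        {ID: "0", Type: "gpu", Model: "A100"}, // hypothetical entry
    }}
    ids, err := topo.GetAcceleratorIDs()
    idx, ok := topo.GetAcceleratorIndex("0")
    fmt.Println(ids, err, idx, ok) // [0] <nil> 0 true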

pkg/schema/config.go (new file, +108 lines)

@@ -0,0 +1,108 @@
// Copyright (C) 2022 NHR@FAU, University Erlangen-Nuremberg.
// All rights reserved.
// Use of this source code is governed by a MIT-style
// license that can be found in the LICENSE file.
package schema
import (
"encoding/json"
"time"
)
type LdapConfig struct {
Url string `json:"url"`
UserBase string `json:"user_base"`
SearchDN string `json:"search_dn"`
UserBind string `json:"user_bind"`
UserFilter string `json:"user_filter"`
SyncInterval string `json:"sync_interval"` // Parsed using time.ParseDuration.
SyncDelOldUsers bool `json:"sync_del_old_users"`
}
type JWTAuthConfig struct {
// Specifies for how long a session or JWT shall be valid
// as a string parsable by time.ParseDuration().
MaxAge int64 `json:"max-age"`
}
type IntRange struct {
From int `json:"from"`
To int `json:"to"`
}
type TimeRange struct {
From *time.Time `json:"from"`
To *time.Time `json:"to"`
}
type FilterRanges struct {
Duration *IntRange `json:"duration"`
NumNodes *IntRange `json:"numNodes"`
StartTime *TimeRange `json:"startTime"`
}
type ClusterConfig struct {
Name string `json:"name"`
FilterRanges *FilterRanges `json:"filterRanges"`
MetricDataRepository json.RawMessage `json:"metricDataRepository"`
}
// Format of the configuration (file). See below for the defaults.
type ProgramConfig struct {
// Address the http (or https) server will listen on (for example: 'localhost:80').
Addr string `json:"addr"`
// Drop root permissions once .env was read and the port was taken.
User string `json:"user"`
Group string `json:"group"`
// Disable authentication (for everything: API, Web-UI, ...)
DisableAuthentication bool `json:"disable-authentication"`
// If `embed-static-files` is true (default), the frontend files are directly
// embedded into the Go binary and expected to be in web/frontend. Only if
// it is false are the files in `static-files` served instead.
EmbedStaticFiles bool `json:"embed-static-files"`
StaticFiles string `json:"static-files"`
// 'sqlite3' or 'mysql' (mysql will work for mariadb as well)
DBDriver string `json:"db-driver"`
// For sqlite3 a filename, for mysql a DSN in this format: https://github.com/go-sql-driver/mysql#dsn-data-source-name (Without query parameters!).
DB string `json:"db"`
// Config for job archive
Archive json.RawMessage `json:"archive"`
// Keep all metric data in the metric data repositories,
// do not write to the job-archive.
DisableArchive bool `json:"disable-archive"`
// For LDAP Authentication and user synchronisation.
LdapConfig *LdapConfig `json:"ldap"`
JwtConfig *JWTAuthConfig `json:"jwts"`
// If 0 or empty, the session/token does not expire!
SessionMaxAge string `json:"session-max-age"`
// If both those options are not empty, use HTTPS using those certificates.
HttpsCertFile string `json:"https-cert-file"`
HttpsKeyFile string `json:"https-key-file"`
// If not the empty string and `addr` does not end in ":80",
// redirect every request incoming at port 80 to that url.
RedirectHttpTo string `json:"redirect-http-to"`
// If overwritten, at least all the options in the defaults below must
// be provided! Most options here can be overwritten by the user.
UiDefaults map[string]interface{} `json:"ui-defaults"`
// Where to store MachineState files
MachineStateDir string `json:"machine-state-dir"`
// If not zero, automatically mark jobs as stopped that have been running X seconds longer than their walltime.
StopJobsExceedingWalltime int `json:"stop-jobs-exceeding-walltime"`
// Array of Clusters
Clusters []*ClusterConfig `json:"clusters"`
}
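
For illustration, a minimal config unmarshalled into the struct above; the values are placeholders, and omitted fields keep their zero values:

    raw := []byte(`{
        "addr": "localhost:8080",
        "db-driver": "sqlite3",
        "db": "./var/job.db"
    }`)
    var cfg ProgramConfig
    if err := json.Unmarshal(raw, &cfg); err != nil {
        log.Fatal(err)
    }
    fmt.Println(cfg.Addr, cfg.DBDriver) // localhost:8080 sqlite3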


@@ -2,7 +2,7 @@
// All rights reserved.
// Use of this source code is governed by a MIT-style
// license that can be found in the LICENSE file.
- package archive
+ package schema
import (
"fmt"
@@ -16,7 +16,7 @@ type Kind int
const (
Meta Kind = iota + 1
Data
- Cluster
+ ClusterCfg
)
func Validate(k Kind, v io.Reader) (err error) {
@@ -27,7 +27,7 @@ func Validate(k Kind, v io.Reader) (err error) {
s, err = jsonschema.Compile("https://raw.githubusercontent.com/ClusterCockpit/cc-specifications/master/datastructures/job-meta.schema.json")
case Data:
s, err = jsonschema.Compile("https://raw.githubusercontent.com/ClusterCockpit/cc-specifications/master/datastructures/job-data.schema.json")
- case Cluster:
+ case ClusterCfg:
s, err = jsonschema.Compile("https://raw.githubusercontent.com/ClusterCockpit/cc-specifications/master/datastructures/cluster.schema.json")
default:
return fmt.Errorf("unkown schema kind ")