Reformat and Refactor packages. Rebuild GraphQL.

Jan Eitzinger
2022-09-07 12:24:45 +02:00
parent b7907d33aa
commit 8856f26fb0
30 changed files with 5728 additions and 3200 deletions

View File

@@ -8,7 +8,6 @@ import (
"encoding/json"
"fmt"
"github.com/ClusterCockpit/cc-backend/internal/graph/model"
"github.com/ClusterCockpit/cc-backend/pkg/schema"
)
@@ -21,7 +20,7 @@ type ArchiveBackend interface {
// replaces previous loadFromArchive
LoadJobData(job *schema.Job) (schema.JobData, error)
- LoadClusterCfg(name string) (model.Cluster, error)
+ LoadClusterCfg(name string) (schema.Cluster, error)
StoreMeta(jobMeta *schema.JobMeta) error
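The interface now hands back schema.Cluster instead of the GraphQL model type. A minimal, hedged usage sketch combining the retyped LoadClusterCfg() with GetHandle() from the next hunk; the import path, the prior initialization, and the cluster name "emmy" are assumptions for illustration, not taken from this commit:

package main

import (
	"fmt"
	"log"

	"github.com/ClusterCockpit/cc-backend/pkg/archive" // assumed import path
)

func main() {
	// Assumes the archive backend was already initialized at startup.
	ar := archive.GetHandle()

	// "emmy" is a placeholder cluster name.
	cluster, err := ar.LoadClusterCfg("emmy")
	if err != nil {
		log.Fatal(err)
	}

	// schema.Cluster carries Name, SubClusters and MetricConfig (see the hunks below).
	fmt.Println(cluster.Name, len(cluster.SubClusters), len(cluster.MetricConfig))
}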
@@ -62,7 +61,11 @@ func GetHandle() ArchiveBackend {
}
// Helper to metricdata.LoadAverages().
- func LoadAveragesFromArchive(job *schema.Job, metrics []string, data [][]schema.Float) error {
+ func LoadAveragesFromArchive(
+ job *schema.Job,
+ metrics []string,
+ data [][]schema.Float) error {
metaFile, err := ar.LoadJobMeta(job)
if err != nil {
return err
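A hedged sketch of the call pattern behind the "Helper to metricdata.LoadAverages()" comment above; the metric names are placeholders and the append-one-average-per-metric behavior is an assumption, not a statement about this commit:

package archive // sketch, placed alongside the helper above

import "github.com/ClusterCockpit/cc-backend/pkg/schema"

// loadAveragesSketch asks for two metrics of an archived job and lets
// LoadAveragesFromArchive fill one inner slice per requested metric.
func loadAveragesSketch(job *schema.Job) ([][]schema.Float, error) {
	metrics := []string{"flops_any", "mem_bw"} // hypothetical metric names
	data := make([][]schema.Float, len(metrics))
	if err := LoadAveragesFromArchive(job, metrics, data); err != nil {
		return nil, err
	}
	return data, nil
}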
@@ -80,6 +83,7 @@ func LoadAveragesFromArchive(job *schema.Job, metrics []string, data [][]schema.
}
func GetStatistics(job *schema.Job) (map[string]schema.JobStatistics, error) {
metaFile, err := ar.LoadJobMeta(job)
if err != nil {
return nil, err
@@ -89,12 +93,14 @@ func GetStatistics(job *schema.Job) (map[string]schema.JobStatistics, error) {
}
func Import(job *schema.JobMeta, jobData *schema.JobData) error {
return ar.Import(job, jobData)
}
// If the job is archived, find its `meta.json` file and override the tags list
// in that JSON file. If the job is not archived, nothing is done.
func UpdateTags(job *schema.Job, tags []*schema.Tag) error {
if job.State == schema.JobStateRunning {
return nil
}
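The comment above states the intent of UpdateTags; a hedged sketch of that pattern, not the literal function body of this commit, could reuse LoadJobMeta and StoreMeta from the backend handle:

package archive // sketch only

import "github.com/ClusterCockpit/cc-backend/pkg/schema"

// updateTagsSketch overrides the tag list in the archived meta.json of a
// finished job. It assumes schema.JobMeta exposes Tags via its embedded BaseJob.
func updateTagsSketch(job *schema.Job, tags []*schema.Tag) error {
	if job.State == schema.JobStateRunning {
		return nil // running jobs have no archive entry yet
	}

	jobMeta, err := ar.LoadJobMeta(job)
	if err != nil {
		return err
	}

	jobMeta.Tags = append([]*schema.Tag{}, tags...) // override, do not merge
	return ar.StoreMeta(&jobMeta)
}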

View File

@@ -7,20 +7,18 @@ package archive
import (
"errors"
"fmt"
"time"
"github.com/ClusterCockpit/cc-backend/internal/graph/model"
"github.com/ClusterCockpit/cc-backend/pkg/lrucache"
"github.com/ClusterCockpit/cc-backend/pkg/schema"
)
var cache *lrucache.Cache = lrucache.New(1024)
- var Clusters []*model.Cluster
+ var Clusters []*schema.Cluster
var nodeLists map[string]map[string]NodeList
func initClusterConfig() error {
- Clusters = []*model.Cluster{}
+ Clusters = []*schema.Cluster{}
nodeLists = map[string]map[string]NodeList{}
for _, c := range ar.GetClusters() {
@@ -30,7 +28,9 @@ func initClusterConfig() error {
return err
}
- if len(cluster.Name) == 0 || len(cluster.MetricConfig) == 0 || len(cluster.SubClusters) == 0 {
+ if len(cluster.Name) == 0 ||
+ len(cluster.MetricConfig) == 0 ||
+ len(cluster.SubClusters) == 0 {
return errors.New("cluster.name, cluster.metricConfig and cluster.SubClusters should not be empty")
}
@@ -51,10 +51,6 @@ func initClusterConfig() error {
}
}
- if cluster.FilterRanges.StartTime.To.IsZero() {
- cluster.FilterRanges.StartTime.To = time.Unix(0, 0)
- }
Clusters = append(Clusters, &cluster)
nodeLists[cluster.Name] = make(map[string]NodeList)
@@ -74,7 +70,8 @@ func initClusterConfig() error {
return nil
}
- func GetCluster(cluster string) *model.Cluster {
+ func GetCluster(cluster string) *schema.Cluster {
for _, c := range Clusters {
if c.Name == cluster {
return c
@@ -83,7 +80,8 @@ func GetCluster(cluster string) *model.Cluster {
return nil
}
- func GetSubCluster(cluster, subcluster string) *model.SubCluster {
+ func GetSubCluster(cluster, subcluster string) *schema.SubCluster {
for _, c := range Clusters {
if c.Name == cluster {
for _, p := range c.SubClusters {
@@ -96,7 +94,8 @@ func GetSubCluster(cluster, subcluster string) *model.SubCluster {
return nil
}
- func GetMetricConfig(cluster, metric string) *model.MetricConfig {
+ func GetMetricConfig(cluster, metric string) *schema.MetricConfig {
for _, c := range Clusters {
if c.Name == cluster {
for _, m := range c.MetricConfig {
@@ -112,6 +111,7 @@ func GetMetricConfig(cluster, metric string) *model.MetricConfig {
// AssignSubCluster sets the `job.subcluster` property of the job based
// on its cluster and resources.
func AssignSubCluster(job *schema.BaseJob) error {
cluster := GetCluster(job.Cluster)
if cluster == nil {
return fmt.Errorf("unkown cluster: %#v", job.Cluster)
@@ -147,6 +147,7 @@ func AssignSubCluster(job *schema.BaseJob) error {
}
func GetSubClusterByNode(cluster, hostname string) (string, error) {
for sc, nl := range nodeLists[cluster] {
if nl != nil && nl.Contains(hostname) {
return sc, nil

View File

@@ -14,7 +14,6 @@ import (
"strconv"
"time"
"github.com/ClusterCockpit/cc-backend/internal/graph/model"
"github.com/ClusterCockpit/cc-backend/pkg/log"
"github.com/ClusterCockpit/cc-backend/pkg/schema"
)
@@ -30,7 +29,11 @@ type FsArchive struct {
// For a given job, return the path of the `data.json`/`meta.json` file.
// TODO: Implement Issue ClusterCockpit/ClusterCockpit#97
- func getPath(job *schema.Job, rootPath string, file string) string {
+ func getPath(
+ job *schema.Job,
+ rootPath string,
+ file string) string {
lvl1, lvl2 := fmt.Sprintf("%d", job.JobID/1000), fmt.Sprintf("%03d", job.JobID%1000)
return filepath.Join(
rootPath,
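The filepath.Join call is truncated by the hunk; purely as an illustration of the two-level split computed above (the full directory layout is not reproduced here):

package main

import "fmt"

func main() {
	// Job 1234567 is filed under lvl1 "1234" and lvl2 "567" (zero-padded).
	jobID := int64(1234567)
	fmt.Printf("%d/%03d\n", jobID/1000, jobID%1000) // prints "1234/567"
}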
@@ -51,6 +54,7 @@ func loadJobMeta(filename string) (schema.JobMeta, error) {
}
func (fsa *FsArchive) Init(rawConfig json.RawMessage) error {
var config FsArchiveConfig
if err := json.Unmarshal(rawConfig, &config); err != nil {
fmt.Errorf("fsBackend Init()- %w", err)
@@ -72,6 +76,7 @@ func (fsa *FsArchive) Init(rawConfig json.RawMessage) error {
}
func (fsa *FsArchive) LoadJobData(job *schema.Job) (schema.JobData, error) {
filename := getPath(job, fsa.path, "data.json")
f, err := os.Open(filename)
@@ -84,6 +89,7 @@ func (fsa *FsArchive) LoadJobData(job *schema.Job) (schema.JobData, error) {
}
func (fsa *FsArchive) LoadJobMeta(job *schema.Job) (schema.JobMeta, error) {
filename := getPath(job, fsa.path, "meta.json")
f, err := os.Open(filename)
@@ -95,10 +101,11 @@ func (fsa *FsArchive) LoadJobMeta(job *schema.Job) (schema.JobMeta, error) {
return DecodeJobMeta(bufio.NewReader(f))
}
- func (fsa *FsArchive) LoadClusterCfg(name string) (model.Cluster, error) {
+ func (fsa *FsArchive) LoadClusterCfg(name string) (schema.Cluster, error) {
f, err := os.Open(filepath.Join(fsa.path, name, "cluster.json"))
if err != nil {
- return model.Cluster{}, fmt.Errorf("fsBackend LoadClusterCfg()- Cannot open %s: %w", name, err)
+ return schema.Cluster{}, fmt.Errorf("fsBackend LoadClusterCfg()- Cannot open %s: %w", name, err)
}
defer f.Close()
@@ -106,6 +113,7 @@ func (fsa *FsArchive) LoadClusterCfg(name string) (model.Cluster, error) {
}
func (fsa *FsArchive) Iter() <-chan *schema.JobMeta {
ch := make(chan *schema.JobMeta)
go func() {
clustersDir, err := os.ReadDir(fsa.path)
@@ -179,10 +187,13 @@ func (fsa *FsArchive) StoreMeta(jobMeta *schema.JobMeta) error {
}
func (fsa *FsArchive) GetClusters() []string {
return fsa.clusters
}
- func (fsa *FsArchive) Import(jobMeta *schema.JobMeta, jobData *schema.JobData) error {
+ func (fsa *FsArchive) Import(
+ jobMeta *schema.JobMeta,
+ jobData *schema.JobData) error {
job := schema.Job{
BaseJob: jobMeta.BaseJob,

View File

@@ -8,7 +8,6 @@ import (
"encoding/json"
"io"
"github.com/ClusterCockpit/cc-backend/internal/graph/model"
"github.com/ClusterCockpit/cc-backend/pkg/schema"
)
@@ -34,8 +33,8 @@ func DecodeJobMeta(r io.Reader) (schema.JobMeta, error) {
return d, nil
}
- func DecodeCluster(r io.Reader) (model.Cluster, error) {
- var c model.Cluster
+ func DecodeCluster(r io.Reader) (schema.Cluster, error) {
+ var c schema.Cluster
if err := json.NewDecoder(r).Decode(&c); err != nil {
return c, err
}
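A small usage sketch for the retyped decoder; the file path and the import path are illustrative assumptions:

package main

import (
	"bufio"
	"fmt"
	"log"
	"os"

	"github.com/ClusterCockpit/cc-backend/pkg/archive" // assumed import path
)

func main() {
	f, err := os.Open("cluster.json") // illustrative path
	if err != nil {
		log.Fatal(err)
	}
	defer f.Close()

	// DecodeCluster now yields schema.Cluster instead of the GraphQL model type.
	cluster, err := archive.DecodeCluster(bufio.NewReader(f))
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(cluster.Name)
}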

View File

@@ -1,45 +0,0 @@
// Copyright (C) 2022 NHR@FAU, University Erlangen-Nuremberg.
// All rights reserved.
// Use of this source code is governed by a MIT-style
// license that can be found in the LICENSE file.
package archive
import (
"fmt"
"io"
"github.com/santhosh-tekuri/jsonschema"
)
type Kind int
const (
Meta Kind = iota + 1
Data
Cluster
)
func Validate(k Kind, v io.Reader) (err error) {
var s *jsonschema.Schema
switch k {
case Meta:
s, err = jsonschema.Compile("https://raw.githubusercontent.com/ClusterCockpit/cc-specifications/master/datastructures/job-meta.schema.json")
case Data:
s, err = jsonschema.Compile("https://raw.githubusercontent.com/ClusterCockpit/cc-specifications/master/datastructures/job-data.schema.json")
case Cluster:
s, err = jsonschema.Compile("https://raw.githubusercontent.com/ClusterCockpit/cc-specifications/master/datastructures/cluster.schema.json")
default:
return fmt.Errorf("unkown schema kind ")
}
if err != nil {
return err
}
if err = s.Validate(v); err != nil {
return err
}
return nil
}
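This commit drops the validate helper entirely. For reference, a hedged sketch of how the removed helper could be invoked before the removal; the file path and import path are illustrative, and the code no longer compiles against the tree after this commit:

package main

import (
	"log"
	"os"

	"github.com/ClusterCockpit/cc-backend/pkg/archive" // assumed import path
)

func main() {
	f, err := os.Open("meta.json") // illustrative path
	if err != nil {
		log.Fatal(err)
	}
	defer f.Close()

	// Check an archived job-meta document against the published JSON schema.
	if err := archive.Validate(archive.Meta, f); err != nil {
		log.Fatal(err)
	}
}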