List of slurm partitions via GraphQL

Lou Knauer, 2022-03-14 10:24:27 +01:00
parent 85ad6d9543
commit 839db9fdae
4 changed files with 75 additions and 21 deletions


@@ -61,6 +61,10 @@ models:
         resolver: true
       metaData:
         resolver: true
+  Cluster:
+    fields:
+      partitions:
+        resolver: true
   NullableFloat: { model: "github.com/ClusterCockpit/cc-backend/schema.Float" }
   MetricScope: { model: "github.com/ClusterCockpit/cc-backend/schema.MetricScope" }
   JobStatistics: { model: "github.com/ClusterCockpit/cc-backend/schema.JobStatistics" }
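
The config hunk above registers Cluster.partitions as a resolver-backed field: with resolver: true, gqlgen emits a ClusterResolver method for it instead of reading a struct field from the Cluster model, which is what lets the partition list be looked up at query time (here from the job repository, see the resolver change at the end of this commit). The generated code in the following hunks is then refreshed by re-running the generator, typically with go run github.com/99designs/gqlgen (assuming the project's usual gqlgen setup).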


@@ -57,6 +57,7 @@ type ComplexityRoot struct {
 		FilterRanges func(childComplexity int) int
 		MetricConfig func(childComplexity int) int
 		Name         func(childComplexity int) int
+		Partitions   func(childComplexity int) int
 		SubClusters  func(childComplexity int) int
 	}

@@ -238,7 +239,7 @@ type ComplexityRoot struct {
 }

 type ClusterResolver interface {
-	SubClusters(ctx context.Context, obj *model.Cluster) ([]*model.SubCluster, error)
+	Partitions(ctx context.Context, obj *model.Cluster) ([]string, error)
 }
 type JobResolver interface {
 	MetaData(ctx context.Context, obj *schema.Job) (interface{}, error)

@@ -321,6 +322,13 @@ func (e *executableSchema) Complexity(typeName, field string, childComplexity in

 		return e.complexity.Cluster.Name(childComplexity), true

+	case "Cluster.partitions":
+		if e.complexity.Cluster.Partitions == nil {
+			break
+		}
+
+		return e.complexity.Cluster.Partitions(childComplexity), true
+
 	case "Cluster.subClusters":
 		if e.complexity.Cluster.SubClusters == nil {
 			break
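
The ComplexityRoot field and this Complexity switch case give the new partitions field a cost in gqlgen's query-complexity accounting. That only matters if the server installs a complexity limit extension; purely as an illustration of how these generated hooks get used (not part of this commit, and the graph/generated import paths are assumed), a server could enable such a limit like this:

package main

import (
	"net/http"

	"github.com/99designs/gqlgen/graphql/handler"
	"github.com/99designs/gqlgen/graphql/handler/extension"

	// Assumed import paths for this repository's generated code and resolver root.
	"github.com/ClusterCockpit/cc-backend/graph"
	"github.com/ClusterCockpit/cc-backend/graph/generated"
)

func main() {
	// Build the executable schema from the generated code and the resolver root
	// (a real setup would also wire up the database-backed repository).
	srv := handler.NewDefaultServer(
		generated.NewExecutableSchema(generated.Config{Resolvers: &graph.Resolver{}}))

	// Reject queries whose total field complexity exceeds the limit; the
	// generated Complexity() switch above supplies the per-field costs.
	srv.Use(extension.FixedComplexityLimit(200))

	http.Handle("/query", srv)
	_ = http.ListenAndServe(":8080", nil)
}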

@@ -1260,9 +1268,10 @@ type Job {

 type Cluster {
   name: String!
+  partitions: [String!]! # Slurm partitions
   metricConfig: [MetricConfig!]!
   filterRanges: FilterRanges!
-  subClusters: [SubCluster!]!
+  subClusters: [SubCluster!]! # Hardware partitions/subclusters
 }

 type SubCluster {

@@ -2084,6 +2093,41 @@ func (ec *executionContext) _Cluster_name(ctx context.Context, field graphql.Col
 	return ec.marshalNString2string(ctx, field.Selections, res)
 }

+func (ec *executionContext) _Cluster_partitions(ctx context.Context, field graphql.CollectedField, obj *model.Cluster) (ret graphql.Marshaler) {
+	defer func() {
+		if r := recover(); r != nil {
+			ec.Error(ctx, ec.Recover(ctx, r))
+			ret = graphql.Null
+		}
+	}()
+	fc := &graphql.FieldContext{
+		Object:     "Cluster",
+		Field:      field,
+		Args:       nil,
+		IsMethod:   true,
+		IsResolver: true,
+	}
+
+	ctx = graphql.WithFieldContext(ctx, fc)
+	resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
+		ctx = rctx // use context from middleware stack in children
+		return ec.resolvers.Cluster().Partitions(rctx, obj)
+	})
+	if err != nil {
+		ec.Error(ctx, err)
+		return graphql.Null
+	}
+	if resTmp == nil {
+		if !graphql.HasFieldError(ctx, fc) {
+			ec.Errorf(ctx, "must not be null")
+		}
+		return graphql.Null
+	}
+	res := resTmp.([]string)
+	fc.Result = res
+	return ec.marshalNString2ᚕstringᚄ(ctx, field.Selections, res)
+}
+
 func (ec *executionContext) _Cluster_metricConfig(ctx context.Context, field graphql.CollectedField, obj *model.Cluster) (ret graphql.Marshaler) {
 	defer func() {
 		if r := recover(); r != nil {

@@ -2165,14 +2209,14 @@ func (ec *executionContext) _Cluster_subClusters(ctx context.Context, field grap
 		Object:     "Cluster",
 		Field:      field,
 		Args:       nil,
-		IsMethod:   true,
-		IsResolver: true,
+		IsMethod:   false,
+		IsResolver: false,
 	}

 	ctx = graphql.WithFieldContext(ctx, fc)
 	resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
 		ctx = rctx // use context from middleware stack in children
-		return ec.resolvers.Cluster().SubClusters(rctx, obj)
+		return obj.SubClusters, nil
 	})
 	if err != nil {
 		ec.Error(ctx, err)

@@ -7636,6 +7680,20 @@ func (ec *executionContext) _Cluster(ctx context.Context, sel ast.SelectionSet,
 			if out.Values[i] == graphql.Null {
 				atomic.AddUint32(&invalids, 1)
 			}
+		case "partitions":
+			field := field
+			out.Concurrently(i, func() (res graphql.Marshaler) {
+				defer func() {
+					if r := recover(); r != nil {
+						ec.Error(ctx, ec.Recover(ctx, r))
+					}
+				}()
+				res = ec._Cluster_partitions(ctx, field, obj)
+				if res == graphql.Null {
+					atomic.AddUint32(&invalids, 1)
+				}
+				return res
+			})
 		case "metricConfig":
 			out.Values[i] = ec._Cluster_metricConfig(ctx, field, obj)
 			if out.Values[i] == graphql.Null {

@@ -7647,19 +7705,10 @@ func (ec *executionContext) _Cluster(ctx context.Context, sel ast.SelectionSet,
 				atomic.AddUint32(&invalids, 1)
 			}
 		case "subClusters":
-			field := field
-			out.Concurrently(i, func() (res graphql.Marshaler) {
-				defer func() {
-					if r := recover(); r != nil {
-						ec.Error(ctx, ec.Recover(ctx, r))
-					}
-				}()
-				res = ec._Cluster_subClusters(ctx, field, obj)
-				if res == graphql.Null {
-					atomic.AddUint32(&invalids, 1)
-				}
-				return res
-			})
+			out.Values[i] = ec._Cluster_subClusters(ctx, field, obj)
+			if out.Values[i] == graphql.Null {
+				atomic.AddUint32(&invalids, 1)
+			}
 		default:
 			panic("unknown field " + strconv.Quote(field.Name))
 		}
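
The other notable change in the generated executor is that subClusters is no longer resolved concurrently through the ClusterResolver interface but read straight from the model object (IsResolver: false, return obj.SubClusters, nil), while partitions takes over the concurrent resolver path. That implies the gqlgen Cluster model now carries the subcluster list as a plain field. A rough sketch of such a model, inferred from the generated code above rather than copied from the repository:

package model

// MetricConfig, FilterRanges and SubCluster are the other generated model
// types; they are elided here.

// Cluster as implied by the generated code above: subClusters is a plain
// struct field, while partitions has no field at all and is served by the
// ClusterResolver.Partitions method.
type Cluster struct {
	Name         string          `json:"name"`
	MetricConfig []*MetricConfig `json:"metricConfig"`
	FilterRanges *FilterRanges   `json:"filterRanges"`
	SubClusters  []*SubCluster   `json:"subClusters"`
}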


@@ -31,9 +31,10 @@ type Job {

 type Cluster {
   name: String!
+  partitions: [String!]! # Slurm partitions
   metricConfig: [MetricConfig!]!
   filterRanges: FilterRanges!
-  subClusters: [SubCluster!]!
+  subClusters: [SubCluster!]! # Hardware partitions/subclusters
 }

 type SubCluster {
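
With the schema extended, a client can ask for a cluster's Slurm partitions next to its hardware subclusters. A minimal example query (assuming the root Query type exposes a clusters field and that SubCluster has a name field; neither is shown in this diff):

query {
  clusters {
    name
    partitions            # Slurm partitions (added by this commit)
    subClusters { name }  # hardware partitions/subclusters
  }
}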


@@ -18,8 +18,8 @@ import (
 	"github.com/ClusterCockpit/cc-backend/schema"
 )

-func (r *clusterResolver) SubClusters(ctx context.Context, obj *model.Cluster) ([]*model.SubCluster, error) {
-	panic(fmt.Errorf("not implemented"))
+func (r *clusterResolver) Partitions(ctx context.Context, obj *model.Cluster) ([]string, error) {
+	return r.Repo.Partitions(obj.Name)
 }

 func (r *jobResolver) MetaData(ctx context.Context, obj *schema.Job) (interface{}, error) {
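
The resolver itself only delegates to r.Repo.Partitions(obj.Name); the repository method is not part of this diff. As a hedged sketch of what such a lookup could look like, assuming a SQL job table that stores the Slurm partition of each job (the type, table and column names below are illustrative, not taken from this commit):

package repository

import "database/sql"

// JobRepository is a stand-in for the repository type the resolver calls into.
type JobRepository struct {
	DB *sql.DB
}

// Partitions returns the distinct Slurm partitions that appear in the job
// table for the given cluster.
func (r *JobRepository) Partitions(cluster string) ([]string, error) {
	rows, err := r.DB.Query(
		"SELECT DISTINCT job.partition FROM job WHERE job.cluster = ?", cluster)
	if err != nil {
		return nil, err
	}
	defer rows.Close()

	partitions := []string{}
	for rows.Next() {
		var p string
		if err := rows.Scan(&p); err != nil {
			return nil, err
		}
		partitions = append(partitions, p)
	}
	return partitions, rows.Err()
}

Because the GraphQL field is declared as [String!]!, the resolver must return a non-nil slice even when no jobs exist for the cluster, which is why the sketch starts from an empty slice instead of a nil one.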