Add global metric list including GraphQL query

Jan Eitzinger 2024-07-11 11:09:14 +02:00
parent bf6b87d65c
commit f1427d5272
Signed by: moebiusband
GPG Key ID: 2574BA29B90D6DD5
9 changed files with 770 additions and 50 deletions


@@ -178,6 +178,18 @@ type NodeMetrics {
metrics: [JobMetricWithName!]!
}
type ClusterSupport {
cluster: String!
subClusters: [String!]!
}
type GlobalMetricListItem {
name: String!
unit: Unit!
scope: MetricScope!
availability: [ClusterSupport!]!
}
type Count {
name: String!
count: Int!
@@ -192,6 +204,7 @@ type User {
type Query {
clusters: [Cluster!]! # List of all clusters
tags: [Tag!]! # List of all tags
globalMetrics: [GlobalMetricListItem!]!
user(username: String!): User
allocatedNodes(cluster: String!): [Count!]!
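
With the schema addition above, a client can ask for every metric known across all clusters in one request. A minimal sketch of such a client follows; the endpoint path /query and the absence of authentication are assumptions for illustration, not part of this commit.

package main

import (
	"bytes"
	"encoding/json"
	"fmt"
	"log"
	"net/http"
)

// Response-side mirrors of the new GraphQL types, kept local to this sketch.
type clusterSupport struct {
	Cluster     string   `json:"cluster"`
	SubClusters []string `json:"subClusters"`
}

type globalMetricListItem struct {
	Name         string           `json:"name"`
	Scope        string           `json:"scope"`
	Availability []clusterSupport `json:"availability"`
}

func main() {
	// The new query introduced by this commit.
	query := `{ globalMetrics { name scope availability { cluster subClusters } } }`
	body, err := json.Marshal(map[string]string{"query": query})
	if err != nil {
		log.Fatal(err)
	}

	// Assumed endpoint; adjust host, path, and auth for a real deployment.
	resp, err := http.Post("http://localhost:8080/query", "application/json", bytes.NewReader(body))
	if err != nil {
		log.Fatal(err)
	}
	defer resp.Body.Close()

	var out struct {
		Data struct {
			GlobalMetrics []globalMetricListItem `json:"globalMetrics"`
		} `json:"data"`
	}
	if err := json.NewDecoder(resp.Body).Decode(&out); err != nil {
		log.Fatal(err)
	}
	for _, m := range out.Data.GlobalMetrics {
		fmt.Printf("%s (%s): available on %d cluster(s)\n", m.Name, m.Scope, len(m.Availability))
	}
}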

go.mod

@@ -7,7 +7,6 @@ require (
github.com/ClusterCockpit/cc-units v0.4.0
github.com/Masterminds/squirrel v1.5.3
github.com/coreos/go-oidc/v3 v3.9.0
github.com/davecgh/go-spew v1.1.1
github.com/go-co-op/gocron v1.25.0
github.com/go-ldap/ldap/v3 v3.4.4
github.com/go-sql-driver/mysql v1.7.0


@@ -61,23 +61,50 @@ models:
    fields:
      partitions:
        resolver: true
  NullableFloat:
    { model: "github.com/ClusterCockpit/cc-backend/pkg/schema.Float" }
  MetricScope:
    { model: "github.com/ClusterCockpit/cc-backend/pkg/schema.MetricScope" }
  MetricValue:
    { model: "github.com/ClusterCockpit/cc-backend/pkg/schema.MetricValue" }
  JobStatistics:
    { model: "github.com/ClusterCockpit/cc-backend/pkg/schema.JobStatistics" }
  GlobalMetricListItem:
    {
      model: "github.com/ClusterCockpit/cc-backend/pkg/schema.GlobalMetricListItem",
    }
  ClusterSupport:
    { model: "github.com/ClusterCockpit/cc-backend/pkg/schema.ClusterSupport" }
  Tag: { model: "github.com/ClusterCockpit/cc-backend/pkg/schema.Tag" }
  Resource:
    { model: "github.com/ClusterCockpit/cc-backend/pkg/schema.Resource" }
  JobState:
    { model: "github.com/ClusterCockpit/cc-backend/pkg/schema.JobState" }
  TimeRange:
    { model: "github.com/ClusterCockpit/cc-backend/pkg/schema.TimeRange" }
  IntRange:
    { model: "github.com/ClusterCockpit/cc-backend/pkg/schema.IntRange" }
  JobMetric:
    { model: "github.com/ClusterCockpit/cc-backend/pkg/schema.JobMetric" }
  Series: { model: "github.com/ClusterCockpit/cc-backend/pkg/schema.Series" }
  MetricStatistics:
    {
      model: "github.com/ClusterCockpit/cc-backend/pkg/schema.MetricStatistics",
    }
  MetricConfig:
    { model: "github.com/ClusterCockpit/cc-backend/pkg/schema.MetricConfig" }
  SubClusterConfig:
    {
      model: "github.com/ClusterCockpit/cc-backend/pkg/schema.SubClusterConfig",
    }
  Accelerator:
    { model: "github.com/ClusterCockpit/cc-backend/pkg/schema.Accelerator" }
  Topology:
    { model: "github.com/ClusterCockpit/cc-backend/pkg/schema.Topology" }
  FilterRanges:
    { model: "github.com/ClusterCockpit/cc-backend/pkg/schema.FilterRanges" }
  SubCluster:
    { model: "github.com/ClusterCockpit/cc-backend/pkg/schema.SubCluster" }
  StatsSeries:
    { model: "github.com/ClusterCockpit/cc-backend/pkg/schema.StatsSeries" }
  Unit: { model: "github.com/ClusterCockpit/cc-backend/pkg/schema.Unit" }


@@ -64,6 +64,11 @@ type ComplexityRoot struct {
SubClusters func(childComplexity int) int
}
ClusterSupport struct {
Cluster func(childComplexity int) int
SubClusters func(childComplexity int) int
}
Count struct {
Count func(childComplexity int) int
Name func(childComplexity int) int
@@ -74,6 +79,13 @@ type ComplexityRoot struct {
TimeWeights func(childComplexity int) int
}
GlobalMetricListItem struct {
Availability func(childComplexity int) int
Name func(childComplexity int) int
Scope func(childComplexity int) int
Unit func(childComplexity int) int
}
HistoPoint struct {
Count func(childComplexity int) int
Value func(childComplexity int) int
@@ -223,6 +235,7 @@ type ComplexityRoot struct {
Query struct {
AllocatedNodes func(childComplexity int, cluster string) int
Clusters func(childComplexity int) int
GlobalMetrics func(childComplexity int) int
Job func(childComplexity int, id string) int
JobMetrics func(childComplexity int, id string, metrics []string, scopes []schema.MetricScope) int
Jobs func(childComplexity int, filter []*model.JobFilter, page *model.PageRequest, order *model.OrderByInput) int
@@ -342,6 +355,7 @@ type MutationResolver interface {
type QueryResolver interface {
Clusters(ctx context.Context) ([]*schema.Cluster, error)
Tags(ctx context.Context) ([]*schema.Tag, error)
GlobalMetrics(ctx context.Context) ([]*schema.GlobalMetricListItem, error)
User(ctx context.Context, username string) (*model.User, error)
AllocatedNodes(ctx context.Context, cluster string) ([]*model.Count, error)
Job(ctx context.Context, id string) (*schema.Job, error)
@@ -417,6 +431,20 @@ func (e *executableSchema) Complexity(typeName, field string, childComplexity in
return e.complexity.Cluster.SubClusters(childComplexity), true
case "ClusterSupport.cluster":
if e.complexity.ClusterSupport.Cluster == nil {
break
}
return e.complexity.ClusterSupport.Cluster(childComplexity), true
case "ClusterSupport.subClusters":
if e.complexity.ClusterSupport.SubClusters == nil {
break
}
return e.complexity.ClusterSupport.SubClusters(childComplexity), true
case "Count.count": case "Count.count":
if e.complexity.Count.Count == nil { if e.complexity.Count.Count == nil {
break break
@ -445,6 +473,34 @@ func (e *executableSchema) Complexity(typeName, field string, childComplexity in
return e.complexity.Footprints.TimeWeights(childComplexity), true return e.complexity.Footprints.TimeWeights(childComplexity), true
case "GlobalMetricListItem.availability":
if e.complexity.GlobalMetricListItem.Availability == nil {
break
}
return e.complexity.GlobalMetricListItem.Availability(childComplexity), true
case "GlobalMetricListItem.name":
if e.complexity.GlobalMetricListItem.Name == nil {
break
}
return e.complexity.GlobalMetricListItem.Name(childComplexity), true
case "GlobalMetricListItem.scope":
if e.complexity.GlobalMetricListItem.Scope == nil {
break
}
return e.complexity.GlobalMetricListItem.Scope(childComplexity), true
case "GlobalMetricListItem.unit":
if e.complexity.GlobalMetricListItem.Unit == nil {
break
}
return e.complexity.GlobalMetricListItem.Unit(childComplexity), true
case "HistoPoint.count": case "HistoPoint.count":
if e.complexity.HistoPoint.Count == nil { if e.complexity.HistoPoint.Count == nil {
break break
@ -1154,6 +1210,13 @@ func (e *executableSchema) Complexity(typeName, field string, childComplexity in
return e.complexity.Query.Clusters(childComplexity), true return e.complexity.Query.Clusters(childComplexity), true
case "Query.globalMetrics":
if e.complexity.Query.GlobalMetrics == nil {
break
}
return e.complexity.Query.GlobalMetrics(childComplexity), true
case "Query.job": case "Query.job":
if e.complexity.Query.Job == nil { if e.complexity.Query.Job == nil {
break break
@ -1899,6 +1962,18 @@ type NodeMetrics {
metrics: [JobMetricWithName!]! metrics: [JobMetricWithName!]!
} }
type ClusterSupport {
cluster: String!
subClusters: [String!]!
}
type GlobalMetricListItem {
name: String!
unit: Unit!
scope: MetricScope!
availability: [ClusterSupport!]!
}
type Count {
name: String!
count: Int!
@@ -1913,6 +1988,7 @@ type User {
type Query {
clusters: [Cluster!]! # List of all clusters
tags: [Tag!]! # List of all tags
globalMetrics: [GlobalMetricListItem!]!
user(username: String!): User
allocatedNodes(cluster: String!): [Count!]!
@@ -2827,6 +2903,94 @@ func (ec *executionContext) fieldContext_Cluster_subClusters(ctx context.Context
return fc, nil
}
func (ec *executionContext) _ClusterSupport_cluster(ctx context.Context, field graphql.CollectedField, obj *schema.ClusterSupport) (ret graphql.Marshaler) {
fc, err := ec.fieldContext_ClusterSupport_cluster(ctx, field)
if err != nil {
return graphql.Null
}
ctx = graphql.WithFieldContext(ctx, fc)
defer func() {
if r := recover(); r != nil {
ec.Error(ctx, ec.Recover(ctx, r))
ret = graphql.Null
}
}()
resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
ctx = rctx // use context from middleware stack in children
return obj.Cluster, nil
})
if err != nil {
ec.Error(ctx, err)
return graphql.Null
}
if resTmp == nil {
if !graphql.HasFieldError(ctx, fc) {
ec.Errorf(ctx, "must not be null")
}
return graphql.Null
}
res := resTmp.(string)
fc.Result = res
return ec.marshalNString2string(ctx, field.Selections, res)
}
func (ec *executionContext) fieldContext_ClusterSupport_cluster(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) {
fc = &graphql.FieldContext{
Object: "ClusterSupport",
Field: field,
IsMethod: false,
IsResolver: false,
Child: func(ctx context.Context, field graphql.CollectedField) (*graphql.FieldContext, error) {
return nil, errors.New("field of type String does not have child fields")
},
}
return fc, nil
}
func (ec *executionContext) _ClusterSupport_subClusters(ctx context.Context, field graphql.CollectedField, obj *schema.ClusterSupport) (ret graphql.Marshaler) {
fc, err := ec.fieldContext_ClusterSupport_subClusters(ctx, field)
if err != nil {
return graphql.Null
}
ctx = graphql.WithFieldContext(ctx, fc)
defer func() {
if r := recover(); r != nil {
ec.Error(ctx, ec.Recover(ctx, r))
ret = graphql.Null
}
}()
resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
ctx = rctx // use context from middleware stack in children
return obj.SubClusters, nil
})
if err != nil {
ec.Error(ctx, err)
return graphql.Null
}
if resTmp == nil {
if !graphql.HasFieldError(ctx, fc) {
ec.Errorf(ctx, "must not be null")
}
return graphql.Null
}
res := resTmp.([]string)
fc.Result = res
return ec.marshalNString2ᚕstringᚄ(ctx, field.Selections, res)
}
func (ec *executionContext) fieldContext_ClusterSupport_subClusters(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) {
fc = &graphql.FieldContext{
Object: "ClusterSupport",
Field: field,
IsMethod: false,
IsResolver: false,
Child: func(ctx context.Context, field graphql.CollectedField) (*graphql.FieldContext, error) {
return nil, errors.New("field of type String does not have child fields")
},
}
return fc, nil
}
func (ec *executionContext) _Count_name(ctx context.Context, field graphql.CollectedField, obj *model.Count) (ret graphql.Marshaler) {
fc, err := ec.fieldContext_Count_name(ctx, field)
if err != nil {
@@ -3017,6 +3181,194 @@ func (ec *executionContext) fieldContext_Footprints_metrics(ctx context.Context,
return fc, nil
}
func (ec *executionContext) _GlobalMetricListItem_name(ctx context.Context, field graphql.CollectedField, obj *schema.GlobalMetricListItem) (ret graphql.Marshaler) {
fc, err := ec.fieldContext_GlobalMetricListItem_name(ctx, field)
if err != nil {
return graphql.Null
}
ctx = graphql.WithFieldContext(ctx, fc)
defer func() {
if r := recover(); r != nil {
ec.Error(ctx, ec.Recover(ctx, r))
ret = graphql.Null
}
}()
resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
ctx = rctx // use context from middleware stack in children
return obj.Name, nil
})
if err != nil {
ec.Error(ctx, err)
return graphql.Null
}
if resTmp == nil {
if !graphql.HasFieldError(ctx, fc) {
ec.Errorf(ctx, "must not be null")
}
return graphql.Null
}
res := resTmp.(string)
fc.Result = res
return ec.marshalNString2string(ctx, field.Selections, res)
}
func (ec *executionContext) fieldContext_GlobalMetricListItem_name(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) {
fc = &graphql.FieldContext{
Object: "GlobalMetricListItem",
Field: field,
IsMethod: false,
IsResolver: false,
Child: func(ctx context.Context, field graphql.CollectedField) (*graphql.FieldContext, error) {
return nil, errors.New("field of type String does not have child fields")
},
}
return fc, nil
}
func (ec *executionContext) _GlobalMetricListItem_unit(ctx context.Context, field graphql.CollectedField, obj *schema.GlobalMetricListItem) (ret graphql.Marshaler) {
fc, err := ec.fieldContext_GlobalMetricListItem_unit(ctx, field)
if err != nil {
return graphql.Null
}
ctx = graphql.WithFieldContext(ctx, fc)
defer func() {
if r := recover(); r != nil {
ec.Error(ctx, ec.Recover(ctx, r))
ret = graphql.Null
}
}()
resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
ctx = rctx // use context from middleware stack in children
return obj.Unit, nil
})
if err != nil {
ec.Error(ctx, err)
return graphql.Null
}
if resTmp == nil {
if !graphql.HasFieldError(ctx, fc) {
ec.Errorf(ctx, "must not be null")
}
return graphql.Null
}
res := resTmp.(schema.Unit)
fc.Result = res
return ec.marshalNUnit2githubᚗcomᚋClusterCockpitᚋccᚑbackendᚋpkgᚋschemaᚐUnit(ctx, field.Selections, res)
}
func (ec *executionContext) fieldContext_GlobalMetricListItem_unit(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) {
fc = &graphql.FieldContext{
Object: "GlobalMetricListItem",
Field: field,
IsMethod: false,
IsResolver: false,
Child: func(ctx context.Context, field graphql.CollectedField) (*graphql.FieldContext, error) {
switch field.Name {
case "base":
return ec.fieldContext_Unit_base(ctx, field)
case "prefix":
return ec.fieldContext_Unit_prefix(ctx, field)
}
return nil, fmt.Errorf("no field named %q was found under type Unit", field.Name)
},
}
return fc, nil
}
func (ec *executionContext) _GlobalMetricListItem_scope(ctx context.Context, field graphql.CollectedField, obj *schema.GlobalMetricListItem) (ret graphql.Marshaler) {
fc, err := ec.fieldContext_GlobalMetricListItem_scope(ctx, field)
if err != nil {
return graphql.Null
}
ctx = graphql.WithFieldContext(ctx, fc)
defer func() {
if r := recover(); r != nil {
ec.Error(ctx, ec.Recover(ctx, r))
ret = graphql.Null
}
}()
resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
ctx = rctx // use context from middleware stack in children
return obj.Scope, nil
})
if err != nil {
ec.Error(ctx, err)
return graphql.Null
}
if resTmp == nil {
if !graphql.HasFieldError(ctx, fc) {
ec.Errorf(ctx, "must not be null")
}
return graphql.Null
}
res := resTmp.(schema.MetricScope)
fc.Result = res
return ec.marshalNMetricScope2githubᚗcomᚋClusterCockpitᚋccᚑbackendᚋpkgᚋschemaᚐMetricScope(ctx, field.Selections, res)
}
func (ec *executionContext) fieldContext_GlobalMetricListItem_scope(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) {
fc = &graphql.FieldContext{
Object: "GlobalMetricListItem",
Field: field,
IsMethod: false,
IsResolver: false,
Child: func(ctx context.Context, field graphql.CollectedField) (*graphql.FieldContext, error) {
return nil, errors.New("field of type MetricScope does not have child fields")
},
}
return fc, nil
}
func (ec *executionContext) _GlobalMetricListItem_availability(ctx context.Context, field graphql.CollectedField, obj *schema.GlobalMetricListItem) (ret graphql.Marshaler) {
fc, err := ec.fieldContext_GlobalMetricListItem_availability(ctx, field)
if err != nil {
return graphql.Null
}
ctx = graphql.WithFieldContext(ctx, fc)
defer func() {
if r := recover(); r != nil {
ec.Error(ctx, ec.Recover(ctx, r))
ret = graphql.Null
}
}()
resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
ctx = rctx // use context from middleware stack in children
return obj.Availability, nil
})
if err != nil {
ec.Error(ctx, err)
return graphql.Null
}
if resTmp == nil {
if !graphql.HasFieldError(ctx, fc) {
ec.Errorf(ctx, "must not be null")
}
return graphql.Null
}
res := resTmp.([]schema.ClusterSupport)
fc.Result = res
return ec.marshalNClusterSupport2ᚕgithubᚗcomᚋClusterCockpitᚋccᚑbackendᚋpkgᚋschemaᚐClusterSupportᚄ(ctx, field.Selections, res)
}
func (ec *executionContext) fieldContext_GlobalMetricListItem_availability(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) {
fc = &graphql.FieldContext{
Object: "GlobalMetricListItem",
Field: field,
IsMethod: false,
IsResolver: false,
Child: func(ctx context.Context, field graphql.CollectedField) (*graphql.FieldContext, error) {
switch field.Name {
case "cluster":
return ec.fieldContext_ClusterSupport_cluster(ctx, field)
case "subClusters":
return ec.fieldContext_ClusterSupport_subClusters(ctx, field)
}
return nil, fmt.Errorf("no field named %q was found under type ClusterSupport", field.Name)
},
}
return fc, nil
}
func (ec *executionContext) _HistoPoint_count(ctx context.Context, field graphql.CollectedField, obj *model.HistoPoint) (ret graphql.Marshaler) {
fc, err := ec.fieldContext_HistoPoint_count(ctx, field)
if err != nil {
@@ -7522,6 +7874,60 @@ func (ec *executionContext) fieldContext_Query_tags(ctx context.Context, field g
return fc, nil
}
func (ec *executionContext) _Query_globalMetrics(ctx context.Context, field graphql.CollectedField) (ret graphql.Marshaler) {
fc, err := ec.fieldContext_Query_globalMetrics(ctx, field)
if err != nil {
return graphql.Null
}
ctx = graphql.WithFieldContext(ctx, fc)
defer func() {
if r := recover(); r != nil {
ec.Error(ctx, ec.Recover(ctx, r))
ret = graphql.Null
}
}()
resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
ctx = rctx // use context from middleware stack in children
return ec.resolvers.Query().GlobalMetrics(rctx)
})
if err != nil {
ec.Error(ctx, err)
return graphql.Null
}
if resTmp == nil {
if !graphql.HasFieldError(ctx, fc) {
ec.Errorf(ctx, "must not be null")
}
return graphql.Null
}
res := resTmp.([]*schema.GlobalMetricListItem)
fc.Result = res
return ec.marshalNGlobalMetricListItem2ᚕᚖgithubᚗcomᚋClusterCockpitᚋccᚑbackendᚋpkgᚋschemaᚐGlobalMetricListItemᚄ(ctx, field.Selections, res)
}
func (ec *executionContext) fieldContext_Query_globalMetrics(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) {
fc = &graphql.FieldContext{
Object: "Query",
Field: field,
IsMethod: true,
IsResolver: true,
Child: func(ctx context.Context, field graphql.CollectedField) (*graphql.FieldContext, error) {
switch field.Name {
case "name":
return ec.fieldContext_GlobalMetricListItem_name(ctx, field)
case "unit":
return ec.fieldContext_GlobalMetricListItem_unit(ctx, field)
case "scope":
return ec.fieldContext_GlobalMetricListItem_scope(ctx, field)
case "availability":
return ec.fieldContext_GlobalMetricListItem_availability(ctx, field)
}
return nil, fmt.Errorf("no field named %q was found under type GlobalMetricListItem", field.Name)
},
}
return fc, nil
}
func (ec *executionContext) _Query_user(ctx context.Context, field graphql.CollectedField) (ret graphql.Marshaler) {
fc, err := ec.fieldContext_Query_user(ctx, field)
if err != nil {
@@ -12809,6 +13215,50 @@ func (ec *executionContext) _Cluster(ctx context.Context, sel ast.SelectionSet,
return out
}
var clusterSupportImplementors = []string{"ClusterSupport"}
func (ec *executionContext) _ClusterSupport(ctx context.Context, sel ast.SelectionSet, obj *schema.ClusterSupport) graphql.Marshaler {
fields := graphql.CollectFields(ec.OperationContext, sel, clusterSupportImplementors)
out := graphql.NewFieldSet(fields)
deferred := make(map[string]*graphql.FieldSet)
for i, field := range fields {
switch field.Name {
case "__typename":
out.Values[i] = graphql.MarshalString("ClusterSupport")
case "cluster":
out.Values[i] = ec._ClusterSupport_cluster(ctx, field, obj)
if out.Values[i] == graphql.Null {
out.Invalids++
}
case "subClusters":
out.Values[i] = ec._ClusterSupport_subClusters(ctx, field, obj)
if out.Values[i] == graphql.Null {
out.Invalids++
}
default:
panic("unknown field " + strconv.Quote(field.Name))
}
}
out.Dispatch(ctx)
if out.Invalids > 0 {
return graphql.Null
}
atomic.AddInt32(&ec.deferred, int32(len(deferred)))
for label, dfs := range deferred {
ec.processDeferredGroup(graphql.DeferredGroup{
Label: label,
Path: graphql.GetPath(ctx),
FieldSet: dfs,
Context: ctx,
})
}
return out
}
var countImplementors = []string{"Count"}
func (ec *executionContext) _Count(ctx context.Context, sel ast.SelectionSet, obj *model.Count) graphql.Marshaler {
@@ -12897,6 +13347,60 @@ func (ec *executionContext) _Footprints(ctx context.Context, sel ast.SelectionSe
return out
}
var globalMetricListItemImplementors = []string{"GlobalMetricListItem"}
func (ec *executionContext) _GlobalMetricListItem(ctx context.Context, sel ast.SelectionSet, obj *schema.GlobalMetricListItem) graphql.Marshaler {
fields := graphql.CollectFields(ec.OperationContext, sel, globalMetricListItemImplementors)
out := graphql.NewFieldSet(fields)
deferred := make(map[string]*graphql.FieldSet)
for i, field := range fields {
switch field.Name {
case "__typename":
out.Values[i] = graphql.MarshalString("GlobalMetricListItem")
case "name":
out.Values[i] = ec._GlobalMetricListItem_name(ctx, field, obj)
if out.Values[i] == graphql.Null {
out.Invalids++
}
case "unit":
out.Values[i] = ec._GlobalMetricListItem_unit(ctx, field, obj)
if out.Values[i] == graphql.Null {
out.Invalids++
}
case "scope":
out.Values[i] = ec._GlobalMetricListItem_scope(ctx, field, obj)
if out.Values[i] == graphql.Null {
out.Invalids++
}
case "availability":
out.Values[i] = ec._GlobalMetricListItem_availability(ctx, field, obj)
if out.Values[i] == graphql.Null {
out.Invalids++
}
default:
panic("unknown field " + strconv.Quote(field.Name))
}
}
out.Dispatch(ctx)
if out.Invalids > 0 {
return graphql.Null
}
atomic.AddInt32(&ec.deferred, int32(len(deferred)))
for label, dfs := range deferred {
ec.processDeferredGroup(graphql.DeferredGroup{
Label: label,
Path: graphql.GetPath(ctx),
FieldSet: dfs,
Context: ctx,
})
}
return out
}
var histoPointImplementors = []string{"HistoPoint"}
func (ec *executionContext) _HistoPoint(ctx context.Context, sel ast.SelectionSet, obj *model.HistoPoint) graphql.Marshaler {
@@ -14156,6 +14660,28 @@ func (ec *executionContext) _Query(ctx context.Context, sel ast.SelectionSet) gr
func(ctx context.Context) graphql.Marshaler { return innerFunc(ctx, out) })
}
out.Concurrently(i, func(ctx context.Context) graphql.Marshaler { return rrm(innerCtx) })
case "globalMetrics":
field := field
innerFunc := func(ctx context.Context, fs *graphql.FieldSet) (res graphql.Marshaler) {
defer func() {
if r := recover(); r != nil {
ec.Error(ctx, ec.Recover(ctx, r))
}
}()
res = ec._Query_globalMetrics(ctx, field)
if res == graphql.Null {
atomic.AddUint32(&fs.Invalids, 1)
}
return res
}
rrm := func(ctx context.Context) graphql.Marshaler {
return ec.OperationContext.RootResolverMiddleware(ctx,
func(ctx context.Context) graphql.Marshaler { return innerFunc(ctx, out) })
}
out.Concurrently(i, func(ctx context.Context) graphql.Marshaler { return rrm(innerCtx) })
case "user":
field := field
@@ -15386,6 +15912,54 @@ func (ec *executionContext) marshalNCluster2ᚖgithubᚗcomᚋClusterCockpitᚋc
return ec._Cluster(ctx, sel, v)
}
func (ec *executionContext) marshalNClusterSupport2githubᚗcomᚋClusterCockpitᚋccᚑbackendᚋpkgᚋschemaᚐClusterSupport(ctx context.Context, sel ast.SelectionSet, v schema.ClusterSupport) graphql.Marshaler {
return ec._ClusterSupport(ctx, sel, &v)
}
func (ec *executionContext) marshalNClusterSupport2ᚕgithubᚗcomᚋClusterCockpitᚋccᚑbackendᚋpkgᚋschemaᚐClusterSupportᚄ(ctx context.Context, sel ast.SelectionSet, v []schema.ClusterSupport) graphql.Marshaler {
ret := make(graphql.Array, len(v))
var wg sync.WaitGroup
isLen1 := len(v) == 1
if !isLen1 {
wg.Add(len(v))
}
for i := range v {
i := i
fc := &graphql.FieldContext{
Index: &i,
Result: &v[i],
}
ctx := graphql.WithFieldContext(ctx, fc)
f := func(i int) {
defer func() {
if r := recover(); r != nil {
ec.Error(ctx, ec.Recover(ctx, r))
ret = nil
}
}()
if !isLen1 {
defer wg.Done()
}
ret[i] = ec.marshalNClusterSupport2githubᚗcomᚋClusterCockpitᚋccᚑbackendᚋpkgᚋschemaᚐClusterSupport(ctx, sel, v[i])
}
if isLen1 {
f(i)
} else {
go f(i)
}
}
wg.Wait()
for _, e := range ret {
if e == graphql.Null {
return graphql.Null
}
}
return ret
}
func (ec *executionContext) marshalNCount2ᚕᚖgithubᚗcomᚋClusterCockpitᚋccᚑbackendᚋinternalᚋgraphᚋmodelᚐCountᚄ(ctx context.Context, sel ast.SelectionSet, v []*model.Count) graphql.Marshaler {
ret := make(graphql.Array, len(v))
var wg sync.WaitGroup
@@ -15519,6 +16093,60 @@ func (ec *executionContext) marshalNFloat2ᚕᚕfloat64ᚄ(ctx context.Context,
return ret
}
func (ec *executionContext) marshalNGlobalMetricListItem2ᚕᚖgithubᚗcomᚋClusterCockpitᚋccᚑbackendᚋpkgᚋschemaᚐGlobalMetricListItemᚄ(ctx context.Context, sel ast.SelectionSet, v []*schema.GlobalMetricListItem) graphql.Marshaler {
ret := make(graphql.Array, len(v))
var wg sync.WaitGroup
isLen1 := len(v) == 1
if !isLen1 {
wg.Add(len(v))
}
for i := range v {
i := i
fc := &graphql.FieldContext{
Index: &i,
Result: &v[i],
}
ctx := graphql.WithFieldContext(ctx, fc)
f := func(i int) {
defer func() {
if r := recover(); r != nil {
ec.Error(ctx, ec.Recover(ctx, r))
ret = nil
}
}()
if !isLen1 {
defer wg.Done()
}
ret[i] = ec.marshalNGlobalMetricListItem2ᚖgithubᚗcomᚋClusterCockpitᚋccᚑbackendᚋpkgᚋschemaᚐGlobalMetricListItem(ctx, sel, v[i])
}
if isLen1 {
f(i)
} else {
go f(i)
}
}
wg.Wait()
for _, e := range ret {
if e == graphql.Null {
return graphql.Null
}
}
return ret
}
func (ec *executionContext) marshalNGlobalMetricListItem2ᚖgithubᚗcomᚋClusterCockpitᚋccᚑbackendᚋpkgᚋschemaᚐGlobalMetricListItem(ctx context.Context, sel ast.SelectionSet, v *schema.GlobalMetricListItem) graphql.Marshaler {
if v == nil {
if !graphql.HasFieldError(ctx, graphql.GetFieldContext(ctx)) {
ec.Errorf(ctx, "the requested element is null which the schema does not allow")
}
return graphql.Null
}
return ec._GlobalMetricListItem(ctx, sel, v)
}
func (ec *executionContext) marshalNHistoPoint2ᚕᚖgithubᚗcomᚋClusterCockpitᚋccᚑbackendᚋinternalᚋgraphᚋmodelᚐHistoPointᚄ(ctx context.Context, sel ast.SelectionSet, v []*model.HistoPoint) graphql.Marshaler {
ret := make(graphql.Array, len(v))
var wg sync.WaitGroup


@@ -150,6 +150,11 @@ func (r *queryResolver) Tags(ctx context.Context) ([]*schema.Tag, error) {
return r.Repo.GetTags(nil)
}
// GlobalMetrics is the resolver for the globalMetrics field.
func (r *queryResolver) GlobalMetrics(ctx context.Context) ([]*schema.GlobalMetricListItem, error) {
return archive.GlobalMetricList, nil
}
// User is the resolver for the user field.
func (r *queryResolver) User(ctx context.Context, username string) (*model.User, error) {
return repository.GetUserRepository().FetchUserInCtx(ctx, username)
@@ -414,9 +419,11 @@ func (r *Resolver) Query() generated.QueryResolver { return &queryResolver{r} }
// SubCluster returns generated.SubClusterResolver implementation.
func (r *Resolver) SubCluster() generated.SubClusterResolver { return &subClusterResolver{r} }
type (
	clusterResolver     struct{ *Resolver }
	jobResolver         struct{ *Resolver }
	metricValueResolver struct{ *Resolver }
	mutationResolver    struct{ *Resolver }
	queryResolver       struct{ *Resolver }
	subClusterResolver  struct{ *Resolver }
)
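
Because the GlobalMetrics resolver above only hands out the list that pkg/archive builds during initialization, it can be exercised without a database. A hypothetical test sketch (not part of this commit; the test name and setup are assumptions):

package graph_test

import (
	"context"
	"testing"

	"github.com/ClusterCockpit/cc-backend/internal/graph"
	"github.com/ClusterCockpit/cc-backend/pkg/archive"
)

func TestGlobalMetricsResolver(t *testing.T) {
	// GlobalMetrics does not touch the database, so an empty Resolver suffices.
	r := &graph.Resolver{}
	items, err := r.Query().GlobalMetrics(context.Background())
	if err != nil {
		t.Fatal(err)
	}
	// The resolver returns the slice populated by archive.Init / initClusterConfig.
	if len(items) != len(archive.GlobalMetricList) {
		t.Fatalf("expected %d metrics, got %d", len(archive.GlobalMetricList), len(items))
	}
}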


@@ -7,6 +7,7 @@ package archive
import (
	"encoding/json"
	"fmt"
	"sync"
	"github.com/ClusterCockpit/cc-backend/pkg/log"
	"github.com/ClusterCockpit/cc-backend/pkg/lrucache"
@@ -53,21 +54,25 @@ type JobContainer struct {
}
var (
	initOnce   sync.Once
	cache      *lrucache.Cache = lrucache.New(128 * 1024 * 1024)
	ar         ArchiveBackend
	useArchive bool
)
func Init(rawConfig json.RawMessage, disableArchive bool) error {
	var err error
	initOnce.Do(func() {
		useArchive = !disableArchive
		var cfg struct {
			Kind string `json:"kind"`
		}
		if err = json.Unmarshal(rawConfig, &cfg); err != nil {
			log.Warn("Error while unmarshaling raw config json")
			return
		}
		switch cfg.Kind {
@@ -76,17 +81,21 @@
		// case "s3":
		// ar = &S3Archive{}
		default:
			err = fmt.Errorf("ARCHIVE/ARCHIVE > unkown archive backend '%s''", cfg.Kind)
		}
		var version uint64
		version, err = ar.Init(rawConfig)
		if err != nil {
			log.Error("Error while initializing archiveBackend")
			return
		}
		log.Infof("Load archive version %d", version)
		err = initClusterConfig()
	})
	return err
}
func GetHandle() ArchiveBackend {
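
For context, wrapping the body of Init in sync.Once (as shown above) makes repeated initialization calls harmless: only the first call performs the setup, and later calls return immediately. A standalone sketch of the pattern:

package main

import (
	"fmt"
	"sync"
)

var setupOnce sync.Once

func initArchive() error {
	var err error
	setupOnce.Do(func() {
		// Real setup work would run here and may assign err.
		fmt.Println("running setup exactly once")
	})
	// On every call after the first, the closure is skipped and err stays nil.
	return err
}

func main() {
	fmt.Println(initArchive()) // prints the setup message, then <nil>
	fmt.Println(initArchive()) // no setup message, just <nil>
}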


@@ -14,12 +14,14 @@ import (
var (
Clusters []*schema.Cluster
GlobalMetricList []*schema.GlobalMetricListItem
nodeLists map[string]map[string]NodeList
)
func initClusterConfig() error {
Clusters = []*schema.Cluster{}
nodeLists = map[string]map[string]NodeList{}
metricLookup := make(map[string]schema.GlobalMetricListItem)
for _, c := range ar.GetClusters() {
@@ -51,6 +53,12 @@
return errors.New("cluster.metricConfig.scope must be a valid scope ('node', 'scocket', ...)")
}
ml, ok := metricLookup[mc.Name]
if !ok {
metricLookup[mc.Name] = schema.GlobalMetricListItem{Name: mc.Name, Scope: mc.Scope, Unit: mc.Unit}
ml = metricLookup[mc.Name]
}
availability := schema.ClusterSupport{Cluster: cluster.Name}
scLookup := make(map[string]*schema.SubClusterConfig)
for _, scc := range mc.SubClusters {
@@ -63,6 +71,7 @@
if cfg, ok := scLookup[sc.Name]; ok {
if !cfg.Remove {
availability.SubClusters = append(availability.SubClusters, sc.Name)
newMetric.Peak = cfg.Peak
newMetric.Peak = cfg.Peak
newMetric.Normal = cfg.Normal
@@ -74,17 +83,25 @@
if newMetric.Footprint {
sc.Footprint = append(sc.Footprint, newMetric.Name)
}
if newMetric.Energy {
sc.EnergyFootprint = append(sc.EnergyFootprint, newMetric.Name)
}
}
} else {
availability.SubClusters = append(availability.SubClusters, sc.Name)
sc.MetricConfig = append(sc.MetricConfig, *newMetric)
if newMetric.Footprint {
sc.Footprint = append(sc.Footprint, newMetric.Name)
}
if newMetric.Energy {
sc.EnergyFootprint = append(sc.EnergyFootprint, newMetric.Name)
}
}
}
ml.Availability = append(metricLookup[mc.Name].Availability, availability)
metricLookup[mc.Name] = ml
}
Clusters = append(Clusters, cluster)
@@ -102,6 +119,10 @@
}
}
for _, ml := range metricLookup {
GlobalMetricList = append(GlobalMetricList, &ml)
}
return nil
}
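
The aggregation above boils down to: for every metric name, record one ClusterSupport entry per cluster that provides it, then flatten the lookup map into GlobalMetricList. A minimal, self-contained sketch of that idea with made-up cluster and metric names (the types are simplified stand-ins for the schema package):

package main

import "fmt"

type clusterSupport struct {
	Cluster     string
	SubClusters []string
}

type globalMetric struct {
	Name         string
	Availability []clusterSupport
}

func main() {
	// Hypothetical input: metric name -> cluster -> subclusters providing it.
	input := map[string]map[string][]string{
		"flops_any": {"clusterA": {"sub0", "sub1"}, "clusterB": {"main"}},
		"mem_bw":    {"clusterB": {"main"}},
	}

	lookup := make(map[string]globalMetric)
	for metric, clusters := range input {
		ml := lookup[metric]
		ml.Name = metric
		for cluster, subClusters := range clusters {
			ml.Availability = append(ml.Availability,
				clusterSupport{Cluster: cluster, SubClusters: subClusters})
		}
		lookup[metric] = ml
	}

	// Flatten the lookup into a list, copying each value before taking its address.
	var globalMetricList []*globalMetric
	for _, ml := range lookup {
		ml := ml
		globalMetricList = append(globalMetricList, &ml)
	}

	for _, ml := range globalMetricList {
		fmt.Println(ml.Name, "available on", len(ml.Availability), "cluster(s)")
	}
}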


@@ -27,4 +27,6 @@ func TestClusterConfig(t *testing.T) {
if len(sc.MetricConfig) != 15 {
t.Fail()
}
// spew.Dump(archive.GlobalMetricList)
}


@@ -39,6 +39,7 @@ type SubCluster struct {
MemoryBandwidth MetricValue `json:"memoryBandwidth"`
MetricConfig []MetricConfig `json:"metricConfig,omitempty"`
Footprint []string `json:"footprint,omitempty"`
EnergyFootprint []string `json:"energyFootprint,omitempty"`
SocketsPerNode int `json:"socketsPerNode"`
CoresPerSocket int `json:"coresPerSocket"`
ThreadsPerCore int `json:"threadsPerCore"`
@@ -66,6 +67,7 @@ type MetricConfig struct {
Caution float64 `json:"caution"`
Alert float64 `json:"alert"`
Footprint bool `json:"footprint"`
Energy bool `json:"energy"`
}
type Cluster struct {
@@ -74,6 +76,18 @@
SubClusters []*SubCluster `json:"subClusters"`
}
type ClusterSupport struct {
Cluster string `json:"cluster"`
SubClusters []string `json:"subclusters"`
}
type GlobalMetricListItem struct {
Name string `json:"name"`
Unit Unit `json:"unit"`
Scope MetricScope `json:"scope"`
Availability []ClusterSupport `json:"availability"`
}
// Return a list of socket IDs given a list of hwthread IDs. Even if just one
// hwthread is in that socket, add it to the list. If no hwthreads other than
// those in the argument list are assigned to one of the sockets in the first
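
For reference, the JSON produced by the two new structs looks roughly like the sketch below; it uses simplified stand-ins for Unit and MetricScope (which live elsewhere in pkg/schema) and made-up values. Note that ClusterSupport serializes its subcluster list under the lowercase key "subclusters", matching the struct tag above.

package main

import (
	"encoding/json"
	"fmt"
)

// Simplified stand-ins for the schema types; Unit and MetricScope are
// represented as plain strings here.
type ClusterSupport struct {
	Cluster     string   `json:"cluster"`
	SubClusters []string `json:"subclusters"`
}

type GlobalMetricListItem struct {
	Name         string           `json:"name"`
	Unit         string           `json:"unit"`
	Scope        string           `json:"scope"`
	Availability []ClusterSupport `json:"availability"`
}

func main() {
	item := GlobalMetricListItem{
		Name:  "flops_any", // made-up example metric
		Unit:  "Flops/s",
		Scope: "node",
		Availability: []ClusterSupport{
			{Cluster: "clusterA", SubClusters: []string{"sub0", "sub1"}},
		},
	}
	out, err := json.MarshalIndent(item, "", "  ")
	if err != nil {
		panic(err)
	}
	fmt.Println(string(out))
}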