mirror of
https://github.com/ClusterCockpit/cc-backend
synced 2024-11-10 08:57:25 +01:00
fix: analysis metric histogram normalized by scope
- native acc metrics normalized by accHours
- native core metrics normalized by coreHours
This commit is contained in:
parent
2f35482aff
commit
6a1e35107f
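In short: the per-job weights used to normalize the analysis histograms are now chosen per metric scope instead of always being node hours. A minimal sketch of the rule, distilled from the resolver change below (the helper name timeWeight is illustrative and not part of this commit):

// timeWeight mirrors the weighting introduced below: one weight per job
// and scope. The 1.0 fallback matches the resolver's else branches for
// the accelerator and core variants (jobs without accelerators or
// without a known hardware-thread count get a neutral weight).
func timeWeight(duration, resourceCount int32) schema.Float {
	if resourceCount <= 0 {
		return schema.Float(1.0)
	}
	return schema.Float(float64(duration) / 60.0 * float64(resourceCount))
}

// nodeHours := timeWeight(job.Duration, job.NumNodes)
// accHours  := timeWeight(job.Duration, job.NumAcc)
// coreHours := timeWeight(job.Duration, int32(numCoresForJob(job)))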
@@ -156,12 +156,18 @@ type MetricFootprints {
 }
 
 type Footprints {
-  timeweights: [NullableFloat!]!
+  timeWeights: TimeWeights!
   metrics: [MetricFootprints!]!
 }
 
+type TimeWeights {
+  nodeHours: [NullableFloat!]!
+  accHours: [NullableFloat!]!
+  coreHours: [NullableFloat!]!
+}
+
 enum Aggregate { USER, PROJECT, CLUSTER }
-enum Weights { NODE_COUNT, NODE_HOURS }
+enum Weights { NODE_COUNT, NODE_HOURS, CORE_COUNT, CORE_HOURS }
 
 type NodeMetrics {
   host: String!
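Because timeWeights is now an object type rather than a flat [NullableFloat!]!, clients must select its sub-fields explicitly. A query sketch, wrapped in a Go constant (the constant name is illustrative; the field names are taken verbatim from the schema above and the frontend change further down):

// jobsFootprintsQuery shows the shape a client request takes against
// the changed schema.
const jobsFootprintsQuery = `
query($jobFilters: [JobFilter!]!, $metrics: [String!]!) {
	jobsFootprints(filter: $jobFilters, metrics: $metrics) {
		timeWeights { nodeHours, accHours, coreHours }
		metrics { metric, data }
	}
}`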
@@ -69,7 +69,7 @@ type ComplexityRoot struct {
 
 	Footprints struct {
 		Metrics     func(childComplexity int) int
-		Timeweights func(childComplexity int) int
+		TimeWeights func(childComplexity int) int
 	}
 
 	HistoPoint struct {
@@ -265,6 +265,12 @@ type ComplexityRoot struct {
 		To func(childComplexity int) int
 	}
 
+	TimeWeights struct {
+		AccHours  func(childComplexity int) int
+		CoreHours func(childComplexity int) int
+		NodeHours func(childComplexity int) int
+	}
+
 	Topology struct {
 		Accelerators func(childComplexity int) int
 		Core         func(childComplexity int) int
@@ -406,12 +412,12 @@ func (e *executableSchema) Complexity(typeName, field string, childComplexity in
 
 		return e.complexity.Footprints.Metrics(childComplexity), true
 
-	case "Footprints.timeweights":
-		if e.complexity.Footprints.Timeweights == nil {
+	case "Footprints.timeWeights":
+		if e.complexity.Footprints.TimeWeights == nil {
 			break
 		}
 
-		return e.complexity.Footprints.Timeweights(childComplexity), true
+		return e.complexity.Footprints.TimeWeights(childComplexity), true
 
 	case "HistoPoint.count":
 		if e.complexity.HistoPoint.Count == nil {
@@ -1356,6 +1362,27 @@ func (e *executableSchema) Complexity(typeName, field string, childComplexity in
 
 		return e.complexity.TimeRangeOutput.To(childComplexity), true
 
+	case "TimeWeights.accHours":
+		if e.complexity.TimeWeights.AccHours == nil {
+			break
+		}
+
+		return e.complexity.TimeWeights.AccHours(childComplexity), true
+
+	case "TimeWeights.coreHours":
+		if e.complexity.TimeWeights.CoreHours == nil {
+			break
+		}
+
+		return e.complexity.TimeWeights.CoreHours(childComplexity), true
+
+	case "TimeWeights.nodeHours":
+		if e.complexity.TimeWeights.NodeHours == nil {
+			break
+		}
+
+		return e.complexity.TimeWeights.NodeHours(childComplexity), true
+
 	case "Topology.accelerators":
 		if e.complexity.Topology.Accelerators == nil {
 			break
@@ -1703,12 +1730,18 @@ type MetricFootprints {
 }
 
 type Footprints {
-  timeweights: [NullableFloat!]!
+  timeWeights: TimeWeights!
   metrics: [MetricFootprints!]!
 }
 
+type TimeWeights {
+  nodeHours: [NullableFloat!]!
+  accHours: [NullableFloat!]!
+  coreHours: [NullableFloat!]!
+}
+
 enum Aggregate { USER, PROJECT, CLUSTER }
-enum Weights { NODE_COUNT, NODE_HOURS }
+enum Weights { NODE_COUNT, NODE_HOURS, CORE_COUNT, CORE_HOURS }
 
 type NodeMetrics {
   host: String!
@@ -1836,7 +1869,7 @@ type JobsStatistics {
   shortJobs: Int!      # Number of jobs with a duration of less than duration
   totalWalltime: Int!  # Sum of the duration of all matched jobs in hours
   totalNodeHours: Int! # Sum of the node hours of all matched jobs
-  totalCoreHours: Int! # Sum of the core hours of all matched jobs
+  totalCoreHours: Int! # Sum of the core hours of all matched jobs <-- use this instead of totalJobs in the totalJobs histograms + weighted in the analysis metric histograms
   totalAccHours: Int!  # Sum of the gpu hours of all matched jobs
   histDuration: [HistoPoint!]! # value: hour, count: number of jobs with a rounded duration of value
   histNumNodes: [HistoPoint!]! # value: number of nodes, count: number of jobs with that number of nodes
@@ -2790,8 +2823,8 @@ func (ec *executionContext) fieldContext_Count_count(ctx context.Context, field
 	return fc, nil
 }
 
-func (ec *executionContext) _Footprints_timeweights(ctx context.Context, field graphql.CollectedField, obj *model.Footprints) (ret graphql.Marshaler) {
-	fc, err := ec.fieldContext_Footprints_timeweights(ctx, field)
+func (ec *executionContext) _Footprints_timeWeights(ctx context.Context, field graphql.CollectedField, obj *model.Footprints) (ret graphql.Marshaler) {
+	fc, err := ec.fieldContext_Footprints_timeWeights(ctx, field)
 	if err != nil {
 		return graphql.Null
 	}
@@ -2804,7 +2837,7 @@ func (ec *executionContext) _Footprints_timeweights(ctx context.Context, field g
 	}()
 	resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
 		ctx = rctx // use context from middleware stack in children
-		return obj.Timeweights, nil
+		return obj.TimeWeights, nil
 	})
 	if err != nil {
 		ec.Error(ctx, err)
@@ -2816,19 +2849,27 @@ func (ec *executionContext) _Footprints_timeweights(ctx context.Context, field g
 		}
 		return graphql.Null
 	}
-	res := resTmp.([]schema.Float)
+	res := resTmp.(*model.TimeWeights)
 	fc.Result = res
-	return ec.marshalNNullableFloat2ᚕgithubᚗcomᚋClusterCockpitᚋccᚑbackendᚋpkgᚋschemaᚐFloatᚄ(ctx, field.Selections, res)
+	return ec.marshalNTimeWeights2ᚖgithubᚗcomᚋClusterCockpitᚋccᚑbackendᚋinternalᚋgraphᚋmodelᚐTimeWeights(ctx, field.Selections, res)
 }
 
-func (ec *executionContext) fieldContext_Footprints_timeweights(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) {
+func (ec *executionContext) fieldContext_Footprints_timeWeights(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) {
 	fc = &graphql.FieldContext{
 		Object:     "Footprints",
 		Field:      field,
 		IsMethod:   false,
 		IsResolver: false,
 		Child: func(ctx context.Context, field graphql.CollectedField) (*graphql.FieldContext, error) {
-			return nil, errors.New("field of type NullableFloat does not have child fields")
+			switch field.Name {
+			case "nodeHours":
+				return ec.fieldContext_TimeWeights_nodeHours(ctx, field)
+			case "accHours":
+				return ec.fieldContext_TimeWeights_accHours(ctx, field)
+			case "coreHours":
+				return ec.fieldContext_TimeWeights_coreHours(ctx, field)
+			}
+			return nil, fmt.Errorf("no field named %q was found under type TimeWeights", field.Name)
 		},
 	}
 	return fc, nil
@@ -6994,8 +7035,8 @@ func (ec *executionContext) fieldContext_Query_jobsFootprints(ctx context.Contex
 		IsResolver: true,
 		Child: func(ctx context.Context, field graphql.CollectedField) (*graphql.FieldContext, error) {
 			switch field.Name {
-			case "timeweights":
-				return ec.fieldContext_Footprints_timeweights(ctx, field)
+			case "timeWeights":
+				return ec.fieldContext_Footprints_timeWeights(ctx, field)
 			case "metrics":
 				return ec.fieldContext_Footprints_metrics(ctx, field)
 			}
@@ -8930,6 +8971,138 @@ func (ec *executionContext) fieldContext_TimeRangeOutput_to(ctx context.Context,
 	return fc, nil
 }
 
+func (ec *executionContext) _TimeWeights_nodeHours(ctx context.Context, field graphql.CollectedField, obj *model.TimeWeights) (ret graphql.Marshaler) {
+	fc, err := ec.fieldContext_TimeWeights_nodeHours(ctx, field)
+	if err != nil {
+		return graphql.Null
+	}
+	ctx = graphql.WithFieldContext(ctx, fc)
+	defer func() {
+		if r := recover(); r != nil {
+			ec.Error(ctx, ec.Recover(ctx, r))
+			ret = graphql.Null
+		}
+	}()
+	resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
+		ctx = rctx // use context from middleware stack in children
+		return obj.NodeHours, nil
+	})
+	if err != nil {
+		ec.Error(ctx, err)
+		return graphql.Null
+	}
+	if resTmp == nil {
+		if !graphql.HasFieldError(ctx, fc) {
+			ec.Errorf(ctx, "must not be null")
+		}
+		return graphql.Null
+	}
+	res := resTmp.([]schema.Float)
+	fc.Result = res
+	return ec.marshalNNullableFloat2ᚕgithubᚗcomᚋClusterCockpitᚋccᚑbackendᚋpkgᚋschemaᚐFloatᚄ(ctx, field.Selections, res)
+}
+
+func (ec *executionContext) fieldContext_TimeWeights_nodeHours(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) {
+	fc = &graphql.FieldContext{
+		Object:     "TimeWeights",
+		Field:      field,
+		IsMethod:   false,
+		IsResolver: false,
+		Child: func(ctx context.Context, field graphql.CollectedField) (*graphql.FieldContext, error) {
+			return nil, errors.New("field of type NullableFloat does not have child fields")
+		},
+	}
+	return fc, nil
+}
+
+func (ec *executionContext) _TimeWeights_accHours(ctx context.Context, field graphql.CollectedField, obj *model.TimeWeights) (ret graphql.Marshaler) {
+	fc, err := ec.fieldContext_TimeWeights_accHours(ctx, field)
+	if err != nil {
+		return graphql.Null
+	}
+	ctx = graphql.WithFieldContext(ctx, fc)
+	defer func() {
+		if r := recover(); r != nil {
+			ec.Error(ctx, ec.Recover(ctx, r))
+			ret = graphql.Null
+		}
+	}()
+	resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
+		ctx = rctx // use context from middleware stack in children
+		return obj.AccHours, nil
+	})
+	if err != nil {
+		ec.Error(ctx, err)
+		return graphql.Null
+	}
+	if resTmp == nil {
+		if !graphql.HasFieldError(ctx, fc) {
+			ec.Errorf(ctx, "must not be null")
+		}
+		return graphql.Null
+	}
+	res := resTmp.([]schema.Float)
+	fc.Result = res
+	return ec.marshalNNullableFloat2ᚕgithubᚗcomᚋClusterCockpitᚋccᚑbackendᚋpkgᚋschemaᚐFloatᚄ(ctx, field.Selections, res)
+}
+
+func (ec *executionContext) fieldContext_TimeWeights_accHours(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) {
+	fc = &graphql.FieldContext{
+		Object:     "TimeWeights",
+		Field:      field,
+		IsMethod:   false,
+		IsResolver: false,
+		Child: func(ctx context.Context, field graphql.CollectedField) (*graphql.FieldContext, error) {
+			return nil, errors.New("field of type NullableFloat does not have child fields")
+		},
+	}
+	return fc, nil
+}
+
+func (ec *executionContext) _TimeWeights_coreHours(ctx context.Context, field graphql.CollectedField, obj *model.TimeWeights) (ret graphql.Marshaler) {
+	fc, err := ec.fieldContext_TimeWeights_coreHours(ctx, field)
+	if err != nil {
+		return graphql.Null
+	}
+	ctx = graphql.WithFieldContext(ctx, fc)
+	defer func() {
+		if r := recover(); r != nil {
+			ec.Error(ctx, ec.Recover(ctx, r))
+			ret = graphql.Null
+		}
+	}()
+	resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
+		ctx = rctx // use context from middleware stack in children
+		return obj.CoreHours, nil
+	})
+	if err != nil {
+		ec.Error(ctx, err)
+		return graphql.Null
+	}
+	if resTmp == nil {
+		if !graphql.HasFieldError(ctx, fc) {
+			ec.Errorf(ctx, "must not be null")
+		}
+		return graphql.Null
+	}
+	res := resTmp.([]schema.Float)
+	fc.Result = res
+	return ec.marshalNNullableFloat2ᚕgithubᚗcomᚋClusterCockpitᚋccᚑbackendᚋpkgᚋschemaᚐFloatᚄ(ctx, field.Selections, res)
+}
+
+func (ec *executionContext) fieldContext_TimeWeights_coreHours(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) {
+	fc = &graphql.FieldContext{
+		Object:     "TimeWeights",
+		Field:      field,
+		IsMethod:   false,
+		IsResolver: false,
+		Child: func(ctx context.Context, field graphql.CollectedField) (*graphql.FieldContext, error) {
+			return nil, errors.New("field of type NullableFloat does not have child fields")
+		},
+	}
+	return fc, nil
+}
+
 func (ec *executionContext) _Topology_node(ctx context.Context, field graphql.CollectedField, obj *schema.Topology) (ret graphql.Marshaler) {
 	fc, err := ec.fieldContext_Topology_node(ctx, field)
 	if err != nil {
@@ -11848,10 +12021,8 @@ func (ec *executionContext) _Footprints(ctx context.Context, sel ast.SelectionSe
 		switch field.Name {
 		case "__typename":
 			out.Values[i] = graphql.MarshalString("Footprints")
-		case "timeweights":
-			out.Values[i] = ec._Footprints_timeweights(ctx, field, obj)
-
-
+		case "timeWeights":
+			out.Values[i] = ec._Footprints_timeWeights(ctx, field, obj)
 			if out.Values[i] == graphql.Null {
 				out.Invalids++
 			}
@@ -13600,6 +13771,55 @@ func (ec *executionContext) _TimeRangeOutput(ctx context.Context, sel ast.Select
 	return out
 }
 
+var timeWeightsImplementors = []string{"TimeWeights"}
+
+func (ec *executionContext) _TimeWeights(ctx context.Context, sel ast.SelectionSet, obj *model.TimeWeights) graphql.Marshaler {
+	fields := graphql.CollectFields(ec.OperationContext, sel, timeWeightsImplementors)
+
+	out := graphql.NewFieldSet(fields)
+	deferred := make(map[string]*graphql.FieldSet)
+	for i, field := range fields {
+		switch field.Name {
+		case "__typename":
+			out.Values[i] = graphql.MarshalString("TimeWeights")
+		case "nodeHours":
+			out.Values[i] = ec._TimeWeights_nodeHours(ctx, field, obj)
+			if out.Values[i] == graphql.Null {
+				out.Invalids++
+			}
+		case "accHours":
+			out.Values[i] = ec._TimeWeights_accHours(ctx, field, obj)
+			if out.Values[i] == graphql.Null {
+				out.Invalids++
+			}
+		case "coreHours":
+			out.Values[i] = ec._TimeWeights_coreHours(ctx, field, obj)
+			if out.Values[i] == graphql.Null {
+				out.Invalids++
+			}
+		default:
+			panic("unknown field " + strconv.Quote(field.Name))
+		}
+	}
+	out.Dispatch(ctx)
+	if out.Invalids > 0 {
+		return graphql.Null
+	}
+
+	atomic.AddInt32(&ec.deferred, int32(len(deferred)))
+
+	for label, dfs := range deferred {
+		ec.processDeferredGroup(graphql.DeferredGroup{
+			Label:    label,
+			Path:     graphql.GetPath(ctx),
+			FieldSet: dfs,
+			Context:  ctx,
+		})
+	}
+
+	return out
+}
+
 var topologyImplementors = []string{"Topology"}
 
 func (ec *executionContext) _Topology(ctx context.Context, sel ast.SelectionSet, obj *schema.Topology) graphql.Marshaler {
@@ -15333,6 +15553,16 @@ func (ec *executionContext) marshalNTime2timeᚐTime(ctx context.Context, sel as
 	return res
 }
 
+func (ec *executionContext) marshalNTimeWeights2ᚖgithubᚗcomᚋClusterCockpitᚋccᚑbackendᚋinternalᚋgraphᚋmodelᚐTimeWeights(ctx context.Context, sel ast.SelectionSet, v *model.TimeWeights) graphql.Marshaler {
+	if v == nil {
+		if !graphql.HasFieldError(ctx, graphql.GetFieldContext(ctx)) {
+			ec.Errorf(ctx, "the requested element is null which the schema does not allow")
+		}
+		return graphql.Null
+	}
+	return ec._TimeWeights(ctx, sel, v)
+}
+
 func (ec *executionContext) marshalNTopology2githubᚗcomᚋClusterCockpitᚋccᚑbackendᚋpkgᚋschemaᚐTopology(ctx context.Context, sel ast.SelectionSet, v schema.Topology) graphql.Marshaler {
 	return ec._Topology(ctx, sel, &v)
 }
@@ -22,7 +22,7 @@ type FloatRange struct {
 }
 
 type Footprints struct {
-	Timeweights []schema.Float      `json:"timeweights"`
+	TimeWeights *TimeWeights        `json:"timeWeights"`
 	Metrics     []*MetricFootprints `json:"metrics"`
 }
 
@@ -133,6 +133,12 @@ type TimeRangeOutput struct {
 	To   time.Time `json:"to"`
 }
 
+type TimeWeights struct {
+	NodeHours []schema.Float `json:"nodeHours"`
+	AccHours  []schema.Float `json:"accHours"`
+	CoreHours []schema.Float `json:"coreHours"`
+}
+
 type User struct {
 	Username string `json:"username"`
 	Name     string `json:"name"`
@@ -228,16 +234,20 @@ type Weights string
 const (
 	WeightsNodeCount Weights = "NODE_COUNT"
 	WeightsNodeHours Weights = "NODE_HOURS"
+	WeightsCoreCount Weights = "CORE_COUNT"
+	WeightsCoreHours Weights = "CORE_HOURS"
 )
 
 var AllWeights = []Weights{
 	WeightsNodeCount,
 	WeightsNodeHours,
+	WeightsCoreCount,
+	WeightsCoreHours,
 }
 
 func (e Weights) IsValid() bool {
 	switch e {
-	case WeightsNodeCount, WeightsNodeHours:
+	case WeightsNodeCount, WeightsNodeHours, WeightsCoreCount, WeightsCoreHours:
 		return true
 	}
 	return false
@@ -13,6 +13,7 @@ import (
 	"github.com/99designs/gqlgen/graphql"
 	"github.com/ClusterCockpit/cc-backend/internal/graph/model"
 	"github.com/ClusterCockpit/cc-backend/internal/metricdata"
+	"github.com/ClusterCockpit/cc-backend/pkg/archive"
 	"github.com/ClusterCockpit/cc-backend/pkg/log"
 	"github.com/ClusterCockpit/cc-backend/pkg/schema"
 )
@@ -106,9 +107,11 @@ func (r *queryResolver) jobsFootprints(ctx context.Context, filter []*model.JobF
 		avgs[i] = make([]schema.Float, 0, len(jobs))
 	}
 
-	nodehours := make([]schema.Float, 0, len(jobs))
-	acchours := make([]schema.Float, 0, len(jobs))
-	hwthours := make([]schema.Float, 0, len(jobs))
+	timeweights := new(model.TimeWeights)
+	timeweights.NodeHours = make([]schema.Float, 0, len(jobs))
+	timeweights.AccHours = make([]schema.Float, 0, len(jobs))
+	timeweights.CoreHours = make([]schema.Float, 0, len(jobs))
 
 	for _, job := range jobs {
 		if job.MonitoringStatus == schema.MonitoringStatusDisabled || job.MonitoringStatus == schema.MonitoringStatusArchivingFailed {
 			continue
@@ -120,16 +123,16 @@ func (r *queryResolver) jobsFootprints(ctx context.Context, filter []*model.JobF
 		}
 
 		// #166 collect arrays: Null values or no null values?
-		nodehours = append(nodehours, schema.Float(float64(job.Duration)/60.0*float64(job.NumNodes)))
+		timeweights.NodeHours = append(timeweights.NodeHours, schema.Float(float64(job.Duration)/60.0*float64(job.NumNodes)))
 		if job.NumAcc > 0 {
-			acchours = append(acchours, schema.Float(float64(job.Duration)/60.0*float64(job.NumAcc)))
+			timeweights.AccHours = append(timeweights.AccHours, schema.Float(float64(job.Duration)/60.0*float64(job.NumAcc)))
 		} else {
-			acchours = append(acchours, schema.Float(0.0))
+			timeweights.AccHours = append(timeweights.AccHours, schema.Float(1.0))
 		}
 		if job.NumHWThreads > 0 {
-			hwthours = append(hwthours, schema.Float(float64(job.Duration)/60.0*float64(job.NumHWThreads)))
+			timeweights.CoreHours = append(timeweights.CoreHours, schema.Float(float64(job.Duration)/60.0*float64(numCoresForJob(job))))
 		} else {
-			hwthours = append(hwthours, schema.Float(0.0))
+			timeweights.CoreHours = append(timeweights.CoreHours, schema.Float(1.0))
 		}
 	}
 
@@ -142,11 +145,34 @@ func (r *queryResolver) jobsFootprints(ctx context.Context, filter []*model.JobF
 	}
 
 	return &model.Footprints{
-		Timeweights: nodehours,
+		TimeWeights: timeweights,
 		Metrics:     res,
 	}, nil
 }
 
+func numCoresForJob(job *schema.Job) (numCores int) {
+
+	subcluster, scerr := archive.GetSubCluster(job.Cluster, job.SubCluster)
+	if scerr != nil {
+		return 1
+	}
+
+	totalJobCores := 0
+	topology := subcluster.Topology
+
+	for _, host := range job.Resources {
+		hwthreads := host.HWThreads
+		if hwthreads == nil {
+			hwthreads = topology.Node
+		}
+
+		hostCores, _ := topology.GetCoresFromHWThreads(hwthreads)
+		totalJobCores += len(hostCores)
+	}
+
+	return totalJobCores
+}
+
 func requireField(ctx context.Context, name string) bool {
 	fields := graphql.CollectAllFields(ctx)
 
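Why a helper instead of job.NumHWThreads: on SMT machines several hardware threads map onto one physical core, so weighting by hardware threads would presumably overweight SMT jobs; numCoresForJob resolves the job's hardware threads to distinct cores via the subcluster topology. A hypothetical check, assuming a topology with two hardware threads per core (all numbers invented for illustration):

// Assumed 2-way SMT topology: hwthreads 0..7 live on cores 0..3.
// GetCoresFromHWThreads maps the thread list to the cores containing
// them, so a job pinned to all eight hardware threads would count as
// four cores, not eight:
hostCores, _ := topology.GetCoresFromHWThreads([]int{0, 1, 2, 3, 4, 5, 6, 7})
fmt.Println(len(hostCores)) // 4 under the topology assumed above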
@@ -182,7 +182,7 @@ func LoadAverages(
 	ctx context.Context) error {
 
 	if job.State != schema.JobStateRunning && useArchive {
-		return archive.LoadAveragesFromArchive(job, metrics, data) // #166 change also here
+		return archive.LoadAveragesFromArchive(job, metrics, data) // #166 change also here?
 	}
 
 	repo, ok := metricDataRepos[job.Cluster]
@@ -78,7 +78,7 @@
 	query: gql`
 	query($jobFilters: [JobFilter!]!, $metrics: [String!]!) {
 		footprints: jobsFootprints(filter: $jobFilters, metrics: $metrics) {
-			timeweights,
+			timeWeights { nodeHours, accHours, coreHours },
 			metrics { metric, data }
 		}
 	}`,
@@ -244,8 +244,9 @@
 	<Row>
 		<Col>
 			<Card body>
-				These histograms show the distribution of the averages of all jobs matching the filters. Each job/average is weighted by its node hours.
-				Note that some metrics could be disabled for specific subclusters as per metriConfig and thus could affect shown average values.
+				These histograms show the distribution of the averages of all jobs matching the filters. Each job/average is weighted by its node hours by default
+				(accelerator hours for native accelerator scope metrics, core hours for native core scope metrics).
+				Note that some metrics could be disabled for specific subclusters as per metricConfig and thus could affect shown average values.
 			</Card>
 			<br/>
 		</Col>
@@ -257,7 +258,8 @@
 			let:width
 			renderFor="analysis"
 			items={metricsInHistograms.map(metric => ({ metric, ...binsFromFootprint(
-				$footprintsQuery.data.footprints.timeweights,
+				$footprintsQuery.data.footprints.timeWeights,
+				metricConfig(cluster.name, metric)?.scope,
 				$footprintsQuery.data.footprints.metrics.find(f => f.metric == metric).data, numBins) }))}
 			itemsPerRow={ccconfig.plot_view_plotsPerRow}>
 
@@ -265,11 +267,11 @@
 					data={convert2uplot(item.bins)}
 					width={width} height={250}
 					title="Average Distribution of '{item.metric}'"
-					xlabel={`${item.metric} average [${(metricConfig(cluster.name, item.metric)?.unit?.prefix ? metricConfig(cluster.name, item.metric)?.unit?.prefix : '') +
+					xlabel={`${item.metric} bin maximum [${(metricConfig(cluster.name, item.metric)?.unit?.prefix ? metricConfig(cluster.name, item.metric)?.unit?.prefix : '') +
						(metricConfig(cluster.name, item.metric)?.unit?.base ? metricConfig(cluster.name, item.metric)?.unit?.base : '')}]`}
 					xunit={`${(metricConfig(cluster.name, item.metric)?.unit?.prefix ? metricConfig(cluster.name, item.metric)?.unit?.prefix : '') +
						(metricConfig(cluster.name, item.metric)?.unit?.base ? metricConfig(cluster.name, item.metric)?.unit?.base : '')}`}
-					ylabel="Node Hours"
+					ylabel="Normalized Hours"
 					yunit="Hours"/>
 			</PlotTable>
 		</Col>
@@ -279,7 +281,7 @@
 		<Col>
 			<Card body>
 				Each circle represents one job. The size of a circle is proportional to its node hours. Darker circles mean multiple jobs have the same averages for the respective metrics.
-				Note that some metrics could be disabled for specific subclusters as per metriConfig and thus could affect shown average values.
+				Note that some metrics could be disabled for specific subclusters as per metricConfig and thus could affect shown average values.
 			</Card>
 			<br/>
 		</Col>
@@ -301,7 +303,7 @@
						(metricConfig(cluster.name, item.m1)?.unit?.base ? metricConfig(cluster.name, item.m1)?.unit?.base : '')}]`}
 					yLabel={`${item.m2} [${(metricConfig(cluster.name, item.m2)?.unit?.prefix ? metricConfig(cluster.name, item.m2)?.unit?.prefix : '') +
						(metricConfig(cluster.name, item.m2)?.unit?.base ? metricConfig(cluster.name, item.m2)?.unit?.base : '')}]`}
-					X={item.f1} Y={item.f2} S={$footprintsQuery.data.footprints.timeweights} />
+					X={item.f1} Y={item.f2} S={$footprintsQuery.data.footprints.timeWeights.nodeHours} />
 			</PlotTable>
 		</Col>
 	</Row>
@@ -325,7 +325,7 @@ export function convert2uplot(canvasData) {
 	return uplotData
 }
 
-export function binsFromFootprint(weights, values, numBins) {
+export function binsFromFootprint(weights, scope, values, numBins) {
 	let min = 0, max = 0
 	if (values.length != 0) {
 		for (let x of values) {
@@ -338,10 +338,23 @@ export function binsFromFootprint(weights, values, numBins) {
 	if (numBins == null || numBins < 3)
 		numBins = 3
 
+	let scopeWeights
+	switch (scope) {
+		case 'core':
+			scopeWeights = weights.coreHours
+			break
+		case 'accelerator':
+			scopeWeights = weights.accHours
+			break
+		default: // every other scope: use 'node'
+			scopeWeights = weights.nodeHours
+	}
+
 	const bins = new Array(numBins).fill(0)
 	for (let i = 0; i < values.length; i++)
-		bins[Math.floor(((values[i] - min) / (max - min)) * numBins)] += weights ? weights[i] : 1
+		bins[Math.floor(((values[i] - min) / (max - min)) * numBins)] += scopeWeights ? scopeWeights[i] : 1
 
+	// Manual Canvas Original
 	// return {
 	//     label: idx => {
 	//         let start = min + (idx / numBins) * (max - min)
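For reference, the bucketing above is plain linear binning: index = floor((v - min) / (max - min) * numBins). A worked check with invented numbers: min = 0, max = 100, numBins = 3 puts v = 65 into bin floor(1.95) = 1. A value exactly equal to max yields index numBins, one past the last bin; a Go sketch of the same arithmetic with that edge clamped:

// binIndex reproduces the bucketing in the loop above; the clamp handles
// the v == max edge, which the JS version does not guard against.
func binIndex(v, min, max float64, numBins int) int {
	idx := int(((v - min) / (max - min)) * float64(numBins))
	if idx >= numBins {
		idx = numBins - 1
	}
	return idx
}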
@@ -355,14 +368,13 @@ export function binsFromFootprint(weights, values, numBins) {
 
 	return {
 		bins: bins.map((count, idx) => ({
-			value: idx => { // Get rounded down next integer to bins' Start-Stop Mean Value
-				let start = min + (idx / numBins) * (max - min)
+			value: idx => { // Use bins' max value instead of mean
+				// let start = min + (idx / numBins) * (max - min)
 				let stop = min + ((idx + 1) / numBins) * (max - min)
-				return `${formatNumber(Math.floor((start+stop)/2))}`
+				// return `${formatNumber(Math.floor((start+stop)/2))}`
+				return Math.floor(stop)
 			},
 			count: count
-		})),
-		min: min,
-		max: max
+		}))
 	}
 }