Refactor schema

* Remove redundant scope
* Change Series Id type to string
* Adapt Go structs to the schema
Jan Eitzinger 2023-03-22 19:21:11 +01:00
parent def35a551a
commit 1f8c6064e2
15 changed files with 247 additions and 342 deletions
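
For orientation before the per-file diffs: the central change is that pkg/schema's Series now uses a string pointer for its id and embeds its statistics by value, while JobMetric loses its scope field (the scope is already the key of the surrounding JobData map, which is why the resolver's consistency check below becomes redundant). A condensed sketch of the resulting types — Data is simplified to []float64 here, the real field uses schema.Float:

    package schema

    // MetricStatistics is embedded by value now: every series carries
    // statistics, with the zero value standing in where none were computed.
    type MetricStatistics struct {
    	Avg float64 `json:"avg"`
    	Min float64 `json:"min"`
    	Max float64 `json:"max"`
    }

    // Id changed from *int to *string, so type ids coming from the metric
    // store (including accelerator ids) are passed through verbatim instead
    // of being converted with strconv.Atoi.
    type Series struct {
    	Hostname   string           `json:"hostname"`
    	Id         *string          `json:"id,omitempty"`
    	Statistics MetricStatistics `json:"statistics"`
    	Data       []float64        `json:"data"` // really []schema.Float
    }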

View File

@@ -41,7 +41,6 @@ type Cluster {
 type SubCluster {
   name: String!
   nodes: String!
-  numberOfNodes: Int!
   processorType: String!
   socketsPerNode: Int!
   coresPerSocket: Int!
@@ -114,7 +113,6 @@ type JobMetricWithName {
 type JobMetric {
   unit: Unit
-  scope: MetricScope!
   timestep: Int!
   series: [Series!]
   statisticsSeries: StatsSeries
@@ -122,7 +120,7 @@ type JobMetric {
 type Series {
   hostname: String!
-  id: Int
+  id: String
   statistics: MetricStatistics
   data: [NullableFloat!]!
 }

View File

@@ -107,7 +107,6 @@ type ComplexityRoot struct {
 	}

 	JobMetric struct {
-		Scope            func(childComplexity int) int
 		Series           func(childComplexity int) int
 		StatisticsSeries func(childComplexity int) int
 		Timestep         func(childComplexity int) int
@@ -221,7 +220,6 @@ type ComplexityRoot struct {
 		MemoryBandwidth func(childComplexity int) int
 		Name            func(childComplexity int) int
 		Nodes           func(childComplexity int) int
-		NumberOfNodes   func(childComplexity int) int
 		ProcessorType   func(childComplexity int) int
 		SocketsPerNode  func(childComplexity int) int
 		ThreadsPerCore  func(childComplexity int) int
@@ -574,13 +572,6 @@ func (e *executableSchema) Complexity(typeName, field string, childComplexity int
 		return e.complexity.Job.Walltime(childComplexity), true

-	case "JobMetric.scope":
-		if e.complexity.JobMetric.Scope == nil {
-			break
-		}
-
-		return e.complexity.JobMetric.Scope(childComplexity), true
-
 	case "JobMetric.series":
 		if e.complexity.JobMetric.Series == nil {
 			break
@@ -1153,13 +1144,6 @@ func (e *executableSchema) Complexity(typeName, field string, childComplexity int
 		return e.complexity.SubCluster.Nodes(childComplexity), true

-	case "SubCluster.numberOfNodes":
-		if e.complexity.SubCluster.NumberOfNodes == nil {
-			break
-		}
-
-		return e.complexity.SubCluster.NumberOfNodes(childComplexity), true
-
 	case "SubCluster.processorType":
 		if e.complexity.SubCluster.ProcessorType == nil {
 			break
@@ -1460,7 +1444,6 @@ type Cluster {
 type SubCluster {
   name: String!
   nodes: String!
-  numberOfNodes: Int!
   processorType: String!
   socketsPerNode: Int!
   coresPerSocket: Int!
@@ -1533,7 +1516,6 @@ type JobMetricWithName {
 type JobMetric {
   unit: Unit
-  scope: MetricScope!
   timestep: Int!
   series: [Series!]
   statisticsSeries: StatsSeries
@@ -1541,7 +1523,7 @@ type JobMetric {
 type Series {
   hostname: String!
-  id: Int
+  id: String
   statistics: MetricStatistics
   data: [NullableFloat!]!
 }
@@ -2527,8 +2509,6 @@ func (ec *executionContext) fieldContext_Cluster_subClusters(ctx context.Context, field graphql.CollectedField)
 				return ec.fieldContext_SubCluster_name(ctx, field)
 			case "nodes":
 				return ec.fieldContext_SubCluster_nodes(ctx, field)
-			case "numberOfNodes":
-				return ec.fieldContext_SubCluster_numberOfNodes(ctx, field)
 			case "processorType":
 				return ec.fieldContext_SubCluster_processorType(ctx, field)
 			case "socketsPerNode":
@@ -3945,50 +3925,6 @@ func (ec *executionContext) fieldContext_JobMetric_unit(ctx context.Context, field graphql.CollectedField)
 	return fc, nil
 }

-func (ec *executionContext) _JobMetric_scope(ctx context.Context, field graphql.CollectedField, obj *schema.JobMetric) (ret graphql.Marshaler) {
-	fc, err := ec.fieldContext_JobMetric_scope(ctx, field)
-	if err != nil {
-		return graphql.Null
-	}
-	ctx = graphql.WithFieldContext(ctx, fc)
-	defer func() {
-		if r := recover(); r != nil {
-			ec.Error(ctx, ec.Recover(ctx, r))
-			ret = graphql.Null
-		}
-	}()
-	resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
-		ctx = rctx // use context from middleware stack in children
-		return obj.Scope, nil
-	})
-	if err != nil {
-		ec.Error(ctx, err)
-		return graphql.Null
-	}
-	if resTmp == nil {
-		if !graphql.HasFieldError(ctx, fc) {
-			ec.Errorf(ctx, "must not be null")
-		}
-		return graphql.Null
-	}
-	res := resTmp.(schema.MetricScope)
-	fc.Result = res
-	return ec.marshalNMetricScope2githubᚗcomᚋClusterCockpitᚋccᚑbackendᚋpkgᚋschemaᚐMetricScope(ctx, field.Selections, res)
-}
-
-func (ec *executionContext) fieldContext_JobMetric_scope(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) {
-	fc = &graphql.FieldContext{
-		Object:     "JobMetric",
-		Field:      field,
-		IsMethod:   false,
-		IsResolver: false,
-		Child: func(ctx context.Context, field graphql.CollectedField) (*graphql.FieldContext, error) {
-			return nil, errors.New("field of type MetricScope does not have child fields")
-		},
-	}
-	return fc, nil
-}
-
 func (ec *executionContext) _JobMetric_timestep(ctx context.Context, field graphql.CollectedField, obj *schema.JobMetric) (ret graphql.Marshaler) {
 	fc, err := ec.fieldContext_JobMetric_timestep(ctx, field)
 	if err != nil {
@@ -4218,8 +4154,6 @@ func (ec *executionContext) fieldContext_JobMetricWithName_metric(ctx context.Context, field graphql.CollectedField)
 			switch field.Name {
 			case "unit":
 				return ec.fieldContext_JobMetric_unit(ctx, field)
-			case "scope":
-				return ec.fieldContext_JobMetric_scope(ctx, field)
 			case "timestep":
 				return ec.fieldContext_JobMetric_timestep(ctx, field)
 			case "series":
@@ -4930,9 +4864,9 @@ func (ec *executionContext) _MetricConfig_aggregation(ctx context.Context, field graphql.CollectedField)
 		}
 		return graphql.Null
 	}
-	res := resTmp.(*string)
+	res := resTmp.(string)
 	fc.Result = res
-	return ec.marshalNString2ᚖstring(ctx, field.Selections, res)
+	return ec.marshalNString2string(ctx, field.Selections, res)
 }
@@ -5018,9 +4952,9 @@ func (ec *executionContext) _MetricConfig_peak(ctx context.Context, field graphql.CollectedField)
 		}
 		return graphql.Null
 	}
-	res := resTmp.(*float64)
+	res := resTmp.(float64)
 	fc.Result = res
-	return ec.marshalNFloat2ᚖfloat64(ctx, field.Selections, res)
+	return ec.marshalNFloat2float64(ctx, field.Selections, res)
 }
@@ -5059,9 +4993,9 @@ func (ec *executionContext) _MetricConfig_normal(ctx context.Context, field graphql.CollectedField)
 	if resTmp == nil {
 		return graphql.Null
 	}
-	res := resTmp.(*float64)
+	res := resTmp.(float64)
 	fc.Result = res
-	return ec.marshalOFloat2ᚖfloat64(ctx, field.Selections, res)
+	return ec.marshalOFloat2float64(ctx, field.Selections, res)
 }
@@ -5103,9 +5037,9 @@ func (ec *executionContext) _MetricConfig_caution(ctx context.Context, field graphql.CollectedField)
 		}
 		return graphql.Null
 	}
-	res := resTmp.(*float64)
+	res := resTmp.(float64)
 	fc.Result = res
-	return ec.marshalNFloat2ᚖfloat64(ctx, field.Selections, res)
+	return ec.marshalNFloat2float64(ctx, field.Selections, res)
 }
@@ -5147,9 +5081,9 @@ func (ec *executionContext) _MetricConfig_alert(ctx context.Context, field graphql.CollectedField)
 		}
 		return graphql.Null
 	}
-	res := resTmp.(*float64)
+	res := resTmp.(float64)
 	fc.Result = res
-	return ec.marshalNFloat2ᚖfloat64(ctx, field.Selections, res)
+	return ec.marshalNFloat2float64(ctx, field.Selections, res)
 }
@@ -7093,9 +7027,9 @@ func (ec *executionContext) _Series_id(ctx context.Context, field graphql.CollectedField)
 	if resTmp == nil {
 		return graphql.Null
 	}
-	res := resTmp.(*int)
+	res := resTmp.(*string)
 	fc.Result = res
-	return ec.marshalOInt2ᚖint(ctx, field.Selections, res)
+	return ec.marshalOString2ᚖstring(ctx, field.Selections, res)
 }
@@ -7105,7 +7039,7 @@ func (ec *executionContext) fieldContext_Series_id(ctx context.Context, field graphql.CollectedField)
 		IsMethod:   false,
 		IsResolver: false,
 		Child: func(ctx context.Context, field graphql.CollectedField) (*graphql.FieldContext, error) {
-			return nil, errors.New("field of type Int does not have child fields")
+			return nil, errors.New("field of type String does not have child fields")
 		},
 	}
 	return fc, nil
@@ -7134,9 +7068,9 @@ func (ec *executionContext) _Series_statistics(ctx context.Context, field graphql.CollectedField)
 	if resTmp == nil {
 		return graphql.Null
 	}
-	res := resTmp.(*schema.MetricStatistics)
+	res := resTmp.(schema.MetricStatistics)
 	fc.Result = res
-	return ec.marshalOMetricStatistics2ᚖgithubᚗcomᚋClusterCockpitᚋccᚑbackendᚋpkgᚋschemaᚐMetricStatistics(ctx, field.Selections, res)
+	return ec.marshalOMetricStatistics2githubᚗcomᚋClusterCockpitᚋccᚑbackendᚋpkgᚋschemaᚐMetricStatistics(ctx, field.Selections, res)
 }
@@ -7424,50 +7358,6 @@ func (ec *executionContext) fieldContext_SubCluster_nodes(ctx context.Context, field graphql.CollectedField)
 	return fc, nil
 }

-func (ec *executionContext) _SubCluster_numberOfNodes(ctx context.Context, field graphql.CollectedField, obj *schema.SubCluster) (ret graphql.Marshaler) {
-	fc, err := ec.fieldContext_SubCluster_numberOfNodes(ctx, field)
-	if err != nil {
-		return graphql.Null
-	}
-	ctx = graphql.WithFieldContext(ctx, fc)
-	defer func() {
-		if r := recover(); r != nil {
-			ec.Error(ctx, ec.Recover(ctx, r))
-			ret = graphql.Null
-		}
-	}()
-	resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
-		ctx = rctx // use context from middleware stack in children
-		return obj.NumberOfNodes, nil
-	})
-	if err != nil {
-		ec.Error(ctx, err)
-		return graphql.Null
-	}
-	if resTmp == nil {
-		if !graphql.HasFieldError(ctx, fc) {
-			ec.Errorf(ctx, "must not be null")
-		}
-		return graphql.Null
-	}
-	res := resTmp.(int)
-	fc.Result = res
-	return ec.marshalNInt2int(ctx, field.Selections, res)
-}
-
-func (ec *executionContext) fieldContext_SubCluster_numberOfNodes(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) {
-	fc = &graphql.FieldContext{
-		Object:     "SubCluster",
-		Field:      field,
-		IsMethod:   false,
-		IsResolver: false,
-		Child: func(ctx context.Context, field graphql.CollectedField) (*graphql.FieldContext, error) {
-			return nil, errors.New("field of type Int does not have child fields")
-		},
-	}
-	return fc, nil
-}
-
 func (ec *executionContext) _SubCluster_processorType(ctx context.Context, field graphql.CollectedField, obj *schema.SubCluster) (ret graphql.Marshaler) {
 	fc, err := ec.fieldContext_SubCluster_processorType(ctx, field)
 	if err != nil {
@@ -7670,9 +7560,9 @@ func (ec *executionContext) _SubCluster_flopRateScalar(ctx context.Context, field graphql.CollectedField)
 		}
 		return graphql.Null
 	}
-	res := resTmp.(*schema.MetricValue)
+	res := resTmp.(schema.MetricValue)
 	fc.Result = res
-	return ec.marshalNMetricValue2ᚖgithubᚗcomᚋClusterCockpitᚋccᚑbackendᚋpkgᚋschemaᚐMetricValue(ctx, field.Selections, res)
+	return ec.marshalNMetricValue2githubᚗcomᚋClusterCockpitᚋccᚑbackendᚋpkgᚋschemaᚐMetricValue(ctx, field.Selections, res)
 }
@@ -7720,9 +7610,9 @@ func (ec *executionContext) _SubCluster_flopRateSimd(ctx context.Context, field graphql.CollectedField)
 		}
 		return graphql.Null
 	}
-	res := resTmp.(*schema.MetricValue)
+	res := resTmp.(schema.MetricValue)
 	fc.Result = res
-	return ec.marshalNMetricValue2ᚖgithubᚗcomᚋClusterCockpitᚋccᚑbackendᚋpkgᚋschemaᚐMetricValue(ctx, field.Selections, res)
+	return ec.marshalNMetricValue2githubᚗcomᚋClusterCockpitᚋccᚑbackendᚋpkgᚋschemaᚐMetricValue(ctx, field.Selections, res)
 }
@@ -7770,9 +7660,9 @@ func (ec *executionContext) _SubCluster_memoryBandwidth(ctx context.Context, field graphql.CollectedField)
 		}
 		return graphql.Null
 	}
-	res := resTmp.(*schema.MetricValue)
+	res := resTmp.(schema.MetricValue)
 	fc.Result = res
-	return ec.marshalNMetricValue2ᚖgithubᚗcomᚋClusterCockpitᚋccᚑbackendᚋpkgᚋschemaᚐMetricValue(ctx, field.Selections, res)
+	return ec.marshalNMetricValue2githubᚗcomᚋClusterCockpitᚋccᚑbackendᚋpkgᚋschemaᚐMetricValue(ctx, field.Selections, res)
 }
@@ -7820,9 +7710,9 @@ func (ec *executionContext) _SubCluster_topology(ctx context.Context, field graphql.CollectedField)
 		}
 		return graphql.Null
 	}
-	res := resTmp.(*schema.Topology)
+	res := resTmp.(schema.Topology)
 	fc.Result = res
-	return ec.marshalNTopology2ᚖgithubᚗcomᚋClusterCockpitᚋccᚑbackendᚋpkgᚋschemaᚐTopology(ctx, field.Selections, res)
+	return ec.marshalNTopology2githubᚗcomᚋClusterCockpitᚋccᚑbackendᚋpkgᚋschemaᚐTopology(ctx, field.Selections, res)
 }
@@ -8467,9 +8357,9 @@ func (ec *executionContext) _Topology_die(ctx context.Context, field graphql.CollectedField)
 	if resTmp == nil {
 		return graphql.Null
 	}
-	res := resTmp.([][]int)
+	res := resTmp.([][]*int)
 	fc.Result = res
-	return ec.marshalOInt2ᚕᚕintᚄ(ctx, field.Selections, res)
+	return ec.marshalOInt2ᚕᚕᚖintᚄ(ctx, field.Selections, res)
 }
@@ -8642,9 +8532,9 @@ func (ec *executionContext) _Unit_prefix(ctx context.Context, field graphql.CollectedField)
 	if resTmp == nil {
 		return graphql.Null
 	}
-	res := resTmp.(string)
+	res := resTmp.(*string)
 	fc.Result = res
-	return ec.marshalOString2string(ctx, field.Selections, res)
+	return ec.marshalOString2ᚖstring(ctx, field.Selections, res)
 }
@@ -11439,13 +11329,6 @@ func (ec *executionContext) _JobMetric(ctx context.Context, sel ast.SelectionSet
 			out.Values[i] = ec._JobMetric_unit(ctx, field, obj)

-		case "scope":
-
-			out.Values[i] = ec._JobMetric_scope(ctx, field, obj)
-
-			if out.Values[i] == graphql.Null {
-				invalids++
-			}
 		case "timestep":

 			out.Values[i] = ec._JobMetric_timestep(ctx, field, obj)
@@ -12386,13 +12269,6 @@ func (ec *executionContext) _SubCluster(ctx context.Context, sel ast.SelectionSet
 			out.Values[i] = ec._SubCluster_nodes(ctx, field, obj)

-			if out.Values[i] == graphql.Null {
-				invalids++
-			}
-		case "numberOfNodes":
-
-			out.Values[i] = ec._SubCluster_numberOfNodes(ctx, field, obj)
-
 			if out.Values[i] == graphql.Null {
 				invalids++
 			}
@@ -13247,27 +13123,6 @@ func (ec *executionContext) marshalNFloat2ᚕᚕfloat64ᚄ(ctx context.Context, sel ast.SelectionSet
 	return ret
 }

-func (ec *executionContext) unmarshalNFloat2ᚖfloat64(ctx context.Context, v interface{}) (*float64, error) {
-	res, err := graphql.UnmarshalFloatContext(ctx, v)
-	return &res, graphql.ErrorOnPath(ctx, err)
-}
-
-func (ec *executionContext) marshalNFloat2ᚖfloat64(ctx context.Context, sel ast.SelectionSet, v *float64) graphql.Marshaler {
-	if v == nil {
-		if !graphql.HasFieldError(ctx, graphql.GetFieldContext(ctx)) {
-			ec.Errorf(ctx, "the requested element is null which the schema does not allow")
-		}
-		return graphql.Null
-	}
-	res := graphql.MarshalFloatContext(*v)
-	if res == graphql.Null {
-		if !graphql.HasFieldError(ctx, graphql.GetFieldContext(ctx)) {
-			ec.Errorf(ctx, "the requested element is null which the schema does not allow")
-		}
-	}
-	return graphql.WrapContextMarshaler(ctx, res)
-}
-
 func (ec *executionContext) marshalNHistoPoint2ᚕᚖgithubᚗcomᚋClusterCockpitᚋccᚑbackendᚋinternalᚋgraphᚋmodelᚐHistoPointᚄ(ctx context.Context, sel ast.SelectionSet, v []*model.HistoPoint) graphql.Marshaler {
 	ret := make(graphql.Array, len(v))
 	var wg sync.WaitGroup
@@ -13461,6 +13316,59 @@ func (ec *executionContext) marshalNInt2ᚕintᚄ(ctx context.Context, sel ast.SelectionSet
 	return ret
 }

+func (ec *executionContext) unmarshalNInt2ᚕᚖintᚄ(ctx context.Context, v interface{}) ([]*int, error) {
+	var vSlice []interface{}
+	if v != nil {
+		vSlice = graphql.CoerceList(v)
+	}
+	var err error
+	res := make([]*int, len(vSlice))
+	for i := range vSlice {
+		ctx := graphql.WithPathContext(ctx, graphql.NewPathWithIndex(i))
+		res[i], err = ec.unmarshalNInt2ᚖint(ctx, vSlice[i])
+		if err != nil {
+			return nil, err
+		}
+	}
+	return res, nil
+}
+
+func (ec *executionContext) marshalNInt2ᚕᚖintᚄ(ctx context.Context, sel ast.SelectionSet, v []*int) graphql.Marshaler {
+	ret := make(graphql.Array, len(v))
+	for i := range v {
+		ret[i] = ec.marshalNInt2ᚖint(ctx, sel, v[i])
+	}
+
+	for _, e := range ret {
+		if e == graphql.Null {
+			return graphql.Null
+		}
+	}
+
+	return ret
+}
+
+func (ec *executionContext) unmarshalNInt2ᚖint(ctx context.Context, v interface{}) (*int, error) {
+	res, err := graphql.UnmarshalInt(v)
+	return &res, graphql.ErrorOnPath(ctx, err)
+}
+
+func (ec *executionContext) marshalNInt2ᚖint(ctx context.Context, sel ast.SelectionSet, v *int) graphql.Marshaler {
+	if v == nil {
+		if !graphql.HasFieldError(ctx, graphql.GetFieldContext(ctx)) {
+			ec.Errorf(ctx, "the requested element is null which the schema does not allow")
+		}
+		return graphql.Null
+	}
+	res := graphql.MarshalInt(*v)
+	if res == graphql.Null {
+		if !graphql.HasFieldError(ctx, graphql.GetFieldContext(ctx)) {
+			ec.Errorf(ctx, "the requested element is null which the schema does not allow")
+		}
+	}
+	return res
+}
+
 func (ec *executionContext) marshalNJob2ᚕᚖgithubᚗcomᚋClusterCockpitᚋccᚑbackendᚋpkgᚋschemaᚐJobᚄ(ctx context.Context, sel ast.SelectionSet, v []*schema.Job) graphql.Marshaler {
 	ret := make(graphql.Array, len(v))
 	var wg sync.WaitGroup
@@ -13814,14 +13722,8 @@ func (ec *executionContext) marshalNMetricScope2githubᚗcomᚋClusterCockpitᚋccᚑbackendᚋpkgᚋschemaᚐMetricScope
 	return v
 }

-func (ec *executionContext) marshalNMetricValue2ᚖgithubᚗcomᚋClusterCockpitᚋccᚑbackendᚋpkgᚋschemaᚐMetricValue(ctx context.Context, sel ast.SelectionSet, v *schema.MetricValue) graphql.Marshaler {
-	if v == nil {
-		if !graphql.HasFieldError(ctx, graphql.GetFieldContext(ctx)) {
-			ec.Errorf(ctx, "the requested element is null which the schema does not allow")
-		}
-		return graphql.Null
-	}
-	return ec._MetricValue(ctx, sel, v)
+func (ec *executionContext) marshalNMetricValue2githubᚗcomᚋClusterCockpitᚋccᚑbackendᚋpkgᚋschemaᚐMetricValue(ctx context.Context, sel ast.SelectionSet, v schema.MetricValue) graphql.Marshaler {
+	return ec._MetricValue(ctx, sel, &v)
 }

 func (ec *executionContext) marshalNNodeMetrics2ᚕᚖgithubᚗcomᚋClusterCockpitᚋccᚑbackendᚋinternalᚋgraphᚋmodelᚐNodeMetricsᚄ(ctx context.Context, sel ast.SelectionSet, v []*model.NodeMetrics) graphql.Marshaler {
@@ -14035,27 +13937,6 @@ func (ec *executionContext) marshalNString2ᚕstringᚄ(ctx context.Context, sel ast.SelectionSet
 	return ret
 }

-func (ec *executionContext) unmarshalNString2ᚖstring(ctx context.Context, v interface{}) (*string, error) {
-	res, err := graphql.UnmarshalString(v)
-	return &res, graphql.ErrorOnPath(ctx, err)
-}
-
-func (ec *executionContext) marshalNString2ᚖstring(ctx context.Context, sel ast.SelectionSet, v *string) graphql.Marshaler {
-	if v == nil {
-		if !graphql.HasFieldError(ctx, graphql.GetFieldContext(ctx)) {
-			ec.Errorf(ctx, "the requested element is null which the schema does not allow")
-		}
-		return graphql.Null
-	}
-	res := graphql.MarshalString(*v)
-	if res == graphql.Null {
-		if !graphql.HasFieldError(ctx, graphql.GetFieldContext(ctx)) {
-			ec.Errorf(ctx, "the requested element is null which the schema does not allow")
-		}
-	}
-	return res
-}
-
 func (ec *executionContext) marshalNSubCluster2ᚕᚖgithubᚗcomᚋClusterCockpitᚋccᚑbackendᚋpkgᚋschemaᚐSubClusterᚄ(ctx context.Context, sel ast.SelectionSet, v []*schema.SubCluster) graphql.Marshaler {
 	ret := make(graphql.Array, len(v))
 	var wg sync.WaitGroup
@@ -14237,14 +14118,8 @@ func (ec *executionContext) marshalNTime2timeᚐTime(ctx context.Context, sel ast.SelectionSet
 	return res
 }

-func (ec *executionContext) marshalNTopology2ᚖgithubᚗcomᚋClusterCockpitᚋccᚑbackendᚋpkgᚋschemaᚐTopology(ctx context.Context, sel ast.SelectionSet, v *schema.Topology) graphql.Marshaler {
-	if v == nil {
-		if !graphql.HasFieldError(ctx, graphql.GetFieldContext(ctx)) {
-			ec.Errorf(ctx, "the requested element is null which the schema does not allow")
-		}
-		return graphql.Null
-	}
-	return ec._Topology(ctx, sel, v)
+func (ec *executionContext) marshalNTopology2githubᚗcomᚋClusterCockpitᚋccᚑbackendᚋpkgᚋschemaᚐTopology(ctx context.Context, sel ast.SelectionSet, v schema.Topology) graphql.Marshaler {
+	return ec._Topology(ctx, sel, &v)
 }

 func (ec *executionContext) marshalNUnit2githubᚗcomᚋClusterCockpitᚋccᚑbackendᚋpkgᚋschemaᚐUnit(ctx context.Context, sel ast.SelectionSet, v schema.Unit) graphql.Marshaler {
@@ -14619,22 +14494,6 @@ func (ec *executionContext) marshalOFloat2float64(ctx context.Context, sel ast.SelectionSet
 	return graphql.WrapContextMarshaler(ctx, res)
 }

-func (ec *executionContext) unmarshalOFloat2ᚖfloat64(ctx context.Context, v interface{}) (*float64, error) {
-	if v == nil {
-		return nil, nil
-	}
-	res, err := graphql.UnmarshalFloatContext(ctx, v)
-	return &res, graphql.ErrorOnPath(ctx, err)
-}
-
-func (ec *executionContext) marshalOFloat2ᚖfloat64(ctx context.Context, sel ast.SelectionSet, v *float64) graphql.Marshaler {
-	if v == nil {
-		return graphql.Null
-	}
-	res := graphql.MarshalFloatContext(*v)
-	return graphql.WrapContextMarshaler(ctx, res)
-}
-
 func (ec *executionContext) unmarshalOFloatRange2ᚖgithubᚗcomᚋClusterCockpitᚋccᚑbackendᚋinternalᚋgraphᚋmodelᚐFloatRange(ctx context.Context, v interface{}) (*model.FloatRange, error) {
 	if v == nil {
 		return nil, nil
@@ -14764,6 +14623,44 @@ func (ec *executionContext) marshalOInt2ᚕᚕintᚄ(ctx context.Context, sel ast.SelectionSet
 	return ret
 }

+func (ec *executionContext) unmarshalOInt2ᚕᚕᚖintᚄ(ctx context.Context, v interface{}) ([][]*int, error) {
+	if v == nil {
+		return nil, nil
+	}
+	var vSlice []interface{}
+	if v != nil {
+		vSlice = graphql.CoerceList(v)
+	}
+	var err error
+	res := make([][]*int, len(vSlice))
+	for i := range vSlice {
+		ctx := graphql.WithPathContext(ctx, graphql.NewPathWithIndex(i))
+		res[i], err = ec.unmarshalNInt2ᚕᚖintᚄ(ctx, vSlice[i])
+		if err != nil {
+			return nil, err
+		}
+	}
+	return res, nil
+}
+
+func (ec *executionContext) marshalOInt2ᚕᚕᚖintᚄ(ctx context.Context, sel ast.SelectionSet, v [][]*int) graphql.Marshaler {
+	if v == nil {
+		return graphql.Null
+	}
+	ret := make(graphql.Array, len(v))
+	for i := range v {
+		ret[i] = ec.marshalNInt2ᚕᚖintᚄ(ctx, sel, v[i])
+	}
+
+	for _, e := range ret {
+		if e == graphql.Null {
+			return graphql.Null
+		}
+	}
+
+	return ret
+}
+
 func (ec *executionContext) unmarshalOInt2ᚖint(ctx context.Context, v interface{}) (*int, error) {
 	if v == nil {
 		return nil, nil
@@ -14899,11 +14796,8 @@ func (ec *executionContext) marshalOMetricScope2ᚕgithubᚗcomᚋClusterCockpitᚋccᚑbackendᚋpkgᚋschemaᚐMetricScopeᚄ
 	return ret
 }

-func (ec *executionContext) marshalOMetricStatistics2ᚖgithubᚗcomᚋClusterCockpitᚋccᚑbackendᚋpkgᚋschemaᚐMetricStatistics(ctx context.Context, sel ast.SelectionSet, v *schema.MetricStatistics) graphql.Marshaler {
-	if v == nil {
-		return graphql.Null
-	}
-	return ec._MetricStatistics(ctx, sel, v)
+func (ec *executionContext) marshalOMetricStatistics2githubᚗcomᚋClusterCockpitᚋccᚑbackendᚋpkgᚋschemaᚐMetricStatistics(ctx context.Context, sel ast.SelectionSet, v schema.MetricStatistics) graphql.Marshaler {
+	return ec._MetricStatistics(ctx, sel, &v)
 }

 func (ec *executionContext) unmarshalOOrderByInput2ᚖgithubᚗcomᚋClusterCockpitᚋccᚑbackendᚋinternalᚋgraphᚋmodelᚐOrderByInput(ctx context.Context, v interface{}) (*model.OrderByInput, error) {

View File

@@ -173,11 +173,7 @@ func (r *queryResolver) JobMetrics(ctx context.Context, id string, metrics []string
 	res := []*model.JobMetricWithName{}
 	for name, md := range data {
-		for scope, metric := range md {
-			if metric.Scope != schema.MetricScope(scope) {
-				panic("WTF?")
-			}
-
+		for _, metric := range md {
 			res = append(res, &model.JobMetricWithName{
 				Name:   name,
 				Metric: metric,

View File

@@ -158,7 +158,6 @@ func (ccms *CCMetricStore) LoadData(
 	scopes []schema.MetricScope,
 	ctx context.Context) (schema.JobData, error) {

-	topology := archive.GetSubCluster(job.Cluster, job.SubCluster).Topology
 	queries, assignedScope, err := ccms.buildQueries(job, metrics, scopes)
 	if err != nil {
 		return nil, err
@@ -193,7 +192,6 @@ func (ccms *CCMetricStore) LoadData(
 			if !ok {
 				jobMetric = &schema.JobMetric{
 					Unit:     mc.Unit,
-					Scope:    scope,
 					Timestep: mc.Timestep,
 					Series:   make([]schema.Series, 0),
 				}
@@ -206,13 +204,10 @@ func (ccms *CCMetricStore) LoadData(
 				continue
 			}

-			id := (*int)(nil)
+			id := (*string)(nil)
 			if query.Type != nil {
-				id = new(int)
-				*id, err = strconv.Atoi(query.TypeIds[0])
-				if err != nil || *query.Type == acceleratorString {
-					*id, _ = topology.GetAcceleratorIndex(query.TypeIds[0])
-				}
+				id = new(string)
+				*id = query.TypeIds[0]
 			}

 			if res.Avg.IsNaN() || res.Min.IsNaN() || res.Max.IsNaN() {
@@ -226,7 +221,7 @@ func (ccms *CCMetricStore) LoadData(
 			jobMetric.Series = append(jobMetric.Series, schema.Series{
 				Hostname: query.Hostname,
 				Id:       id,
-				Statistics: &schema.MetricStatistics{
+				Statistics: schema.MetricStatistics{
 					Avg: float64(res.Avg),
 					Min: float64(res.Min),
 					Max: float64(res.Max),
@@ -610,13 +605,12 @@ func (ccms *CCMetricStore) LoadNodeData(
 		mc := archive.GetMetricConfig(cluster, metric)
 		hostdata[metric] = append(hostdata[metric], &schema.JobMetric{
 			Unit:     mc.Unit,
-			Scope:    schema.MetricScopeNode,
 			Timestep: mc.Timestep,
 			Series: []schema.Series{
 				{
 					Hostname: query.Hostname,
 					Data:     qdata.Data,
-					Statistics: &schema.MetricStatistics{
+					Statistics: schema.MetricStatistics{
 						Avg: float64(qdata.Avg),
 						Min: float64(qdata.Min),
 						Max: float64(qdata.Max),

View File

@@ -132,7 +132,6 @@ func (idb *InfluxDBv2DataRepository) LoadData(
 			jobMetric = map[schema.MetricScope]*schema.JobMetric{
 				scope: { // uses scope var from above!
 					Unit:     mc.Unit,
-					Scope:    scope,
 					Timestep: mc.Timestep,
 					Series:   make([]schema.Series, 0, len(job.Resources)),
 					StatisticsSeries: nil, // Should be: &schema.StatsSeries{},
@@ -157,7 +156,7 @@ func (idb *InfluxDBv2DataRepository) LoadData(
 				field, host = row.Measurement(), row.ValueByKey("hostname").(string)
 				hostSeries = schema.Series{
 					Hostname:   host,
-					Statistics: nil,
+					Statistics: schema.MetricStatistics{}, //TODO Add Statistics
 					Data:       make([]schema.Float, 0),
 				}
 			}
@@ -215,7 +214,7 @@ func (idb *InfluxDBv2DataRepository) LoadData(
 			// log.Println(fmt.Sprintf("<< Try to add Stats to Series in Position %d >>", index))
 			if jobData[metric][scope].Series[index].Hostname == node {
 				// log.Println(fmt.Sprintf("<< Match for Series in Position %d : Host %s >>", index, jobData[metric][scope].Series[index].Hostname))
-				jobData[metric][scope].Series[index].Statistics = &schema.MetricStatistics{Avg: stats.Avg, Min: stats.Min, Max: stats.Max}
+				jobData[metric][scope].Series[index].Statistics = schema.MetricStatistics{Avg: stats.Avg, Min: stats.Min, Max: stats.Max}
 				// log.Println(fmt.Sprintf("<< Result Inner: Min %.2f, Max %.2f, Avg %.2f >>", jobData[metric][scope].Series[index].Statistics.Min, jobData[metric][scope].Series[index].Statistics.Max, jobData[metric][scope].Series[index].Statistics.Avg))
 			}
 		}

View File

@@ -16,9 +16,9 @@ type Topology struct {
 	Node         []int          `json:"node"`
 	Socket       [][]int        `json:"socket"`
 	MemoryDomain [][]int        `json:"memoryDomain"`
-	Die          [][]int        `json:"die"`
+	Die          [][]*int       `json:"die,omitempty"`
 	Core         [][]int        `json:"core"`
-	Accelerators []*Accelerator `json:"accelerators"`
+	Accelerators []*Accelerator `json:"accelerators,omitempty"`
 }

 type MetricValue struct {
@@ -29,15 +29,14 @@ type MetricValue struct {
 type SubCluster struct {
 	Name            string       `json:"name"`
 	Nodes           string       `json:"nodes"`
-	NumberOfNodes   int          `json:"numberOfNodes"`
 	ProcessorType   string       `json:"processorType"`
 	SocketsPerNode  int          `json:"socketsPerNode"`
 	CoresPerSocket  int          `json:"coresPerSocket"`
 	ThreadsPerCore  int          `json:"threadsPerCore"`
-	FlopRateScalar  *MetricValue `json:"flopRateScalar"`
-	FlopRateSimd    *MetricValue `json:"flopRateSimd"`
-	MemoryBandwidth *MetricValue `json:"memoryBandwidth"`
-	Topology        *Topology    `json:"topology"`
+	FlopRateScalar  MetricValue  `json:"flopRateScalar"`
+	FlopRateSimd    MetricValue  `json:"flopRateSimd"`
+	MemoryBandwidth MetricValue  `json:"memoryBandwidth"`
+	Topology        Topology     `json:"topology"`
 }

 type SubClusterConfig struct {
@@ -53,13 +52,13 @@ type MetricConfig struct {
 	Name        string              `json:"name"`
 	Unit        Unit                `json:"unit"`
 	Scope       MetricScope         `json:"scope"`
-	Aggregation *string             `json:"aggregation"`
+	Aggregation string              `json:"aggregation"`
 	Timestep    int                 `json:"timestep"`
-	Peak        *float64            `json:"peak"`
-	Normal      *float64            `json:"normal"`
-	Caution     *float64            `json:"caution"`
-	Alert       *float64            `json:"alert"`
-	SubClusters []*SubClusterConfig `json:"subClusters"`
+	Peak        float64             `json:"peak"`
+	Normal      float64             `json:"normal"`
+	Caution     float64             `json:"caution"`
+	Alert       float64             `json:"alert"`
+	SubClusters []*SubClusterConfig `json:"subClusters,omitempty"`
 }

 type Cluster struct {
@@ -169,12 +168,3 @@ func (topo *Topology) GetAcceleratorIDs() ([]int, error) {
 	}
 	return accels, nil
 }
-
-func (topo *Topology) GetAcceleratorIndex(id string) (int, bool) {
-	for idx, accel := range topo.Accelerators {
-		if accel.ID == id {
-			return idx, true
-		}
-	}
-	return -1, false
-}
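
A consequence of turning Aggregation, Peak, Normal, Caution and Alert into plain values: an absent JSON property no longer decodes to nil but silently to the zero value, which is presumably why "normal" is added to the JSON schema's required list further down and why the migration tool warns on a missing aggregation. A minimal self-contained demonstration (local stand-in type, illustrative values):

    package main

    import (
    	"encoding/json"
    	"fmt"
    )

    // Local stand-in for the refactored schema.MetricConfig (subset of fields).
    type MetricConfig struct {
    	Name        string  `json:"name"`
    	Aggregation string  `json:"aggregation"`
    	Timestep    int     `json:"timestep"`
    	Peak        float64 `json:"peak"`
    	Normal      float64 `json:"normal"`
    }

    func main() {
    	// "normal" is missing here: with value (non-pointer) fields it
    	// becomes 0 instead of nil, so its absence can no longer be detected.
    	var mc MetricConfig
    	_ = json.Unmarshal([]byte(`{"name":"flops_any","aggregation":"avg","timestep":60,"peak":9000}`), &mc)
    	fmt.Println(mc.Normal) // 0
    }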

View File

@@ -80,10 +80,10 @@ func (s *Series) MarshalJSON() ([]byte, error) {
 	buf = append(buf, s.Hostname...)
 	buf = append(buf, '"')
 	if s.Id != nil {
-		buf = append(buf, `,"id":`...)
-		buf = strconv.AppendInt(buf, int64(*s.Id), 10)
+		buf = append(buf, `,"id":"`...)
+		buf = append(buf, *s.Id...)
+		buf = append(buf, '"')
 	}
-	if s.Statistics != nil {
 	buf = append(buf, `,"statistics":{"min":`...)
 	buf = strconv.AppendFloat(buf, s.Statistics.Min, 'f', 2, 64)
 	buf = append(buf, `,"avg":`...)
@@ -91,7 +91,6 @@ func (s *Series) MarshalJSON() ([]byte, error) {
 	buf = append(buf, `,"max":`...)
 	buf = strconv.AppendFloat(buf, s.Statistics.Max, 'f', 2, 64)
 	buf = append(buf, '}')
-	}
 	buf = append(buf, `,"data":[`...)
 	for i := 0; i < len(s.Data); i++ {
 		if i != 0 {
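
With the nil check on Statistics gone, every series now emits a statistics object, and ids are written in quotes. A small runnable sketch that mirrors the append-based encoding above for the id and statistics fields (hostname, id and values are made up; stats are fixed to two decimals by AppendFloat):

    package main

    import (
    	"fmt"
    	"strconv"
    )

    func main() {
    	buf := []byte(`{"hostname":"host123"`)
    	id := "0"
    	buf = append(buf, `,"id":"`...)
    	buf = append(buf, id...)
    	buf = append(buf, '"')
    	buf = append(buf, `,"statistics":{"min":`...)
    	buf = strconv.AppendFloat(buf, 0.1, 'f', 2, 64)
    	buf = append(buf, `,"avg":`...)
    	buf = strconv.AppendFloat(buf, 0.2, 'f', 2, 64)
    	buf = append(buf, `,"max":`...)
    	buf = strconv.AppendFloat(buf, 0.3, 'f', 2, 64)
    	buf = append(buf, `}}`...)
    	fmt.Println(string(buf))
    	// {"hostname":"host123","id":"0","statistics":{"min":0.10,"avg":0.20,"max":0.30}}
    }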

View File

@@ -91,7 +91,7 @@ var JobDefaults BaseJob = BaseJob{
 type Unit struct {
 	Base   string  `json:"base"`
-	Prefix string  `json:"prefix"`
+	Prefix *string `json:"prefix,omitempty"`
 }

 // JobStatistics model

View File

@@ -16,16 +16,15 @@ type JobData map[string]map[MetricScope]*JobMetric
 type JobMetric struct {
 	Unit             Unit         `json:"unit"`
-	Scope            MetricScope  `json:"scope"`
 	Timestep         int          `json:"timestep"`
 	Series           []Series     `json:"series"`
-	StatisticsSeries *StatsSeries `json:"statisticsSeries"`
+	StatisticsSeries *StatsSeries `json:"statisticsSeries,omitempty"`
 }

 type Series struct {
 	Hostname   string            `json:"hostname"`
-	Id         *int              `json:"id,omitempty"`
-	Statistics *MetricStatistics `json:"statistics"`
+	Id         *string           `json:"id,omitempty"`
+	Statistics MetricStatistics  `json:"statistics"`
 	Data       []Float           `json:"data"`
 }
@@ -218,17 +217,12 @@ func (jd *JobData) AddNodeScope(metric string) bool {
 	nodeJm := &JobMetric{
 		Unit:     jm.Unit,
-		Scope:    MetricScopeNode,
 		Timestep: jm.Timestep,
 		Series:   make([]Series, 0, len(hosts)),
 	}
 	for hostname, series := range hosts {
 		min, sum, max := math.MaxFloat32, 0.0, -math.MaxFloat32
 		for _, series := range series {
-			if series.Statistics == nil {
-				min, sum, max = math.NaN(), math.NaN(), math.NaN()
-				break
-			}
 			sum += series.Statistics.Avg
 			min = math.Min(min, series.Statistics.Min)
 			max = math.Max(max, series.Statistics.Max)
@@ -259,7 +253,7 @@ func (jd *JobData) AddNodeScope(metric string) bool {
 		nodeJm.Series = append(nodeJm.Series, Series{
 			Hostname:   hostname,
-			Statistics: &MetricStatistics{Min: min, Avg: sum / float64(len(series)), Max: max},
+			Statistics: MetricStatistics{Min: min, Avg: sum / float64(len(series)), Max: max},
 			Data:       data,
 		})
 	}

View File

@@ -94,6 +94,7 @@
         "timestep",
         "aggregation",
         "peak",
+        "normal",
         "caution",
         "alert"
     ]

View File

@@ -146,7 +146,8 @@ func ConvertUnitString(us string) schema.Unit {
 	u := NewUnit(us)
 	p := u.getPrefix()
 	if p.Prefix() != "" {
-		nu.Prefix = p.Prefix()
+		prefix := p.Prefix()
+		nu.Prefix = &prefix
 	}
 	m := u.getMeasure()
 	d := u.getUnitDenominator()
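
The copy-then-address pattern above is forced by the new pointer type: &p.Prefix() would not compile, since Go cannot take the address of a function's return value. A sketch of the pattern with a local stand-in for the refactored schema.Unit:

    package main

    import "fmt"

    // Stand-in for the refactored schema.Unit; Prefix is now a pointer.
    type Unit struct {
    	Base   string  `json:"base"`
    	Prefix *string `json:"prefix,omitempty"`
    }

    func main() {
    	var nu Unit
    	nu.Base = "B/s"
    	prefix := "G" // copy into an addressable variable first
    	nu.Prefix = &prefix
    	fmt.Println(nu.Base, *nu.Prefix) // B/s G
    }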

View File

@@ -369,12 +369,11 @@ func TestRestApi(t *testing.T) {
 		"load_one": map[schema.MetricScope]*schema.JobMetric{
 			schema.MetricScopeNode: {
 				Unit:     schema.Unit{Base: "load"},
-				Scope:    schema.MetricScopeNode,
 				Timestep: 60,
 				Series: []schema.Series{
 					{
 						Hostname:   "host123",
-						Statistics: &schema.MetricStatistics{Min: 0.1, Avg: 0.2, Max: 0.3},
+						Statistics: schema.MetricStatistics{Min: 0.1, Avg: 0.2, Max: 0.3},
 						Data:       []schema.Float{0.1, 0.1, 0.1, 0.2, 0.2, 0.2, 0.3, 0.3, 0.3},
 					},
 				},

View File

@@ -23,19 +23,19 @@ import (
 // 	Accelerators []*Accelerator `json:"accelerators"`
 // }

-// type SubCluster struct {
-// 	Name            string `json:"name"`
-// 	Nodes           string `json:"nodes"`
-// 	NumberOfNodes   int    `json:"numberOfNodes"`
-// 	ProcessorType   string `json:"processorType"`
-// 	SocketsPerNode  int    `json:"socketsPerNode"`
-// 	CoresPerSocket  int    `json:"coresPerSocket"`
-// 	ThreadsPerCore  int    `json:"threadsPerCore"`
-// 	FlopRateScalar  int    `json:"flopRateScalar"`
-// 	FlopRateSimd    int    `json:"flopRateSimd"`
-// 	MemoryBandwidth int    `json:"memoryBandwidth"`
-// 	Topology        *Topology `json:"topology"`
-// }
+type SubCluster struct {
+	Name            string           `json:"name"`
+	Nodes           string           `json:"nodes"`
+	NumberOfNodes   int              `json:"numberOfNodes"`
+	ProcessorType   string           `json:"processorType"`
+	SocketsPerNode  int              `json:"socketsPerNode"`
+	CoresPerSocket  int              `json:"coresPerSocket"`
+	ThreadsPerCore  int              `json:"threadsPerCore"`
+	FlopRateScalar  int              `json:"flopRateScalar"`
+	FlopRateSimd    int              `json:"flopRateSimd"`
+	MemoryBandwidth int              `json:"memoryBandwidth"`
+	Topology        *schema.Topology `json:"topology"`
+}

 // type SubClusterConfig struct {
 // 	Name string `json:"name"`
@@ -49,17 +49,17 @@ type MetricConfig struct {
 	Name        string                     `json:"name"`
 	Unit        string                     `json:"unit"`
 	Scope       schema.MetricScope         `json:"scope"`
-	Aggregation *string                    `json:"aggregation"`
+	Aggregation string                     `json:"aggregation"`
 	Timestep    int                        `json:"timestep"`
-	Peak        *float64                   `json:"peak"`
-	Normal      *float64                   `json:"normal"`
-	Caution     *float64                   `json:"caution"`
-	Alert       *float64                   `json:"alert"`
+	Peak        float64                    `json:"peak"`
+	Normal      float64                    `json:"normal"`
+	Caution     float64                    `json:"caution"`
+	Alert       float64                    `json:"alert"`
 	SubClusters []*schema.SubClusterConfig `json:"subClusters"`
 }

 type Cluster struct {
 	Name         string               `json:"name"`
 	MetricConfig []*MetricConfig      `json:"metricConfig"`
-	SubClusters  []*schema.SubCluster `json:"subClusters"`
+	SubClusters  []*SubCluster        `json:"subClusters"`
 }

View File

@@ -79,7 +79,7 @@ func GetCluster(cluster string) *Cluster {
 	return nil
 }

-func GetSubCluster(cluster, subcluster string) *schema.SubCluster {
+func GetSubCluster(cluster, subcluster string) *SubCluster {
 	for _, c := range Clusters {
 		if c.Name == cluster {

View File

@@ -11,6 +11,7 @@ import (
 	"fmt"
 	"log"
 	"os"
+	"path/filepath"

 	"github.com/ClusterCockpit/cc-backend/pkg/schema"
 	"github.com/ClusterCockpit/cc-backend/pkg/units"
@@ -71,20 +72,21 @@ func deepCopyJobMeta(j *JobMeta) schema.JobMeta {
 }

 func deepCopyJobData(d *JobData) schema.JobData {
-	var dn schema.JobData
+	var dn schema.JobData = make(schema.JobData)

 	for k, v := range *d {
 		for mk, mv := range v {
 			var mn schema.JobMetric
 			mn.Unit = units.ConvertUnitString(mv.Unit)
-			mn.Scope = mv.Scope
 			mn.Timestep = mv.Timestep
-			mn.Series = make([]schema.Series, len(mv.Series))

 			for _, v := range mv.Series {
 				var sn schema.Series
 				sn.Hostname = v.Hostname
-				sn.Id = v.Id
-				sn.Statistics = &schema.MetricStatistics{
+				id := fmt.Sprint(*v.Id)
+				sn.Id = &id
+				sn.Statistics = schema.MetricStatistics{
 					Avg: v.Statistics.Avg,
 					Min: v.Statistics.Min,
 					Max: v.Statistics.Max}
@@ -94,6 +96,7 @@ func deepCopyJobData(d *JobData) schema.JobData {
 				mn.Series = append(mn.Series, sn)
 			}

+			dn[k] = make(map[schema.MetricScope]*schema.JobMetric)
 			dn[k][mk] = &mn
 		}
 	}
@@ -105,15 +108,49 @@ func deepCopyClusterConfig(co *Cluster) schema.Cluster {
 	var cn schema.Cluster
 	cn.Name = co.Name
-	cn.SubClusters = co.SubClusters
+
+	for _, sco := range co.SubClusters {
+		var scn schema.SubCluster
+		scn.Name = sco.Name
+		if sco.Nodes == "" {
+			scn.Nodes = "*"
+		} else {
+			scn.Nodes = sco.Nodes
+		}
+		scn.ProcessorType = sco.ProcessorType
+		scn.SocketsPerNode = sco.SocketsPerNode
+		scn.CoresPerSocket = sco.CoresPerSocket
+		scn.ThreadsPerCore = sco.ThreadsPerCore
+		prefix := "G"
+		scn.FlopRateScalar = schema.MetricValue{
+			Unit:  schema.Unit{Base: "F/s", Prefix: &prefix},
+			Value: float64(sco.FlopRateScalar)}
+		scn.FlopRateSimd = schema.MetricValue{
+			Unit:  schema.Unit{Base: "F/s", Prefix: &prefix},
+			Value: float64(sco.FlopRateSimd)}
+		scn.MemoryBandwidth = schema.MetricValue{
+			Unit:  schema.Unit{Base: "B/s", Prefix: &prefix},
+			Value: float64(sco.MemoryBandwidth)}
+		scn.Topology = *sco.Topology
+		cn.SubClusters = append(cn.SubClusters, &scn)
+	}

 	for _, mco := range co.MetricConfig {
 		var mcn schema.MetricConfig
 		mcn.Name = mco.Name
 		mcn.Scope = mco.Scope
-		mcn.Aggregation = mco.Aggregation
+		if mco.Aggregation == "" {
+			fmt.Println("Property aggregation missing! Please review file!")
+			mcn.Aggregation = "sum"
+		} else {
+			mcn.Aggregation = mco.Aggregation
+		}
 		mcn.Timestep = mco.Timestep
 		mcn.Unit = units.ConvertUnitString(mco.Unit)
+		mcn.Peak = mco.Peak
+		mcn.Normal = mco.Normal
+		mcn.Caution = mco.Caution
+		mcn.Alert = mco.Alert
+		mcn.SubClusters = mco.SubClusters

 		cn.MetricConfig = append(cn.MetricConfig, &mcn)
 	}
@@ -167,8 +204,12 @@ func main() {
 	for job := range ar.Iter() {
 		fmt.Printf("Job %d\n", job.JobID)
-		root := fmt.Sprintf("%s/%s/", dstPath, job.Cluster)
-		f, err := os.Create(getPath(job, root, "meta.json"))
+		path := getPath(job, dstPath, "meta.json")
+		err = os.MkdirAll(filepath.Dir(path), 0750)
+		if err != nil {
+			log.Fatal(err)
+		}
+		f, err := os.Create(path)
 		if err != nil {
 			log.Fatal(err)
 		}
@@ -181,14 +222,13 @@ func main() {
 			log.Fatal(err)
 		}

-		f, err = os.Create(getPath(job, root, "data.json"))
+		f, err = os.Create(getPath(job, dstPath, "data.json"))
 		if err != nil {
 			log.Fatal(err)
 		}

-		sroot := fmt.Sprintf("%s/%s/", srcPath, job.Cluster)
 		var jd *JobData
-		jd, err = loadJobData(getPath(job, sroot, "data.json"))
+		jd, err = loadJobData(getPath(job, srcPath, "data.json"))
 		if err != nil {
 			log.Fatal(err)
 		}
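
One caveat in deepCopyJobData above: fmt.Sprint(*v.Id) dereferences the old *int id unconditionally, so an archived series without an id (which the old `json:"id,omitempty"` tag permitted) would panic. If such data exists, a guard along these lines would be needed — a hypothetical hardening, not part of the commit:

    // Hypothetical nil-safe variant of the id conversion (requires "strconv").
    func intIDToString(id *int) *string {
    	if id == nil {
    		return nil
    	}
    	s := strconv.Itoa(*id)
    	return &s
    }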