fix: analysis metric histogram normalized by scope

- native acc metrics normalized by accHours
- native core metrics normalized by coreHours
Christoph Kluge 2023-08-24 11:52:36 +02:00
parent 2f35482aff
commit 6a1e35107f
7 changed files with 336 additions and 50 deletions
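In short: when the analysis view builds a metric histogram, each job's average is now weighted by the time weight that matches the metric's native scope: accHours for accelerator-scope metrics, coreHours for core-scope metrics, and nodeHours for everything else. A minimal Go sketch of that selection rule, using simplified stand-in types rather than the repository's actual code (the real switch is the one added to binsFromFootprint in the last file diff below):

package main

import "fmt"

// TimeWeights is a simplified stand-in for the model.TimeWeights type
// introduced by this commit.
type TimeWeights struct {
	NodeHours, AccHours, CoreHours []float64
}

// weightsForScope mirrors the scope switch added to binsFromFootprint:
// any scope other than 'core' or 'accelerator' falls back to node hours.
func weightsForScope(tw TimeWeights, scope string) []float64 {
	switch scope {
	case "core":
		return tw.CoreHours
	case "accelerator":
		return tw.AccHours
	default:
		return tw.NodeHours
	}
}

func main() {
	tw := TimeWeights{NodeHours: []float64{2}, AccHours: []float64{8}, CoreHours: []float64{256}}
	fmt.Println(weightsForScope(tw, "accelerator")) // [8]
}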

View File

@@ -156,12 +156,18 @@ type MetricFootprints {
}
type Footprints {
timeweights: [NullableFloat!]!
timeWeights: TimeWeights!
metrics: [MetricFootprints!]!
}
type TimeWeights {
nodeHours: [NullableFloat!]!
accHours: [NullableFloat!]!
coreHours: [NullableFloat!]!
}
enum Aggregate { USER, PROJECT, CLUSTER }
enum Weights { NODE_COUNT, NODE_HOURS }
enum Weights { NODE_COUNT, NODE_HOURS, CORE_COUNT, CORE_HOURS }
type NodeMetrics {
host: String!

View File

@@ -69,7 +69,7 @@ type ComplexityRoot struct {
Footprints struct {
Metrics func(childComplexity int) int
Timeweights func(childComplexity int) int
TimeWeights func(childComplexity int) int
}
HistoPoint struct {
@@ -265,6 +265,12 @@ type ComplexityRoot struct {
To func(childComplexity int) int
}
TimeWeights struct {
AccHours func(childComplexity int) int
CoreHours func(childComplexity int) int
NodeHours func(childComplexity int) int
}
Topology struct {
Accelerators func(childComplexity int) int
Core func(childComplexity int) int
@@ -406,12 +412,12 @@ func (e *executableSchema) Complexity(typeName, field string, childComplexity in
return e.complexity.Footprints.Metrics(childComplexity), true
case "Footprints.timeweights":
if e.complexity.Footprints.Timeweights == nil {
case "Footprints.timeWeights":
if e.complexity.Footprints.TimeWeights == nil {
break
}
return e.complexity.Footprints.Timeweights(childComplexity), true
return e.complexity.Footprints.TimeWeights(childComplexity), true
case "HistoPoint.count":
if e.complexity.HistoPoint.Count == nil {
@@ -1356,6 +1362,27 @@ func (e *executableSchema) Complexity(typeName, field string, childComplexity in
return e.complexity.TimeRangeOutput.To(childComplexity), true
case "TimeWeights.accHours":
if e.complexity.TimeWeights.AccHours == nil {
break
}
return e.complexity.TimeWeights.AccHours(childComplexity), true
case "TimeWeights.coreHours":
if e.complexity.TimeWeights.CoreHours == nil {
break
}
return e.complexity.TimeWeights.CoreHours(childComplexity), true
case "TimeWeights.nodeHours":
if e.complexity.TimeWeights.NodeHours == nil {
break
}
return e.complexity.TimeWeights.NodeHours(childComplexity), true
case "Topology.accelerators":
if e.complexity.Topology.Accelerators == nil {
break
@@ -1703,12 +1730,18 @@ type MetricFootprints {
}
type Footprints {
timeweights: [NullableFloat!]!
timeWeights: TimeWeights!
metrics: [MetricFootprints!]!
}
type TimeWeights {
nodeHours: [NullableFloat!]!
accHours: [NullableFloat!]!
coreHours: [NullableFloat!]!
}
enum Aggregate { USER, PROJECT, CLUSTER }
enum Weights { NODE_COUNT, NODE_HOURS }
enum Weights { NODE_COUNT, NODE_HOURS, CORE_COUNT, CORE_HOURS }
type NodeMetrics {
host: String!
@@ -1836,7 +1869,7 @@ type JobsStatistics {
shortJobs: Int! # Number of jobs with a duration of less than duration
totalWalltime: Int! # Sum of the duration of all matched jobs in hours
totalNodeHours: Int! # Sum of the node hours of all matched jobs
totalCoreHours: Int! # Sum of the core hours of all matched jobs
totalCoreHours: Int! # Sum of the core hours of all matched jobs <-- use this instead of totalJobs in the totalJobs histograms + as weights for the analysis metric histograms
totalAccHours: Int! # Sum of the gpu hours of all matched jobs
histDuration: [HistoPoint!]! # value: hour, count: number of jobs with a rounded duration of value
histNumNodes: [HistoPoint!]! # value: number of nodes, count: number of jobs with that number of nodes
@@ -2790,8 +2823,8 @@ func (ec *executionContext) fieldContext_Count_count(ctx context.Context, field
return fc, nil
}
func (ec *executionContext) _Footprints_timeweights(ctx context.Context, field graphql.CollectedField, obj *model.Footprints) (ret graphql.Marshaler) {
fc, err := ec.fieldContext_Footprints_timeweights(ctx, field)
func (ec *executionContext) _Footprints_timeWeights(ctx context.Context, field graphql.CollectedField, obj *model.Footprints) (ret graphql.Marshaler) {
fc, err := ec.fieldContext_Footprints_timeWeights(ctx, field)
if err != nil {
return graphql.Null
}
@@ -2804,7 +2837,7 @@ func (ec *executionContext) _Footprints_timeweights(ctx context.Context, field g
}()
resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
ctx = rctx // use context from middleware stack in children
return obj.Timeweights, nil
return obj.TimeWeights, nil
})
if err != nil {
ec.Error(ctx, err)
@@ -2816,19 +2849,27 @@ func (ec *executionContext) _Footprints_timeweights(ctx context.Context, field g
}
return graphql.Null
}
res := resTmp.([]schema.Float)
res := resTmp.(*model.TimeWeights)
fc.Result = res
return ec.marshalNNullableFloat2ᚕgithubᚗcomᚋClusterCockpitᚋccᚑbackendᚋpkgᚋschemaᚐFloatᚄ(ctx, field.Selections, res)
return ec.marshalNTimeWeights2ᚖgithubᚗcomᚋClusterCockpitᚋccᚑbackendᚋinternalᚋgraphᚋmodelᚐTimeWeights(ctx, field.Selections, res)
}
func (ec *executionContext) fieldContext_Footprints_timeweights(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) {
func (ec *executionContext) fieldContext_Footprints_timeWeights(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) {
fc = &graphql.FieldContext{
Object: "Footprints",
Field: field,
IsMethod: false,
IsResolver: false,
Child: func(ctx context.Context, field graphql.CollectedField) (*graphql.FieldContext, error) {
return nil, errors.New("field of type NullableFloat does not have child fields")
switch field.Name {
case "nodeHours":
return ec.fieldContext_TimeWeights_nodeHours(ctx, field)
case "accHours":
return ec.fieldContext_TimeWeights_accHours(ctx, field)
case "coreHours":
return ec.fieldContext_TimeWeights_coreHours(ctx, field)
}
return nil, fmt.Errorf("no field named %q was found under type TimeWeights", field.Name)
},
}
return fc, nil
@@ -6994,8 +7035,8 @@ func (ec *executionContext) fieldContext_Query_jobsFootprints(ctx context.Contex
IsResolver: true,
Child: func(ctx context.Context, field graphql.CollectedField) (*graphql.FieldContext, error) {
switch field.Name {
case "timeweights":
return ec.fieldContext_Footprints_timeweights(ctx, field)
case "timeWeights":
return ec.fieldContext_Footprints_timeWeights(ctx, field)
case "metrics":
return ec.fieldContext_Footprints_metrics(ctx, field)
}
@@ -8930,6 +8971,138 @@ func (ec *executionContext) fieldContext_TimeRangeOutput_to(ctx context.Context,
return fc, nil
}
func (ec *executionContext) _TimeWeights_nodeHours(ctx context.Context, field graphql.CollectedField, obj *model.TimeWeights) (ret graphql.Marshaler) {
fc, err := ec.fieldContext_TimeWeights_nodeHours(ctx, field)
if err != nil {
return graphql.Null
}
ctx = graphql.WithFieldContext(ctx, fc)
defer func() {
if r := recover(); r != nil {
ec.Error(ctx, ec.Recover(ctx, r))
ret = graphql.Null
}
}()
resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
ctx = rctx // use context from middleware stack in children
return obj.NodeHours, nil
})
if err != nil {
ec.Error(ctx, err)
return graphql.Null
}
if resTmp == nil {
if !graphql.HasFieldError(ctx, fc) {
ec.Errorf(ctx, "must not be null")
}
return graphql.Null
}
res := resTmp.([]schema.Float)
fc.Result = res
return ec.marshalNNullableFloat2ᚕgithubᚗcomᚋClusterCockpitᚋccᚑbackendᚋpkgᚋschemaᚐFloatᚄ(ctx, field.Selections, res)
}
func (ec *executionContext) fieldContext_TimeWeights_nodeHours(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) {
fc = &graphql.FieldContext{
Object: "TimeWeights",
Field: field,
IsMethod: false,
IsResolver: false,
Child: func(ctx context.Context, field graphql.CollectedField) (*graphql.FieldContext, error) {
return nil, errors.New("field of type NullableFloat does not have child fields")
},
}
return fc, nil
}
func (ec *executionContext) _TimeWeights_accHours(ctx context.Context, field graphql.CollectedField, obj *model.TimeWeights) (ret graphql.Marshaler) {
fc, err := ec.fieldContext_TimeWeights_accHours(ctx, field)
if err != nil {
return graphql.Null
}
ctx = graphql.WithFieldContext(ctx, fc)
defer func() {
if r := recover(); r != nil {
ec.Error(ctx, ec.Recover(ctx, r))
ret = graphql.Null
}
}()
resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
ctx = rctx // use context from middleware stack in children
return obj.AccHours, nil
})
if err != nil {
ec.Error(ctx, err)
return graphql.Null
}
if resTmp == nil {
if !graphql.HasFieldError(ctx, fc) {
ec.Errorf(ctx, "must not be null")
}
return graphql.Null
}
res := resTmp.([]schema.Float)
fc.Result = res
return ec.marshalNNullableFloat2ᚕgithubᚗcomᚋClusterCockpitᚋccᚑbackendᚋpkgᚋschemaᚐFloatᚄ(ctx, field.Selections, res)
}
func (ec *executionContext) fieldContext_TimeWeights_accHours(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) {
fc = &graphql.FieldContext{
Object: "TimeWeights",
Field: field,
IsMethod: false,
IsResolver: false,
Child: func(ctx context.Context, field graphql.CollectedField) (*graphql.FieldContext, error) {
return nil, errors.New("field of type NullableFloat does not have child fields")
},
}
return fc, nil
}
func (ec *executionContext) _TimeWeights_coreHours(ctx context.Context, field graphql.CollectedField, obj *model.TimeWeights) (ret graphql.Marshaler) {
fc, err := ec.fieldContext_TimeWeights_coreHours(ctx, field)
if err != nil {
return graphql.Null
}
ctx = graphql.WithFieldContext(ctx, fc)
defer func() {
if r := recover(); r != nil {
ec.Error(ctx, ec.Recover(ctx, r))
ret = graphql.Null
}
}()
resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
ctx = rctx // use context from middleware stack in children
return obj.CoreHours, nil
})
if err != nil {
ec.Error(ctx, err)
return graphql.Null
}
if resTmp == nil {
if !graphql.HasFieldError(ctx, fc) {
ec.Errorf(ctx, "must not be null")
}
return graphql.Null
}
res := resTmp.([]schema.Float)
fc.Result = res
return ec.marshalNNullableFloat2ᚕgithubᚗcomᚋClusterCockpitᚋccᚑbackendᚋpkgᚋschemaᚐFloatᚄ(ctx, field.Selections, res)
}
func (ec *executionContext) fieldContext_TimeWeights_coreHours(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) {
fc = &graphql.FieldContext{
Object: "TimeWeights",
Field: field,
IsMethod: false,
IsResolver: false,
Child: func(ctx context.Context, field graphql.CollectedField) (*graphql.FieldContext, error) {
return nil, errors.New("field of type NullableFloat does not have child fields")
},
}
return fc, nil
}
func (ec *executionContext) _Topology_node(ctx context.Context, field graphql.CollectedField, obj *schema.Topology) (ret graphql.Marshaler) {
fc, err := ec.fieldContext_Topology_node(ctx, field)
if err != nil {
@@ -11848,10 +12021,8 @@ func (ec *executionContext) _Footprints(ctx context.Context, sel ast.SelectionSe
switch field.Name {
case "__typename":
out.Values[i] = graphql.MarshalString("Footprints")
case "timeweights":
out.Values[i] = ec._Footprints_timeweights(ctx, field, obj)
case "timeWeights":
out.Values[i] = ec._Footprints_timeWeights(ctx, field, obj)
if out.Values[i] == graphql.Null {
out.Invalids++
}
@@ -13600,6 +13771,55 @@ func (ec *executionContext) _TimeRangeOutput(ctx context.Context, sel ast.Select
return out
}
var timeWeightsImplementors = []string{"TimeWeights"}
func (ec *executionContext) _TimeWeights(ctx context.Context, sel ast.SelectionSet, obj *model.TimeWeights) graphql.Marshaler {
fields := graphql.CollectFields(ec.OperationContext, sel, timeWeightsImplementors)
out := graphql.NewFieldSet(fields)
deferred := make(map[string]*graphql.FieldSet)
for i, field := range fields {
switch field.Name {
case "__typename":
out.Values[i] = graphql.MarshalString("TimeWeights")
case "nodeHours":
out.Values[i] = ec._TimeWeights_nodeHours(ctx, field, obj)
if out.Values[i] == graphql.Null {
out.Invalids++
}
case "accHours":
out.Values[i] = ec._TimeWeights_accHours(ctx, field, obj)
if out.Values[i] == graphql.Null {
out.Invalids++
}
case "coreHours":
out.Values[i] = ec._TimeWeights_coreHours(ctx, field, obj)
if out.Values[i] == graphql.Null {
out.Invalids++
}
default:
panic("unknown field " + strconv.Quote(field.Name))
}
}
out.Dispatch(ctx)
if out.Invalids > 0 {
return graphql.Null
}
atomic.AddInt32(&ec.deferred, int32(len(deferred)))
for label, dfs := range deferred {
ec.processDeferredGroup(graphql.DeferredGroup{
Label: label,
Path: graphql.GetPath(ctx),
FieldSet: dfs,
Context: ctx,
})
}
return out
}
var topologyImplementors = []string{"Topology"}
func (ec *executionContext) _Topology(ctx context.Context, sel ast.SelectionSet, obj *schema.Topology) graphql.Marshaler {
@@ -15333,6 +15553,16 @@ func (ec *executionContext) marshalNTime2timeᚐTime(ctx context.Context, sel as
return res
}
func (ec *executionContext) marshalNTimeWeights2ᚖgithubᚗcomᚋClusterCockpitᚋccᚑbackendᚋinternalᚋgraphᚋmodelᚐTimeWeights(ctx context.Context, sel ast.SelectionSet, v *model.TimeWeights) graphql.Marshaler {
if v == nil {
if !graphql.HasFieldError(ctx, graphql.GetFieldContext(ctx)) {
ec.Errorf(ctx, "the requested element is null which the schema does not allow")
}
return graphql.Null
}
return ec._TimeWeights(ctx, sel, v)
}
func (ec *executionContext) marshalNTopology2githubᚗcomᚋClusterCockpitᚋccᚑbackendᚋpkgᚋschemaᚐTopology(ctx context.Context, sel ast.SelectionSet, v schema.Topology) graphql.Marshaler {
return ec._Topology(ctx, sel, &v)
}

View File

@@ -22,7 +22,7 @@ type FloatRange struct {
}
type Footprints struct {
Timeweights []schema.Float `json:"timeweights"`
TimeWeights *TimeWeights `json:"timeWeights"`
Metrics []*MetricFootprints `json:"metrics"`
}
@@ -133,6 +133,12 @@ type TimeRangeOutput struct {
To time.Time `json:"to"`
}
type TimeWeights struct {
NodeHours []schema.Float `json:"nodeHours"`
AccHours []schema.Float `json:"accHours"`
CoreHours []schema.Float `json:"coreHours"`
}
type User struct {
Username string `json:"username"`
Name string `json:"name"`
@@ -228,16 +234,20 @@ type Weights string
const (
WeightsNodeCount Weights = "NODE_COUNT"
WeightsNodeHours Weights = "NODE_HOURS"
WeightsCoreCount Weights = "CORE_COUNT"
WeightsCoreHours Weights = "CORE_HOURS"
)
var AllWeights = []Weights{
WeightsNodeCount,
WeightsNodeHours,
WeightsCoreCount,
WeightsCoreHours,
}
func (e Weights) IsValid() bool {
switch e {
case WeightsNodeCount, WeightsNodeHours:
case WeightsNodeCount, WeightsNodeHours, WeightsCoreCount, WeightsCoreHours:
return true
}
return false
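For illustration, a hedged usage sketch of the extended Weights enum (assuming the cc-backend module is importable; the enum values are exactly those defined above):

package main

import (
	"fmt"

	"github.com/ClusterCockpit/cc-backend/internal/graph/model"
)

func main() {
	// CORE_HOURS is one of the two values this commit adds; GPU_HOURS is a
	// deliberately invalid example value.
	for _, w := range []model.Weights{model.WeightsCoreHours, model.Weights("GPU_HOURS")} {
		fmt.Println(w, w.IsValid()) // CORE_HOURS true, GPU_HOURS false
	}
}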

View File

@@ -13,6 +13,7 @@ import (
"github.com/99designs/gqlgen/graphql"
"github.com/ClusterCockpit/cc-backend/internal/graph/model"
"github.com/ClusterCockpit/cc-backend/internal/metricdata"
"github.com/ClusterCockpit/cc-backend/pkg/archive"
"github.com/ClusterCockpit/cc-backend/pkg/log"
"github.com/ClusterCockpit/cc-backend/pkg/schema"
)
@@ -106,9 +107,11 @@ func (r *queryResolver) jobsFootprints(ctx context.Context, filter []*model.JobF
avgs[i] = make([]schema.Float, 0, len(jobs))
}
nodehours := make([]schema.Float, 0, len(jobs))
acchours := make([]schema.Float, 0, len(jobs))
hwthours := make([]schema.Float, 0, len(jobs))
timeweights := new(model.TimeWeights)
timeweights.NodeHours = make([]schema.Float, 0, len(jobs))
timeweights.AccHours = make([]schema.Float, 0, len(jobs))
timeweights.CoreHours = make([]schema.Float, 0, len(jobs))
for _, job := range jobs {
if job.MonitoringStatus == schema.MonitoringStatusDisabled || job.MonitoringStatus == schema.MonitoringStatusArchivingFailed {
continue
@@ -120,16 +123,16 @@ func (r *queryResolver) jobsFootprints(ctx context.Context, filter []*model.JobF
}
// #166 collect arrays: Null values or no null values?
nodehours = append(nodehours, schema.Float(float64(job.Duration)/60.0*float64(job.NumNodes)))
timeweights.NodeHours = append(timeweights.NodeHours, schema.Float(float64(job.Duration)/60.0*float64(job.NumNodes)))
if job.NumAcc > 0 {
acchours = append(acchours, schema.Float(float64(job.Duration)/60.0*float64(job.NumAcc)))
timeweights.AccHours = append(timeweights.AccHours, schema.Float(float64(job.Duration)/60.0*float64(job.NumAcc)))
} else {
acchours = append(acchours, schema.Float(0.0))
timeweights.AccHours = append(timeweights.AccHours, schema.Float(1.0))
}
if job.NumHWThreads > 0 {
hwthours = append(hwthours, schema.Float(float64(job.Duration)/60.0*float64(job.NumHWThreads)))
timeweights.CoreHours = append(timeweights.CoreHours, schema.Float(float64(job.Duration)/60.0*float64(numCoresForJob(job))))
} else {
hwthours = append(hwthours, schema.Float(0.0))
timeweights.CoreHours = append(timeweights.CoreHours, schema.Float(1.0))
}
}
@@ -142,11 +145,34 @@ func (r *queryResolver) jobsFootprints(ctx context.Context, filter []*model.JobF
}
return &model.Footprints{
Timeweights: nodehours,
TimeWeights: timeweights,
Metrics: res,
}, nil
}
func numCoresForJob(job *schema.Job) (numCores int) {
subcluster, scerr := archive.GetSubCluster(job.Cluster, job.SubCluster)
if scerr != nil {
return 1
}
totalJobCores := 0
topology := subcluster.Topology
for _, host := range job.Resources {
hwthreads := host.HWThreads
if hwthreads == nil {
hwthreads = topology.Node
}
hostCores, _ := topology.GetCoresFromHWThreads(hwthreads)
totalJobCores += len(hostCores)
}
return totalJobCores
}
func requireField(ctx context.Context, name string) bool {
fields := graphql.CollectAllFields(ctx)
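The resolver above fills the three weight vectors once per job. Note the fallback to a neutral weight of 1.0 when a job reports no accelerators or hardware threads, so such jobs still count once in the histogram instead of disappearing. A minimal Go sketch of the rule with simplified stand-in types (the real code works on schema.Job and schema.Float, and derives the core count via numCoresForJob and the subcluster topology):

package main

import "fmt"

type job struct {
	Duration float64 // divided by 60.0 below, exactly as in the resolver
	NumNodes int
	NumAcc   int
	NumCores int // stand-in for numCoresForJob(job)
}

func timeWeights(j job) (nodeHours, accHours, coreHours float64) {
	nodeHours = j.Duration / 60.0 * float64(j.NumNodes)
	accHours, coreHours = 1.0, 1.0 // neutral weights if the resource was not used
	if j.NumAcc > 0 {
		accHours = j.Duration / 60.0 * float64(j.NumAcc)
	}
	if j.NumCores > 0 {
		coreHours = j.Duration / 60.0 * float64(j.NumCores)
	}
	return
}

func main() {
	fmt.Println(timeWeights(job{Duration: 120, NumNodes: 2, NumAcc: 0, NumCores: 256}))
	// Output: 4 1 512
}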

View File

@@ -182,7 +182,7 @@ func LoadAverages(
ctx context.Context) error {
if job.State != schema.JobStateRunning && useArchive {
return archive.LoadAveragesFromArchive(job, metrics, data) // #166 change also here
return archive.LoadAveragesFromArchive(job, metrics, data) // #166 change also here?
}
repo, ok := metricDataRepos[job.Cluster]

View File

@@ -78,7 +78,7 @@
query: gql`
query($jobFilters: [JobFilter!]!, $metrics: [String!]!) {
footprints: jobsFootprints(filter: $jobFilters, metrics: $metrics) {
timeweights,
timeWeights { nodeHours, accHours, coreHours },
metrics { metric, data }
}
}`,
@@ -244,8 +244,9 @@
<Row>
<Col>
<Card body>
These histograms show the distribution of the averages of all jobs matching the filters. Each job/average is weighted by its node hours.
Note that some metrics could be disabled for specific subclusters as per metriConfig and thus could affect shown average values.
These histograms show the distribution of the averages of all jobs matching the filters. Each job/average is weighted by its node hours by default
(accelerator hours for metrics with native accelerator scope, core hours for metrics with native core scope).
Note that some metrics could be disabled for specific subclusters as per metricConfig and thus could affect shown average values.
</Card>
<br/>
</Col>
@@ -257,7 +258,8 @@
let:width
renderFor="analysis"
items={metricsInHistograms.map(metric => ({ metric, ...binsFromFootprint(
$footprintsQuery.data.footprints.timeweights,
$footprintsQuery.data.footprints.timeWeights,
metricConfig(cluster.name, metric)?.scope,
$footprintsQuery.data.footprints.metrics.find(f => f.metric == metric).data, numBins) }))}
itemsPerRow={ccconfig.plot_view_plotsPerRow}>
@@ -265,11 +267,11 @@
data={convert2uplot(item.bins)}
width={width} height={250}
title="Average Distribution of '{item.metric}'"
xlabel={`${item.metric} average [${(metricConfig(cluster.name, item.metric)?.unit?.prefix ? metricConfig(cluster.name, item.metric)?.unit?.prefix : '') +
xlabel={`${item.metric} bin maximum [${(metricConfig(cluster.name, item.metric)?.unit?.prefix ? metricConfig(cluster.name, item.metric)?.unit?.prefix : '') +
(metricConfig(cluster.name, item.metric)?.unit?.base ? metricConfig(cluster.name, item.metric)?.unit?.base : '')}]`}
xunit={`${(metricConfig(cluster.name, item.metric)?.unit?.prefix ? metricConfig(cluster.name, item.metric)?.unit?.prefix : '') +
(metricConfig(cluster.name, item.metric)?.unit?.base ? metricConfig(cluster.name, item.metric)?.unit?.base : '')}`}
ylabel="Node Hours"
ylabel="Normalized Hours"
yunit="Hours"/>
</PlotTable>
</Col>
@@ -279,7 +281,7 @@
<Col>
<Card body>
Each circle represents one job. The size of a circle is proportional to its node hours. Darker circles mean multiple jobs have the same averages for the respective metrics.
Note that some metrics could be disabled for specific subclusters as per metriConfig and thus could affect shown average values.
Note that some metrics could be disabled for specific subclusters as per metricConfig and thus could affect shown average values.
</Card>
<br/>
</Col>
@@ -301,7 +303,7 @@
(metricConfig(cluster.name, item.m1)?.unit?.base ? metricConfig(cluster.name, item.m1)?.unit?.base : '')}]`}
yLabel={`${item.m2} [${(metricConfig(cluster.name, item.m2)?.unit?.prefix ? metricConfig(cluster.name, item.m2)?.unit?.prefix : '') +
(metricConfig(cluster.name, item.m2)?.unit?.base ? metricConfig(cluster.name, item.m2)?.unit?.base : '')}]`}
X={item.f1} Y={item.f2} S={$footprintsQuery.data.footprints.timeweights} />
X={item.f1} Y={item.f2} S={$footprintsQuery.data.footprints.timeWeights.nodeHours} />
</PlotTable>
</Col>
</Row>

View File

@@ -325,7 +325,7 @@ export function convert2uplot(canvasData) {
return uplotData
}
export function binsFromFootprint(weights, values, numBins) {
export function binsFromFootprint(weights, scope, values, numBins) {
let min = 0, max = 0
if (values.length != 0) {
for (let x of values) {
@@ -338,10 +338,23 @@ export function binsFromFootprint(weights, values, numBins) {
if (numBins == null || numBins < 3)
numBins = 3
let scopeWeights
switch (scope) {
case 'core':
scopeWeights = weights.coreHours
break
case 'accelerator':
scopeWeights = weights.accHours
break
default: // every other scope: use 'node'
scopeWeights = weights.nodeHours
}
const bins = new Array(numBins).fill(0)
for (let i = 0; i < values.length; i++)
bins[Math.floor(((values[i] - min) / (max - min)) * numBins)] += weights ? weights[i] : 1
bins[Math.floor(((values[i] - min) / (max - min)) * numBins)] += scopeWeights ? scopeWeights[i] : 1
// Manual Canvas Original
// return {
// label: idx => {
// let start = min + (idx / numBins) * (max - min)
@@ -355,14 +368,13 @@ export function binsFromFootprint(weights, values, numBins) {
return {
bins: bins.map((count, idx) => ({
value: idx => { // Get rounded down next integer to bins' Start-Stop Mean Value
let start = min + (idx / numBins) * (max - min)
value: idx => { // Use bins' max value instead of mean
// let start = min + (idx / numBins) * (max - min)
let stop = min + ((idx + 1) / numBins) * (max - min)
return `${formatNumber(Math.floor((start+stop)/2))}`
// return `${formatNumber(Math.floor((start+stop)/2))}`
return Math.floor(stop)
},
count: count
})),
min: min,
max: max
}))
}
}
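Two things happen here: the weight array is picked by the metric's native scope, and each value is then binned at floor((v - min) / (max - min) * numBins) with its scope weight accumulated in that bin; the bin labels now show each bin's rounded maximum (Math.floor(stop)) instead of the start/stop midpoint. A minimal Go sketch of the weighted binning (the actual implementation is the JavaScript above; this version additionally guards the degenerate range and clamps v == max into the last bin):

package main

import "fmt"

func weightedBins(values, weights []float64, numBins int) []float64 {
	if numBins < 3 {
		numBins = 3
	}
	min, max := 0.0, 0.0
	for _, v := range values {
		if v < min {
			min = v
		}
		if v > max {
			max = v
		}
	}
	bins := make([]float64, numBins)
	if max == min {
		return bins // degenerate range: nothing to bin
	}
	for i, v := range values {
		idx := int((v - min) / (max - min) * float64(numBins))
		if idx >= numBins { // clamp the top edge (v == max)
			idx = numBins - 1
		}
		w := 1.0 // unweighted fallback, as in the JS code
		if weights != nil {
			w = weights[i]
		}
		bins[idx] += w
	}
	return bins
}

func main() {
	values := []float64{1, 2, 5, 9}  // per-job metric averages
	weights := []float64{4, 4, 2, 1} // scope-matched time weights
	fmt.Println(weightedBins(values, weights, 3)) // [8 2 1]
}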