diff --git a/api/schema.graphqls b/api/schema.graphqls index 82681c0..4802117 100644 --- a/api/schema.graphqls +++ b/api/schema.graphqls @@ -156,7 +156,7 @@ type MetricFootprints { } type Footprints { - nodehours: [NullableFloat!]! + timeweights: [NullableFloat!]! metrics: [MetricFootprints!]! } diff --git a/internal/graph/generated/generated.go b/internal/graph/generated/generated.go index 229c6b5..1f3c349 100644 --- a/internal/graph/generated/generated.go +++ b/internal/graph/generated/generated.go @@ -68,8 +68,8 @@ type ComplexityRoot struct { } Footprints struct { - Metrics func(childComplexity int) int - Nodehours func(childComplexity int) int + Metrics func(childComplexity int) int + Timeweights func(childComplexity int) int } HistoPoint struct { @@ -406,12 +406,12 @@ func (e *executableSchema) Complexity(typeName, field string, childComplexity in return e.complexity.Footprints.Metrics(childComplexity), true - case "Footprints.nodehours": - if e.complexity.Footprints.Nodehours == nil { + case "Footprints.timeweights": + if e.complexity.Footprints.Timeweights == nil { break } - return e.complexity.Footprints.Nodehours(childComplexity), true + return e.complexity.Footprints.Timeweights(childComplexity), true case "HistoPoint.count": if e.complexity.HistoPoint.Count == nil { @@ -1666,7 +1666,7 @@ type MetricFootprints { } type Footprints { - nodehours: [NullableFloat!]! + timeweights: [NullableFloat!]! metrics: [MetricFootprints!]! 
} @@ -2753,8 +2753,8 @@ func (ec *executionContext) fieldContext_Count_count(ctx context.Context, field return fc, nil } -func (ec *executionContext) _Footprints_nodehours(ctx context.Context, field graphql.CollectedField, obj *model.Footprints) (ret graphql.Marshaler) { - fc, err := ec.fieldContext_Footprints_nodehours(ctx, field) +func (ec *executionContext) _Footprints_timeweights(ctx context.Context, field graphql.CollectedField, obj *model.Footprints) (ret graphql.Marshaler) { + fc, err := ec.fieldContext_Footprints_timeweights(ctx, field) if err != nil { return graphql.Null } @@ -2767,7 +2767,7 @@ func (ec *executionContext) _Footprints_nodehours(ctx context.Context, field gra }() resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) { ctx = rctx // use context from middleware stack in children - return obj.Nodehours, nil + return obj.Timeweights, nil }) if err != nil { ec.Error(ctx, err) @@ -2784,7 +2784,7 @@ func (ec *executionContext) _Footprints_nodehours(ctx context.Context, field gra return ec.marshalNNullableFloat2ᚕgithubᚗcomᚋClusterCockpitᚋccᚑbackendᚋpkgᚋschemaᚐFloatᚄ(ctx, field.Selections, res) } -func (ec *executionContext) fieldContext_Footprints_nodehours(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) { +func (ec *executionContext) fieldContext_Footprints_timeweights(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) { fc = &graphql.FieldContext{ Object: "Footprints", Field: field, @@ -6945,8 +6945,8 @@ func (ec *executionContext) fieldContext_Query_jobsFootprints(ctx context.Contex IsResolver: true, Child: func(ctx context.Context, field graphql.CollectedField) (*graphql.FieldContext, error) { switch field.Name { - case "nodehours": - return ec.fieldContext_Footprints_nodehours(ctx, field) + case "timeweights": + return ec.fieldContext_Footprints_timeweights(ctx, field) case "metrics": return ec.fieldContext_Footprints_metrics(ctx, 
field) } @@ -11715,9 +11715,9 @@ func (ec *executionContext) _Footprints(ctx context.Context, sel ast.SelectionSe switch field.Name { case "__typename": out.Values[i] = graphql.MarshalString("Footprints") - case "nodehours": + case "timeweights": - out.Values[i] = ec._Footprints_nodehours(ctx, field, obj) + out.Values[i] = ec._Footprints_timeweights(ctx, field, obj) if out.Values[i] == graphql.Null { invalids++ diff --git a/internal/graph/model/models_gen.go b/internal/graph/model/models_gen.go index 8284051..7dacdf2 100644 --- a/internal/graph/model/models_gen.go +++ b/internal/graph/model/models_gen.go @@ -22,8 +22,8 @@ type FloatRange struct { } type Footprints struct { - Nodehours []schema.Float `json:"nodehours"` - Metrics []*MetricFootprints `json:"metrics"` + Timeweights []schema.Float `json:"timeweights"` + Metrics []*MetricFootprints `json:"metrics"` } type HistoPoint struct { diff --git a/internal/graph/util.go b/internal/graph/util.go index c9423e1..64676c8 100644 --- a/internal/graph/util.go +++ b/internal/graph/util.go @@ -107,6 +107,8 @@ func (r *queryResolver) jobsFootprints(ctx context.Context, filter []*model.JobF } nodehours := make([]schema.Float, 0, len(jobs)) + acchours := make([]schema.Float, 0, len(jobs)) + hwthours := make([]schema.Float, 0, len(jobs)) for _, job := range jobs { if job.MonitoringStatus == schema.MonitoringStatusDisabled || job.MonitoringStatus == schema.MonitoringStatusArchivingFailed { continue @@ -117,7 +119,18 @@ func (r *queryResolver) jobsFootprints(ctx context.Context, filter []*model.JobF return nil, err } + // #166 collect arrays: Null values or no null values? 
nodehours = append(nodehours, schema.Float(float64(job.Duration)/60.0*float64(job.NumNodes))) + if job.NumAcc > 0 { + acchours = append(acchours, schema.Float(float64(job.Duration)/60.0*float64(job.NumAcc))) + } else { + acchours = append(acchours, schema.Float(0.0)) + } + if job.NumHWThreads > 0 { + hwthours = append(hwthours, schema.Float(float64(job.Duration)/60.0*float64(job.NumHWThreads))) + } else { + hwthours = append(hwthours, schema.Float(0.0)) + } } res := make([]*model.MetricFootprints, len(avgs)) @@ -129,8 +142,8 @@ func (r *queryResolver) jobsFootprints(ctx context.Context, filter []*model.JobF } return &model.Footprints{ - Nodehours: nodehours, - Metrics: res, + Timeweights: nodehours, + Metrics: res, }, nil } diff --git a/internal/metricdata/cc-metric-store.go b/internal/metricdata/cc-metric-store.go index 6b3153f..cfaa6fd 100644 --- a/internal/metricdata/cc-metric-store.go +++ b/internal/metricdata/cc-metric-store.go @@ -506,7 +506,7 @@ func (ccms *CCMetricStore) LoadStats( metrics []string, ctx context.Context) (map[string]map[string]schema.MetricStatistics, error) { - queries, _, err := ccms.buildQueries(job, metrics, []schema.MetricScope{schema.MetricScopeNode}) + queries, _, err := ccms.buildQueries(job, metrics, []schema.MetricScope{schema.MetricScopeNode}) // #166 Add scope here for analysis view accelerator normalization? 
if err != nil { log.Warn("Error while building query") return nil, err diff --git a/internal/metricdata/metricdata.go b/internal/metricdata/metricdata.go index 08898bd..fc91e7d 100644 --- a/internal/metricdata/metricdata.go +++ b/internal/metricdata/metricdata.go @@ -182,7 +182,7 @@ func LoadAverages( ctx context.Context) error { if job.State != schema.JobStateRunning && useArchive { - return archive.LoadAveragesFromArchive(job, metrics, data) + return archive.LoadAveragesFromArchive(job, metrics, data) // #166 change also here } repo, ok := metricDataRepos[job.Cluster] @@ -190,7 +190,7 @@ func LoadAverages( return fmt.Errorf("METRICDATA/METRICDATA > no metric data repository configured for '%s'", job.Cluster) } - stats, err := repo.LoadStats(job, metrics, ctx) + stats, err := repo.LoadStats(job, metrics, ctx) // #166 how to handle stats for acc normalization? if err != nil { log.Errorf("Error while loading statistics for job %v (User %v, Project %v)", job.JobID, job.User, job.Project) return err diff --git a/web/frontend/src/Analysis.root.svelte b/web/frontend/src/Analysis.root.svelte index 2e6f5b5..2ecf9db 100644 --- a/web/frontend/src/Analysis.root.svelte +++ b/web/frontend/src/Analysis.root.svelte @@ -76,7 +76,7 @@ query: gql` query($jobFilters: [JobFilter!]!, $metrics: [String!]!) { footprints: jobsFootprints(filter: $jobFilters, metrics: $metrics) { - nodehours, + timeweights, metrics { metric, data } } }`, @@ -229,7 +229,7 @@ let:width renderFor="analysis" items={metricsInHistograms.map(metric => ({ metric, ...binsFromFootprint( - $footprintsQuery.data.footprints.nodehours, + $footprintsQuery.data.footprints.timeweights, $footprintsQuery.data.footprints.metrics.find(f => f.metric == metric).data, numBins) }))} itemsPerRow={ccconfig.plot_view_plotsPerRow}> @@ -271,7 +271,7 @@ (metricConfig(cluster.name, item.m1)?.unit?.base ? metricConfig(cluster.name, item.m1)?.unit?.base : '')}]`} yLabel={`${item.m2} [${(metricConfig(cluster.name, item.m2)?.unit?.prefix ? 
metricConfig(cluster.name, item.m2)?.unit?.prefix : '') + (metricConfig(cluster.name, item.m2)?.unit?.base ? metricConfig(cluster.name, item.m2)?.unit?.base : '')}]`} - X={item.f1} Y={item.f2} S={$footprintsQuery.data.footprints.nodehours} /> + X={item.f1} Y={item.f2} S={$footprintsQuery.data.footprints.timeweights} />