Merge branch 'dev' into add_detailed_nodelist

2025-01-28 13:47:22 +01:00
16 changed files with 470 additions and 83 deletions

View File

@@ -48,8 +48,7 @@ type ResolverRoot interface {
SubCluster() SubClusterResolver
}
-type DirectiveRoot struct {
-}
+type DirectiveRoot struct{}
type ComplexityRoot struct {
Accelerator struct {
@@ -266,7 +265,7 @@ type ComplexityRoot struct {
JobMetrics func(childComplexity int, id string, metrics []string, scopes []schema.MetricScope, resolution *int) int
Jobs func(childComplexity int, filter []*model.JobFilter, page *model.PageRequest, order *model.OrderByInput) int
JobsFootprints func(childComplexity int, filter []*model.JobFilter, metrics []string) int
-JobsStatistics func(childComplexity int, filter []*model.JobFilter, metrics []string, page *model.PageRequest, sortBy *model.SortByAggregate, groupBy *model.Aggregate) int
+JobsStatistics func(childComplexity int, filter []*model.JobFilter, metrics []string, page *model.PageRequest, sortBy *model.SortByAggregate, groupBy *model.Aggregate, numDurationBins *string, numMetricBins *int) int
NodeMetrics func(childComplexity int, cluster string, nodes []string, scopes []schema.MetricScope, metrics []string, from time.Time, to time.Time) int
NodeMetricsList func(childComplexity int, cluster string, subCluster string, nodeFilter string, scopes []schema.MetricScope, metrics []string, from time.Time, to time.Time, page *model.PageRequest, resolution *int) int
RooflineHeatmap func(childComplexity int, filter []*model.JobFilter, rows int, cols int, minX float64, minY float64, maxX float64, maxY float64) int
@@ -392,7 +391,7 @@ type QueryResolver interface {
JobMetrics(ctx context.Context, id string, metrics []string, scopes []schema.MetricScope, resolution *int) ([]*model.JobMetricWithName, error)
JobsFootprints(ctx context.Context, filter []*model.JobFilter, metrics []string) (*model.Footprints, error)
Jobs(ctx context.Context, filter []*model.JobFilter, page *model.PageRequest, order *model.OrderByInput) (*model.JobResultList, error)
-JobsStatistics(ctx context.Context, filter []*model.JobFilter, metrics []string, page *model.PageRequest, sortBy *model.SortByAggregate, groupBy *model.Aggregate) ([]*model.JobsStatistics, error)
+JobsStatistics(ctx context.Context, filter []*model.JobFilter, metrics []string, page *model.PageRequest, sortBy *model.SortByAggregate, groupBy *model.Aggregate, numDurationBins *string, numMetricBins *int) ([]*model.JobsStatistics, error)
RooflineHeatmap(ctx context.Context, filter []*model.JobFilter, rows int, cols int, minX float64, minY float64, maxX float64, maxY float64) ([][]float64, error)
NodeMetrics(ctx context.Context, cluster string, nodes []string, scopes []schema.MetricScope, metrics []string, from time.Time, to time.Time) ([]*model.NodeMetrics, error)
NodeMetricsList(ctx context.Context, cluster string, subCluster string, nodeFilter string, scopes []schema.MetricScope, metrics []string, from time.Time, to time.Time, page *model.PageRequest, resolution *int) (*model.NodesResultList, error)
@@ -1425,7 +1424,7 @@ func (e *executableSchema) Complexity(typeName, field string, childComplexity in
return 0, false
}
-return e.complexity.Query.JobsStatistics(childComplexity, args["filter"].([]*model.JobFilter), args["metrics"].([]string), args["page"].(*model.PageRequest), args["sortBy"].(*model.SortByAggregate), args["groupBy"].(*model.Aggregate)), true
+return e.complexity.Query.JobsStatistics(childComplexity, args["filter"].([]*model.JobFilter), args["metrics"].([]string), args["page"].(*model.PageRequest), args["sortBy"].(*model.SortByAggregate), args["groupBy"].(*model.Aggregate), args["numDurationBins"].(*string), args["numMetricBins"].(*int)), true
case "Query.nodeMetrics":
if e.complexity.Query.NodeMetrics == nil {
@@ -2206,7 +2205,7 @@ type Query {
jobsFootprints(filter: [JobFilter!], metrics: [String!]!): Footprints
jobs(filter: [JobFilter!], page: PageRequest, order: OrderByInput): JobResultList!
-jobsStatistics(filter: [JobFilter!], metrics: [String!], page: PageRequest, sortBy: SortByAggregate, groupBy: Aggregate): [JobsStatistics!]!
+jobsStatistics(filter: [JobFilter!], metrics: [String!], page: PageRequest, sortBy: SortByAggregate, groupBy: Aggregate, numDurationBins: String, numMetricBins: Int): [JobsStatistics!]!
rooflineHeatmap(filter: [JobFilter!]!, rows: Int!, cols: Int!, minX: Float!, minY: Float!, maxX: Float!, maxY: Float!): [[Float!]!]!
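Taken together, the generated changes above expose two new optional arguments on the jobsStatistics query: numDurationBins (a bin-width label such as "1h") and numMetricBins (a bin count). A minimal client-side sketch in Go; the endpoint URL and bearer token are hypothetical placeholders, not part of this commit:

package main

import (
	"bytes"
	"encoding/json"
	"fmt"
	"net/http"
)

func main() {
	// Both new arguments are nullable; omitting them falls back to the
	// server-side defaults ("1h" and 10, see the resolver change below).
	query := `query {
	  jobsStatistics(numDurationBins: "10m", numMetricBins: 20) {
	    histDuration { value count }
	    histMetrics { metric data { bin count min max } }
	  }
	}`
	body, _ := json.Marshal(map[string]string{"query": query})
	// Hypothetical endpoint and token, for illustration only.
	req, _ := http.NewRequest("POST", "https://demo.example.com/query", bytes.NewReader(body))
	req.Header.Set("Content-Type", "application/json")
	req.Header.Set("Authorization", "Bearer <JWT>")
	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()
	fmt.Println(resp.Status)
}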
@@ -2359,6 +2358,7 @@ func (ec *executionContext) field_Mutation_addTagsToJob_args(ctx context.Context
args["tagIds"] = arg1
return args, nil
}
func (ec *executionContext) field_Mutation_addTagsToJob_argsJob(
ctx context.Context,
rawArgs map[string]interface{},
@@ -2423,6 +2423,7 @@ func (ec *executionContext) field_Mutation_createTag_args(ctx context.Context, r
args["scope"] = arg2
return args, nil
}
func (ec *executionContext) field_Mutation_createTag_argsType(
ctx context.Context,
rawArgs map[string]interface{},
@@ -2499,6 +2500,7 @@ func (ec *executionContext) field_Mutation_deleteTag_args(ctx context.Context, r
args["id"] = arg0
return args, nil
}
func (ec *executionContext) field_Mutation_deleteTag_argsID(
ctx context.Context,
rawArgs map[string]interface{},
@@ -2536,6 +2538,7 @@ func (ec *executionContext) field_Mutation_removeTagsFromJob_args(ctx context.Co
args["tagIds"] = arg1
return args, nil
}
func (ec *executionContext) field_Mutation_removeTagsFromJob_argsJob(
ctx context.Context,
rawArgs map[string]interface{},
@@ -2595,6 +2598,7 @@ func (ec *executionContext) field_Mutation_updateConfiguration_args(ctx context.
args["value"] = arg1
return args, nil
}
func (ec *executionContext) field_Mutation_updateConfiguration_argsName(
ctx context.Context,
rawArgs map[string]interface{},
@@ -2649,6 +2653,7 @@ func (ec *executionContext) field_Query___type_args(ctx context.Context, rawArgs
args["name"] = arg0
return args, nil
}
func (ec *executionContext) field_Query___type_argsName(
ctx context.Context,
rawArgs map[string]interface{},
@@ -2681,6 +2686,7 @@ func (ec *executionContext) field_Query_allocatedNodes_args(ctx context.Context,
args["cluster"] = arg0
return args, nil
}
func (ec *executionContext) field_Query_allocatedNodes_argsCluster(
ctx context.Context,
rawArgs map[string]interface{},
@@ -2728,6 +2734,7 @@ func (ec *executionContext) field_Query_jobMetrics_args(ctx context.Context, raw
args["resolution"] = arg3
return args, nil
}
func (ec *executionContext) field_Query_jobMetrics_argsID(
ctx context.Context,
rawArgs map[string]interface{},
@@ -2826,6 +2833,7 @@ func (ec *executionContext) field_Query_job_args(ctx context.Context, rawArgs ma
args["id"] = arg0
return args, nil
}
func (ec *executionContext) field_Query_job_argsID(
ctx context.Context,
rawArgs map[string]interface{},
@@ -2863,6 +2871,7 @@ func (ec *executionContext) field_Query_jobsFootprints_args(ctx context.Context,
args["metrics"] = arg1
return args, nil
}
func (ec *executionContext) field_Query_jobsFootprints_argsFilter(
ctx context.Context,
rawArgs map[string]interface{},
@@ -2935,8 +2944,19 @@ func (ec *executionContext) field_Query_jobsStatistics_args(ctx context.Context,
return nil, err
}
args["groupBy"] = arg4
+arg5, err := ec.field_Query_jobsStatistics_argsNumDurationBins(ctx, rawArgs)
+if err != nil {
+return nil, err
+}
+args["numDurationBins"] = arg5
+arg6, err := ec.field_Query_jobsStatistics_argsNumMetricBins(ctx, rawArgs)
+if err != nil {
+return nil, err
+}
+args["numMetricBins"] = arg6
return args, nil
}
func (ec *executionContext) field_Query_jobsStatistics_argsFilter(
ctx context.Context,
rawArgs map[string]interface{},
@@ -3047,6 +3067,50 @@ func (ec *executionContext) field_Query_jobsStatistics_argsGroupBy(
return zeroVal, nil
}
+func (ec *executionContext) field_Query_jobsStatistics_argsNumDurationBins(
+ctx context.Context,
+rawArgs map[string]interface{},
+) (*string, error) {
+// We won't call the directive if the argument is null.
+// Set call_argument_directives_with_null to true to call directives
+// even if the argument is null.
+_, ok := rawArgs["numDurationBins"]
+if !ok {
+var zeroVal *string
+return zeroVal, nil
+}
+ctx = graphql.WithPathContext(ctx, graphql.NewPathWithField("numDurationBins"))
+if tmp, ok := rawArgs["numDurationBins"]; ok {
+return ec.unmarshalOString2ᚖstring(ctx, tmp)
+}
+var zeroVal *string
+return zeroVal, nil
+}
+func (ec *executionContext) field_Query_jobsStatistics_argsNumMetricBins(
+ctx context.Context,
+rawArgs map[string]interface{},
+) (*int, error) {
+// We won't call the directive if the argument is null.
+// Set call_argument_directives_with_null to true to call directives
+// even if the argument is null.
+_, ok := rawArgs["numMetricBins"]
+if !ok {
+var zeroVal *int
+return zeroVal, nil
+}
+ctx = graphql.WithPathContext(ctx, graphql.NewPathWithField("numMetricBins"))
+if tmp, ok := rawArgs["numMetricBins"]; ok {
+return ec.unmarshalOInt2ᚖint(ctx, tmp)
+}
+var zeroVal *int
+return zeroVal, nil
+}
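All of these generated per-argument helpers follow the same nullable-argument pattern: if the key is missing from rawArgs, a nil pointer is returned and directives are skipped; otherwise the raw value is unmarshalled into the target type. A condensed, hand-written sketch of that pattern (generic names, not gqlgen output):

package main

import "fmt"

// optionalString mirrors the shape of the generated args helpers:
// key absent -> nil pointer (directives skipped); key present ->
// value converted into the target type.
func optionalString(rawArgs map[string]interface{}, key string) (*string, error) {
	tmp, ok := rawArgs[key]
	if !ok {
		return nil, nil // argument omitted by the client
	}
	s, ok := tmp.(string)
	if !ok {
		return nil, fmt.Errorf("argument %q: expected string, got %T", key, tmp)
	}
	return &s, nil
}

func main() {
	v, _ := optionalString(map[string]interface{}{"numDurationBins": "10m"}, "numDurationBins")
	fmt.Println(*v) // 10m
}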
func (ec *executionContext) field_Query_jobs_args(ctx context.Context, rawArgs map[string]interface{}) (map[string]interface{}, error) {
var err error
args := map[string]interface{}{}
@@ -3067,6 +3131,7 @@ func (ec *executionContext) field_Query_jobs_args(ctx context.Context, rawArgs m
args["order"] = arg2
return args, nil
}
func (ec *executionContext) field_Query_jobs_argsFilter(
ctx context.Context,
rawArgs map[string]interface{},
@@ -3183,6 +3248,7 @@ func (ec *executionContext) field_Query_nodeMetricsList_args(ctx context.Context
args["resolution"] = arg8
return args, nil
}
func (ec *executionContext) field_Query_nodeMetricsList_argsCluster(
ctx context.Context,
rawArgs map[string]interface{},
@@ -3416,6 +3482,7 @@ func (ec *executionContext) field_Query_nodeMetrics_args(ctx context.Context, ra
args["to"] = arg5
return args, nil
}
func (ec *executionContext) field_Query_nodeMetrics_argsCluster(
ctx context.Context,
rawArgs map[string]interface{},
@@ -3588,6 +3655,7 @@ func (ec *executionContext) field_Query_rooflineHeatmap_args(ctx context.Context
args["maxY"] = arg6
return args, nil
}
func (ec *executionContext) field_Query_rooflineHeatmap_argsFilter(
ctx context.Context,
rawArgs map[string]interface{},
@@ -3752,6 +3820,7 @@ func (ec *executionContext) field_Query_user_args(ctx context.Context, rawArgs m
args["username"] = arg0
return args, nil
}
func (ec *executionContext) field_Query_user_argsUsername(
ctx context.Context,
rawArgs map[string]interface{},
@@ -3784,6 +3853,7 @@ func (ec *executionContext) field___Type_enumValues_args(ctx context.Context, ra
args["includeDeprecated"] = arg0
return args, nil
}
func (ec *executionContext) field___Type_enumValues_argsIncludeDeprecated(
ctx context.Context,
rawArgs map[string]interface{},
@@ -3816,6 +3886,7 @@ func (ec *executionContext) field___Type_fields_args(ctx context.Context, rawArg
args["includeDeprecated"] = arg0
return args, nil
}
func (ec *executionContext) field___Type_fields_argsIncludeDeprecated(
ctx context.Context,
rawArgs map[string]interface{},
@@ -10355,7 +10426,7 @@ func (ec *executionContext) _Query_jobsStatistics(ctx context.Context, field gra
}()
resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
ctx = rctx // use context from middleware stack in children
-return ec.resolvers.Query().JobsStatistics(rctx, fc.Args["filter"].([]*model.JobFilter), fc.Args["metrics"].([]string), fc.Args["page"].(*model.PageRequest), fc.Args["sortBy"].(*model.SortByAggregate), fc.Args["groupBy"].(*model.Aggregate))
+return ec.resolvers.Query().JobsStatistics(rctx, fc.Args["filter"].([]*model.JobFilter), fc.Args["metrics"].([]string), fc.Args["page"].(*model.PageRequest), fc.Args["sortBy"].(*model.SortByAggregate), fc.Args["groupBy"].(*model.Aggregate), fc.Args["numDurationBins"].(*string), fc.Args["numMetricBins"].(*int))
})
if err != nil {
ec.Error(ctx, err)
@@ -19995,7 +20066,7 @@ func (ec *executionContext) unmarshalOAggregate2ᚖgithubᚗcomᚋClusterCockpit
if v == nil {
return nil, nil
}
-var res = new(model.Aggregate)
+res := new(model.Aggregate)
err := res.UnmarshalGQL(v)
return res, graphql.ErrorOnPath(ctx, err)
}
@@ -20594,7 +20665,7 @@ func (ec *executionContext) unmarshalOSortByAggregate2ᚖgithubᚗcomᚋClusterC
if v == nil {
return nil, nil
}
-var res = new(model.SortByAggregate)
+res := new(model.SortByAggregate)
err := res.UnmarshalGQL(v)
return res, graphql.ErrorOnPath(ctx, err)
}

View File

@@ -354,10 +354,14 @@ func (r *queryResolver) Jobs(ctx context.Context, filter []*model.JobFilter, pag
}
// JobsStatistics is the resolver for the jobsStatistics field.
-func (r *queryResolver) JobsStatistics(ctx context.Context, filter []*model.JobFilter, metrics []string, page *model.PageRequest, sortBy *model.SortByAggregate, groupBy *model.Aggregate) ([]*model.JobsStatistics, error) {
+func (r *queryResolver) JobsStatistics(ctx context.Context, filter []*model.JobFilter, metrics []string, page *model.PageRequest, sortBy *model.SortByAggregate, groupBy *model.Aggregate, numDurationBins *string, numMetricBins *int) ([]*model.JobsStatistics, error) {
var err error
var stats []*model.JobsStatistics
+// Top Level Defaults
+var defaultDurationBins string = "1h"
+var defaultMetricBins int = 10
if requireField(ctx, "totalJobs") || requireField(ctx, "totalWalltime") || requireField(ctx, "totalNodes") || requireField(ctx, "totalCores") ||
requireField(ctx, "totalAccs") || requireField(ctx, "totalNodeHours") || requireField(ctx, "totalCoreHours") || requireField(ctx, "totalAccHours") {
if groupBy == nil {
@@ -391,8 +395,13 @@ func (r *queryResolver) JobsStatistics(ctx context.Context, filter []*model.JobF
}
if requireField(ctx, "histDuration") || requireField(ctx, "histNumNodes") || requireField(ctx, "histNumCores") || requireField(ctx, "histNumAccs") {
+if numDurationBins == nil {
+numDurationBins = &defaultDurationBins
+}
if groupBy == nil {
-stats[0], err = r.Repo.AddHistograms(ctx, filter, stats[0])
+stats[0], err = r.Repo.AddHistograms(ctx, filter, stats[0], numDurationBins)
if err != nil {
return nil, err
}
@@ -402,8 +411,13 @@ func (r *queryResolver) JobsStatistics(ctx context.Context, filter []*model.JobF
}
if requireField(ctx, "histMetrics") {
+if numMetricBins == nil {
+numMetricBins = &defaultMetricBins
+}
if groupBy == nil {
-stats[0], err = r.Repo.AddMetricHistograms(ctx, filter, metrics, stats[0])
+stats[0], err = r.Repo.AddMetricHistograms(ctx, filter, metrics, stats[0], numMetricBins)
if err != nil {
return nil, err
}
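The resolver redirects nil arguments to local defaults before passing them down, so the repository layer can assume non-nil pointers. The same pattern in isolation, as a runnable sketch with hypothetical names:

package main

import "fmt"

// applyDefaults mirrors the resolver's nil checks: optional GraphQL
// arguments arrive as pointers, and nil means "use the default".
func applyDefaults(numDurationBins *string, numMetricBins *int) (*string, *int) {
	defaultDurationBins := "1h"
	defaultMetricBins := 10
	if numDurationBins == nil {
		numDurationBins = &defaultDurationBins
	}
	if numMetricBins == nil {
		numMetricBins = &defaultMetricBins
	}
	return numDurationBins, numMetricBins
}

func main() {
	d, m := applyDefaults(nil, nil)
	fmt.Println(*d, *m) // 1h 10
}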

View File

@@ -8,7 +8,6 @@ import (
"context"
"database/sql"
"fmt"
"math"
"time"
"github.com/ClusterCockpit/cc-backend/internal/config"
@@ -447,15 +446,40 @@ func (r *JobRepository) AddHistograms(
ctx context.Context,
filter []*model.JobFilter,
stat *model.JobsStatistics,
+durationBins *string,
) (*model.JobsStatistics, error) {
start := time.Now()
+var targetBinCount int
+var targetBinSize int
+switch {
+case *durationBins == "1m": // 1 Minute Bins + Max 60 Bins -> Max 60 Minutes
+targetBinCount = 60
+targetBinSize = 60
+case *durationBins == "10m": // 10 Minute Bins + Max 72 Bins -> Max 12 Hours
+targetBinCount = 72
+targetBinSize = 600
+case *durationBins == "1h": // 1 Hour Bins + Max 48 Bins -> Max 48 Hours
+targetBinCount = 48
+targetBinSize = 3600
+case *durationBins == "6h": // 6 Hour Bins + Max 12 Bins -> Max 3 Days
+targetBinCount = 12
+targetBinSize = 21600
+case *durationBins == "12h": // 12 Hour Bins + Max 14 Bins -> Max 7 Days
+targetBinCount = 14
+targetBinSize = 43200
+default: // Fallback: 1 Hour Bins + Max 24 Bins -> Max 24 Hours
+targetBinCount = 24
+targetBinSize = 3600
+}
castType := r.getCastType()
var err error
-value := fmt.Sprintf(`CAST(ROUND((CASE WHEN job.job_state = "running" THEN %d - job.start_time ELSE job.duration END) / 3600) as %s) as value`, time.Now().Unix(), castType)
-stat.HistDuration, err = r.jobsStatisticsHistogram(ctx, value, filter)
+// Always return X-values as seconds; the frontend formats them into minutes and hours
+value := fmt.Sprintf(`CAST(ROUND(((CASE WHEN job.job_state = "running" THEN %d - job.start_time ELSE job.duration END) / %d) + 1) as %s) as value`, time.Now().Unix(), targetBinSize, castType)
+stat.HistDuration, err = r.jobsDurationStatisticsHistogram(ctx, value, filter, targetBinSize, &targetBinCount)
if err != nil {
log.Warn("Error while loading job statistics histogram: running jobs")
log.Warn("Error while loading job statistics histogram: job duration")
return nil, err
}
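Each bin label thus fixes both the bin width and the bin count, and the SQL value expression assigns a job of duration d seconds to bin (d / targetBinSize) + 1, so bin values are 1-based. A small sketch of the mapping and the bin assignment, assuming SQLite-style integer division:

package main

import "fmt"

// durationBinning mirrors the switch in AddHistograms: a bin-width
// label selects both the number of bins and the bin size in seconds.
func durationBinning(label string) (binCount, binSizeSeconds int) {
	switch label {
	case "1m":
		return 60, 60
	case "10m":
		return 72, 600
	case "1h":
		return 48, 3600
	case "6h":
		return 12, 21600
	case "12h":
		return 14, 43200
	default:
		return 24, 3600 // fallback: 24 one-hour bins
	}
}

func main() {
	count, size := durationBinning("1h")
	duration := 5400              // a 90-minute job, in seconds
	bin := (duration / size) + 1  // the bin assignment from the SQL value expression
	fmt.Println(count, size, bin) // 48 3600 2: the job lands in the second one-hour bin
}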
@@ -487,6 +511,7 @@ func (r *JobRepository) AddMetricHistograms(
filter []*model.JobFilter,
metrics []string,
stat *model.JobsStatistics,
+targetBinCount *int,
) (*model.JobsStatistics, error) {
start := time.Now()
@@ -494,7 +519,7 @@ func (r *JobRepository) AddMetricHistograms(
for _, f := range filter {
if f.State != nil {
if len(f.State) == 1 && f.State[0] == "running" {
-stat.HistMetrics = r.runningJobsMetricStatisticsHistogram(ctx, metrics, filter)
+stat.HistMetrics = r.runningJobsMetricStatisticsHistogram(ctx, metrics, filter, targetBinCount)
log.Debugf("Timer AddMetricHistograms %s", time.Since(start))
return stat, nil
}
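AddMetricHistograms takes the in-memory path only when a filter pins the job state to exactly "running", presumably because such jobs have no final values to bin in SQL yet. A runnable sketch of that dispatch check, with local stand-in types:

package main

import "fmt"

type jobFilter struct {
	State []string // nil means the state is not filtered
}

// onlyRunning mirrors the dispatch in AddMetricHistograms: the
// in-memory histogram path is taken only if some filter pins the
// state to exactly ["running"].
func onlyRunning(filters []*jobFilter) bool {
	for _, f := range filters {
		if f.State != nil && len(f.State) == 1 && f.State[0] == "running" {
			return true
		}
	}
	return false
}

func main() {
	fmt.Println(onlyRunning([]*jobFilter{{State: []string{"running"}}})) // true
	fmt.Println(onlyRunning([]*jobFilter{{State: nil}}))                 // false
}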
@@ -503,7 +528,7 @@ func (r *JobRepository) AddMetricHistograms(
// All other cases: Query and make bins in sqlite directly
for _, m := range metrics {
-metricHisto, err := r.jobsMetricStatisticsHistogram(ctx, m, filter)
+metricHisto, err := r.jobsMetricStatisticsHistogram(ctx, m, filter, targetBinCount)
if err != nil {
log.Warnf("Error while loading job metric statistics histogram: %s", m)
continue
@@ -540,6 +565,7 @@ func (r *JobRepository) jobsStatisticsHistogram(
}
points := make([]*model.HistoPoint, 0)
+// Is it possible to introduce zero values here? Requires info about the bin count.
for rows.Next() {
point := model.HistoPoint{}
if err := rows.Scan(&point.Value, &point.Count); err != nil {
@@ -553,10 +579,66 @@ func (r *JobRepository) jobsStatisticsHistogram(
return points, nil
}
+func (r *JobRepository) jobsDurationStatisticsHistogram(
+ctx context.Context,
+value string,
+filters []*model.JobFilter,
+binSizeSeconds int,
+targetBinCount *int,
+) ([]*model.HistoPoint, error) {
+start := time.Now()
+query, qerr := SecurityCheck(ctx,
+sq.Select(value, "COUNT(job.id) AS count").From("job"))
+if qerr != nil {
+return nil, qerr
+}
+// Setup Array
+points := make([]*model.HistoPoint, 0)
+for i := 1; i <= *targetBinCount; i++ {
+point := model.HistoPoint{Value: i * binSizeSeconds, Count: 0}
+points = append(points, &point)
+}
+for _, f := range filters {
+query = BuildWhereClause(f, query)
+}
+rows, err := query.GroupBy("value").RunWith(r.DB).Query()
+if err != nil {
+log.Error("Error while running query")
+return nil, err
+}
+// Fill Array at matching $Value
+for rows.Next() {
+point := model.HistoPoint{}
+if err := rows.Scan(&point.Value, &point.Count); err != nil {
+log.Warn("Error while scanning rows")
+return nil, err
+}
+for _, e := range points {
+if e.Value == (point.Value * binSizeSeconds) {
+// Note:
+// Matching on the unmodified integer value (and multiplying point.Value by binSizeSeconds only after the match)
+// causes the frontend to loop into the highest targetBinCount, due to the zoom condition being instantly fulfilled (cause unknown)
+e.Count = point.Count
+break
+}
+}
+}
+log.Debugf("Timer jobsDurationStatisticsHistogram %s", time.Since(start))
+return points, nil
+}
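The new helper pre-seeds one zeroed HistoPoint per bin and then overwrites the counts of bins the query actually returned, so callers always receive a dense, fixed-length histogram. The seed-then-fill pattern in isolation, as a runnable sketch:

package main

import "fmt"

type histoPoint struct {
	Value int
	Count int
}

func main() {
	binSize, binCount := 3600, 4

	// Seed: one zeroed point per bin, values are multiples of the bin size.
	points := make([]*histoPoint, 0, binCount)
	for i := 1; i <= binCount; i++ {
		points = append(points, &histoPoint{Value: i * binSize, Count: 0})
	}

	// Fill: pretend the query returned bins 1 and 3 (values scaled on match).
	rows := []histoPoint{{Value: 1, Count: 7}, {Value: 3, Count: 2}}
	for _, row := range rows {
		for _, e := range points {
			if e.Value == row.Value*binSize {
				e.Count = row.Count
				break
			}
		}
	}
	for _, p := range points {
		fmt.Println(p.Value, p.Count) // 3600 7, 7200 0, 10800 2, 14400 0
	}
}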
func (r *JobRepository) jobsMetricStatisticsHistogram(
ctx context.Context,
metric string,
filters []*model.JobFilter,
+bins *int,
) (*model.MetricHistoPoints, error) {
// Get specific Peak or largest Peak
var metricConfig *schema.MetricConfig
@@ -624,16 +706,15 @@ func (r *JobRepository) jobsMetricStatisticsHistogram(
return nil, sqlerr
}
-bins := 10
binQuery := fmt.Sprintf(`CAST( (case when %s = value.max
then value.max*0.999999999 else %s end - value.min) / (value.max -
-value.min) * %d as INTEGER )`, jm, jm, bins)
+value.min) * %v as INTEGER )`, jm, jm, *bins)
mainQuery := sq.Select(
fmt.Sprintf(`%s + 1 as bin`, binQuery),
fmt.Sprintf(`count(%s) as count`, jm),
-fmt.Sprintf(`CAST(((value.max / %d) * (%s )) as INTEGER ) as min`, bins, binQuery),
-fmt.Sprintf(`CAST(((value.max / %d) * (%s + 1 )) as INTEGER ) as max`, bins, binQuery),
+fmt.Sprintf(`CAST(((value.max / %d) * (%v )) as INTEGER ) as min`, *bins, binQuery),
+fmt.Sprintf(`CAST(((value.max / %d) * (%v + 1 )) as INTEGER ) as max`, *bins, binQuery),
).From("job").CrossJoin(
fmt.Sprintf(`(%s) as value`, crossJoinQuerySql), crossJoinQueryArgs...,
).Where(fmt.Sprintf(`%s is not null and %s <= %f`, jm, jm, peak))
@@ -657,7 +738,15 @@ func (r *JobRepository) jobsMetricStatisticsHistogram(
return nil, err
}
+// Setup Array
+points := make([]*model.MetricHistoPoint, 0)
+for i := 1; i <= *bins; i++ {
+binMax := ((int(peak) / *bins) * i)
+binMin := ((int(peak) / *bins) * (i - 1))
+point := model.MetricHistoPoint{Bin: &i, Count: 0, Min: &binMin, Max: &binMax}
+points = append(points, &point)
+}
for rows.Next() {
point := model.MetricHistoPoint{}
if err := rows.Scan(&point.Bin, &point.Count, &point.Min, &point.Max); err != nil {
@@ -665,7 +754,20 @@ func (r *JobRepository) jobsMetricStatisticsHistogram(
return nil, err // Totally bricks cc-backend if returned and if all metrics requested?
}
-points = append(points, &point)
+for _, e := range points {
+if e.Bin != nil && point.Bin != nil {
+if *e.Bin == *point.Bin {
+e.Count = point.Count
+if point.Min != nil {
+e.Min = point.Min
+}
+if point.Max != nil {
+e.Max = point.Max
+}
+break
+}
+}
+}
}
result := model.MetricHistoPoints{Metric: metric, Unit: unit, Stat: &footprintStat, Data: points}
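The bin index computed by binQuery is the usual normalization (x - min) / (max - min) scaled by the bin count, with values exactly at the maximum nudged down so they land in the last bin rather than in a phantom extra bin. The same arithmetic as a worked check in Go:

package main

import "fmt"

// binIndex mirrors the SQL binQuery: normalize x into [0, 1) over
// [min, max] and scale by the bin count; the exact maximum is nudged
// down so it falls into the last bin instead of one past it.
func binIndex(x, min, max float64, bins int) int {
	if x == max {
		x = max * 0.999999999
	}
	return int((x - min) / (max - min) * float64(bins))
}

func main() {
	// 10 bins over [0, 100]: indices 0..9 become bins 1..10 after the +1 in SQL.
	fmt.Println(binIndex(0, 0, 100, 10) + 1)   // 1
	fmt.Println(binIndex(55, 0, 100, 10) + 1)  // 6
	fmt.Println(binIndex(100, 0, 100, 10) + 1) // 10, thanks to the nudge
}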
@@ -678,7 +780,9 @@ func (r *JobRepository) runningJobsMetricStatisticsHistogram(
ctx context.Context,
metrics []string,
filters []*model.JobFilter,
+bins *int,
) []*model.MetricHistoPoints {
// Get Jobs
jobs, err := r.QueryJobs(ctx, filters, &model.PageRequest{Page: 1, ItemsPerPage: 500 + 1}, nil)
if err != nil {
@@ -720,7 +824,6 @@ func (r *JobRepository) runningJobsMetricStatisticsHistogram(
metricConfig = archive.GetMetricConfig(*f.Cluster.Eq, metric)
peak = metricConfig.Peak
unit = metricConfig.Unit.Prefix + metricConfig.Unit.Base
log.Debugf("Cluster %s filter found with peak %f for %s", *f.Cluster.Eq, peak, metric)
}
}
@@ -740,28 +843,24 @@ func (r *JobRepository) runningJobsMetricStatisticsHistogram(
}
// Make and fill bins
-bins := 10.0
-peakBin := peak / bins
+peakBin := int(peak) / *bins
points := make([]*model.MetricHistoPoint, 0)
-for b := 0; b < 10; b++ {
+for b := 0; b < *bins; b++ {
count := 0
bindex := b + 1
-bmin := math.Round(peakBin * float64(b))
-bmax := math.Round(peakBin * (float64(b) + 1.0))
+bmin := peakBin * b
+bmax := peakBin * (b + 1)
// Iterate AVG values for indexed metric and count for bins
for _, val := range avgs[idx] {
-if float64(val) >= bmin && float64(val) < bmax {
+if int(val) >= bmin && int(val) < bmax {
count += 1
}
}
-bminint := int(bmin)
-bmaxint := int(bmax)
// Append Bin to Metric Result Array
-point := model.MetricHistoPoint{Bin: &bindex, Count: count, Min: &bminint, Max: &bmaxint}
+point := model.MetricHistoPoint{Bin: &bindex, Count: count, Min: &bmin, Max: &bmax}
points = append(points, &point)
}