diff --git a/api/schema.graphqls b/api/schema.graphqls
index 82681c0..69e32e2 100644
--- a/api/schema.graphqls
+++ b/api/schema.graphqls
@@ -156,12 +156,18 @@ type MetricFootprints {
 }
 
 type Footprints {
-  nodehours: [NullableFloat!]!
+  timeWeights: TimeWeights!
   metrics: [MetricFootprints!]!
 }
 
+type TimeWeights {
+  nodeHours: [NullableFloat!]!
+  accHours: [NullableFloat!]!
+  coreHours: [NullableFloat!]!
+}
+
 enum Aggregate { USER, PROJECT, CLUSTER }
-enum Weights { NODE_COUNT, NODE_HOURS }
+enum SortByAggregate { TOTALWALLTIME, TOTALJOBS, TOTALNODES, TOTALNODEHOURS, TOTALCORES, TOTALCOREHOURS, TOTALACCS, TOTALACCHOURS }
 
 type NodeMetrics {
   host: String!
@@ -192,8 +198,7 @@ type Query {
   jobsFootprints(filter: [JobFilter!], metrics: [String!]!): Footprints
 
   jobs(filter: [JobFilter!], page: PageRequest, order: OrderByInput): JobResultList!
-  jobsStatistics(filter: [JobFilter!], groupBy: Aggregate): [JobsStatistics!]!
-  jobsCount(filter: [JobFilter]!, groupBy: Aggregate!, weight: Weights, limit: Int): [Count!]!
+  jobsStatistics(filter: [JobFilter!], page: PageRequest, sortBy: SortByAggregate, groupBy: Aggregate): [JobsStatistics!]!
 
   rooflineHeatmap(filter: [JobFilter!]!, rows: Int!, cols: Int!, minX: Float!, minY: Float!, maxX: Float!, maxY: Float!): [[Float!]!]!
@@ -288,11 +293,16 @@ type JobsStatistics {
   runningJobs: Int!    # Number of running jobs
   shortJobs: Int!      # Number of jobs with a duration of less than duration
   totalWalltime: Int!  # Sum of the duration of all matched jobs in hours
+  totalNodes: Int!     # Sum of the nodes of all matched jobs
   totalNodeHours: Int! # Sum of the node hours of all matched jobs
+  totalCores: Int!     # Sum of the cores of all matched jobs
   totalCoreHours: Int! # Sum of the core hours of all matched jobs
+  totalAccs: Int!      # Sum of the accs of all matched jobs
   totalAccHours: Int!  # Sum of the gpu hours of all matched jobs
   histDuration: [HistoPoint!]! # value: hour, count: number of jobs with a rounded duration of value
   histNumNodes: [HistoPoint!]! # value: number of nodes, count: number of jobs with that number of nodes
+  histNumCores: [HistoPoint!]! # value: number of cores, count: number of jobs with that number of cores
+  histNumAccs: [HistoPoint!]!  # value: number of accs, count: number of jobs with that number of accs
 }
 
 input PageRequest {
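For orientation (not part of the patch): a minimal client-side sketch of the reworked query, where `sortBy` and `page` on `jobsStatistics` take over the role of the removed `jobsCount(weight, limit)`. The endpoint path and the `PageRequest` field names (`itemsPerPage`, `page`) are assumptions not shown in this hunk.

```go
package main

import (
	"bytes"
	"encoding/json"
	"fmt"
	"net/http"
)

// statsQuery exercises the new jobsStatistics signature and some of the
// newly added fields (totalCores, histNumCores).
const statsQuery = `{
  jobsStatistics(groupBy: USER, sortBy: TOTALCOREHOURS, page: {itemsPerPage: 10, page: 1}) {
    id
    totalJobs
    totalCores
    totalCoreHours
    histNumCores { value count }
  }
}`

func main() {
	body, _ := json.Marshal(map[string]string{"query": statsQuery})
	// Endpoint path is an assumption; adjust to the deployment's GraphQL route.
	resp, err := http.Post("http://localhost:8080/query", "application/json", bytes.NewReader(body))
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()
	var out map[string]interface{}
	if err := json.NewDecoder(resp.Body).Decode(&out); err != nil {
		panic(err)
	}
	fmt.Println(out["data"])
}
```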
diff --git a/internal/config/config.go b/internal/config/config.go
index 08d01c6..253951c 100644
--- a/internal/config/config.go
+++ b/internal/config/config.go
@@ -26,21 +26,25 @@ var Keys schema.ProgramConfig = schema.ProgramConfig{
 	StopJobsExceedingWalltime: 0,
 	ShortRunningJobsDuration:  5 * 60,
 	UiDefaults: map[string]interface{}{
-		"analysis_view_histogramMetrics":     []string{"flops_any", "mem_bw", "mem_used"},
-		"analysis_view_scatterPlotMetrics":   [][]string{{"flops_any", "mem_bw"}, {"flops_any", "cpu_load"}, {"cpu_load", "mem_bw"}},
-		"job_view_nodestats_selectedMetrics": []string{"flops_any", "mem_bw", "mem_used"},
-		"job_view_polarPlotMetrics":          []string{"flops_any", "mem_bw", "mem_used"},
-		"job_view_selectedMetrics":           []string{"flops_any", "mem_bw", "mem_used"},
-		"plot_general_colorBackground":       true,
-		"plot_general_colorscheme":           []string{"#00bfff", "#0000ff", "#ff00ff", "#ff0000", "#ff8000", "#ffff00", "#80ff00"},
-		"plot_general_lineWidth":             3,
-		"plot_list_jobsPerPage":              50,
-		"plot_list_selectedMetrics":          []string{"cpu_load", "mem_used", "flops_any", "mem_bw"},
-		"plot_view_plotsPerRow":              3,
-		"plot_view_showPolarplot":            true,
-		"plot_view_showRoofline":             true,
-		"plot_view_showStatTable":            true,
-		"system_view_selectedMetric":         "cpu_load",
+		"analysis_view_histogramMetrics":         []string{"flops_any", "mem_bw", "mem_used"},
+		"analysis_view_scatterPlotMetrics":       [][]string{{"flops_any", "mem_bw"}, {"flops_any", "cpu_load"}, {"cpu_load", "mem_bw"}},
+		"job_view_nodestats_selectedMetrics":     []string{"flops_any", "mem_bw", "mem_used"},
+		"job_view_polarPlotMetrics":              []string{"flops_any", "mem_bw", "mem_used"},
+		"job_view_selectedMetrics":               []string{"flops_any", "mem_bw", "mem_used"},
+		"plot_general_colorBackground":           true,
+		"plot_general_colorscheme":               []string{"#00bfff", "#0000ff", "#ff00ff", "#ff0000", "#ff8000", "#ffff00", "#80ff00"},
+		"plot_general_lineWidth":                 3,
+		"plot_list_jobsPerPage":                  50,
+		"plot_list_selectedMetrics":              []string{"cpu_load", "mem_used", "flops_any", "mem_bw"},
+		"plot_view_plotsPerRow":                  3,
+		"plot_view_showPolarplot":                true,
+		"plot_view_showRoofline":                 true,
+		"plot_view_showStatTable":                true,
+		"system_view_selectedMetric":             "cpu_load",
+		"analysis_view_selectedTopEntity":        "user",
+		"analysis_view_selectedTopCategory":      "totalWalltime",
+		"status_view_selectedTopUserCategory":    "totalJobs",
+		"status_view_selectedTopProjectCategory": "totalJobs",
 	},
 }
diff --git a/internal/graph/generated/generated.go b/internal/graph/generated/generated.go
index 1cb8b74..f29e2a0 100644
--- a/internal/graph/generated/generated.go
+++ b/internal/graph/generated/generated.go
@@ -68,8 +68,8 @@ type ComplexityRoot struct {
 	}
 
 	Footprints struct {
-		Metrics   func(childComplexity int) int
-		Nodehours func(childComplexity int) int
+		Metrics     func(childComplexity int) int
+		TimeWeights func(childComplexity int) int
 	}
 
 	HistoPoint struct {
@@ -141,15 +141,20 @@ type ComplexityRoot struct {
 	JobsStatistics struct {
 		HistDuration   func(childComplexity int) int
+		HistNumAccs    func(childComplexity int) int
+		HistNumCores   func(childComplexity int) int
 		HistNumNodes   func(childComplexity int) int
 		ID             func(childComplexity int) int
 		Name           func(childComplexity int) int
 		RunningJobs    func(childComplexity int) int
 		ShortJobs      func(childComplexity int) int
 		TotalAccHours  func(childComplexity int) int
+		TotalAccs      func(childComplexity int) int
 		TotalCoreHours func(childComplexity int) int
+		TotalCores     func(childComplexity int) int
 		TotalJobs      func(childComplexity int) int
 		TotalNodeHours func(childComplexity int) int
+		TotalNodes     func(childComplexity int) int
 		TotalWalltime  func(childComplexity int) int
 	}
 
@@ -202,9 +207,8 @@ type ComplexityRoot struct {
 		Job             func(childComplexity int, id string) int
 		JobMetrics      func(childComplexity int, id string, metrics []string, scopes []schema.MetricScope) int
 		Jobs            func(childComplexity int, filter []*model.JobFilter, page *model.PageRequest, order *model.OrderByInput) int
-		JobsCount       func(childComplexity int, filter []*model.JobFilter, groupBy model.Aggregate, weight *model.Weights, limit *int) int
 		JobsFootprints  func(childComplexity int, filter []*model.JobFilter, metrics []string) int
-		JobsStatistics  func(childComplexity int, filter []*model.JobFilter, groupBy *model.Aggregate) int
+		JobsStatistics  func(childComplexity int, filter []*model.JobFilter, page *model.PageRequest, sortBy *model.SortByAggregate, groupBy *model.Aggregate) int
 		NodeMetrics     func(childComplexity int, cluster string, nodes []string, scopes []schema.MetricScope, metrics []string, from time.Time, to time.Time) int
 		RooflineHeatmap func(childComplexity int, filter []*model.JobFilter, rows int, cols int, minX float64, minY float64, maxX float64, maxY float64) int
 		Tags            func(childComplexity int) int
@@ -265,6 +269,12 @@ type ComplexityRoot struct {
 		To   func(childComplexity int) int
 	}
 
+	TimeWeights struct {
+		AccHours  func(childComplexity int) int
+		CoreHours func(childComplexity int) int
+		NodeHours func(childComplexity int) int
+	}
+
 	Topology struct {
 		Accelerators func(childComplexity int) int
 		Core         func(childComplexity int) int
@@ -312,8 +322,7 @@ type QueryResolver interface {
 	JobMetrics(ctx context.Context, id string, metrics []string, scopes []schema.MetricScope) ([]*model.JobMetricWithName, error)
 	JobsFootprints(ctx context.Context, filter []*model.JobFilter, metrics []string) (*model.Footprints, error)
 	Jobs(ctx context.Context, filter []*model.JobFilter, page *model.PageRequest, order *model.OrderByInput) (*model.JobResultList, error)
-	JobsStatistics(ctx context.Context, filter []*model.JobFilter, groupBy *model.Aggregate) ([]*model.JobsStatistics, error)
-	JobsCount(ctx context.Context, filter []*model.JobFilter, groupBy model.Aggregate, weight *model.Weights, limit *int) ([]*model.Count, error)
+	JobsStatistics(ctx context.Context, filter []*model.JobFilter, page *model.PageRequest, sortBy *model.SortByAggregate, groupBy *model.Aggregate) ([]*model.JobsStatistics, error)
 	RooflineHeatmap(ctx context.Context, filter []*model.JobFilter, rows int, cols int, minX float64, minY float64, maxX float64, maxY float64) ([][]float64, error)
 	NodeMetrics(ctx context.Context, cluster string, nodes []string, scopes []schema.MetricScope, metrics []string, from time.Time, to time.Time) ([]*model.NodeMetrics, error)
 }
@@ -406,12 +415,12 @@ func (e *executableSchema) Complexity(typeName, field string, childComplexity in
 
 		return e.complexity.Footprints.Metrics(childComplexity), true
 
-	case "Footprints.nodehours":
-		if e.complexity.Footprints.Nodehours == nil {
+	case "Footprints.timeWeights":
+		if e.complexity.Footprints.TimeWeights == nil {
 			break
 		}
 
-		return e.complexity.Footprints.Nodehours(childComplexity), true
+		return e.complexity.Footprints.TimeWeights(childComplexity), true
 
 	case "HistoPoint.count":
 		if e.complexity.HistoPoint.Count == nil {
 			break
@@ -721,6 +730,20 @@ func (e *executableSchema) Complexity(typeName, field string, childComplexity in
 
 		return e.complexity.JobsStatistics.HistDuration(childComplexity), true
 
"JobsStatistics.histNumAccs": + if e.complexity.JobsStatistics.HistNumAccs == nil { + break + } + + return e.complexity.JobsStatistics.HistNumAccs(childComplexity), true + + case "JobsStatistics.histNumCores": + if e.complexity.JobsStatistics.HistNumCores == nil { + break + } + + return e.complexity.JobsStatistics.HistNumCores(childComplexity), true + case "JobsStatistics.histNumNodes": if e.complexity.JobsStatistics.HistNumNodes == nil { break @@ -763,6 +786,13 @@ func (e *executableSchema) Complexity(typeName, field string, childComplexity in return e.complexity.JobsStatistics.TotalAccHours(childComplexity), true + case "JobsStatistics.totalAccs": + if e.complexity.JobsStatistics.TotalAccs == nil { + break + } + + return e.complexity.JobsStatistics.TotalAccs(childComplexity), true + case "JobsStatistics.totalCoreHours": if e.complexity.JobsStatistics.TotalCoreHours == nil { break @@ -770,6 +800,13 @@ func (e *executableSchema) Complexity(typeName, field string, childComplexity in return e.complexity.JobsStatistics.TotalCoreHours(childComplexity), true + case "JobsStatistics.totalCores": + if e.complexity.JobsStatistics.TotalCores == nil { + break + } + + return e.complexity.JobsStatistics.TotalCores(childComplexity), true + case "JobsStatistics.totalJobs": if e.complexity.JobsStatistics.TotalJobs == nil { break @@ -784,6 +821,13 @@ func (e *executableSchema) Complexity(typeName, field string, childComplexity in return e.complexity.JobsStatistics.TotalNodeHours(childComplexity), true + case "JobsStatistics.totalNodes": + if e.complexity.JobsStatistics.TotalNodes == nil { + break + } + + return e.complexity.JobsStatistics.TotalNodes(childComplexity), true + case "JobsStatistics.totalWalltime": if e.complexity.JobsStatistics.TotalWalltime == nil { break @@ -1046,18 +1090,6 @@ func (e *executableSchema) Complexity(typeName, field string, childComplexity in return e.complexity.Query.Jobs(childComplexity, args["filter"].([]*model.JobFilter), args["page"].(*model.PageRequest), args["order"].(*model.OrderByInput)), true - case "Query.jobsCount": - if e.complexity.Query.JobsCount == nil { - break - } - - args, err := ec.field_Query_jobsCount_args(context.TODO(), rawArgs) - if err != nil { - return 0, false - } - - return e.complexity.Query.JobsCount(childComplexity, args["filter"].([]*model.JobFilter), args["groupBy"].(model.Aggregate), args["weight"].(*model.Weights), args["limit"].(*int)), true - case "Query.jobsFootprints": if e.complexity.Query.JobsFootprints == nil { break @@ -1080,7 +1112,7 @@ func (e *executableSchema) Complexity(typeName, field string, childComplexity in return 0, false } - return e.complexity.Query.JobsStatistics(childComplexity, args["filter"].([]*model.JobFilter), args["groupBy"].(*model.Aggregate)), true + return e.complexity.Query.JobsStatistics(childComplexity, args["filter"].([]*model.JobFilter), args["page"].(*model.PageRequest), args["sortBy"].(*model.SortByAggregate), args["groupBy"].(*model.Aggregate)), true case "Query.nodeMetrics": if e.complexity.Query.NodeMetrics == nil { @@ -1356,6 +1388,27 @@ func (e *executableSchema) Complexity(typeName, field string, childComplexity in return e.complexity.TimeRangeOutput.To(childComplexity), true + case "TimeWeights.accHours": + if e.complexity.TimeWeights.AccHours == nil { + break + } + + return e.complexity.TimeWeights.AccHours(childComplexity), true + + case "TimeWeights.coreHours": + if e.complexity.TimeWeights.CoreHours == nil { + break + } + + return e.complexity.TimeWeights.CoreHours(childComplexity), true + + 
case "TimeWeights.nodeHours": + if e.complexity.TimeWeights.NodeHours == nil { + break + } + + return e.complexity.TimeWeights.NodeHours(childComplexity), true + case "Topology.accelerators": if e.complexity.Topology.Accelerators == nil { break @@ -1703,12 +1756,18 @@ type MetricFootprints { } type Footprints { - nodehours: [NullableFloat!]! + timeWeights: TimeWeights! metrics: [MetricFootprints!]! } +type TimeWeights { + nodeHours: [NullableFloat!]! + accHours: [NullableFloat!]! + coreHours: [NullableFloat!]! +} + enum Aggregate { USER, PROJECT, CLUSTER } -enum Weights { NODE_COUNT, NODE_HOURS } +enum SortByAggregate { TOTALWALLTIME, TOTALJOBS, TOTALNODES, TOTALNODEHOURS, TOTALCORES, TOTALCOREHOURS, TOTALACCS, TOTALACCHOURS } type NodeMetrics { host: String! @@ -1739,8 +1798,7 @@ type Query { jobsFootprints(filter: [JobFilter!], metrics: [String!]!): Footprints jobs(filter: [JobFilter!], page: PageRequest, order: OrderByInput): JobResultList! - jobsStatistics(filter: [JobFilter!], groupBy: Aggregate): [JobsStatistics!]! - jobsCount(filter: [JobFilter]!, groupBy: Aggregate!, weight: Weights, limit: Int): [Count!]! + jobsStatistics(filter: [JobFilter!], page: PageRequest, sortBy: SortByAggregate, groupBy: Aggregate): [JobsStatistics!]! rooflineHeatmap(filter: [JobFilter!]!, rows: Int!, cols: Int!, minX: Float!, minY: Float!, maxX: Float!, maxY: Float!): [[Float!]!]! @@ -1835,11 +1893,16 @@ type JobsStatistics { runningJobs: Int! # Number of running jobs shortJobs: Int! # Number of jobs with a duration of less than duration totalWalltime: Int! # Sum of the duration of all matched jobs in hours + totalNodes: Int! # Sum of the nodes of all matched jobs totalNodeHours: Int! # Sum of the node hours of all matched jobs + totalCores: Int! # Sum of the cores of all matched jobs totalCoreHours: Int! # Sum of the core hours of all matched jobs + totalAccs: Int! # Sum of the accs of all matched jobs totalAccHours: Int! # Sum of the gpu hours of all matched jobs histDuration: [HistoPoint!]! # value: hour, count: number of jobs with a rounded duration of value histNumNodes: [HistoPoint!]! # value: number of nodes, count: number of jobs with that number of nodes + histNumCores: [HistoPoint!]! # value: number of cores, count: number of jobs with that number of cores + histNumAccs: [HistoPoint!]! 
@@ -2043,48 +2106,6 @@ func (ec *executionContext) field_Query_job_args(ctx context.Context, rawArgs ma
 	return args, nil
 }
 
-func (ec *executionContext) field_Query_jobsCount_args(ctx context.Context, rawArgs map[string]interface{}) (map[string]interface{}, error) {
-	var err error
-	args := map[string]interface{}{}
-	var arg0 []*model.JobFilter
-	if tmp, ok := rawArgs["filter"]; ok {
-		ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("filter"))
-		arg0, err = ec.unmarshalNJobFilter2ᚕᚖgithubᚗcomᚋClusterCockpitᚋccᚑbackendᚋinternalᚋgraphᚋmodelᚐJobFilter(ctx, tmp)
-		if err != nil {
-			return nil, err
-		}
-	}
-	args["filter"] = arg0
-	var arg1 model.Aggregate
-	if tmp, ok := rawArgs["groupBy"]; ok {
-		ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("groupBy"))
-		arg1, err = ec.unmarshalNAggregate2githubᚗcomᚋClusterCockpitᚋccᚑbackendᚋinternalᚋgraphᚋmodelᚐAggregate(ctx, tmp)
-		if err != nil {
-			return nil, err
-		}
-	}
-	args["groupBy"] = arg1
-	var arg2 *model.Weights
-	if tmp, ok := rawArgs["weight"]; ok {
-		ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("weight"))
-		arg2, err = ec.unmarshalOWeights2ᚖgithubᚗcomᚋClusterCockpitᚋccᚑbackendᚋinternalᚋgraphᚋmodelᚐWeights(ctx, tmp)
-		if err != nil {
-			return nil, err
-		}
-	}
-	args["weight"] = arg2
-	var arg3 *int
-	if tmp, ok := rawArgs["limit"]; ok {
-		ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("limit"))
-		arg3, err = ec.unmarshalOInt2ᚖint(ctx, tmp)
-		if err != nil {
-			return nil, err
-		}
-	}
-	args["limit"] = arg3
-	return args, nil
-}
-
 func (ec *executionContext) field_Query_jobsFootprints_args(ctx context.Context, rawArgs map[string]interface{}) (map[string]interface{}, error) {
 	var err error
 	args := map[string]interface{}{}
@@ -2121,15 +2142,33 @@ func (ec *executionContext) field_Query_jobsStatistics_args(ctx context.Context,
 		}
 	}
 	args["filter"] = arg0
-	var arg1 *model.Aggregate
-	if tmp, ok := rawArgs["groupBy"]; ok {
-		ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("groupBy"))
-		arg1, err = ec.unmarshalOAggregate2ᚖgithubᚗcomᚋClusterCockpitᚋccᚑbackendᚋinternalᚋgraphᚋmodelᚐAggregate(ctx, tmp)
+	var arg1 *model.PageRequest
+	if tmp, ok := rawArgs["page"]; ok {
+		ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("page"))
+		arg1, err = ec.unmarshalOPageRequest2ᚖgithubᚗcomᚋClusterCockpitᚋccᚑbackendᚋinternalᚋgraphᚋmodelᚐPageRequest(ctx, tmp)
 		if err != nil {
 			return nil, err
 		}
 	}
-	args["groupBy"] = arg1
+	args["page"] = arg1
+	var arg2 *model.SortByAggregate
+	if tmp, ok := rawArgs["sortBy"]; ok {
+		ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("sortBy"))
+		arg2, err = ec.unmarshalOSortByAggregate2ᚖgithubᚗcomᚋClusterCockpitᚋccᚑbackendᚋinternalᚋgraphᚋmodelᚐSortByAggregate(ctx, tmp)
+		if err != nil {
+			return nil, err
+		}
+	}
+	args["sortBy"] = arg2
+	var arg3 *model.Aggregate
+	if tmp, ok := rawArgs["groupBy"]; ok {
+		ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("groupBy"))
+		arg3, err = ec.unmarshalOAggregate2ᚖgithubᚗcomᚋClusterCockpitᚋccᚑbackendᚋinternalᚋgraphᚋmodelᚐAggregate(ctx, tmp)
+		if err != nil {
+			return nil, err
+		}
+	}
+	args["groupBy"] = arg3
 	return args, nil
 }
 
@@ -2790,8 +2829,8 @@ func (ec *executionContext) fieldContext_Count_count(ctx context.Context, field
 	return fc, nil
 }
 
-func (ec *executionContext) _Footprints_nodehours(ctx context.Context, field graphql.CollectedField, obj *model.Footprints) (ret graphql.Marshaler) {
-	fc, err := ec.fieldContext_Footprints_nodehours(ctx, field)
+func (ec *executionContext) _Footprints_timeWeights(ctx context.Context, field graphql.CollectedField, obj *model.Footprints) (ret graphql.Marshaler) {
+	fc, err := ec.fieldContext_Footprints_timeWeights(ctx, field)
 	if err != nil {
 		return graphql.Null
 	}
@@ -2804,7 +2843,7 @@ func (ec *executionContext) _Footprints_nodehours(ctx context.Context, field gra
 	}()
 	resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
 		ctx = rctx // use context from middleware stack in children
-		return obj.Nodehours, nil
+		return obj.TimeWeights, nil
 	})
 	if err != nil {
 		ec.Error(ctx, err)
@@ -2816,19 +2855,27 @@ func (ec *executionContext) _Footprints_nodehours(ctx context.Context, field gra
 		}
 		return graphql.Null
 	}
-	res := resTmp.([]schema.Float)
+	res := resTmp.(*model.TimeWeights)
 	fc.Result = res
-	return ec.marshalNNullableFloat2ᚕgithubᚗcomᚋClusterCockpitᚋccᚑbackendᚋpkgᚋschemaᚐFloatᚄ(ctx, field.Selections, res)
+	return ec.marshalNTimeWeights2ᚖgithubᚗcomᚋClusterCockpitᚋccᚑbackendᚋinternalᚋgraphᚋmodelᚐTimeWeights(ctx, field.Selections, res)
 }
 
-func (ec *executionContext) fieldContext_Footprints_nodehours(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) {
+func (ec *executionContext) fieldContext_Footprints_timeWeights(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) {
 	fc = &graphql.FieldContext{
 		Object:     "Footprints",
 		Field:      field,
 		IsMethod:   false,
 		IsResolver: false,
 		Child: func(ctx context.Context, field graphql.CollectedField) (*graphql.FieldContext, error) {
-			return nil, errors.New("field of type NullableFloat does not have child fields")
+			switch field.Name {
+			case "nodeHours":
+				return ec.fieldContext_TimeWeights_nodeHours(ctx, field)
+			case "accHours":
+				return ec.fieldContext_TimeWeights_accHours(ctx, field)
+			case "coreHours":
+				return ec.fieldContext_TimeWeights_coreHours(ctx, field)
+			}
+			return nil, fmt.Errorf("no field named %q was found under type TimeWeights", field.Name)
 		},
 	}
 	return fc, nil
@@ -5129,6 +5176,50 @@ func (ec *executionContext) fieldContext_JobsStatistics_totalWalltime(ctx contex
 	return fc, nil
 }
 
+func (ec *executionContext) _JobsStatistics_totalNodes(ctx context.Context, field graphql.CollectedField, obj *model.JobsStatistics) (ret graphql.Marshaler) {
+	fc, err := ec.fieldContext_JobsStatistics_totalNodes(ctx, field)
+	if err != nil {
+		return graphql.Null
+	}
+	ctx = graphql.WithFieldContext(ctx, fc)
+	defer func() {
+		if r := recover(); r != nil {
+			ec.Error(ctx, ec.Recover(ctx, r))
+			ret = graphql.Null
+		}
+	}()
+	resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
+		ctx = rctx // use context from middleware stack in children
+		return obj.TotalNodes, nil
+	})
+	if err != nil {
+		ec.Error(ctx, err)
+		return graphql.Null
+	}
+	if resTmp == nil {
+		if !graphql.HasFieldError(ctx, fc) {
+			ec.Errorf(ctx, "must not be null")
+		}
+		return graphql.Null
+	}
+	res := resTmp.(int)
+	fc.Result = res
+	return ec.marshalNInt2int(ctx, field.Selections, res)
+}
+
+func (ec *executionContext) fieldContext_JobsStatistics_totalNodes(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) {
+	fc = &graphql.FieldContext{
+		Object:     "JobsStatistics",
+		Field:      field,
+		IsMethod:   false,
+		IsResolver: false,
+		Child: func(ctx context.Context, field graphql.CollectedField) (*graphql.FieldContext, error) {
+			return nil, errors.New("field of type Int does not have child fields")
fields") + }, + } + return fc, nil +} + func (ec *executionContext) _JobsStatistics_totalNodeHours(ctx context.Context, field graphql.CollectedField, obj *model.JobsStatistics) (ret graphql.Marshaler) { fc, err := ec.fieldContext_JobsStatistics_totalNodeHours(ctx, field) if err != nil { @@ -5173,6 +5264,50 @@ func (ec *executionContext) fieldContext_JobsStatistics_totalNodeHours(ctx conte return fc, nil } +func (ec *executionContext) _JobsStatistics_totalCores(ctx context.Context, field graphql.CollectedField, obj *model.JobsStatistics) (ret graphql.Marshaler) { + fc, err := ec.fieldContext_JobsStatistics_totalCores(ctx, field) + if err != nil { + return graphql.Null + } + ctx = graphql.WithFieldContext(ctx, fc) + defer func() { + if r := recover(); r != nil { + ec.Error(ctx, ec.Recover(ctx, r)) + ret = graphql.Null + } + }() + resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) { + ctx = rctx // use context from middleware stack in children + return obj.TotalCores, nil + }) + if err != nil { + ec.Error(ctx, err) + return graphql.Null + } + if resTmp == nil { + if !graphql.HasFieldError(ctx, fc) { + ec.Errorf(ctx, "must not be null") + } + return graphql.Null + } + res := resTmp.(int) + fc.Result = res + return ec.marshalNInt2int(ctx, field.Selections, res) +} + +func (ec *executionContext) fieldContext_JobsStatistics_totalCores(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) { + fc = &graphql.FieldContext{ + Object: "JobsStatistics", + Field: field, + IsMethod: false, + IsResolver: false, + Child: func(ctx context.Context, field graphql.CollectedField) (*graphql.FieldContext, error) { + return nil, errors.New("field of type Int does not have child fields") + }, + } + return fc, nil +} + func (ec *executionContext) _JobsStatistics_totalCoreHours(ctx context.Context, field graphql.CollectedField, obj *model.JobsStatistics) (ret graphql.Marshaler) { fc, err := ec.fieldContext_JobsStatistics_totalCoreHours(ctx, field) if err != nil { @@ -5217,6 +5352,50 @@ func (ec *executionContext) fieldContext_JobsStatistics_totalCoreHours(ctx conte return fc, nil } +func (ec *executionContext) _JobsStatistics_totalAccs(ctx context.Context, field graphql.CollectedField, obj *model.JobsStatistics) (ret graphql.Marshaler) { + fc, err := ec.fieldContext_JobsStatistics_totalAccs(ctx, field) + if err != nil { + return graphql.Null + } + ctx = graphql.WithFieldContext(ctx, fc) + defer func() { + if r := recover(); r != nil { + ec.Error(ctx, ec.Recover(ctx, r)) + ret = graphql.Null + } + }() + resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) { + ctx = rctx // use context from middleware stack in children + return obj.TotalAccs, nil + }) + if err != nil { + ec.Error(ctx, err) + return graphql.Null + } + if resTmp == nil { + if !graphql.HasFieldError(ctx, fc) { + ec.Errorf(ctx, "must not be null") + } + return graphql.Null + } + res := resTmp.(int) + fc.Result = res + return ec.marshalNInt2int(ctx, field.Selections, res) +} + +func (ec *executionContext) fieldContext_JobsStatistics_totalAccs(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) { + fc = &graphql.FieldContext{ + Object: "JobsStatistics", + Field: field, + IsMethod: false, + IsResolver: false, + Child: func(ctx context.Context, field graphql.CollectedField) (*graphql.FieldContext, error) { + return nil, errors.New("field of type Int does not have child fields") + }, + } + return fc, nil +} + func 
 func (ec *executionContext) _JobsStatistics_totalAccHours(ctx context.Context, field graphql.CollectedField, obj *model.JobsStatistics) (ret graphql.Marshaler) {
 	fc, err := ec.fieldContext_JobsStatistics_totalAccHours(ctx, field)
 	if err != nil {
@@ -5361,6 +5540,106 @@ func (ec *executionContext) fieldContext_JobsStatistics_histNumNodes(ctx context
 	return fc, nil
 }
 
+func (ec *executionContext) _JobsStatistics_histNumCores(ctx context.Context, field graphql.CollectedField, obj *model.JobsStatistics) (ret graphql.Marshaler) {
+	fc, err := ec.fieldContext_JobsStatistics_histNumCores(ctx, field)
+	if err != nil {
+		return graphql.Null
+	}
+	ctx = graphql.WithFieldContext(ctx, fc)
+	defer func() {
+		if r := recover(); r != nil {
+			ec.Error(ctx, ec.Recover(ctx, r))
+			ret = graphql.Null
+		}
+	}()
+	resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
+		ctx = rctx // use context from middleware stack in children
+		return obj.HistNumCores, nil
+	})
+	if err != nil {
+		ec.Error(ctx, err)
+		return graphql.Null
+	}
+	if resTmp == nil {
+		if !graphql.HasFieldError(ctx, fc) {
+			ec.Errorf(ctx, "must not be null")
+		}
+		return graphql.Null
+	}
+	res := resTmp.([]*model.HistoPoint)
+	fc.Result = res
+	return ec.marshalNHistoPoint2ᚕᚖgithubᚗcomᚋClusterCockpitᚋccᚑbackendᚋinternalᚋgraphᚋmodelᚐHistoPointᚄ(ctx, field.Selections, res)
+}
+
+func (ec *executionContext) fieldContext_JobsStatistics_histNumCores(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) {
+	fc = &graphql.FieldContext{
+		Object:     "JobsStatistics",
+		Field:      field,
+		IsMethod:   false,
+		IsResolver: false,
+		Child: func(ctx context.Context, field graphql.CollectedField) (*graphql.FieldContext, error) {
+			switch field.Name {
+			case "count":
+				return ec.fieldContext_HistoPoint_count(ctx, field)
+			case "value":
+				return ec.fieldContext_HistoPoint_value(ctx, field)
+			}
+			return nil, fmt.Errorf("no field named %q was found under type HistoPoint", field.Name)
+		},
+	}
+	return fc, nil
+}
+
+func (ec *executionContext) _JobsStatistics_histNumAccs(ctx context.Context, field graphql.CollectedField, obj *model.JobsStatistics) (ret graphql.Marshaler) {
+	fc, err := ec.fieldContext_JobsStatistics_histNumAccs(ctx, field)
+	if err != nil {
+		return graphql.Null
+	}
+	ctx = graphql.WithFieldContext(ctx, fc)
+	defer func() {
+		if r := recover(); r != nil {
+			ec.Error(ctx, ec.Recover(ctx, r))
+			ret = graphql.Null
+		}
+	}()
+	resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
+		ctx = rctx // use context from middleware stack in children
+		return obj.HistNumAccs, nil
+	})
+	if err != nil {
+		ec.Error(ctx, err)
+		return graphql.Null
+	}
+	if resTmp == nil {
+		if !graphql.HasFieldError(ctx, fc) {
+			ec.Errorf(ctx, "must not be null")
+		}
+		return graphql.Null
+	}
+	res := resTmp.([]*model.HistoPoint)
+	fc.Result = res
+	return ec.marshalNHistoPoint2ᚕᚖgithubᚗcomᚋClusterCockpitᚋccᚑbackendᚋinternalᚋgraphᚋmodelᚐHistoPointᚄ(ctx, field.Selections, res)
+}
+
+func (ec *executionContext) fieldContext_JobsStatistics_histNumAccs(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) {
+	fc = &graphql.FieldContext{
+		Object:     "JobsStatistics",
+		Field:      field,
+		IsMethod:   false,
+		IsResolver: false,
+		Child: func(ctx context.Context, field graphql.CollectedField) (*graphql.FieldContext, error) {
+			switch field.Name {
+			case "count":
+				return ec.fieldContext_HistoPoint_count(ctx, field)
+			case "value":
+				return ec.fieldContext_HistoPoint_value(ctx, field)
+			}
+			return nil, fmt.Errorf("no field named %q was found under type HistoPoint", field.Name)
+		},
+	}
+	return fc, nil
+}
+
 func (ec *executionContext) _MetricConfig_name(ctx context.Context, field graphql.CollectedField, obj *schema.MetricConfig) (ret graphql.Marshaler) {
 	fc, err := ec.fieldContext_MetricConfig_name(ctx, field)
 	if err != nil {
@@ -6994,8 +7273,8 @@ func (ec *executionContext) fieldContext_Query_jobsFootprints(ctx context.Contex
 		IsResolver: true,
 		Child: func(ctx context.Context, field graphql.CollectedField) (*graphql.FieldContext, error) {
 			switch field.Name {
-			case "nodehours":
-				return ec.fieldContext_Footprints_nodehours(ctx, field)
+			case "timeWeights":
+				return ec.fieldContext_Footprints_timeWeights(ctx, field)
 			case "metrics":
 				return ec.fieldContext_Footprints_metrics(ctx, field)
 			}
@@ -7095,7 +7374,7 @@ func (ec *executionContext) _Query_jobsStatistics(ctx context.Context, field gra
 	}()
 	resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
 		ctx = rctx // use context from middleware stack in children
-		return ec.resolvers.Query().JobsStatistics(rctx, fc.Args["filter"].([]*model.JobFilter), fc.Args["groupBy"].(*model.Aggregate))
+		return ec.resolvers.Query().JobsStatistics(rctx, fc.Args["filter"].([]*model.JobFilter), fc.Args["page"].(*model.PageRequest), fc.Args["sortBy"].(*model.SortByAggregate), fc.Args["groupBy"].(*model.Aggregate))
 	})
 	if err != nil {
 		ec.Error(ctx, err)
@@ -7132,16 +7411,26 @@ func (ec *executionContext) fieldContext_Query_jobsStatistics(ctx contex
 				return ec.fieldContext_JobsStatistics_shortJobs(ctx, field)
 			case "totalWalltime":
 				return ec.fieldContext_JobsStatistics_totalWalltime(ctx, field)
+			case "totalNodes":
+				return ec.fieldContext_JobsStatistics_totalNodes(ctx, field)
 			case "totalNodeHours":
 				return ec.fieldContext_JobsStatistics_totalNodeHours(ctx, field)
+			case "totalCores":
+				return ec.fieldContext_JobsStatistics_totalCores(ctx, field)
 			case "totalCoreHours":
 				return ec.fieldContext_JobsStatistics_totalCoreHours(ctx, field)
+			case "totalAccs":
+				return ec.fieldContext_JobsStatistics_totalAccs(ctx, field)
 			case "totalAccHours":
 				return ec.fieldContext_JobsStatistics_totalAccHours(ctx, field)
 			case "histDuration":
 				return ec.fieldContext_JobsStatistics_histDuration(ctx, field)
 			case "histNumNodes":
 				return ec.fieldContext_JobsStatistics_histNumNodes(ctx, field)
+			case "histNumCores":
+				return ec.fieldContext_JobsStatistics_histNumCores(ctx, field)
+			case "histNumAccs":
+				return ec.fieldContext_JobsStatistics_histNumAccs(ctx, field)
 			}
 			return nil, fmt.Errorf("no field named %q was found under type JobsStatistics", field.Name)
 		},
@@ -7160,67 +7449,6 @@ func (ec *executionContext) fieldContext_Query_jobsStatistics(ctx contex
 	return fc, nil
 }
 
-func (ec *executionContext) _Query_jobsCount(ctx context.Context, field graphql.CollectedField) (ret graphql.Marshaler) {
-	fc, err := ec.fieldContext_Query_jobsCount(ctx, field)
-	if err != nil {
-		return graphql.Null
-	}
-	ctx = graphql.WithFieldContext(ctx, fc)
-	defer func() {
-		if r := recover(); r != nil {
-			ec.Error(ctx, ec.Recover(ctx, r))
-			ret = graphql.Null
-		}
-	}()
-	resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
-		ctx = rctx // use context from middleware stack in children
-		return ec.resolvers.Query().JobsCount(rctx, fc.Args["filter"].([]*model.JobFilter), fc.Args["groupBy"].(model.Aggregate), fc.Args["weight"].(*model.Weights), fc.Args["limit"].(*int))
-	})
-	if err != nil {
-		ec.Error(ctx, err)
-		return graphql.Null
-	}
-	if resTmp == nil {
-		if !graphql.HasFieldError(ctx, fc) {
-			ec.Errorf(ctx, "must not be null")
-		}
-		return graphql.Null
-	}
-	res := resTmp.([]*model.Count)
-	fc.Result = res
-	return ec.marshalNCount2ᚕᚖgithubᚗcomᚋClusterCockpitᚋccᚑbackendᚋinternalᚋgraphᚋmodelᚐCountᚄ(ctx, field.Selections, res)
-}
-
-func (ec *executionContext) fieldContext_Query_jobsCount(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) {
-	fc = &graphql.FieldContext{
-		Object:     "Query",
-		Field:      field,
-		IsMethod:   true,
-		IsResolver: true,
-		Child: func(ctx context.Context, field graphql.CollectedField) (*graphql.FieldContext, error) {
-			switch field.Name {
-			case "name":
-				return ec.fieldContext_Count_name(ctx, field)
-			case "count":
-				return ec.fieldContext_Count_count(ctx, field)
-			}
-			return nil, fmt.Errorf("no field named %q was found under type Count", field.Name)
-		},
-	}
-	defer func() {
-		if r := recover(); r != nil {
-			err = ec.Recover(ctx, r)
-			ec.Error(ctx, err)
-		}
-	}()
-	ctx = graphql.WithFieldContext(ctx, fc)
-	if fc.Args, err = ec.field_Query_jobsCount_args(ctx, field.ArgumentMap(ec.Variables)); err != nil {
-		ec.Error(ctx, err)
-		return fc, err
-	}
-	return fc, nil
-}
-
 func (ec *executionContext) _Query_rooflineHeatmap(ctx context.Context, field graphql.CollectedField) (ret graphql.Marshaler) {
 	fc, err := ec.fieldContext_Query_rooflineHeatmap(ctx, field)
 	if err != nil {
@@ -8930,6 +9158,138 @@ func (ec *executionContext) fieldContext_TimeRangeOutput_to(ctx context.Context,
 	return fc, nil
 }
 
+func (ec *executionContext) _TimeWeights_nodeHours(ctx context.Context, field graphql.CollectedField, obj *model.TimeWeights) (ret graphql.Marshaler) {
+	fc, err := ec.fieldContext_TimeWeights_nodeHours(ctx, field)
+	if err != nil {
+		return graphql.Null
+	}
+	ctx = graphql.WithFieldContext(ctx, fc)
+	defer func() {
+		if r := recover(); r != nil {
+			ec.Error(ctx, ec.Recover(ctx, r))
+			ret = graphql.Null
+		}
+	}()
+	resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
+		ctx = rctx // use context from middleware stack in children
+		return obj.NodeHours, nil
+	})
+	if err != nil {
+		ec.Error(ctx, err)
+		return graphql.Null
+	}
+	if resTmp == nil {
+		if !graphql.HasFieldError(ctx, fc) {
+			ec.Errorf(ctx, "must not be null")
+		}
+		return graphql.Null
+	}
+	res := resTmp.([]schema.Float)
+	fc.Result = res
+	return ec.marshalNNullableFloat2ᚕgithubᚗcomᚋClusterCockpitᚋccᚑbackendᚋpkgᚋschemaᚐFloatᚄ(ctx, field.Selections, res)
+}
+
+func (ec *executionContext) fieldContext_TimeWeights_nodeHours(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) {
+	fc = &graphql.FieldContext{
+		Object:     "TimeWeights",
+		Field:      field,
+		IsMethod:   false,
+		IsResolver: false,
+		Child: func(ctx context.Context, field graphql.CollectedField) (*graphql.FieldContext, error) {
+			return nil, errors.New("field of type NullableFloat does not have child fields")
+		},
+	}
+	return fc, nil
+}
+
+func (ec *executionContext) _TimeWeights_accHours(ctx context.Context, field graphql.CollectedField, obj *model.TimeWeights) (ret graphql.Marshaler) {
+	fc, err := ec.fieldContext_TimeWeights_accHours(ctx, field)
+	if err != nil {
+		return graphql.Null
+	}
+	ctx = graphql.WithFieldContext(ctx, fc)
+	defer func() {
+		if r := recover(); r != nil {
+			ec.Error(ctx, ec.Recover(ctx, r))
+			ret = graphql.Null
+		}
+	}()
+	resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
+		ctx = rctx // use context from middleware stack in children
+		return obj.AccHours, nil
+	})
+	if err != nil {
+		ec.Error(ctx, err)
+		return graphql.Null
+	}
+	if resTmp == nil {
+		if !graphql.HasFieldError(ctx, fc) {
+			ec.Errorf(ctx, "must not be null")
+		}
+		return graphql.Null
+	}
+	res := resTmp.([]schema.Float)
+	fc.Result = res
+	return ec.marshalNNullableFloat2ᚕgithubᚗcomᚋClusterCockpitᚋccᚑbackendᚋpkgᚋschemaᚐFloatᚄ(ctx, field.Selections, res)
+}
+
+func (ec *executionContext) fieldContext_TimeWeights_accHours(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) {
+	fc = &graphql.FieldContext{
+		Object:     "TimeWeights",
+		Field:      field,
+		IsMethod:   false,
+		IsResolver: false,
+		Child: func(ctx context.Context, field graphql.CollectedField) (*graphql.FieldContext, error) {
+			return nil, errors.New("field of type NullableFloat does not have child fields")
+		},
+	}
+	return fc, nil
+}
+
+func (ec *executionContext) _TimeWeights_coreHours(ctx context.Context, field graphql.CollectedField, obj *model.TimeWeights) (ret graphql.Marshaler) {
+	fc, err := ec.fieldContext_TimeWeights_coreHours(ctx, field)
+	if err != nil {
+		return graphql.Null
+	}
+	ctx = graphql.WithFieldContext(ctx, fc)
+	defer func() {
+		if r := recover(); r != nil {
+			ec.Error(ctx, ec.Recover(ctx, r))
+			ret = graphql.Null
+		}
+	}()
+	resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
+		ctx = rctx // use context from middleware stack in children
+		return obj.CoreHours, nil
+	})
+	if err != nil {
+		ec.Error(ctx, err)
+		return graphql.Null
+	}
+	if resTmp == nil {
+		if !graphql.HasFieldError(ctx, fc) {
+			ec.Errorf(ctx, "must not be null")
+		}
+		return graphql.Null
+	}
+	res := resTmp.([]schema.Float)
+	fc.Result = res
+	return ec.marshalNNullableFloat2ᚕgithubᚗcomᚋClusterCockpitᚋccᚑbackendᚋpkgᚋschemaᚐFloatᚄ(ctx, field.Selections, res)
+}
+
+func (ec *executionContext) fieldContext_TimeWeights_coreHours(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) {
+	fc = &graphql.FieldContext{
+		Object:     "TimeWeights",
+		Field:      field,
+		IsMethod:   false,
+		IsResolver: false,
+		Child: func(ctx context.Context, field graphql.CollectedField) (*graphql.FieldContext, error) {
+			return nil, errors.New("field of type NullableFloat does not have child fields")
+		},
+	}
+	return fc, nil
+}
+
 func (ec *executionContext) _Topology_node(ctx context.Context, field graphql.CollectedField, obj *schema.Topology) (ret graphql.Marshaler) {
 	fc, err := ec.fieldContext_Topology_node(ctx, field)
 	if err != nil {
@@ -11848,8 +12208,8 @@ func (ec *executionContext) _Footprints(ctx context.Context, sel ast.SelectionSe
 		switch field.Name {
 		case "__typename":
 			out.Values[i] = graphql.MarshalString("Footprints")
-		case "nodehours":
-			out.Values[i] = ec._Footprints_nodehours(ctx, field, obj)
+		case "timeWeights":
+			out.Values[i] = ec._Footprints_timeWeights(ctx, field, obj)
 			if out.Values[i] == graphql.Null {
 				out.Invalids++
 			}
@@ -12500,16 +12860,31 @@ func (ec *executionContext) _JobsStatistics(ctx context.Context, sel ast.Selecti
 			if out.Values[i] == graphql.Null {
 				out.Invalids++
 			}
+		case "totalNodes":
+			out.Values[i] = ec._JobsStatistics_totalNodes(ctx, field, obj)
+			if out.Values[i] == graphql.Null {
+				out.Invalids++
+			}
 		case "totalNodeHours":
 			out.Values[i] = ec._JobsStatistics_totalNodeHours(ctx, field, obj)
 			if out.Values[i] == graphql.Null {
 				out.Invalids++
 			}
+		case "totalCores":
+			out.Values[i] = ec._JobsStatistics_totalCores(ctx, field, obj)
+			if out.Values[i] == graphql.Null {
+				out.Invalids++
+			}
 		case "totalCoreHours":
 			out.Values[i] = ec._JobsStatistics_totalCoreHours(ctx, field, obj)
 			if out.Values[i] == graphql.Null {
 				out.Invalids++
 			}
+		case "totalAccs":
+			out.Values[i] = ec._JobsStatistics_totalAccs(ctx, field, obj)
+			if out.Values[i] == graphql.Null {
+				out.Invalids++
+			}
 		case "totalAccHours":
 			out.Values[i] = ec._JobsStatistics_totalAccHours(ctx, field, obj)
 			if out.Values[i] == graphql.Null {
@@ -12525,6 +12900,16 @@ func (ec *executionContext) _JobsStatistics(ctx context.Context, sel ast.Selecti
 			if out.Values[i] == graphql.Null {
 				out.Invalids++
 			}
+		case "histNumCores":
+			out.Values[i] = ec._JobsStatistics_histNumCores(ctx, field, obj)
+			if out.Values[i] == graphql.Null {
+				out.Invalids++
+			}
+		case "histNumAccs":
+			out.Values[i] = ec._JobsStatistics_histNumAccs(ctx, field, obj)
+			if out.Values[i] == graphql.Null {
+				out.Invalids++
+			}
 		default:
 			panic("unknown field " + strconv.Quote(field.Name))
 		}
@@ -13096,28 +13481,6 @@ func (ec *executionContext) _Query(ctx context.Context, sel ast.SelectionSet) gr
 					func(ctx context.Context) graphql.Marshaler { return innerFunc(ctx, out) })
 			}
 
-			out.Concurrently(i, func(ctx context.Context) graphql.Marshaler { return rrm(innerCtx) })
-		case "jobsCount":
-			field := field
-
-			innerFunc := func(ctx context.Context, fs *graphql.FieldSet) (res graphql.Marshaler) {
-				defer func() {
-					if r := recover(); r != nil {
-						ec.Error(ctx, ec.Recover(ctx, r))
-					}
-				}()
-				res = ec._Query_jobsCount(ctx, field)
-				if res == graphql.Null {
-					atomic.AddUint32(&fs.Invalids, 1)
-				}
-				return res
-			}
-
-			rrm := func(ctx context.Context) graphql.Marshaler {
-				return ec.OperationContext.RootResolverMiddleware(ctx,
-					func(ctx context.Context) graphql.Marshaler { return innerFunc(ctx, out) })
-			}
-
 			out.Concurrently(i, func(ctx context.Context) graphql.Marshaler { return rrm(innerCtx) })
 		case "rooflineHeatmap":
 			field := field
@@ -13598,6 +13961,55 @@ func (ec *executionContext) _TimeRangeOutput(ctx context.Context, sel ast.Select
 	return out
 }
 
+var timeWeightsImplementors = []string{"TimeWeights"}
+
+func (ec *executionContext) _TimeWeights(ctx context.Context, sel ast.SelectionSet, obj *model.TimeWeights) graphql.Marshaler {
+	fields := graphql.CollectFields(ec.OperationContext, sel, timeWeightsImplementors)
+
+	out := graphql.NewFieldSet(fields)
+	deferred := make(map[string]*graphql.FieldSet)
+	for i, field := range fields {
+		switch field.Name {
+		case "__typename":
+			out.Values[i] = graphql.MarshalString("TimeWeights")
+		case "nodeHours":
+			out.Values[i] = ec._TimeWeights_nodeHours(ctx, field, obj)
+			if out.Values[i] == graphql.Null {
+				out.Invalids++
+			}
+		case "accHours":
+			out.Values[i] = ec._TimeWeights_accHours(ctx, field, obj)
+			if out.Values[i] == graphql.Null {
+				out.Invalids++
+			}
+		case "coreHours":
+			out.Values[i] = ec._TimeWeights_coreHours(ctx, field, obj)
+			if out.Values[i] == graphql.Null {
+				out.Invalids++
+			}
+		default:
+			panic("unknown field " + strconv.Quote(field.Name))
+		}
+	}
+	out.Dispatch(ctx)
+	if out.Invalids > 0 {
+		return graphql.Null
+	}
+
+	atomic.AddInt32(&ec.deferred, int32(len(deferred)))
+
+	for label, dfs := range deferred {
+		ec.processDeferredGroup(graphql.DeferredGroup{
+			Label:    label,
+			Path:     graphql.GetPath(ctx),
+			FieldSet: dfs,
+			Context:  ctx,
+		})
+	}
+
+	return out
+}
+
 var topologyImplementors = []string{"Topology"}
 
 func (ec *executionContext) _Topology(ctx context.Context, sel ast.SelectionSet, obj *schema.Topology) graphql.Marshaler {
@@ -14070,16 +14482,6 @@ func (ec *executionContext) marshalNAccelerator2ᚖgithubᚗcomᚋClusterCockpit
 	return ec._Accelerator(ctx, sel, v)
 }
 
-func (ec *executionContext) unmarshalNAggregate2githubᚗcomᚋClusterCockpitᚋccᚑbackendᚋinternalᚋgraphᚋmodelᚐAggregate(ctx context.Context, v interface{}) (model.Aggregate, error) {
-	var res model.Aggregate
-	err := res.UnmarshalGQL(v)
-	return res, graphql.ErrorOnPath(ctx, err)
-}
-
-func (ec *executionContext) marshalNAggregate2githubᚗcomᚋClusterCockpitᚋccᚑbackendᚋinternalᚋgraphᚋmodelᚐAggregate(ctx context.Context, sel ast.SelectionSet, v model.Aggregate) graphql.Marshaler {
-	return v
-}
-
 func (ec *executionContext) unmarshalNBoolean2bool(ctx context.Context, v interface{}) (bool, error) {
 	res, err := graphql.UnmarshalBoolean(v)
 	return res, graphql.ErrorOnPath(ctx, err)
@@ -14582,23 +14984,6 @@ func (ec *executionContext) marshalNJob2ᚖgithubᚗcomᚋClusterCockpitᚋccᚑ
 	return ec._Job(ctx, sel, v)
 }
 
-func (ec *executionContext) unmarshalNJobFilter2ᚕᚖgithubᚗcomᚋClusterCockpitᚋccᚑbackendᚋinternalᚋgraphᚋmodelᚐJobFilter(ctx context.Context, v interface{}) ([]*model.JobFilter, error) {
-	var vSlice []interface{}
-	if v != nil {
-		vSlice = graphql.CoerceList(v)
-	}
-	var err error
-	res := make([]*model.JobFilter, len(vSlice))
-	for i := range vSlice {
-		ctx := graphql.WithPathContext(ctx, graphql.NewPathWithIndex(i))
-		res[i], err = ec.unmarshalOJobFilter2ᚖgithubᚗcomᚋClusterCockpitᚋccᚑbackendᚋinternalᚋgraphᚋmodelᚐJobFilter(ctx, vSlice[i])
-		if err != nil {
-			return nil, err
-		}
-	}
-	return res, nil
-}
-
 func (ec *executionContext) unmarshalNJobFilter2ᚕᚖgithubᚗcomᚋClusterCockpitᚋccᚑbackendᚋinternalᚋgraphᚋmodelᚐJobFilterᚄ(ctx context.Context, v interface{}) ([]*model.JobFilter, error) {
 	var vSlice []interface{}
 	if v != nil {
@@ -15331,6 +15716,16 @@ func (ec *executionContext) marshalNTime2timeᚐTime(ctx context.Context, sel as
 	return res
 }
 
+func (ec *executionContext) marshalNTimeWeights2ᚖgithubᚗcomᚋClusterCockpitᚋccᚑbackendᚋinternalᚋgraphᚋmodelᚐTimeWeights(ctx context.Context, sel ast.SelectionSet, v *model.TimeWeights) graphql.Marshaler {
+	if v == nil {
+		if !graphql.HasFieldError(ctx, graphql.GetFieldContext(ctx)) {
+			ec.Errorf(ctx, "the requested element is null which the schema does not allow")
+		}
+		return graphql.Null
+	}
+	return ec._TimeWeights(ctx, sel, v)
+}
+
 func (ec *executionContext) marshalNTopology2githubᚗcomᚋClusterCockpitᚋccᚑbackendᚋpkgᚋschemaᚐTopology(ctx context.Context, sel ast.SelectionSet, v schema.Topology) graphql.Marshaler {
 	return ec._Topology(ctx, sel, &v)
 }
@@ -15925,14 +16320,6 @@ func (ec *executionContext) unmarshalOJobFilter2ᚕᚖgithubᚗcomᚋClusterCock
 	return res, nil
 }
 
-func (ec *executionContext) unmarshalOJobFilter2ᚖgithubᚗcomᚋClusterCockpitᚋccᚑbackendᚋinternalᚋgraphᚋmodelᚐJobFilter(ctx context.Context, v interface{}) (*model.JobFilter, error) {
-	if v == nil {
-		return nil, nil
-	}
-	res, err := ec.unmarshalInputJobFilter(ctx, v)
-	return &res, graphql.ErrorOnPath(ctx, err)
-}
-
 func (ec *executionContext) marshalOJobLinkResultList2ᚖgithubᚗcomᚋClusterCockpitᚋccᚑbackendᚋinternalᚋgraphᚋmodelᚐJobLinkResultList(ctx context.Context, sel ast.SelectionSet, v *model.JobLinkResultList) graphql.Marshaler {
 	if v == nil {
 		return graphql.Null
@@ -16083,6 +16470,22 @@ func (ec *executionContext) marshalOSeries2ᚕgithubᚗcomᚋClusterCockpitᚋcc
 	return ret
 }
 
+func (ec *executionContext) unmarshalOSortByAggregate2ᚖgithubᚗcomᚋClusterCockpitᚋccᚑbackendᚋinternalᚋgraphᚋmodelᚐSortByAggregate(ctx context.Context, v interface{}) (*model.SortByAggregate, error) {
+	if v == nil {
+		return nil, nil
+	}
+	var res = new(model.SortByAggregate)
+	err := res.UnmarshalGQL(v)
+	return res, graphql.ErrorOnPath(ctx, err)
+}
+
+func (ec *executionContext) marshalOSortByAggregate2ᚖgithubᚗcomᚋClusterCockpitᚋccᚑbackendᚋinternalᚋgraphᚋmodelᚐSortByAggregate(ctx context.Context, sel ast.SelectionSet, v *model.SortByAggregate) graphql.Marshaler {
+	if v == nil {
+		return graphql.Null
+	}
+	return v
+}
+
 func (ec *executionContext) marshalOStatsSeries2ᚖgithubᚗcomᚋClusterCockpitᚋccᚑbackendᚋpkgᚋschemaᚐStatsSeries(ctx context.Context, sel ast.SelectionSet, v *schema.StatsSeries) graphql.Marshaler {
 	if v == nil {
 		return graphql.Null
@@ -16197,22 +16600,6 @@ func (ec *executionContext) marshalOUser2ᚖgithubᚗcomᚋClusterCockpitᚋcc
 	return ec._User(ctx, sel, v)
 }
 
-func (ec *executionContext) unmarshalOWeights2ᚖgithubᚗcomᚋClusterCockpitᚋccᚑbackendᚋinternalᚋgraphᚋmodelᚐWeights(ctx context.Context, v interface{}) (*model.Weights, error) {
-	if v == nil {
-		return nil, nil
-	}
-	var res = new(model.Weights)
-	err := res.UnmarshalGQL(v)
-	return res, graphql.ErrorOnPath(ctx, err)
-}
-
-func (ec *executionContext) marshalOWeights2ᚖgithubᚗcomᚋClusterCockpitᚋccᚑbackendᚋinternalᚋgraphᚋmodelᚐWeights(ctx context.Context, sel ast.SelectionSet, v *model.Weights) graphql.Marshaler {
-	if v == nil {
-		return graphql.Null
-	}
-	return v
-}
-
 func (ec *executionContext) marshalO__EnumValue2ᚕgithubᚗcomᚋ99designsᚋgqlgenᚋgraphqlᚋintrospectionᚐEnumValueᚄ(ctx context.Context, sel ast.SelectionSet, v []introspection.EnumValue) graphql.Marshaler {
 	if v == nil {
 		return graphql.Null
diff --git a/internal/graph/model/models_gen.go b/internal/graph/model/models_gen.go
index 44357d6..050784b 100644
--- a/internal/graph/model/models_gen.go
+++ b/internal/graph/model/models_gen.go
@@ -22,8 +22,8 @@ type FloatRange struct {
 }
 
 type Footprints struct {
-	Nodehours []schema.Float      `json:"nodehours"`
-	Metrics   []*MetricFootprints `json:"metrics"`
+	TimeWeights *TimeWeights        `json:"timeWeights"`
+	Metrics     []*MetricFootprints `json:"metrics"`
 }
 
 type HistoPoint struct {
@@ -91,11 +91,16 @@ type JobsStatistics struct {
 	RunningJobs    int           `json:"runningJobs"`
 	ShortJobs      int           `json:"shortJobs"`
 	TotalWalltime  int           `json:"totalWalltime"`
+	TotalNodes     int           `json:"totalNodes"`
 	TotalNodeHours int           `json:"totalNodeHours"`
+	TotalCores     int           `json:"totalCores"`
 	TotalCoreHours int           `json:"totalCoreHours"`
+	TotalAccs      int           `json:"totalAccs"`
 	TotalAccHours  int           `json:"totalAccHours"`
 	HistDuration   []*HistoPoint `json:"histDuration"`
 	HistNumNodes   []*HistoPoint `json:"histNumNodes"`
+	HistNumCores   []*HistoPoint `json:"histNumCores"`
+	HistNumAccs    []*HistoPoint `json:"histNumAccs"`
 }
 
 type MetricFootprints struct {
@@ -133,6 +138,12 @@ type TimeRangeOutput struct {
 	To   time.Time `json:"to"`
 }
 
+type TimeWeights struct {
+	NodeHours []schema.Float `json:"nodeHours"`
+	AccHours  []schema.Float `json:"accHours"`
+	CoreHours []schema.Float `json:"coreHours"`
+}
+
 type User struct {
 	Username string `json:"username"`
 	Name     string `json:"name"`
@@ -182,6 +193,59 @@ func (e Aggregate) MarshalGQL(w io.Writer) {
 	fmt.Fprint(w, strconv.Quote(e.String()))
 }
 
+type SortByAggregate string
+
+const (
+	SortByAggregateTotalwalltime  SortByAggregate = "TOTALWALLTIME"
+	SortByAggregateTotaljobs      SortByAggregate = "TOTALJOBS"
+	SortByAggregateTotalnodes     SortByAggregate = "TOTALNODES"
+	SortByAggregateTotalnodehours SortByAggregate = "TOTALNODEHOURS"
+	SortByAggregateTotalcores     SortByAggregate = "TOTALCORES"
+	SortByAggregateTotalcorehours SortByAggregate = "TOTALCOREHOURS"
+	SortByAggregateTotalaccs      SortByAggregate = "TOTALACCS"
+	SortByAggregateTotalacchours  SortByAggregate = "TOTALACCHOURS"
+)
+
+var AllSortByAggregate = []SortByAggregate{
+	SortByAggregateTotalwalltime,
+	SortByAggregateTotaljobs,
+	SortByAggregateTotalnodes,
+	SortByAggregateTotalnodehours,
+	SortByAggregateTotalcores,
+	SortByAggregateTotalcorehours,
+	SortByAggregateTotalaccs,
+	SortByAggregateTotalacchours,
+}
+
+func (e SortByAggregate) IsValid() bool {
+	switch e {
+	case SortByAggregateTotalwalltime, SortByAggregateTotaljobs, SortByAggregateTotalnodes, SortByAggregateTotalnodehours, SortByAggregateTotalcores, SortByAggregateTotalcorehours, SortByAggregateTotalaccs, SortByAggregateTotalacchours:
+		return true
+	}
+	return false
+}
+
+func (e SortByAggregate) String() string {
+	return string(e)
+}
+
+func (e *SortByAggregate) UnmarshalGQL(v interface{}) error {
+	str, ok := v.(string)
+	if !ok {
+		return fmt.Errorf("enums must be strings")
+	}
+
+	*e = SortByAggregate(str)
+	if !e.IsValid() {
+		return fmt.Errorf("%s is not a valid SortByAggregate", str)
+	}
+	return nil
+}
+
+func (e SortByAggregate) MarshalGQL(w io.Writer) {
+	fmt.Fprint(w, strconv.Quote(e.String()))
+}
+
 type SortDirectionEnum string
 
 const (
@@ -222,44 +286,3 @@ func (e *SortDirectionEnum) UnmarshalGQL(v interface{}) error {
 func (e SortDirectionEnum) MarshalGQL(w io.Writer) {
 	fmt.Fprint(w, strconv.Quote(e.String()))
 }
-
-type Weights string
-
-const (
-	WeightsNodeCount Weights = "NODE_COUNT"
-	WeightsNodeHours Weights = "NODE_HOURS"
-)
-
-var AllWeights = []Weights{
-	WeightsNodeCount,
-	WeightsNodeHours,
-}
-
-func (e Weights) IsValid() bool {
-	switch e {
-	case WeightsNodeCount, WeightsNodeHours:
-		return true
-	}
-	return false
-}
-
-func (e Weights) String() string {
-	return string(e)
-}
-
-func (e *Weights) UnmarshalGQL(v interface{}) error {
-	str, ok := v.(string)
-	if !ok {
-		return fmt.Errorf("enums must be strings")
-	}
-
-	*e = Weights(str)
-	if !e.IsValid() {
-		return fmt.Errorf("%s is not a valid Weights", str)
-	}
-	return nil
-}
-
-func (e Weights) MarshalGQL(w io.Writer) {
-	fmt.Fprint(w, strconv.Quote(e.String()))
-}
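A quick standalone illustration of the generated enum's contract — UnmarshalGQL validates against the allowed values, so old `Weights` strings are now rejected. The snippet inlines a trimmed copy of the generated code so it runs on its own:

```go
package main

import "fmt"

// Local copy of the generated enum, trimmed to two values, to show the
// UnmarshalGQL contract: any string outside the enum set is an error.
type SortByAggregate string

const (
	SortByAggregateTotaljobs      SortByAggregate = "TOTALJOBS"
	SortByAggregateTotalcorehours SortByAggregate = "TOTALCOREHOURS"
)

func (e SortByAggregate) IsValid() bool {
	switch e {
	case SortByAggregateTotaljobs, SortByAggregateTotalcorehours:
		return true
	}
	return false
}

func (e *SortByAggregate) UnmarshalGQL(v interface{}) error {
	str, ok := v.(string)
	if !ok {
		return fmt.Errorf("enums must be strings")
	}
	*e = SortByAggregate(str)
	if !e.IsValid() {
		return fmt.Errorf("%s is not a valid SortByAggregate", str)
	}
	return nil
}

func main() {
	var s SortByAggregate
	fmt.Println(s.UnmarshalGQL("TOTALCOREHOURS")) // <nil>
	fmt.Println(s.UnmarshalGQL("NODE_HOURS"))     // error: removed Weights value no longer accepted
}
```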
diff --git a/internal/graph/schema.resolvers.go b/internal/graph/schema.resolvers.go
index 8d34fb3..9e5e111 100644
--- a/internal/graph/schema.resolvers.go
+++ b/internal/graph/schema.resolvers.go
@@ -244,34 +244,34 @@ func (r *queryResolver) Jobs(ctx context.Context, filter []*model.JobFilter, pag
 }
 
 // JobsStatistics is the resolver for the jobsStatistics field.
-func (r *queryResolver) JobsStatistics(ctx context.Context, filter []*model.JobFilter, groupBy *model.Aggregate) ([]*model.JobsStatistics, error) {
+func (r *queryResolver) JobsStatistics(ctx context.Context, filter []*model.JobFilter, page *model.PageRequest, sortBy *model.SortByAggregate, groupBy *model.Aggregate) ([]*model.JobsStatistics, error) {
 	var err error
 	var stats []*model.JobsStatistics
 
-	if requireField(ctx, "totalJobs") {
+	if requireField(ctx, "totalJobs") || requireField(ctx, "totalWalltime") || requireField(ctx, "totalNodes") || requireField(ctx, "totalCores") ||
+		requireField(ctx, "totalAccs") || requireField(ctx, "totalNodeHours") || requireField(ctx, "totalCoreHours") || requireField(ctx, "totalAccHours") {
 		if groupBy == nil {
 			stats, err = r.Repo.JobsStats(ctx, filter)
 		} else {
-			stats, err = r.Repo.JobsStatsGrouped(ctx, filter, groupBy)
+			stats, err = r.Repo.JobsStatsGrouped(ctx, filter, page, sortBy, groupBy)
 		}
 	} else {
 		stats = make([]*model.JobsStatistics, 0, 1)
-		stats = append(stats,
-			&model.JobsStatistics{})
+		stats = append(stats, &model.JobsStatistics{})
 	}
 
 	if groupBy != nil {
 		if requireField(ctx, "shortJobs") {
 			stats, err = r.Repo.AddJobCountGrouped(ctx, filter, groupBy, stats, "short")
 		}
-		if requireField(ctx, "RunningJobs") {
+		if requireField(ctx, "runningJobs") {
 			stats, err = r.Repo.AddJobCountGrouped(ctx, filter, groupBy, stats, "running")
 		}
 	} else {
 		if requireField(ctx, "shortJobs") {
 			stats, err = r.Repo.AddJobCount(ctx, filter, stats, "short")
 		}
-		if requireField(ctx, "RunningJobs") {
+		if requireField(ctx, "runningJobs") {
 			stats, err = r.Repo.AddJobCount(ctx, filter, stats, "running")
 		}
 	}
@@ -280,7 +280,7 @@ func (r *queryResolver) JobsStatistics(ctx context.Context, filter []*model.JobF
 		return nil, err
 	}
 
-	if requireField(ctx, "histDuration") || requireField(ctx, "histNumNodes") {
+	if requireField(ctx, "histDuration") || requireField(ctx, "histNumNodes") || requireField(ctx, "histNumCores") || requireField(ctx, "histNumAccs") {
 		if groupBy == nil {
 			stats[0], err = r.Repo.AddHistograms(ctx, filter, stats[0])
 			if err != nil {
@@ -294,24 +294,6 @@ func (r *queryResolver) JobsStatistics(ctx context.Context, filter []*model.JobF
 	return stats, nil
 }
 
-// JobsCount is the resolver for the jobsCount field.
-func (r *queryResolver) JobsCount(ctx context.Context, filter []*model.JobFilter, groupBy model.Aggregate, weight *model.Weights, limit *int) ([]*model.Count, error) {
-	counts, err := r.Repo.CountGroupedJobs(ctx, groupBy, filter, weight, limit)
-	if err != nil {
-		log.Warn("Error while counting grouped jobs")
-		return nil, err
-	}
-
-	res := make([]*model.Count, 0, len(counts))
-	for name, count := range counts {
-		res = append(res, &model.Count{
-			Name:  name,
-			Count: count,
-		})
-	}
-	return res, nil
-}
-
 // RooflineHeatmap is the resolver for the rooflineHeatmap field.
 func (r *queryResolver) RooflineHeatmap(ctx context.Context, filter []*model.JobFilter, rows int, cols int, minX float64, minY float64, maxX float64, maxY float64) ([][]float64, error) {
 	return r.rooflineHeatmap(ctx, filter, rows, cols, minX, minY, maxX, maxY)
diff --git a/internal/graph/util.go b/internal/graph/util.go
index c9423e1..b61bcc7 100644
--- a/internal/graph/util.go
+++ b/internal/graph/util.go
@@ -15,6 +15,7 @@ import (
 	"github.com/ClusterCockpit/cc-backend/internal/metricdata"
 	"github.com/ClusterCockpit/cc-backend/pkg/log"
 	"github.com/ClusterCockpit/cc-backend/pkg/schema"
+	// "github.com/ClusterCockpit/cc-backend/pkg/archive"
 )
 
 const MAX_JOBS_FOR_ANALYSIS = 500
@@ -106,7 +107,11 @@ func (r *queryResolver) jobsFootprints(ctx context.Context, filter []*model.JobF
 		avgs[i] = make([]schema.Float, 0, len(jobs))
 	}
 
-	nodehours := make([]schema.Float, 0, len(jobs))
+	timeweights := new(model.TimeWeights)
+	timeweights.NodeHours = make([]schema.Float, 0, len(jobs))
+	timeweights.AccHours = make([]schema.Float, 0, len(jobs))
+	timeweights.CoreHours = make([]schema.Float, 0, len(jobs))
+
 	for _, job := range jobs {
 		if job.MonitoringStatus == schema.MonitoringStatusDisabled || job.MonitoringStatus == schema.MonitoringStatusArchivingFailed {
 			continue
@@ -117,7 +122,18 @@ func (r *queryResolver) jobsFootprints(ctx context.Context, filter []*model.JobF
 			return nil, err
 		}
 
-		nodehours = append(nodehours, schema.Float(float64(job.Duration)/60.0*float64(job.NumNodes)))
+		// #166 collect arrays: Null values or no null values?
+		timeweights.NodeHours = append(timeweights.NodeHours, schema.Float(float64(job.Duration)/60.0*float64(job.NumNodes)))
+		if job.NumAcc > 0 {
+			timeweights.AccHours = append(timeweights.AccHours, schema.Float(float64(job.Duration)/60.0*float64(job.NumAcc)))
+		} else {
+			timeweights.AccHours = append(timeweights.AccHours, schema.Float(1.0))
+		}
+		if job.NumHWThreads > 0 {
+			timeweights.CoreHours = append(timeweights.CoreHours, schema.Float(float64(job.Duration)/60.0*float64(job.NumHWThreads))) // SQLite HWThreads == Cores; numCoresForJob(job)
+		} else {
+			timeweights.CoreHours = append(timeweights.CoreHours, schema.Float(1.0))
+		}
 	}
 
 	res := make([]*model.MetricFootprints, len(avgs))
@@ -129,11 +145,34 @@ func (r *queryResolver) jobsFootprints(ctx context.Context, filter []*model.JobF
 	}
 
 	return &model.Footprints{
-		Nodehours: nodehours,
-		Metrics:   res,
+		TimeWeights: timeweights,
+		Metrics:     res,
 	}, nil
 }
 
+// func numCoresForJob(job *schema.Job) (numCores int) {
+
+// 	subcluster, scerr := archive.GetSubCluster(job.Cluster, job.SubCluster)
+// 	if scerr != nil {
+// 		return 1
+// 	}
+
+// 	totalJobCores := 0
+// 	topology := subcluster.Topology
+
+// 	for _, host := range job.Resources {
+// 		hwthreads := host.HWThreads
+// 		if hwthreads == nil {
+// 			hwthreads = topology.Node
+// 		}
+
+// 		hostCores, _ := topology.GetCoresFromHWThreads(hwthreads)
+// 		totalJobCores += len(hostCores)
+// 	}
+
+// 	return totalJobCores
+// }
+
 func requireField(ctx context.Context, name string) bool {
 	fields := graphql.CollectAllFields(ctx)
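The core of the footprint change, extracted into a free-standing sketch (the helper name and the plain struct are illustrative, not part of the patch): each job contributes a node, core, and accelerator weight derived from its duration, with a fallback weight of 1.0 when a job has no accelerators or no HWThread count — the open question flagged by the #166 comments above.

```go
package main

import "fmt"

// jobDims carries just the fields of schema.Job that the weight
// computation reads; struct and helper are illustrative only.
type jobDims struct {
	Duration     int32
	NumNodes     int32
	NumHWThreads int32
	NumAcc       int32
}

// timeWeights mirrors the logic added in jobsFootprints: one weight per
// dimension, using the patch's Duration/60.0 scaling, and falling back
// to 1.0 where a dimension count is zero.
func timeWeights(j jobDims) (nodeW, coreW, accW float64) {
	base := float64(j.Duration) / 60.0
	nodeW = base * float64(j.NumNodes)
	coreW = 1.0
	if j.NumHWThreads > 0 {
		coreW = base * float64(j.NumHWThreads)
	}
	accW = 1.0
	if j.NumAcc > 0 {
		accW = base * float64(j.NumAcc)
	}
	return
}

func main() {
	n, c, a := timeWeights(jobDims{Duration: 3600, NumNodes: 2, NumHWThreads: 96, NumAcc: 0})
	fmt.Println(n, c, a) // 120 5760 1 — the acc weight falls back to 1.0
}
```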
err := ccms.buildQueries(job, metrics, []schema.MetricScope{schema.MetricScopeNode}) // #166 Add scope here for analysis view accelerator normalization?
 	if err != nil {
 		log.Warn("Error while building query")
 		return nil, err
diff --git a/internal/metricdata/metricdata.go b/internal/metricdata/metricdata.go
index 08898bd..3117f8c 100644
--- a/internal/metricdata/metricdata.go
+++ b/internal/metricdata/metricdata.go
@@ -182,7 +182,7 @@ func LoadAverages(
 	ctx context.Context) error {
 
 	if job.State != schema.JobStateRunning && useArchive {
-		return archive.LoadAveragesFromArchive(job, metrics, data)
+		return archive.LoadAveragesFromArchive(job, metrics, data) // #166 change also here?
 	}
 
 	repo, ok := metricDataRepos[job.Cluster]
@@ -190,7 +190,7 @@ func LoadAverages(
 		return fmt.Errorf("METRICDATA/METRICDATA > no metric data repository configured for '%s'", job.Cluster)
 	}
 
-	stats, err := repo.LoadStats(job, metrics, ctx)
+	stats, err := repo.LoadStats(job, metrics, ctx) // #166 how to handle stats for acc normalization?
 	if err != nil {
 		log.Errorf("Error while loading statistics for job %v (User %v, Project %v)", job.JobID, job.User, job.Project)
 		return err
diff --git a/internal/repository/job.go b/internal/repository/job.go
index 449c267..76834d1 100644
--- a/internal/repository/job.go
+++ b/internal/repository/job.go
@@ -455,69 +455,6 @@ func (r *JobRepository) DeleteJobById(id int64) error {
 	return err
 }
 
-// TODO: Use node hours instead: SELECT job.user, sum(job.num_nodes * (CASE WHEN job.job_state = "running" THEN CAST(strftime('%s', 'now') AS INTEGER) - job.start_time ELSE job.duration END)) as x FROM job GROUP BY user ORDER BY x DESC;
-func (r *JobRepository) CountGroupedJobs(
-	ctx context.Context,
-	aggreg model.Aggregate,
-	filters []*model.JobFilter,
-	weight *model.Weights,
-	limit *int) (map[string]int, error) {
-
-	start := time.Now()
-	if !aggreg.IsValid() {
-		return nil, errors.New("invalid aggregate")
-	}
-
-	runner := (sq.BaseRunner)(r.stmtCache)
-	count := "count(*) as count"
-	if weight != nil {
-		switch *weight {
-		case model.WeightsNodeCount:
-			count = "sum(job.num_nodes) as count"
-		case model.WeightsNodeHours:
-			now := time.Now().Unix()
-			count = fmt.Sprintf(`sum(job.num_nodes * (CASE WHEN job.job_state = "running" THEN %d - job.start_time ELSE job.duration END)) as count`, now)
-			runner = r.DB
-		default:
-			log.Debugf("CountGroupedJobs() Weight %v unknown.", *weight)
-		}
-	}
-
-	q, qerr := SecurityCheck(ctx, sq.Select("job."+string(aggreg), count).From("job").GroupBy("job."+string(aggreg)).OrderBy("count DESC"))
-
-	if qerr != nil {
-		return nil, qerr
-	}
-
-	for _, f := range filters {
-		q = BuildWhereClause(f, q)
-	}
-	if limit != nil {
-		q = q.Limit(uint64(*limit))
-	}
-
-	counts := map[string]int{}
-	rows, err := q.RunWith(runner).Query()
-	if err != nil {
-		log.Error("Error while running query")
-		return nil, err
-	}
-
-	for rows.Next() {
-		var group string
-		var count int
-		if err := rows.Scan(&group, &count); err != nil {
-			log.Warn("Error while scanning rows")
-			return nil, err
-		}
-
-		counts[group] = count
-	}
-
-	log.Debugf("Timer CountGroupedJobs %s", time.Since(start))
-	return counts, nil
-}
-
 func (r *JobRepository) UpdateMonitoringStatus(job int64, monitoringStatus int32) (err error) {
 	stmt := sq.Update("job").
 		Set("monitoring_status", monitoringStatus).
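The jobsFootprints changes in internal/graph/util.go above collect three weight series in lockstep with the job list, one per time-weight scope. The rule compresses to a small helper; a sketch under the assumption that the helper name (timeWeight) is hypothetical, while the formula and the fallback weight of 1.0 mirror the code above (the open #166 question about null handling applies here as well):

    import "github.com/ClusterCockpit/cc-backend/pkg/schema"

    // timeWeight computes duration * resource count in the convention used
    // by jobsFootprints; jobs without the resource in question (e.g. no
    // accelerators) get a neutral weight of 1 so all three weight arrays
    // stay index-aligned with the job list.
    func timeWeight(duration, count int32) schema.Float {
        if count <= 0 {
            return schema.Float(1.0)
        }
        return schema.Float(float64(duration) / 60.0 * float64(count))
    }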
diff --git a/internal/repository/query.go b/internal/repository/query.go index 0501fe1..84b8048 100644 --- a/internal/repository/query.go +++ b/internal/repository/query.go @@ -18,13 +18,17 @@ import ( sq "github.com/Masterminds/squirrel" ) -// SecurityCheck-less, private: Returns a list of jobs matching the provided filters. page and order are optional- -func (r *JobRepository) queryJobs( - query sq.SelectBuilder, +func (r *JobRepository) QueryJobs( + ctx context.Context, filters []*model.JobFilter, page *model.PageRequest, order *model.OrderByInput) ([]*schema.Job, error) { + query, qerr := SecurityCheck(ctx, sq.Select(jobColumns...).From("job")) + if qerr != nil { + return nil, qerr + } + if order != nil { field := toSnakeCase(order.Field) @@ -67,34 +71,15 @@ func (r *JobRepository) queryJobs( return jobs, nil } -// testFunction for queryJobs -func (r *JobRepository) testQueryJobs( - filters []*model.JobFilter, - page *model.PageRequest, - order *model.OrderByInput) ([]*schema.Job, error) { - - return r.queryJobs(sq.Select(jobColumns...).From("job"), filters, page, order) -} - -// Public function with added securityCheck, calls private queryJobs function above -func (r *JobRepository) QueryJobs( +func (r *JobRepository) CountJobs( ctx context.Context, - filters []*model.JobFilter, - page *model.PageRequest, - order *model.OrderByInput) ([]*schema.Job, error) { - - query, qerr := SecurityCheck(ctx, sq.Select(jobColumns...).From("job")) - if qerr != nil { - return nil, qerr - } - - return r.queryJobs(query, filters, page, order) -} - -// SecurityCheck-less, private: Returns the number of jobs matching the filters -func (r *JobRepository) countJobs(query sq.SelectBuilder, filters []*model.JobFilter) (int, error) { + query, qerr := SecurityCheck(ctx, sq.Select("count(*)").From("job")) + if qerr != nil { + return 0, qerr + } + for _, f := range filters { query = BuildWhereClause(f, query) } @@ -107,27 +92,6 @@ func (r *JobRepository) countJobs(query sq.SelectBuilder, return count, nil } -// testFunction for countJobs -func (r *JobRepository) testCountJobs( - filters []*model.JobFilter) (int, error) { - - return r.countJobs(sq.Select("count(*)").From("job"), filters) -} - -// Public function with added securityCheck, calls private countJobs function above -func (r *JobRepository) CountJobs( - ctx context.Context, - filters []*model.JobFilter) (int, error) { - - query, qerr := SecurityCheck(ctx, sq.Select("count(*)").From("job")) - - if qerr != nil { - return 0, qerr - } - - return r.countJobs(query, filters) -} - func SecurityCheck(ctx context.Context, query sq.SelectBuilder) (sq.SelectBuilder, error) { user := GetUserFromContext(ctx) if user == nil { diff --git a/internal/repository/repository_test.go b/internal/repository/repository_test.go index efb5395..48b692f 100644 --- a/internal/repository/repository_test.go +++ b/internal/repository/repository_test.go @@ -5,10 +5,12 @@ package repository import ( + "context" "testing" "github.com/ClusterCockpit/cc-backend/internal/graph/model" "github.com/ClusterCockpit/cc-backend/pkg/log" + "github.com/ClusterCockpit/cc-backend/pkg/schema" _ "github.com/mattn/go-sqlite3" ) @@ -94,7 +96,7 @@ func BenchmarkDB_CountJobs(b *testing.B) { b.RunParallel(func(pb *testing.PB) { for pb.Next() { - _, err := db.testCountJobs([]*model.JobFilter{filter}) + _, err := db.CountJobs(getContext(b), []*model.JobFilter{filter}) noErr(b, err) } }) @@ -118,20 +120,37 @@ func BenchmarkDB_QueryJobs(b *testing.B) { b.RunParallel(func(pb *testing.PB) { for pb.Next() { - _, 
err := db.testQueryJobs([]*model.JobFilter{filter}, page, order) + _, err := db.QueryJobs(getContext(b), []*model.JobFilter{filter}, page, order) noErr(b, err) } }) }) } +func getContext(tb testing.TB) context.Context { + tb.Helper() + + var roles []string + roles = append(roles, schema.GetRoleString(schema.RoleAdmin)) + projects := make([]string, 0) + + user := &schema.User{ + Username: "demo", + Name: "The man", + Roles: roles, + Projects: projects, + AuthSource: schema.AuthViaLDAP, + } + ctx := context.Background() + return context.WithValue(ctx, ContextUserKey, user) +} + func setup(tb testing.TB) *JobRepository { tb.Helper() log.Init("warn", true) dbfile := "testdata/job.db" err := MigrateDB("sqlite3", dbfile) noErr(tb, err) - Connect("sqlite3", dbfile) return GetJobRepository() } diff --git a/internal/repository/stats.go b/internal/repository/stats.go index 8a74008..3ac3ffd 100644 --- a/internal/repository/stats.go +++ b/internal/repository/stats.go @@ -23,6 +23,17 @@ var groupBy2column = map[model.Aggregate]string{ model.AggregateCluster: "job.cluster", } +var sortBy2column = map[model.SortByAggregate]string{ + model.SortByAggregateTotaljobs: "totalJobs", + model.SortByAggregateTotalwalltime: "totalWalltime", + model.SortByAggregateTotalnodes: "totalNodes", + model.SortByAggregateTotalnodehours: "totalNodeHours", + model.SortByAggregateTotalcores: "totalCores", + model.SortByAggregateTotalcorehours: "totalCoreHours", + model.SortByAggregateTotalaccs: "totalAccs", + model.SortByAggregateTotalacchours: "totalAccHours", +} + func (r *JobRepository) buildCountQuery( filter []*model.JobFilter, kind string, @@ -60,19 +71,26 @@ func (r *JobRepository) buildStatsQuery( castType := r.getCastType() if col != "" { - // Scan columns: id, totalJobs, totalWalltime, totalNodeHours, totalCoreHours, totalAccHours - query = sq.Select(col, "COUNT(job.id)", - fmt.Sprintf("CAST(ROUND(SUM(job.duration) / 3600) as %s)", castType), - fmt.Sprintf("CAST(ROUND(SUM(job.duration * job.num_nodes) / 3600) as %s)", castType), - fmt.Sprintf("CAST(ROUND(SUM(job.duration * job.num_hwthreads) / 3600) as %s)", castType), - fmt.Sprintf("CAST(ROUND(SUM(job.duration * job.num_acc) / 3600) as %s)", castType), + // Scan columns: id, totalJobs, totalWalltime, totalNodes, totalNodeHours, totalCores, totalCoreHours, totalAccs, totalAccHours + query = sq.Select(col, "COUNT(job.id) as totalJobs", + fmt.Sprintf("CAST(ROUND(SUM(job.duration) / 3600) as %s) as totalWalltime", castType), + fmt.Sprintf("CAST(SUM(job.num_nodes) as %s) as totalNodes", castType), + fmt.Sprintf("CAST(ROUND(SUM(job.duration * job.num_nodes) / 3600) as %s) as totalNodeHours", castType), + fmt.Sprintf("CAST(SUM(job.num_hwthreads) as %s) as totalCores", castType), + fmt.Sprintf("CAST(ROUND(SUM(job.duration * job.num_hwthreads) / 3600) as %s) as totalCoreHours", castType), + fmt.Sprintf("CAST(SUM(job.num_acc) as %s) as totalAccs", castType), + fmt.Sprintf("CAST(ROUND(SUM(job.duration * job.num_acc) / 3600) as %s) as totalAccHours", castType), ).From("job").GroupBy(col) + } else { - // Scan columns: totalJobs, totalWalltime, totalNodeHours, totalCoreHours, totalAccHours + // Scan columns: totalJobs, totalWalltime, totalNodes, totalNodeHours, totalCores, totalCoreHours, totalAccs, totalAccHours query = sq.Select("COUNT(job.id)", fmt.Sprintf("CAST(ROUND(SUM(job.duration) / 3600) as %s)", castType), + fmt.Sprintf("CAST(SUM(job.num_nodes) as %s)", castType), fmt.Sprintf("CAST(ROUND(SUM(job.duration * job.num_nodes) / 3600) as %s)", castType), + 
fmt.Sprintf("CAST(SUM(job.num_hwthreads) as %s)", castType), fmt.Sprintf("CAST(ROUND(SUM(job.duration * job.num_hwthreads) / 3600) as %s)", castType), + fmt.Sprintf("CAST(SUM(job.num_acc) as %s)", castType), fmt.Sprintf("CAST(ROUND(SUM(job.duration * job.num_acc) / 3600) as %s)", castType), ).From("job") } @@ -112,16 +130,28 @@ func (r *JobRepository) getCastType() string { func (r *JobRepository) JobsStatsGrouped( ctx context.Context, filter []*model.JobFilter, + page *model.PageRequest, + sortBy *model.SortByAggregate, groupBy *model.Aggregate) ([]*model.JobsStatistics, error) { start := time.Now() col := groupBy2column[*groupBy] query := r.buildStatsQuery(filter, col) + query, err := SecurityCheck(ctx, query) if err != nil { return nil, err } + if sortBy != nil { + sortBy := sortBy2column[*sortBy] + query = query.OrderBy(fmt.Sprintf("%s DESC", sortBy)) + } + if page != nil && page.ItemsPerPage != -1 { + limit := uint64(page.ItemsPerPage) + query = query.Offset((uint64(page.Page) - 1) * limit).Limit(limit) + } + rows, err := query.RunWith(r.DB).Query() if err != nil { log.Warn("Error while querying DB for job statistics") @@ -132,15 +162,36 @@ func (r *JobRepository) JobsStatsGrouped( for rows.Next() { var id sql.NullString - var jobs, walltime, nodeHours, coreHours, accHours sql.NullInt64 - if err := rows.Scan(&id, &jobs, &walltime, &nodeHours, &coreHours, &accHours); err != nil { + var jobs, walltime, nodes, nodeHours, cores, coreHours, accs, accHours sql.NullInt64 + if err := rows.Scan(&id, &jobs, &walltime, &nodes, &nodeHours, &cores, &coreHours, &accs, &accHours); err != nil { log.Warn("Error while scanning rows") return nil, err } if id.Valid { - var totalCoreHours, totalAccHours int + var totalJobs, totalWalltime, totalNodes, totalNodeHours, totalCores, totalCoreHours, totalAccs, totalAccHours int + if jobs.Valid { + totalJobs = int(jobs.Int64) + } + + if walltime.Valid { + totalWalltime = int(walltime.Int64) + } + + if nodes.Valid { + totalNodes = int(nodes.Int64) + } + if cores.Valid { + totalCores = int(cores.Int64) + } + if accs.Valid { + totalAccs = int(accs.Int64) + } + + if nodeHours.Valid { + totalNodeHours = int(nodeHours.Int64) + } if coreHours.Valid { totalCoreHours = int(coreHours.Int64) } @@ -154,9 +205,13 @@ func (r *JobRepository) JobsStatsGrouped( &model.JobsStatistics{ ID: id.String, Name: name, - TotalJobs: int(jobs.Int64), - TotalWalltime: int(walltime.Int64), + TotalJobs: totalJobs, + TotalWalltime: totalWalltime, + TotalNodes: totalNodes, + TotalNodeHours: totalNodeHours, + TotalCores: totalCores, TotalCoreHours: totalCoreHours, + TotalAccs: totalAccs, TotalAccHours: totalAccHours}) } else { stats = append(stats, @@ -164,7 +219,11 @@ func (r *JobRepository) JobsStatsGrouped( ID: id.String, TotalJobs: int(jobs.Int64), TotalWalltime: int(walltime.Int64), + TotalNodes: totalNodes, + TotalNodeHours: totalNodeHours, + TotalCores: totalCores, TotalCoreHours: totalCoreHours, + TotalAccs: totalAccs, TotalAccHours: totalAccHours}) } } @@ -188,15 +247,18 @@ func (r *JobRepository) JobsStats( row := query.RunWith(r.DB).QueryRow() stats := make([]*model.JobsStatistics, 0, 1) - var jobs, walltime, nodeHours, coreHours, accHours sql.NullInt64 - if err := row.Scan(&jobs, &walltime, &nodeHours, &coreHours, &accHours); err != nil { + var jobs, walltime, nodes, nodeHours, cores, coreHours, accs, accHours sql.NullInt64 + if err := row.Scan(&jobs, &walltime, &nodes, &nodeHours, &cores, &coreHours, &accs, &accHours); err != nil { log.Warn("Error while scanning rows") return nil, 
err
 	}
 
 	if jobs.Valid {
-		var totalCoreHours, totalAccHours int
+		var totalNodeHours, totalCoreHours, totalAccHours int
 
+		if nodeHours.Valid {
+			totalNodeHours = int(nodeHours.Int64)
+		}
 		if coreHours.Valid {
 			totalCoreHours = int(coreHours.Int64)
 		}
@@ -207,6 +269,7 @@ func (r *JobRepository) JobsStats(
 			&model.JobsStatistics{
 				TotalJobs:      int(jobs.Int64),
 				TotalWalltime:  int(walltime.Int64),
+				TotalNodeHours: totalNodeHours,
 				TotalCoreHours: totalCoreHours,
 				TotalAccHours:  totalAccHours})
 	}
@@ -321,7 +384,7 @@ func (r *JobRepository) AddJobCount(
 		return nil, err
 	}
 
-	counts := make(map[string]int)
+	var count int
 
 	for rows.Next() {
 		var cnt sql.NullInt64
@@ -329,20 +392,22 @@ func (r *JobRepository) AddJobCount(
 			log.Warn("Error while scanning rows")
 			return nil, err
 		}
+
+		count = int(cnt.Int64)
 	}
 
 	switch kind {
 	case "running":
 		for _, s := range stats {
-			s.RunningJobs = counts[s.ID]
+			s.RunningJobs = count
 		}
 	case "short":
 		for _, s := range stats {
-			s.ShortJobs = counts[s.ID]
+			s.ShortJobs = count
 		}
 	}
 
-	log.Debugf("Timer JobJobCount %s", time.Since(start))
+	log.Debugf("Timer AddJobCount %s", time.Since(start))
 	return stats, nil
 }
 
@@ -367,6 +432,18 @@ func (r *JobRepository) AddHistograms(
 		return nil, err
 	}
 
+	stat.HistNumCores, err = r.jobsStatisticsHistogram(ctx, "job.num_hwthreads as value", filter)
+	if err != nil {
+		log.Warn("Error while loading job statistics histogram: num hwthreads")
+		return nil, err
+	}
+
+	stat.HistNumAccs, err = r.jobsStatisticsHistogram(ctx, "job.num_acc as value", filter)
+	if err != nil {
+		log.Warn("Error while loading job statistics histogram: num acc")
+		return nil, err
+	}
+
 	log.Debugf("Timer AddHistograms %s", time.Since(start))
 	return stat, nil
 }
diff --git a/internal/repository/stats_test.go b/internal/repository/stats_test.go
index b1a815e..6ed3f72 100644
--- a/internal/repository/stats_test.go
+++ b/internal/repository/stats_test.go
@@ -7,6 +7,8 @@ package repository
 import (
 	"fmt"
 	"testing"
+
+	"github.com/ClusterCockpit/cc-backend/internal/graph/model"
 )
 
 func TestBuildJobStatsQuery(t *testing.T) {
@@ -19,3 +21,15 @@ func TestBuildJobStatsQuery(t *testing.T) {
 
 	fmt.Printf("SQL: %s\n", sql)
 }
+
+func TestJobStats(t *testing.T) {
+	r := setup(t)
+
+	filter := &model.JobFilter{}
+	stats, err := r.JobsStats(getContext(t), []*model.JobFilter{filter})
+	noErr(t, err)
+
+	if stats[0].TotalJobs != 6 {
+		t.Fatalf("Want 6, Got %d", stats[0].TotalJobs)
+	}
+}
diff --git a/web/frontend/src/Analysis.root.svelte b/web/frontend/src/Analysis.root.svelte
index 63751b4..67cc652 100644
--- a/web/frontend/src/Analysis.root.svelte
+++ b/web/frontend/src/Analysis.root.svelte
@@ -1,7 +1,7 @@
@@ -151,36 +228,82 @@
 		<th>Total Walltime</th>
 		<td>{$statsQuery.data.stats[0].totalWalltime}</td>
+
+		<th>Total Node Hours</th>
+		<td>{$statsQuery.data.stats[0].totalNodeHours}</td>
+
 		<th>Total Core Hours</th>
 		<td>{$statsQuery.data.stats[0].totalCoreHours}</td>
+
+		<th>Total Accelerator Hours</th>
+		<td>{$statsQuery.data.stats[0].totalAccHours}</td>
+
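Back in internal/repository/stats.go, the sort and pagination arguments added to JobsStatsGrouped compose directly onto the squirrel builder before the query runs. A hypothetical spot-check of what the builder renders, trimmed to two select columns (column aliases as set in buildStatsQuery; squirrel imported as sq, as throughout the repository):

    q := sq.Select("job.user", "COUNT(job.id) as totalJobs").
        From("job").
        GroupBy("job.user").
        OrderBy("totalJobs DESC").
        Offset(0).
        Limit(10)
    sql, _, _ := q.ToSql()
    // sql: SELECT job.user, COUNT(job.id) as totalJobs FROM job
    //      GROUP BY job.user ORDER BY totalJobs DESC LIMIT 10 OFFSET 0

The (page-1)*itemsPerPage offset arithmetic stays in JobsStatsGrouped itself; ordering on the alias works because both SQLite and MySQL accept select aliases in ORDER BY.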
-
Top Users
- {#key $statsQuery.data.topUsers} - b.count - a.count).map((tu) => tu.count)} - entities={$statsQuery.data.topUsers.sort((a, b) => b.count - a.count).map((tu) => tu.name)} - /> +
Top + +
+ {#key $topQuery.data} + {#if $topQuery.fetching} + + {:else if $topQuery.error} + {$topQuery.error.message} + {:else} + t[sortSelection.key])} + entities={$topQuery.data.topList.map((t) => t.id)} + /> + {/if} {/key}
- - - {#each $statsQuery.data.topUsers.sort((a, b) => b.count - a.count) as { name, count }, i} - - - - - - {/each} -
-                        <th>Legend</th>
-                        <th>User Name</th>
-                        <th>Node Hours</th>
-                        <td>{name}</td>
-                        <td>{count}</td>
+ {#key $topQuery.data} + {#if $topQuery.fetching} + + {:else if $topQuery.error} + {$topQuery.error.message} + {:else} + + + + + + + {#each $topQuery.data.topList as te, i} + + + {#if groupSelection.key == 'User'} + + {:else} + + {/if} + + + {/each} +
Legend{groupSelection.label} + +
{te.id}{te.id}{te[sortSelection.key]}
+ {/if} + {/key} @@ -217,13 +340,13 @@
-                    {#key $statsQuery.data.stats[0].histNumNodes}
+                    {#key $statsQuery.data.stats[0].histNumCores}
                     {/key}
@@ -244,8 +367,9 @@
-        These histograms show the distribution of the averages of all jobs matching the filters. Each job/average is weighted by its node hours.
-        Note that some metrics could be disabled for specific subclusters as per metriConfig and thus could affect shown average values.
+        These histograms show the distribution of the averages of all jobs matching the filters. By default, each job's average is weighted by its node hours
+        (accelerator hours for metrics with native accelerator scope, core hours for metrics with native core scope).
+        Note that some metrics may be disabled for specific subclusters, as defined in metricConfig, which can affect the averages shown.
@@ -257,7 +381,8 @@ let:width renderFor="analysis" items={metricsInHistograms.map(metric => ({ metric, ...binsFromFootprint( - $footprintsQuery.data.footprints.nodehours, + $footprintsQuery.data.footprints.timeWeights, + metricConfig(cluster.name, metric)?.scope, $footprintsQuery.data.footprints.metrics.find(f => f.metric == metric).data, numBins) }))} itemsPerRow={ccconfig.plot_view_plotsPerRow}> @@ -265,11 +390,11 @@ data={convert2uplot(item.bins)} width={width} height={250} title="Average Distribution of '{item.metric}'" - xlabel={`${item.metric} average [${(metricConfig(cluster.name, item.metric)?.unit?.prefix ? metricConfig(cluster.name, item.metric)?.unit?.prefix : '') + + xlabel={`${item.metric} bin maximum [${(metricConfig(cluster.name, item.metric)?.unit?.prefix ? metricConfig(cluster.name, item.metric)?.unit?.prefix : '') + (metricConfig(cluster.name, item.metric)?.unit?.base ? metricConfig(cluster.name, item.metric)?.unit?.base : '')}]`} xunit={`${(metricConfig(cluster.name, item.metric)?.unit?.prefix ? metricConfig(cluster.name, item.metric)?.unit?.prefix : '') + (metricConfig(cluster.name, item.metric)?.unit?.base ? metricConfig(cluster.name, item.metric)?.unit?.base : '')}`} - ylabel="Node Hours" + ylabel="Normalized Hours" yunit="Hours"/> @@ -279,7 +404,7 @@ Each circle represents one job. The size of a circle is proportional to its node hours. Darker circles mean multiple jobs have the same averages for the respective metrics. - Note that some metrics could be disabled for specific subclusters as per metriConfig and thus could affect shown average values. + Note that some metrics could be disabled for specific subclusters as per metricConfig and thus could affect shown average values.
@@ -301,7 +426,7 @@ (metricConfig(cluster.name, item.m1)?.unit?.base ? metricConfig(cluster.name, item.m1)?.unit?.base : '')}]`} yLabel={`${item.m2} [${(metricConfig(cluster.name, item.m2)?.unit?.prefix ? metricConfig(cluster.name, item.m2)?.unit?.prefix : '') + (metricConfig(cluster.name, item.m2)?.unit?.base ? metricConfig(cluster.name, item.m2)?.unit?.base : '')}]`} - X={item.f1} Y={item.f2} S={$footprintsQuery.data.footprints.nodehours} /> + X={item.f1} Y={item.f2} S={$footprintsQuery.data.footprints.timeWeights.nodeHours} />
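The Analysis view now hands binsFromFootprint the whole timeWeights object plus each metric's native scope (see web/frontend/src/utils.js at the end of this patch), so every histogram is weighted in the matching unit of work, while the scatter plots keep sizing circles by node hours. The dispatch has the same shape on either side of the API; a Go sketch of the selection rule (helper name and placement are assumptions, the scope constants mirror pkg/schema):

    // weightsForScope picks the weight series that matches a metric's
    // native scope: accelerator hours, core hours, or node hours (default).
    func weightsForScope(tw *model.TimeWeights, scope schema.MetricScope) []schema.Float {
        switch scope {
        case schema.MetricScopeAccelerator:
            return tw.AccHours
        case schema.MetricScopeCore:
            return tw.CoreHours
        default: // node and any coarser scope fall back to node hours
            return tw.NodeHours
        }
    }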
diff --git a/web/frontend/src/Status.root.svelte b/web/frontend/src/Status.root.svelte index 244862c..c0a67ad 100644 --- a/web/frontend/src/Status.root.svelte +++ b/web/frontend/src/Status.root.svelte @@ -1,4 +1,5 @@ @@ -160,52 +249,103 @@
-

Top Users

- {#key $mainQuery.data} - b.count - a.count).map((tu) => tu.count)} - entities={$mainQuery.data.topUsers.sort((a, b) => b.count - a.count).map((tu) => tu.name)} - - /> +

Top Users on {cluster.charAt(0).toUpperCase() + cluster.slice(1)}

+ {#key $topUserQuery.data} + {#if $topUserQuery.fetching} + + {:else if $topUserQuery.error} + {$topUserQuery.error.message} + {:else} + tu[topUserSelection.key])} + entities={$topUserQuery.data.topUser.map((tu) => tu.id)} + /> + {/if} {/key}
- - - {#each $mainQuery.data.topUsers.sort((a, b) => b.count - a.count) as { name, count }, i} - - - - - - {/each} -
-                        <th>Legend</th>
-                        <th>User Name</th>
-                        <th>Number of Nodes</th>
-                        <td>{name}</td>
-                        <td>{count}</td>
+ {#key $topUserQuery.data} + {#if $topUserQuery.fetching} + + {:else if $topUserQuery.error} + {$topUserQuery.error.message} + {:else} + + + + + + + {#each $topUserQuery.data.topUser as tu, i} + + + + + + {/each} +
LegendUser NameNumber of + +
{tu.id}{tu[topUserSelection.key]}
+ {/if} + {/key} -

Top Projects

- {#key $mainQuery.data} - b.count - a.count).map((tp) => tp.count)} - entities={$mainQuery.data.topProjects.sort((a, b) => b.count - a.count).map((tp) => tp.name)} - /> +

Top Projects on {cluster.charAt(0).toUpperCase() + cluster.slice(1)}

+ {#key $topProjectQuery.data} + {#if $topProjectQuery.fetching} + + {:else if $topProjectQuery.error} + {$topProjectQuery.error.message} + {:else} + tp[topProjectSelection.key])} + entities={$topProjectQuery.data.topProjects.map((tp) => tp.id)} + /> + {/if} {/key} - - - {#each $mainQuery.data.topProjects.sort((a, b) => b.count - a.count) as { name, count }, i} - - - - - - {/each} -
-                        <th>Legend</th>
-                        <th>Project Code</th>
-                        <th>Number of Nodes</th>
-                        <td>{name}</td>
-                        <td>{count}</td>
+ {#key $topProjectQuery.data} + {#if $topProjectQuery.fetching} + + {:else if $topProjectQuery.error} + {$topProjectQuery.error.message} + {:else} + + + + + + + {#each $topProjectQuery.data.topProjects as tp, i} + + + + + + {/each} +
LegendProject CodeNumber of + +
{tp.id}{tp[topProjectSelection.key]}
+ {/if} + {/key}
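Both top-ten lists above now resolve through the same backend path: jobsStatistics grouped by user or project, sorted by the selected aggregate, and cut to ten rows via PageRequest. A hypothetical repository-level call (repo and filters stand in for values available in the resolver; enum and struct names follow the generated model shown earlier in this patch):

    groupBy := model.AggregateUser
    sortBy := model.SortByAggregateTotaljobs
    page := &model.PageRequest{ItemsPerPage: 10, Page: 1}
    stats, err := repo.JobsStatsGrouped(ctx, filters, page, &sortBy, &groupBy)
    if err != nil {
        log.Warn("Error while loading top-ten statistics")
    }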

@@ -237,4 +377,32 @@ {/key} + + +
+ {#key $mainQuery.data.stats} + + {/key} +
+ + + {#key $mainQuery.data.stats} + + {/key} + +
{/if} \ No newline at end of file diff --git a/web/frontend/src/plots/Pie.svelte b/web/frontend/src/plots/Pie.svelte index 6355f09..11dc2c9 100644 --- a/web/frontend/src/plots/Pie.svelte +++ b/web/frontend/src/plots/Pie.svelte @@ -43,14 +43,14 @@ export let entities export let displayLegend = false - const data = { + $: data = { labels: entities, datasets: [ { label: sliceLabel, data: quantities, fill: 1, - backgroundColor: colors.slice(0, quantities.length), + backgroundColor: colors.slice(0, quantities.length) } ] } diff --git a/web/frontend/src/utils.js b/web/frontend/src/utils.js index f68fec4..5e9cdae 100644 --- a/web/frontend/src/utils.js +++ b/web/frontend/src/utils.js @@ -325,7 +325,7 @@ export function convert2uplot(canvasData) { return uplotData } -export function binsFromFootprint(weights, values, numBins) { +export function binsFromFootprint(weights, scope, values, numBins) { let min = 0, max = 0 if (values.length != 0) { for (let x of values) { @@ -338,10 +338,23 @@ export function binsFromFootprint(weights, values, numBins) { if (numBins == null || numBins < 3) numBins = 3 + let scopeWeights + switch (scope) { + case 'core': + scopeWeights = weights.coreHours + break + case 'accelerator': + scopeWeights = weights.accHours + break + default: // every other scope: use 'node' + scopeWeights = weights.nodeHours + } + const bins = new Array(numBins).fill(0) for (let i = 0; i < values.length; i++) - bins[Math.floor(((values[i] - min) / (max - min)) * numBins)] += weights ? weights[i] : 1 + bins[Math.floor(((values[i] - min) / (max - min)) * numBins)] += scopeWeights ? scopeWeights[i] : 1 + // Manual Canvas Original // return { // label: idx => { // let start = min + (idx / numBins) * (max - min) @@ -355,14 +368,13 @@ export function binsFromFootprint(weights, values, numBins) { return { bins: bins.map((count, idx) => ({ - value: idx => { // Get rounded down next integer to bins' Start-Stop Mean Value - let start = min + (idx / numBins) * (max - min) + value: idx => { // Use bins' max value instead of mean + // let start = min + (idx / numBins) * (max - min) let stop = min + ((idx + 1) / numBins) * (max - min) - return `${formatNumber(Math.floor((start+stop)/2))}` + // return `${formatNumber(Math.floor((start+stop)/2))}` + return Math.floor(stop) }, count: count - })), - min: min, - max: max + })) } }
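One edge case in the binning loop above is worth flagging: Math.floor(((values[i] - min) / (max - min)) * numBins) yields index numBins when values[i] equals max, landing one past the last bucket, and divides by zero when all values are equal. A clamped variant of the index computation, sketched in Go rather than as a change to the patch itself:

    // binIndex maps v in [min, max] onto 0..numBins-1, clamping the top
    // edge so v == max falls into the last bin instead of out of range.
    func binIndex(v, min, max float64, numBins int) int {
        if max <= min {
            return 0 // degenerate range: every value lands in the first bin
        }
        idx := int(float64(numBins) * (v - min) / (max - min))
        if idx >= numBins {
            idx = numBins - 1
        }
        return idx
    }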