make partition optional in nodeMetrics query

Author: Lou Knauer <2022-02-02 13:05:21 +01:00>
Parent: f7959cb1bd
Commit: 3e7b89aebb
3 changed files with 40 additions and 12 deletions

Changed file 1/3: gqlgen-generated server code

@@ -180,7 +180,7 @@ type ComplexityRoot struct {
 	Jobs            func(childComplexity int, filter []*model.JobFilter, page *model.PageRequest, order *model.OrderByInput) int
 	JobsFootprints  func(childComplexity int, filter []*model.JobFilter, metrics []string) int
 	JobsStatistics  func(childComplexity int, filter []*model.JobFilter, groupBy *model.Aggregate) int
-	NodeMetrics     func(childComplexity int, cluster string, partition string, nodes []string, scopes []string, metrics []string, from time.Time, to time.Time) int
+	NodeMetrics     func(childComplexity int, cluster string, partition *string, nodes []string, scopes []schema.MetricScope, metrics []string, from time.Time, to time.Time) int
 	RooflineHeatmap func(childComplexity int, filter []*model.JobFilter, rows int, cols int, minX float64, minY float64, maxX float64, maxY float64) int
 	Tags            func(childComplexity int) int
 }
@@ -245,7 +245,7 @@ type QueryResolver interface {
 	Jobs(ctx context.Context, filter []*model.JobFilter, page *model.PageRequest, order *model.OrderByInput) (*model.JobResultList, error)
 	JobsStatistics(ctx context.Context, filter []*model.JobFilter, groupBy *model.Aggregate) ([]*model.JobsStatistics, error)
 	RooflineHeatmap(ctx context.Context, filter []*model.JobFilter, rows int, cols int, minX float64, minY float64, maxX float64, maxY float64) ([][]float64, error)
-	NodeMetrics(ctx context.Context, cluster string, partition string, nodes []string, scopes []string, metrics []string, from time.Time, to time.Time) ([]*model.NodeMetrics, error)
+	NodeMetrics(ctx context.Context, cluster string, partition *string, nodes []string, scopes []schema.MetricScope, metrics []string, from time.Time, to time.Time) ([]*model.NodeMetrics, error)
 }
 
 type executableSchema struct {
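The interface change above is the heart of the commit: gqlgen maps a non-null GraphQL argument (String!) to a Go value type and a nullable one (String) to a pointer, so an omitted partition now reaches the resolver as nil. A minimal sketch of the resulting nil-handling pattern (the helper name and the empty-string convention are assumptions, not part of this commit):

```go
package main

import "fmt"

// partitionOrDefault is a hypothetical helper showing the pattern:
// a nullable GraphQL argument arrives as a nil pointer when omitted.
func partitionOrDefault(partition *string) string {
	if partition == nil {
		// Assumption: downstream code treats "" as "no partition filter",
		// mirroring the `partition = new(string)` default in the resolver below.
		return ""
	}
	return *partition
}

func main() {
	fmt.Println(partitionOrDefault(nil)) // argument omitted in the query
	p := "main"
	fmt.Println(partitionOrDefault(&p)) // argument supplied
}
```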
@@ -918,7 +918,7 @@ func (e *executableSchema) Complexity(typeName, field string, childComplexity in
 			return 0, false
 		}
 
-		return e.complexity.Query.NodeMetrics(childComplexity, args["cluster"].(string), args["partition"].(string), args["nodes"].([]string), args["scopes"].([]string), args["metrics"].([]string), args["from"].(time.Time), args["to"].(time.Time)), true
+		return e.complexity.Query.NodeMetrics(childComplexity, args["cluster"].(string), args["partition"].(*string), args["nodes"].([]string), args["scopes"].([]schema.MetricScope), args["metrics"].([]string), args["from"].(time.Time), args["to"].(time.Time)), true
 
 	case "Query.rooflineHeatmap":
 		if e.complexity.Query.RooflineHeatmap == nil {
@@ -1298,7 +1298,7 @@ type Query {
   rooflineHeatmap(filter: [JobFilter!]!, rows: Int!, cols: Int!, minX: Float!, minY: Float!, maxX: Float!, maxY: Float!): [[Float!]!]!
 
-  nodeMetrics(cluster: String!, partition: String!, nodes: [String!], scopes: [String!], metrics: [String!], from: Time!, to: Time!): [NodeMetrics!]!
+  nodeMetrics(cluster: String!, partition: String, nodes: [String!], scopes: [MetricScope!], metrics: [String!], from: Time!, to: Time!): [NodeMetrics!]!
 }
 
 type Mutation {
@@ -1663,10 +1663,10 @@ func (ec *executionContext) field_Query_nodeMetrics_args(ctx context.Context, ra
 		}
 	}
 	args["cluster"] = arg0
-	var arg1 string
+	var arg1 *string
 	if tmp, ok := rawArgs["partition"]; ok {
 		ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("partition"))
-		arg1, err = ec.unmarshalNString2string(ctx, tmp)
+		arg1, err = ec.unmarshalOString2ᚖstring(ctx, tmp)
 		if err != nil {
 			return nil, err
 		}
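The switch from unmarshalNString2string to unmarshalOString2ᚖstring follows gqlgen's naming scheme: an N prefix marks a non-null (required) input, an O prefix an optional one, and runic characters encode Go syntax that is illegal in identifiers (ᚖ for *, ᚕ for [], ᚗ for ., ᚋ for /). Roughly what the optional variant does, as a sketch rather than the exact generated body (which varies by gqlgen version):

```go
package graph

import (
	"context"

	"github.com/99designs/gqlgen/graphql"
)

// unmarshalOptionalString sketches gqlgen's unmarshalOString2ᚖstring:
// unlike the non-null variant, a missing value passes through as nil
// instead of producing a "must not be null" error.
func unmarshalOptionalString(ctx context.Context, v interface{}) (*string, error) {
	if v == nil {
		return nil, nil
	}
	res, err := graphql.UnmarshalString(v)
	return &res, graphql.ErrorOnPath(ctx, err)
}
```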
@@ -1681,10 +1681,10 @@ func (ec *executionContext) field_Query_nodeMetrics_args(ctx context.Context, ra
 		}
 	}
 	args["nodes"] = arg2
-	var arg3 []string
+	var arg3 []schema.MetricScope
 	if tmp, ok := rawArgs["scopes"]; ok {
 		ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("scopes"))
-		arg3, err = ec.unmarshalOString2ᚕstring(ctx, tmp)
+		arg3, err = ec.unmarshalOMetricScope2ᚕgithubᚗcomᚋClusterCockpitᚋccᚑbackendᚋschemaᚐMetricScope(ctx, tmp)
 		if err != nil {
 			return nil, err
 		}
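Binding scopes to []schema.MetricScope instead of []string works because gqlgen lets a schema enum map onto any Go type implementing its Marshaler/Unmarshaler interfaces. A sketch of what such a binding looks like (only the MetricScope name comes from the diff; the value set and method bodies are assumptions):

```go
package schema

import (
	"fmt"
	"io"
)

// MetricScope as a string-backed enum; gqlgen calls these two methods
// when converting between GraphQL enum values and Go values.
type MetricScope string

const (
	MetricScopeNode   MetricScope = "node"   // assumed value
	MetricScopeSocket MetricScope = "socket" // assumed value
	MetricScopeCpu    MetricScope = "cpu"    // assumed value
)

func (e *MetricScope) UnmarshalGQL(v interface{}) error {
	str, ok := v.(string)
	if !ok {
		return fmt.Errorf("enums must be strings")
	}
	*e = MetricScope(str)
	return nil
}

func (e MetricScope) MarshalGQL(w io.Writer) {
	fmt.Fprintf(w, "%q", string(e))
}
```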
@@ -4953,7 +4953,7 @@ func (ec *executionContext) _Query_nodeMetrics(ctx context.Context, field graphq
 	fc.Args = args
 	resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
 		ctx = rctx // use context from middleware stack in children
-		return ec.resolvers.Query().NodeMetrics(rctx, args["cluster"].(string), args["partition"].(string), args["nodes"].([]string), args["scopes"].([]string), args["metrics"].([]string), args["from"].(time.Time), args["to"].(time.Time))
+		return ec.resolvers.Query().NodeMetrics(rctx, args["cluster"].(string), args["partition"].(*string), args["nodes"].([]string), args["scopes"].([]schema.MetricScope), args["metrics"].([]string), args["from"].(time.Time), args["to"].(time.Time))
 	})
 	if err != nil {
 		ec.Error(ctx, err)

Changed file 2/3: GraphQL schema

@@ -139,7 +139,7 @@ type Query {
   rooflineHeatmap(filter: [JobFilter!]!, rows: Int!, cols: Int!, minX: Float!, minY: Float!, maxX: Float!, maxY: Float!): [[Float!]!]!
 
-  nodeMetrics(cluster: String!, partition: String!, nodes: [String!], scopes: [String!], metrics: [String!], from: Time!, to: Time!): [NodeMetrics!]!
+  nodeMetrics(cluster: String!, partition: String, nodes: [String!], scopes: [MetricScope!], metrics: [String!], from: Time!, to: Time!): [NodeMetrics!]!
 }
 
 type Mutation {
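With partition declared as a plain String, clients can simply leave the argument out. A hedged example of such a request (the endpoint path, cluster name, metric name, and time range are placeholders, not taken from the repository):

```go
package main

import (
	"bytes"
	"encoding/json"
	"fmt"
	"io"
	"net/http"
)

func main() {
	// partition and scopes are omitted entirely; before this commit the
	// server would have rejected this query with a missing-argument error.
	query := `{
	  nodeMetrics(cluster: "testcluster", metrics: ["flops_any"],
	              from: "2022-02-01T00:00:00Z", to: "2022-02-02T00:00:00Z") {
	    host
	    metrics { name }
	  }
	}`

	body, _ := json.Marshal(map[string]string{"query": query})
	resp, err := http.Post("http://localhost:8080/query", "application/json", bytes.NewReader(body))
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()

	out, _ := io.ReadAll(resp.Body)
	fmt.Println(string(out))
}
```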

Changed file 3/3: query resolvers

@@ -207,13 +207,41 @@ func (r *queryResolver) RooflineHeatmap(ctx context.Context, filter []*model.Job
 	return r.rooflineHeatmap(ctx, filter, rows, cols, minX, minY, maxX, maxY)
 }
 
-func (r *queryResolver) NodeMetrics(ctx context.Context, cluster string, partition string, nodes []string, scopes []string, metrics []string, from time.Time, to time.Time) ([]*model.NodeMetrics, error) {
+func (r *queryResolver) NodeMetrics(ctx context.Context, cluster string, partition *string, nodes []string, scopes []schema.MetricScope, metrics []string, from time.Time, to time.Time) ([]*model.NodeMetrics, error) {
 	user := auth.GetUser(ctx)
 	if user != nil && !user.HasRole(auth.RoleAdmin) {
 		return nil, errors.New("you need to be an administrator for this query")
 	}
-	return nil, nil
+
+	if partition == nil {
+		partition = new(string)
+	}
+
+	data, err := metricdata.LoadNodeData(cluster, *partition, metrics, nodes, scopes, from, to, ctx)
+	if err != nil {
+		return nil, err
+	}
+
+	nodeMetrics := make([]*model.NodeMetrics, 0, len(data))
+	for hostname, metrics := range data {
+		host := &model.NodeMetrics{
+			Host:    hostname,
+			Metrics: make([]*model.JobMetricWithName, 0, len(metrics)*len(scopes)),
+		}
+
+		for metric, scopedMetrics := range metrics {
+			for _, scopedMetric := range scopedMetrics {
+				host.Metrics = append(host.Metrics, &model.JobMetricWithName{
+					Name:   metric,
+					Metric: scopedMetric,
+				})
+			}
+		}
+
+		nodeMetrics = append(nodeMetrics, host)
+	}
+
+	return nodeMetrics, nil
 }
 
 // Job returns generated.JobResolver implementation.
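In the new resolver, a nil partition is first replaced by a pointer to the empty string so LoadNodeData keeps its value-typed parameter; the nested loops then flatten the two-level map it returns (hostname → metric name → one entry per scope) into a flat list of named metrics per host. A self-contained sketch of that flattening with simplified stand-in types (the real schema/model types carry full metric series):

```go
package main

import "fmt"

// Simplified stand-ins for schema.JobMetric and model.JobMetricWithName.
type JobMetric struct{ Scope string }

type JobMetricWithName struct {
	Name   string
	Metric *JobMetric
}

func main() {
	// Assumed shape of LoadNodeData's result: hostname -> metric -> one
	// JobMetric per requested scope.
	data := map[string]map[string][]*JobMetric{
		"node001": {
			"flops_any": {{Scope: "node"}, {Scope: "socket"}},
		},
	}

	for hostname, metrics := range data {
		flat := make([]*JobMetricWithName, 0)
		for name, scoped := range metrics {
			for _, m := range scoped {
				flat = append(flat, &JobMetricWithName{Name: name, Metric: m})
			}
		}
		fmt.Printf("%s: %d (metric, scope) pairs\n", hostname, len(flat))
	}
}
```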