diff --git a/api/schema.graphqls b/api/schema.graphqls
index 01eabc2..aa6aea2 100644
--- a/api/schema.graphqls
+++ b/api/schema.graphqls
@@ -203,7 +203,7 @@ type Query {
   jobsFootprints(filter: [JobFilter!], metrics: [String!]!): Footprints
 
   jobs(filter: [JobFilter!], page: PageRequest, order: OrderByInput): JobResultList!
-  jobsStatistics(filter: [JobFilter!], page: PageRequest, sortBy: SortByAggregate, groupBy: Aggregate): [JobsStatistics!]!
+  jobsStatistics(filter: [JobFilter!], metrics: [String!], page: PageRequest, sortBy: SortByAggregate, groupBy: Aggregate): [JobsStatistics!]!
 
   rooflineHeatmap(filter: [JobFilter!]!, rows: Int!, cols: Int!, minX: Float!, minY: Float!, maxX: Float!, maxY: Float!): [[Float!]!]!
 
@@ -291,6 +291,19 @@ type HistoPoint {
   value: Int!
 }
 
+type MetricHistoPoints {
+  metric: String!
+  unit: String!
+  data: [MetricHistoPoint!]
+}
+
+type MetricHistoPoint {
+  bin: Int
+  count: Int!
+  min: Int
+  max: Int
+}
+
 type JobsStatistics {
   id: ID! # If `groupBy` was used, ID of the user/project/cluster
   name: String! # if User-Statistics: Given Name of Account (ID) Owner
@@ -308,6 +321,7 @@ type JobsStatistics {
   histNumNodes: [HistoPoint!]! # value: number of nodes, count: number of jobs with that number of nodes
   histNumCores: [HistoPoint!]! # value: number of cores, count: number of jobs with that number of cores
   histNumAccs:  [HistoPoint!]! # value: number of accs, count: number of jobs with that number of accs
+  histMetrics:  [MetricHistoPoints!]! # metric: metricname, data array of histopoints: value: metric average bin, count: number of jobs with that metric average
 }
 
 input PageRequest {
diff --git a/internal/graph/generated/generated.go b/internal/graph/generated/generated.go
index 6778e76..d84f043 100644
--- a/internal/graph/generated/generated.go
+++ b/internal/graph/generated/generated.go
@@ -25,6 +25,7 @@ import (
 // NewExecutableSchema creates an ExecutableSchema from the ResolverRoot interface.
func NewExecutableSchema(cfg Config) graphql.ExecutableSchema { return &executableSchema{ + schema: cfg.Schema, resolvers: cfg.Resolvers, directives: cfg.Directives, complexity: cfg.Complexity, @@ -32,6 +33,7 @@ func NewExecutableSchema(cfg Config) graphql.ExecutableSchema { } type Config struct { + Schema *ast.Schema Resolvers ResolverRoot Directives DirectiveRoot Complexity ComplexityRoot @@ -145,6 +147,7 @@ type ComplexityRoot struct { JobsStatistics struct { HistDuration func(childComplexity int) int + HistMetrics func(childComplexity int) int HistNumAccs func(childComplexity int) int HistNumCores func(childComplexity int) int HistNumNodes func(childComplexity int) int @@ -180,6 +183,19 @@ type ComplexityRoot struct { Metric func(childComplexity int) int } + MetricHistoPoint struct { + Bin func(childComplexity int) int + Count func(childComplexity int) int + Max func(childComplexity int) int + Min func(childComplexity int) int + } + + MetricHistoPoints struct { + Data func(childComplexity int) int + Metric func(childComplexity int) int + Unit func(childComplexity int) int + } + MetricStatistics struct { Avg func(childComplexity int) int Max func(childComplexity int) int @@ -212,7 +228,7 @@ type ComplexityRoot struct { JobMetrics func(childComplexity int, id string, metrics []string, scopes []schema.MetricScope) int Jobs func(childComplexity int, filter []*model.JobFilter, page *model.PageRequest, order *model.OrderByInput) int JobsFootprints func(childComplexity int, filter []*model.JobFilter, metrics []string) int - JobsStatistics func(childComplexity int, filter []*model.JobFilter, page *model.PageRequest, sortBy *model.SortByAggregate, groupBy *model.Aggregate) int + JobsStatistics func(childComplexity int, filter []*model.JobFilter, metrics []string, page *model.PageRequest, sortBy *model.SortByAggregate, groupBy *model.Aggregate) int NodeMetrics func(childComplexity int, cluster string, nodes []string, scopes []schema.MetricScope, metrics []string, from time.Time, to time.Time) int RooflineHeatmap func(childComplexity int, filter []*model.JobFilter, rows int, cols int, minX float64, minY float64, maxX float64, maxY float64) int Tags func(childComplexity int) int @@ -327,7 +343,7 @@ type QueryResolver interface { JobMetrics(ctx context.Context, id string, metrics []string, scopes []schema.MetricScope) ([]*model.JobMetricWithName, error) JobsFootprints(ctx context.Context, filter []*model.JobFilter, metrics []string) (*model.Footprints, error) Jobs(ctx context.Context, filter []*model.JobFilter, page *model.PageRequest, order *model.OrderByInput) (*model.JobResultList, error) - JobsStatistics(ctx context.Context, filter []*model.JobFilter, page *model.PageRequest, sortBy *model.SortByAggregate, groupBy *model.Aggregate) ([]*model.JobsStatistics, error) + JobsStatistics(ctx context.Context, filter []*model.JobFilter, metrics []string, page *model.PageRequest, sortBy *model.SortByAggregate, groupBy *model.Aggregate) ([]*model.JobsStatistics, error) RooflineHeatmap(ctx context.Context, filter []*model.JobFilter, rows int, cols int, minX float64, minY float64, maxX float64, maxY float64) ([][]float64, error) NodeMetrics(ctx context.Context, cluster string, nodes []string, scopes []schema.MetricScope, metrics []string, from time.Time, to time.Time) ([]*model.NodeMetrics, error) } @@ -336,12 +352,16 @@ type SubClusterResolver interface { } type executableSchema struct { + schema *ast.Schema resolvers ResolverRoot directives DirectiveRoot complexity ComplexityRoot } func (e 
*executableSchema) Schema() *ast.Schema { + if e.schema != nil { + return e.schema + } return parsedSchema } @@ -763,6 +783,13 @@ func (e *executableSchema) Complexity(typeName, field string, childComplexity in return e.complexity.JobsStatistics.HistDuration(childComplexity), true + case "JobsStatistics.histMetrics": + if e.complexity.JobsStatistics.HistMetrics == nil { + break + } + + return e.complexity.JobsStatistics.HistMetrics(childComplexity), true + case "JobsStatistics.histNumAccs": if e.complexity.JobsStatistics.HistNumAccs == nil { break @@ -952,6 +979,55 @@ func (e *executableSchema) Complexity(typeName, field string, childComplexity in return e.complexity.MetricFootprints.Metric(childComplexity), true + case "MetricHistoPoint.bin": + if e.complexity.MetricHistoPoint.Bin == nil { + break + } + + return e.complexity.MetricHistoPoint.Bin(childComplexity), true + + case "MetricHistoPoint.count": + if e.complexity.MetricHistoPoint.Count == nil { + break + } + + return e.complexity.MetricHistoPoint.Count(childComplexity), true + + case "MetricHistoPoint.max": + if e.complexity.MetricHistoPoint.Max == nil { + break + } + + return e.complexity.MetricHistoPoint.Max(childComplexity), true + + case "MetricHistoPoint.min": + if e.complexity.MetricHistoPoint.Min == nil { + break + } + + return e.complexity.MetricHistoPoint.Min(childComplexity), true + + case "MetricHistoPoints.data": + if e.complexity.MetricHistoPoints.Data == nil { + break + } + + return e.complexity.MetricHistoPoints.Data(childComplexity), true + + case "MetricHistoPoints.metric": + if e.complexity.MetricHistoPoints.Metric == nil { + break + } + + return e.complexity.MetricHistoPoints.Metric(childComplexity), true + + case "MetricHistoPoints.unit": + if e.complexity.MetricHistoPoints.Unit == nil { + break + } + + return e.complexity.MetricHistoPoints.Unit(childComplexity), true + case "MetricStatistics.avg": if e.complexity.MetricStatistics.Avg == nil { break @@ -1145,7 +1221,7 @@ func (e *executableSchema) Complexity(typeName, field string, childComplexity in return 0, false } - return e.complexity.Query.JobsStatistics(childComplexity, args["filter"].([]*model.JobFilter), args["page"].(*model.PageRequest), args["sortBy"].(*model.SortByAggregate), args["groupBy"].(*model.Aggregate)), true + return e.complexity.Query.JobsStatistics(childComplexity, args["filter"].([]*model.JobFilter), args["metrics"].([]string), args["page"].(*model.PageRequest), args["sortBy"].(*model.SortByAggregate), args["groupBy"].(*model.Aggregate)), true case "Query.nodeMetrics": if e.complexity.Query.NodeMetrics == nil { @@ -1620,14 +1696,14 @@ func (ec *executionContext) introspectSchema() (*introspection.Schema, error) { if ec.DisableIntrospection { return nil, errors.New("introspection disabled") } - return introspection.WrapSchema(parsedSchema), nil + return introspection.WrapSchema(ec.Schema()), nil } func (ec *executionContext) introspectType(name string) (*introspection.Type, error) { if ec.DisableIntrospection { return nil, errors.New("introspection disabled") } - return introspection.WrapTypeFromDef(parsedSchema, parsedSchema.Types[name]), nil + return introspection.WrapTypeFromDef(ec.Schema(), ec.Schema().Types[name]), nil } var sources = []*ast.Source{ @@ -1836,7 +1912,7 @@ type Query { jobsFootprints(filter: [JobFilter!], metrics: [String!]!): Footprints jobs(filter: [JobFilter!], page: PageRequest, order: OrderByInput): JobResultList! 
- jobsStatistics(filter: [JobFilter!], page: PageRequest, sortBy: SortByAggregate, groupBy: Aggregate): [JobsStatistics!]! + jobsStatistics(filter: [JobFilter!], metrics: [String!], page: PageRequest, sortBy: SortByAggregate, groupBy: Aggregate): [JobsStatistics!]! rooflineHeatmap(filter: [JobFilter!]!, rows: Int!, cols: Int!, minX: Float!, minY: Float!, maxX: Float!, maxY: Float!): [[Float!]!]! @@ -1924,6 +2000,19 @@ type HistoPoint { value: Int! } +type MetricHistoPoints { + metric: String! + unit: String! + data: [MetricHistoPoint!] +} + +type MetricHistoPoint { + bin: Int + count: Int! + min: Int + max: Int +} + type JobsStatistics { id: ID! # If ` + "`" + `groupBy` + "`" + ` was used, ID of the user/project/cluster name: String! # if User-Statistics: Given Name of Account (ID) Owner @@ -1941,6 +2030,7 @@ type JobsStatistics { histNumNodes: [HistoPoint!]! # value: number of nodes, count: number of jobs with that number of nodes histNumCores: [HistoPoint!]! # value: number of cores, count: number of jobs with that number of cores histNumAccs: [HistoPoint!]! # value: number of accs, count: number of jobs with that number of accs + histMetrics: [MetricHistoPoints!]! # metric: metricname, data array of histopoints: value: metric average bin, count: number of jobs with that metric average } input PageRequest { @@ -2180,33 +2270,42 @@ func (ec *executionContext) field_Query_jobsStatistics_args(ctx context.Context, } } args["filter"] = arg0 - var arg1 *model.PageRequest + var arg1 []string + if tmp, ok := rawArgs["metrics"]; ok { + ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("metrics")) + arg1, err = ec.unmarshalOString2ᚕstringᚄ(ctx, tmp) + if err != nil { + return nil, err + } + } + args["metrics"] = arg1 + var arg2 *model.PageRequest if tmp, ok := rawArgs["page"]; ok { ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("page")) - arg1, err = ec.unmarshalOPageRequest2ᚖgithubᚗcomᚋClusterCockpitᚋccᚑbackendᚋinternalᚋgraphᚋmodelᚐPageRequest(ctx, tmp) + arg2, err = ec.unmarshalOPageRequest2ᚖgithubᚗcomᚋClusterCockpitᚋccᚑbackendᚋinternalᚋgraphᚋmodelᚐPageRequest(ctx, tmp) if err != nil { return nil, err } } - args["page"] = arg1 - var arg2 *model.SortByAggregate + args["page"] = arg2 + var arg3 *model.SortByAggregate if tmp, ok := rawArgs["sortBy"]; ok { ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("sortBy")) - arg2, err = ec.unmarshalOSortByAggregate2ᚖgithubᚗcomᚋClusterCockpitᚋccᚑbackendᚋinternalᚋgraphᚋmodelᚐSortByAggregate(ctx, tmp) + arg3, err = ec.unmarshalOSortByAggregate2ᚖgithubᚗcomᚋClusterCockpitᚋccᚑbackendᚋinternalᚋgraphᚋmodelᚐSortByAggregate(ctx, tmp) if err != nil { return nil, err } } - args["sortBy"] = arg2 - var arg3 *model.Aggregate + args["sortBy"] = arg3 + var arg4 *model.Aggregate if tmp, ok := rawArgs["groupBy"]; ok { ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("groupBy")) - arg3, err = ec.unmarshalOAggregate2ᚖgithubᚗcomᚋClusterCockpitᚋccᚑbackendᚋinternalᚋgraphᚋmodelᚐAggregate(ctx, tmp) + arg4, err = ec.unmarshalOAggregate2ᚖgithubᚗcomᚋClusterCockpitᚋccᚑbackendᚋinternalᚋgraphᚋmodelᚐAggregate(ctx, tmp) if err != nil { return nil, err } } - args["groupBy"] = arg3 + args["groupBy"] = arg4 return args, nil } @@ -5850,6 +5949,58 @@ func (ec *executionContext) fieldContext_JobsStatistics_histNumAccs(ctx context. 
return fc, nil } +func (ec *executionContext) _JobsStatistics_histMetrics(ctx context.Context, field graphql.CollectedField, obj *model.JobsStatistics) (ret graphql.Marshaler) { + fc, err := ec.fieldContext_JobsStatistics_histMetrics(ctx, field) + if err != nil { + return graphql.Null + } + ctx = graphql.WithFieldContext(ctx, fc) + defer func() { + if r := recover(); r != nil { + ec.Error(ctx, ec.Recover(ctx, r)) + ret = graphql.Null + } + }() + resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) { + ctx = rctx // use context from middleware stack in children + return obj.HistMetrics, nil + }) + if err != nil { + ec.Error(ctx, err) + return graphql.Null + } + if resTmp == nil { + if !graphql.HasFieldError(ctx, fc) { + ec.Errorf(ctx, "must not be null") + } + return graphql.Null + } + res := resTmp.([]*model.MetricHistoPoints) + fc.Result = res + return ec.marshalNMetricHistoPoints2ᚕᚖgithubᚗcomᚋClusterCockpitᚋccᚑbackendᚋinternalᚋgraphᚋmodelᚐMetricHistoPointsᚄ(ctx, field.Selections, res) +} + +func (ec *executionContext) fieldContext_JobsStatistics_histMetrics(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) { + fc = &graphql.FieldContext{ + Object: "JobsStatistics", + Field: field, + IsMethod: false, + IsResolver: false, + Child: func(ctx context.Context, field graphql.CollectedField) (*graphql.FieldContext, error) { + switch field.Name { + case "metric": + return ec.fieldContext_MetricHistoPoints_metric(ctx, field) + case "unit": + return ec.fieldContext_MetricHistoPoints_unit(ctx, field) + case "data": + return ec.fieldContext_MetricHistoPoints_data(ctx, field) + } + return nil, fmt.Errorf("no field named %q was found under type MetricHistoPoints", field.Name) + }, + } + return fc, nil +} + func (ec *executionContext) _MetricConfig_name(ctx context.Context, field graphql.CollectedField, obj *schema.MetricConfig) (ret graphql.Marshaler) { fc, err := ec.fieldContext_MetricConfig_name(ctx, field) if err != nil { @@ -6395,6 +6546,312 @@ func (ec *executionContext) fieldContext_MetricFootprints_data(ctx context.Conte return fc, nil } +func (ec *executionContext) _MetricHistoPoint_bin(ctx context.Context, field graphql.CollectedField, obj *model.MetricHistoPoint) (ret graphql.Marshaler) { + fc, err := ec.fieldContext_MetricHistoPoint_bin(ctx, field) + if err != nil { + return graphql.Null + } + ctx = graphql.WithFieldContext(ctx, fc) + defer func() { + if r := recover(); r != nil { + ec.Error(ctx, ec.Recover(ctx, r)) + ret = graphql.Null + } + }() + resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) { + ctx = rctx // use context from middleware stack in children + return obj.Bin, nil + }) + if err != nil { + ec.Error(ctx, err) + return graphql.Null + } + if resTmp == nil { + return graphql.Null + } + res := resTmp.(*int) + fc.Result = res + return ec.marshalOInt2ᚖint(ctx, field.Selections, res) +} + +func (ec *executionContext) fieldContext_MetricHistoPoint_bin(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) { + fc = &graphql.FieldContext{ + Object: "MetricHistoPoint", + Field: field, + IsMethod: false, + IsResolver: false, + Child: func(ctx context.Context, field graphql.CollectedField) (*graphql.FieldContext, error) { + return nil, errors.New("field of type Int does not have child fields") + }, + } + return fc, nil +} + +func (ec *executionContext) _MetricHistoPoint_count(ctx context.Context, field graphql.CollectedField, obj 
*model.MetricHistoPoint) (ret graphql.Marshaler) { + fc, err := ec.fieldContext_MetricHistoPoint_count(ctx, field) + if err != nil { + return graphql.Null + } + ctx = graphql.WithFieldContext(ctx, fc) + defer func() { + if r := recover(); r != nil { + ec.Error(ctx, ec.Recover(ctx, r)) + ret = graphql.Null + } + }() + resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) { + ctx = rctx // use context from middleware stack in children + return obj.Count, nil + }) + if err != nil { + ec.Error(ctx, err) + return graphql.Null + } + if resTmp == nil { + if !graphql.HasFieldError(ctx, fc) { + ec.Errorf(ctx, "must not be null") + } + return graphql.Null + } + res := resTmp.(int) + fc.Result = res + return ec.marshalNInt2int(ctx, field.Selections, res) +} + +func (ec *executionContext) fieldContext_MetricHistoPoint_count(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) { + fc = &graphql.FieldContext{ + Object: "MetricHistoPoint", + Field: field, + IsMethod: false, + IsResolver: false, + Child: func(ctx context.Context, field graphql.CollectedField) (*graphql.FieldContext, error) { + return nil, errors.New("field of type Int does not have child fields") + }, + } + return fc, nil +} + +func (ec *executionContext) _MetricHistoPoint_min(ctx context.Context, field graphql.CollectedField, obj *model.MetricHistoPoint) (ret graphql.Marshaler) { + fc, err := ec.fieldContext_MetricHistoPoint_min(ctx, field) + if err != nil { + return graphql.Null + } + ctx = graphql.WithFieldContext(ctx, fc) + defer func() { + if r := recover(); r != nil { + ec.Error(ctx, ec.Recover(ctx, r)) + ret = graphql.Null + } + }() + resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) { + ctx = rctx // use context from middleware stack in children + return obj.Min, nil + }) + if err != nil { + ec.Error(ctx, err) + return graphql.Null + } + if resTmp == nil { + return graphql.Null + } + res := resTmp.(*int) + fc.Result = res + return ec.marshalOInt2ᚖint(ctx, field.Selections, res) +} + +func (ec *executionContext) fieldContext_MetricHistoPoint_min(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) { + fc = &graphql.FieldContext{ + Object: "MetricHistoPoint", + Field: field, + IsMethod: false, + IsResolver: false, + Child: func(ctx context.Context, field graphql.CollectedField) (*graphql.FieldContext, error) { + return nil, errors.New("field of type Int does not have child fields") + }, + } + return fc, nil +} + +func (ec *executionContext) _MetricHistoPoint_max(ctx context.Context, field graphql.CollectedField, obj *model.MetricHistoPoint) (ret graphql.Marshaler) { + fc, err := ec.fieldContext_MetricHistoPoint_max(ctx, field) + if err != nil { + return graphql.Null + } + ctx = graphql.WithFieldContext(ctx, fc) + defer func() { + if r := recover(); r != nil { + ec.Error(ctx, ec.Recover(ctx, r)) + ret = graphql.Null + } + }() + resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) { + ctx = rctx // use context from middleware stack in children + return obj.Max, nil + }) + if err != nil { + ec.Error(ctx, err) + return graphql.Null + } + if resTmp == nil { + return graphql.Null + } + res := resTmp.(*int) + fc.Result = res + return ec.marshalOInt2ᚖint(ctx, field.Selections, res) +} + +func (ec *executionContext) fieldContext_MetricHistoPoint_max(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) { + fc = 
&graphql.FieldContext{ + Object: "MetricHistoPoint", + Field: field, + IsMethod: false, + IsResolver: false, + Child: func(ctx context.Context, field graphql.CollectedField) (*graphql.FieldContext, error) { + return nil, errors.New("field of type Int does not have child fields") + }, + } + return fc, nil +} + +func (ec *executionContext) _MetricHistoPoints_metric(ctx context.Context, field graphql.CollectedField, obj *model.MetricHistoPoints) (ret graphql.Marshaler) { + fc, err := ec.fieldContext_MetricHistoPoints_metric(ctx, field) + if err != nil { + return graphql.Null + } + ctx = graphql.WithFieldContext(ctx, fc) + defer func() { + if r := recover(); r != nil { + ec.Error(ctx, ec.Recover(ctx, r)) + ret = graphql.Null + } + }() + resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) { + ctx = rctx // use context from middleware stack in children + return obj.Metric, nil + }) + if err != nil { + ec.Error(ctx, err) + return graphql.Null + } + if resTmp == nil { + if !graphql.HasFieldError(ctx, fc) { + ec.Errorf(ctx, "must not be null") + } + return graphql.Null + } + res := resTmp.(string) + fc.Result = res + return ec.marshalNString2string(ctx, field.Selections, res) +} + +func (ec *executionContext) fieldContext_MetricHistoPoints_metric(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) { + fc = &graphql.FieldContext{ + Object: "MetricHistoPoints", + Field: field, + IsMethod: false, + IsResolver: false, + Child: func(ctx context.Context, field graphql.CollectedField) (*graphql.FieldContext, error) { + return nil, errors.New("field of type String does not have child fields") + }, + } + return fc, nil +} + +func (ec *executionContext) _MetricHistoPoints_unit(ctx context.Context, field graphql.CollectedField, obj *model.MetricHistoPoints) (ret graphql.Marshaler) { + fc, err := ec.fieldContext_MetricHistoPoints_unit(ctx, field) + if err != nil { + return graphql.Null + } + ctx = graphql.WithFieldContext(ctx, fc) + defer func() { + if r := recover(); r != nil { + ec.Error(ctx, ec.Recover(ctx, r)) + ret = graphql.Null + } + }() + resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) { + ctx = rctx // use context from middleware stack in children + return obj.Unit, nil + }) + if err != nil { + ec.Error(ctx, err) + return graphql.Null + } + if resTmp == nil { + if !graphql.HasFieldError(ctx, fc) { + ec.Errorf(ctx, "must not be null") + } + return graphql.Null + } + res := resTmp.(string) + fc.Result = res + return ec.marshalNString2string(ctx, field.Selections, res) +} + +func (ec *executionContext) fieldContext_MetricHistoPoints_unit(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) { + fc = &graphql.FieldContext{ + Object: "MetricHistoPoints", + Field: field, + IsMethod: false, + IsResolver: false, + Child: func(ctx context.Context, field graphql.CollectedField) (*graphql.FieldContext, error) { + return nil, errors.New("field of type String does not have child fields") + }, + } + return fc, nil +} + +func (ec *executionContext) _MetricHistoPoints_data(ctx context.Context, field graphql.CollectedField, obj *model.MetricHistoPoints) (ret graphql.Marshaler) { + fc, err := ec.fieldContext_MetricHistoPoints_data(ctx, field) + if err != nil { + return graphql.Null + } + ctx = graphql.WithFieldContext(ctx, fc) + defer func() { + if r := recover(); r != nil { + ec.Error(ctx, ec.Recover(ctx, r)) + ret = graphql.Null + } + }() + resTmp, err := 
ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) { + ctx = rctx // use context from middleware stack in children + return obj.Data, nil + }) + if err != nil { + ec.Error(ctx, err) + return graphql.Null + } + if resTmp == nil { + return graphql.Null + } + res := resTmp.([]*model.MetricHistoPoint) + fc.Result = res + return ec.marshalOMetricHistoPoint2ᚕᚖgithubᚗcomᚋClusterCockpitᚋccᚑbackendᚋinternalᚋgraphᚋmodelᚐMetricHistoPointᚄ(ctx, field.Selections, res) +} + +func (ec *executionContext) fieldContext_MetricHistoPoints_data(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) { + fc = &graphql.FieldContext{ + Object: "MetricHistoPoints", + Field: field, + IsMethod: false, + IsResolver: false, + Child: func(ctx context.Context, field graphql.CollectedField) (*graphql.FieldContext, error) { + switch field.Name { + case "bin": + return ec.fieldContext_MetricHistoPoint_bin(ctx, field) + case "count": + return ec.fieldContext_MetricHistoPoint_count(ctx, field) + case "min": + return ec.fieldContext_MetricHistoPoint_min(ctx, field) + case "max": + return ec.fieldContext_MetricHistoPoint_max(ctx, field) + } + return nil, fmt.Errorf("no field named %q was found under type MetricHistoPoint", field.Name) + }, + } + return fc, nil +} + func (ec *executionContext) _MetricStatistics_avg(ctx context.Context, field graphql.CollectedField, obj *schema.MetricStatistics) (ret graphql.Marshaler) { fc, err := ec.fieldContext_MetricStatistics_avg(ctx, field) if err != nil { @@ -7592,7 +8049,7 @@ func (ec *executionContext) _Query_jobsStatistics(ctx context.Context, field gra }() resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) { ctx = rctx // use context from middleware stack in children - return ec.resolvers.Query().JobsStatistics(rctx, fc.Args["filter"].([]*model.JobFilter), fc.Args["page"].(*model.PageRequest), fc.Args["sortBy"].(*model.SortByAggregate), fc.Args["groupBy"].(*model.Aggregate)) + return ec.resolvers.Query().JobsStatistics(rctx, fc.Args["filter"].([]*model.JobFilter), fc.Args["metrics"].([]string), fc.Args["page"].(*model.PageRequest), fc.Args["sortBy"].(*model.SortByAggregate), fc.Args["groupBy"].(*model.Aggregate)) }) if err != nil { ec.Error(ctx, err) @@ -7649,6 +8106,8 @@ func (ec *executionContext) fieldContext_Query_jobsStatistics(ctx context.Contex return ec.fieldContext_JobsStatistics_histNumCores(ctx, field) case "histNumAccs": return ec.fieldContext_JobsStatistics_histNumAccs(ctx, field) + case "histMetrics": + return ec.fieldContext_JobsStatistics_histMetrics(ctx, field) } return nil, fmt.Errorf("no field named %q was found under type JobsStatistics", field.Name) }, @@ -13136,6 +13595,11 @@ func (ec *executionContext) _JobsStatistics(ctx context.Context, sel ast.Selecti if out.Values[i] == graphql.Null { out.Invalids++ } + case "histMetrics": + out.Values[i] = ec._JobsStatistics_histMetrics(ctx, field, obj) + if out.Values[i] == graphql.Null { + out.Invalids++ + } default: panic("unknown field " + strconv.Quote(field.Name)) } @@ -13284,6 +13748,97 @@ func (ec *executionContext) _MetricFootprints(ctx context.Context, sel ast.Selec return out } +var metricHistoPointImplementors = []string{"MetricHistoPoint"} + +func (ec *executionContext) _MetricHistoPoint(ctx context.Context, sel ast.SelectionSet, obj *model.MetricHistoPoint) graphql.Marshaler { + fields := graphql.CollectFields(ec.OperationContext, sel, metricHistoPointImplementors) + + out := graphql.NewFieldSet(fields) + deferred 
:= make(map[string]*graphql.FieldSet) + for i, field := range fields { + switch field.Name { + case "__typename": + out.Values[i] = graphql.MarshalString("MetricHistoPoint") + case "bin": + out.Values[i] = ec._MetricHistoPoint_bin(ctx, field, obj) + case "count": + out.Values[i] = ec._MetricHistoPoint_count(ctx, field, obj) + if out.Values[i] == graphql.Null { + out.Invalids++ + } + case "min": + out.Values[i] = ec._MetricHistoPoint_min(ctx, field, obj) + case "max": + out.Values[i] = ec._MetricHistoPoint_max(ctx, field, obj) + default: + panic("unknown field " + strconv.Quote(field.Name)) + } + } + out.Dispatch(ctx) + if out.Invalids > 0 { + return graphql.Null + } + + atomic.AddInt32(&ec.deferred, int32(len(deferred))) + + for label, dfs := range deferred { + ec.processDeferredGroup(graphql.DeferredGroup{ + Label: label, + Path: graphql.GetPath(ctx), + FieldSet: dfs, + Context: ctx, + }) + } + + return out +} + +var metricHistoPointsImplementors = []string{"MetricHistoPoints"} + +func (ec *executionContext) _MetricHistoPoints(ctx context.Context, sel ast.SelectionSet, obj *model.MetricHistoPoints) graphql.Marshaler { + fields := graphql.CollectFields(ec.OperationContext, sel, metricHistoPointsImplementors) + + out := graphql.NewFieldSet(fields) + deferred := make(map[string]*graphql.FieldSet) + for i, field := range fields { + switch field.Name { + case "__typename": + out.Values[i] = graphql.MarshalString("MetricHistoPoints") + case "metric": + out.Values[i] = ec._MetricHistoPoints_metric(ctx, field, obj) + if out.Values[i] == graphql.Null { + out.Invalids++ + } + case "unit": + out.Values[i] = ec._MetricHistoPoints_unit(ctx, field, obj) + if out.Values[i] == graphql.Null { + out.Invalids++ + } + case "data": + out.Values[i] = ec._MetricHistoPoints_data(ctx, field, obj) + default: + panic("unknown field " + strconv.Quote(field.Name)) + } + } + out.Dispatch(ctx) + if out.Invalids > 0 { + return graphql.Null + } + + atomic.AddInt32(&ec.deferred, int32(len(deferred))) + + for label, dfs := range deferred { + ec.processDeferredGroup(graphql.DeferredGroup{ + Label: label, + Path: graphql.GetPath(ctx), + FieldSet: dfs, + Context: ctx, + }) + } + + return out +} + var metricStatisticsImplementors = []string{"MetricStatistics"} func (ec *executionContext) _MetricStatistics(ctx context.Context, sel ast.SelectionSet, obj *schema.MetricStatistics) graphql.Marshaler { @@ -15536,6 +16091,70 @@ func (ec *executionContext) marshalNMetricFootprints2ᚖgithubᚗcomᚋClusterCo return ec._MetricFootprints(ctx, sel, v) } +func (ec *executionContext) marshalNMetricHistoPoint2ᚖgithubᚗcomᚋClusterCockpitᚋccᚑbackendᚋinternalᚋgraphᚋmodelᚐMetricHistoPoint(ctx context.Context, sel ast.SelectionSet, v *model.MetricHistoPoint) graphql.Marshaler { + if v == nil { + if !graphql.HasFieldError(ctx, graphql.GetFieldContext(ctx)) { + ec.Errorf(ctx, "the requested element is null which the schema does not allow") + } + return graphql.Null + } + return ec._MetricHistoPoint(ctx, sel, v) +} + +func (ec *executionContext) marshalNMetricHistoPoints2ᚕᚖgithubᚗcomᚋClusterCockpitᚋccᚑbackendᚋinternalᚋgraphᚋmodelᚐMetricHistoPointsᚄ(ctx context.Context, sel ast.SelectionSet, v []*model.MetricHistoPoints) graphql.Marshaler { + ret := make(graphql.Array, len(v)) + var wg sync.WaitGroup + isLen1 := len(v) == 1 + if !isLen1 { + wg.Add(len(v)) + } + for i := range v { + i := i + fc := &graphql.FieldContext{ + Index: &i, + Result: &v[i], + } + ctx := graphql.WithFieldContext(ctx, fc) + f := func(i int) { + defer func() { + if r := recover(); r 
!= nil { + ec.Error(ctx, ec.Recover(ctx, r)) + ret = nil + } + }() + if !isLen1 { + defer wg.Done() + } + ret[i] = ec.marshalNMetricHistoPoints2ᚖgithubᚗcomᚋClusterCockpitᚋccᚑbackendᚋinternalᚋgraphᚋmodelᚐMetricHistoPoints(ctx, sel, v[i]) + } + if isLen1 { + f(i) + } else { + go f(i) + } + + } + wg.Wait() + + for _, e := range ret { + if e == graphql.Null { + return graphql.Null + } + } + + return ret +} + +func (ec *executionContext) marshalNMetricHistoPoints2ᚖgithubᚗcomᚋClusterCockpitᚋccᚑbackendᚋinternalᚋgraphᚋmodelᚐMetricHistoPoints(ctx context.Context, sel ast.SelectionSet, v *model.MetricHistoPoints) graphql.Marshaler { + if v == nil { + if !graphql.HasFieldError(ctx, graphql.GetFieldContext(ctx)) { + ec.Errorf(ctx, "the requested element is null which the schema does not allow") + } + return graphql.Null + } + return ec._MetricHistoPoints(ctx, sel, v) +} + func (ec *executionContext) unmarshalNMetricScope2githubᚗcomᚋClusterCockpitᚋccᚑbackendᚋpkgᚋschemaᚐMetricScope(ctx context.Context, v interface{}) (schema.MetricScope, error) { var res schema.MetricScope err := res.UnmarshalGQL(v) @@ -16591,6 +17210,53 @@ func (ec *executionContext) marshalOJobState2ᚕgithubᚗcomᚋClusterCockpitᚋ return ret } +func (ec *executionContext) marshalOMetricHistoPoint2ᚕᚖgithubᚗcomᚋClusterCockpitᚋccᚑbackendᚋinternalᚋgraphᚋmodelᚐMetricHistoPointᚄ(ctx context.Context, sel ast.SelectionSet, v []*model.MetricHistoPoint) graphql.Marshaler { + if v == nil { + return graphql.Null + } + ret := make(graphql.Array, len(v)) + var wg sync.WaitGroup + isLen1 := len(v) == 1 + if !isLen1 { + wg.Add(len(v)) + } + for i := range v { + i := i + fc := &graphql.FieldContext{ + Index: &i, + Result: &v[i], + } + ctx := graphql.WithFieldContext(ctx, fc) + f := func(i int) { + defer func() { + if r := recover(); r != nil { + ec.Error(ctx, ec.Recover(ctx, r)) + ret = nil + } + }() + if !isLen1 { + defer wg.Done() + } + ret[i] = ec.marshalNMetricHistoPoint2ᚖgithubᚗcomᚋClusterCockpitᚋccᚑbackendᚋinternalᚋgraphᚋmodelᚐMetricHistoPoint(ctx, sel, v[i]) + } + if isLen1 { + f(i) + } else { + go f(i) + } + + } + wg.Wait() + + for _, e := range ret { + if e == graphql.Null { + return graphql.Null + } + } + + return ret +} + func (ec *executionContext) unmarshalOMetricScope2ᚕgithubᚗcomᚋClusterCockpitᚋccᚑbackendᚋpkgᚋschemaᚐMetricScopeᚄ(ctx context.Context, v interface{}) ([]schema.MetricScope, error) { if v == nil { return nil, nil diff --git a/internal/graph/model/models_gen.go b/internal/graph/model/models_gen.go index 050784b..7b8ebd2 100644 --- a/internal/graph/model/models_gen.go +++ b/internal/graph/model/models_gen.go @@ -85,22 +85,23 @@ type JobResultList struct { } type JobsStatistics struct { - ID string `json:"id"` - Name string `json:"name"` - TotalJobs int `json:"totalJobs"` - RunningJobs int `json:"runningJobs"` - ShortJobs int `json:"shortJobs"` - TotalWalltime int `json:"totalWalltime"` - TotalNodes int `json:"totalNodes"` - TotalNodeHours int `json:"totalNodeHours"` - TotalCores int `json:"totalCores"` - TotalCoreHours int `json:"totalCoreHours"` - TotalAccs int `json:"totalAccs"` - TotalAccHours int `json:"totalAccHours"` - HistDuration []*HistoPoint `json:"histDuration"` - HistNumNodes []*HistoPoint `json:"histNumNodes"` - HistNumCores []*HistoPoint `json:"histNumCores"` - HistNumAccs []*HistoPoint `json:"histNumAccs"` + ID string `json:"id"` + Name string `json:"name"` + TotalJobs int `json:"totalJobs"` + RunningJobs int `json:"runningJobs"` + ShortJobs int `json:"shortJobs"` + TotalWalltime int `json:"totalWalltime"` + 
+	TotalNodes     int                  `json:"totalNodes"`
+	TotalNodeHours int                  `json:"totalNodeHours"`
+	TotalCores     int                  `json:"totalCores"`
+	TotalCoreHours int                  `json:"totalCoreHours"`
+	TotalAccs      int                  `json:"totalAccs"`
+	TotalAccHours  int                  `json:"totalAccHours"`
+	HistDuration   []*HistoPoint        `json:"histDuration"`
+	HistNumNodes   []*HistoPoint        `json:"histNumNodes"`
+	HistNumCores   []*HistoPoint        `json:"histNumCores"`
+	HistNumAccs    []*HistoPoint        `json:"histNumAccs"`
+	HistMetrics    []*MetricHistoPoints `json:"histMetrics"`
 }
 
 type MetricFootprints struct {
@@ -108,6 +109,19 @@
 	Data []schema.Float `json:"data"`
 }
 
+type MetricHistoPoint struct {
+	Bin   *int `json:"bin,omitempty"`
+	Count int  `json:"count"`
+	Min   *int `json:"min,omitempty"`
+	Max   *int `json:"max,omitempty"`
+}
+
+type MetricHistoPoints struct {
+	Metric string              `json:"metric"`
+	Unit   string              `json:"unit"`
+	Data   []*MetricHistoPoint `json:"data,omitempty"`
+}
+
 type NodeMetrics struct {
 	Host       string `json:"host"`
 	SubCluster string `json:"subCluster"`
diff --git a/internal/graph/schema.resolvers.go b/internal/graph/schema.resolvers.go
index 9e5e111..82bf026 100644
--- a/internal/graph/schema.resolvers.go
+++ b/internal/graph/schema.resolvers.go
@@ -2,7 +2,7 @@ package graph
 
 // This file will be automatically regenerated based on the schema, any resolver implementations
 // will be copied through when generating and any unknown code will be moved to the end.
-// Code generated by github.com/99designs/gqlgen version v0.17.36
+// Code generated by github.com/99designs/gqlgen version v0.17.40
 
 import (
 	"context"
@@ -244,7 +244,7 @@ func (r *queryResolver) Jobs(ctx context.Context, filter []*model.JobFilter, pag
 }
 
 // JobsStatistics is the resolver for the jobsStatistics field.
-func (r *queryResolver) JobsStatistics(ctx context.Context, filter []*model.JobFilter, page *model.PageRequest, sortBy *model.SortByAggregate, groupBy *model.Aggregate) ([]*model.JobsStatistics, error) {
+func (r *queryResolver) JobsStatistics(ctx context.Context, filter []*model.JobFilter, metrics []string, page *model.PageRequest, sortBy *model.SortByAggregate, groupBy *model.Aggregate) ([]*model.JobsStatistics, error) {
 	var err error
 	var stats []*model.JobsStatistics
 
@@ -291,6 +291,17 @@ func (r *queryResolver) JobsStatistics(ctx context.Context, filter []*model.JobF
 		}
 	}
 
+	if requireField(ctx, "histMetrics") {
+		if groupBy == nil {
+			stats[0], err = r.Repo.AddMetricHistograms(ctx, filter, metrics, stats[0])
+			if err != nil {
+				return nil, err
+			}
+		} else {
+			return nil, errors.New("metric histograms only implemented without groupBy argument")
+		}
+	}
+
 	return stats, nil
 }
 
diff --git a/internal/repository/query.go b/internal/repository/query.go
index 84b8048..317302b 100644
--- a/internal/repository/query.go
+++ b/internal/repository/query.go
@@ -96,7 +96,7 @@ func SecurityCheck(ctx context.Context, query sq.SelectBuilder) (sq.SelectBuilde
 	user := GetUserFromContext(ctx)
 	if user == nil {
 		var qnil sq.SelectBuilder
-		return qnil, fmt.Errorf("user context is nil!")
+		return qnil, fmt.Errorf("user context is nil")
 	} else if user.HasAnyRole([]schema.Role{schema.RoleAdmin, schema.RoleSupport, schema.RoleApi}) { // Admin & Co. : All jobs
 		return query, nil
 	} else if user.HasRole(schema.RoleManager) { // Manager : Add filter for managed projects' jobs only + personal jobs
diff --git a/internal/repository/stats.go b/internal/repository/stats.go
index 8084553..4d7be08 100644
--- a/internal/repository/stats.go
+++ b/internal/repository/stats.go
@@ -8,11 +8,15 @@
 import (
 	"context"
 	"database/sql"
 	"fmt"
+	"math"
 	"time"
 
 	"github.com/ClusterCockpit/cc-backend/internal/config"
 	"github.com/ClusterCockpit/cc-backend/internal/graph/model"
+	"github.com/ClusterCockpit/cc-backend/internal/metricdata"
+	"github.com/ClusterCockpit/cc-backend/pkg/archive"
 	"github.com/ClusterCockpit/cc-backend/pkg/log"
+	"github.com/ClusterCockpit/cc-backend/pkg/schema"
 	sq "github.com/Masterminds/squirrel"
 )
 
@@ -450,6 +454,39 @@ func (r *JobRepository) AddHistograms(
 	return stat, nil
 }
 
+// TODO: Take metric thresholds from the config of the filtered cluster, or of all clusters using the largest peak? Split into 10 bins plus one extra for artifacts?
+func (r *JobRepository) AddMetricHistograms(
+	ctx context.Context,
+	filter []*model.JobFilter,
+	metrics []string,
+	stat *model.JobsStatistics) (*model.JobsStatistics, error) {
+	start := time.Now()
+
+	// Running jobs only: first query the job metadata from sqlite, then load the metric data and make bins
+	for _, f := range filter {
+		if f.State != nil {
+			if len(f.State) == 1 && f.State[0] == "running" {
+				stat.HistMetrics = r.runningJobsMetricStatisticsHistogram(ctx, metrics, filter)
+				log.Debugf("Timer AddMetricHistograms %s", time.Since(start))
+				return stat, nil
+			}
+		}
+	}
+
+	// All other cases: query and make bins in sqlite directly
+	for _, m := range metrics {
+		metricHisto, err := r.jobsMetricStatisticsHistogram(ctx, m, filter)
+		if err != nil {
+			log.Warnf("Error while loading job metric statistics histogram: %s", m)
+			continue
+		}
+		stat.HistMetrics = append(stat.HistMetrics, metricHisto)
+	}
+
+	log.Debugf("Timer AddMetricHistograms %s", time.Since(start))
+	return stat, nil
+}
+
 // `value` must be the column grouped by, but renamed to "value"
 func (r *JobRepository) jobsStatisticsHistogram(
 	ctx context.Context,
@@ -487,3 +524,231 @@
 	log.Debugf("Timer jobsStatisticsHistogram %s", time.Since(start))
 	return points, nil
 }
+
+func (r *JobRepository) jobsMetricStatisticsHistogram(
+	ctx context.Context,
+	metric string,
+	filters []*model.JobFilter) (*model.MetricHistoPoints, error) {
+
+	var dbMetric string
+	switch metric {
+	case "cpu_load":
+		dbMetric = "load_avg"
+	case "flops_any":
+		dbMetric = "flops_any_avg"
+	case "mem_bw":
+		dbMetric = "mem_bw_avg"
+	case "mem_used":
+		dbMetric = "mem_used_max"
+	case "net_bw":
+		dbMetric = "net_bw_avg"
+	case "file_bw":
+		dbMetric = "file_bw_avg"
+	default:
+		return nil, fmt.Errorf("%s not implemented", metric)
+	}
+
+	// Get specific Peak or largest Peak
+	var metricConfig *schema.MetricConfig
+	var peak float64 = 0.0
+	var unit string = ""
+
+	for _, f := range filters {
+		if f.Cluster != nil {
+			metricConfig = archive.GetMetricConfig(*f.Cluster.Eq, metric)
+			peak = metricConfig.Peak
+			unit = metricConfig.Unit.Prefix + metricConfig.Unit.Base
+			log.Debugf("Cluster %s filter found with peak %f for %s", *f.Cluster.Eq, peak, metric)
+		}
+	}
+
+	if peak == 0.0 {
+		for _, c := range archive.Clusters {
+			for _, m := range c.MetricConfig {
+				if m.Name == metric {
+					if m.Peak > peak {
+						peak = m.Peak
+					}
+					if unit == "" {
+						unit = m.Unit.Prefix + m.Unit.Base
+					}
+				}
+			}
+		}
+	}
+
+	// log.Debugf("Metric %s: DB %s, Peak %f, Unit %s", metric, dbMetric, peak, unit)
+	// Make bins, see https://jereze.com/code/sql-histogram/
+
+	start := time.Now()
+
+	crossJoinQuery := sq.Select(
+		fmt.Sprintf(`max(%s) as max`, dbMetric),
+		fmt.Sprintf(`min(%s) as min`, dbMetric),
+	).From("job").Where(
+		fmt.Sprintf(`%s is not null`, dbMetric),
+	).Where(
+		fmt.Sprintf(`%s <= %f`, dbMetric, peak),
+	)
+
+	crossJoinQuery, cjqerr := SecurityCheck(ctx, crossJoinQuery)
+
+	if cjqerr != nil {
+		return nil, cjqerr
+	}
+
+	for _, f := range filters {
+		crossJoinQuery = BuildWhereClause(f, crossJoinQuery)
+	}
+
+	crossJoinQuerySql, crossJoinQueryArgs, sqlerr := crossJoinQuery.ToSql()
+	if sqlerr != nil {
+		return nil, sqlerr
+	}
+
+	bins := 10
+	binQuery := fmt.Sprintf(`CAST( (case when job.%s = value.max then value.max*0.999999999 else job.%s end - value.min) / (value.max - value.min) * %d as INTEGER )`, dbMetric, dbMetric, bins)
+
+	mainQuery := sq.Select(
+		fmt.Sprintf(`%s + 1 as bin`, binQuery),
+		fmt.Sprintf(`count(job.%s) as count`, dbMetric),
+		fmt.Sprintf(`CAST(((value.max / %d) * (%s )) as INTEGER ) as min`, bins, binQuery),
+		fmt.Sprintf(`CAST(((value.max / %d) * (%s + 1 )) as INTEGER ) as max`, bins, binQuery),
+	).From("job").CrossJoin(
+		fmt.Sprintf(`(%s) as value`, crossJoinQuerySql), crossJoinQueryArgs...,
+	).Where(fmt.Sprintf(`job.%s is not null and job.%s <= %f`, dbMetric, dbMetric, peak))
+
+	mainQuery, qerr := SecurityCheck(ctx, mainQuery)
+
+	if qerr != nil {
+		return nil, qerr
+	}
+
+	for _, f := range filters {
+		mainQuery = BuildWhereClause(f, mainQuery)
+	}
+
+	// Finalize query with Grouping and Ordering
+	mainQuery = mainQuery.GroupBy("bin").OrderBy("bin")
+
+	rows, err := mainQuery.RunWith(r.DB).Query()
+	if err != nil {
+		log.Errorf("Error while running mainQuery: %s", err)
+		return nil, err
+	}
+
+	points := make([]*model.MetricHistoPoint, 0)
+	for rows.Next() {
+		point := model.MetricHistoPoint{}
+		if err := rows.Scan(&point.Bin, &point.Count, &point.Min, &point.Max); err != nil {
+			log.Warnf("Error while scanning rows for %s", metric)
+			return nil, err // Does returning here brick cc-backend when all metrics are requested?
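+			// Note: AddMetricHistograms catches this error, logs a warning and
+			// continues with the next requested metric, so a failed scan for
+			// one metric does not abort the whole jobsStatistics query.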
+		}
+
+		points = append(points, &point)
+	}
+
+	result := model.MetricHistoPoints{Metric: metric, Unit: unit, Data: points}
+
+	log.Debugf("Timer jobsMetricStatisticsHistogram %s", time.Since(start))
+	return &result, nil
+}
+
+func (r *JobRepository) runningJobsMetricStatisticsHistogram(
+	ctx context.Context,
+	metrics []string,
+	filters []*model.JobFilter) []*model.MetricHistoPoints {
+
+	// Get Jobs (limited to 500 matches)
+	jobs, err := r.QueryJobs(ctx, filters, &model.PageRequest{Page: 1, ItemsPerPage: 500 + 1}, nil)
+	if err != nil {
+		log.Errorf("Error while querying jobs for histogram: %s", err)
+		return nil
+	}
+	if len(jobs) > 500 {
+		log.Errorf("too many jobs matched (max: %d)", 500)
+		return nil
+	}
+
+	// Get AVGs from metric repo
+	avgs := make([][]schema.Float, len(metrics))
+	for i := range avgs {
+		avgs[i] = make([]schema.Float, 0, len(jobs))
+	}
+
+	for _, job := range jobs {
+		if job.MonitoringStatus == schema.MonitoringStatusDisabled || job.MonitoringStatus == schema.MonitoringStatusArchivingFailed {
+			continue
+		}
+
+		if err := metricdata.LoadAverages(job, metrics, avgs, ctx); err != nil {
+			log.Errorf("Error while loading averages for histogram: %s", err)
+			return nil
+		}
+	}
+
+	// Iterate metrics to fill end result
+	data := make([]*model.MetricHistoPoints, 0)
+	for idx, metric := range metrics {
+		// Get specific Peak or largest Peak
+		var metricConfig *schema.MetricConfig
+		var peak float64 = 0.0
+		var unit string = ""
+
+		for _, f := range filters {
+			if f.Cluster != nil {
+				metricConfig = archive.GetMetricConfig(*f.Cluster.Eq, metric)
+				peak = metricConfig.Peak
+				unit = metricConfig.Unit.Prefix + metricConfig.Unit.Base
+				log.Debugf("Cluster %s filter found with peak %f for %s", *f.Cluster.Eq, peak, metric)
+			}
+		}
+
+		if peak == 0.0 {
+			for _, c := range archive.Clusters {
+				for _, m := range c.MetricConfig {
+					if m.Name == metric {
+						if m.Peak > peak {
+							peak = m.Peak
+						}
+						if unit == "" {
+							unit = m.Unit.Prefix + m.Unit.Base
+						}
+					}
+				}
+			}
+		}
+
+		// Make and fill bins
+		bins := 10.0
+		peakBin := peak / bins
+
+		points := make([]*model.MetricHistoPoint, 0)
+		for b := 0; b < 10; b++ {
+			count := 0
+			bindex := b + 1
+			bmin := math.Round(peakBin * float64(b))
+			bmax := math.Round(peakBin * (float64(b) + 1.0))
+
+			// Iterate AVG values for indexed metric and count for bins
+			for _, val := range avgs[idx] {
+				if float64(val) >= bmin && float64(val) < bmax {
+					count += 1
+				}
+			}
+
+			bminint := int(bmin)
+			bmaxint := int(bmax)
+
+			// Append Bin to Metric Result Array
+			point := model.MetricHistoPoint{Bin: &bindex, Count: count, Min: &bminint, Max: &bmaxint}
+			points = append(points, &point)
+		}
+
+		// Append Metric Result Array to final results array
+		result := model.MetricHistoPoints{Metric: metric, Unit: unit, Data: points}
+		data = append(data, &result)
+	}
+
+	return data
+}
diff --git a/pkg/archive/clusterConfig.go b/pkg/archive/clusterConfig.go
index 0b1c43b..b1bad0a 100644
--- a/pkg/archive/clusterConfig.go
+++ b/pkg/archive/clusterConfig.go
@@ -8,8 +8,8 @@ import (
 	"errors"
 	"fmt"
 
-	"github.com/ClusterCockpit/cc-backend/pkg/schema"
 	"github.com/ClusterCockpit/cc-backend/pkg/log"
+	"github.com/ClusterCockpit/cc-backend/pkg/schema"
 )
 
 var Clusters []*schema.Cluster
diff --git a/web/frontend/package-lock.json b/web/frontend/package-lock.json
index 3fe17e3..8874caf 100644
--- a/web/frontend/package-lock.json
+++ b/web/frontend/package-lock.json
@@ -369,12 +369,9 @@
       }
     },
     "node_modules/chart.js": {
-      "version": "4.4.0",
-      "resolved": "https://registry.npmjs.org/chart.js/-/chart.js-4.4.0.tgz",
-
"integrity": "sha512-vQEj6d+z0dcsKLlQvbKIMYFHd3t8W/7L2vfJIbYcfyPcRx92CsHqECpueN8qVGNlKyDcr5wBrYAYKnfu/9Q1hQ==", - "version": "4.4.0", - "resolved": "https://registry.npmjs.org/chart.js/-/chart.js-4.4.0.tgz", - "integrity": "sha512-vQEj6d+z0dcsKLlQvbKIMYFHd3t8W/7L2vfJIbYcfyPcRx92CsHqECpueN8qVGNlKyDcr5wBrYAYKnfu/9Q1hQ==", + "version": "4.4.1", + "resolved": "https://registry.npmjs.org/chart.js/-/chart.js-4.4.1.tgz", + "integrity": "sha512-C74QN1bxwV1v2PEujhmKjOZ7iUM4w6BWs23Md/6aOZZSlwMzeCIDGuZay++rBgChYru7/+QFeoQW0fQoP534Dg==", "dependencies": { "@kurkle/color": "^0.3.0" }, @@ -903,12 +900,9 @@ } }, "node_modules/terser": { - "version": "5.24.0", - "resolved": "https://registry.npmjs.org/terser/-/terser-5.24.0.tgz", - "integrity": "sha512-ZpGR4Hy3+wBEzVEnHvstMvqpD/nABNelQn/z2r0fjVWGQsN3bpOLzQlqDxmb4CDZnXq5lpjnQ+mHQLAOpfM5iw==", - "version": "5.24.0", - "resolved": "https://registry.npmjs.org/terser/-/terser-5.24.0.tgz", - "integrity": "sha512-ZpGR4Hy3+wBEzVEnHvstMvqpD/nABNelQn/z2r0fjVWGQsN3bpOLzQlqDxmb4CDZnXq5lpjnQ+mHQLAOpfM5iw==", + "version": "5.25.0", + "resolved": "https://registry.npmjs.org/terser/-/terser-5.25.0.tgz", + "integrity": "sha512-we0I9SIsfvNUMP77zC9HG+MylwYYsGFSBG8qm+13oud2Yh+O104y614FRbyjpxys16jZwot72Fpi827YvGzuqg==", "dev": true, "dependencies": { "@jridgewell/source-map": "^0.3.3", diff --git a/web/frontend/src/Analysis.root.svelte b/web/frontend/src/Analysis.root.svelte index aa4ae37..163d511 100644 --- a/web/frontend/src/Analysis.root.svelte +++ b/web/frontend/src/Analysis.root.svelte @@ -389,9 +389,10 @@ + import { Modal, ModalBody, ModalHeader, ModalFooter, + Button, ListGroup, ListGroupItem } from 'sveltestrap' + import { gql, getContextClient , mutationStore } from '@urql/svelte' + + export let cluster + export let metricsInHistograms + export let isOpen + + let availableMetrics = ['cpu_load', 'flops_any', 'mem_used', 'mem_bw', 'net_bw', 'file_bw'] + let pendingMetrics = [...metricsInHistograms] // Copy + const client = getContextClient() + + const updateConfigurationMutation = ({ name, value }) => { + return mutationStore({ + client: client, + query: gql`mutation($name: String!, $value: String!) { + updateConfiguration(name: $name, value: $value) + }`, + variables: { name, value } + }) + } + + function updateConfiguration(data) { + updateConfigurationMutation({ + name: data.name, + value: JSON.stringify(data.value) + }).subscribe(res => { + if (res.fetching === false && res.error) { + throw res.error + // console.log('Error on subscription: ' + res.error) + } + }) + } + + function closeAndApply() { + metricsInHistograms = [...pendingMetrics] // Set for parent + isOpen = !isOpen + updateConfiguration({ + name: cluster ? 
`user_view_histogramMetrics:${cluster}` : 'user_view_histogramMetrics', + value: metricsInHistograms + }) + } + + + (isOpen = !isOpen)}> + + Select metrics presented in histograms + + + + {#each availableMetrics as metric (metric)} + + + {metric} + + {/each} + + + + + + + diff --git a/web/frontend/src/Status.root.svelte b/web/frontend/src/Status.root.svelte index fffbfde..d00d9c3 100644 --- a/web/frontend/src/Status.root.svelte +++ b/web/frontend/src/Status.root.svelte @@ -15,6 +15,7 @@ Table, Progress, Icon, + Button } from "sveltestrap"; import { init, convert2uplot, transformPerNodeDataForRoofline } from "./utils.js"; import { scaleNumbers } from "./units.js"; @@ -24,6 +25,8 @@ getContextClient, mutationStore, } from "@urql/svelte"; + import PlotTable from './PlotTable.svelte' + import HistogramSelection from './HistogramSelection.svelte' const { query: initq } = init(); const ccconfig = getContext("cc-config"); @@ -63,6 +66,9 @@ option.key == ccconfig.status_view_selectedTopUserCategory ); + let isHistogramSelectionOpen = false + $: metricsInHistograms = cluster ? ccconfig[`user_view_histogramMetrics:${cluster}`] : (ccconfig.user_view_histogramMetrics || []) + const client = getContextClient(); $: mainQuery = queryStore({ client: client, @@ -73,6 +79,7 @@ $metrics: [String!] $from: Time! $to: Time! + $metricsInHistograms: [String!] ) { nodeMetrics( cluster: $cluster @@ -98,7 +105,7 @@ } } - stats: jobsStatistics(filter: $filter) { + stats: jobsStatistics(filter: $filter, metrics: $metricsInHistograms) { histDuration { count value @@ -115,6 +122,16 @@ count value } + histMetrics { + metric + unit + data { + min + max + count + bin + } + } } allocatedNodes(cluster: $cluster) { @@ -129,6 +146,7 @@ from: from.toISOString(), to: to.toISOString(), filter: [{ state: ["running"] }, { cluster: { eq: cluster } }], + metricsInHistograms: metricsInHistograms }, }); @@ -311,7 +329,7 @@

Current utilization of cluster "{cluster}"

- + {#if $initq.fetching || $mainQuery.fetching} {:else if $initq.error} @@ -321,6 +339,13 @@ {/if} + + + { @@ -666,4 +691,35 @@ {/key} +
+ {#if metricsInHistograms} + + + {#key $mainQuery.data.stats[0].histMetrics} + + + + + {/key} + + + {/if} {/if} + + diff --git a/web/frontend/src/User.root.svelte b/web/frontend/src/User.root.svelte index 163cb0a..73c4dd0 100644 --- a/web/frontend/src/User.root.svelte +++ b/web/frontend/src/User.root.svelte @@ -1,7 +1,7 @@
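Both new histogram code paths bucket each job's per-metric average into ten fixed-width bins: jobsMetricStatisticsHistogram computes the bins inside sqlite against the observed min/max of the footprint column, while runningJobsMetricStatisticsHistogram bins the loaded averages in memory against [0, peak]. As an illustration of the in-memory variant, a minimal self-contained Go sketch follows; it is not part of the patch, and the histoPoint type and binJobAverages function are invented names for this example.

```go
package main

import (
	"fmt"
	"math"
)

// histoPoint mirrors model.MetricHistoPoint, with plain ints for brevity.
type histoPoint struct {
	Bin, Count, Min, Max int
}

// binJobAverages sketches the fixed-width binning used for running jobs:
// ten bins spanning [0, peak], where bin b covers the half-open interval
// [round(peak/10*b), round(peak/10*(b+1))).
func binJobAverages(avgs []float64, peak float64) []histoPoint {
	const bins = 10
	binWidth := peak / bins

	points := make([]histoPoint, 0, bins)
	for b := 0; b < bins; b++ {
		bmin := math.Round(binWidth * float64(b))
		bmax := math.Round(binWidth * float64(b+1))

		// Count the job averages that fall into this bin.
		count := 0
		for _, v := range avgs {
			if v >= bmin && v < bmax {
				count++
			}
		}
		points = append(points, histoPoint{Bin: b + 1, Count: count, Min: int(bmin), Max: int(bmax)})
	}
	return points
}

func main() {
	// Five hypothetical job averages for one metric with a configured peak of 100.
	avgs := []float64{12.5, 48.0, 52.3, 55.1, 99.9}
	for _, p := range binJobAverages(avgs, 100.0) {
		fmt.Printf("bin %2d [%3d, %3d): %d job(s)\n", p.Bin, p.Min, p.Max, p.Count)
	}
}
```

Note the boundary behavior the sketch inherits from the patch: an average exactly equal to peak falls outside the last half-open interval and is not counted. The SQL variant avoids the analogous edge case by scaling the maximum value by 0.999999999 so that the largest observed value still lands in the top bin.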