From f5f36427a45d082cb92df72a79566ea8efbe75d0 Mon Sep 17 00:00:00 2001 From: Christoph Kluge Date: Thu, 13 Mar 2025 17:33:55 +0100 Subject: [PATCH] split statsTable data from jobMetrics query, initial commit - mainly backend changes - statstable changes only for prototyping --- api/schema.graphqls | 39 +- internal/graph/generated/generated.go | 1326 +++++++++++++---- internal/graph/model/models_gen.go | 22 +- internal/graph/schema.resolvers.go | 56 +- internal/metricDataDispatcher/dataLoader.go | 34 +- internal/metricdata/cc-metric-store.go | 93 +- internal/metricdata/influxdb-v2.go | 12 + internal/metricdata/metricdata.go | 5 +- internal/metricdata/prometheus.go | 14 +- internal/metricdata/utils.go | 12 +- pkg/archive/archive.go | 20 +- pkg/archive/fsBackend.go | 46 + pkg/archive/json.go | 37 + pkg/schema/metrics.go | 7 + web/frontend/src/Job.root.svelte | 44 +- web/frontend/src/job/Metric.svelte | 5 - web/frontend/src/job/StatsTable.svelte | 95 +- web/frontend/src/job/StatsTableEntry.svelte | 18 +- .../job/jobsummary/JobFootprintPolar.svelte | 12 +- 19 files changed, 1471 insertions(+), 426 deletions(-) diff --git a/api/schema.graphqls b/api/schema.graphqls index 9385a6f..ed8843c 100644 --- a/api/schema.graphqls +++ b/api/schema.graphqls @@ -137,11 +137,6 @@ type JobMetricWithName { metric: JobMetric! } -type JobMetricStatWithName { - name: String! - stats: MetricStatistics! -} - type JobMetric { unit: Unit timestep: Int! @@ -156,6 +151,30 @@ type Series { data: [NullableFloat!]! } +type StatsSeries { + mean: [NullableFloat!]! + median: [NullableFloat!]! + min: [NullableFloat!]! + max: [NullableFloat!]! +} + +type JobStatsWithScope { + name: String! + scope: MetricScope! + stats: [ScopedStats!]! +} + +type ScopedStats { + hostname: String! + id: String + data: MetricStatistics! +} + +type JobStats { + name: String! + stats: MetricStatistics! +} + type Unit { base: String! prefix: String @@ -167,13 +186,6 @@ type MetricStatistics { max: Float! 
} -type StatsSeries { - mean: [NullableFloat!]! - median: [NullableFloat!]! - min: [NullableFloat!]! - max: [NullableFloat!]! -} - type MetricFootprints { metric: String! data: [NullableFloat!]! @@ -247,7 +259,8 @@ type Query { job(id: ID!): Job jobMetrics(id: ID!, metrics: [String!], scopes: [MetricScope!], resolution: Int): [JobMetricWithName!]! - jobMetricStats(id: ID!, metrics: [String!]): [JobMetricStatWithName!]! + jobStats(id: ID!, metrics: [String!]): [JobStats!]! + scopedJobStats(id: ID!, metrics: [String!], scopes: [MetricScope!]): [JobStatsWithScope!]! jobsFootprints(filter: [JobFilter!], metrics: [String!]!): Footprints jobs(filter: [JobFilter!], page: PageRequest, order: OrderByInput): JobResultList! diff --git a/internal/graph/generated/generated.go b/internal/graph/generated/generated.go index b4c6e19..e5c9ca2 100644 --- a/internal/graph/generated/generated.go +++ b/internal/graph/generated/generated.go @@ -156,11 +156,6 @@ type ComplexityRoot struct { Unit func(childComplexity int) int } - JobMetricStatWithName struct { - Name func(childComplexity int) int - Stats func(childComplexity int) int - } - JobMetricWithName struct { Metric func(childComplexity int) int Name func(childComplexity int) int @@ -175,6 +170,17 @@ type ComplexityRoot struct { Offset func(childComplexity int) int } + JobStats struct { + Name func(childComplexity int) int + Stats func(childComplexity int) int + } + + JobStatsWithScope struct { + Name func(childComplexity int) int + Scope func(childComplexity int) int + Stats func(childComplexity int) int + } + JobsStatistics struct { HistDuration func(childComplexity int) int HistMetrics func(childComplexity int) int @@ -268,14 +274,15 @@ type ComplexityRoot struct { Clusters func(childComplexity int) int GlobalMetrics func(childComplexity int) int Job func(childComplexity int, id string) int - JobMetricStats func(childComplexity int, id string, metrics []string) int JobMetrics func(childComplexity int, id string, metrics []string, 
scopes []schema.MetricScope, resolution *int) int + JobStats func(childComplexity int, id string, metrics []string) int Jobs func(childComplexity int, filter []*model.JobFilter, page *model.PageRequest, order *model.OrderByInput) int JobsFootprints func(childComplexity int, filter []*model.JobFilter, metrics []string) int JobsStatistics func(childComplexity int, filter []*model.JobFilter, metrics []string, page *model.PageRequest, sortBy *model.SortByAggregate, groupBy *model.Aggregate, numDurationBins *string, numMetricBins *int) int NodeMetrics func(childComplexity int, cluster string, nodes []string, scopes []schema.MetricScope, metrics []string, from time.Time, to time.Time) int NodeMetricsList func(childComplexity int, cluster string, subCluster string, nodeFilter string, scopes []schema.MetricScope, metrics []string, from time.Time, to time.Time, page *model.PageRequest, resolution *int) int RooflineHeatmap func(childComplexity int, filter []*model.JobFilter, rows int, cols int, minX float64, minY float64, maxX float64, maxY float64) int + ScopedJobStats func(childComplexity int, id string, metrics []string, scopes []schema.MetricScope) int Tags func(childComplexity int) int User func(childComplexity int, username string) int } @@ -287,6 +294,12 @@ type ComplexityRoot struct { Hostname func(childComplexity int) int } + ScopedStats struct { + Data func(childComplexity int) int + Hostname func(childComplexity int) int + ID func(childComplexity int) int + } + Series struct { Data func(childComplexity int) int Hostname func(childComplexity int) int @@ -396,7 +409,8 @@ type QueryResolver interface { AllocatedNodes(ctx context.Context, cluster string) ([]*model.Count, error) Job(ctx context.Context, id string) (*schema.Job, error) JobMetrics(ctx context.Context, id string, metrics []string, scopes []schema.MetricScope, resolution *int) ([]*model.JobMetricWithName, error) - JobMetricStats(ctx context.Context, id string, metrics []string) 
([]*model.JobMetricStatWithName, error) + JobStats(ctx context.Context, id string, metrics []string) ([]*model.JobStats, error) + ScopedJobStats(ctx context.Context, id string, metrics []string, scopes []schema.MetricScope) ([]*model.JobStatsWithScope, error) JobsFootprints(ctx context.Context, filter []*model.JobFilter, metrics []string) (*model.Footprints, error) Jobs(ctx context.Context, filter []*model.JobFilter, page *model.PageRequest, order *model.OrderByInput) (*model.JobResultList, error) JobsStatistics(ctx context.Context, filter []*model.JobFilter, metrics []string, page *model.PageRequest, sortBy *model.SortByAggregate, groupBy *model.Aggregate, numDurationBins *string, numMetricBins *int) ([]*model.JobsStatistics, error) @@ -861,20 +875,6 @@ func (e *executableSchema) Complexity(typeName, field string, childComplexity in return e.complexity.JobMetric.Unit(childComplexity), true - case "JobMetricStatWithName.name": - if e.complexity.JobMetricStatWithName.Name == nil { - break - } - - return e.complexity.JobMetricStatWithName.Name(childComplexity), true - - case "JobMetricStatWithName.stats": - if e.complexity.JobMetricStatWithName.Stats == nil { - break - } - - return e.complexity.JobMetricStatWithName.Stats(childComplexity), true - case "JobMetricWithName.metric": if e.complexity.JobMetricWithName.Metric == nil { break @@ -931,6 +931,41 @@ func (e *executableSchema) Complexity(typeName, field string, childComplexity in return e.complexity.JobResultList.Offset(childComplexity), true + case "JobStats.name": + if e.complexity.JobStats.Name == nil { + break + } + + return e.complexity.JobStats.Name(childComplexity), true + + case "JobStats.stats": + if e.complexity.JobStats.Stats == nil { + break + } + + return e.complexity.JobStats.Stats(childComplexity), true + + case "JobStatsWithScope.name": + if e.complexity.JobStatsWithScope.Name == nil { + break + } + + return e.complexity.JobStatsWithScope.Name(childComplexity), true + + case 
"JobStatsWithScope.scope": + if e.complexity.JobStatsWithScope.Scope == nil { + break + } + + return e.complexity.JobStatsWithScope.Scope(childComplexity), true + + case "JobStatsWithScope.stats": + if e.complexity.JobStatsWithScope.Stats == nil { + break + } + + return e.complexity.JobStatsWithScope.Stats(childComplexity), true + case "JobsStatistics.histDuration": if e.complexity.JobsStatistics.HistDuration == nil { break @@ -1400,18 +1435,6 @@ func (e *executableSchema) Complexity(typeName, field string, childComplexity in return e.complexity.Query.Job(childComplexity, args["id"].(string)), true - case "Query.jobMetricStats": - if e.complexity.Query.JobMetricStats == nil { - break - } - - args, err := ec.field_Query_jobMetricStats_args(context.TODO(), rawArgs) - if err != nil { - return 0, false - } - - return e.complexity.Query.JobMetricStats(childComplexity, args["id"].(string), args["metrics"].([]string)), true - case "Query.jobMetrics": if e.complexity.Query.JobMetrics == nil { break @@ -1424,6 +1447,18 @@ func (e *executableSchema) Complexity(typeName, field string, childComplexity in return e.complexity.Query.JobMetrics(childComplexity, args["id"].(string), args["metrics"].([]string), args["scopes"].([]schema.MetricScope), args["resolution"].(*int)), true + case "Query.jobStats": + if e.complexity.Query.JobStats == nil { + break + } + + args, err := ec.field_Query_jobStats_args(context.TODO(), rawArgs) + if err != nil { + return 0, false + } + + return e.complexity.Query.JobStats(childComplexity, args["id"].(string), args["metrics"].([]string)), true + case "Query.jobs": if e.complexity.Query.Jobs == nil { break @@ -1496,6 +1531,18 @@ func (e *executableSchema) Complexity(typeName, field string, childComplexity in return e.complexity.Query.RooflineHeatmap(childComplexity, args["filter"].([]*model.JobFilter), args["rows"].(int), args["cols"].(int), args["minX"].(float64), args["minY"].(float64), args["maxX"].(float64), args["maxY"].(float64)), true + case 
"Query.scopedJobStats": + if e.complexity.Query.ScopedJobStats == nil { + break + } + + args, err := ec.field_Query_scopedJobStats_args(context.TODO(), rawArgs) + if err != nil { + return 0, false + } + + return e.complexity.Query.ScopedJobStats(childComplexity, args["id"].(string), args["metrics"].([]string), args["scopes"].([]schema.MetricScope)), true + case "Query.tags": if e.complexity.Query.Tags == nil { break @@ -1543,6 +1590,27 @@ func (e *executableSchema) Complexity(typeName, field string, childComplexity in return e.complexity.Resource.Hostname(childComplexity), true + case "ScopedStats.data": + if e.complexity.ScopedStats.Data == nil { + break + } + + return e.complexity.ScopedStats.Data(childComplexity), true + + case "ScopedStats.hostname": + if e.complexity.ScopedStats.Hostname == nil { + break + } + + return e.complexity.ScopedStats.Hostname(childComplexity), true + + case "ScopedStats.id": + if e.complexity.ScopedStats.ID == nil { + break + } + + return e.complexity.ScopedStats.ID(childComplexity), true + case "Series.data": if e.complexity.Series.Data == nil { break @@ -2131,11 +2199,6 @@ type JobMetricWithName { metric: JobMetric! } -type JobMetricStatWithName { - name: String! - stats: MetricStatistics! -} - type JobMetric { unit: Unit timestep: Int! @@ -2150,6 +2213,30 @@ type Series { data: [NullableFloat!]! } +type StatsSeries { + mean: [NullableFloat!]! + median: [NullableFloat!]! + min: [NullableFloat!]! + max: [NullableFloat!]! +} + +type JobStatsWithScope { + name: String! + scope: MetricScope! + stats: [ScopedStats!]! +} + +type ScopedStats { + hostname: String! + id: String + data: MetricStatistics! +} + +type JobStats { + name: String! + stats: MetricStatistics! +} + type Unit { base: String! prefix: String @@ -2161,13 +2248,6 @@ type MetricStatistics { max: Float! } -type StatsSeries { - mean: [NullableFloat!]! - median: [NullableFloat!]! - min: [NullableFloat!]! - max: [NullableFloat!]! -} - type MetricFootprints { metric: String! 
data: [NullableFloat!]! @@ -2241,7 +2321,8 @@ type Query { job(id: ID!): Job jobMetrics(id: ID!, metrics: [String!], scopes: [MetricScope!], resolution: Int): [JobMetricWithName!]! - jobMetricStats(id: ID!, metrics: [String!]): [JobMetricStatWithName!]! + jobStats(id: ID!, metrics: [String!]): [JobStats!]! + scopedJobStats(id: ID!, metrics: [String!], scopes: [MetricScope!]): [JobStatsWithScope!]! jobsFootprints(filter: [JobFilter!], metrics: [String!]!): Footprints jobs(filter: [JobFilter!], page: PageRequest, order: OrderByInput): JobResultList! @@ -2694,57 +2775,6 @@ func (ec *executionContext) field_Query_allocatedNodes_argsCluster( return zeroVal, nil } -func (ec *executionContext) field_Query_jobMetricStats_args(ctx context.Context, rawArgs map[string]any) (map[string]any, error) { - var err error - args := map[string]any{} - arg0, err := ec.field_Query_jobMetricStats_argsID(ctx, rawArgs) - if err != nil { - return nil, err - } - args["id"] = arg0 - arg1, err := ec.field_Query_jobMetricStats_argsMetrics(ctx, rawArgs) - if err != nil { - return nil, err - } - args["metrics"] = arg1 - return args, nil -} -func (ec *executionContext) field_Query_jobMetricStats_argsID( - ctx context.Context, - rawArgs map[string]any, -) (string, error) { - if _, ok := rawArgs["id"]; !ok { - var zeroVal string - return zeroVal, nil - } - - ctx = graphql.WithPathContext(ctx, graphql.NewPathWithField("id")) - if tmp, ok := rawArgs["id"]; ok { - return ec.unmarshalNID2string(ctx, tmp) - } - - var zeroVal string - return zeroVal, nil -} - -func (ec *executionContext) field_Query_jobMetricStats_argsMetrics( - ctx context.Context, - rawArgs map[string]any, -) ([]string, error) { - if _, ok := rawArgs["metrics"]; !ok { - var zeroVal []string - return zeroVal, nil - } - - ctx = graphql.WithPathContext(ctx, graphql.NewPathWithField("metrics")) - if tmp, ok := rawArgs["metrics"]; ok { - return ec.unmarshalOString2ᚕstringᚄ(ctx, tmp) - } - - var zeroVal []string - return zeroVal, nil -} - 
func (ec *executionContext) field_Query_jobMetrics_args(ctx context.Context, rawArgs map[string]any) (map[string]any, error) { var err error args := map[string]any{} @@ -2842,6 +2872,57 @@ func (ec *executionContext) field_Query_jobMetrics_argsResolution( return zeroVal, nil } +func (ec *executionContext) field_Query_jobStats_args(ctx context.Context, rawArgs map[string]any) (map[string]any, error) { + var err error + args := map[string]any{} + arg0, err := ec.field_Query_jobStats_argsID(ctx, rawArgs) + if err != nil { + return nil, err + } + args["id"] = arg0 + arg1, err := ec.field_Query_jobStats_argsMetrics(ctx, rawArgs) + if err != nil { + return nil, err + } + args["metrics"] = arg1 + return args, nil +} +func (ec *executionContext) field_Query_jobStats_argsID( + ctx context.Context, + rawArgs map[string]any, +) (string, error) { + if _, ok := rawArgs["id"]; !ok { + var zeroVal string + return zeroVal, nil + } + + ctx = graphql.WithPathContext(ctx, graphql.NewPathWithField("id")) + if tmp, ok := rawArgs["id"]; ok { + return ec.unmarshalNID2string(ctx, tmp) + } + + var zeroVal string + return zeroVal, nil +} + +func (ec *executionContext) field_Query_jobStats_argsMetrics( + ctx context.Context, + rawArgs map[string]any, +) ([]string, error) { + if _, ok := rawArgs["metrics"]; !ok { + var zeroVal []string + return zeroVal, nil + } + + ctx = graphql.WithPathContext(ctx, graphql.NewPathWithField("metrics")) + if tmp, ok := rawArgs["metrics"]; ok { + return ec.unmarshalOString2ᚕstringᚄ(ctx, tmp) + } + + var zeroVal []string + return zeroVal, nil +} + func (ec *executionContext) field_Query_job_args(ctx context.Context, rawArgs map[string]any) (map[string]any, error) { var err error args := map[string]any{} @@ -3682,6 +3763,80 @@ func (ec *executionContext) field_Query_rooflineHeatmap_argsMaxY( return zeroVal, nil } +func (ec *executionContext) field_Query_scopedJobStats_args(ctx context.Context, rawArgs map[string]any) (map[string]any, error) { + var err error + 
args := map[string]any{} + arg0, err := ec.field_Query_scopedJobStats_argsID(ctx, rawArgs) + if err != nil { + return nil, err + } + args["id"] = arg0 + arg1, err := ec.field_Query_scopedJobStats_argsMetrics(ctx, rawArgs) + if err != nil { + return nil, err + } + args["metrics"] = arg1 + arg2, err := ec.field_Query_scopedJobStats_argsScopes(ctx, rawArgs) + if err != nil { + return nil, err + } + args["scopes"] = arg2 + return args, nil +} +func (ec *executionContext) field_Query_scopedJobStats_argsID( + ctx context.Context, + rawArgs map[string]any, +) (string, error) { + if _, ok := rawArgs["id"]; !ok { + var zeroVal string + return zeroVal, nil + } + + ctx = graphql.WithPathContext(ctx, graphql.NewPathWithField("id")) + if tmp, ok := rawArgs["id"]; ok { + return ec.unmarshalNID2string(ctx, tmp) + } + + var zeroVal string + return zeroVal, nil +} + +func (ec *executionContext) field_Query_scopedJobStats_argsMetrics( + ctx context.Context, + rawArgs map[string]any, +) ([]string, error) { + if _, ok := rawArgs["metrics"]; !ok { + var zeroVal []string + return zeroVal, nil + } + + ctx = graphql.WithPathContext(ctx, graphql.NewPathWithField("metrics")) + if tmp, ok := rawArgs["metrics"]; ok { + return ec.unmarshalOString2ᚕstringᚄ(ctx, tmp) + } + + var zeroVal []string + return zeroVal, nil +} + +func (ec *executionContext) field_Query_scopedJobStats_argsScopes( + ctx context.Context, + rawArgs map[string]any, +) ([]schema.MetricScope, error) { + if _, ok := rawArgs["scopes"]; !ok { + var zeroVal []schema.MetricScope + return zeroVal, nil + } + + ctx = graphql.WithPathContext(ctx, graphql.NewPathWithField("scopes")) + if tmp, ok := rawArgs["scopes"]; ok { + return ec.unmarshalOMetricScope2ᚕgithubᚗcomᚋClusterCockpitᚋccᚑbackendᚋpkgᚋschemaᚐMetricScopeᚄ(ctx, tmp) + } + + var zeroVal []schema.MetricScope + return zeroVal, nil +} + func (ec *executionContext) field_Query_user_args(ctx context.Context, rawArgs map[string]any) (map[string]any, error) { var err error args := 
map[string]any{} @@ -6663,102 +6818,6 @@ func (ec *executionContext) fieldContext_JobMetric_statisticsSeries(_ context.Co return fc, nil } -func (ec *executionContext) _JobMetricStatWithName_name(ctx context.Context, field graphql.CollectedField, obj *model.JobMetricStatWithName) (ret graphql.Marshaler) { - fc, err := ec.fieldContext_JobMetricStatWithName_name(ctx, field) - if err != nil { - return graphql.Null - } - ctx = graphql.WithFieldContext(ctx, fc) - defer func() { - if r := recover(); r != nil { - ec.Error(ctx, ec.Recover(ctx, r)) - ret = graphql.Null - } - }() - resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (any, error) { - ctx = rctx // use context from middleware stack in children - return obj.Name, nil - }) - if err != nil { - ec.Error(ctx, err) - return graphql.Null - } - if resTmp == nil { - if !graphql.HasFieldError(ctx, fc) { - ec.Errorf(ctx, "must not be null") - } - return graphql.Null - } - res := resTmp.(string) - fc.Result = res - return ec.marshalNString2string(ctx, field.Selections, res) -} - -func (ec *executionContext) fieldContext_JobMetricStatWithName_name(_ context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) { - fc = &graphql.FieldContext{ - Object: "JobMetricStatWithName", - Field: field, - IsMethod: false, - IsResolver: false, - Child: func(ctx context.Context, field graphql.CollectedField) (*graphql.FieldContext, error) { - return nil, errors.New("field of type String does not have child fields") - }, - } - return fc, nil -} - -func (ec *executionContext) _JobMetricStatWithName_stats(ctx context.Context, field graphql.CollectedField, obj *model.JobMetricStatWithName) (ret graphql.Marshaler) { - fc, err := ec.fieldContext_JobMetricStatWithName_stats(ctx, field) - if err != nil { - return graphql.Null - } - ctx = graphql.WithFieldContext(ctx, fc) - defer func() { - if r := recover(); r != nil { - ec.Error(ctx, ec.Recover(ctx, r)) - ret = graphql.Null - } - }() - resTmp, err := 
ec.ResolverMiddleware(ctx, func(rctx context.Context) (any, error) { - ctx = rctx // use context from middleware stack in children - return obj.Stats, nil - }) - if err != nil { - ec.Error(ctx, err) - return graphql.Null - } - if resTmp == nil { - if !graphql.HasFieldError(ctx, fc) { - ec.Errorf(ctx, "must not be null") - } - return graphql.Null - } - res := resTmp.(*schema.MetricStatistics) - fc.Result = res - return ec.marshalNMetricStatistics2ᚖgithubᚗcomᚋClusterCockpitᚋccᚑbackendᚋpkgᚋschemaᚐMetricStatistics(ctx, field.Selections, res) -} - -func (ec *executionContext) fieldContext_JobMetricStatWithName_stats(_ context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) { - fc = &graphql.FieldContext{ - Object: "JobMetricStatWithName", - Field: field, - IsMethod: false, - IsResolver: false, - Child: func(ctx context.Context, field graphql.CollectedField) (*graphql.FieldContext, error) { - switch field.Name { - case "avg": - return ec.fieldContext_MetricStatistics_avg(ctx, field) - case "min": - return ec.fieldContext_MetricStatistics_min(ctx, field) - case "max": - return ec.fieldContext_MetricStatistics_max(ctx, field) - } - return nil, fmt.Errorf("no field named %q was found under type MetricStatistics", field.Name) - }, - } - return fc, nil -} - func (ec *executionContext) _JobMetricWithName_name(ctx context.Context, field graphql.CollectedField, obj *model.JobMetricWithName) (ret graphql.Marshaler) { fc, err := ec.fieldContext_JobMetricWithName_name(ctx, field) if err != nil { @@ -7163,6 +7222,242 @@ func (ec *executionContext) fieldContext_JobResultList_hasNextPage(_ context.Con return fc, nil } +func (ec *executionContext) _JobStats_name(ctx context.Context, field graphql.CollectedField, obj *model.JobStats) (ret graphql.Marshaler) { + fc, err := ec.fieldContext_JobStats_name(ctx, field) + if err != nil { + return graphql.Null + } + ctx = graphql.WithFieldContext(ctx, fc) + defer func() { + if r := recover(); r != nil { + 
ec.Error(ctx, ec.Recover(ctx, r)) + ret = graphql.Null + } + }() + resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (any, error) { + ctx = rctx // use context from middleware stack in children + return obj.Name, nil + }) + if err != nil { + ec.Error(ctx, err) + return graphql.Null + } + if resTmp == nil { + if !graphql.HasFieldError(ctx, fc) { + ec.Errorf(ctx, "must not be null") + } + return graphql.Null + } + res := resTmp.(string) + fc.Result = res + return ec.marshalNString2string(ctx, field.Selections, res) +} + +func (ec *executionContext) fieldContext_JobStats_name(_ context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) { + fc = &graphql.FieldContext{ + Object: "JobStats", + Field: field, + IsMethod: false, + IsResolver: false, + Child: func(ctx context.Context, field graphql.CollectedField) (*graphql.FieldContext, error) { + return nil, errors.New("field of type String does not have child fields") + }, + } + return fc, nil +} + +func (ec *executionContext) _JobStats_stats(ctx context.Context, field graphql.CollectedField, obj *model.JobStats) (ret graphql.Marshaler) { + fc, err := ec.fieldContext_JobStats_stats(ctx, field) + if err != nil { + return graphql.Null + } + ctx = graphql.WithFieldContext(ctx, fc) + defer func() { + if r := recover(); r != nil { + ec.Error(ctx, ec.Recover(ctx, r)) + ret = graphql.Null + } + }() + resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (any, error) { + ctx = rctx // use context from middleware stack in children + return obj.Stats, nil + }) + if err != nil { + ec.Error(ctx, err) + return graphql.Null + } + if resTmp == nil { + if !graphql.HasFieldError(ctx, fc) { + ec.Errorf(ctx, "must not be null") + } + return graphql.Null + } + res := resTmp.(*schema.MetricStatistics) + fc.Result = res + return ec.marshalNMetricStatistics2ᚖgithubᚗcomᚋClusterCockpitᚋccᚑbackendᚋpkgᚋschemaᚐMetricStatistics(ctx, field.Selections, res) +} + +func (ec *executionContext) 
fieldContext_JobStats_stats(_ context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) { + fc = &graphql.FieldContext{ + Object: "JobStats", + Field: field, + IsMethod: false, + IsResolver: false, + Child: func(ctx context.Context, field graphql.CollectedField) (*graphql.FieldContext, error) { + switch field.Name { + case "avg": + return ec.fieldContext_MetricStatistics_avg(ctx, field) + case "min": + return ec.fieldContext_MetricStatistics_min(ctx, field) + case "max": + return ec.fieldContext_MetricStatistics_max(ctx, field) + } + return nil, fmt.Errorf("no field named %q was found under type MetricStatistics", field.Name) + }, + } + return fc, nil +} + +func (ec *executionContext) _JobStatsWithScope_name(ctx context.Context, field graphql.CollectedField, obj *model.JobStatsWithScope) (ret graphql.Marshaler) { + fc, err := ec.fieldContext_JobStatsWithScope_name(ctx, field) + if err != nil { + return graphql.Null + } + ctx = graphql.WithFieldContext(ctx, fc) + defer func() { + if r := recover(); r != nil { + ec.Error(ctx, ec.Recover(ctx, r)) + ret = graphql.Null + } + }() + resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (any, error) { + ctx = rctx // use context from middleware stack in children + return obj.Name, nil + }) + if err != nil { + ec.Error(ctx, err) + return graphql.Null + } + if resTmp == nil { + if !graphql.HasFieldError(ctx, fc) { + ec.Errorf(ctx, "must not be null") + } + return graphql.Null + } + res := resTmp.(string) + fc.Result = res + return ec.marshalNString2string(ctx, field.Selections, res) +} + +func (ec *executionContext) fieldContext_JobStatsWithScope_name(_ context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) { + fc = &graphql.FieldContext{ + Object: "JobStatsWithScope", + Field: field, + IsMethod: false, + IsResolver: false, + Child: func(ctx context.Context, field graphql.CollectedField) (*graphql.FieldContext, error) { + return nil, errors.New("field of 
type String does not have child fields") + }, + } + return fc, nil +} + +func (ec *executionContext) _JobStatsWithScope_scope(ctx context.Context, field graphql.CollectedField, obj *model.JobStatsWithScope) (ret graphql.Marshaler) { + fc, err := ec.fieldContext_JobStatsWithScope_scope(ctx, field) + if err != nil { + return graphql.Null + } + ctx = graphql.WithFieldContext(ctx, fc) + defer func() { + if r := recover(); r != nil { + ec.Error(ctx, ec.Recover(ctx, r)) + ret = graphql.Null + } + }() + resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (any, error) { + ctx = rctx // use context from middleware stack in children + return obj.Scope, nil + }) + if err != nil { + ec.Error(ctx, err) + return graphql.Null + } + if resTmp == nil { + if !graphql.HasFieldError(ctx, fc) { + ec.Errorf(ctx, "must not be null") + } + return graphql.Null + } + res := resTmp.(schema.MetricScope) + fc.Result = res + return ec.marshalNMetricScope2githubᚗcomᚋClusterCockpitᚋccᚑbackendᚋpkgᚋschemaᚐMetricScope(ctx, field.Selections, res) +} + +func (ec *executionContext) fieldContext_JobStatsWithScope_scope(_ context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) { + fc = &graphql.FieldContext{ + Object: "JobStatsWithScope", + Field: field, + IsMethod: false, + IsResolver: false, + Child: func(ctx context.Context, field graphql.CollectedField) (*graphql.FieldContext, error) { + return nil, errors.New("field of type MetricScope does not have child fields") + }, + } + return fc, nil +} + +func (ec *executionContext) _JobStatsWithScope_stats(ctx context.Context, field graphql.CollectedField, obj *model.JobStatsWithScope) (ret graphql.Marshaler) { + fc, err := ec.fieldContext_JobStatsWithScope_stats(ctx, field) + if err != nil { + return graphql.Null + } + ctx = graphql.WithFieldContext(ctx, fc) + defer func() { + if r := recover(); r != nil { + ec.Error(ctx, ec.Recover(ctx, r)) + ret = graphql.Null + } + }() + resTmp, err := 
ec.ResolverMiddleware(ctx, func(rctx context.Context) (any, error) { + ctx = rctx // use context from middleware stack in children + return obj.Stats, nil + }) + if err != nil { + ec.Error(ctx, err) + return graphql.Null + } + if resTmp == nil { + if !graphql.HasFieldError(ctx, fc) { + ec.Errorf(ctx, "must not be null") + } + return graphql.Null + } + res := resTmp.([]*model.ScopedStats) + fc.Result = res + return ec.marshalNScopedStats2ᚕᚖgithubᚗcomᚋClusterCockpitᚋccᚑbackendᚋinternalᚋgraphᚋmodelᚐScopedStatsᚄ(ctx, field.Selections, res) +} + +func (ec *executionContext) fieldContext_JobStatsWithScope_stats(_ context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) { + fc = &graphql.FieldContext{ + Object: "JobStatsWithScope", + Field: field, + IsMethod: false, + IsResolver: false, + Child: func(ctx context.Context, field graphql.CollectedField) (*graphql.FieldContext, error) { + switch field.Name { + case "hostname": + return ec.fieldContext_ScopedStats_hostname(ctx, field) + case "id": + return ec.fieldContext_ScopedStats_id(ctx, field) + case "data": + return ec.fieldContext_ScopedStats_data(ctx, field) + } + return nil, fmt.Errorf("no field named %q was found under type ScopedStats", field.Name) + }, + } + return fc, nil +} + func (ec *executionContext) _JobsStatistics_id(ctx context.Context, field graphql.CollectedField, obj *model.JobsStatistics) (ret graphql.Marshaler) { fc, err := ec.fieldContext_JobsStatistics_id(ctx, field) if err != nil { @@ -10296,8 +10591,8 @@ func (ec *executionContext) fieldContext_Query_jobMetrics(ctx context.Context, f return fc, nil } -func (ec *executionContext) _Query_jobMetricStats(ctx context.Context, field graphql.CollectedField) (ret graphql.Marshaler) { - fc, err := ec.fieldContext_Query_jobMetricStats(ctx, field) +func (ec *executionContext) _Query_jobStats(ctx context.Context, field graphql.CollectedField) (ret graphql.Marshaler) { + fc, err := ec.fieldContext_Query_jobStats(ctx, field) if err != 
nil { return graphql.Null } @@ -10310,7 +10605,7 @@ func (ec *executionContext) _Query_jobMetricStats(ctx context.Context, field gra }() resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (any, error) { ctx = rctx // use context from middleware stack in children - return ec.resolvers.Query().JobMetricStats(rctx, fc.Args["id"].(string), fc.Args["metrics"].([]string)) + return ec.resolvers.Query().JobStats(rctx, fc.Args["id"].(string), fc.Args["metrics"].([]string)) }) if err != nil { ec.Error(ctx, err) @@ -10322,12 +10617,12 @@ func (ec *executionContext) _Query_jobMetricStats(ctx context.Context, field gra } return graphql.Null } - res := resTmp.([]*model.JobMetricStatWithName) + res := resTmp.([]*model.JobStats) fc.Result = res - return ec.marshalNJobMetricStatWithName2ᚕᚖgithubᚗcomᚋClusterCockpitᚋccᚑbackendᚋinternalᚋgraphᚋmodelᚐJobMetricStatWithNameᚄ(ctx, field.Selections, res) + return ec.marshalNJobStats2ᚕᚖgithubᚗcomᚋClusterCockpitᚋccᚑbackendᚋinternalᚋgraphᚋmodelᚐJobStatsᚄ(ctx, field.Selections, res) } -func (ec *executionContext) fieldContext_Query_jobMetricStats(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) { +func (ec *executionContext) fieldContext_Query_jobStats(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) { fc = &graphql.FieldContext{ Object: "Query", Field: field, @@ -10336,11 +10631,11 @@ func (ec *executionContext) fieldContext_Query_jobMetricStats(ctx context.Contex Child: func(ctx context.Context, field graphql.CollectedField) (*graphql.FieldContext, error) { switch field.Name { case "name": - return ec.fieldContext_JobMetricStatWithName_name(ctx, field) + return ec.fieldContext_JobStats_name(ctx, field) case "stats": - return ec.fieldContext_JobMetricStatWithName_stats(ctx, field) + return ec.fieldContext_JobStats_stats(ctx, field) } - return nil, fmt.Errorf("no field named %q was found under type JobMetricStatWithName", field.Name) + return nil, 
fmt.Errorf("no field named %q was found under type JobStats", field.Name) }, } defer func() { @@ -10350,7 +10645,70 @@ func (ec *executionContext) fieldContext_Query_jobMetricStats(ctx context.Contex } }() ctx = graphql.WithFieldContext(ctx, fc) - if fc.Args, err = ec.field_Query_jobMetricStats_args(ctx, field.ArgumentMap(ec.Variables)); err != nil { + if fc.Args, err = ec.field_Query_jobStats_args(ctx, field.ArgumentMap(ec.Variables)); err != nil { + ec.Error(ctx, err) + return fc, err + } + return fc, nil +} + +func (ec *executionContext) _Query_scopedJobStats(ctx context.Context, field graphql.CollectedField) (ret graphql.Marshaler) { + fc, err := ec.fieldContext_Query_scopedJobStats(ctx, field) + if err != nil { + return graphql.Null + } + ctx = graphql.WithFieldContext(ctx, fc) + defer func() { + if r := recover(); r != nil { + ec.Error(ctx, ec.Recover(ctx, r)) + ret = graphql.Null + } + }() + resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (any, error) { + ctx = rctx // use context from middleware stack in children + return ec.resolvers.Query().ScopedJobStats(rctx, fc.Args["id"].(string), fc.Args["metrics"].([]string), fc.Args["scopes"].([]schema.MetricScope)) + }) + if err != nil { + ec.Error(ctx, err) + return graphql.Null + } + if resTmp == nil { + if !graphql.HasFieldError(ctx, fc) { + ec.Errorf(ctx, "must not be null") + } + return graphql.Null + } + res := resTmp.([]*model.JobStatsWithScope) + fc.Result = res + return ec.marshalNJobStatsWithScope2ᚕᚖgithubᚗcomᚋClusterCockpitᚋccᚑbackendᚋinternalᚋgraphᚋmodelᚐJobStatsWithScopeᚄ(ctx, field.Selections, res) +} + +func (ec *executionContext) fieldContext_Query_scopedJobStats(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) { + fc = &graphql.FieldContext{ + Object: "Query", + Field: field, + IsMethod: true, + IsResolver: true, + Child: func(ctx context.Context, field graphql.CollectedField) (*graphql.FieldContext, error) { + switch field.Name { + case 
"name": + return ec.fieldContext_JobStatsWithScope_name(ctx, field) + case "scope": + return ec.fieldContext_JobStatsWithScope_scope(ctx, field) + case "stats": + return ec.fieldContext_JobStatsWithScope_stats(ctx, field) + } + return nil, fmt.Errorf("no field named %q was found under type JobStatsWithScope", field.Name) + }, + } + defer func() { + if r := recover(); r != nil { + err = ec.Recover(ctx, r) + ec.Error(ctx, err) + } + }() + ctx = graphql.WithFieldContext(ctx, fc) + if fc.Args, err = ec.field_Query_scopedJobStats_args(ctx, field.ArgumentMap(ec.Variables)); err != nil { ec.Error(ctx, err) return fc, err } @@ -11058,6 +11416,143 @@ func (ec *executionContext) fieldContext_Resource_configuration(_ context.Contex return fc, nil } +func (ec *executionContext) _ScopedStats_hostname(ctx context.Context, field graphql.CollectedField, obj *model.ScopedStats) (ret graphql.Marshaler) { + fc, err := ec.fieldContext_ScopedStats_hostname(ctx, field) + if err != nil { + return graphql.Null + } + ctx = graphql.WithFieldContext(ctx, fc) + defer func() { + if r := recover(); r != nil { + ec.Error(ctx, ec.Recover(ctx, r)) + ret = graphql.Null + } + }() + resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (any, error) { + ctx = rctx // use context from middleware stack in children + return obj.Hostname, nil + }) + if err != nil { + ec.Error(ctx, err) + return graphql.Null + } + if resTmp == nil { + if !graphql.HasFieldError(ctx, fc) { + ec.Errorf(ctx, "must not be null") + } + return graphql.Null + } + res := resTmp.(string) + fc.Result = res + return ec.marshalNString2string(ctx, field.Selections, res) +} + +func (ec *executionContext) fieldContext_ScopedStats_hostname(_ context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) { + fc = &graphql.FieldContext{ + Object: "ScopedStats", + Field: field, + IsMethod: false, + IsResolver: false, + Child: func(ctx context.Context, field graphql.CollectedField) (*graphql.FieldContext, 
error) { + return nil, errors.New("field of type String does not have child fields") + }, + } + return fc, nil +} + +func (ec *executionContext) _ScopedStats_id(ctx context.Context, field graphql.CollectedField, obj *model.ScopedStats) (ret graphql.Marshaler) { + fc, err := ec.fieldContext_ScopedStats_id(ctx, field) + if err != nil { + return graphql.Null + } + ctx = graphql.WithFieldContext(ctx, fc) + defer func() { + if r := recover(); r != nil { + ec.Error(ctx, ec.Recover(ctx, r)) + ret = graphql.Null + } + }() + resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (any, error) { + ctx = rctx // use context from middleware stack in children + return obj.ID, nil + }) + if err != nil { + ec.Error(ctx, err) + return graphql.Null + } + if resTmp == nil { + return graphql.Null + } + res := resTmp.(*string) + fc.Result = res + return ec.marshalOString2ᚖstring(ctx, field.Selections, res) +} + +func (ec *executionContext) fieldContext_ScopedStats_id(_ context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) { + fc = &graphql.FieldContext{ + Object: "ScopedStats", + Field: field, + IsMethod: false, + IsResolver: false, + Child: func(ctx context.Context, field graphql.CollectedField) (*graphql.FieldContext, error) { + return nil, errors.New("field of type String does not have child fields") + }, + } + return fc, nil +} + +func (ec *executionContext) _ScopedStats_data(ctx context.Context, field graphql.CollectedField, obj *model.ScopedStats) (ret graphql.Marshaler) { + fc, err := ec.fieldContext_ScopedStats_data(ctx, field) + if err != nil { + return graphql.Null + } + ctx = graphql.WithFieldContext(ctx, fc) + defer func() { + if r := recover(); r != nil { + ec.Error(ctx, ec.Recover(ctx, r)) + ret = graphql.Null + } + }() + resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (any, error) { + ctx = rctx // use context from middleware stack in children + return obj.Data, nil + }) + if err != nil { + ec.Error(ctx, 
err) + return graphql.Null + } + if resTmp == nil { + if !graphql.HasFieldError(ctx, fc) { + ec.Errorf(ctx, "must not be null") + } + return graphql.Null + } + res := resTmp.(*schema.MetricStatistics) + fc.Result = res + return ec.marshalNMetricStatistics2ᚖgithubᚗcomᚋClusterCockpitᚋccᚑbackendᚋpkgᚋschemaᚐMetricStatistics(ctx, field.Selections, res) +} + +func (ec *executionContext) fieldContext_ScopedStats_data(_ context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) { + fc = &graphql.FieldContext{ + Object: "ScopedStats", + Field: field, + IsMethod: false, + IsResolver: false, + Child: func(ctx context.Context, field graphql.CollectedField) (*graphql.FieldContext, error) { + switch field.Name { + case "avg": + return ec.fieldContext_MetricStatistics_avg(ctx, field) + case "min": + return ec.fieldContext_MetricStatistics_min(ctx, field) + case "max": + return ec.fieldContext_MetricStatistics_max(ctx, field) + } + return nil, fmt.Errorf("no field named %q was found under type MetricStatistics", field.Name) + }, + } + return fc, nil +} + func (ec *executionContext) _Series_hostname(ctx context.Context, field graphql.CollectedField, obj *schema.Series) (ret graphql.Marshaler) { fc, err := ec.fieldContext_Series_hostname(ctx, field) if err != nil { @@ -16569,50 +17064,6 @@ func (ec *executionContext) _JobMetric(ctx context.Context, sel ast.SelectionSet return out } -var jobMetricStatWithNameImplementors = []string{"JobMetricStatWithName"} - -func (ec *executionContext) _JobMetricStatWithName(ctx context.Context, sel ast.SelectionSet, obj *model.JobMetricStatWithName) graphql.Marshaler { - fields := graphql.CollectFields(ec.OperationContext, sel, jobMetricStatWithNameImplementors) - - out := graphql.NewFieldSet(fields) - deferred := make(map[string]*graphql.FieldSet) - for i, field := range fields { - switch field.Name { - case "__typename": - out.Values[i] = graphql.MarshalString("JobMetricStatWithName") - case "name": - out.Values[i] = 
ec._JobMetricStatWithName_name(ctx, field, obj) - if out.Values[i] == graphql.Null { - out.Invalids++ - } - case "stats": - out.Values[i] = ec._JobMetricStatWithName_stats(ctx, field, obj) - if out.Values[i] == graphql.Null { - out.Invalids++ - } - default: - panic("unknown field " + strconv.Quote(field.Name)) - } - } - out.Dispatch(ctx) - if out.Invalids > 0 { - return graphql.Null - } - - atomic.AddInt32(&ec.deferred, int32(len(deferred))) - - for label, dfs := range deferred { - ec.processDeferredGroup(graphql.DeferredGroup{ - Label: label, - Path: graphql.GetPath(ctx), - FieldSet: dfs, - Context: ctx, - }) - } - - return out -} - var jobMetricWithNameImplementors = []string{"JobMetricWithName"} func (ec *executionContext) _JobMetricWithName(ctx context.Context, sel ast.SelectionSet, obj *model.JobMetricWithName) graphql.Marshaler { @@ -16709,6 +17160,99 @@ func (ec *executionContext) _JobResultList(ctx context.Context, sel ast.Selectio return out } +var jobStatsImplementors = []string{"JobStats"} + +func (ec *executionContext) _JobStats(ctx context.Context, sel ast.SelectionSet, obj *model.JobStats) graphql.Marshaler { + fields := graphql.CollectFields(ec.OperationContext, sel, jobStatsImplementors) + + out := graphql.NewFieldSet(fields) + deferred := make(map[string]*graphql.FieldSet) + for i, field := range fields { + switch field.Name { + case "__typename": + out.Values[i] = graphql.MarshalString("JobStats") + case "name": + out.Values[i] = ec._JobStats_name(ctx, field, obj) + if out.Values[i] == graphql.Null { + out.Invalids++ + } + case "stats": + out.Values[i] = ec._JobStats_stats(ctx, field, obj) + if out.Values[i] == graphql.Null { + out.Invalids++ + } + default: + panic("unknown field " + strconv.Quote(field.Name)) + } + } + out.Dispatch(ctx) + if out.Invalids > 0 { + return graphql.Null + } + + atomic.AddInt32(&ec.deferred, int32(len(deferred))) + + for label, dfs := range deferred { + ec.processDeferredGroup(graphql.DeferredGroup{ + Label: label, + 
Path: graphql.GetPath(ctx), + FieldSet: dfs, + Context: ctx, + }) + } + + return out +} + +var jobStatsWithScopeImplementors = []string{"JobStatsWithScope"} + +func (ec *executionContext) _JobStatsWithScope(ctx context.Context, sel ast.SelectionSet, obj *model.JobStatsWithScope) graphql.Marshaler { + fields := graphql.CollectFields(ec.OperationContext, sel, jobStatsWithScopeImplementors) + + out := graphql.NewFieldSet(fields) + deferred := make(map[string]*graphql.FieldSet) + for i, field := range fields { + switch field.Name { + case "__typename": + out.Values[i] = graphql.MarshalString("JobStatsWithScope") + case "name": + out.Values[i] = ec._JobStatsWithScope_name(ctx, field, obj) + if out.Values[i] == graphql.Null { + out.Invalids++ + } + case "scope": + out.Values[i] = ec._JobStatsWithScope_scope(ctx, field, obj) + if out.Values[i] == graphql.Null { + out.Invalids++ + } + case "stats": + out.Values[i] = ec._JobStatsWithScope_stats(ctx, field, obj) + if out.Values[i] == graphql.Null { + out.Invalids++ + } + default: + panic("unknown field " + strconv.Quote(field.Name)) + } + } + out.Dispatch(ctx) + if out.Invalids > 0 { + return graphql.Null + } + + atomic.AddInt32(&ec.deferred, int32(len(deferred))) + + for label, dfs := range deferred { + ec.processDeferredGroup(graphql.DeferredGroup{ + Label: label, + Path: graphql.GetPath(ctx), + FieldSet: dfs, + Context: ctx, + }) + } + + return out +} + var jobsStatisticsImplementors = []string{"JobsStatistics"} func (ec *executionContext) _JobsStatistics(ctx context.Context, sel ast.SelectionSet, obj *model.JobsStatistics) graphql.Marshaler { @@ -17513,7 +18057,7 @@ func (ec *executionContext) _Query(ctx context.Context, sel ast.SelectionSet) gr } out.Concurrently(i, func(ctx context.Context) graphql.Marshaler { return rrm(innerCtx) }) - case "jobMetricStats": + case "jobStats": field := field innerFunc := func(ctx context.Context, fs *graphql.FieldSet) (res graphql.Marshaler) { @@ -17522,7 +18066,29 @@ func (ec 
*executionContext) _Query(ctx context.Context, sel ast.SelectionSet) gr ec.Error(ctx, ec.Recover(ctx, r)) } }() - res = ec._Query_jobMetricStats(ctx, field) + res = ec._Query_jobStats(ctx, field) + if res == graphql.Null { + atomic.AddUint32(&fs.Invalids, 1) + } + return res + } + + rrm := func(ctx context.Context) graphql.Marshaler { + return ec.OperationContext.RootResolverMiddleware(ctx, + func(ctx context.Context) graphql.Marshaler { return innerFunc(ctx, out) }) + } + + out.Concurrently(i, func(ctx context.Context) graphql.Marshaler { return rrm(innerCtx) }) + case "scopedJobStats": + field := field + + innerFunc := func(ctx context.Context, fs *graphql.FieldSet) (res graphql.Marshaler) { + defer func() { + if r := recover(); r != nil { + ec.Error(ctx, ec.Recover(ctx, r)) + } + }() + res = ec._Query_scopedJobStats(ctx, field) if res == graphql.Null { atomic.AddUint32(&fs.Invalids, 1) } @@ -17740,6 +18306,52 @@ func (ec *executionContext) _Resource(ctx context.Context, sel ast.SelectionSet, return out } +var scopedStatsImplementors = []string{"ScopedStats"} + +func (ec *executionContext) _ScopedStats(ctx context.Context, sel ast.SelectionSet, obj *model.ScopedStats) graphql.Marshaler { + fields := graphql.CollectFields(ec.OperationContext, sel, scopedStatsImplementors) + + out := graphql.NewFieldSet(fields) + deferred := make(map[string]*graphql.FieldSet) + for i, field := range fields { + switch field.Name { + case "__typename": + out.Values[i] = graphql.MarshalString("ScopedStats") + case "hostname": + out.Values[i] = ec._ScopedStats_hostname(ctx, field, obj) + if out.Values[i] == graphql.Null { + out.Invalids++ + } + case "id": + out.Values[i] = ec._ScopedStats_id(ctx, field, obj) + case "data": + out.Values[i] = ec._ScopedStats_data(ctx, field, obj) + if out.Values[i] == graphql.Null { + out.Invalids++ + } + default: + panic("unknown field " + strconv.Quote(field.Name)) + } + } + out.Dispatch(ctx) + if out.Invalids > 0 { + return graphql.Null + } + + 
atomic.AddInt32(&ec.deferred, int32(len(deferred))) + + for label, dfs := range deferred { + ec.processDeferredGroup(graphql.DeferredGroup{ + Label: label, + Path: graphql.GetPath(ctx), + FieldSet: dfs, + Context: ctx, + }) + } + + return out +} + var seriesImplementors = []string{"Series"} func (ec *executionContext) _Series(ctx context.Context, sel ast.SelectionSet, obj *schema.Series) graphql.Marshaler { @@ -19346,60 +19958,6 @@ func (ec *executionContext) marshalNJobMetric2ᚖgithubᚗcomᚋClusterCockpit return ec._JobMetric(ctx, sel, v) } -func (ec *executionContext) marshalNJobMetricStatWithName2ᚕᚖgithubᚗcomᚋClusterCockpitᚋccᚑbackendᚋinternalᚋgraphᚋmodelᚐJobMetricStatWithNameᚄ(ctx context.Context, sel ast.SelectionSet, v []*model.JobMetricStatWithName) graphql.Marshaler { - ret := make(graphql.Array, len(v)) - var wg sync.WaitGroup - isLen1 := len(v) == 1 - if !isLen1 { - wg.Add(len(v)) - } - for i := range v { - i := i - fc := &graphql.FieldContext{ - Index: &i, - Result: &v[i], - } - ctx := graphql.WithFieldContext(ctx, fc) - f := func(i int) { - defer func() { - if r := recover(); r != nil { - ec.Error(ctx, ec.Recover(ctx, r)) - ret = nil - } - }() - if !isLen1 { - defer wg.Done() - } - ret[i] = ec.marshalNJobMetricStatWithName2ᚖgithubᚗcomᚋClusterCockpitᚋccᚑbackendᚋinternalᚋgraphᚋmodelᚐJobMetricStatWithName(ctx, sel, v[i]) - } - if isLen1 { - f(i) - } else { - go f(i) - } - - } - wg.Wait() - - for _, e := range ret { - if e == graphql.Null { - return graphql.Null - } - } - - return ret -} - -func (ec *executionContext) marshalNJobMetricStatWithName2ᚖgithubᚗcomᚋClusterCockpitᚋccᚑbackendᚋinternalᚋgraphᚋmodelᚐJobMetricStatWithName(ctx context.Context, sel ast.SelectionSet, v *model.JobMetricStatWithName) graphql.Marshaler { - if v == nil { - if !graphql.HasFieldError(ctx, graphql.GetFieldContext(ctx)) { - ec.Errorf(ctx, "the requested element is null which the schema does not allow") - } - return graphql.Null - } - return ec._JobMetricStatWithName(ctx, sel, v) -} 
- func (ec *executionContext) marshalNJobMetricWithName2ᚕᚖgithubᚗcomᚋClusterCockpitᚋccᚑbackendᚋinternalᚋgraphᚋmodelᚐJobMetricWithNameᚄ(ctx context.Context, sel ast.SelectionSet, v []*model.JobMetricWithName) graphql.Marshaler { ret := make(graphql.Array, len(v)) var wg sync.WaitGroup @@ -19478,6 +20036,114 @@ func (ec *executionContext) marshalNJobState2githubᚗcomᚋClusterCockpitᚋcc return v } +func (ec *executionContext) marshalNJobStats2ᚕᚖgithubᚗcomᚋClusterCockpitᚋccᚑbackendᚋinternalᚋgraphᚋmodelᚐJobStatsᚄ(ctx context.Context, sel ast.SelectionSet, v []*model.JobStats) graphql.Marshaler { + ret := make(graphql.Array, len(v)) + var wg sync.WaitGroup + isLen1 := len(v) == 1 + if !isLen1 { + wg.Add(len(v)) + } + for i := range v { + i := i + fc := &graphql.FieldContext{ + Index: &i, + Result: &v[i], + } + ctx := graphql.WithFieldContext(ctx, fc) + f := func(i int) { + defer func() { + if r := recover(); r != nil { + ec.Error(ctx, ec.Recover(ctx, r)) + ret = nil + } + }() + if !isLen1 { + defer wg.Done() + } + ret[i] = ec.marshalNJobStats2ᚖgithubᚗcomᚋClusterCockpitᚋccᚑbackendᚋinternalᚋgraphᚋmodelᚐJobStats(ctx, sel, v[i]) + } + if isLen1 { + f(i) + } else { + go f(i) + } + + } + wg.Wait() + + for _, e := range ret { + if e == graphql.Null { + return graphql.Null + } + } + + return ret +} + +func (ec *executionContext) marshalNJobStats2ᚖgithubᚗcomᚋClusterCockpitᚋccᚑbackendᚋinternalᚋgraphᚋmodelᚐJobStats(ctx context.Context, sel ast.SelectionSet, v *model.JobStats) graphql.Marshaler { + if v == nil { + if !graphql.HasFieldError(ctx, graphql.GetFieldContext(ctx)) { + ec.Errorf(ctx, "the requested element is null which the schema does not allow") + } + return graphql.Null + } + return ec._JobStats(ctx, sel, v) +} + +func (ec *executionContext) marshalNJobStatsWithScope2ᚕᚖgithubᚗcomᚋClusterCockpitᚋccᚑbackendᚋinternalᚋgraphᚋmodelᚐJobStatsWithScopeᚄ(ctx context.Context, sel ast.SelectionSet, v []*model.JobStatsWithScope) graphql.Marshaler { + ret := make(graphql.Array, len(v)) 
+ var wg sync.WaitGroup + isLen1 := len(v) == 1 + if !isLen1 { + wg.Add(len(v)) + } + for i := range v { + i := i + fc := &graphql.FieldContext{ + Index: &i, + Result: &v[i], + } + ctx := graphql.WithFieldContext(ctx, fc) + f := func(i int) { + defer func() { + if r := recover(); r != nil { + ec.Error(ctx, ec.Recover(ctx, r)) + ret = nil + } + }() + if !isLen1 { + defer wg.Done() + } + ret[i] = ec.marshalNJobStatsWithScope2ᚖgithubᚗcomᚋClusterCockpitᚋccᚑbackendᚋinternalᚋgraphᚋmodelᚐJobStatsWithScope(ctx, sel, v[i]) + } + if isLen1 { + f(i) + } else { + go f(i) + } + + } + wg.Wait() + + for _, e := range ret { + if e == graphql.Null { + return graphql.Null + } + } + + return ret +} + +func (ec *executionContext) marshalNJobStatsWithScope2ᚖgithubᚗcomᚋClusterCockpitᚋccᚑbackendᚋinternalᚋgraphᚋmodelᚐJobStatsWithScope(ctx context.Context, sel ast.SelectionSet, v *model.JobStatsWithScope) graphql.Marshaler { + if v == nil { + if !graphql.HasFieldError(ctx, graphql.GetFieldContext(ctx)) { + ec.Errorf(ctx, "the requested element is null which the schema does not allow") + } + return graphql.Null + } + return ec._JobStatsWithScope(ctx, sel, v) +} + func (ec *executionContext) marshalNJobsStatistics2ᚕᚖgithubᚗcomᚋClusterCockpitᚋccᚑbackendᚋinternalᚋgraphᚋmodelᚐJobsStatisticsᚄ(ctx context.Context, sel ast.SelectionSet, v []*model.JobsStatistics) graphql.Marshaler { ret := make(graphql.Array, len(v)) var wg sync.WaitGroup @@ -19891,6 +20557,60 @@ func (ec *executionContext) marshalNResource2ᚖgithubᚗcomᚋClusterCockpitᚋ return ec._Resource(ctx, sel, v) } +func (ec *executionContext) marshalNScopedStats2ᚕᚖgithubᚗcomᚋClusterCockpitᚋccᚑbackendᚋinternalᚋgraphᚋmodelᚐScopedStatsᚄ(ctx context.Context, sel ast.SelectionSet, v []*model.ScopedStats) graphql.Marshaler { + ret := make(graphql.Array, len(v)) + var wg sync.WaitGroup + isLen1 := len(v) == 1 + if !isLen1 { + wg.Add(len(v)) + } + for i := range v { + i := i + fc := &graphql.FieldContext{ + Index: &i, + Result: &v[i], + } + ctx := 
graphql.WithFieldContext(ctx, fc) + f := func(i int) { + defer func() { + if r := recover(); r != nil { + ec.Error(ctx, ec.Recover(ctx, r)) + ret = nil + } + }() + if !isLen1 { + defer wg.Done() + } + ret[i] = ec.marshalNScopedStats2ᚖgithubᚗcomᚋClusterCockpitᚋccᚑbackendᚋinternalᚋgraphᚋmodelᚐScopedStats(ctx, sel, v[i]) + } + if isLen1 { + f(i) + } else { + go f(i) + } + + } + wg.Wait() + + for _, e := range ret { + if e == graphql.Null { + return graphql.Null + } + } + + return ret +} + +func (ec *executionContext) marshalNScopedStats2ᚖgithubᚗcomᚋClusterCockpitᚋccᚑbackendᚋinternalᚋgraphᚋmodelᚐScopedStats(ctx context.Context, sel ast.SelectionSet, v *model.ScopedStats) graphql.Marshaler { + if v == nil { + if !graphql.HasFieldError(ctx, graphql.GetFieldContext(ctx)) { + ec.Errorf(ctx, "the requested element is null which the schema does not allow") + } + return graphql.Null + } + return ec._ScopedStats(ctx, sel, v) +} + func (ec *executionContext) marshalNSeries2githubᚗcomᚋClusterCockpitᚋccᚑbackendᚋpkgᚋschemaᚐSeries(ctx context.Context, sel ast.SelectionSet, v schema.Series) graphql.Marshaler { return ec._Series(ctx, sel, &v) } diff --git a/internal/graph/model/models_gen.go b/internal/graph/model/models_gen.go index d83a318..43c4e37 100644 --- a/internal/graph/model/models_gen.go +++ b/internal/graph/model/models_gen.go @@ -81,11 +81,6 @@ type JobLinkResultList struct { Count *int `json:"count,omitempty"` } -type JobMetricStatWithName struct { - Name string `json:"name"` - Stats *schema.MetricStatistics `json:"stats"` -} - type JobMetricWithName struct { Name string `json:"name"` Scope schema.MetricScope `json:"scope"` @@ -100,6 +95,17 @@ type JobResultList struct { HasNextPage *bool `json:"hasNextPage,omitempty"` } +type JobStats struct { + Name string `json:"name"` + Stats *schema.MetricStatistics `json:"stats"` +} + +type JobStatsWithScope struct { + Name string `json:"name"` + Scope schema.MetricScope `json:"scope"` + Stats []*ScopedStats `json:"stats"` +} + 
type JobsStatistics struct { ID string `json:"id"` Name string `json:"name"` @@ -173,6 +179,12 @@ type PageRequest struct { Page int `json:"page"` } +type ScopedStats struct { + Hostname string `json:"hostname"` + ID *string `json:"id,omitempty"` + Data *schema.MetricStatistics `json:"data"` +} + type StringInput struct { Eq *string `json:"eq,omitempty"` Neq *string `json:"neq,omitempty"` diff --git a/internal/graph/schema.resolvers.go b/internal/graph/schema.resolvers.go index ce1384b..1565c7e 100644 --- a/internal/graph/schema.resolvers.go +++ b/internal/graph/schema.resolvers.go @@ -301,24 +301,23 @@ func (r *queryResolver) JobMetrics(ctx context.Context, id string, metrics []str return res, err } -// JobMetricStats is the resolver for the jobMetricStats field. -func (r *queryResolver) JobMetricStats(ctx context.Context, id string, metrics []string) ([]*model.JobMetricStatWithName, error) { - +// JobStats is the resolver for the jobStats field. +func (r *queryResolver) JobStats(ctx context.Context, id string, metrics []string) ([]*model.JobStats, error) { job, err := r.Query().Job(ctx, id) if err != nil { - log.Warn("Error while querying job for metrics") + log.Warnf("Error while querying job %s for metrics", id) return nil, err } - data, err := metricDataDispatcher.LoadStatData(job, metrics, ctx) + data, err := metricDataDispatcher.LoadJobStats(job, metrics, ctx) if err != nil { - log.Warn("Error while loading job stat data") + log.Warnf("Error while loading job stat data for job id %s", id) return nil, err } - res := []*model.JobMetricStatWithName{} + res := []*model.JobStats{} for name, md := range data { - res = append(res, &model.JobMetricStatWithName{ + res = append(res, &model.JobStats{ Name: name, Stats: &md, }) @@ -327,6 +326,47 @@ func (r *queryResolver) JobMetricStats(ctx context.Context, id string, metrics [ return res, err } +// ScopedJobStats is the resolver for the scopedJobStats field. 
+func (r *queryResolver) ScopedJobStats(ctx context.Context, id string, metrics []string, scopes []schema.MetricScope) ([]*model.JobStatsWithScope, error) { + job, err := r.Query().Job(ctx, id) + if err != nil { + log.Warnf("Error while querying job %s for metrics", id) + return nil, err + } + + data, err := metricDataDispatcher.LoadScopedJobStats(job, metrics, scopes, ctx) + if err != nil { + log.Warnf("Error while loading scoped job stat data for job id %s", id) + return nil, err + } + + res := make([]*model.JobStatsWithScope, 0) + for name, scoped := range data { + for scope, stats := range scoped { + // log.Debugf("HANDLE >>>>> %s @ %s -> First Array Value %#v", name, scope, *stats[0]) + + mdlStats := make([]*model.ScopedStats, 0) + for _, stat := range stats { + // log.Debugf("CONVERT >>>>> >>>>> %s -> %v -> %#v", stat.Hostname, stat.Id, stat.Data) + mdlStats = append(mdlStats, &model.ScopedStats{ + Hostname: stat.Hostname, + ID: stat.Id, + Data: stat.Data, + }) + } + + // log.Debugf("APPEND >>>>> >>>>> %#v", mdlStats) + res = append(res, &model.JobStatsWithScope{ + Name: name, + Scope: scope, + Stats: mdlStats, + }) + } + } + + return res, nil +} + // JobsFootprints is the resolver for the jobsFootprints field. func (r *queryResolver) JobsFootprints(ctx context.Context, filter []*model.JobFilter, metrics []string) (*model.Footprints, error) { // NOTE: Legacy Naming! This resolver is for normalized histograms in analysis view only - *Not* related to DB "footprint" column! diff --git a/internal/metricDataDispatcher/dataLoader.go b/internal/metricDataDispatcher/dataLoader.go index f3f60b4..c6cecd8 100644 --- a/internal/metricDataDispatcher/dataLoader.go +++ b/internal/metricDataDispatcher/dataLoader.go @@ -224,8 +224,34 @@ func LoadAverages( return nil } -// Used for polar plots in frontend -func LoadStatData( +// Used for statsTable in frontend: Return scoped statistics by metric. 
+func LoadScopedJobStats( + job *schema.Job, + metrics []string, + scopes []schema.MetricScope, + ctx context.Context, +) (schema.ScopedJobStats, error) { + + if job.State != schema.JobStateRunning && !config.Keys.DisableArchive { + return archive.LoadScopedStatsFromArchive(job, metrics, scopes) + } + + repo, err := metricdata.GetMetricDataRepo(job.Cluster) + if err != nil { + return nil, fmt.Errorf("job %d: no metric data repository configured for '%s'", job.JobID, job.Cluster) + } + + scopedStats, err := repo.LoadScopedStats(job, metrics, scopes, ctx) + if err != nil { + log.Errorf("error while loading scoped statistics for job %d (User %s, Project %s)", job.JobID, job.User, job.Project) + return nil, err + } + + return scopedStats, nil +} + +// Used for polar plots in frontend: Aggregates statistics for all nodes to single values for job per metric. +func LoadJobStats( job *schema.Job, metrics []string, ctx context.Context, @@ -237,12 +263,12 @@ func LoadStatData( data := make(map[string]schema.MetricStatistics, len(metrics)) repo, err := metricdata.GetMetricDataRepo(job.Cluster) if err != nil { - return data, fmt.Errorf("METRICDATA/METRICDATA > no metric data repository configured for '%s'", job.Cluster) + return data, fmt.Errorf("job %d: no metric data repository configured for '%s'", job.JobID, job.Cluster) } stats, err := repo.LoadStats(job, metrics, ctx) if err != nil { - log.Errorf("Error while loading statistics for job %v (User %v, Project %v)", job.JobID, job.User, job.Project) + log.Errorf("error while loading statistics for job %d (User %s, Project %s)", job.JobID, job.User, job.Project) return data, err } diff --git a/internal/metricdata/cc-metric-store.go b/internal/metricdata/cc-metric-store.go index 2b92fbb..6635299 100644 --- a/internal/metricdata/cc-metric-store.go +++ b/internal/metricdata/cc-metric-store.go @@ -618,7 +618,98 @@ func (ccms *CCMetricStore) LoadStats( return stats, nil } -// TODO: Support sub-node-scope metrics! 
For this, the partition of a node needs to be known! +// LoadScopedStats loads only the per-scope statistics (min/avg/max) for a job, i.e. a data query with WithStats set and WithData unset, no resolution needed. +func (ccms *CCMetricStore) LoadScopedStats( + job *schema.Job, + metrics []string, + scopes []schema.MetricScope, + ctx context.Context, +) (schema.ScopedJobStats, error) { + queries, assignedScope, err := ccms.buildQueries(job, metrics, scopes, 0) + if err != nil { + log.Warn("Error while building queries") + return nil, err + } + + req := ApiQueryRequest{ + Cluster: job.Cluster, + From: job.StartTime.Unix(), + To: job.StartTime.Add(time.Duration(job.Duration) * time.Second).Unix(), + Queries: queries, + WithStats: true, + WithData: false, + } + + resBody, err := ccms.doRequest(ctx, &req) + if err != nil { + log.Error("Error while performing request") + return nil, err + } + + var errors []string + scopedJobStats := make(schema.ScopedJobStats) + + for i, row := range resBody.Results { + query := req.Queries[i] + metric := ccms.toLocalName(query.Metric) + scope := assignedScope[i] + + if _, ok := scopedJobStats[metric]; !ok { + scopedJobStats[metric] = make(map[schema.MetricScope][]*schema.ScopedStats) + } + + if _, ok := scopedJobStats[metric][scope]; !ok { + scopedJobStats[metric][scope] = make([]*schema.ScopedStats, 0) + } + + for ndx, res := range row { + if res.Error != nil { + /* Build list for "partial errors", if any */ + errors = append(errors, fmt.Sprintf("failed to fetch '%s' from host '%s': %s", query.Metric, query.Hostname, *res.Error)) + continue + } + + id := (*string)(nil) + if query.Type != nil { + id = new(string) + *id = query.TypeIds[ndx] + } + + if res.Avg.IsNaN() || res.Min.IsNaN() || res.Max.IsNaN() { + // Use "schema.Float()" because a regular float64 cannot be marshaled to JSON when NaN.
+ res.Avg = schema.Float(0) + res.Min = schema.Float(0) + res.Max = schema.Float(0) + } + + scopedJobStats[metric][scope] = append(scopedJobStats[metric][scope], &schema.ScopedStats{ + Hostname: query.Hostname, + Id: id, + Data: &schema.MetricStatistics{ + Avg: float64(res.Avg), + Min: float64(res.Min), + Max: float64(res.Max), + }, + }) + } + + // So that one can later check len(scopedJobStats[metric][scope]): Remove from map if empty + if len(scopedJobStats[metric][scope]) == 0 { + delete(scopedJobStats[metric], scope) + if len(scopedJobStats[metric]) == 0 { + delete(scopedJobStats, metric) + } + } + } + + if len(errors) != 0 { + /* Returns list for "partial errors" */ + return scopedJobStats, fmt.Errorf("METRICDATA/CCMS > Errors: %s", strings.Join(errors, ", ")) + } + return scopedJobStats, nil +} + +// TODO: Support sub-node-scope metrics! For this, the partition of a node needs to be known! - Todo Outdated with NodeListData? func (ccms *CCMetricStore) LoadNodeData( cluster string, metrics, nodes []string, diff --git a/internal/metricdata/influxdb-v2.go b/internal/metricdata/influxdb-v2.go index 79c2d4a..2a943b6 100644 --- a/internal/metricdata/influxdb-v2.go +++ b/internal/metricdata/influxdb-v2.go @@ -301,6 +301,18 @@ func (idb *InfluxDBv2DataRepository) LoadStats( return stats, nil } +func (idb *InfluxDBv2DataRepository) LoadScopedStats( + job *schema.Job, + metrics []string, + scopes []schema.MetricScope, + ctx context.Context) (schema.ScopedJobStats, error) { + + // TODO : Implement to be used in JobView Stats Table + log.Infof("LoadScopedStats unimplemented for InfluxDBv2DataRepository, Args: Job-ID %d, metrics %v, scopes %v", job.JobID, metrics, scopes) + + return nil, errors.New("METRICDATA/INFLUXV2 > unimplemented for InfluxDBv2DataRepository") +} + func (idb *InfluxDBv2DataRepository) LoadNodeData( cluster string, metrics, nodes []string, diff --git a/internal/metricdata/metricdata.go b/internal/metricdata/metricdata.go index 0fe94d1..f30d837 100644 
--- a/internal/metricdata/metricdata.go +++ b/internal/metricdata/metricdata.go @@ -24,9 +24,12 @@ type MetricDataRepository interface { // Return the JobData for the given job, only with the requested metrics. LoadData(job *schema.Job, metrics []string, scopes []schema.MetricScope, ctx context.Context, resolution int) (schema.JobData, error) - // Return a map of metrics to a map of nodes to the metric statistics of the job. node scope assumed for now. + // Return a map of metrics to a map of nodes to the metric statistics of the job. node scope only. LoadStats(job *schema.Job, metrics []string, ctx context.Context) (map[string]map[string]schema.MetricStatistics, error) + // Return a map of metrics to a map of scopes to the scoped metric statistics of the job. + LoadScopedStats(job *schema.Job, metrics []string, scopes []schema.MetricScope, ctx context.Context) (schema.ScopedJobStats, error) + // Return a map of hosts to a map of metrics at the requested scopes (currently only node) for that node. 
LoadNodeData(cluster string, metrics, nodes []string, scopes []schema.MetricScope, from, to time.Time, ctx context.Context) (map[string]map[string][]*schema.JobMetric, error) diff --git a/internal/metricdata/prometheus.go b/internal/metricdata/prometheus.go index cd849ce..fe829c0 100644 --- a/internal/metricdata/prometheus.go +++ b/internal/metricdata/prometheus.go @@ -448,6 +448,18 @@ func (pdb *PrometheusDataRepository) LoadNodeData( return data, nil } +func (pdb *PrometheusDataRepository) LoadScopedStats( + job *schema.Job, + metrics []string, + scopes []schema.MetricScope, + ctx context.Context) (schema.ScopedJobStats, error) { + + // TODO : Implement to be used in Job-View StatsTable + log.Infof("LoadScopedStats unimplemented for PrometheusDataRepository, Args: job-id %v, metrics %v, scopes %v", job.JobID, metrics, scopes) + + return nil, errors.New("METRICDATA/PROMETHEUS > unimplemented for PrometheusDataRepository") +} + func (pdb *PrometheusDataRepository) LoadNodeListData( cluster, subCluster, nodeFilter string, metrics []string, @@ -463,5 +475,5 @@ func (pdb *PrometheusDataRepository) LoadNodeListData( // TODO : Implement to be used in NodeList-View log.Infof("LoadNodeListData unimplemented for PrometheusDataRepository, Args: cluster %s, metrics %v, nodeFilter %v, scopes %v", cluster, metrics, nodeFilter, scopes) - return nil, totalNodes, hasNextPage, errors.New("METRICDATA/INFLUXV2 > unimplemented for PrometheusDataRepository") + return nil, totalNodes, hasNextPage, errors.New("METRICDATA/PROMETHEUS > unimplemented for PrometheusDataRepository") } diff --git a/internal/metricdata/utils.go b/internal/metricdata/utils.go index 48dd237..aa7bde1 100644 --- a/internal/metricdata/utils.go +++ b/internal/metricdata/utils.go @@ -36,7 +36,17 @@ func (tmdr *TestMetricDataRepository) LoadData( func (tmdr *TestMetricDataRepository) LoadStats( job *schema.Job, - metrics []string, ctx context.Context) (map[string]map[string]schema.MetricStatistics, error) { + metrics 
[]string, + ctx context.Context) (map[string]map[string]schema.MetricStatistics, error) { + + panic("TODO") +} + +func (tmdr *TestMetricDataRepository) LoadScopedStats( + job *schema.Job, + metrics []string, + scopes []schema.MetricScope, + ctx context.Context) (schema.ScopedJobStats, error) { panic("TODO") } diff --git a/pkg/archive/archive.go b/pkg/archive/archive.go index 2eabb52..002fd5e 100644 --- a/pkg/archive/archive.go +++ b/pkg/archive/archive.go @@ -27,6 +27,8 @@ type ArchiveBackend interface { LoadJobData(job *schema.Job) (schema.JobData, error) + LoadJobStats(job *schema.Job) (schema.ScopedJobStats, error) + LoadClusterCfg(name string) (*schema.Cluster, error) StoreJobMeta(jobMeta *schema.JobMeta) error @@ -125,7 +127,7 @@ func LoadAveragesFromArchive( return nil } -// Helper to metricdataloader.LoadStatData(). +// Helper to metricdataloader.LoadJobStats(). func LoadStatsFromArchive( job *schema.Job, metrics []string, @@ -154,6 +156,22 @@ func LoadStatsFromArchive( return data, nil } +// Helper to metricdataloader.LoadScopedJobStats(). 
+func LoadScopedStatsFromArchive( + job *schema.Job, + metrics []string, + scopes []schema.MetricScope, +) (schema.ScopedJobStats, error) { + + data, err := ar.LoadJobStats(job) + if err != nil { + log.Warn("Error while loading job metadata from archiveBackend") + return nil, err + } + + return data, nil +} + func GetStatistics(job *schema.Job) (map[string]schema.JobStatistics, error) { metaFile, err := ar.LoadJobMeta(job) if err != nil { diff --git a/pkg/archive/fsBackend.go b/pkg/archive/fsBackend.go index 8a43748..711b1f5 100644 --- a/pkg/archive/fsBackend.go +++ b/pkg/archive/fsBackend.go @@ -115,6 +115,40 @@ func loadJobData(filename string, isCompressed bool) (schema.JobData, error) { } } +func loadJobStats(filename string, isCompressed bool) (schema.ScopedJobStats, error) { + f, err := os.Open(filename) + + if err != nil { + log.Errorf("fsBackend LoadJobStats()- %v", err) + return nil, err + } + defer f.Close() + + if isCompressed { + r, err := gzip.NewReader(f) + if err != nil { + log.Errorf(" %v", err) + return nil, err + } + defer r.Close() + + if config.Keys.Validate { + if err := schema.Validate(schema.Data, r); err != nil { + return nil, fmt.Errorf("validate job data: %v", err) + } + } + + return DecodeJobStats(r, filename) + } else { + if config.Keys.Validate { + if err := schema.Validate(schema.Data, bufio.NewReader(f)); err != nil { + return nil, fmt.Errorf("validate job data: %v", err) + } + } + return DecodeJobStats(bufio.NewReader(f), filename) + } +} + func (fsa *FsArchive) Init(rawConfig json.RawMessage) (uint64, error) { var config FsArchiveConfig @@ -389,6 +423,18 @@ func (fsa *FsArchive) LoadJobData(job *schema.Job) (schema.JobData, error) { return loadJobData(filename, isCompressed) } +func (fsa *FsArchive) LoadJobStats(job *schema.Job) (schema.ScopedJobStats, error) { + var isCompressed bool = true + filename := getPath(job, fsa.path, "data.json.gz") + + if !util.CheckFileExists(filename) { + filename = getPath(job, fsa.path, "data.json") 
+ isCompressed = false + } + + return loadJobStats(filename, isCompressed) +} + func (fsa *FsArchive) LoadJobMeta(job *schema.Job) (*schema.JobMeta, error) { filename := getPath(job, fsa.path, "meta.json") return loadJobMeta(filename) diff --git a/pkg/archive/json.go b/pkg/archive/json.go index 1219658..5201b74 100644 --- a/pkg/archive/json.go +++ b/pkg/archive/json.go @@ -32,6 +32,43 @@ func DecodeJobData(r io.Reader, k string) (schema.JobData, error) { return data.(schema.JobData), nil } +func DecodeJobStats(r io.Reader, k string) (schema.ScopedJobStats, error) { + jobData, err := DecodeJobData(r, k) + // Convert schema.JobData to schema.ScopedJobStats + if jobData != nil { + scopedJobStats := make(schema.ScopedJobStats) + for metric, metricData := range jobData { + if _, ok := scopedJobStats[metric]; !ok { + scopedJobStats[metric] = make(map[schema.MetricScope][]*schema.ScopedStats) + } + + for scope, jobMetric := range metricData { + if _, ok := scopedJobStats[metric][scope]; !ok { + scopedJobStats[metric][scope] = make([]*schema.ScopedStats, 0) + } + + for _, series := range jobMetric.Series { + scopedJobStats[metric][scope] = append(scopedJobStats[metric][scope], &schema.ScopedStats{ + Hostname: series.Hostname, + Id: series.Id, + Data: &series.Statistics, + }) + } + + // So that one can later check len(scopedJobStats[metric][scope]): Remove from map if empty + if len(scopedJobStats[metric][scope]) == 0 { + delete(scopedJobStats[metric], scope) + if len(scopedJobStats[metric]) == 0 { + delete(scopedJobStats, metric) + } + } + } + } + return scopedJobStats, nil + } + return nil, err +} + func DecodeJobMeta(r io.Reader) (*schema.JobMeta, error) { var d schema.JobMeta if err := json.NewDecoder(r).Decode(&d); err != nil { diff --git a/pkg/schema/metrics.go b/pkg/schema/metrics.go index ffac21b..fbb85e4 100644 --- a/pkg/schema/metrics.go +++ b/pkg/schema/metrics.go @@ -15,6 +15,7 @@ import ( ) type JobData map[string]map[MetricScope]*JobMetric +type ScopedJobStats 
map[string]map[MetricScope][]*ScopedStats type JobMetric struct { StatisticsSeries *StatsSeries `json:"statisticsSeries,omitempty"` @@ -30,6 +31,12 @@ type Series struct { Statistics MetricStatistics `json:"statistics"` } +type ScopedStats struct { + Hostname string `json:"hostname"` + Id *string `json:"id,omitempty"` + Data *MetricStatistics `json:"data"` +} + type MetricStatistics struct { Avg float64 `json:"avg"` Min float64 `json:"min"` diff --git a/web/frontend/src/Job.root.svelte b/web/frontend/src/Job.root.svelte index 6980230..2fe5bc4 100644 --- a/web/frontend/src/Job.root.svelte +++ b/web/frontend/src/Job.root.svelte @@ -127,28 +127,17 @@ let job = $initq.data.job; if (!job) return; - const pendingMetrics = [ - ...( - ( - ccconfig[`job_view_selectedMetrics:${job.cluster}:${job.subCluster}`] || - ccconfig[`job_view_selectedMetrics:${job.cluster}`] - ) || - $initq.data.globalMetrics - .reduce((names, gm) => { - if (gm.availability.find((av) => av.cluster === job.cluster && av.subClusters.includes(job.subCluster))) { - names.push(gm.name); - } - return names; - }, []) - ), - ...( - ( - ccconfig[`job_view_nodestats_selectedMetrics:${job.cluster}:${job.subCluster}`] || - ccconfig[`job_view_nodestats_selectedMetrics:${job.cluster}`] - ) || - ccconfig[`job_view_nodestats_selectedMetrics`] - ), - ]; + const pendingMetrics = ( + ccconfig[`job_view_selectedMetrics:${job.cluster}:${job.subCluster}`] || + ccconfig[`job_view_selectedMetrics:${job.cluster}`] + ) || + $initq.data.globalMetrics + .reduce((names, gm) => { + if (gm.availability.find((av) => av.cluster === job.cluster && av.subClusters.includes(job.subCluster))) { + names.push(gm.name); + } + return names; + }, []) // Select default Scopes to load: Check before if any metric has accelerator scope by default const accScopeDefault = [...pendingMetrics].some(function (m) { @@ -343,7 +332,6 @@ {#if item.data} statsTable.moreLoaded(detail)} job={$initq.data.job} metricName={item.metric} 
metricUnit={$initq.data.globalMetrics.find((gm) => gm.name == item.metric)?.unit} @@ -404,15 +392,7 @@ class="overflow-x-auto" active={!somethingMissing} > - {#if $jobMetrics?.data?.jobMetrics} - {#key $jobMetrics.data.jobMetrics} - - {/key} - {/if} +
diff --git a/web/frontend/src/job/Metric.svelte b/web/frontend/src/job/Metric.svelte index bcfa4fd..b68ef47 100644 --- a/web/frontend/src/job/Metric.svelte +++ b/web/frontend/src/job/Metric.svelte @@ -150,11 +150,6 @@ // On additional scope request if (selectedScope == "load-all") { - // Push scope to statsTable (Needs to be in this case, else newly selected 'Metric.svelte' renders cause statsTable race condition) - const statsTableData = $metricData.data.singleUpdate.filter((x) => x.scope !== "node") - if (statsTableData.length > 0) { - dispatch("more-loaded", statsTableData); - } // Set selected scope to min of returned scopes selectedScope = minScope(scopes) nodeOnly = (selectedScope == "node") // "node" still only scope after load-all diff --git a/web/frontend/src/job/StatsTable.svelte b/web/frontend/src/job/StatsTable.svelte index c8f12f2..159d24b 100644 --- a/web/frontend/src/job/StatsTable.svelte +++ b/web/frontend/src/job/StatsTable.svelte @@ -3,13 +3,14 @@ Properties: - `job Object`: The job object - - `jobMetrics [Object]`: The jobs metricdata - - Exported: - - `moreLoaded`: Adds additional scopes requested from Metric.svelte in Job-View --> diff --git a/web/frontend/src/job/StatsTableEntry.svelte b/web/frontend/src/job/StatsTableEntry.svelte index 9504a63..dc2f628 100644 --- a/web/frontend/src/job/StatsTableEntry.svelte +++ b/web/frontend/src/job/StatsTableEntry.svelte @@ -37,8 +37,8 @@ return s.dir != "up" ? a[field] - b[field] : b[field] - a[field]; } else { return s.dir != "up" - ? a.statistics[field] - b.statistics[field] - : b.statistics[field] - a.statistics[field]; + ? 
a.data[field] - b.data[field] + : b.data[field] - a.data[field]; } }); } @@ -52,7 +52,7 @@ $: series = jobMetrics .find((jm) => jm.name == metric && jm.scope == scope) - ?.metric.series.filter((s) => s.hostname == host && s.statistics != null) + ?.stats.filter((s) => s.hostname == host && s.data != null) ?.sort(compareNumbers); @@ -60,13 +60,13 @@ No data {:else if series.length == 1 && scope == "node"} - {series[0].statistics.min} + {series[0].data.min} - {series[0].statistics.avg} + {series[0].data.avg} - {series[0].statistics.max} + {series[0].data.max} {:else} @@ -86,9 +86,9 @@ {#each series as s, i} {s.id ?? i} - {s.statistics.min} - {s.statistics.avg} - {s.statistics.max} + {s.data.min} + {s.data.avg} + {s.data.max} {/each} diff --git a/web/frontend/src/job/jobsummary/JobFootprintPolar.svelte b/web/frontend/src/job/jobsummary/JobFootprintPolar.svelte index cf90408..fe6693b 100644 --- a/web/frontend/src/job/jobsummary/JobFootprintPolar.svelte +++ b/web/frontend/src/job/jobsummary/JobFootprintPolar.svelte @@ -40,14 +40,14 @@ const client = getContextClient(); const polarQuery = gql` query ($dbid: ID!, $selectedMetrics: [String!]!) { - jobMetricStats(id: $dbid, metrics: $selectedMetrics) { + jobStats(id: $dbid, metrics: $selectedMetrics) { name stats { - min - avg - max - } + min + avg + max } + } } `; @@ -66,7 +66,7 @@ {:else} {/if} \ No newline at end of file