diff --git a/api/schema.graphqls b/api/schema.graphqls index 69e32e2..aa6aea2 100644 --- a/api/schema.graphqls +++ b/api/schema.graphqls @@ -28,6 +28,11 @@ type Job { resources: [Resource!]! concurrentJobs: JobLinkResultList + memUsedMax: Float + flopsAnyAvg: Float + memBwAvg: Float + loadAvg: Float + metaData: Any userData: User } @@ -198,7 +203,7 @@ type Query { jobsFootprints(filter: [JobFilter!], metrics: [String!]!): Footprints jobs(filter: [JobFilter!], page: PageRequest, order: OrderByInput): JobResultList! - jobsStatistics(filter: [JobFilter!], page: PageRequest, sortBy: SortByAggregate, groupBy: Aggregate): [JobsStatistics!]! + jobsStatistics(filter: [JobFilter!], metrics: [String!], page: PageRequest, sortBy: SortByAggregate, groupBy: Aggregate): [JobsStatistics!]! rooflineHeatmap(filter: [JobFilter!]!, rows: Int!, cols: Int!, minX: Float!, minY: Float!, maxX: Float!, maxY: Float!): [[Float!]!]! @@ -286,6 +291,19 @@ type HistoPoint { value: Int! } +type MetricHistoPoints { + metric: String! + unit: String! + data: [MetricHistoPoint!] +} + +type MetricHistoPoint { + bin: Int + count: Int! + min: Int + max: Int +} + type JobsStatistics { id: ID! # If `groupBy` was used, ID of the user/project/cluster name: String! # if User-Statistics: Given Name of Account (ID) Owner @@ -303,6 +321,7 @@ type JobsStatistics { histNumNodes: [HistoPoint!]! # value: number of nodes, count: number of jobs with that number of nodes histNumCores: [HistoPoint!]! # value: number of cores, count: number of jobs with that number of cores histNumAccs: [HistoPoint!]! # value: number of accs, count: number of jobs with that number of accs + histMetrics: [MetricHistoPoints!]! # metric: metricname, data array of histopoints: value: metric average bin, count: number of jobs with that metric average } input PageRequest { diff --git a/go.mod b/go.mod index 7c5802e..e10bdfb 100644 --- a/go.mod +++ b/go.mod @@ -26,7 +26,7 @@ require ( github.com/swaggo/http-swagger v1.3.3 github.com/swaggo/swag v1.16.2 github.com/vektah/gqlparser/v2 v2.5.10 - golang.org/x/crypto v0.15.0 + golang.org/x/crypto v0.16.0 golang.org/x/exp v0.0.0-20230510235704-dd950f8aeaea ) @@ -84,11 +84,11 @@ require ( github.com/xrash/smetrics v0.0.0-20201216005158-039620a65673 // indirect go.uber.org/atomic v1.10.0 // indirect golang.org/x/mod v0.14.0 // indirect - golang.org/x/net v0.18.0 // indirect + golang.org/x/net v0.19.0 // indirect golang.org/x/oauth2 v0.5.0 // indirect - golang.org/x/sys v0.14.0 // indirect + golang.org/x/sys v0.15.0 // indirect golang.org/x/text v0.14.0 // indirect - golang.org/x/tools v0.15.0 // indirect + golang.org/x/tools v0.16.0 // indirect google.golang.org/appengine v1.6.7 // indirect google.golang.org/protobuf v1.30.0 // indirect gopkg.in/ini.v1 v1.67.0 // indirect diff --git a/go.sum b/go.sum index 444308c..f6d58dc 100644 --- a/go.sum +++ b/go.sum @@ -1302,8 +1302,8 @@ golang.org/x/crypto v0.0.0-20210322153248-0c34fe9e7dc2/go.mod h1:T9bdIzuCu7OtxOm golang.org/x/crypto v0.0.0-20210817164053-32db794688a5/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= golang.org/x/crypto v0.0.0-20220622213112-05595931fe9d/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= -golang.org/x/crypto v0.15.0 h1:frVn1TEaCEaZcn3Tmd7Y2b5KKPaZ+I32Q2OA3kYp5TA= -golang.org/x/crypto v0.15.0/go.mod h1:4ChreQoLWfG3xLDer1WdlH5NdlQ3+mwnQq1YTKY+72g= +golang.org/x/crypto v0.16.0 h1:mMMrFzRSCF0GvB7Ne27XVtVAaXLrPmgPC7/v0tkwHaY= 
+golang.org/x/crypto v0.16.0/go.mod h1:gCAAfMLgwOJRpTjQ2zCCt2OcSfYMTeZVSRtQlPC7Nq4= golang.org/x/exp v0.0.0-20180321215751-8460e604b9de/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20180807140117-3d87b88a115f/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= @@ -1425,8 +1425,8 @@ golang.org/x/net v0.0.0-20220111093109-d55c255bac03/go.mod h1:9nx3DQGgdP8bBQD5qx golang.org/x/net v0.0.0-20220225172249-27dd8689420f/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= golang.org/x/net v0.2.0/go.mod h1:KqCZLdyyvdV855qA2rE3GC2aiw5xGR5TEjj8smXukLY= -golang.org/x/net v0.18.0 h1:mIYleuAkSbHh0tCv7RvjL3F6ZVbLjq4+R7zbOn3Kokg= -golang.org/x/net v0.18.0/go.mod h1:/czyP5RqHAH4odGYxBJ1qz0+CE5WZ+2j1YgoEo8F2jQ= +golang.org/x/net v0.19.0 h1:zTwKpTd2XuCqf8huc7Fo2iSy+4RHPd10s4KzeTnVr1c= +golang.org/x/net v0.19.0/go.mod h1:CfAk/cbD4CthTvqiEl8NpboMuiuOYsAr/7NOjZJtv1U= golang.org/x/oauth2 v0.0.0-20180227000427-d7d64896b5ff/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20181106182150-f42d05182288/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= @@ -1585,9 +1585,8 @@ golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.2.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.14.0 h1:Vz7Qs629MkJkGyHxUlRHizWJRG2j8fbQKjELVSNhy7Q= -golang.org/x/sys v0.14.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.15.0 h1:h48lPFYpsTvQJZF4EKyI4aLHaev3CxivZmv7yZig9pc= +golang.org/x/sys v0.15.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210220032956-6a3ed077a48d/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= @@ -1694,8 +1693,8 @@ golang.org/x/tools v0.1.3/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= golang.org/x/tools v0.1.4/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= golang.org/x/tools v0.1.5/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= -golang.org/x/tools v0.15.0 h1:zdAyfUGbYmuVokhzVmghFl2ZJh5QhcfebBgmVPFYA+8= -golang.org/x/tools v0.15.0/go.mod h1:hpksKq4dtpQWS1uQ61JkdqWM3LscIS6Slf+VVkm+wQk= +golang.org/x/tools v0.16.0 h1:GO788SKMRunPIBCXiQyo2AaexLstOrVhuAL5YwsckQM= +golang.org/x/tools v0.16.0/go.mod h1:kYVVN6I1mBNoB1OX+noeBjbRk4IUEPa7JJ+TJMEooJ0= golang.org/x/xerrors v0.0.0-20190410155217-1f06c39b4373/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20190513163551-3ee3066db522/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= diff 
--git a/internal/graph/generated/generated.go b/internal/graph/generated/generated.go index f29e2a0..d84f043 100644 --- a/internal/graph/generated/generated.go +++ b/internal/graph/generated/generated.go @@ -25,6 +25,7 @@ import ( // NewExecutableSchema creates an ExecutableSchema from the ResolverRoot interface. func NewExecutableSchema(cfg Config) graphql.ExecutableSchema { return &executableSchema{ + schema: cfg.Schema, resolvers: cfg.Resolvers, directives: cfg.Directives, complexity: cfg.Complexity, @@ -32,6 +33,7 @@ func NewExecutableSchema(cfg Config) graphql.ExecutableSchema { } type Config struct { + Schema *ast.Schema Resolvers ResolverRoot Directives DirectiveRoot Complexity ComplexityRoot @@ -88,8 +90,12 @@ type ComplexityRoot struct { ConcurrentJobs func(childComplexity int) int Duration func(childComplexity int) int Exclusive func(childComplexity int) int + FlopsAnyAvg func(childComplexity int) int ID func(childComplexity int) int JobID func(childComplexity int) int + LoadAvg func(childComplexity int) int + MemBwAvg func(childComplexity int) int + MemUsedMax func(childComplexity int) int MetaData func(childComplexity int) int MonitoringStatus func(childComplexity int) int NumAcc func(childComplexity int) int @@ -141,6 +147,7 @@ type ComplexityRoot struct { JobsStatistics struct { HistDuration func(childComplexity int) int + HistMetrics func(childComplexity int) int HistNumAccs func(childComplexity int) int HistNumCores func(childComplexity int) int HistNumNodes func(childComplexity int) int @@ -176,6 +183,19 @@ type ComplexityRoot struct { Metric func(childComplexity int) int } + MetricHistoPoint struct { + Bin func(childComplexity int) int + Count func(childComplexity int) int + Max func(childComplexity int) int + Min func(childComplexity int) int + } + + MetricHistoPoints struct { + Data func(childComplexity int) int + Metric func(childComplexity int) int + Unit func(childComplexity int) int + } + MetricStatistics struct { Avg func(childComplexity int) int Max func(childComplexity int) int @@ -208,7 +228,7 @@ type ComplexityRoot struct { JobMetrics func(childComplexity int, id string, metrics []string, scopes []schema.MetricScope) int Jobs func(childComplexity int, filter []*model.JobFilter, page *model.PageRequest, order *model.OrderByInput) int JobsFootprints func(childComplexity int, filter []*model.JobFilter, metrics []string) int - JobsStatistics func(childComplexity int, filter []*model.JobFilter, page *model.PageRequest, sortBy *model.SortByAggregate, groupBy *model.Aggregate) int + JobsStatistics func(childComplexity int, filter []*model.JobFilter, metrics []string, page *model.PageRequest, sortBy *model.SortByAggregate, groupBy *model.Aggregate) int NodeMetrics func(childComplexity int, cluster string, nodes []string, scopes []schema.MetricScope, metrics []string, from time.Time, to time.Time) int RooflineHeatmap func(childComplexity int, filter []*model.JobFilter, rows int, cols int, minX float64, minY float64, maxX float64, maxY float64) int Tags func(childComplexity int) int @@ -303,6 +323,7 @@ type JobResolver interface { Tags(ctx context.Context, obj *schema.Job) ([]*schema.Tag, error) ConcurrentJobs(ctx context.Context, obj *schema.Job) (*model.JobLinkResultList, error) + MetaData(ctx context.Context, obj *schema.Job) (interface{}, error) UserData(ctx context.Context, obj *schema.Job) (*model.User, error) } @@ -322,7 +343,7 @@ type QueryResolver interface { JobMetrics(ctx context.Context, id string, metrics []string, scopes []schema.MetricScope) 
([]*model.JobMetricWithName, error) JobsFootprints(ctx context.Context, filter []*model.JobFilter, metrics []string) (*model.Footprints, error) Jobs(ctx context.Context, filter []*model.JobFilter, page *model.PageRequest, order *model.OrderByInput) (*model.JobResultList, error) - JobsStatistics(ctx context.Context, filter []*model.JobFilter, page *model.PageRequest, sortBy *model.SortByAggregate, groupBy *model.Aggregate) ([]*model.JobsStatistics, error) + JobsStatistics(ctx context.Context, filter []*model.JobFilter, metrics []string, page *model.PageRequest, sortBy *model.SortByAggregate, groupBy *model.Aggregate) ([]*model.JobsStatistics, error) RooflineHeatmap(ctx context.Context, filter []*model.JobFilter, rows int, cols int, minX float64, minY float64, maxX float64, maxY float64) ([][]float64, error) NodeMetrics(ctx context.Context, cluster string, nodes []string, scopes []schema.MetricScope, metrics []string, from time.Time, to time.Time) ([]*model.NodeMetrics, error) } @@ -331,12 +352,16 @@ type SubClusterResolver interface { } type executableSchema struct { + schema *ast.Schema resolvers ResolverRoot directives DirectiveRoot complexity ComplexityRoot } func (e *executableSchema) Schema() *ast.Schema { + if e.schema != nil { + return e.schema + } return parsedSchema } @@ -485,6 +510,13 @@ func (e *executableSchema) Complexity(typeName, field string, childComplexity in return e.complexity.Job.Exclusive(childComplexity), true + case "Job.flopsAnyAvg": + if e.complexity.Job.FlopsAnyAvg == nil { + break + } + + return e.complexity.Job.FlopsAnyAvg(childComplexity), true + case "Job.id": if e.complexity.Job.ID == nil { break @@ -499,6 +531,27 @@ func (e *executableSchema) Complexity(typeName, field string, childComplexity in return e.complexity.Job.JobID(childComplexity), true + case "Job.loadAvg": + if e.complexity.Job.LoadAvg == nil { + break + } + + return e.complexity.Job.LoadAvg(childComplexity), true + + case "Job.memBwAvg": + if e.complexity.Job.MemBwAvg == nil { + break + } + + return e.complexity.Job.MemBwAvg(childComplexity), true + + case "Job.memUsedMax": + if e.complexity.Job.MemUsedMax == nil { + break + } + + return e.complexity.Job.MemUsedMax(childComplexity), true + case "Job.metaData": if e.complexity.Job.MetaData == nil { break @@ -730,6 +783,13 @@ func (e *executableSchema) Complexity(typeName, field string, childComplexity in return e.complexity.JobsStatistics.HistDuration(childComplexity), true + case "JobsStatistics.histMetrics": + if e.complexity.JobsStatistics.HistMetrics == nil { + break + } + + return e.complexity.JobsStatistics.HistMetrics(childComplexity), true + case "JobsStatistics.histNumAccs": if e.complexity.JobsStatistics.HistNumAccs == nil { break @@ -919,6 +979,55 @@ func (e *executableSchema) Complexity(typeName, field string, childComplexity in return e.complexity.MetricFootprints.Metric(childComplexity), true + case "MetricHistoPoint.bin": + if e.complexity.MetricHistoPoint.Bin == nil { + break + } + + return e.complexity.MetricHistoPoint.Bin(childComplexity), true + + case "MetricHistoPoint.count": + if e.complexity.MetricHistoPoint.Count == nil { + break + } + + return e.complexity.MetricHistoPoint.Count(childComplexity), true + + case "MetricHistoPoint.max": + if e.complexity.MetricHistoPoint.Max == nil { + break + } + + return e.complexity.MetricHistoPoint.Max(childComplexity), true + + case "MetricHistoPoint.min": + if e.complexity.MetricHistoPoint.Min == nil { + break + } + + return e.complexity.MetricHistoPoint.Min(childComplexity), true + + 
case "MetricHistoPoints.data": + if e.complexity.MetricHistoPoints.Data == nil { + break + } + + return e.complexity.MetricHistoPoints.Data(childComplexity), true + + case "MetricHistoPoints.metric": + if e.complexity.MetricHistoPoints.Metric == nil { + break + } + + return e.complexity.MetricHistoPoints.Metric(childComplexity), true + + case "MetricHistoPoints.unit": + if e.complexity.MetricHistoPoints.Unit == nil { + break + } + + return e.complexity.MetricHistoPoints.Unit(childComplexity), true + case "MetricStatistics.avg": if e.complexity.MetricStatistics.Avg == nil { break @@ -1112,7 +1221,7 @@ func (e *executableSchema) Complexity(typeName, field string, childComplexity in return 0, false } - return e.complexity.Query.JobsStatistics(childComplexity, args["filter"].([]*model.JobFilter), args["page"].(*model.PageRequest), args["sortBy"].(*model.SortByAggregate), args["groupBy"].(*model.Aggregate)), true + return e.complexity.Query.JobsStatistics(childComplexity, args["filter"].([]*model.JobFilter), args["metrics"].([]string), args["page"].(*model.PageRequest), args["sortBy"].(*model.SortByAggregate), args["groupBy"].(*model.Aggregate)), true case "Query.nodeMetrics": if e.complexity.Query.NodeMetrics == nil { @@ -1587,14 +1696,14 @@ func (ec *executionContext) introspectSchema() (*introspection.Schema, error) { if ec.DisableIntrospection { return nil, errors.New("introspection disabled") } - return introspection.WrapSchema(parsedSchema), nil + return introspection.WrapSchema(ec.Schema()), nil } func (ec *executionContext) introspectType(name string) (*introspection.Type, error) { if ec.DisableIntrospection { return nil, errors.New("introspection disabled") } - return introspection.WrapTypeFromDef(parsedSchema, parsedSchema.Types[name]), nil + return introspection.WrapTypeFromDef(ec.Schema(), ec.Schema().Types[name]), nil } var sources = []*ast.Source{ @@ -1628,6 +1737,11 @@ type Job { resources: [Resource!]! concurrentJobs: JobLinkResultList + memUsedMax: Float + flopsAnyAvg: Float + memBwAvg: Float + loadAvg: Float + metaData: Any userData: User } @@ -1798,7 +1912,7 @@ type Query { jobsFootprints(filter: [JobFilter!], metrics: [String!]!): Footprints jobs(filter: [JobFilter!], page: PageRequest, order: OrderByInput): JobResultList! - jobsStatistics(filter: [JobFilter!], page: PageRequest, sortBy: SortByAggregate, groupBy: Aggregate): [JobsStatistics!]! + jobsStatistics(filter: [JobFilter!], metrics: [String!], page: PageRequest, sortBy: SortByAggregate, groupBy: Aggregate): [JobsStatistics!]! rooflineHeatmap(filter: [JobFilter!]!, rows: Int!, cols: Int!, minX: Float!, minY: Float!, maxX: Float!, maxY: Float!): [[Float!]!]! @@ -1886,6 +2000,19 @@ type HistoPoint { value: Int! } +type MetricHistoPoints { + metric: String! + unit: String! + data: [MetricHistoPoint!] +} + +type MetricHistoPoint { + bin: Int + count: Int! + min: Int + max: Int +} + type JobsStatistics { id: ID! # If ` + "`" + `groupBy` + "`" + ` was used, ID of the user/project/cluster name: String! # if User-Statistics: Given Name of Account (ID) Owner @@ -1903,6 +2030,7 @@ type JobsStatistics { histNumNodes: [HistoPoint!]! # value: number of nodes, count: number of jobs with that number of nodes histNumCores: [HistoPoint!]! # value: number of cores, count: number of jobs with that number of cores histNumAccs: [HistoPoint!]! # value: number of accs, count: number of jobs with that number of accs + histMetrics: [MetricHistoPoints!]! 
# metric: metricname, data array of histopoints: value: metric average bin, count: number of jobs with that metric average } input PageRequest { @@ -2142,33 +2270,42 @@ func (ec *executionContext) field_Query_jobsStatistics_args(ctx context.Context, } } args["filter"] = arg0 - var arg1 *model.PageRequest + var arg1 []string + if tmp, ok := rawArgs["metrics"]; ok { + ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("metrics")) + arg1, err = ec.unmarshalOString2ᚕstringᚄ(ctx, tmp) + if err != nil { + return nil, err + } + } + args["metrics"] = arg1 + var arg2 *model.PageRequest if tmp, ok := rawArgs["page"]; ok { ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("page")) - arg1, err = ec.unmarshalOPageRequest2ᚖgithubᚗcomᚋClusterCockpitᚋccᚑbackendᚋinternalᚋgraphᚋmodelᚐPageRequest(ctx, tmp) + arg2, err = ec.unmarshalOPageRequest2ᚖgithubᚗcomᚋClusterCockpitᚋccᚑbackendᚋinternalᚋgraphᚋmodelᚐPageRequest(ctx, tmp) if err != nil { return nil, err } } - args["page"] = arg1 - var arg2 *model.SortByAggregate + args["page"] = arg2 + var arg3 *model.SortByAggregate if tmp, ok := rawArgs["sortBy"]; ok { ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("sortBy")) - arg2, err = ec.unmarshalOSortByAggregate2ᚖgithubᚗcomᚋClusterCockpitᚋccᚑbackendᚋinternalᚋgraphᚋmodelᚐSortByAggregate(ctx, tmp) + arg3, err = ec.unmarshalOSortByAggregate2ᚖgithubᚗcomᚋClusterCockpitᚋccᚑbackendᚋinternalᚋgraphᚋmodelᚐSortByAggregate(ctx, tmp) if err != nil { return nil, err } } - args["sortBy"] = arg2 - var arg3 *model.Aggregate + args["sortBy"] = arg3 + var arg4 *model.Aggregate if tmp, ok := rawArgs["groupBy"]; ok { ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("groupBy")) - arg3, err = ec.unmarshalOAggregate2ᚖgithubᚗcomᚋClusterCockpitᚋccᚑbackendᚋinternalᚋgraphᚋmodelᚐAggregate(ctx, tmp) + arg4, err = ec.unmarshalOAggregate2ᚖgithubᚗcomᚋClusterCockpitᚋccᚑbackendᚋinternalᚋgraphᚋmodelᚐAggregate(ctx, tmp) if err != nil { return nil, err } } - args["groupBy"] = arg3 + args["groupBy"] = arg4 return args, nil } @@ -4054,6 +4191,170 @@ func (ec *executionContext) fieldContext_Job_concurrentJobs(ctx context.Context, return fc, nil } +func (ec *executionContext) _Job_memUsedMax(ctx context.Context, field graphql.CollectedField, obj *schema.Job) (ret graphql.Marshaler) { + fc, err := ec.fieldContext_Job_memUsedMax(ctx, field) + if err != nil { + return graphql.Null + } + ctx = graphql.WithFieldContext(ctx, fc) + defer func() { + if r := recover(); r != nil { + ec.Error(ctx, ec.Recover(ctx, r)) + ret = graphql.Null + } + }() + resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) { + ctx = rctx // use context from middleware stack in children + return obj.MemUsedMax, nil + }) + if err != nil { + ec.Error(ctx, err) + return graphql.Null + } + if resTmp == nil { + return graphql.Null + } + res := resTmp.(float64) + fc.Result = res + return ec.marshalOFloat2float64(ctx, field.Selections, res) +} + +func (ec *executionContext) fieldContext_Job_memUsedMax(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) { + fc = &graphql.FieldContext{ + Object: "Job", + Field: field, + IsMethod: false, + IsResolver: false, + Child: func(ctx context.Context, field graphql.CollectedField) (*graphql.FieldContext, error) { + return nil, errors.New("field of type Float does not have child fields") + }, + } + return fc, nil +} + +func (ec *executionContext) _Job_flopsAnyAvg(ctx context.Context, field graphql.CollectedField, obj *schema.Job) (ret 
graphql.Marshaler) { + fc, err := ec.fieldContext_Job_flopsAnyAvg(ctx, field) + if err != nil { + return graphql.Null + } + ctx = graphql.WithFieldContext(ctx, fc) + defer func() { + if r := recover(); r != nil { + ec.Error(ctx, ec.Recover(ctx, r)) + ret = graphql.Null + } + }() + resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) { + ctx = rctx // use context from middleware stack in children + return obj.FlopsAnyAvg, nil + }) + if err != nil { + ec.Error(ctx, err) + return graphql.Null + } + if resTmp == nil { + return graphql.Null + } + res := resTmp.(float64) + fc.Result = res + return ec.marshalOFloat2float64(ctx, field.Selections, res) +} + +func (ec *executionContext) fieldContext_Job_flopsAnyAvg(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) { + fc = &graphql.FieldContext{ + Object: "Job", + Field: field, + IsMethod: false, + IsResolver: false, + Child: func(ctx context.Context, field graphql.CollectedField) (*graphql.FieldContext, error) { + return nil, errors.New("field of type Float does not have child fields") + }, + } + return fc, nil +} + +func (ec *executionContext) _Job_memBwAvg(ctx context.Context, field graphql.CollectedField, obj *schema.Job) (ret graphql.Marshaler) { + fc, err := ec.fieldContext_Job_memBwAvg(ctx, field) + if err != nil { + return graphql.Null + } + ctx = graphql.WithFieldContext(ctx, fc) + defer func() { + if r := recover(); r != nil { + ec.Error(ctx, ec.Recover(ctx, r)) + ret = graphql.Null + } + }() + resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) { + ctx = rctx // use context from middleware stack in children + return obj.MemBwAvg, nil + }) + if err != nil { + ec.Error(ctx, err) + return graphql.Null + } + if resTmp == nil { + return graphql.Null + } + res := resTmp.(float64) + fc.Result = res + return ec.marshalOFloat2float64(ctx, field.Selections, res) +} + +func (ec *executionContext) fieldContext_Job_memBwAvg(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) { + fc = &graphql.FieldContext{ + Object: "Job", + Field: field, + IsMethod: false, + IsResolver: false, + Child: func(ctx context.Context, field graphql.CollectedField) (*graphql.FieldContext, error) { + return nil, errors.New("field of type Float does not have child fields") + }, + } + return fc, nil +} + +func (ec *executionContext) _Job_loadAvg(ctx context.Context, field graphql.CollectedField, obj *schema.Job) (ret graphql.Marshaler) { + fc, err := ec.fieldContext_Job_loadAvg(ctx, field) + if err != nil { + return graphql.Null + } + ctx = graphql.WithFieldContext(ctx, fc) + defer func() { + if r := recover(); r != nil { + ec.Error(ctx, ec.Recover(ctx, r)) + ret = graphql.Null + } + }() + resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) { + ctx = rctx // use context from middleware stack in children + return obj.LoadAvg, nil + }) + if err != nil { + ec.Error(ctx, err) + return graphql.Null + } + if resTmp == nil { + return graphql.Null + } + res := resTmp.(float64) + fc.Result = res + return ec.marshalOFloat2float64(ctx, field.Selections, res) +} + +func (ec *executionContext) fieldContext_Job_loadAvg(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) { + fc = &graphql.FieldContext{ + Object: "Job", + Field: field, + IsMethod: false, + IsResolver: false, + Child: func(ctx context.Context, field graphql.CollectedField) (*graphql.FieldContext, error) { 
+ return nil, errors.New("field of type Float does not have child fields") + }, + } + return fc, nil +} + func (ec *executionContext) _Job_metaData(ctx context.Context, field graphql.CollectedField, obj *schema.Job) (ret graphql.Marshaler) { fc, err := ec.fieldContext_Job_metaData(ctx, field) if err != nil { @@ -4778,6 +5079,14 @@ func (ec *executionContext) fieldContext_JobResultList_items(ctx context.Context return ec.fieldContext_Job_resources(ctx, field) case "concurrentJobs": return ec.fieldContext_Job_concurrentJobs(ctx, field) + case "memUsedMax": + return ec.fieldContext_Job_memUsedMax(ctx, field) + case "flopsAnyAvg": + return ec.fieldContext_Job_flopsAnyAvg(ctx, field) + case "memBwAvg": + return ec.fieldContext_Job_memBwAvg(ctx, field) + case "loadAvg": + return ec.fieldContext_Job_loadAvg(ctx, field) case "metaData": return ec.fieldContext_Job_metaData(ctx, field) case "userData": @@ -5640,6 +5949,58 @@ func (ec *executionContext) fieldContext_JobsStatistics_histNumAccs(ctx context. return fc, nil } +func (ec *executionContext) _JobsStatistics_histMetrics(ctx context.Context, field graphql.CollectedField, obj *model.JobsStatistics) (ret graphql.Marshaler) { + fc, err := ec.fieldContext_JobsStatistics_histMetrics(ctx, field) + if err != nil { + return graphql.Null + } + ctx = graphql.WithFieldContext(ctx, fc) + defer func() { + if r := recover(); r != nil { + ec.Error(ctx, ec.Recover(ctx, r)) + ret = graphql.Null + } + }() + resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) { + ctx = rctx // use context from middleware stack in children + return obj.HistMetrics, nil + }) + if err != nil { + ec.Error(ctx, err) + return graphql.Null + } + if resTmp == nil { + if !graphql.HasFieldError(ctx, fc) { + ec.Errorf(ctx, "must not be null") + } + return graphql.Null + } + res := resTmp.([]*model.MetricHistoPoints) + fc.Result = res + return ec.marshalNMetricHistoPoints2ᚕᚖgithubᚗcomᚋClusterCockpitᚋccᚑbackendᚋinternalᚋgraphᚋmodelᚐMetricHistoPointsᚄ(ctx, field.Selections, res) +} + +func (ec *executionContext) fieldContext_JobsStatistics_histMetrics(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) { + fc = &graphql.FieldContext{ + Object: "JobsStatistics", + Field: field, + IsMethod: false, + IsResolver: false, + Child: func(ctx context.Context, field graphql.CollectedField) (*graphql.FieldContext, error) { + switch field.Name { + case "metric": + return ec.fieldContext_MetricHistoPoints_metric(ctx, field) + case "unit": + return ec.fieldContext_MetricHistoPoints_unit(ctx, field) + case "data": + return ec.fieldContext_MetricHistoPoints_data(ctx, field) + } + return nil, fmt.Errorf("no field named %q was found under type MetricHistoPoints", field.Name) + }, + } + return fc, nil +} + func (ec *executionContext) _MetricConfig_name(ctx context.Context, field graphql.CollectedField, obj *schema.MetricConfig) (ret graphql.Marshaler) { fc, err := ec.fieldContext_MetricConfig_name(ctx, field) if err != nil { @@ -6185,6 +6546,312 @@ func (ec *executionContext) fieldContext_MetricFootprints_data(ctx context.Conte return fc, nil } +func (ec *executionContext) _MetricHistoPoint_bin(ctx context.Context, field graphql.CollectedField, obj *model.MetricHistoPoint) (ret graphql.Marshaler) { + fc, err := ec.fieldContext_MetricHistoPoint_bin(ctx, field) + if err != nil { + return graphql.Null + } + ctx = graphql.WithFieldContext(ctx, fc) + defer func() { + if r := recover(); r != nil { + ec.Error(ctx, ec.Recover(ctx, r)) + ret = 
graphql.Null + } + }() + resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) { + ctx = rctx // use context from middleware stack in children + return obj.Bin, nil + }) + if err != nil { + ec.Error(ctx, err) + return graphql.Null + } + if resTmp == nil { + return graphql.Null + } + res := resTmp.(*int) + fc.Result = res + return ec.marshalOInt2ᚖint(ctx, field.Selections, res) +} + +func (ec *executionContext) fieldContext_MetricHistoPoint_bin(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) { + fc = &graphql.FieldContext{ + Object: "MetricHistoPoint", + Field: field, + IsMethod: false, + IsResolver: false, + Child: func(ctx context.Context, field graphql.CollectedField) (*graphql.FieldContext, error) { + return nil, errors.New("field of type Int does not have child fields") + }, + } + return fc, nil +} + +func (ec *executionContext) _MetricHistoPoint_count(ctx context.Context, field graphql.CollectedField, obj *model.MetricHistoPoint) (ret graphql.Marshaler) { + fc, err := ec.fieldContext_MetricHistoPoint_count(ctx, field) + if err != nil { + return graphql.Null + } + ctx = graphql.WithFieldContext(ctx, fc) + defer func() { + if r := recover(); r != nil { + ec.Error(ctx, ec.Recover(ctx, r)) + ret = graphql.Null + } + }() + resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) { + ctx = rctx // use context from middleware stack in children + return obj.Count, nil + }) + if err != nil { + ec.Error(ctx, err) + return graphql.Null + } + if resTmp == nil { + if !graphql.HasFieldError(ctx, fc) { + ec.Errorf(ctx, "must not be null") + } + return graphql.Null + } + res := resTmp.(int) + fc.Result = res + return ec.marshalNInt2int(ctx, field.Selections, res) +} + +func (ec *executionContext) fieldContext_MetricHistoPoint_count(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) { + fc = &graphql.FieldContext{ + Object: "MetricHistoPoint", + Field: field, + IsMethod: false, + IsResolver: false, + Child: func(ctx context.Context, field graphql.CollectedField) (*graphql.FieldContext, error) { + return nil, errors.New("field of type Int does not have child fields") + }, + } + return fc, nil +} + +func (ec *executionContext) _MetricHistoPoint_min(ctx context.Context, field graphql.CollectedField, obj *model.MetricHistoPoint) (ret graphql.Marshaler) { + fc, err := ec.fieldContext_MetricHistoPoint_min(ctx, field) + if err != nil { + return graphql.Null + } + ctx = graphql.WithFieldContext(ctx, fc) + defer func() { + if r := recover(); r != nil { + ec.Error(ctx, ec.Recover(ctx, r)) + ret = graphql.Null + } + }() + resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) { + ctx = rctx // use context from middleware stack in children + return obj.Min, nil + }) + if err != nil { + ec.Error(ctx, err) + return graphql.Null + } + if resTmp == nil { + return graphql.Null + } + res := resTmp.(*int) + fc.Result = res + return ec.marshalOInt2ᚖint(ctx, field.Selections, res) +} + +func (ec *executionContext) fieldContext_MetricHistoPoint_min(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) { + fc = &graphql.FieldContext{ + Object: "MetricHistoPoint", + Field: field, + IsMethod: false, + IsResolver: false, + Child: func(ctx context.Context, field graphql.CollectedField) (*graphql.FieldContext, error) { + return nil, errors.New("field of type Int does not have child fields") + }, + } + return fc, 
nil +} + +func (ec *executionContext) _MetricHistoPoint_max(ctx context.Context, field graphql.CollectedField, obj *model.MetricHistoPoint) (ret graphql.Marshaler) { + fc, err := ec.fieldContext_MetricHistoPoint_max(ctx, field) + if err != nil { + return graphql.Null + } + ctx = graphql.WithFieldContext(ctx, fc) + defer func() { + if r := recover(); r != nil { + ec.Error(ctx, ec.Recover(ctx, r)) + ret = graphql.Null + } + }() + resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) { + ctx = rctx // use context from middleware stack in children + return obj.Max, nil + }) + if err != nil { + ec.Error(ctx, err) + return graphql.Null + } + if resTmp == nil { + return graphql.Null + } + res := resTmp.(*int) + fc.Result = res + return ec.marshalOInt2ᚖint(ctx, field.Selections, res) +} + +func (ec *executionContext) fieldContext_MetricHistoPoint_max(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) { + fc = &graphql.FieldContext{ + Object: "MetricHistoPoint", + Field: field, + IsMethod: false, + IsResolver: false, + Child: func(ctx context.Context, field graphql.CollectedField) (*graphql.FieldContext, error) { + return nil, errors.New("field of type Int does not have child fields") + }, + } + return fc, nil +} + +func (ec *executionContext) _MetricHistoPoints_metric(ctx context.Context, field graphql.CollectedField, obj *model.MetricHistoPoints) (ret graphql.Marshaler) { + fc, err := ec.fieldContext_MetricHistoPoints_metric(ctx, field) + if err != nil { + return graphql.Null + } + ctx = graphql.WithFieldContext(ctx, fc) + defer func() { + if r := recover(); r != nil { + ec.Error(ctx, ec.Recover(ctx, r)) + ret = graphql.Null + } + }() + resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) { + ctx = rctx // use context from middleware stack in children + return obj.Metric, nil + }) + if err != nil { + ec.Error(ctx, err) + return graphql.Null + } + if resTmp == nil { + if !graphql.HasFieldError(ctx, fc) { + ec.Errorf(ctx, "must not be null") + } + return graphql.Null + } + res := resTmp.(string) + fc.Result = res + return ec.marshalNString2string(ctx, field.Selections, res) +} + +func (ec *executionContext) fieldContext_MetricHistoPoints_metric(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) { + fc = &graphql.FieldContext{ + Object: "MetricHistoPoints", + Field: field, + IsMethod: false, + IsResolver: false, + Child: func(ctx context.Context, field graphql.CollectedField) (*graphql.FieldContext, error) { + return nil, errors.New("field of type String does not have child fields") + }, + } + return fc, nil +} + +func (ec *executionContext) _MetricHistoPoints_unit(ctx context.Context, field graphql.CollectedField, obj *model.MetricHistoPoints) (ret graphql.Marshaler) { + fc, err := ec.fieldContext_MetricHistoPoints_unit(ctx, field) + if err != nil { + return graphql.Null + } + ctx = graphql.WithFieldContext(ctx, fc) + defer func() { + if r := recover(); r != nil { + ec.Error(ctx, ec.Recover(ctx, r)) + ret = graphql.Null + } + }() + resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) { + ctx = rctx // use context from middleware stack in children + return obj.Unit, nil + }) + if err != nil { + ec.Error(ctx, err) + return graphql.Null + } + if resTmp == nil { + if !graphql.HasFieldError(ctx, fc) { + ec.Errorf(ctx, "must not be null") + } + return graphql.Null + } + res := resTmp.(string) + fc.Result = res + return 
ec.marshalNString2string(ctx, field.Selections, res) +} + +func (ec *executionContext) fieldContext_MetricHistoPoints_unit(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) { + fc = &graphql.FieldContext{ + Object: "MetricHistoPoints", + Field: field, + IsMethod: false, + IsResolver: false, + Child: func(ctx context.Context, field graphql.CollectedField) (*graphql.FieldContext, error) { + return nil, errors.New("field of type String does not have child fields") + }, + } + return fc, nil +} + +func (ec *executionContext) _MetricHistoPoints_data(ctx context.Context, field graphql.CollectedField, obj *model.MetricHistoPoints) (ret graphql.Marshaler) { + fc, err := ec.fieldContext_MetricHistoPoints_data(ctx, field) + if err != nil { + return graphql.Null + } + ctx = graphql.WithFieldContext(ctx, fc) + defer func() { + if r := recover(); r != nil { + ec.Error(ctx, ec.Recover(ctx, r)) + ret = graphql.Null + } + }() + resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) { + ctx = rctx // use context from middleware stack in children + return obj.Data, nil + }) + if err != nil { + ec.Error(ctx, err) + return graphql.Null + } + if resTmp == nil { + return graphql.Null + } + res := resTmp.([]*model.MetricHistoPoint) + fc.Result = res + return ec.marshalOMetricHistoPoint2ᚕᚖgithubᚗcomᚋClusterCockpitᚋccᚑbackendᚋinternalᚋgraphᚋmodelᚐMetricHistoPointᚄ(ctx, field.Selections, res) +} + +func (ec *executionContext) fieldContext_MetricHistoPoints_data(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) { + fc = &graphql.FieldContext{ + Object: "MetricHistoPoints", + Field: field, + IsMethod: false, + IsResolver: false, + Child: func(ctx context.Context, field graphql.CollectedField) (*graphql.FieldContext, error) { + switch field.Name { + case "bin": + return ec.fieldContext_MetricHistoPoint_bin(ctx, field) + case "count": + return ec.fieldContext_MetricHistoPoint_count(ctx, field) + case "min": + return ec.fieldContext_MetricHistoPoint_min(ctx, field) + case "max": + return ec.fieldContext_MetricHistoPoint_max(ctx, field) + } + return nil, fmt.Errorf("no field named %q was found under type MetricHistoPoint", field.Name) + }, + } + return fc, nil +} + func (ec *executionContext) _MetricStatistics_avg(ctx context.Context, field graphql.CollectedField, obj *schema.MetricStatistics) (ret graphql.Marshaler) { fc, err := ec.fieldContext_MetricStatistics_avg(ctx, field) if err != nil { @@ -7152,6 +7819,14 @@ func (ec *executionContext) fieldContext_Query_job(ctx context.Context, field gr return ec.fieldContext_Job_resources(ctx, field) case "concurrentJobs": return ec.fieldContext_Job_concurrentJobs(ctx, field) + case "memUsedMax": + return ec.fieldContext_Job_memUsedMax(ctx, field) + case "flopsAnyAvg": + return ec.fieldContext_Job_flopsAnyAvg(ctx, field) + case "memBwAvg": + return ec.fieldContext_Job_memBwAvg(ctx, field) + case "loadAvg": + return ec.fieldContext_Job_loadAvg(ctx, field) case "metaData": return ec.fieldContext_Job_metaData(ctx, field) case "userData": @@ -7374,7 +8049,7 @@ func (ec *executionContext) _Query_jobsStatistics(ctx context.Context, field gra }() resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) { ctx = rctx // use context from middleware stack in children - return ec.resolvers.Query().JobsStatistics(rctx, fc.Args["filter"].([]*model.JobFilter), fc.Args["page"].(*model.PageRequest), fc.Args["sortBy"].(*model.SortByAggregate), 
fc.Args["groupBy"].(*model.Aggregate)) + return ec.resolvers.Query().JobsStatistics(rctx, fc.Args["filter"].([]*model.JobFilter), fc.Args["metrics"].([]string), fc.Args["page"].(*model.PageRequest), fc.Args["sortBy"].(*model.SortByAggregate), fc.Args["groupBy"].(*model.Aggregate)) }) if err != nil { ec.Error(ctx, err) @@ -7431,6 +8106,8 @@ func (ec *executionContext) fieldContext_Query_jobsStatistics(ctx context.Contex return ec.fieldContext_JobsStatistics_histNumCores(ctx, field) case "histNumAccs": return ec.fieldContext_JobsStatistics_histNumAccs(ctx, field) + case "histMetrics": + return ec.fieldContext_JobsStatistics_histMetrics(ctx, field) } return nil, fmt.Errorf("no field named %q was found under type JobsStatistics", field.Name) }, @@ -12504,6 +13181,14 @@ func (ec *executionContext) _Job(ctx context.Context, sel ast.SelectionSet, obj } out.Concurrently(i, func(ctx context.Context) graphql.Marshaler { return innerFunc(ctx, out) }) + case "memUsedMax": + out.Values[i] = ec._Job_memUsedMax(ctx, field, obj) + case "flopsAnyAvg": + out.Values[i] = ec._Job_flopsAnyAvg(ctx, field, obj) + case "memBwAvg": + out.Values[i] = ec._Job_memBwAvg(ctx, field, obj) + case "loadAvg": + out.Values[i] = ec._Job_loadAvg(ctx, field, obj) case "metaData": field := field @@ -12910,6 +13595,11 @@ func (ec *executionContext) _JobsStatistics(ctx context.Context, sel ast.Selecti if out.Values[i] == graphql.Null { out.Invalids++ } + case "histMetrics": + out.Values[i] = ec._JobsStatistics_histMetrics(ctx, field, obj) + if out.Values[i] == graphql.Null { + out.Invalids++ + } default: panic("unknown field " + strconv.Quote(field.Name)) } @@ -13058,6 +13748,97 @@ func (ec *executionContext) _MetricFootprints(ctx context.Context, sel ast.Selec return out } +var metricHistoPointImplementors = []string{"MetricHistoPoint"} + +func (ec *executionContext) _MetricHistoPoint(ctx context.Context, sel ast.SelectionSet, obj *model.MetricHistoPoint) graphql.Marshaler { + fields := graphql.CollectFields(ec.OperationContext, sel, metricHistoPointImplementors) + + out := graphql.NewFieldSet(fields) + deferred := make(map[string]*graphql.FieldSet) + for i, field := range fields { + switch field.Name { + case "__typename": + out.Values[i] = graphql.MarshalString("MetricHistoPoint") + case "bin": + out.Values[i] = ec._MetricHistoPoint_bin(ctx, field, obj) + case "count": + out.Values[i] = ec._MetricHistoPoint_count(ctx, field, obj) + if out.Values[i] == graphql.Null { + out.Invalids++ + } + case "min": + out.Values[i] = ec._MetricHistoPoint_min(ctx, field, obj) + case "max": + out.Values[i] = ec._MetricHistoPoint_max(ctx, field, obj) + default: + panic("unknown field " + strconv.Quote(field.Name)) + } + } + out.Dispatch(ctx) + if out.Invalids > 0 { + return graphql.Null + } + + atomic.AddInt32(&ec.deferred, int32(len(deferred))) + + for label, dfs := range deferred { + ec.processDeferredGroup(graphql.DeferredGroup{ + Label: label, + Path: graphql.GetPath(ctx), + FieldSet: dfs, + Context: ctx, + }) + } + + return out +} + +var metricHistoPointsImplementors = []string{"MetricHistoPoints"} + +func (ec *executionContext) _MetricHistoPoints(ctx context.Context, sel ast.SelectionSet, obj *model.MetricHistoPoints) graphql.Marshaler { + fields := graphql.CollectFields(ec.OperationContext, sel, metricHistoPointsImplementors) + + out := graphql.NewFieldSet(fields) + deferred := make(map[string]*graphql.FieldSet) + for i, field := range fields { + switch field.Name { + case "__typename": + out.Values[i] = 
graphql.MarshalString("MetricHistoPoints") + case "metric": + out.Values[i] = ec._MetricHistoPoints_metric(ctx, field, obj) + if out.Values[i] == graphql.Null { + out.Invalids++ + } + case "unit": + out.Values[i] = ec._MetricHistoPoints_unit(ctx, field, obj) + if out.Values[i] == graphql.Null { + out.Invalids++ + } + case "data": + out.Values[i] = ec._MetricHistoPoints_data(ctx, field, obj) + default: + panic("unknown field " + strconv.Quote(field.Name)) + } + } + out.Dispatch(ctx) + if out.Invalids > 0 { + return graphql.Null + } + + atomic.AddInt32(&ec.deferred, int32(len(deferred))) + + for label, dfs := range deferred { + ec.processDeferredGroup(graphql.DeferredGroup{ + Label: label, + Path: graphql.GetPath(ctx), + FieldSet: dfs, + Context: ctx, + }) + } + + return out +} + var metricStatisticsImplementors = []string{"MetricStatistics"} func (ec *executionContext) _MetricStatistics(ctx context.Context, sel ast.SelectionSet, obj *schema.MetricStatistics) graphql.Marshaler { @@ -15310,6 +16091,70 @@ func (ec *executionContext) marshalNMetricFootprints2ᚖgithubᚗcomᚋClusterCo return ec._MetricFootprints(ctx, sel, v) } +func (ec *executionContext) marshalNMetricHistoPoint2ᚖgithubᚗcomᚋClusterCockpitᚋccᚑbackendᚋinternalᚋgraphᚋmodelᚐMetricHistoPoint(ctx context.Context, sel ast.SelectionSet, v *model.MetricHistoPoint) graphql.Marshaler { + if v == nil { + if !graphql.HasFieldError(ctx, graphql.GetFieldContext(ctx)) { + ec.Errorf(ctx, "the requested element is null which the schema does not allow") + } + return graphql.Null + } + return ec._MetricHistoPoint(ctx, sel, v) +} + +func (ec *executionContext) marshalNMetricHistoPoints2ᚕᚖgithubᚗcomᚋClusterCockpitᚋccᚑbackendᚋinternalᚋgraphᚋmodelᚐMetricHistoPointsᚄ(ctx context.Context, sel ast.SelectionSet, v []*model.MetricHistoPoints) graphql.Marshaler { + ret := make(graphql.Array, len(v)) + var wg sync.WaitGroup + isLen1 := len(v) == 1 + if !isLen1 { + wg.Add(len(v)) + } + for i := range v { + i := i + fc := &graphql.FieldContext{ + Index: &i, + Result: &v[i], + } + ctx := graphql.WithFieldContext(ctx, fc) + f := func(i int) { + defer func() { + if r := recover(); r != nil { + ec.Error(ctx, ec.Recover(ctx, r)) + ret = nil + } + }() + if !isLen1 { + defer wg.Done() + } + ret[i] = ec.marshalNMetricHistoPoints2ᚖgithubᚗcomᚋClusterCockpitᚋccᚑbackendᚋinternalᚋgraphᚋmodelᚐMetricHistoPoints(ctx, sel, v[i]) + } + if isLen1 { + f(i) + } else { + go f(i) + } + + } + wg.Wait() + + for _, e := range ret { + if e == graphql.Null { + return graphql.Null + } + } + + return ret +} + +func (ec *executionContext) marshalNMetricHistoPoints2ᚖgithubᚗcomᚋClusterCockpitᚋccᚑbackendᚋinternalᚋgraphᚋmodelᚐMetricHistoPoints(ctx context.Context, sel ast.SelectionSet, v *model.MetricHistoPoints) graphql.Marshaler { + if v == nil { + if !graphql.HasFieldError(ctx, graphql.GetFieldContext(ctx)) { + ec.Errorf(ctx, "the requested element is null which the schema does not allow") + } + return graphql.Null + } + return ec._MetricHistoPoints(ctx, sel, v) +} + func (ec *executionContext) unmarshalNMetricScope2githubᚗcomᚋClusterCockpitᚋccᚑbackendᚋpkgᚋschemaᚐMetricScope(ctx context.Context, v interface{}) (schema.MetricScope, error) { var res schema.MetricScope err := res.UnmarshalGQL(v) @@ -16365,6 +17210,53 @@ func (ec *executionContext) marshalOJobState2ᚕgithubᚗcomᚋClusterCockpitᚋ return ret } +func (ec *executionContext) marshalOMetricHistoPoint2ᚕᚖgithubᚗcomᚋClusterCockpitᚋccᚑbackendᚋinternalᚋgraphᚋmodelᚐMetricHistoPointᚄ(ctx context.Context, sel ast.SelectionSet, v 
[]*model.MetricHistoPoint) graphql.Marshaler { + if v == nil { + return graphql.Null + } + ret := make(graphql.Array, len(v)) + var wg sync.WaitGroup + isLen1 := len(v) == 1 + if !isLen1 { + wg.Add(len(v)) + } + for i := range v { + i := i + fc := &graphql.FieldContext{ + Index: &i, + Result: &v[i], + } + ctx := graphql.WithFieldContext(ctx, fc) + f := func(i int) { + defer func() { + if r := recover(); r != nil { + ec.Error(ctx, ec.Recover(ctx, r)) + ret = nil + } + }() + if !isLen1 { + defer wg.Done() + } + ret[i] = ec.marshalNMetricHistoPoint2ᚖgithubᚗcomᚋClusterCockpitᚋccᚑbackendᚋinternalᚋgraphᚋmodelᚐMetricHistoPoint(ctx, sel, v[i]) + } + if isLen1 { + f(i) + } else { + go f(i) + } + + } + wg.Wait() + + for _, e := range ret { + if e == graphql.Null { + return graphql.Null + } + } + + return ret +} + func (ec *executionContext) unmarshalOMetricScope2ᚕgithubᚗcomᚋClusterCockpitᚋccᚑbackendᚋpkgᚋschemaᚐMetricScopeᚄ(ctx context.Context, v interface{}) ([]schema.MetricScope, error) { if v == nil { return nil, nil diff --git a/internal/graph/model/models_gen.go b/internal/graph/model/models_gen.go index 050784b..7b8ebd2 100644 --- a/internal/graph/model/models_gen.go +++ b/internal/graph/model/models_gen.go @@ -85,22 +85,23 @@ type JobResultList struct { } type JobsStatistics struct { - ID string `json:"id"` - Name string `json:"name"` - TotalJobs int `json:"totalJobs"` - RunningJobs int `json:"runningJobs"` - ShortJobs int `json:"shortJobs"` - TotalWalltime int `json:"totalWalltime"` - TotalNodes int `json:"totalNodes"` - TotalNodeHours int `json:"totalNodeHours"` - TotalCores int `json:"totalCores"` - TotalCoreHours int `json:"totalCoreHours"` - TotalAccs int `json:"totalAccs"` - TotalAccHours int `json:"totalAccHours"` - HistDuration []*HistoPoint `json:"histDuration"` - HistNumNodes []*HistoPoint `json:"histNumNodes"` - HistNumCores []*HistoPoint `json:"histNumCores"` - HistNumAccs []*HistoPoint `json:"histNumAccs"` + ID string `json:"id"` + Name string `json:"name"` + TotalJobs int `json:"totalJobs"` + RunningJobs int `json:"runningJobs"` + ShortJobs int `json:"shortJobs"` + TotalWalltime int `json:"totalWalltime"` + TotalNodes int `json:"totalNodes"` + TotalNodeHours int `json:"totalNodeHours"` + TotalCores int `json:"totalCores"` + TotalCoreHours int `json:"totalCoreHours"` + TotalAccs int `json:"totalAccs"` + TotalAccHours int `json:"totalAccHours"` + HistDuration []*HistoPoint `json:"histDuration"` + HistNumNodes []*HistoPoint `json:"histNumNodes"` + HistNumCores []*HistoPoint `json:"histNumCores"` + HistNumAccs []*HistoPoint `json:"histNumAccs"` + HistMetrics []*MetricHistoPoints `json:"histMetrics"` } type MetricFootprints struct { @@ -108,6 +109,19 @@ type MetricFootprints struct { Data []schema.Float `json:"data"` } +type MetricHistoPoint struct { + Bin *int `json:"bin,omitempty"` + Count int `json:"count"` + Min *int `json:"min,omitempty"` + Max *int `json:"max,omitempty"` +} + +type MetricHistoPoints struct { + Metric string `json:"metric"` + Unit string `json:"unit"` + Data []*MetricHistoPoint `json:"data,omitempty"` +} + type NodeMetrics struct { Host string `json:"host"` SubCluster string `json:"subCluster"` diff --git a/internal/graph/schema.resolvers.go b/internal/graph/schema.resolvers.go index 9e5e111..82bf026 100644 --- a/internal/graph/schema.resolvers.go +++ b/internal/graph/schema.resolvers.go @@ -2,7 +2,7 @@ package graph // This file will be automatically regenerated based on the schema, any resolver implementations // will be copied through when generating and any 
unknown code will be moved to the end. -// Code generated by github.com/99designs/gqlgen version v0.17.36 +// Code generated by github.com/99designs/gqlgen version v0.17.40 import ( "context" @@ -244,7 +244,7 @@ func (r *queryResolver) Jobs(ctx context.Context, filter []*model.JobFilter, pag } // JobsStatistics is the resolver for the jobsStatistics field. -func (r *queryResolver) JobsStatistics(ctx context.Context, filter []*model.JobFilter, page *model.PageRequest, sortBy *model.SortByAggregate, groupBy *model.Aggregate) ([]*model.JobsStatistics, error) { +func (r *queryResolver) JobsStatistics(ctx context.Context, filter []*model.JobFilter, metrics []string, page *model.PageRequest, sortBy *model.SortByAggregate, groupBy *model.Aggregate) ([]*model.JobsStatistics, error) { var err error var stats []*model.JobsStatistics @@ -291,6 +291,17 @@ func (r *queryResolver) JobsStatistics(ctx context.Context, filter []*model.JobF } } + if requireField(ctx, "histMetrics") { + if groupBy == nil { + stats[0], err = r.Repo.AddMetricHistograms(ctx, filter, metrics, stats[0]) + if err != nil { + return nil, err + } + } else { + return nil, errors.New("metric histograms only implemented without groupBy argument") + } + } + return stats, nil } diff --git a/internal/repository/job.go b/internal/repository/job.go index 76834d1..e1a997a 100644 --- a/internal/repository/job.go +++ b/internal/repository/job.go @@ -60,7 +60,7 @@ func GetJobRepository() *JobRepository { var jobColumns []string = []string{ "job.id", "job.job_id", "job.user", "job.project", "job.cluster", "job.subcluster", "job.start_time", "job.partition", "job.array_job_id", "job.num_nodes", "job.num_hwthreads", "job.num_acc", "job.exclusive", "job.monitoring_status", "job.smt", "job.job_state", - "job.duration", "job.walltime", "job.resources", // "job.meta_data", + "job.duration", "job.walltime", "job.resources", "job.mem_used_max", "job.flops_any_avg", "job.mem_bw_avg", "job.load_avg", // "job.meta_data", } func scanJob(row interface{ Scan(...interface{}) error }) (*schema.Job, error) { @@ -68,7 +68,7 @@ func scanJob(row interface{ Scan(...interface{}) error }) (*schema.Job, error) { if err := row.Scan( &job.ID, &job.JobID, &job.User, &job.Project, &job.Cluster, &job.SubCluster, &job.StartTimeUnix, &job.Partition, &job.ArrayJobId, &job.NumNodes, &job.NumHWThreads, &job.NumAcc, &job.Exclusive, &job.MonitoringStatus, &job.SMT, &job.State, - &job.Duration, &job.Walltime, &job.RawResources /*&job.RawMetaData*/); err != nil { + &job.Duration, &job.Walltime, &job.RawResources, &job.MemUsedMax, &job.FlopsAnyAvg, &job.MemBwAvg, &job.LoadAvg /*&job.RawMetaData*/); err != nil { log.Warnf("Error while scanning rows (Job): %v", err) return nil, err } @@ -483,6 +483,7 @@ func (r *JobRepository) MarkArchived( case "mem_bw": stmt = stmt.Set("mem_bw_avg", stats.Avg) case "load": + stmt = stmt.Set("load_avg", stats.Avg) case "cpu_load": stmt = stmt.Set("load_avg", stats.Avg) case "net_bw": diff --git a/internal/repository/query.go b/internal/repository/query.go index 84b8048..317302b 100644 --- a/internal/repository/query.go +++ b/internal/repository/query.go @@ -96,7 +96,7 @@ func SecurityCheck(ctx context.Context, query sq.SelectBuilder) (sq.SelectBuilde user := GetUserFromContext(ctx) if user == nil { var qnil sq.SelectBuilder - return qnil, fmt.Errorf("user context is nil!") + return qnil, fmt.Errorf("user context is nil") } else if user.HasAnyRole([]schema.Role{schema.RoleAdmin, schema.RoleSupport, schema.RoleApi}) { // Admin & Co. 
: All jobs return query, nil } else if user.HasRole(schema.RoleManager) { // Manager : Add filter for managed projects' jobs only + personal jobs
diff --git a/internal/repository/stats.go b/internal/repository/stats.go
index 8084553..4d7be08 100644
--- a/internal/repository/stats.go
+++ b/internal/repository/stats.go
@@ -8,11 +8,15 @@ import (
 	"context"
 	"database/sql"
 	"fmt"
+	"math"
 	"time"
 
 	"github.com/ClusterCockpit/cc-backend/internal/config"
 	"github.com/ClusterCockpit/cc-backend/internal/graph/model"
+	"github.com/ClusterCockpit/cc-backend/internal/metricdata"
+	"github.com/ClusterCockpit/cc-backend/pkg/archive"
 	"github.com/ClusterCockpit/cc-backend/pkg/log"
+	"github.com/ClusterCockpit/cc-backend/pkg/schema"
 	sq "github.com/Masterminds/squirrel"
 )
@@ -450,6 +454,39 @@ func (r *JobRepository) AddHistograms(
 	return stat, nil
 }
 
+// TODO: Decide whether to take the metric thresholds from the config of the filtered
+// cluster or to use the largest peak across all clusters, and whether to split into
+// 10 + 1 bins to catch outliers above peak.
+func (r *JobRepository) AddMetricHistograms(
+	ctx context.Context,
+	filter []*model.JobFilter,
+	metrics []string,
+	stat *model.JobsStatistics) (*model.JobsStatistics, error) {
+	start := time.Now()
+
+	// Running jobs only: first query the jobs from sqlite, then load their metric data and build the bins
+	for _, f := range filter {
+		if f.State != nil {
+			if len(f.State) == 1 && f.State[0] == "running" {
+				stat.HistMetrics = r.runningJobsMetricStatisticsHistogram(ctx, metrics, filter)
+				log.Debugf("Timer AddMetricHistograms %s", time.Since(start))
+				return stat, nil
+			}
+		}
+	}
+
+	// All other cases: query and build the bins in sqlite directly
+	for _, m := range metrics {
+		metricHisto, err := r.jobsMetricStatisticsHistogram(ctx, m, filter)
+		if err != nil {
+			log.Warnf("Error while loading job metric statistics histogram: %s", m)
+			continue
+		}
+		stat.HistMetrics = append(stat.HistMetrics, metricHisto)
+	}
+
+	log.Debugf("Timer AddMetricHistograms %s", time.Since(start))
+	return stat, nil
+}
+
 // `value` must be the column grouped by, but renamed to "value"
 func (r *JobRepository) jobsStatisticsHistogram(
 	ctx context.Context,
@@ -487,3 +524,231 @@ func (r *JobRepository) jobsStatisticsHistogram(
 	log.Debugf("Timer jobsStatisticsHistogram %s", time.Since(start))
 	return points, nil
 }
+
+func (r *JobRepository) jobsMetricStatisticsHistogram(
+	ctx context.Context,
+	metric string,
+	filters []*model.JobFilter) (*model.MetricHistoPoints, error) {
+
+	var dbMetric string
+	switch metric {
+	case "cpu_load":
+		dbMetric = "load_avg"
+	case "flops_any":
+		dbMetric = "flops_any_avg"
+	case "mem_bw":
+		dbMetric = "mem_bw_avg"
+	case "mem_used":
+		dbMetric = "mem_used_max"
+	case "net_bw":
+		dbMetric = "net_bw_avg"
+	case "file_bw":
+		dbMetric = "file_bw_avg"
+	default:
+		return nil, fmt.Errorf("%s not implemented", metric)
+	}
+
+	// Get the specific peak for the filtered cluster, or the largest peak across all clusters
+	var metricConfig *schema.MetricConfig
+	var peak float64 = 0.0
+	var unit string = ""
+
+	for _, f := range filters {
+		if f.Cluster != nil {
+			metricConfig = archive.GetMetricConfig(*f.Cluster.Eq, metric)
+			peak = metricConfig.Peak
+			unit = metricConfig.Unit.Prefix + metricConfig.Unit.Base
+			log.Debugf("Cluster %s filter found with peak %f for %s", *f.Cluster.Eq, peak, metric)
+		}
+	}
+
+	if peak == 0.0 {
+		for _, c := range archive.Clusters {
+			for _, m := range c.MetricConfig {
+				if m.Name == metric {
+					if m.Peak > peak {
+						peak = m.Peak
+					}
+					if unit == "" {
+						unit = m.Unit.Prefix + m.Unit.Base
+					}
+				}
+			}
+		}
+	}
+
+	// log.Debugf("Metric %s: DB %s, Peak %f, Unit %s", metric, dbMetric, peak, unit)
+	// Make bins, see https://jereze.com/code/sql-histogram/
+
+	start := time.Now()
+
+	crossJoinQuery := sq.Select(
+		fmt.Sprintf(`max(%s) as max`, dbMetric),
+		fmt.Sprintf(`min(%s) as min`, dbMetric),
+	).From("job").Where(
+		fmt.Sprintf(`%s is not null`, dbMetric),
+	).Where(
+		fmt.Sprintf(`%s <= %f`, dbMetric, peak),
+	)
+
+	crossJoinQuery, cjqerr := SecurityCheck(ctx, crossJoinQuery)
+
+	if cjqerr != nil {
+		return nil, cjqerr
+	}
+
+	for _, f := range filters {
+		crossJoinQuery = BuildWhereClause(f, crossJoinQuery)
+	}
+
+	crossJoinQuerySql, crossJoinQueryArgs, sqlerr := crossJoinQuery.ToSql()
+	if sqlerr != nil {
+		return nil, sqlerr
+	}
+
+	bins := 10
+	binQuery := fmt.Sprintf(`CAST( (case when job.%s = value.max then value.max*0.999999999 else job.%s end - value.min) / (value.max - value.min) * %d as INTEGER )`, dbMetric, dbMetric, bins)
+
+	mainQuery := sq.Select(
+		fmt.Sprintf(`%s + 1 as bin`, binQuery),
+		fmt.Sprintf(`count(job.%s) as count`, dbMetric),
+		fmt.Sprintf(`CAST(((value.max / %d) * (%s )) as INTEGER ) as min`, bins, binQuery),
+		fmt.Sprintf(`CAST(((value.max / %d) * (%s + 1 )) as INTEGER ) as max`, bins, binQuery),
+	).From("job").CrossJoin(
+		fmt.Sprintf(`(%s) as value`, crossJoinQuerySql), crossJoinQueryArgs...,
+	).Where(fmt.Sprintf(`job.%s is not null and job.%s <= %f`, dbMetric, dbMetric, peak))
+
+	mainQuery, qerr := SecurityCheck(ctx, mainQuery)
+
+	if qerr != nil {
+		return nil, qerr
+	}
+
+	for _, f := range filters {
+		mainQuery = BuildWhereClause(f, mainQuery)
+	}
+
+	// Finalize query with Grouping and Ordering
+	mainQuery = mainQuery.GroupBy("bin").OrderBy("bin")
+
+	rows, err := mainQuery.RunWith(r.DB).Query()
+	if err != nil {
+		log.Errorf("Error while running mainQuery: %s", err)
+		return nil, err
+	}
+
+	points := make([]*model.MetricHistoPoint, 0)
+	for rows.Next() {
+		point := model.MetricHistoPoint{}
+		if err := rows.Scan(&point.Bin, &point.Count, &point.Min, &point.Max); err != nil {
+			log.Warnf("Error while scanning rows for %s", metric)
+			return nil, err // Note: returning the error here aborts the whole statistics request if a single metric fails
+		}
+
+		points = append(points, &point)
+	}
+
+	result := model.MetricHistoPoints{Metric: metric, Unit: unit, Data: points}
+
+	log.Debugf("Timer jobsMetricStatisticsHistogram %s", time.Since(start))
+	return &result, nil
+}
+
+func (r *JobRepository) runningJobsMetricStatisticsHistogram(
+	ctx context.Context,
+	metrics []string,
+	filters []*model.JobFilter) []*model.MetricHistoPoints {
+
+	// Get Jobs
+	jobs, err := r.QueryJobs(ctx, filters, &model.PageRequest{Page: 1, ItemsPerPage: 500 + 1}, nil)
+	if err != nil {
+		log.Errorf("Error while querying jobs for footprint: %s", err)
+		return nil
+	}
+	if len(jobs) > 500 {
+		log.Errorf("too many jobs matched (max: %d)", 500)
+		return nil
+	}
+
+	// Get AVGs from metric repo
+	avgs := make([][]schema.Float, len(metrics))
+	for i := range avgs {
+		avgs[i] = make([]schema.Float, 0, len(jobs))
+	}
+
+	for _, job := range jobs {
+		if job.MonitoringStatus == schema.MonitoringStatusDisabled || job.MonitoringStatus == schema.MonitoringStatusArchivingFailed {
+			continue
+		}
+
+		if err := metricdata.LoadAverages(job, metrics, avgs, ctx); err != nil {
+			log.Errorf("Error while loading averages for histogram: %s", err)
+			return nil
+		}
+	}
+
+	// Iterate metrics to fill the end result
+	data := make([]*model.MetricHistoPoints, 0)
+	for idx, metric := range metrics {
+		// Get the specific peak for the filtered cluster, or the largest peak across all clusters
+		var metricConfig *schema.MetricConfig
+		var peak float64 = 0.0
+		var unit string = ""
+
+		for _, f := range filters {
+			if f.Cluster != nil {
+				metricConfig = archive.GetMetricConfig(*f.Cluster.Eq, metric)
+				peak = metricConfig.Peak
+				unit = metricConfig.Unit.Prefix + metricConfig.Unit.Base
+				log.Debugf("Cluster %s filter found with peak %f for %s", *f.Cluster.Eq, peak, metric)
+			}
+		}
+
+		if peak == 0.0 {
+			for _, c := range archive.Clusters {
+				for _, m := range c.MetricConfig {
+					if m.Name == metric {
+						if m.Peak > peak {
+							peak = m.Peak
+						}
+						if unit == "" {
+							unit = m.Unit.Prefix + m.Unit.Base
+						}
+					}
+				}
+			}
+		}
+
+		// Make and fill bins
+		bins := 10.0
+		peakBin := peak / bins
+
+		points := make([]*model.MetricHistoPoint, 0)
+		for b := 0; b < 10; b++ {
+			count := 0
+			bindex := b + 1
+			bmin := math.Round(peakBin * float64(b))
+			bmax := math.Round(peakBin * (float64(b) + 1.0))
+
+			// Iterate AVG values for indexed metric and count for bins
+			for _, val := range avgs[idx] {
+				if float64(val) >= bmin && float64(val) < bmax {
+					count += 1
+				}
+			}
+
+			bminint := int(bmin)
+			bmaxint := int(bmax)
+
+			// Append Bin to Metric Result Array
+			point := model.MetricHistoPoint{Bin: &bindex, Count: count, Min: &bminint, Max: &bmaxint}
+			points = append(points, &point)
+		}
+
+		// Append Metric Result Array to final results array
+		result := model.MetricHistoPoints{Metric: metric, Unit: unit, Data: points}
+		data = append(data, &result)
+	}
+
+	return data
+}
diff --git a/pkg/archive/clusterConfig.go b/pkg/archive/clusterConfig.go
index 0b1c43b..b1bad0a 100644
--- a/pkg/archive/clusterConfig.go
+++ b/pkg/archive/clusterConfig.go
@@ -8,8 +8,8 @@ import (
 	"errors"
 	"fmt"
 
-	"github.com/ClusterCockpit/cc-backend/pkg/schema"
 	"github.com/ClusterCockpit/cc-backend/pkg/log"
+	"github.com/ClusterCockpit/cc-backend/pkg/schema"
 )
 
 var Clusters []*schema.Cluster
diff --git a/pkg/schema/job.go b/pkg/schema/job.go
index ed3a8b6..90bf2cb 100644
--- a/pkg/schema/job.go
+++ b/pkg/schema/job.go
@@ -54,10 +54,10 @@ type Job struct {
 	BaseJob
 	StartTimeUnix int64 `json:"-" db:"start_time" example:"1649723812"` // Start epoch time stamp in seconds
 	StartTime time.Time `json:"startTime"` // Start
time as 'time.Time' data type - MemUsedMax float64 `json:"-" db:"mem_used_max"` // MemUsedMax as Float64 - FlopsAnyAvg float64 `json:"-" db:"flops_any_avg"` // FlopsAnyAvg as Float64 - MemBwAvg float64 `json:"-" db:"mem_bw_avg"` // MemBwAvg as Float64 - LoadAvg float64 `json:"-" db:"load_avg"` // LoadAvg as Float64 + MemUsedMax float64 `json:"memUsedMax" db:"mem_used_max"` // MemUsedMax as Float64 + FlopsAnyAvg float64 `json:"flopsAnyAvg" db:"flops_any_avg"` // FlopsAnyAvg as Float64 + MemBwAvg float64 `json:"memBwAvg" db:"mem_bw_avg"` // MemBwAvg as Float64 + LoadAvg float64 `json:"loadAvg" db:"load_avg"` // LoadAvg as Float64 NetBwAvg float64 `json:"-" db:"net_bw_avg"` // NetBwAvg as Float64 NetDataVolTotal float64 `json:"-" db:"net_data_vol_total"` // NetDataVolTotal as Float64 FileBwAvg float64 `json:"-" db:"file_bw_avg"` // FileBwAvg as Float64 diff --git a/web/frontend/package-lock.json b/web/frontend/package-lock.json index 1ca3349..8874caf 100644 --- a/web/frontend/package-lock.json +++ b/web/frontend/package-lock.json @@ -12,7 +12,10 @@ "@rollup/plugin-replace": "^5.0.2", "@urql/svelte": "^4.0.1", "chart.js": "^4.3.3", + "date-fns": "^2.30.0", + "date-fns": "^2.30.0", "graphql": "^16.6.0", + "mathjs": "^12.0.0", "svelte-chartjs": "^3.1.2", "sveltestrap": "^5.11.1", "uplot": "^1.6.24", @@ -42,6 +45,17 @@ } } }, + "node_modules/@babel/runtime": { + "version": "7.23.5", + "resolved": "https://registry.npmjs.org/@babel/runtime/-/runtime-7.23.5.tgz", + "integrity": "sha512-NdUTHcPe4C99WxPub+K9l9tK5/lV4UXIoaHSYgzco9BCyjKAAwzdBI+wWtYqHt7LJdbo74ZjRPJgzVweq1sz0w==", + "dependencies": { + "regenerator-runtime": "^0.14.0" + }, + "engines": { + "node": ">=6.9.0" + } + }, "node_modules/@jridgewell/gen-mapping": { "version": "0.3.3", "resolved": "https://registry.npmjs.org/@jridgewell/gen-mapping/-/gen-mapping-0.3.3.tgz", @@ -90,9 +104,12 @@ "integrity": "sha512-eF2rxCRulEKXHTRiDrDy6erMYWqNw4LPdQ8UQA4huuxaQsVeRPFl2oM8oDGxMFhJUWZf9McpLtJasDDZb/Bpeg==" }, "node_modules/@jridgewell/trace-mapping": { - "version": "0.3.19", - "resolved": "https://registry.npmjs.org/@jridgewell/trace-mapping/-/trace-mapping-0.3.19.tgz", - "integrity": "sha512-kf37QtfW+Hwx/buWGMPcR60iF9ziHa6r/CZJIHbmcm4+0qrXiVdxegAH0F6yddEVQ7zdkjcGCgCzUu+BcbhQxw==", + "version": "0.3.20", + "resolved": "https://registry.npmjs.org/@jridgewell/trace-mapping/-/trace-mapping-0.3.20.tgz", + "integrity": "sha512-R8LcPeWZol2zR8mmH3JeKQ6QRCFb7XgUhV9ZlGhHLGyg4wpPiPZNQOOWhFZhxKw8u//yTbNGI42Bx/3paXEQ+Q==", + "version": "0.3.20", + "resolved": "https://registry.npmjs.org/@jridgewell/trace-mapping/-/trace-mapping-0.3.20.tgz", + "integrity": "sha512-R8LcPeWZol2zR8mmH3JeKQ6QRCFb7XgUhV9ZlGhHLGyg4wpPiPZNQOOWhFZhxKw8u//yTbNGI42Bx/3paXEQ+Q==", "dev": true, "dependencies": { "@jridgewell/resolve-uri": "^3.1.0", @@ -139,9 +156,12 @@ } }, "node_modules/@rollup/plugin-node-resolve": { - "version": "15.2.1", - "resolved": "https://registry.npmjs.org/@rollup/plugin-node-resolve/-/plugin-node-resolve-15.2.1.tgz", - "integrity": "sha512-nsbUg588+GDSu8/NS8T4UAshO6xeaOfINNuXeVHcKV02LJtoRaM1SiOacClw4kws1SFiNhdLGxlbMY9ga/zs/w==", + "version": "15.2.3", + "resolved": "https://registry.npmjs.org/@rollup/plugin-node-resolve/-/plugin-node-resolve-15.2.3.tgz", + "integrity": "sha512-j/lym8nf5E21LwBT4Df1VD6hRO2L2iwUeUmP7litikRsVp1H6NWx20NEp0Y7su+7XGc476GnXXc4kFeZNGmaSQ==", + "version": "15.2.3", + "resolved": "https://registry.npmjs.org/@rollup/plugin-node-resolve/-/plugin-node-resolve-15.2.3.tgz", + "integrity": 
"sha512-j/lym8nf5E21LwBT4Df1VD6hRO2L2iwUeUmP7litikRsVp1H6NWx20NEp0Y7su+7XGc476GnXXc4kFeZNGmaSQ==", "dev": true, "dependencies": { "@rollup/pluginutils": "^5.0.1", @@ -155,7 +175,8 @@ "node": ">=14.0.0" }, "peerDependencies": { - "rollup": "^2.78.0||^3.0.0" + "rollup": "^2.78.0||^3.0.0||^4.0.0" + "rollup": "^2.78.0||^3.0.0||^4.0.0" }, "peerDependenciesMeta": { "rollup": { @@ -164,18 +185,23 @@ } }, "node_modules/@rollup/plugin-replace": { - "version": "5.0.2", - "resolved": "https://registry.npmjs.org/@rollup/plugin-replace/-/plugin-replace-5.0.2.tgz", - "integrity": "sha512-M9YXNekv/C/iHHK+cvORzfRYfPbq0RDD8r0G+bMiTXjNGKulPnCT9O3Ss46WfhI6ZOCgApOP7xAdmCQJ+U2LAA==", + "version": "5.0.5", + "resolved": "https://registry.npmjs.org/@rollup/plugin-replace/-/plugin-replace-5.0.5.tgz", + "integrity": "sha512-rYO4fOi8lMaTg/z5Jb+hKnrHHVn8j2lwkqwyS4kTRhKyWOLf2wST2sWXr4WzWiTcoHTp2sTjqUbqIj2E39slKQ==", + "version": "5.0.5", + "resolved": "https://registry.npmjs.org/@rollup/plugin-replace/-/plugin-replace-5.0.5.tgz", + "integrity": "sha512-rYO4fOi8lMaTg/z5Jb+hKnrHHVn8j2lwkqwyS4kTRhKyWOLf2wST2sWXr4WzWiTcoHTp2sTjqUbqIj2E39slKQ==", "dependencies": { "@rollup/pluginutils": "^5.0.1", - "magic-string": "^0.27.0" + "magic-string": "^0.30.3" + "magic-string": "^0.30.3" }, "engines": { "node": ">=14.0.0" }, "peerDependencies": { - "rollup": "^1.20.0||^2.0.0||^3.0.0" + "rollup": "^1.20.0||^2.0.0||^3.0.0||^4.0.0" + "rollup": "^1.20.0||^2.0.0||^3.0.0||^4.0.0" }, "peerDependenciesMeta": { "rollup": { @@ -183,10 +209,35 @@ } } }, + "node_modules/@rollup/plugin-replace/node_modules/magic-string": { + "version": "0.30.5", + "resolved": "https://registry.npmjs.org/magic-string/-/magic-string-0.30.5.tgz", + "integrity": "sha512-7xlpfBaQaP/T6Vh8MO/EqXSW5En6INHEvEXQiuff7Gku0PWjU3uf6w/j9o7O+SpB5fOAkrI5HeoNgwjEO0pFsA==", + "dependencies": { + "@jridgewell/sourcemap-codec": "^1.4.15" + }, + "engines": { + "node": ">=12" + } + }, + "node_modules/@rollup/plugin-replace/node_modules/magic-string": { + "version": "0.30.5", + "resolved": "https://registry.npmjs.org/magic-string/-/magic-string-0.30.5.tgz", + "integrity": "sha512-7xlpfBaQaP/T6Vh8MO/EqXSW5En6INHEvEXQiuff7Gku0PWjU3uf6w/j9o7O+SpB5fOAkrI5HeoNgwjEO0pFsA==", + "dependencies": { + "@jridgewell/sourcemap-codec": "^1.4.15" + }, + "engines": { + "node": ">=12" + } + }, "node_modules/@rollup/plugin-terser": { - "version": "0.4.3", - "resolved": "https://registry.npmjs.org/@rollup/plugin-terser/-/plugin-terser-0.4.3.tgz", - "integrity": "sha512-EF0oejTMtkyhrkwCdg0HJ0IpkcaVg1MMSf2olHb2Jp+1mnLM04OhjpJWGma4HobiDTF0WCyViWuvadyE9ch2XA==", + "version": "0.4.4", + "resolved": "https://registry.npmjs.org/@rollup/plugin-terser/-/plugin-terser-0.4.4.tgz", + "integrity": "sha512-XHeJC5Bgvs8LfukDwWZp7yeqin6ns8RTl2B9avbejt6tZqsqvVoWI7ZTQrcNsfKEDWBTnTxM8nMDkO2IFFbd0A==", + "version": "0.4.4", + "resolved": "https://registry.npmjs.org/@rollup/plugin-terser/-/plugin-terser-0.4.4.tgz", + "integrity": "sha512-XHeJC5Bgvs8LfukDwWZp7yeqin6ns8RTl2B9avbejt6tZqsqvVoWI7ZTQrcNsfKEDWBTnTxM8nMDkO2IFFbd0A==", "dev": true, "dependencies": { "serialize-javascript": "^6.0.1", @@ -197,7 +248,8 @@ "node": ">=14.0.0" }, "peerDependencies": { - "rollup": "^2.x || ^3.x" + "rollup": "^2.0.0||^3.0.0||^4.0.0" + "rollup": "^2.0.0||^3.0.0||^4.0.0" }, "peerDependenciesMeta": { "rollup": { @@ -206,9 +258,9 @@ } }, "node_modules/@rollup/pluginutils": { - "version": "5.0.3", - "resolved": "https://registry.npmjs.org/@rollup/pluginutils/-/pluginutils-5.0.3.tgz", - "integrity": 
"sha512-hfllNN4a80rwNQ9QCxhxuHCGHMAvabXqxNdaChUSSadMre7t4iEUI6fFAhBOn/eIYTgYVhBv7vCLsAJ4u3lf3g==", + "version": "5.1.0", + "resolved": "https://registry.npmjs.org/@rollup/pluginutils/-/pluginutils-5.1.0.tgz", + "integrity": "sha512-XTIWOPPcpvyKI6L1NHo0lFlCyznUEyPmPY1mc3KpPVDYulHSTvyeLNVW00QTLIAFNhR3kYnJTQHeGqU4M3n09g==", "dependencies": { "@types/estree": "^1.0.0", "estree-walker": "^2.0.2", @@ -218,7 +270,8 @@ "node": ">=14.0.0" }, "peerDependencies": { - "rollup": "^1.20.0||^2.0.0||^3.0.0" + "rollup": "^1.20.0||^2.0.0||^3.0.0||^4.0.0" + "rollup": "^1.20.0||^2.0.0||^3.0.0||^4.0.0" }, "peerDependenciesMeta": { "rollup": { @@ -227,15 +280,18 @@ } }, "node_modules/@timohausmann/quadtree-js": { - "version": "1.2.5", - "resolved": "https://registry.npmjs.org/@timohausmann/quadtree-js/-/quadtree-js-1.2.5.tgz", - "integrity": "sha512-WcH3pouYtpyLjTCRvNP0WuSV4m7mRyYhLzW44egveFryT7pJhpDsdIJASEe37iCFNA0vmEpqTYGoG0siyXEthA==", + "version": "1.2.6", + "resolved": "https://registry.npmjs.org/@timohausmann/quadtree-js/-/quadtree-js-1.2.6.tgz", + "integrity": "sha512-EoAoLMFV2JfSG8+8XD9xWJQdyvfEB5xNpiQWGD7rTDSbDQQV8IVpkm0uOIxwJZ+1uC9hHKri9GmJ5wBSUO4jfg==", "dev": true }, "node_modules/@types/estree": { - "version": "1.0.1", - "resolved": "https://registry.npmjs.org/@types/estree/-/estree-1.0.1.tgz", - "integrity": "sha512-LG4opVs2ANWZ1TJoKc937iMmNstM/d0ae1vNbnBvBhqCSezgVUOzcLCqbI5elV8Vy6WKwKjaqR+zO9VKirBBCA==" + "version": "1.0.5", + "resolved": "https://registry.npmjs.org/@types/estree/-/estree-1.0.5.tgz", + "integrity": "sha512-/kYRxGDLWzHOB7q+wtSUQlFrtcdUccpfy+X+9iMBpHK8QLLhx2wIPYuS5DYtR9Wa/YlZAbIovy7qVdB1Aq6Lyw==" + "version": "1.0.5", + "resolved": "https://registry.npmjs.org/@types/estree/-/estree-1.0.5.tgz", + "integrity": "sha512-/kYRxGDLWzHOB7q+wtSUQlFrtcdUccpfy+X+9iMBpHK8QLLhx2wIPYuS5DYtR9Wa/YlZAbIovy7qVdB1Aq6Lyw==" }, "node_modules/@types/resolve": { "version": "1.20.2", @@ -244,9 +300,9 @@ "dev": true }, "node_modules/@urql/core": { - "version": "4.1.1", - "resolved": "https://registry.npmjs.org/@urql/core/-/core-4.1.1.tgz", - "integrity": "sha512-iIoAy6BY+BUZZ7KIpnMT7C9q+ULf5ZCVxGe3/i7WZSJBrQa2h1QkIMhL+8fAKmOn9gt83jSIv5drWWnhZ9izEA==", + "version": "4.2.0", + "resolved": "https://registry.npmjs.org/@urql/core/-/core-4.2.0.tgz", + "integrity": "sha512-GRkZ4kECR9UohWAjiSk2UYUetco6/PqSrvyC4AH6g16tyqEShA63M232cfbE1J9XJPaGNjia14Gi+oOqzp144w==", "dependencies": { "@0no-co/graphql.web": "^1.0.1", "wonka": "^6.3.2" @@ -265,9 +321,12 @@ } }, "node_modules/acorn": { - "version": "8.10.0", - "resolved": "https://registry.npmjs.org/acorn/-/acorn-8.10.0.tgz", - "integrity": "sha512-F0SAmZ8iUtS//m8DmCTA0jlh6TDKkHQyK6xc6V4KDTyZKA9dnvX9/3sRTVQrWm79glUAZbnmmNcdYwUIHWVybw==", + "version": "8.11.2", + "resolved": "https://registry.npmjs.org/acorn/-/acorn-8.11.2.tgz", + "integrity": "sha512-nc0Axzp/0FILLEVsm4fNwLCwMttvhEI263QtVPQcbpfZZ3ts0hLsZGOpE6czNlid7CJ9MlyH8reXkpsf3YUY4w==", + "version": "8.11.2", + "resolved": "https://registry.npmjs.org/acorn/-/acorn-8.11.2.tgz", + "integrity": "sha512-nc0Axzp/0FILLEVsm4fNwLCwMttvhEI263QtVPQcbpfZZ3ts0hLsZGOpE6czNlid7CJ9MlyH8reXkpsf3YUY4w==", "dev": true, "bin": { "acorn": "bin/acorn" @@ -310,9 +369,9 @@ } }, "node_modules/chart.js": { - "version": "4.3.3", - "resolved": "https://registry.npmjs.org/chart.js/-/chart.js-4.3.3.tgz", - "integrity": "sha512-aTk7pBw+x6sQYhon/NR3ikfUJuym/LdgpTlgZRe2PaEhjUMKBKyNaFCMVRAyTEWYFNO7qRu7iQVqOw/OqzxZxQ==", + "version": "4.4.1", + "resolved": "https://registry.npmjs.org/chart.js/-/chart.js-4.4.1.tgz", + "integrity": 
"sha512-C74QN1bxwV1v2PEujhmKjOZ7iUM4w6BWs23Md/6aOZZSlwMzeCIDGuZay++rBgChYru7/+QFeoQW0fQoP534Dg==", "dependencies": { "@kurkle/color": "^0.3.0" }, @@ -332,6 +391,38 @@ "integrity": "sha512-W9pAhw0ja1Edb5GVdIF1mjZw/ASI0AlShXM83UUGe2DVr5TdAPEA1OA8m/g8zWp9x6On7gqufY+FatDbC3MDQg==", "dev": true }, + "node_modules/complex.js": { + "version": "2.1.1", + "resolved": "https://registry.npmjs.org/complex.js/-/complex.js-2.1.1.tgz", + "integrity": "sha512-8njCHOTtFFLtegk6zQo0kkVX1rngygb/KQI6z1qZxlFI3scluC+LVTCFbrkWjBv4vvLlbQ9t88IPMC6k95VTTg==", + "engines": { + "node": "*" + }, + "funding": { + "type": "patreon", + "url": "https://www.patreon.com/infusion" + } + }, + "node_modules/date-fns": { + "version": "2.30.0", + "resolved": "https://registry.npmjs.org/date-fns/-/date-fns-2.30.0.tgz", + "integrity": "sha512-fnULvOpxnC5/Vg3NCiWelDsLiUc9bRwAPs/+LfTLNvetFCtCTN+yQz15C/fs4AwX1R9K5GLtLfn8QW+dWisaAw==", + "dependencies": { + "@babel/runtime": "^7.21.0" + }, + "engines": { + "node": ">=0.11" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/date-fns" + } + }, + "node_modules/decimal.js": { + "version": "10.4.3", + "resolved": "https://registry.npmjs.org/decimal.js/-/decimal.js-10.4.3.tgz", + "integrity": "sha512-VBBaLc1MgL5XpzgIP7ny5Z6Nx3UrRkIViUkPUdtl9aya5amy3De1gsUUSB1g3+3sExYNjCAsAznmukyxCb1GRA==" + }, "node_modules/deepmerge": { "version": "4.3.1", "resolved": "https://registry.npmjs.org/deepmerge/-/deepmerge-4.3.1.tgz", @@ -341,11 +432,28 @@ "node": ">=0.10.0" } }, + "node_modules/escape-latex": { + "version": "1.2.0", + "resolved": "https://registry.npmjs.org/escape-latex/-/escape-latex-1.2.0.tgz", + "integrity": "sha512-nV5aVWW1K0wEiUIEdZ4erkGGH8mDxGyxSeqPzRNtWP7ataw+/olFObw7hujFWlVjNsaDFw5VZ5NzVSIqRgfTiw==" + }, "node_modules/estree-walker": { "version": "2.0.2", "resolved": "https://registry.npmjs.org/estree-walker/-/estree-walker-2.0.2.tgz", "integrity": "sha512-Rfkk/Mp/DL7JVje3u18FxFujQlTNR2q6QfMSMB7AvCBx91NGj/ba3kCfza0f6dVDbw7YlRf/nDrn7pQrCCyQ/w==" }, + "node_modules/fraction.js": { + "version": "4.3.4", + "resolved": "https://registry.npmjs.org/fraction.js/-/fraction.js-4.3.4.tgz", + "integrity": "sha512-pwiTgt0Q7t+GHZA4yaLjObx4vXmmdcS0iSJ19o8d/goUGgItX9UZWKWNnLHehxviD8wU2IWRsnR8cD5+yOJP2Q==", + "engines": { + "node": "*" + }, + "funding": { + "type": "patreon", + "url": "https://github.com/sponsors/rawify" + } + }, "node_modules/fs.realpath": { "version": "1.0.0", "resolved": "https://registry.npmjs.org/fs.realpath/-/fs.realpath-1.0.0.tgz", @@ -367,10 +475,20 @@ } }, "node_modules/function-bind": { - "version": "1.1.1", - "resolved": "https://registry.npmjs.org/function-bind/-/function-bind-1.1.1.tgz", - "integrity": "sha512-yIovAzMX49sF8Yl58fSCWJ5svSLuaibPxXQJFLmBObTuCr0Mf1KiPopGM9NiFjiYBCbfaa2Fh6breQ6ANVTI0A==", - "dev": true + "version": "1.1.2", + "resolved": "https://registry.npmjs.org/function-bind/-/function-bind-1.1.2.tgz", + "integrity": "sha512-7XHNxH7qX9xG5mIwxkhumTox/MIRNcOgDrxWsMt2pAr23WHp6MrRlN7FBSFpCpr+oVO0F744iUgR82nJMfG2SA==", + "dev": true, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + "version": "1.1.2", + "resolved": "https://registry.npmjs.org/function-bind/-/function-bind-1.1.2.tgz", + "integrity": "sha512-7XHNxH7qX9xG5mIwxkhumTox/MIRNcOgDrxWsMt2pAr23WHp6MrRlN7FBSFpCpr+oVO0F744iUgR82nJMfG2SA==", + "dev": true, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } }, "node_modules/glob": { "version": "8.1.0", @@ -392,23 +510,32 @@ } }, "node_modules/graphql": { - "version": "16.8.0", - "resolved": 
"https://registry.npmjs.org/graphql/-/graphql-16.8.0.tgz", - "integrity": "sha512-0oKGaR+y3qcS5mCu1vb7KG+a89vjn06C7Ihq/dDl3jA+A8B3TKomvi3CiEcVLJQGalbu8F52LxkOym7U5sSfbg==", + "version": "16.8.1", + "resolved": "https://registry.npmjs.org/graphql/-/graphql-16.8.1.tgz", + "integrity": "sha512-59LZHPdGZVh695Ud9lRzPBVTtlX9ZCV150Er2W43ro37wVof0ctenSaskPPjN7lVTIN8mSZt8PHUNKZuNQUuxw==", + "version": "16.8.1", + "resolved": "https://registry.npmjs.org/graphql/-/graphql-16.8.1.tgz", + "integrity": "sha512-59LZHPdGZVh695Ud9lRzPBVTtlX9ZCV150Er2W43ro37wVof0ctenSaskPPjN7lVTIN8mSZt8PHUNKZuNQUuxw==", "engines": { "node": "^12.22.0 || ^14.16.0 || ^16.0.0 || >=17.0.0" } }, - "node_modules/has": { - "version": "1.0.3", - "resolved": "https://registry.npmjs.org/has/-/has-1.0.3.tgz", - "integrity": "sha512-f2dvO0VU6Oej7RkWJGrehjbzMAjFp5/VKPp5tTpWIV4JHHZK1/BxbFRtf/siA2SWTe09caDmVtYYzWEIbBS4zw==", + "node_modules/hasown": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/hasown/-/hasown-2.0.0.tgz", + "integrity": "sha512-vUptKVTpIJhcczKBbgnS+RtcuYMB8+oNzPK2/Hp3hanz8JmpATdmmgLgSaadVREkDm+e2giHwY3ZRkyjSIDDFA==", + "node_modules/hasown": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/hasown/-/hasown-2.0.0.tgz", + "integrity": "sha512-vUptKVTpIJhcczKBbgnS+RtcuYMB8+oNzPK2/Hp3hanz8JmpATdmmgLgSaadVREkDm+e2giHwY3ZRkyjSIDDFA==", "dev": true, "dependencies": { - "function-bind": "^1.1.1" + "function-bind": "^1.1.2" + "function-bind": "^1.1.2" }, "engines": { - "node": ">= 0.4.0" + "node": ">= 0.4" + "node": ">= 0.4" } }, "node_modules/inflight": { @@ -443,12 +570,16 @@ } }, "node_modules/is-core-module": { - "version": "2.13.0", - "resolved": "https://registry.npmjs.org/is-core-module/-/is-core-module-2.13.0.tgz", - "integrity": "sha512-Z7dk6Qo8pOCp3l4tsX2C5ZVas4V+UxwQodwZhLopL91TX8UyyHEXafPcyoeeWuLrwzHcr3igO78wNLwHJHsMCQ==", + "version": "2.13.1", + "resolved": "https://registry.npmjs.org/is-core-module/-/is-core-module-2.13.1.tgz", + "integrity": "sha512-hHrIjvZsftOsvKSn2TRYl63zvxsgE0K+0mYMoH6gD4omR5IWB2KynivBQczo3+wF1cCkjzvptnI9Q0sPU66ilw==", + "version": "2.13.1", + "resolved": "https://registry.npmjs.org/is-core-module/-/is-core-module-2.13.1.tgz", + "integrity": "sha512-hHrIjvZsftOsvKSn2TRYl63zvxsgE0K+0mYMoH6gD4omR5IWB2KynivBQczo3+wF1cCkjzvptnI9Q0sPU66ilw==", "dev": true, "dependencies": { - "has": "^1.0.3" + "hasown": "^2.0.0" + "hasown": "^2.0.0" }, "funding": { "url": "https://github.com/sponsors/ljharb" @@ -469,10 +600,17 @@ "@types/estree": "*" } }, + "node_modules/javascript-natural-sort": { + "version": "0.7.1", + "resolved": "https://registry.npmjs.org/javascript-natural-sort/-/javascript-natural-sort-0.7.1.tgz", + "integrity": "sha512-nO6jcEfZWQXDhOiBtG2KvKyEptz7RVbpGP4vTD2hLBdmNQSsCiicO2Ioinv6UI4y9ukqnBpy+XZ9H6uLNgJTlw==" + }, "node_modules/magic-string": { "version": "0.27.0", "resolved": "https://registry.npmjs.org/magic-string/-/magic-string-0.27.0.tgz", "integrity": "sha512-8UnnX2PeRAPZuN12svgR9j7M1uWMovg/CEnIwIG0LFkXSJJe4PdfUGiTGl8V9bsBHFUtfVINcSyYxd7q+kx9fA==", + "dev": true, + "dev": true, "dependencies": { "@jridgewell/sourcemap-codec": "^1.4.13" }, @@ -480,6 +618,28 @@ "node": ">=12" } }, + "node_modules/mathjs": { + "version": "12.0.0", + "resolved": "https://registry.npmjs.org/mathjs/-/mathjs-12.0.0.tgz", + "integrity": "sha512-Oz3swPplNPe7taoP6WrkKhQzhDE2SwvOgLzu8H3EN+hEadw2GjEJUm6Xl+hrioHoB8g2BYb3gfw1glSzhdBKYw==", + "dependencies": { + "@babel/runtime": "^7.23.2", + "complex.js": "^2.1.1", + "decimal.js": "^10.4.3", + "escape-latex": "^1.2.0", + 
"fraction.js": "4.3.4", + "javascript-natural-sort": "^0.7.1", + "seedrandom": "^3.0.5", + "tiny-emitter": "^2.1.0", + "typed-function": "^4.1.1" + }, + "bin": { + "mathjs": "bin/cli.js" + }, + "engines": { + "node": ">= 18" + } + }, "node_modules/minimatch": { "version": "5.1.6", "resolved": "https://registry.npmjs.org/minimatch/-/minimatch-5.1.6.tgz", @@ -527,10 +687,23 @@ "safe-buffer": "^5.1.0" } }, + "node_modules/regenerator-runtime": { + "version": "0.14.0", + "resolved": "https://registry.npmjs.org/regenerator-runtime/-/regenerator-runtime-0.14.0.tgz", + "integrity": "sha512-srw17NI0TUWHuGa5CFGGmhfNIeja30WMBfbslPNhf6JrqQlLN5gcrvig1oqPxiVaXb0oW0XRKtH6Nngs5lKCIA==" + }, + "node_modules/regenerator-runtime": { + "version": "0.14.0", + "resolved": "https://registry.npmjs.org/regenerator-runtime/-/regenerator-runtime-0.14.0.tgz", + "integrity": "sha512-srw17NI0TUWHuGa5CFGGmhfNIeja30WMBfbslPNhf6JrqQlLN5gcrvig1oqPxiVaXb0oW0XRKtH6Nngs5lKCIA==" + }, "node_modules/resolve": { - "version": "1.22.4", - "resolved": "https://registry.npmjs.org/resolve/-/resolve-1.22.4.tgz", - "integrity": "sha512-PXNdCiPqDqeUou+w1C2eTQbNfxKSuMxqTCuvlmmMsk1NWHL5fRrhY6Pl0qEYYc6+QqGClco1Qj8XnjPego4wfg==", + "version": "1.22.8", + "resolved": "https://registry.npmjs.org/resolve/-/resolve-1.22.8.tgz", + "integrity": "sha512-oKWePCxqpd6FlLvGV1VU0x7bkPmmCNolxzjMf4NczoDnQcIWrAF+cPtZn5i6n+RfD2d9i0tzpKnG6Yk168yIyw==", + "version": "1.22.8", + "resolved": "https://registry.npmjs.org/resolve/-/resolve-1.22.8.tgz", + "integrity": "sha512-oKWePCxqpd6FlLvGV1VU0x7bkPmmCNolxzjMf4NczoDnQcIWrAF+cPtZn5i6n+RfD2d9i0tzpKnG6Yk168yIyw==", "dev": true, "dependencies": { "is-core-module": "^2.13.0", @@ -554,9 +727,12 @@ } }, "node_modules/rollup": { - "version": "3.28.1", - "resolved": "https://registry.npmjs.org/rollup/-/rollup-3.28.1.tgz", - "integrity": "sha512-R9OMQmIHJm9znrU3m3cpE8uhN0fGdXiawME7aZIpQqvpS/85+Vt1Hq1/yVIcYfOmaQiHjvXkQAoJukvLpau6Yw==", + "version": "3.29.4", + "resolved": "https://registry.npmjs.org/rollup/-/rollup-3.29.4.tgz", + "integrity": "sha512-oWzmBZwvYrU0iJHtDmhsm662rC15FRXmcjCk1xD771dFDx5jJ02ufAQQTn0etB2emNk4J9EZg/yWKpsn9BWGRw==", + "version": "3.29.4", + "resolved": "https://registry.npmjs.org/rollup/-/rollup-3.29.4.tgz", + "integrity": "sha512-oWzmBZwvYrU0iJHtDmhsm662rC15FRXmcjCk1xD771dFDx5jJ02ufAQQTn0etB2emNk4J9EZg/yWKpsn9BWGRw==", "devOptional": true, "bin": { "rollup": "dist/bin/rollup" @@ -570,9 +746,12 @@ } }, "node_modules/rollup-plugin-css-only": { - "version": "4.3.0", - "resolved": "https://registry.npmjs.org/rollup-plugin-css-only/-/rollup-plugin-css-only-4.3.0.tgz", - "integrity": "sha512-BsiCqJJQzZh2lQiHY5irejRoJ3I1EUFHEi5PjVqsr+EmOh54YrWVwd3YZEXnQJ2+fzlhif0YM/Kf0GuH90GAdQ==", + "version": "4.5.2", + "resolved": "https://registry.npmjs.org/rollup-plugin-css-only/-/rollup-plugin-css-only-4.5.2.tgz", + "integrity": "sha512-7rj9+jB17Pz8LNcPgtMUb16JcgD8lxQMK9HcGfAVhMK3na/WXes3oGIo5QsrQQVqtgAU6q6KnQNXJrYunaUIQQ==", + "version": "4.5.2", + "resolved": "https://registry.npmjs.org/rollup-plugin-css-only/-/rollup-plugin-css-only-4.5.2.tgz", + "integrity": "sha512-7rj9+jB17Pz8LNcPgtMUb16JcgD8lxQMK9HcGfAVhMK3na/WXes3oGIo5QsrQQVqtgAU6q6KnQNXJrYunaUIQQ==", "dev": true, "dependencies": { "@rollup/pluginutils": "5" @@ -581,7 +760,8 @@ "node": ">=14" }, "peerDependencies": { - "rollup": "<4" + "rollup": "<5" + "rollup": "<5" } }, "node_modules/rollup-plugin-svelte": { @@ -634,6 +814,11 @@ } ] }, + "node_modules/seedrandom": { + "version": "3.0.5", + "resolved": 
"https://registry.npmjs.org/seedrandom/-/seedrandom-3.0.5.tgz", + "integrity": "sha512-8OwmbklUNzwezjGInmZ+2clQmExQPvomqjL7LFqOYqtmuxRgQYqOD3mHaU+MvZn5FLUeVxVfQjwLZW/n/JFuqg==" + }, "node_modules/serialize-javascript": { "version": "6.0.1", "resolved": "https://registry.npmjs.org/serialize-javascript/-/serialize-javascript-6.0.1.tgz", @@ -644,9 +829,12 @@ } }, "node_modules/smob": { - "version": "1.4.0", - "resolved": "https://registry.npmjs.org/smob/-/smob-1.4.0.tgz", - "integrity": "sha512-MqR3fVulhjWuRNSMydnTlweu38UhQ0HXM4buStD/S3mc/BzX3CuM9OmhyQpmtYCvoYdl5ris6TI0ZqH355Ymqg==", + "version": "1.4.1", + "resolved": "https://registry.npmjs.org/smob/-/smob-1.4.1.tgz", + "integrity": "sha512-9LK+E7Hv5R9u4g4C3p+jjLstaLe11MDsL21UpYaCNmapvMkYhqCV4A/f/3gyH8QjMyh6l68q9xC85vihY9ahMQ==", + "version": "1.4.1", + "resolved": "https://registry.npmjs.org/smob/-/smob-1.4.1.tgz", + "integrity": "sha512-9LK+E7Hv5R9u4g4C3p+jjLstaLe11MDsL21UpYaCNmapvMkYhqCV4A/f/3gyH8QjMyh6l68q9xC85vihY9ahMQ==", "dev": true }, "node_modules/source-map": { @@ -698,9 +886,12 @@ } }, "node_modules/sveltestrap": { - "version": "5.11.1", - "resolved": "https://registry.npmjs.org/sveltestrap/-/sveltestrap-5.11.1.tgz", - "integrity": "sha512-FIvPIEU1VolqMN1wi2XrC8aehWVbIJEST7zPfPbOUUfPimyx9giN4nA3We5wkXrBUaifXA8CSIwuHFvf3CmYQw==", + "version": "5.11.2", + "resolved": "https://registry.npmjs.org/sveltestrap/-/sveltestrap-5.11.2.tgz", + "integrity": "sha512-fkLqIUh2QHBoom7v6kHI85grLeOqplmvtnTiA5Ck2gchzpVmwXWaWpf8qWhCFxfDuMhJBPlWbJvtSmwpDEowrg==", + "version": "5.11.2", + "resolved": "https://registry.npmjs.org/sveltestrap/-/sveltestrap-5.11.2.tgz", + "integrity": "sha512-fkLqIUh2QHBoom7v6kHI85grLeOqplmvtnTiA5Ck2gchzpVmwXWaWpf8qWhCFxfDuMhJBPlWbJvtSmwpDEowrg==", "dependencies": { "@popperjs/core": "^2.11.8" }, @@ -709,9 +900,9 @@ } }, "node_modules/terser": { - "version": "5.19.2", - "resolved": "https://registry.npmjs.org/terser/-/terser-5.19.2.tgz", - "integrity": "sha512-qC5+dmecKJA4cpYxRa5aVkKehYsQKc+AHeKl0Oe62aYjBL8ZA33tTljktDHJSaxxMnbI5ZYw+o/S2DxxLu8OfA==", + "version": "5.25.0", + "resolved": "https://registry.npmjs.org/terser/-/terser-5.25.0.tgz", + "integrity": "sha512-we0I9SIsfvNUMP77zC9HG+MylwYYsGFSBG8qm+13oud2Yh+O104y614FRbyjpxys16jZwot72Fpi827YvGzuqg==", "dev": true, "dependencies": { "@jridgewell/source-map": "^0.3.3", @@ -726,10 +917,26 @@ "node": ">=10" } }, + "node_modules/tiny-emitter": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/tiny-emitter/-/tiny-emitter-2.1.0.tgz", + "integrity": "sha512-NB6Dk1A9xgQPMoGqC5CVXn123gWyte215ONT5Pp5a0yt4nlEoO1ZWeCwpncaekPHXO60i47ihFnZPiRPjRMq4Q==" + }, + "node_modules/typed-function": { + "version": "4.1.1", + "resolved": "https://registry.npmjs.org/typed-function/-/typed-function-4.1.1.tgz", + "integrity": "sha512-Pq1DVubcvibmm8bYcMowjVnnMwPVMeh0DIdA8ad8NZY2sJgapANJmiigSUwlt+EgXxpfIv8MWrQXTIzkfYZLYQ==", + "engines": { + "node": ">= 14" + } + }, "node_modules/uplot": { - "version": "1.6.25", - "resolved": "https://registry.npmjs.org/uplot/-/uplot-1.6.25.tgz", - "integrity": "sha512-eWLAhEaGtIcVBiS67mC2UC0yV+G6eYLS2rU67N4F2JVWjt7uBMg4xKXUYGW0dEz9G+m7fNatjCVXHts4gjyuMQ==" + "version": "1.6.27", + "resolved": "https://registry.npmjs.org/uplot/-/uplot-1.6.27.tgz", + "integrity": "sha512-78U4ss5YeU65kQkOC/QAKiyII+4uo+TYUJJKvuxRzeSpk/s5sjpY1TL0agkmhHBBShpvLtmbHIEiM7+C5lBULg==" + "version": "1.6.27", + "resolved": "https://registry.npmjs.org/uplot/-/uplot-1.6.27.tgz", + "integrity": 
"sha512-78U4ss5YeU65kQkOC/QAKiyII+4uo+TYUJJKvuxRzeSpk/s5sjpY1TL0agkmhHBBShpvLtmbHIEiM7+C5lBULg==" }, "node_modules/wonka": { "version": "6.3.4", diff --git a/web/frontend/package.json b/web/frontend/package.json index 3e77474..cc84e1a 100644 --- a/web/frontend/package.json +++ b/web/frontend/package.json @@ -20,7 +20,9 @@ "@rollup/plugin-replace": "^5.0.2", "@urql/svelte": "^4.0.1", "chart.js": "^4.3.3", + "date-fns": "^2.30.0", "graphql": "^16.6.0", + "mathjs": "^12.0.0", "svelte-chartjs": "^3.1.2", "sveltestrap": "^5.11.1", "uplot": "^1.6.24", diff --git a/web/frontend/src/Analysis.root.svelte b/web/frontend/src/Analysis.root.svelte index aa4ae37..163d511 100644 --- a/web/frontend/src/Analysis.root.svelte +++ b/web/frontend/src/Analysis.root.svelte @@ -389,9 +389,10 @@ + import { Modal, ModalBody, ModalHeader, ModalFooter, + Button, ListGroup, ListGroupItem } from 'sveltestrap' + import { gql, getContextClient , mutationStore } from '@urql/svelte' + + export let cluster + export let metricsInHistograms + export let isOpen + + let availableMetrics = ['cpu_load', 'flops_any', 'mem_used', 'mem_bw'] // 'net_bw', 'file_bw' + let pendingMetrics = [...metricsInHistograms] // Copy + const client = getContextClient() + + const updateConfigurationMutation = ({ name, value }) => { + return mutationStore({ + client: client, + query: gql`mutation($name: String!, $value: String!) { + updateConfiguration(name: $name, value: $value) + }`, + variables: { name, value } + }) + } + + function updateConfiguration(data) { + updateConfigurationMutation({ + name: data.name, + value: JSON.stringify(data.value) + }).subscribe(res => { + if (res.fetching === false && res.error) { + throw res.error + // console.log('Error on subscription: ' + res.error) + } + }) + } + + function closeAndApply() { + metricsInHistograms = [...pendingMetrics] // Set for parent + isOpen = !isOpen + updateConfiguration({ + name: cluster ? `user_view_histogramMetrics:${cluster}` : 'user_view_histogramMetrics', + value: metricsInHistograms + }) + } + + + (isOpen = !isOpen)}> + + Select metrics presented in histograms + + + + {#each availableMetrics as metric (metric)} + + + {metric} + + {/each} + + + + + + + diff --git a/web/frontend/src/Job.root.svelte b/web/frontend/src/Job.root.svelte index 93c5873..42cfee8 100644 --- a/web/frontend/src/Job.root.svelte +++ b/web/frontend/src/Job.root.svelte @@ -27,6 +27,7 @@ import TagManagement from "./TagManagement.svelte"; import MetricSelection from "./MetricSelection.svelte"; import StatsTable from "./StatsTable.svelte"; + import JobFootprint from "./JobFootprint.svelte"; import { getContext } from "svelte"; export let dbid; @@ -46,7 +47,8 @@ resources { hostname, hwthreads, accelerators }, metaData, userData { name, email }, - concurrentJobs { items { id, jobId }, count, listQuery } + concurrentJobs { items { id, jobId }, count, listQuery }, + flopsAnyAvg, memBwAvg, loadAvg } `); @@ -93,7 +95,9 @@ startFetching( job, [...toFetch], - job.numNodes > 2 ? ["node"] : ["node", "core"] + job.numNodes > 2 + ? ["node"] + : ["node", "socket", "core"] ); } else { // Accels and not on node scope @@ -102,7 +106,7 @@ [...toFetch], job.numNodes > 2 ? ["node", "accelerator"] - : ["node", "accelerator", "core"] + : ["node", "accelerator", "socket", "core"] ); } @@ -132,7 +136,9 @@ let plots = {}, jobTags, - statsTable; + statsTable, + jobFootprint; + $: document.title = $initq.fetching ? "Loading..." 
: $initq.error @@ -200,6 +206,17 @@ {/if} + {#if $jobMetrics.data} + {#key $jobMetrics.data} + + + + {/key} + {/if} {#if $jobMetrics.data && $initq.data} {#if $initq.data.job.concurrentJobs != null && $initq.data.job.concurrentJobs.items.length != 0} {#if authlevel > roles.manager} @@ -390,8 +407,6 @@ bind:this={statsTable} job={$initq.data.job} jobMetrics={$jobMetrics.data.jobMetrics} - accMetrics={accMetrics} - accNodeOnly={accNodeOnly} /> {/key} {/if} diff --git a/web/frontend/src/JobFootprint.svelte b/web/frontend/src/JobFootprint.svelte new file mode 100644 index 0000000..e860e04 --- /dev/null +++ b/web/frontend/src/JobFootprint.svelte @@ -0,0 +1,232 @@ + + + + + + {#if view === 'job'} + + + Core Metrics Footprint + + + {/if} + + {#each footprintData as fpd, index} +
+
 {fpd.name}
+
+
+ + {#if fpd.impact === 3 || fpd.impact === -1} + + {:else if fpd.impact === 2} + + {/if} + + {#if fpd.impact === 3} + + {:else if fpd.impact === 2} + + {:else if fpd.impact === 1} + + {:else if fpd.impact === 0} + + {:else if fpd.impact === -1} + + {/if} +
+
+ + {fpd.avg} / {fpd.max} {fpd.unit}   +
+
+ {fpd.message} +
+
+ +
+ {/each} + {#if job?.metaData?.message} +
+ {@html job.metaData.message} + {/if} +
+
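The footprint card above colors each metric by an `fpd.impact` level (0 = good through 3 = severe, with -1 for special cases), derived in the component's script from the job's measured average and the metric thresholds (normal, caution, alert, peak) defined in the cluster configuration. A minimal sketch of that kind of classification follows; the threshold values, the `lowerIsBetter` switch, and the helper names are hypothetical and only illustrate the idea, not the actual JobFootprint.svelte logic.

```go
package main

import "fmt"

// thresholds mirrors the normal/caution/alert/peak values a cluster's
// metricConfig provides; the numbers used below are made up.
type thresholds struct {
	Peak    float64
	Normal  float64
	Caution float64
	Alert   float64
}

// impact ranks a measured average: 3 = severe, 2 = caution, 1 = okay, 0 = good.
func impact(avg float64, t thresholds, lowerIsBetter bool) int {
	if lowerIsBetter { // e.g. mem_used: exceeding the alert threshold is bad
		switch {
		case avg > t.Alert:
			return 3
		case avg > t.Caution:
			return 2
		default:
			return 1
		}
	}
	// higher is better, e.g. flops_any or mem_bw
	switch {
	case avg < t.Alert:
		return 3
	case avg < t.Caution:
		return 2
	case avg < t.Normal:
		return 1
	default:
		return 0
	}
}

func main() {
	flops := thresholds{Peak: 944, Normal: 245, Caution: 128, Alert: 64}
	fmt.Println(impact(100, flops, false)) // 2: below the caution threshold
}
```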
+ + diff --git a/web/frontend/src/Jobs.root.svelte b/web/frontend/src/Jobs.root.svelte index 07094b8..d7cbf37 100644 --- a/web/frontend/src/Jobs.root.svelte +++ b/web/frontend/src/Jobs.root.svelte @@ -23,6 +23,9 @@ let metrics = filterPresets.cluster ? ccconfig[`plot_list_selectedMetrics:${filterPresets.cluster}`] || ccconfig.plot_list_selectedMetrics : ccconfig.plot_list_selectedMetrics + let showFootprint = filterPresets.cluster + ? !!ccconfig[`plot_list_showFootprint:${filterPresets.cluster}`] + : !!ccconfig.plot_list_showFootprint let selectedCluster = filterPresets?.cluster ? filterPresets.cluster : null // The filterPresets are handled by the Filters component, @@ -81,7 +84,8 @@ bind:metrics={metrics} bind:sorting={sorting} bind:matchedJobs={matchedJobs} - bind:this={jobList} /> + bind:this={jobList} + bind:showFootprint={showFootprint} /> @@ -93,4 +97,6 @@ bind:cluster={selectedCluster} configName="plot_list_selectedMetrics" bind:metrics={metrics} - bind:isOpen={isMetricsSelectionOpen} /> + bind:isOpen={isMetricsSelectionOpen} + bind:showFootprint={showFootprint} + view='list'/> diff --git a/web/frontend/src/MetricSelection.svelte b/web/frontend/src/MetricSelection.svelte index 59fe263..5f55cb7 100644 --- a/web/frontend/src/MetricSelection.svelte +++ b/web/frontend/src/MetricSelection.svelte @@ -17,12 +17,15 @@ export let configName export let allMetrics = null export let cluster = null + export let showFootprint + export let view = 'job' const clusters = getContext('clusters'), onInit = getContext('on-init') let newMetricsOrder = [] let unorderedMetrics = [...metrics] + let pendingShowFootprint = !!showFootprint onInit(() => { if (allMetrics == null) allMetrics = new Set() @@ -90,6 +93,8 @@ metrics = newMetricsOrder.filter(m => unorderedMetrics.includes(m)) isOpen = false + showFootprint = !!pendingShowFootprint + updateConfigurationMutation({ name: cluster == null ? configName : `${configName}:${cluster}`, value: JSON.stringify(metrics) @@ -99,6 +104,16 @@ // console.log('Error on subscription: ' + res.error) } }) + + updateConfigurationMutation({ + name: cluster == null ? 'plot_list_showFootprint' : `plot_list_showFootprint:${cluster}`, + value: JSON.stringify(showFootprint) + }).subscribe(res => { + if (res.fetching === false && res.error) { + console.log('Error on footprint subscription: ' + res.error) + throw res.error + } + }) } @@ -121,6 +136,12 @@ + {#if view === 'list'} +
  • + Show Footprint +
  • +
    + {/if} {#each newMetricsOrder as metric, index (metric)}
  • m.name))].sort(), scopesForMetric = (metric) => jobMetrics @@ -23,17 +21,23 @@ || getContext('cc-config')['job_view_nodestats_selectedMetrics'] for (let metric of allMetrics) { - // Not Exclusive or Single Node: Get maxScope() - // No Accelerators in Job and not Acc-Metric: Use 'core' - // Accelerator Metric available on accelerator scope: Use 'accelerator' - // Accelerator Metric only on node scope: Fallback to 'node' - selectedScopes[metric] = (job.exclusive != 1 || job.numNodes == 1) ? - (job.numAccs != 0 && accMetrics.includes(metric)) ? - accNodeOnly ? - 'node' - : 'accelerator' - : 'core' - : maxScope(scopesForMetric(metric)) + // Shared or single-node job: select the smallest available scope granularity + // -> Else (exclusive multi-node): use maxScope() directly (mostly: node) + const availableScopes = scopesForMetric(metric) + if (job.exclusive != 1 || job.numNodes == 1) { + if (availableScopes.includes('accelerator')) { + selectedScopes[metric] = 'accelerator' + } else if (availableScopes.includes('core')) { + selectedScopes[metric] = 'core' + } else if (availableScopes.includes('socket')) { + selectedScopes[metric] = 'socket' + } else { + selectedScopes[metric] = 'node' + } + } else { + selectedScopes[metric] = maxScope(availableScopes) + } + sorting[metric] = { min: { dir: 'up', active: false }, avg: { dir: 'up', active: false }, @@ -84,8 +88,7 @@ {metric} +
    h
    @@ -46,7 +56,49 @@
    - + +
    +
    m
    +
    +
    + + +
    + +

    Duration less than

    + + +
    + +
    +
    h
    +
    +
    + + +
    + +
    +
    m
    +
    +
    + +
    +
    + +

    Duration between

    + + +
    + +
    +
    h
    +
    +
    + + +
    +
    m
    @@ -57,7 +109,7 @@
    - +
    h
    @@ -65,7 +117,7 @@
    - +
    m
    @@ -77,19 +129,30 @@ - + }}>Reset Values + diff --git a/web/frontend/src/filters/Filters.svelte b/web/frontend/src/filters/Filters.svelte index 38d7e7a..49eaed6 100644 --- a/web/frontend/src/filters/Filters.svelte +++ b/web/frontend/src/filters/Filters.svelte @@ -41,7 +41,7 @@ states: filterPresets.states || filterPresets.state ? [filterPresets.state].flat() : allJobStates, startTime: filterPresets.startTime || { from: null, to: null }, tags: filterPresets.tags || [], - duration: filterPresets.duration || { from: null, to: null }, + duration: filterPresets.duration || { lessThan: null, moreThan: null, from: null, to: null }, jobId: filterPresets.jobId || '', arrayJobId: filterPresets.arrayJobId || null, user: filterPresets.user || '', @@ -88,6 +88,10 @@ items.push({ tags: filters.tags }) if (filters.duration.from || filters.duration.to) items.push({ duration: { from: filters.duration.from, to: filters.duration.to } }) + if (filters.duration.lessThan) + items.push({ duration: { from: 0, to: filters.duration.lessThan } }) + if (filters.duration.moreThan) + items.push({ duration: { from: filters.duration.moreThan, to: 604800 } }) // 7 days to include special jobs with long runtimes if (filters.jobId) items.push({ jobId: { [filters.jobIdMatch]: filters.jobId } }) if (filters.arrayJobId != null) @@ -144,6 +148,10 @@ opts.push(`tag=${tag}`) if (filters.duration.from && filters.duration.to) opts.push(`duration=${filters.duration.from}-${filters.duration.to}`) + if (filters.duration.lessThan) + opts.push(`duration=0-${filters.duration.lessThan}`) + if (filters.duration.moreThan) + opts.push(`duration=${filters.duration.moreThan}-604800`) if (filters.numNodes.from && filters.numNodes.to) opts.push(`numNodes=${filters.numNodes.from}-${filters.numNodes.to}`) if (filters.numAccelerators.from && filters.numAccelerators.to) @@ -267,6 +275,18 @@ {/if} + {#if filters.duration.lessThan} + (isDurationOpen = true)}> + Duration less than {Math.floor(filters.duration.lessThan / 3600)}h:{Math.floor(filters.duration.lessThan % 3600 / 60)}m + + {/if} + + {#if filters.duration.moreThan} + (isDurationOpen = true)}> + Duration more than {Math.floor(filters.duration.moreThan / 3600)}h:{Math.floor(filters.duration.moreThan % 3600 / 60)}m + + {/if} + {#if filters.tags.length != 0} (isTagsOpen = true)}> {#each filters.tags as tagId} @@ -325,6 +345,8 @@ update()} /> diff --git a/web/frontend/src/filters/StartTime.svelte b/web/frontend/src/filters/StartTime.svelte index c89851d..59f8513 100644 --- a/web/frontend/src/filters/StartTime.svelte +++ b/web/frontend/src/filters/StartTime.svelte @@ -1,5 +1,6 @@ @@ -73,7 +69,7 @@ on:click={() => { isOpen = false from = toRFC3339(pendingFrom) - to = toRFC3339(pendingTo, 59) + to = toRFC3339(pendingTo, '59') dispatch('update', { from, to }) }}> Close & Apply diff --git a/web/frontend/src/joblist/JobInfo.svelte b/web/frontend/src/joblist/JobInfo.svelte index 83841c6..b7ca32a 100644 --- a/web/frontend/src/joblist/JobInfo.svelte +++ b/web/frontend/src/joblist/JobInfo.svelte @@ -28,6 +28,17 @@ return `${hours}:${('0' + minutes).slice(-2)}:${('0' + seconds).slice(-2)}`; } + function getStateColor(state) { + switch (state) { + case 'running': + return 'success' + case 'completed': + return 'primary' + default: + return 'danger' + } + } +
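The duration filter rework above adds "less than" and "more than" modes alongside the existing range mode. Filters.svelte reduces all three to a plain `{from, to}` range in seconds, using 0 as the lower bound and 604800 s (7 days) as the upper bound so long-running special jobs still match. A small sketch of that conversion, with hypothetical type and function names rather than the actual cc-backend API:

```go
package main

import "fmt"

// durationRange stands in for the {from, to} second range the GraphQL
// duration filter expects; the name is illustrative.
type durationRange struct {
	From int // seconds
	To   int // seconds
}

const sevenDays = 7 * 24 * 3600 // 604800 s, cap used for "more than"

func lessThan(hours, minutes int) durationRange {
	return durationRange{From: 0, To: hours*3600 + minutes*60}
}

func moreThan(hours, minutes int) durationRange {
	return durationRange{From: hours*3600 + minutes*60, To: sevenDays}
}

func between(fromH, fromM, toH, toM int) durationRange {
	return durationRange{From: fromH*3600 + fromM*60, To: toH*3600 + toM*60}
}

func main() {
	fmt.Println(lessThan(6, 30))     // {0 23400}
	fmt.Println(moreThan(24, 0))     // {86400 604800}
	fmt.Println(between(1, 0, 2, 0)) // {3600 7200}
}
```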
    @@ -86,12 +97,7 @@

    Start: {(new Date(job.startTime)).toLocaleString()}
    - Duration: {formatDuration(job.duration)} - {#if job.state == 'running'} - running - {:else if job.state != 'completed'} - {job.state} - {/if} + Duration: {formatDuration(job.duration)} {job.state} {#if job.walltime}
    Walltime: {formatDuration(job.walltime)} diff --git a/web/frontend/src/joblist/JobList.svelte b/web/frontend/src/joblist/JobList.svelte index 02caf3f..5f8d89b 100644 --- a/web/frontend/src/joblist/JobList.svelte +++ b/web/frontend/src/joblist/JobList.svelte @@ -28,6 +28,7 @@ export let sorting = { field: "startTime", order: "DESC" }; export let matchedJobs = 0; export let metrics = ccconfig.plot_list_selectedMetrics; + export let showFootprint; let itemsPerPage = ccconfig.plot_list_jobsPerPage; let page = 1; @@ -73,6 +74,9 @@ name } metaData + flopsAnyAvg + memBwAvg + loadAvg } count } @@ -89,7 +93,7 @@ // Force refresh list with existing unchanged variables (== usually would not trigger reactivity) export function refresh() { - queryStore({ + jobs = queryStore({ client: client, query: query, variables: { paging, sorting, filter }, @@ -134,12 +138,19 @@ }) }; + let plotWidth = null; let tableWidth = null; let jobInfoColumnWidth = 250; - $: plotWidth = Math.floor( - (tableWidth - jobInfoColumnWidth) / metrics.length - 10 - ); + $: if (showFootprint) { + plotWidth = Math.floor( + (tableWidth - jobInfoColumnWidth) / (metrics.length + 1) - 10 + ) + } else { + plotWidth = Math.floor( + (tableWidth - jobInfoColumnWidth) / metrics.length - 10 + ) + } let headerPaddingTop = 0; stickyHeader( @@ -160,6 +171,15 @@ > Job Info + {#if showFootprint} + + Job Footprint + + {/if} {#each metrics as metric (metric)} {:else if $jobs.data && $initialized} {#each $jobs.data.jobs.items as job (job)} - + {:else} diff --git a/web/frontend/src/joblist/Row.svelte b/web/frontend/src/joblist/Row.svelte index 2117b91..71bc805 100644 --- a/web/frontend/src/joblist/Row.svelte +++ b/web/frontend/src/joblist/Row.svelte @@ -14,22 +14,28 @@ import { Card, Spinner } from "sveltestrap"; import MetricPlot from "../plots/MetricPlot.svelte"; import JobInfo from "./JobInfo.svelte"; + import JobFootprint from "../JobFootprint.svelte"; import { maxScope, checkMetricDisabled } from "../utils.js"; export let job; export let metrics; export let plotWidth; export let plotHeight = 275; + export let showFootprint; let { id } = job; let scopes = [job.numNodes == 1 ? "core" : "node"]; + function distinct(value, index, array) { + return array.indexOf(value) === index; + } + const cluster = getContext("clusters").find((c) => c.name == job.cluster); const metricConfig = getContext("metrics"); // Get all MetricConfs which include subCluster-specific settings for this job const client = getContextClient(); const query = gql` - query ($id: ID!, $metrics: [String!]!, $scopes: [MetricScope!]!) { - jobMetrics(id: $id, metrics: $metrics, scopes: $scopes) { + query ($id: ID!, $queryMetrics: [String!]!, $scopes: [MetricScope!]!) { + jobMetrics(id: $id, metrics: $queryMetrics, scopes: $scopes) { name scope metric { @@ -61,14 +67,24 @@ $: metricsQuery = queryStore({ client: client, query: query, - variables: { id, metrics, scopes } + variables: { id, queryMetrics, scopes } }); - function refresh() { - queryStore({ + let queryMetrics = null + $: if (showFootprint) { + queryMetrics = ['cpu_load', 'flops_any', 'mem_used', 'mem_bw', 'acc_utilization', ...metrics].filter(distinct) + scopes = ["node"] + } else { + queryMetrics = [...metrics] + scopes = [job.numNodes == 1 ? 
"core" : "node"] + } + + export function refresh() { + metricsQuery = queryStore({ client: client, query: query, - variables: { id, metrics, scopes } + variables: { id, queryMetrics, scopes }, + // requestPolicy: 'network-only' // use default cache-first for refresh }); } @@ -121,6 +137,16 @@ {:else} + {#if showFootprint} + + + + {/if} {#each sortAndSelectScope($metricsQuery.data.jobMetrics) as metric, i (metric || i)} diff --git a/web/frontend/src/plots/Histogram.svelte b/web/frontend/src/plots/Histogram.svelte index d3e1aaa..499ea4f 100644 --- a/web/frontend/src/plots/Histogram.svelte +++ b/web/frontend/src/plots/Histogram.svelte @@ -11,6 +11,7 @@ import { Card } from 'sveltestrap' export let data + export let usesBins = false export let width = 500 export let height = 300 export let title = '' @@ -160,6 +161,14 @@ series: [ { label: xunit !== '' ? xunit : null, + value: (u, ts, sidx, didx) => { + if (usesBins) { + const min = u.data[sidx][didx - 1] ? u.data[sidx][didx - 1] : 0 + const max = u.data[sidx][didx] + ts = min + ' - ' + max // narrow spaces + } + return ts + } }, Object.assign({ label: yunit !== '' ? yunit : null, diff --git a/web/frontend/src/plots/MetricPlot.svelte b/web/frontend/src/plots/MetricPlot.svelte index 17eec5f..8d7825d 100644 --- a/web/frontend/src/plots/MetricPlot.svelte +++ b/web/frontend/src/plots/MetricPlot.svelte @@ -161,8 +161,8 @@ ? (statisticsSeries.max.reduce((max, x) => Math.max(max, x), thresholds.normal) || thresholds.normal) : (series.reduce((max, series) => Math.max(max, series.statistics?.max), thresholds.normal) || thresholds.normal) - if (maxY >= (10 * thresholds.normal)) { // Hard y-range render limit if outliers in series data - maxY = (10 * thresholds.normal) + if (maxY >= (10 * thresholds.peak)) { // Hard y-range render limit if outliers in series data + maxY = (10 * thresholds.peak) } } @@ -390,12 +390,12 @@ if (scope == 'node' || metricConfig.aggregation == 'avg') { if (metricConfig.subClusters && metricConfig.subClusters.length === 0) { // console.log('subClusterConfigs array empty, use metricConfig defaults') - return { normal: metricConfig.normal, caution: metricConfig.caution, alert: metricConfig.alert } + return { normal: metricConfig.normal, caution: metricConfig.caution, alert: metricConfig.alert, peak: metricConfig.peak } } else if (metricConfig.subClusters && metricConfig.subClusters.length > 0) { // console.log('subClusterConfigs found, use subCluster Settings if matching jobs subcluster:') let forSubCluster = metricConfig.subClusters.find(sc => sc.name == subCluster.name) - if (forSubCluster && forSubCluster.normal && forSubCluster.caution && forSubCluster.alert) return forSubCluster - else return { normal: metricConfig.normal, caution: metricConfig.caution, alert: metricConfig.alert } + if (forSubCluster && forSubCluster.normal && forSubCluster.caution && forSubCluster.alert && forSubCluster.peak) return forSubCluster + else return { normal: metricConfig.normal, caution: metricConfig.caution, alert: metricConfig.alert, peak: metricConfig.peak} } else { console.warn('metricConfig.subClusters not found!') return null @@ -423,6 +423,7 @@ let mc = metricConfig?.subClusters?.find(sc => sc.name == subCluster.name) || metricConfig return { + peak: mc.peak / divisor, normal: mc.normal / divisor, caution: mc.caution / divisor, alert: mc.alert / divisor diff --git a/web/frontend/src/plots/Roofline.svelte b/web/frontend/src/plots/Roofline.svelte index d79f86d..d4ed5f6 100644 --- a/web/frontend/src/plots/Roofline.svelte +++ 
b/web/frontend/src/plots/Roofline.svelte @@ -176,12 +176,12 @@ // Debug get zoomLevel from browser // console.log("Zoom", Math.round(window.devicePixelRatio * 100)) - if (scalarKneeX < (width * window.devicePixelRatio) - (padding[1] * window.devicePixelRatio)) { // Top horizontal roofline + if (scalarKneeX < (width * window.devicePixelRatio) - (padding[1] * window.devicePixelRatio)) { // Lower horizontal roofline u.ctx.moveTo(scalarKneeX, flopRateScalarY) u.ctx.lineTo((width * window.devicePixelRatio) - (padding[1] * window.devicePixelRatio), flopRateScalarY) } - if (simdKneeX < (width * window.devicePixelRatio) - (padding[1] * window.devicePixelRatio)) { // Lower horitontal roofline + if (simdKneeX < (width * window.devicePixelRatio) - (padding[1] * window.devicePixelRatio)) { // Top horitontal roofline u.ctx.moveTo(simdKneeX, flopRateSimdY) u.ctx.lineTo((width * window.devicePixelRatio) - (padding[1] * window.devicePixelRatio), flopRateSimdY) } @@ -214,6 +214,7 @@ } ] }, + // cursor: { drag: { x: true, y: true } } // Activate zoom }; uplot = new uPlot(opts, plotData, plotWrapper); } else { diff --git a/web/frontend/src/utils.js b/web/frontend/src/utils.js index da1878a..5346208 100644 --- a/web/frontend/src/utils.js +++ b/web/frontend/src/utils.js @@ -316,11 +316,17 @@ export function checkMetricDisabled(m, c, s) { //[m]etric, [c]luster, [s]ubclust } export function convert2uplot(canvasData) { - // initial use: Canvas Histogram Data to Uplot + // Prep: Uplot Data Structure let uplotData = [[],[]] // [X, Y1, Y2, ...] - canvasData.forEach( pair => { - uplotData[0].push(pair.value) - uplotData[1].push(pair.count) + // Iterate + canvasData.forEach( cd => { + if (Object.keys(cd).length == 4) { // MetricHisto Datafromat + uplotData[0].push(cd?.max ? cd.max : 0) + uplotData[1].push(cd.count) + } else { // Default + uplotData[0].push(cd.value) + uplotData[1].push(cd.count) + } }) return uplotData }
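Both histogram paths above split the observed value range into ten bins: the SQL path computes the bin index with the CASE/CAST expression (nudging the maximum value down by a factor of 0.999999999 so it lands in the last bin instead of an eleventh one), while the running-jobs path rounds peak/10-wide boundaries in Go, and convert2uplot then plots each bin's count against its max. A short sketch of that shared arithmetic, assuming illustrative helper names (`binFor`, `boundsFor`) that do not exist in the repository:

```go
package main

import (
	"fmt"
	"math"
)

// binFor mirrors the SQL expression in jobsMetricStatisticsHistogram:
// scale the value into [0, bins) relative to [min, max], clamp the maximum
// into the last bin, and shift to 1-based bin numbers.
func binFor(value, min, max float64, bins int) int {
	if value == max {
		value = max * 0.999999999
	}
	return int((value-min)/(max-min)*float64(bins)) + 1
}

// boundsFor mirrors the running-jobs path, which divides the metric peak
// into equally wide, rounded [min, max) intervals.
func boundsFor(bin int, peak float64, bins int) (int, int) {
	width := peak / float64(bins)
	lo := math.Round(width * float64(bin-1))
	hi := math.Round(width * float64(bin))
	return int(lo), int(hi)
}

func main() {
	// Example: cpu_load averages on a hypothetical cluster with peak = 128.
	fmt.Println(binFor(64, 0, 128, 10)) // bin 6
	lo, hi := boundsFor(6, 128, 10)
	fmt.Println(lo, hi) // 64 77
}
```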