diff --git a/api/schema.graphqls b/api/schema.graphqls index 3165177..d7d4f24 100644 --- a/api/schema.graphqls +++ b/api/schema.graphqls @@ -194,6 +194,15 @@ type NodeMetrics { metrics: [JobMetricWithName!]! } +type NodesResultList { + items: [NodeMetrics!]! + offset: Int + limit: Int + count: Int + totalNodes: Int + hasNextPage: Boolean +} + type ClusterSupport { cluster: String! subClusters: [String!]! @@ -241,6 +250,7 @@ type Query { rooflineHeatmap(filter: [JobFilter!]!, rows: Int!, cols: Int!, minX: Float!, minY: Float!, maxX: Float!, maxY: Float!): [[Float!]!]! nodeMetrics(cluster: String!, nodes: [String!], scopes: [MetricScope!], metrics: [String!], from: Time!, to: Time!): [NodeMetrics!]! + nodeMetricsList(cluster: String!, subCluster: String!, nodeFilter: String!, scopes: [MetricScope!], metrics: [String!], from: Time!, to: Time!, page: PageRequest, resolution: Int): NodesResultList! } type Mutation { diff --git a/internal/api/rest.go b/internal/api/rest.go index 4e52701..2921ba5 100644 --- a/internal/api/rest.go +++ b/internal/api/rest.go @@ -1419,7 +1419,7 @@ func (api *RestApi) updateConfiguration(rw http.ResponseWriter, r *http.Request) rw.Header().Set("Content-Type", "text/plain") key, value := r.FormValue("key"), r.FormValue("value") - fmt.Printf("REST > KEY: %#v\nVALUE: %#v\n", key, value) + // fmt.Printf("REST > KEY: %#v\nVALUE: %#v\n", key, value) if err := repository.GetUserCfgRepo().UpdateConfig(key, value, repository.GetUserFromContext(r.Context())); err != nil { http.Error(rw, err.Error(), http.StatusUnprocessableEntity) diff --git a/internal/graph/generated/generated.go b/internal/graph/generated/generated.go index dbac551..d50033e 100644 --- a/internal/graph/generated/generated.go +++ b/internal/graph/generated/generated.go @@ -249,6 +249,15 @@ type ComplexityRoot struct { SubCluster func(childComplexity int) int } + NodesResultList struct { + Count func(childComplexity int) int + HasNextPage func(childComplexity int) int + Items 
func(childComplexity int) int + Limit func(childComplexity int) int + Offset func(childComplexity int) int + TotalNodes func(childComplexity int) int + } + Query struct { AllocatedNodes func(childComplexity int, cluster string) int Clusters func(childComplexity int) int @@ -259,6 +268,7 @@ type ComplexityRoot struct { JobsFootprints func(childComplexity int, filter []*model.JobFilter, metrics []string) int JobsStatistics func(childComplexity int, filter []*model.JobFilter, metrics []string, page *model.PageRequest, sortBy *model.SortByAggregate, groupBy *model.Aggregate, numDurationBins *string, numMetricBins *int) int NodeMetrics func(childComplexity int, cluster string, nodes []string, scopes []schema.MetricScope, metrics []string, from time.Time, to time.Time) int + NodeMetricsList func(childComplexity int, cluster string, subCluster string, nodeFilter string, scopes []schema.MetricScope, metrics []string, from time.Time, to time.Time, page *model.PageRequest, resolution *int) int RooflineHeatmap func(childComplexity int, filter []*model.JobFilter, rows int, cols int, minX float64, minY float64, maxX float64, maxY float64) int Tags func(childComplexity int) int User func(childComplexity int, username string) int @@ -385,6 +395,7 @@ type QueryResolver interface { JobsStatistics(ctx context.Context, filter []*model.JobFilter, metrics []string, page *model.PageRequest, sortBy *model.SortByAggregate, groupBy *model.Aggregate, numDurationBins *string, numMetricBins *int) ([]*model.JobsStatistics, error) RooflineHeatmap(ctx context.Context, filter []*model.JobFilter, rows int, cols int, minX float64, minY float64, maxX float64, maxY float64) ([][]float64, error) NodeMetrics(ctx context.Context, cluster string, nodes []string, scopes []schema.MetricScope, metrics []string, from time.Time, to time.Time) ([]*model.NodeMetrics, error) + NodeMetricsList(ctx context.Context, cluster string, subCluster string, nodeFilter string, scopes []schema.MetricScope, metrics []string, 
from time.Time, to time.Time, page *model.PageRequest, resolution *int) (*model.NodesResultList, error) } type SubClusterResolver interface { NumberOfNodes(ctx context.Context, obj *schema.SubCluster) (int, error) @@ -1288,6 +1299,48 @@ func (e *executableSchema) Complexity(typeName, field string, childComplexity in return e.complexity.NodeMetrics.SubCluster(childComplexity), true + case "NodesResultList.count": + if e.complexity.NodesResultList.Count == nil { + break + } + + return e.complexity.NodesResultList.Count(childComplexity), true + + case "NodesResultList.hasNextPage": + if e.complexity.NodesResultList.HasNextPage == nil { + break + } + + return e.complexity.NodesResultList.HasNextPage(childComplexity), true + + case "NodesResultList.items": + if e.complexity.NodesResultList.Items == nil { + break + } + + return e.complexity.NodesResultList.Items(childComplexity), true + + case "NodesResultList.limit": + if e.complexity.NodesResultList.Limit == nil { + break + } + + return e.complexity.NodesResultList.Limit(childComplexity), true + + case "NodesResultList.offset": + if e.complexity.NodesResultList.Offset == nil { + break + } + + return e.complexity.NodesResultList.Offset(childComplexity), true + + case "NodesResultList.totalNodes": + if e.complexity.NodesResultList.TotalNodes == nil { + break + } + + return e.complexity.NodesResultList.TotalNodes(childComplexity), true + case "Query.allocatedNodes": if e.complexity.Query.AllocatedNodes == nil { break @@ -1386,6 +1439,18 @@ func (e *executableSchema) Complexity(typeName, field string, childComplexity in return e.complexity.Query.NodeMetrics(childComplexity, args["cluster"].(string), args["nodes"].([]string), args["scopes"].([]schema.MetricScope), args["metrics"].([]string), args["from"].(time.Time), args["to"].(time.Time)), true + case "Query.nodeMetricsList": + if e.complexity.Query.NodeMetricsList == nil { + break + } + + args, err := ec.field_Query_nodeMetricsList_args(context.TODO(), rawArgs) + if err 
!= nil { + return 0, false + } + + return e.complexity.Query.NodeMetricsList(childComplexity, args["cluster"].(string), args["subCluster"].(string), args["nodeFilter"].(string), args["scopes"].([]schema.MetricScope), args["metrics"].([]string), args["from"].(time.Time), args["to"].(time.Time), args["page"].(*model.PageRequest), args["resolution"].(*int)), true + case "Query.rooflineHeatmap": if e.complexity.Query.RooflineHeatmap == nil { break @@ -2090,6 +2155,15 @@ type NodeMetrics { metrics: [JobMetricWithName!]! } +type NodesResultList { + items: [NodeMetrics!]! + offset: Int + limit: Int + count: Int + totalNodes: Int + hasNextPage: Boolean +} + type ClusterSupport { cluster: String! subClusters: [String!]! @@ -2137,6 +2211,7 @@ type Query { rooflineHeatmap(filter: [JobFilter!]!, rows: Int!, cols: Int!, minX: Float!, minY: Float!, maxX: Float!, maxY: Float!): [[Float!]!]! nodeMetrics(cluster: String!, nodes: [String!], scopes: [MetricScope!], metrics: [String!], from: Time!, to: Time!): [NodeMetrics!]! + nodeMetricsList(cluster: String!, subCluster: String!, nodeFilter: String!, scopes: [MetricScope!], metrics: [String!], from: Time!, to: Time!, page: PageRequest, resolution: Int): NodesResultList! 
} type Mutation { @@ -3112,6 +3187,254 @@ func (ec *executionContext) field_Query_jobs_argsOrder( return zeroVal, nil } +func (ec *executionContext) field_Query_nodeMetricsList_args(ctx context.Context, rawArgs map[string]interface{}) (map[string]interface{}, error) { + var err error + args := map[string]interface{}{} + arg0, err := ec.field_Query_nodeMetricsList_argsCluster(ctx, rawArgs) + if err != nil { + return nil, err + } + args["cluster"] = arg0 + arg1, err := ec.field_Query_nodeMetricsList_argsSubCluster(ctx, rawArgs) + if err != nil { + return nil, err + } + args["subCluster"] = arg1 + arg2, err := ec.field_Query_nodeMetricsList_argsNodeFilter(ctx, rawArgs) + if err != nil { + return nil, err + } + args["nodeFilter"] = arg2 + arg3, err := ec.field_Query_nodeMetricsList_argsScopes(ctx, rawArgs) + if err != nil { + return nil, err + } + args["scopes"] = arg3 + arg4, err := ec.field_Query_nodeMetricsList_argsMetrics(ctx, rawArgs) + if err != nil { + return nil, err + } + args["metrics"] = arg4 + arg5, err := ec.field_Query_nodeMetricsList_argsFrom(ctx, rawArgs) + if err != nil { + return nil, err + } + args["from"] = arg5 + arg6, err := ec.field_Query_nodeMetricsList_argsTo(ctx, rawArgs) + if err != nil { + return nil, err + } + args["to"] = arg6 + arg7, err := ec.field_Query_nodeMetricsList_argsPage(ctx, rawArgs) + if err != nil { + return nil, err + } + args["page"] = arg7 + arg8, err := ec.field_Query_nodeMetricsList_argsResolution(ctx, rawArgs) + if err != nil { + return nil, err + } + args["resolution"] = arg8 + return args, nil +} +func (ec *executionContext) field_Query_nodeMetricsList_argsCluster( + ctx context.Context, + rawArgs map[string]interface{}, +) (string, error) { + // We won't call the directive if the argument is null. + // Set call_argument_directives_with_null to true to call directives + // even if the argument is null. 
+ _, ok := rawArgs["cluster"] + if !ok { + var zeroVal string + return zeroVal, nil + } + + ctx = graphql.WithPathContext(ctx, graphql.NewPathWithField("cluster")) + if tmp, ok := rawArgs["cluster"]; ok { + return ec.unmarshalNString2string(ctx, tmp) + } + + var zeroVal string + return zeroVal, nil +} + +func (ec *executionContext) field_Query_nodeMetricsList_argsSubCluster( + ctx context.Context, + rawArgs map[string]interface{}, +) (string, error) { + // We won't call the directive if the argument is null. + // Set call_argument_directives_with_null to true to call directives + // even if the argument is null. + _, ok := rawArgs["subCluster"] + if !ok { + var zeroVal string + return zeroVal, nil + } + + ctx = graphql.WithPathContext(ctx, graphql.NewPathWithField("subCluster")) + if tmp, ok := rawArgs["subCluster"]; ok { + return ec.unmarshalNString2string(ctx, tmp) + } + + var zeroVal string + return zeroVal, nil +} + +func (ec *executionContext) field_Query_nodeMetricsList_argsNodeFilter( + ctx context.Context, + rawArgs map[string]interface{}, +) (string, error) { + // We won't call the directive if the argument is null. + // Set call_argument_directives_with_null to true to call directives + // even if the argument is null. + _, ok := rawArgs["nodeFilter"] + if !ok { + var zeroVal string + return zeroVal, nil + } + + ctx = graphql.WithPathContext(ctx, graphql.NewPathWithField("nodeFilter")) + if tmp, ok := rawArgs["nodeFilter"]; ok { + return ec.unmarshalNString2string(ctx, tmp) + } + + var zeroVal string + return zeroVal, nil +} + +func (ec *executionContext) field_Query_nodeMetricsList_argsScopes( + ctx context.Context, + rawArgs map[string]interface{}, +) ([]schema.MetricScope, error) { + // We won't call the directive if the argument is null. + // Set call_argument_directives_with_null to true to call directives + // even if the argument is null. 
+ _, ok := rawArgs["scopes"] + if !ok { + var zeroVal []schema.MetricScope + return zeroVal, nil + } + + ctx = graphql.WithPathContext(ctx, graphql.NewPathWithField("scopes")) + if tmp, ok := rawArgs["scopes"]; ok { + return ec.unmarshalOMetricScope2ᚕgithubᚗcomᚋClusterCockpitᚋccᚑbackendᚋpkgᚋschemaᚐMetricScopeᚄ(ctx, tmp) + } + + var zeroVal []schema.MetricScope + return zeroVal, nil +} + +func (ec *executionContext) field_Query_nodeMetricsList_argsMetrics( + ctx context.Context, + rawArgs map[string]interface{}, +) ([]string, error) { + // We won't call the directive if the argument is null. + // Set call_argument_directives_with_null to true to call directives + // even if the argument is null. + _, ok := rawArgs["metrics"] + if !ok { + var zeroVal []string + return zeroVal, nil + } + + ctx = graphql.WithPathContext(ctx, graphql.NewPathWithField("metrics")) + if tmp, ok := rawArgs["metrics"]; ok { + return ec.unmarshalOString2ᚕstringᚄ(ctx, tmp) + } + + var zeroVal []string + return zeroVal, nil +} + +func (ec *executionContext) field_Query_nodeMetricsList_argsFrom( + ctx context.Context, + rawArgs map[string]interface{}, +) (time.Time, error) { + // We won't call the directive if the argument is null. + // Set call_argument_directives_with_null to true to call directives + // even if the argument is null. + _, ok := rawArgs["from"] + if !ok { + var zeroVal time.Time + return zeroVal, nil + } + + ctx = graphql.WithPathContext(ctx, graphql.NewPathWithField("from")) + if tmp, ok := rawArgs["from"]; ok { + return ec.unmarshalNTime2timeᚐTime(ctx, tmp) + } + + var zeroVal time.Time + return zeroVal, nil +} + +func (ec *executionContext) field_Query_nodeMetricsList_argsTo( + ctx context.Context, + rawArgs map[string]interface{}, +) (time.Time, error) { + // We won't call the directive if the argument is null. + // Set call_argument_directives_with_null to true to call directives + // even if the argument is null. 
+ _, ok := rawArgs["to"] + if !ok { + var zeroVal time.Time + return zeroVal, nil + } + + ctx = graphql.WithPathContext(ctx, graphql.NewPathWithField("to")) + if tmp, ok := rawArgs["to"]; ok { + return ec.unmarshalNTime2timeᚐTime(ctx, tmp) + } + + var zeroVal time.Time + return zeroVal, nil +} + +func (ec *executionContext) field_Query_nodeMetricsList_argsPage( + ctx context.Context, + rawArgs map[string]interface{}, +) (*model.PageRequest, error) { + // We won't call the directive if the argument is null. + // Set call_argument_directives_with_null to true to call directives + // even if the argument is null. + _, ok := rawArgs["page"] + if !ok { + var zeroVal *model.PageRequest + return zeroVal, nil + } + + ctx = graphql.WithPathContext(ctx, graphql.NewPathWithField("page")) + if tmp, ok := rawArgs["page"]; ok { + return ec.unmarshalOPageRequest2ᚖgithubᚗcomᚋClusterCockpitᚋccᚑbackendᚋinternalᚋgraphᚋmodelᚐPageRequest(ctx, tmp) + } + + var zeroVal *model.PageRequest + return zeroVal, nil +} + +func (ec *executionContext) field_Query_nodeMetricsList_argsResolution( + ctx context.Context, + rawArgs map[string]interface{}, +) (*int, error) { + // We won't call the directive if the argument is null. + // Set call_argument_directives_with_null to true to call directives + // even if the argument is null. 
+ _, ok := rawArgs["resolution"] + if !ok { + var zeroVal *int + return zeroVal, nil + } + + ctx = graphql.WithPathContext(ctx, graphql.NewPathWithField("resolution")) + if tmp, ok := rawArgs["resolution"]; ok { + return ec.unmarshalOInt2ᚖint(ctx, tmp) + } + + var zeroVal *int + return zeroVal, nil +} + func (ec *executionContext) field_Query_nodeMetrics_args(ctx context.Context, rawArgs map[string]interface{}) (map[string]interface{}, error) { var err error args := map[string]interface{}{} @@ -9238,6 +9561,263 @@ func (ec *executionContext) fieldContext_NodeMetrics_metrics(_ context.Context, return fc, nil } +func (ec *executionContext) _NodesResultList_items(ctx context.Context, field graphql.CollectedField, obj *model.NodesResultList) (ret graphql.Marshaler) { + fc, err := ec.fieldContext_NodesResultList_items(ctx, field) + if err != nil { + return graphql.Null + } + ctx = graphql.WithFieldContext(ctx, fc) + defer func() { + if r := recover(); r != nil { + ec.Error(ctx, ec.Recover(ctx, r)) + ret = graphql.Null + } + }() + resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) { + ctx = rctx // use context from middleware stack in children + return obj.Items, nil + }) + if err != nil { + ec.Error(ctx, err) + return graphql.Null + } + if resTmp == nil { + if !graphql.HasFieldError(ctx, fc) { + ec.Errorf(ctx, "must not be null") + } + return graphql.Null + } + res := resTmp.([]*model.NodeMetrics) + fc.Result = res + return ec.marshalNNodeMetrics2ᚕᚖgithubᚗcomᚋClusterCockpitᚋccᚑbackendᚋinternalᚋgraphᚋmodelᚐNodeMetricsᚄ(ctx, field.Selections, res) +} + +func (ec *executionContext) fieldContext_NodesResultList_items(_ context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) { + fc = &graphql.FieldContext{ + Object: "NodesResultList", + Field: field, + IsMethod: false, + IsResolver: false, + Child: func(ctx context.Context, field graphql.CollectedField) (*graphql.FieldContext, error) { + switch field.Name { 
+ case "host": + return ec.fieldContext_NodeMetrics_host(ctx, field) + case "subCluster": + return ec.fieldContext_NodeMetrics_subCluster(ctx, field) + case "metrics": + return ec.fieldContext_NodeMetrics_metrics(ctx, field) + } + return nil, fmt.Errorf("no field named %q was found under type NodeMetrics", field.Name) + }, + } + return fc, nil +} + +func (ec *executionContext) _NodesResultList_offset(ctx context.Context, field graphql.CollectedField, obj *model.NodesResultList) (ret graphql.Marshaler) { + fc, err := ec.fieldContext_NodesResultList_offset(ctx, field) + if err != nil { + return graphql.Null + } + ctx = graphql.WithFieldContext(ctx, fc) + defer func() { + if r := recover(); r != nil { + ec.Error(ctx, ec.Recover(ctx, r)) + ret = graphql.Null + } + }() + resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) { + ctx = rctx // use context from middleware stack in children + return obj.Offset, nil + }) + if err != nil { + ec.Error(ctx, err) + return graphql.Null + } + if resTmp == nil { + return graphql.Null + } + res := resTmp.(*int) + fc.Result = res + return ec.marshalOInt2ᚖint(ctx, field.Selections, res) +} + +func (ec *executionContext) fieldContext_NodesResultList_offset(_ context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) { + fc = &graphql.FieldContext{ + Object: "NodesResultList", + Field: field, + IsMethod: false, + IsResolver: false, + Child: func(ctx context.Context, field graphql.CollectedField) (*graphql.FieldContext, error) { + return nil, errors.New("field of type Int does not have child fields") + }, + } + return fc, nil +} + +func (ec *executionContext) _NodesResultList_limit(ctx context.Context, field graphql.CollectedField, obj *model.NodesResultList) (ret graphql.Marshaler) { + fc, err := ec.fieldContext_NodesResultList_limit(ctx, field) + if err != nil { + return graphql.Null + } + ctx = graphql.WithFieldContext(ctx, fc) + defer func() { + if r := recover(); r != nil 
{ + ec.Error(ctx, ec.Recover(ctx, r)) + ret = graphql.Null + } + }() + resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) { + ctx = rctx // use context from middleware stack in children + return obj.Limit, nil + }) + if err != nil { + ec.Error(ctx, err) + return graphql.Null + } + if resTmp == nil { + return graphql.Null + } + res := resTmp.(*int) + fc.Result = res + return ec.marshalOInt2ᚖint(ctx, field.Selections, res) +} + +func (ec *executionContext) fieldContext_NodesResultList_limit(_ context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) { + fc = &graphql.FieldContext{ + Object: "NodesResultList", + Field: field, + IsMethod: false, + IsResolver: false, + Child: func(ctx context.Context, field graphql.CollectedField) (*graphql.FieldContext, error) { + return nil, errors.New("field of type Int does not have child fields") + }, + } + return fc, nil +} + +func (ec *executionContext) _NodesResultList_count(ctx context.Context, field graphql.CollectedField, obj *model.NodesResultList) (ret graphql.Marshaler) { + fc, err := ec.fieldContext_NodesResultList_count(ctx, field) + if err != nil { + return graphql.Null + } + ctx = graphql.WithFieldContext(ctx, fc) + defer func() { + if r := recover(); r != nil { + ec.Error(ctx, ec.Recover(ctx, r)) + ret = graphql.Null + } + }() + resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) { + ctx = rctx // use context from middleware stack in children + return obj.Count, nil + }) + if err != nil { + ec.Error(ctx, err) + return graphql.Null + } + if resTmp == nil { + return graphql.Null + } + res := resTmp.(*int) + fc.Result = res + return ec.marshalOInt2ᚖint(ctx, field.Selections, res) +} + +func (ec *executionContext) fieldContext_NodesResultList_count(_ context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) { + fc = &graphql.FieldContext{ + Object: "NodesResultList", + Field: field, + 
IsMethod: false, + IsResolver: false, + Child: func(ctx context.Context, field graphql.CollectedField) (*graphql.FieldContext, error) { + return nil, errors.New("field of type Int does not have child fields") + }, + } + return fc, nil +} + +func (ec *executionContext) _NodesResultList_totalNodes(ctx context.Context, field graphql.CollectedField, obj *model.NodesResultList) (ret graphql.Marshaler) { + fc, err := ec.fieldContext_NodesResultList_totalNodes(ctx, field) + if err != nil { + return graphql.Null + } + ctx = graphql.WithFieldContext(ctx, fc) + defer func() { + if r := recover(); r != nil { + ec.Error(ctx, ec.Recover(ctx, r)) + ret = graphql.Null + } + }() + resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) { + ctx = rctx // use context from middleware stack in children + return obj.TotalNodes, nil + }) + if err != nil { + ec.Error(ctx, err) + return graphql.Null + } + if resTmp == nil { + return graphql.Null + } + res := resTmp.(*int) + fc.Result = res + return ec.marshalOInt2ᚖint(ctx, field.Selections, res) +} + +func (ec *executionContext) fieldContext_NodesResultList_totalNodes(_ context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) { + fc = &graphql.FieldContext{ + Object: "NodesResultList", + Field: field, + IsMethod: false, + IsResolver: false, + Child: func(ctx context.Context, field graphql.CollectedField) (*graphql.FieldContext, error) { + return nil, errors.New("field of type Int does not have child fields") + }, + } + return fc, nil +} + +func (ec *executionContext) _NodesResultList_hasNextPage(ctx context.Context, field graphql.CollectedField, obj *model.NodesResultList) (ret graphql.Marshaler) { + fc, err := ec.fieldContext_NodesResultList_hasNextPage(ctx, field) + if err != nil { + return graphql.Null + } + ctx = graphql.WithFieldContext(ctx, fc) + defer func() { + if r := recover(); r != nil { + ec.Error(ctx, ec.Recover(ctx, r)) + ret = graphql.Null + } + }() + resTmp, err 
:= ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) { + ctx = rctx // use context from middleware stack in children + return obj.HasNextPage, nil + }) + if err != nil { + ec.Error(ctx, err) + return graphql.Null + } + if resTmp == nil { + return graphql.Null + } + res := resTmp.(*bool) + fc.Result = res + return ec.marshalOBoolean2ᚖbool(ctx, field.Selections, res) +} + +func (ec *executionContext) fieldContext_NodesResultList_hasNextPage(_ context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) { + fc = &graphql.FieldContext{ + Object: "NodesResultList", + Field: field, + IsMethod: false, + IsResolver: false, + Child: func(ctx context.Context, field graphql.CollectedField) (*graphql.FieldContext, error) { + return nil, errors.New("field of type Boolean does not have child fields") + }, + } + return fc, nil +} + func (ec *executionContext) _Query_clusters(ctx context.Context, field graphql.CollectedField) (ret graphql.Marshaler) { fc, err := ec.fieldContext_Query_clusters(ctx, field) if err != nil { @@ -10024,6 +10604,75 @@ func (ec *executionContext) fieldContext_Query_nodeMetrics(ctx context.Context, return fc, nil } +func (ec *executionContext) _Query_nodeMetricsList(ctx context.Context, field graphql.CollectedField) (ret graphql.Marshaler) { + fc, err := ec.fieldContext_Query_nodeMetricsList(ctx, field) + if err != nil { + return graphql.Null + } + ctx = graphql.WithFieldContext(ctx, fc) + defer func() { + if r := recover(); r != nil { + ec.Error(ctx, ec.Recover(ctx, r)) + ret = graphql.Null + } + }() + resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) { + ctx = rctx // use context from middleware stack in children + return ec.resolvers.Query().NodeMetricsList(rctx, fc.Args["cluster"].(string), fc.Args["subCluster"].(string), fc.Args["nodeFilter"].(string), fc.Args["scopes"].([]schema.MetricScope), fc.Args["metrics"].([]string), fc.Args["from"].(time.Time), 
fc.Args["to"].(time.Time), fc.Args["page"].(*model.PageRequest), fc.Args["resolution"].(*int)) + }) + if err != nil { + ec.Error(ctx, err) + return graphql.Null + } + if resTmp == nil { + if !graphql.HasFieldError(ctx, fc) { + ec.Errorf(ctx, "must not be null") + } + return graphql.Null + } + res := resTmp.(*model.NodesResultList) + fc.Result = res + return ec.marshalNNodesResultList2ᚖgithubᚗcomᚋClusterCockpitᚋccᚑbackendᚋinternalᚋgraphᚋmodelᚐNodesResultList(ctx, field.Selections, res) +} + +func (ec *executionContext) fieldContext_Query_nodeMetricsList(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) { + fc = &graphql.FieldContext{ + Object: "Query", + Field: field, + IsMethod: true, + IsResolver: true, + Child: func(ctx context.Context, field graphql.CollectedField) (*graphql.FieldContext, error) { + switch field.Name { + case "items": + return ec.fieldContext_NodesResultList_items(ctx, field) + case "offset": + return ec.fieldContext_NodesResultList_offset(ctx, field) + case "limit": + return ec.fieldContext_NodesResultList_limit(ctx, field) + case "count": + return ec.fieldContext_NodesResultList_count(ctx, field) + case "totalNodes": + return ec.fieldContext_NodesResultList_totalNodes(ctx, field) + case "hasNextPage": + return ec.fieldContext_NodesResultList_hasNextPage(ctx, field) + } + return nil, fmt.Errorf("no field named %q was found under type NodesResultList", field.Name) + }, + } + defer func() { + if r := recover(); r != nil { + err = ec.Recover(ctx, r) + ec.Error(ctx, err) + } + }() + ctx = graphql.WithFieldContext(ctx, fc) + if fc.Args, err = ec.field_Query_nodeMetricsList_args(ctx, field.ArgumentMap(ec.Variables)); err != nil { + ec.Error(ctx, err) + return fc, err + } + return fc, nil +} + func (ec *executionContext) _Query___type(ctx context.Context, field graphql.CollectedField) (ret graphql.Marshaler) { fc, err := ec.fieldContext_Query___type(ctx, field) if err != nil { @@ -16337,6 +16986,55 @@ func (ec 
*executionContext) _NodeMetrics(ctx context.Context, sel ast.SelectionS return out } +var nodesResultListImplementors = []string{"NodesResultList"} + +func (ec *executionContext) _NodesResultList(ctx context.Context, sel ast.SelectionSet, obj *model.NodesResultList) graphql.Marshaler { + fields := graphql.CollectFields(ec.OperationContext, sel, nodesResultListImplementors) + + out := graphql.NewFieldSet(fields) + deferred := make(map[string]*graphql.FieldSet) + for i, field := range fields { + switch field.Name { + case "__typename": + out.Values[i] = graphql.MarshalString("NodesResultList") + case "items": + out.Values[i] = ec._NodesResultList_items(ctx, field, obj) + if out.Values[i] == graphql.Null { + out.Invalids++ + } + case "offset": + out.Values[i] = ec._NodesResultList_offset(ctx, field, obj) + case "limit": + out.Values[i] = ec._NodesResultList_limit(ctx, field, obj) + case "count": + out.Values[i] = ec._NodesResultList_count(ctx, field, obj) + case "totalNodes": + out.Values[i] = ec._NodesResultList_totalNodes(ctx, field, obj) + case "hasNextPage": + out.Values[i] = ec._NodesResultList_hasNextPage(ctx, field, obj) + default: + panic("unknown field " + strconv.Quote(field.Name)) + } + } + out.Dispatch(ctx) + if out.Invalids > 0 { + return graphql.Null + } + + atomic.AddInt32(&ec.deferred, int32(len(deferred))) + + for label, dfs := range deferred { + ec.processDeferredGroup(graphql.DeferredGroup{ + Label: label, + Path: graphql.GetPath(ctx), + FieldSet: dfs, + Context: ctx, + }) + } + + return out +} + var queryImplementors = []string{"Query"} func (ec *executionContext) _Query(ctx context.Context, sel ast.SelectionSet) graphql.Marshaler { @@ -16610,6 +17308,28 @@ func (ec *executionContext) _Query(ctx context.Context, sel ast.SelectionSet) gr func(ctx context.Context) graphql.Marshaler { return innerFunc(ctx, out) }) } + out.Concurrently(i, func(ctx context.Context) graphql.Marshaler { return rrm(innerCtx) }) + case "nodeMetricsList": + field := field + 
+ innerFunc := func(ctx context.Context, fs *graphql.FieldSet) (res graphql.Marshaler) { + defer func() { + if r := recover(); r != nil { + ec.Error(ctx, ec.Recover(ctx, r)) + } + }() + res = ec._Query_nodeMetricsList(ctx, field) + if res == graphql.Null { + atomic.AddUint32(&fs.Invalids, 1) + } + return res + } + + rrm := func(ctx context.Context) graphql.Marshaler { + return ec.OperationContext.RootResolverMiddleware(ctx, + func(ctx context.Context) graphql.Marshaler { return innerFunc(ctx, out) }) + } + out.Concurrently(i, func(ctx context.Context) graphql.Marshaler { return rrm(innerCtx) }) case "__type": out.Values[i] = ec.OperationContext.RootResolverMiddleware(innerCtx, func(ctx context.Context) (res graphql.Marshaler) { @@ -18655,6 +19375,20 @@ func (ec *executionContext) marshalNNodeMetrics2ᚖgithubᚗcomᚋClusterCockpit return ec._NodeMetrics(ctx, sel, v) } +func (ec *executionContext) marshalNNodesResultList2githubᚗcomᚋClusterCockpitᚋccᚑbackendᚋinternalᚋgraphᚋmodelᚐNodesResultList(ctx context.Context, sel ast.SelectionSet, v model.NodesResultList) graphql.Marshaler { + return ec._NodesResultList(ctx, sel, &v) +} + +func (ec *executionContext) marshalNNodesResultList2ᚖgithubᚗcomᚋClusterCockpitᚋccᚑbackendᚋinternalᚋgraphᚋmodelᚐNodesResultList(ctx context.Context, sel ast.SelectionSet, v *model.NodesResultList) graphql.Marshaler { + if v == nil { + if !graphql.HasFieldError(ctx, graphql.GetFieldContext(ctx)) { + ec.Errorf(ctx, "the requested element is null which the schema does not allow") + } + return graphql.Null + } + return ec._NodesResultList(ctx, sel, v) +} + func (ec *executionContext) unmarshalNNullableFloat2githubᚗcomᚋClusterCockpitᚋccᚑbackendᚋpkgᚋschemaᚐFloat(ctx context.Context, v interface{}) (schema.Float, error) { var res schema.Float err := res.UnmarshalGQL(v) diff --git a/internal/graph/model/models_gen.go b/internal/graph/model/models_gen.go index 7f0db5f..fd24897 100644 --- a/internal/graph/model/models_gen.go +++ 
b/internal/graph/model/models_gen.go @@ -148,6 +148,15 @@ type NodeMetrics struct { Metrics []*JobMetricWithName `json:"metrics"` } +type NodesResultList struct { + Items []*NodeMetrics `json:"items"` + Offset *int `json:"offset,omitempty"` + Limit *int `json:"limit,omitempty"` + Count *int `json:"count,omitempty"` + TotalNodes *int `json:"totalNodes,omitempty"` + HasNextPage *bool `json:"hasNextPage,omitempty"` +} + type OrderByInput struct { Field string `json:"field"` Type string `json:"type"` diff --git a/internal/graph/schema.resolvers.go b/internal/graph/schema.resolvers.go index c0a7b55..d13a29b 100644 --- a/internal/graph/schema.resolvers.go +++ b/internal/graph/schema.resolvers.go @@ -437,8 +437,8 @@ func (r *queryResolver) RooflineHeatmap(ctx context.Context, filter []*model.Job // NodeMetrics is the resolver for the nodeMetrics field. func (r *queryResolver) NodeMetrics(ctx context.Context, cluster string, nodes []string, scopes []schema.MetricScope, metrics []string, from time.Time, to time.Time) ([]*model.NodeMetrics, error) { user := repository.GetUserFromContext(ctx) - if user != nil && !user.HasRole(schema.RoleAdmin) { - return nil, errors.New("you need to be an administrator for this query") + if user != nil && !user.HasAnyRole([]schema.Role{schema.RoleAdmin, schema.RoleSupport}) { + return nil, errors.New("you need to be administrator or support staff for this query") } if metrics == nil { @@ -449,7 +449,7 @@ func (r *queryResolver) NodeMetrics(ctx context.Context, cluster string, nodes [ data, err := metricDataDispatcher.LoadNodeData(cluster, metrics, nodes, scopes, from, to, ctx) if err != nil { - log.Warn("Error while loading node data") + log.Warn("error while loading node data") return nil, err } @@ -459,7 +459,10 @@ func (r *queryResolver) NodeMetrics(ctx context.Context, cluster string, nodes [ Host: hostname, Metrics: make([]*model.JobMetricWithName, 0, len(metrics)*len(scopes)), } - host.SubCluster, _ = 
archive.GetSubClusterByNode(cluster, hostname) + host.SubCluster, err = archive.GetSubClusterByNode(cluster, hostname) + if err != nil { + log.Warnf("error in nodeMetrics resolver: %s", err) + } for metric, scopedMetrics := range metrics { for _, scopedMetric := range scopedMetrics { @@ -477,6 +480,68 @@ func (r *queryResolver) NodeMetrics(ctx context.Context, cluster string, nodes [ return nodeMetrics, nil } +// NodeMetricsList is the resolver for the nodeMetricsList field. +func (r *queryResolver) NodeMetricsList(ctx context.Context, cluster string, subCluster string, nodeFilter string, scopes []schema.MetricScope, metrics []string, from time.Time, to time.Time, page *model.PageRequest, resolution *int) (*model.NodesResultList, error) { + if resolution == nil { // Load from Config + if config.Keys.EnableResampling != nil { + defaultRes := slices.Max(config.Keys.EnableResampling.Resolutions) + resolution = &defaultRes + } else { // Set 0 (Loads configured metric timestep) + defaultRes := 0 + resolution = &defaultRes + } + } + + user := repository.GetUserFromContext(ctx) + if user != nil && !user.HasAnyRole([]schema.Role{schema.RoleAdmin, schema.RoleSupport}) { + return nil, errors.New("you need to be administrator or support staff for this query") + } + + if metrics == nil { + for _, mc := range archive.GetCluster(cluster).MetricConfig { + metrics = append(metrics, mc.Name) + } + } + + data, totalNodes, hasNextPage, err := metricDataDispatcher.LoadNodeListData(cluster, subCluster, nodeFilter, metrics, scopes, *resolution, from, to, page, ctx) + if err != nil { + log.Warn("error while loading node data") + return nil, err + } + + nodeMetricsList := make([]*model.NodeMetrics, 0, len(data)) + for hostname, metrics := range data { + host := &model.NodeMetrics{ + Host: hostname, + Metrics: make([]*model.JobMetricWithName, 0, len(metrics)*len(scopes)), + } + host.SubCluster, err = archive.GetSubClusterByNode(cluster, hostname) + if err != nil { + log.Warnf("error in 
nodeMetricsList resolver: %s", err) + } + + for metric, scopedMetrics := range metrics { + for scope, scopedMetric := range scopedMetrics { + host.Metrics = append(host.Metrics, &model.JobMetricWithName{ + Name: metric, + Scope: scope, + Metric: scopedMetric, + }) + } + } + + nodeMetricsList = append(nodeMetricsList, host) + } + + nodeMetricsListResult := &model.NodesResultList{ + Items: nodeMetricsList, + TotalNodes: &totalNodes, + HasNextPage: &hasNextPage, + } + + return nodeMetricsListResult, nil +} + // NumberOfNodes is the resolver for the numberOfNodes field. func (r *subClusterResolver) NumberOfNodes(ctx context.Context, obj *schema.SubCluster) (int, error) { nodeList, err := archive.ParseNodeList(obj.Nodes) diff --git a/internal/metricDataDispatcher/dataLoader.go b/internal/metricDataDispatcher/dataLoader.go index 1f2e175..939a0fb 100644 --- a/internal/metricDataDispatcher/dataLoader.go +++ b/internal/metricDataDispatcher/dataLoader.go @@ -10,6 +10,7 @@ import ( "time" "github.com/ClusterCockpit/cc-backend/internal/config" + "github.com/ClusterCockpit/cc-backend/internal/graph/model" "github.com/ClusterCockpit/cc-backend/internal/metricdata" "github.com/ClusterCockpit/cc-backend/pkg/archive" "github.com/ClusterCockpit/cc-backend/pkg/log" @@ -219,7 +220,7 @@ func LoadAverages( return nil } -// Used for the node/system view. Returns a map of nodes to a map of metrics. +// Used for the classic node/system view. Returns a map of nodes to a map of metrics.
func LoadNodeData( cluster string, metrics, nodes []string, @@ -254,3 +255,53 @@ func LoadNodeData( return data, nil } + +func LoadNodeListData( + cluster, subCluster, nodeFilter string, + metrics []string, + scopes []schema.MetricScope, + resolution int, + from, to time.Time, + page *model.PageRequest, + ctx context.Context, +) (map[string]schema.JobData, int, bool, error) { + repo, err := metricdata.GetMetricDataRepo(cluster) + if err != nil { + return nil, 0, false, fmt.Errorf("METRICDATA/METRICDATA > no metric data repository configured for '%s'", cluster) + } + + if metrics == nil { + for _, m := range archive.GetCluster(cluster).MetricConfig { + metrics = append(metrics, m.Name) + } + } + + data, totalNodes, hasNextPage, err := repo.LoadNodeListData(cluster, subCluster, nodeFilter, metrics, scopes, resolution, from, to, page, ctx) + if err != nil { + if len(data) != 0 { + log.Warnf("partial error: %s", err.Error()) + } else { + log.Error("Error while loading node data from metric repository") + return nil, totalNodes, hasNextPage, err + } + } + + // NOTE: New StatsSeries will always be calculated as 'min/median/max' + const maxSeriesSize int = 8 + for _, jd := range data { + for _, scopes := range jd { + for _, jm := range scopes { + if jm.StatisticsSeries != nil || len(jm.Series) < maxSeriesSize { + continue + } + jm.AddStatisticsSeries() + } + } + } + + if data == nil { + return nil, totalNodes, hasNextPage, fmt.Errorf("METRICDATA/METRICDATA > the metric data repository for '%s' does not support this query", cluster) + } + + return data, totalNodes, hasNextPage, nil +} diff --git a/internal/metricdata/cc-metric-store.go b/internal/metricdata/cc-metric-store.go index ce5101c..8d2d3f5 100644 --- a/internal/metricdata/cc-metric-store.go +++ b/internal/metricdata/cc-metric-store.go @@ -11,10 +11,12 @@ import ( "encoding/json" "fmt" "net/http" + "sort" "strconv" "strings" "time" + "github.com/ClusterCockpit/cc-backend/internal/graph/model" 
"github.com/ClusterCockpit/cc-backend/pkg/archive" "github.com/ClusterCockpit/cc-backend/pkg/log" "github.com/ClusterCockpit/cc-backend/pkg/schema" @@ -211,7 +213,6 @@ func (ccms *CCMetricStore) LoadData( } jobMetric, ok := jobData[metric][scope] - if !ok { jobMetric = &schema.JobMetric{ Unit: mc.Unit, @@ -235,8 +236,7 @@ func (ccms *CCMetricStore) LoadData( } if res.Avg.IsNaN() || res.Min.IsNaN() || res.Max.IsNaN() { - // TODO: use schema.Float instead of float64? - // This is done because regular float64 can not be JSONed when NaN. + // "schema.Float()" because regular float64 can not be JSONed when NaN. res.Avg = schema.Float(0) res.Min = schema.Float(0) res.Max = schema.Float(0) @@ -693,6 +693,445 @@ func (ccms *CCMetricStore) LoadNodeData( return data, nil } +func (ccms *CCMetricStore) LoadNodeListData( + cluster, subCluster, nodeFilter string, + metrics []string, + scopes []schema.MetricScope, + resolution int, + from, to time.Time, + page *model.PageRequest, + ctx context.Context, +) (map[string]schema.JobData, int, bool, error) { + + // 0) Init additional vars + var totalNodes int = 0 + var hasNextPage bool = false + + // 1) Get list of all nodes + var nodes []string + if subCluster != "" { + scNodes := archive.NodeLists[cluster][subCluster] + nodes = scNodes.PrintList() + } else { + subClusterNodeLists := archive.NodeLists[cluster] + for _, nodeList := range subClusterNodeLists { + nodes = append(nodes, nodeList.PrintList()...) + } + } + + // 2) Filter nodes + if nodeFilter != "" { + filteredNodes := []string{} + for _, node := range nodes { + if strings.Contains(node, nodeFilter) { + filteredNodes = append(filteredNodes, node) + } + } + nodes = filteredNodes + } + + // 2.1) Count total nodes && Sort nodes -> Sorting invalidated after ccms return ... 
+ totalNodes = len(nodes) + sort.Strings(nodes) + + // 3) Apply paging -> Guard against nil page (GraphQL "page" argument is optional) + if page != nil && len(nodes) > page.ItemsPerPage { + start := (page.Page - 1) * page.ItemsPerPage + end := start + page.ItemsPerPage + if end >= len(nodes) { + end = len(nodes) + hasNextPage = false + } else { + hasNextPage = true + } + nodes = nodes[start:end] + } + + // Note: Order of node data is not guaranteed after this point, but contents match page and filter criteria + + queries, assignedScope, err := ccms.buildNodeQueries(cluster, subCluster, nodes, metrics, scopes, resolution) + if err != nil { + log.Warn("Error while building queries") + return nil, totalNodes, hasNextPage, err + } + + req := ApiQueryRequest{ + Cluster: cluster, + Queries: queries, + From: from.Unix(), + To: to.Unix(), + WithStats: true, + WithData: true, + } + + resBody, err := ccms.doRequest(ctx, &req) + if err != nil { + log.Error(fmt.Sprintf("Error while performing request %#v\n", err)) + return nil, totalNodes, hasNextPage, err + } + + var errors []string + data := make(map[string]schema.JobData) + for i, row := range resBody.Results { + var query ApiQuery + if resBody.Queries != nil { + query = resBody.Queries[i] + } else { + query = req.Queries[i] + } + // qdata := res[0] + metric := ccms.toLocalName(query.Metric) + scope := assignedScope[i] + mc := archive.GetMetricConfig(cluster, metric) + + res := row[0].Resolution + if res == 0 { + res = mc.Timestep + } + + // Init Nested Map Data Structures If Not Found + hostData, ok := data[query.Hostname] + if !ok { + hostData = make(schema.JobData) + data[query.Hostname] = hostData + } + + metricData, ok := hostData[metric] + if !ok { + metricData = make(map[schema.MetricScope]*schema.JobMetric) + data[query.Hostname][metric] = metricData + } + + scopeData, ok := metricData[scope] + if !ok { + scopeData = &schema.JobMetric{ + Unit: mc.Unit, + Timestep: res, + Series: make([]schema.Series, 0), + } + data[query.Hostname][metric][scope] = scopeData + } + + for ndx, res := range row { + 
if res.Error != nil { + /* Build list for "partial errors", if any */ + errors = append(errors, fmt.Sprintf("failed to fetch '%s' from host '%s': %s", query.Metric, query.Hostname, *res.Error)) + continue + } + + id := (*string)(nil) + if query.Type != nil { + id = new(string) + *id = query.TypeIds[ndx] + } + + if res.Avg.IsNaN() || res.Min.IsNaN() || res.Max.IsNaN() { + // "schema.Float()" because regular float64 can not be JSONed when NaN. + res.Avg = schema.Float(0) + res.Min = schema.Float(0) + res.Max = schema.Float(0) + } + + scopeData.Series = append(scopeData.Series, schema.Series{ + Hostname: query.Hostname, + Id: id, + Statistics: schema.MetricStatistics{ + Avg: float64(res.Avg), + Min: float64(res.Min), + Max: float64(res.Max), + }, + Data: res.Data, + }) + } + } + + if len(errors) != 0 { + /* Returns list of "partial errors" */ + return data, totalNodes, hasNextPage, fmt.Errorf("METRICDATA/CCMS > Errors: %s", strings.Join(errors, ", ")) + } + + return data, totalNodes, hasNextPage, nil +} + +func (ccms *CCMetricStore) buildNodeQueries( + cluster string, + subCluster string, + nodes []string, + metrics []string, + scopes []schema.MetricScope, + resolution int, +) ([]ApiQuery, []schema.MetricScope, error) { + + queries := make([]ApiQuery, 0, len(metrics)*len(scopes)*len(nodes)) + assignedScope := []schema.MetricScope{} + + // Get Topol before loop if subCluster given + var subClusterTopol *schema.SubCluster + var scterr error + if subCluster != "" { + subClusterTopol, scterr = archive.GetSubCluster(cluster, subCluster) + if scterr != nil { + // TODO: Log + return nil, nil, scterr + } + } + + for _, metric := range metrics { + remoteName := ccms.toRemoteName(metric) + mc := archive.GetMetricConfig(cluster, metric) + if mc == nil { + // return nil, fmt.Errorf("METRICDATA/CCMS > metric '%s' is not specified for cluster '%s'", metric, cluster) + log.Infof("metric '%s' is not specified for cluster '%s'", metric, cluster) + continue + } + + // Avoid 
duplicates... + handledScopes := make([]schema.MetricScope, 0, 3) + + scopesLoop: + for _, requestedScope := range scopes { + nativeScope := mc.Scope + + scope := nativeScope.Max(requestedScope) + for _, s := range handledScopes { + if scope == s { + continue scopesLoop + } + } + handledScopes = append(handledScopes, scope) + + for _, hostname := range nodes { + + // If no subCluster given, get it by node + if subCluster == "" { + subClusterName, scnerr := archive.GetSubClusterByNode(cluster, hostname) + if scnerr != nil { + return nil, nil, scnerr + } + subClusterTopol, scterr = archive.GetSubCluster(cluster, subClusterName) + if scterr != nil { + return nil, nil, scterr + } + } + + // Always full node hwthread id list, no partial queries expected -> Use "topology.Node" directly where applicable + // Always full accelerator id list, no partial queries expected -> Use "acceleratorIds" directly where applicable + topology := subClusterTopol.Topology + acceleratorIds := topology.GetAcceleratorIDs() + + // Moved check here if metric matches hardware specs + if nativeScope == schema.MetricScopeAccelerator && len(acceleratorIds) == 0 { + continue scopesLoop + } + + // Accelerator -> Accelerator (Use "accelerator" scope if requested scope is lower than node) + if nativeScope == schema.MetricScopeAccelerator && scope.LT(schema.MetricScopeNode) { + if scope != schema.MetricScopeAccelerator { + // Skip all other catched cases + continue + } + + queries = append(queries, ApiQuery{ + Metric: remoteName, + Hostname: hostname, + Aggregate: false, + Type: &acceleratorString, + TypeIds: acceleratorIds, + Resolution: resolution, + }) + assignedScope = append(assignedScope, schema.MetricScopeAccelerator) + continue + } + + // Accelerator -> Node + if nativeScope == schema.MetricScopeAccelerator && scope == schema.MetricScopeNode { + if len(acceleratorIds) == 0 { + continue + } + + queries = append(queries, ApiQuery{ + Metric: remoteName, + Hostname: hostname, + Aggregate: true, + 
Type: &acceleratorString, + TypeIds: acceleratorIds, + Resolution: resolution, + }) + assignedScope = append(assignedScope, scope) + continue + } + + // HWThread -> HWThead + if nativeScope == schema.MetricScopeHWThread && scope == schema.MetricScopeHWThread { + queries = append(queries, ApiQuery{ + Metric: remoteName, + Hostname: hostname, + Aggregate: false, + Type: &hwthreadString, + TypeIds: intToStringSlice(topology.Node), + Resolution: resolution, + }) + assignedScope = append(assignedScope, scope) + continue + } + + // HWThread -> Core + if nativeScope == schema.MetricScopeHWThread && scope == schema.MetricScopeCore { + cores, _ := topology.GetCoresFromHWThreads(topology.Node) + for _, core := range cores { + queries = append(queries, ApiQuery{ + Metric: remoteName, + Hostname: hostname, + Aggregate: true, + Type: &hwthreadString, + TypeIds: intToStringSlice(topology.Core[core]), + Resolution: resolution, + }) + assignedScope = append(assignedScope, scope) + } + continue + } + + // HWThread -> Socket + if nativeScope == schema.MetricScopeHWThread && scope == schema.MetricScopeSocket { + sockets, _ := topology.GetSocketsFromHWThreads(topology.Node) + for _, socket := range sockets { + queries = append(queries, ApiQuery{ + Metric: remoteName, + Hostname: hostname, + Aggregate: true, + Type: &hwthreadString, + TypeIds: intToStringSlice(topology.Socket[socket]), + Resolution: resolution, + }) + assignedScope = append(assignedScope, scope) + } + continue + } + + // HWThread -> Node + if nativeScope == schema.MetricScopeHWThread && scope == schema.MetricScopeNode { + queries = append(queries, ApiQuery{ + Metric: remoteName, + Hostname: hostname, + Aggregate: true, + Type: &hwthreadString, + TypeIds: intToStringSlice(topology.Node), + Resolution: resolution, + }) + assignedScope = append(assignedScope, scope) + continue + } + + // Core -> Core + if nativeScope == schema.MetricScopeCore && scope == schema.MetricScopeCore { + cores, _ := 
topology.GetCoresFromHWThreads(topology.Node) + queries = append(queries, ApiQuery{ + Metric: remoteName, + Hostname: hostname, + Aggregate: false, + Type: &coreString, + TypeIds: intToStringSlice(cores), + Resolution: resolution, + }) + assignedScope = append(assignedScope, scope) + continue + } + + // Core -> Node + if nativeScope == schema.MetricScopeCore && scope == schema.MetricScopeNode { + cores, _ := topology.GetCoresFromHWThreads(topology.Node) + queries = append(queries, ApiQuery{ + Metric: remoteName, + Hostname: hostname, + Aggregate: true, + Type: &coreString, + TypeIds: intToStringSlice(cores), + Resolution: resolution, + }) + assignedScope = append(assignedScope, scope) + continue + } + + // MemoryDomain -> MemoryDomain + if nativeScope == schema.MetricScopeMemoryDomain && scope == schema.MetricScopeMemoryDomain { + sockets, _ := topology.GetMemoryDomainsFromHWThreads(topology.Node) + queries = append(queries, ApiQuery{ + Metric: remoteName, + Hostname: hostname, + Aggregate: false, + Type: &memoryDomainString, + TypeIds: intToStringSlice(sockets), + Resolution: resolution, + }) + assignedScope = append(assignedScope, scope) + continue + } + + // MemoryDoman -> Node + if nativeScope == schema.MetricScopeMemoryDomain && scope == schema.MetricScopeNode { + sockets, _ := topology.GetMemoryDomainsFromHWThreads(topology.Node) + queries = append(queries, ApiQuery{ + Metric: remoteName, + Hostname: hostname, + Aggregate: true, + Type: &memoryDomainString, + TypeIds: intToStringSlice(sockets), + Resolution: resolution, + }) + assignedScope = append(assignedScope, scope) + continue + } + + // Socket -> Socket + if nativeScope == schema.MetricScopeSocket && scope == schema.MetricScopeSocket { + sockets, _ := topology.GetSocketsFromHWThreads(topology.Node) + queries = append(queries, ApiQuery{ + Metric: remoteName, + Hostname: hostname, + Aggregate: false, + Type: &socketString, + TypeIds: intToStringSlice(sockets), + Resolution: resolution, + }) + 
assignedScope = append(assignedScope, scope) + continue + } + + // Socket -> Node + if nativeScope == schema.MetricScopeSocket && scope == schema.MetricScopeNode { + sockets, _ := topology.GetSocketsFromHWThreads(topology.Node) + queries = append(queries, ApiQuery{ + Metric: remoteName, + Hostname: hostname, + Aggregate: true, + Type: &socketString, + TypeIds: intToStringSlice(sockets), + Resolution: resolution, + }) + assignedScope = append(assignedScope, scope) + continue + } + + // Node -> Node + if nativeScope == schema.MetricScopeNode && scope == schema.MetricScopeNode { + queries = append(queries, ApiQuery{ + Metric: remoteName, + Hostname: hostname, + Resolution: resolution, + }) + assignedScope = append(assignedScope, scope) + continue + } + + return nil, nil, fmt.Errorf("METRICDATA/CCMS > TODO: unhandled case: native-scope=%s, requested-scope=%s", nativeScope, requestedScope) + } + } + } + + return queries, assignedScope, nil +} + func intToStringSlice(is []int) []string { ss := make([]string, len(is)) for i, x := range is { diff --git a/internal/metricdata/influxdb-v2.go b/internal/metricdata/influxdb-v2.go index b416fa5..79c2d4a 100644 --- a/internal/metricdata/influxdb-v2.go +++ b/internal/metricdata/influxdb-v2.go @@ -13,6 +13,7 @@ import ( "strings" "time" + "github.com/ClusterCockpit/cc-backend/internal/graph/model" "github.com/ClusterCockpit/cc-backend/pkg/archive" "github.com/ClusterCockpit/cc-backend/pkg/log" "github.com/ClusterCockpit/cc-backend/pkg/schema" @@ -312,3 +313,21 @@ func (idb *InfluxDBv2DataRepository) LoadNodeData( return nil, errors.New("METRICDATA/INFLUXV2 > unimplemented for InfluxDBv2DataRepository") } + +func (idb *InfluxDBv2DataRepository) LoadNodeListData( + cluster, subCluster, nodeFilter string, + metrics []string, + scopes []schema.MetricScope, + resolution int, + from, to time.Time, + page *model.PageRequest, + ctx context.Context, +) (map[string]schema.JobData, int, bool, error) { + + var totalNodes int = 0 + var 
hasNextPage bool = false + // TODO : Implement to be used in NodeList-View + log.Infof("LoadNodeListData unimplemented for InfluxDBv2DataRepository, Args: cluster %s, metrics %v, nodeFilter %v, scopes %v", cluster, metrics, nodeFilter, scopes) + + return nil, totalNodes, hasNextPage, errors.New("METRICDATA/INFLUXV2 > unimplemented for InfluxDBv2DataRepository") +} diff --git a/internal/metricdata/metricdata.go b/internal/metricdata/metricdata.go index 354dd5f..0fe94d1 100644 --- a/internal/metricdata/metricdata.go +++ b/internal/metricdata/metricdata.go @@ -11,6 +11,7 @@ import ( "time" "github.com/ClusterCockpit/cc-backend/internal/config" + "github.com/ClusterCockpit/cc-backend/internal/graph/model" "github.com/ClusterCockpit/cc-backend/pkg/log" "github.com/ClusterCockpit/cc-backend/pkg/schema" ) @@ -26,8 +27,11 @@ type MetricDataRepository interface { // Return a map of metrics to a map of nodes to the metric statistics of the job. node scope assumed for now. LoadStats(job *schema.Job, metrics []string, ctx context.Context) (map[string]map[string]schema.MetricStatistics, error) - // Return a map of hosts to a map of metrics at the requested scopes for that node. + // Return a map of hosts to a map of metrics at the requested scopes (currently only node) for that node. LoadNodeData(cluster string, metrics, nodes []string, scopes []schema.MetricScope, from, to time.Time, ctx context.Context) (map[string]map[string][]*schema.JobMetric, error) + + // Return a map of hosts to a map of metrics to a map of scopes for multiple nodes. 
+ LoadNodeListData(cluster, subCluster, nodeFilter string, metrics []string, scopes []schema.MetricScope, resolution int, from, to time.Time, page *model.PageRequest, ctx context.Context) (map[string]schema.JobData, int, bool, error) } var metricDataRepos map[string]MetricDataRepository = map[string]MetricDataRepository{} diff --git a/internal/metricdata/prometheus.go b/internal/metricdata/prometheus.go index 0611824..cd849ce 100644 --- a/internal/metricdata/prometheus.go +++ b/internal/metricdata/prometheus.go @@ -20,6 +20,7 @@ import ( "text/template" "time" + "github.com/ClusterCockpit/cc-backend/internal/graph/model" "github.com/ClusterCockpit/cc-backend/pkg/archive" "github.com/ClusterCockpit/cc-backend/pkg/log" "github.com/ClusterCockpit/cc-backend/pkg/schema" @@ -446,3 +447,21 @@ func (pdb *PrometheusDataRepository) LoadNodeData( log.Debugf("LoadNodeData of %v nodes took %s", len(data), t1) return data, nil } + +func (pdb *PrometheusDataRepository) LoadNodeListData( + cluster, subCluster, nodeFilter string, + metrics []string, + scopes []schema.MetricScope, + resolution int, + from, to time.Time, + page *model.PageRequest, + ctx context.Context, +) (map[string]schema.JobData, int, bool, error) { + + var totalNodes int = 0 + var hasNextPage bool = false + // TODO : Implement to be used in NodeList-View + log.Infof("LoadNodeListData unimplemented for PrometheusDataRepository, Args: cluster %s, metrics %v, nodeFilter %v, scopes %v", cluster, metrics, nodeFilter, scopes) + + return nil, totalNodes, hasNextPage, errors.New("METRICDATA/PROMETHEUS > unimplemented for PrometheusDataRepository") +} diff --git a/internal/metricdata/utils.go b/internal/metricdata/utils.go index dcdaaaa..48dd237 100644 --- a/internal/metricdata/utils.go +++ b/internal/metricdata/utils.go @@ -9,6 +9,7 @@ import ( "encoding/json" "time" + "github.com/ClusterCockpit/cc-backend/internal/graph/model" "github.com/ClusterCockpit/cc-backend/pkg/schema" ) @@ -50,6 +51,19 @@ func (tmdr 
*TestMetricDataRepository) LoadNodeData( panic("TODO") } +func (tmdr *TestMetricDataRepository) LoadNodeListData( + cluster, subCluster, nodeFilter string, + metrics []string, + scopes []schema.MetricScope, + resolution int, + from, to time.Time, + page *model.PageRequest, + ctx context.Context, +) (map[string]schema.JobData, int, bool, error) { + + panic("TODO") +} + func DeepCopy(jd_temp schema.JobData) schema.JobData { var jd schema.JobData diff --git a/internal/routerConfig/routes.go b/internal/routerConfig/routes.go index 1a3317f..bf74391 100644 --- a/internal/routerConfig/routes.go +++ b/internal/routerConfig/routes.go @@ -42,10 +42,12 @@ var routes []Route = []Route{ {"/monitoring/projects/", "monitoring/list.tmpl", "Projects - ClusterCockpit", true, func(i InfoType, r *http.Request) InfoType { i["listType"] = "PROJECT"; return i }}, {"/monitoring/tags/", "monitoring/taglist.tmpl", "Tags - ClusterCockpit", false, setupTaglistRoute}, {"/monitoring/user/{id}", "monitoring/user.tmpl", "User - ClusterCockpit", true, setupUserRoute}, - {"/monitoring/systems/{cluster}", "monitoring/systems.tmpl", "Cluster - ClusterCockpit", false, setupClusterRoute}, + {"/monitoring/systems/{cluster}", "monitoring/systems.tmpl", "Cluster Node Overview - ClusterCockpit", false, setupClusterOverviewRoute}, + {"/monitoring/systems/list/{cluster}", "monitoring/systems.tmpl", "Cluster Node List - ClusterCockpit", false, setupClusterListRoute}, + {"/monitoring/systems/list/{cluster}/{subcluster}", "monitoring/systems.tmpl", "Cluster Node List - ClusterCockpit", false, setupClusterListRoute}, {"/monitoring/node/{cluster}/{hostname}", "monitoring/node.tmpl", "Node - ClusterCockpit", false, setupNodeRoute}, {"/monitoring/analysis/{cluster}", "monitoring/analysis.tmpl", "Analysis - ClusterCockpit", true, setupAnalysisRoute}, - {"/monitoring/status/{cluster}", "monitoring/status.tmpl", "Status of - ClusterCockpit", false, setupClusterRoute}, + {"/monitoring/status/{cluster}", 
"monitoring/status.tmpl", "Status of - ClusterCockpit", false, setupClusterStatusRoute}, } func setupHomeRoute(i InfoType, r *http.Request) InfoType { @@ -111,7 +113,7 @@ func setupUserRoute(i InfoType, r *http.Request) InfoType { return i } -func setupClusterRoute(i InfoType, r *http.Request) InfoType { +func setupClusterStatusRoute(i InfoType, r *http.Request) InfoType { vars := mux.Vars(r) i["id"] = vars["cluster"] i["cluster"] = vars["cluster"] @@ -123,6 +125,36 @@ func setupClusterRoute(i InfoType, r *http.Request) InfoType { return i } +func setupClusterOverviewRoute(i InfoType, r *http.Request) InfoType { + vars := mux.Vars(r) + i["id"] = vars["cluster"] + i["cluster"] = vars["cluster"] + i["displayType"] = "OVERVIEW" + + from, to := r.URL.Query().Get("from"), r.URL.Query().Get("to") + if from != "" || to != "" { + i["from"] = from + i["to"] = to + } + return i +} + +func setupClusterListRoute(i InfoType, r *http.Request) InfoType { + vars := mux.Vars(r) + i["id"] = vars["cluster"] + i["cluster"] = vars["cluster"] + i["sid"] = vars["subcluster"] + i["subCluster"] = vars["subcluster"] + i["displayType"] = "LIST" + + from, to := r.URL.Query().Get("from"), r.URL.Query().Get("to") + if from != "" || to != "" { + i["from"] = from + i["to"] = to + } + return i +} + func setupNodeRoute(i InfoType, r *http.Request) InfoType { vars := mux.Vars(r) i["cluster"] = vars["cluster"] @@ -343,6 +375,9 @@ func SetupRoutes(router *mux.Router, buildInfo web.Build) { infos := route.Setup(map[string]interface{}{}, r) if id, ok := infos["id"]; ok { title = strings.Replace(route.Title, "", id.(string), 1) + if sid, ok := infos["sid"]; ok { // 2nd ID element + title = strings.Replace(title, "", sid.(string), 1) + } } // Get User -> What if NIL? 
diff --git a/pkg/archive/clusterConfig.go b/pkg/archive/clusterConfig.go index fff32c9..72718d0 100644 --- a/pkg/archive/clusterConfig.go +++ b/pkg/archive/clusterConfig.go @@ -15,12 +15,12 @@ import ( var ( Clusters []*schema.Cluster GlobalMetricList []*schema.GlobalMetricListItem - nodeLists map[string]map[string]NodeList + NodeLists map[string]map[string]NodeList ) func initClusterConfig() error { Clusters = []*schema.Cluster{} - nodeLists = map[string]map[string]NodeList{} + NodeLists = map[string]map[string]NodeList{} metricLookup := make(map[string]schema.GlobalMetricListItem) for _, c := range ar.GetClusters() { @@ -109,7 +109,7 @@ func initClusterConfig() error { Clusters = append(Clusters, cluster) - nodeLists[cluster.Name] = make(map[string]NodeList) + NodeLists[cluster.Name] = make(map[string]NodeList) for _, sc := range cluster.SubClusters { if sc.Nodes == "*" { continue @@ -119,7 +119,7 @@ func initClusterConfig() error { if err != nil { return fmt.Errorf("ARCHIVE/CLUSTERCONFIG > in %s/cluster.json: %w", cluster.Name, err) } - nodeLists[cluster.Name][sc.Name] = nl + NodeLists[cluster.Name][sc.Name] = nl } } @@ -187,7 +187,7 @@ func AssignSubCluster(job *schema.BaseJob) error { } host0 := job.Resources[0].Hostname - for sc, nl := range nodeLists[job.Cluster] { + for sc, nl := range NodeLists[job.Cluster] { if nl != nil && nl.Contains(host0) { job.SubCluster = sc return nil @@ -203,7 +203,7 @@ func AssignSubCluster(job *schema.BaseJob) error { } func GetSubClusterByNode(cluster, hostname string) (string, error) { - for sc, nl := range nodeLists[cluster] { + for sc, nl := range NodeLists[cluster] { if nl != nil && nl.Contains(hostname) { return sc, nil } diff --git a/pkg/schema/cluster.go b/pkg/schema/cluster.go index 0c88c61..07e4647 100644 --- a/pkg/schema/cluster.go +++ b/pkg/schema/cluster.go @@ -194,7 +194,17 @@ func (topo *Topology) GetAcceleratorID(id int) (string, error) { } } -func (topo *Topology) GetAcceleratorIDs() ([]int, error) { +// Return 
list of hardware (string) accelerator IDs +func (topo *Topology) GetAcceleratorIDs() []string { + accels := make([]string, 0) + for _, accel := range topo.Accelerators { + accels = append(accels, accel.ID) + } + return accels +} + +// Outdated? Or: Return indices of accelerators in parent array? +func (topo *Topology) GetAcceleratorIDsAsInt() ([]int, error) { accels := make([]int, 0) for _, accel := range topo.Accelerators { id, err := strconv.Atoi(accel.ID) diff --git a/web/frontend/package-lock.json b/web/frontend/package-lock.json index e21171f..4b89d34 100644 --- a/web/frontend/package-lock.json +++ b/web/frontend/package-lock.json @@ -1,12 +1,12 @@ { "name": "cc-frontend", - "version": "1.0.2", + "version": "1.0.0", "lockfileVersion": 3, "requires": true, "packages": { "": { "name": "cc-frontend", - "version": "1.0.2", + "version": "1.0.0", "license": "MIT", "dependencies": { "@rollup/plugin-replace": "^5.0.7", diff --git a/web/frontend/src/Analysis.root.svelte b/web/frontend/src/Analysis.root.svelte index 0442394..40757d3 100644 --- a/web/frontend/src/Analysis.root.svelte +++ b/web/frontend/src/Analysis.root.svelte @@ -522,7 +522,6 @@ ({ metric, ...binsFromFootprint( @@ -566,7 +565,6 @@ ({ m1, f1: $footprintsQuery.data.footprints.metrics.find( diff --git a/web/frontend/src/Config.root.svelte b/web/frontend/src/Config.root.svelte index dc45491..126f92b 100644 --- a/web/frontend/src/Config.root.svelte +++ b/web/frontend/src/Config.root.svelte @@ -3,6 +3,7 @@ Properties: - `ìsAdmin Bool!`: Is currently logged in user admin authority + - `isSupport Bool!`: Is currently logged in user support authority - `isApi Bool!`: Is currently logged in user api authority - `username String!`: Empty string if auth. 
is disabled, otherwise the username as string --> @@ -10,15 +11,17 @@ -{#if isAdmin == true} +{#if isAdmin} Admin Options @@ -27,6 +30,15 @@ {/if} +{#if isSupport || isAdmin} + + + Support Options + + + +{/if} + User Options diff --git a/web/frontend/src/Header.svelte b/web/frontend/src/Header.svelte index 9b12403..cf3e058 100644 --- a/web/frontend/src/Header.svelte +++ b/web/frontend/src/Header.svelte @@ -26,6 +26,7 @@ export let username; export let authlevel; export let clusters; + export let subClusters; export let roles; let isOpen = false; @@ -93,10 +94,19 @@ }, { title: "Nodes", - requiredRole: roles.admin, + requiredRole: roles.support, href: "/monitoring/systems/", icon: "hdd-rack", perCluster: true, + listOptions: true, + menu: "Info", + }, + { + title: "Analysis", + requiredRole: roles.support, + href: "/monitoring/analysis/", + icon: "graph-up", + perCluster: true, listOptions: false, menu: "Info", }, @@ -109,15 +119,6 @@ listOptions: false, menu: "Info", }, - { - title: "Analysis", - requiredRole: roles.support, - href: "/monitoring/analysis/", - icon: "graph-up", - perCluster: true, - listOptions: false, - menu: "Info", - }, ]; @@ -138,11 +139,13 @@ {#if screenSize > 1500 || screenSize < 768} item.requiredRole <= authlevel)} /> {:else if screenSize > 1300} item.requiredRole <= authlevel && item.menu != "Info", )} @@ -156,6 +159,7 @@ @@ -168,6 +172,7 @@ {:else} item.requiredRole <= authlevel && item.menu == "none", )} @@ -180,6 +185,7 @@ item.requiredRole <= authlevel && item.menu == 'Jobs', @@ -196,6 +202,7 @@ item.requiredRole <= authlevel && item.menu == 'Groups', @@ -212,6 +219,7 @@ item.requiredRole <= authlevel && item.menu == 'Info', diff --git a/web/frontend/src/Job.root.svelte b/web/frontend/src/Job.root.svelte index ad9a0c7..086f25c 100644 --- a/web/frontend/src/Job.root.svelte +++ b/web/frontend/src/Job.root.svelte @@ -348,7 +348,6 @@ {:else if $initq?.data && $jobMetrics?.data?.jobMetrics} Selected Node - + @@ -153,18 +153,20 @@ {#if 
$nodeJobsData.fetching} {:else if $nodeJobsData.data} - - - Activity - - - Show List - - + + + Activity + + + Show List + + {:else} - - No currently running jobs. - + + + Activity + + {/if} @@ -189,7 +191,6 @@ {:else} ({ diff --git a/web/frontend/src/Status.root.svelte b/web/frontend/src/Status.root.svelte index 98f23a3..f34b98b 100644 --- a/web/frontend/src/Status.root.svelte +++ b/web/frontend/src/Status.root.svelte @@ -655,7 +655,6 @@ {#key $mainQuery.data.stats[0].histMetrics} diff --git a/web/frontend/src/Systems.root.svelte b/web/frontend/src/Systems.root.svelte index 488cdad..4086667 100644 --- a/web/frontend/src/Systems.root.svelte +++ b/web/frontend/src/Systems.root.svelte @@ -1,7 +1,8 @@ + + {#if $initq.data} + + {#if !displayNodeOverview} + + + + Metrics + + + + {#if resampleConfig} + + + + Resolution + + {#each resampleResolutions as res} + + {/each} + + + + {/if} + {/if} - + - Find Node + Find Node(s) @@ -132,20 +142,22 @@ - - - - - Metric - - - + + {#if displayNodeOverview} + + + + Metric + + {#each systemMetrics as metric} + + {/each} + + + + {/if} {/if} -
-{#if $nodesQuery.error} + + +{#if displayType !== "OVERVIEW" && displayType !== "LIST"} - {$nodesQuery.error.message} - - -{:else if $nodesQuery.fetching || $initq.fetching} - - - + Unknown displayList type! {:else} - - h.host.includes(hostnameFilter) && - h.metrics.some( - (m) => m.name == selectedMetric && m.scope == "node", - ), - ) - .map((h) => ({ - host: h.host, - subCluster: h.subCluster, - data: h.metrics.find( - (m) => m.name == selectedMetric && m.scope == "node", - ), - disabled: checkMetricDisabled( - selectedMetric, - cluster, - h.subCluster, - ), - })) - .sort((a, b) => a.host.localeCompare(b.host))} - > -

- {item.host} ({item.subCluster}) -

- {#if item.disabled === false && item.data} - c.name == cluster)} - subCluster={item.subCluster} - forNode={true} - /> - {:else if item.disabled === true && item.data} - Metric disabled for subcluster {selectedMetric}:{item.subCluster} - {:else} - No dataset returned for {selectedMetric} - {/if} -
+ {#if displayNodeOverview} + + + {:else} + + + {/if} {/if} + + { + selectedMetrics = [...detail] + }} +/> diff --git a/web/frontend/src/User.root.svelte b/web/frontend/src/User.root.svelte index 57720b8..fae972b 100644 --- a/web/frontend/src/User.root.svelte +++ b/web/frontend/src/User.root.svelte @@ -308,7 +308,6 @@ {#key $stats.data.jobsStatistics[0].histMetrics} diff --git a/web/frontend/src/config.entrypoint.js b/web/frontend/src/config.entrypoint.js index feb3916..d2949f2 100644 --- a/web/frontend/src/config.entrypoint.js +++ b/web/frontend/src/config.entrypoint.js @@ -5,6 +5,7 @@ new Config({ target: document.getElementById('svelte-app'), props: { isAdmin: isAdmin, + isSupport: isSupport, isApi: isApi, username: username, ncontent: ncontent, diff --git a/web/frontend/src/config/AdminSettings.svelte b/web/frontend/src/config/AdminSettings.svelte index f512d40..dd53df4 100644 --- a/web/frontend/src/config/AdminSettings.svelte +++ b/web/frontend/src/config/AdminSettings.svelte @@ -4,7 +4,7 @@ + + diff --git a/web/frontend/src/config/admin/Options.svelte b/web/frontend/src/config/admin/Options.svelte index a1fe307..3808834 100644 --- a/web/frontend/src/config/admin/Options.svelte +++ b/web/frontend/src/config/admin/Options.svelte @@ -45,7 +45,7 @@ - Metric Plot Resampling + Metric Plot Resampling Info

Triggered at {resampleConfig.trigger} datapoints.

Configured resolutions: {resampleConfig.resolutions}

diff --git a/web/frontend/src/config/support/SupportOptions.svelte b/web/frontend/src/config/support/SupportOptions.svelte new file mode 100644 index 0000000..7d9ce03 --- /dev/null +++ b/web/frontend/src/config/support/SupportOptions.svelte @@ -0,0 +1,89 @@ + + + + + + + +
+ handleSettingSubmit("#node-paging-form", "npag")} + > + + +
Node List Paging Type
+ {#if displayMessage && message.target == "npag"}
+ Update: {message.msg} +
{/if} +
+ +
+
+ {#if config?.node_list_usePaging} + + {:else} + + {/if} + +
+
+ {#if config?.node_list_usePaging} + + {:else} + + {/if} + +
+
+ +
+
+ +
\ No newline at end of file diff --git a/web/frontend/src/config/user/UserOptions.svelte b/web/frontend/src/config/user/UserOptions.svelte index f8c1f00..be7f368 100644 --- a/web/frontend/src/config/user/UserOptions.svelte +++ b/web/frontend/src/config/user/UserOptions.svelte @@ -74,7 +74,7 @@ -
Paging Type
+
Job List Paging Type
{#if displayMessage && message.target == "pag"}
diff --git a/web/frontend/src/generic/PlotGrid.svelte b/web/frontend/src/generic/PlotGrid.svelte index 3bbee55..5152e0d 100644 --- a/web/frontend/src/generic/PlotGrid.svelte +++ b/web/frontend/src/generic/PlotGrid.svelte @@ -4,7 +4,6 @@ Properties: - `itemsPerRow Number`: Elements to render per row - `items [Any]`: List of plot components to render - - `renderFor String`: If 'job', filter disabled metrics --> -{#each rows as row} - - {#each row as item (item)} - - {#if !isPlaceholder(item)} - - {/if} - - {/each} - -{/each} + + {#each items as item} + + + + {/each} + diff --git a/web/frontend/src/generic/plots/MetricPlot.svelte b/web/frontend/src/generic/plots/MetricPlot.svelte index 3ce9971..48b1361 100644 --- a/web/frontend/src/generic/plots/MetricPlot.svelte +++ b/web/frontend/src/generic/plots/MetricPlot.svelte @@ -9,12 +9,12 @@ - `height Number?`: The plot height [Default: 300] - `timestep Number`: The timestep used for X-axis rendering - `series [GraphQL.Series]`: The metric data object - - `useStatsSeries Bool?`: If this plot uses the statistics Min/Max/Median representation; automatically set to according bool [Default: null] + - `useStatsSeries Bool?`: If this plot uses the statistics Min/Max/Median representation; automatically set to according bool [Default: false] - `statisticsSeries [GraphQL.StatisticsSeries]?`: Min/Max/Median representation of metric data [Default: null] - - `cluster GraphQL.Cluster`: Cluster Object of the parent job + - `cluster String`: Cluster name of the parent job / data - `subCluster String`: Name of the subCluster of the parent job - `isShared Bool?`: If this job used shared resources; will adapt threshold indicators accordingly [Default: false] - - `forNode Bool?`: If this plot is used for node data display; will ren[data, err := metricdata.LoadNodeData(cluster, metrics, nodes, scopes, from, to, 
ctx)](https://github.com/ClusterCockpit/cc-backend/blob/9fe7cdca9215220a19930779a60c8afc910276a3/internal/graph/schema.resolvers.go#L391-L392)der x-axis as negative time with $now as maximum [Default: false] + - `forNode Bool?`: If this plot is used for node data display; will render x-axis as negative time with $now as maximum [Default: false] - `numhwthreads Number?`: Number of job HWThreads [Default: 0] - `numaccs Number?`: Number of job Accelerators [Default: 0] - `zoomState Object?`: The last zoom state to preserve on user zoom [Default: null] @@ -124,13 +124,13 @@ export let metric; export let scope = "node"; - export let width = null; + export let width = 0; export let height = 300; export let timestep; export let series; - export let useStatsSeries = null; + export let useStatsSeries = false; export let statisticsSeries = null; - export let cluster; + export let cluster = ""; export let subCluster; export let isShared = false; export let forNode = false; @@ -138,11 +138,11 @@ export let numaccs = 0; export let zoomState = null; export let thresholdState = null; + export let extendedLegendData = null; - if (useStatsSeries == null) useStatsSeries = statisticsSeries != null; - if (useStatsSeries == false && series == null) useStatsSeries = true; + if (!useStatsSeries && statisticsSeries != null) useStatsSeries = true; - const usesMeanStatsSeries = (useStatsSeries && statisticsSeries.mean.length != 0) + const usesMeanStatsSeries = (statisticsSeries?.mean && statisticsSeries.mean.length != 0) const dispatch = createEventDispatcher(); const subClusterTopology = getContext("getHardwareTopology")(cluster, subCluster); const metricConfig = getContext("getMetricConfig")(cluster, subCluster, metric); @@ -194,6 +194,7 @@ className && legendEl.classList.add(className); uPlot.assign(legendEl.style, { + minWidth: extendedLegendData ? 
"300px" : "100px", textAlign: "left", pointerEvents: "none", display: "none", @@ -207,11 +208,10 @@ // conditional hide series color markers: if ( - useStatsSeries === true || // Min/Max/Median Self-Explanatory + useStatsSeries || // Min/Max/Median Self-Explanatory dataSize === 1 || // Only one Y-Dataseries - dataSize > 6 + dataSize > 8 // More than 8 Y-Dataseries ) { - // More than 6 Y-Dataseries const idents = legendEl.querySelectorAll(".u-marker"); for (let i = 0; i < idents.length; i++) idents[i].style.display = "none"; @@ -237,12 +237,12 @@ function update(u) { const { left, top } = u.cursor; - const width = u.over.querySelector(".u-legend").offsetWidth; + const width = u?.over?.querySelector(".u-legend")?.offsetWidth ? u.over.querySelector(".u-legend").offsetWidth : 0; legendEl.style.transform = "translate(" + (left - width - 15) + "px, " + (top + 15) + "px)"; } - if (dataSize <= 12 || useStatsSeries === true) { + if (dataSize <= 12 || useStatsSeries) { return { hooks: { init: init, @@ -311,13 +311,6 @@ } } - const plotSeries = [ - { - label: "Runtime", - value: (u, ts, sidx, didx) => - didx == null ? null : formatTime(ts, forNode), - }, - ]; const plotData = [new Array(longestSeries)]; if (forNode === true) { // Negative Timestamp Buildup @@ -334,6 +327,15 @@ plotData[0][j] = j * timestep; } + const plotSeries = [ + // Note: X-Legend Will not be shown as soon as Y-Axis are in extendedMode + { + label: "Runtime", + value: (u, ts, sidx, didx) => + (didx == null) ? null : formatTime(ts, forNode), + } + ]; + let plotBands = undefined; if (useStatsSeries) { plotData.push(statisticsSeries.min); @@ -370,15 +372,60 @@ } else { for (let i = 0; i < series.length; i++) { plotData.push(series[i].data); - plotSeries.push({ - label: - scope === "node" + // Default + if (!extendedLegendData) { + plotSeries.push({ + label: + scope === "node" ? 
series[i].hostname - : scope + " #" + (i + 1), - scale: "y", - width: lineWidth, - stroke: lineColor(i, series.length), - }); + : scope === "accelerator" + ? 'Acc #' + (i + 1) // series[i].id.slice(9, 14) | Too Hardware Specific + : scope + " #" + (i + 1), + scale: "y", + width: lineWidth, + stroke: lineColor(i, series.length), + }); + } + // Extended Legend For NodeList + else { + plotSeries.push({ + label: + scope === "node" + ? series[i].hostname + : scope === "accelerator" + ? 'Acc #' + (i + 1) // series[i].id.slice(9, 14) | Too Hardware Specific + : scope + " #" + (i + 1), + scale: "y", + width: lineWidth, + stroke: lineColor(i, series.length), + values: (u, sidx, idx) => { + // "i" = "sidx - 1" : sidx contains x-axis-data + if (idx == null) + return { + time: '-', + value: '-', + user: '-', + job: '-' + }; + + if (series[i].id in extendedLegendData) { + return { + time: formatTime(plotData[0][idx], forNode), + value: plotData[sidx][idx], + user: extendedLegendData[series[i].id].user, + job: extendedLegendData[series[i].id].job, + }; + } else { + return { + time: formatTime(plotData[0][idx], forNode), + value: plotData[sidx][idx], + user: '-', + job: '-', + }; + } + } + }); + } } } @@ -434,13 +481,13 @@ u.ctx.save(); u.ctx.textAlign = "start"; // 'end' u.ctx.fillStyle = "black"; - u.ctx.fillText(textl, u.bbox.left + 10, u.bbox.top + 10); + u.ctx.fillText(textl, u.bbox.left + 10, u.bbox.top + (forNode ? 0 : 10)); u.ctx.textAlign = "end"; u.ctx.fillStyle = "black"; u.ctx.fillText( textr, u.bbox.left + u.bbox.width - 10, - u.bbox.top + 10, + u.bbox.top + (forNode ? 0 : 10), ); // u.ctx.fillText(text, u.bbox.left + u.bbox.width - 10, u.bbox.top + u.bbox.height - 10) // Recipe for bottom right @@ -498,10 +545,12 @@ }, legend: { // Display legend until max 12 Y-dataseries - show: series.length <= 12 || useStatsSeries === true ? true : false, - live: series.length <= 12 || useStatsSeries === true ? 
true : false, + show: series.length <= 12 || useStatsSeries, + live: series.length <= 12 || useStatsSeries, }, - cursor: { drag: { x: true, y: true } }, + cursor: { + drag: { x: true, y: true }, + } }; // RENDER HANDLING @@ -537,17 +586,9 @@ } onMount(() => { - // Setup Wrapper - if (series[0].data.length > 0) { - if (forNode) { - plotWrapper.style.paddingTop = "0.5rem" - plotWrapper.style.paddingBottom = "0.5rem" - } - plotWrapper.style.backgroundColor = backgroundColor(); - plotWrapper.style.borderRadius = "5px"; + if (plotWrapper) { + render(width, height); } - // Init Plot - render(width, height); }); onDestroy(() => { @@ -555,22 +596,20 @@ if (uplot) uplot.destroy(); }); - // This updates it on all size changes - // Condition for reactive triggering (eg scope change) - $: if (series[0].data.length > 0) { + // This updates plot on all size changes if wrapper (== data) exists + $: if (plotWrapper) { onSizeChange(width, height); } - -
- {#if series[0].data.length > 0} -
- {:else} - Cannot render plot: No series data returned for {metric} - {/if} -
- + +{#if series[0]?.data && series[0].data.length > 0} +
+{:else} + Cannot render plot: No series data returned for {metric} +{/if} diff --git a/web/frontend/src/generic/select/MetricSelection.svelte b/web/frontend/src/generic/select/MetricSelection.svelte index 2b1151e..71b42b8 100644 --- a/web/frontend/src/generic/select/MetricSelection.svelte +++ b/web/frontend/src/generic/select/MetricSelection.svelte @@ -12,7 +12,7 @@ --> @@ -175,6 +178,7 @@ + diff --git a/web/frontend/src/generic/utils.js b/web/frontend/src/generic/utils.js index 879ebd0..f63012f 100644 --- a/web/frontend/src/generic/utils.js +++ b/web/frontend/src/generic/utils.js @@ -304,8 +304,19 @@ export function stickyHeader(datatableHeaderSelector, updatePading) { export function checkMetricDisabled(m, c, s) { // [m]etric, [c]luster, [s]ubcluster const metrics = getContext("globalMetrics"); - const result = metrics?.find((gm) => gm.name === m)?.availability?.find((av) => av.cluster === c)?.subClusters?.includes(s) - return !result + const available = metrics?.find((gm) => gm.name === m)?.availability?.find((av) => av.cluster === c)?.subClusters?.includes(s) + // Return inverse logic + return !available +} + +export function checkMetricsDisabled(ma, c, s) { // [m]etric[a]rray, [c]luster, [s]ubcluster + let result = {}; + const metrics = getContext("globalMetrics"); + ma.forEach((m) => { + // Return named inverse logic: !available + result[m] = !(metrics?.find((gm) => gm.name === m)?.availability?.find((av) => av.cluster === c)?.subClusters?.includes(s)) + }); + return result } export function getStatsItems(presetStats = []) { diff --git a/web/frontend/src/header/NavbarLinks.svelte b/web/frontend/src/header/NavbarLinks.svelte index c99f35d..26e7370 100644 --- a/web/frontend/src/header/NavbarLinks.svelte +++ b/web/frontend/src/header/NavbarLinks.svelte @@ -3,6 +3,7 @@ Properties: - `clusters [String]`: List of cluster names + - `subClusters map[String][]string`: Map of subclusters by cluster names - `links [Object]`: Pre-filtered link objects based on user auth 
- `direction String?`: The direcion of the drop-down menue [default: down] --> @@ -18,45 +19,83 @@ } from "@sveltestrap/sveltestrap"; export let clusters; + export let subClusters; export let links; export let direction = "down"; {#each links as item} {#if item.listOptions} - - - - {item.title} - - - - All Clusters - - - {#each clusters as cluster} - - - {cluster.name} - - - - Running Jobs - - - - {/each} - - + {#if item.title === 'Nodes'} + + + + {item.title} + + + {#each clusters as cluster} + + + {cluster.name} + + + + Node Overview + + + Node List + + {#each subClusters[cluster.name] as subCluster} + + {subCluster} Node List + + {/each} + + + {/each} + + + {:else} + + + + {item.title} + + + + All Clusters + + + {#each clusters as cluster} + + + {cluster.name} + + + + Running Jobs + + + + {/each} + + + {/if} {:else if !item.perCluster} {item.title} + + + + +
+ + + + + + {#each selectedMetrics as metric (metric)} + + {/each} + + + + {#if $nodesQuery.error} + + + {$nodesQuery.error.message} + + + {:else} + {#each nodes as nodeData} + + {:else} + + + + {/each} + {/if} + {#if $nodesQuery.fetching || !$nodesQuery.data} + + + + {/if} + +
+ {cluster} Node Info + {#if $nodesQuery.fetching} + + {/if} + + {metric} ({systemUnits[metric]}) +
No nodes found
+
+

+ Loading nodes {nodes.length + 1} to + { matchedNodes + ? `${(nodes.length + paging.itemsPerPage) > matchedNodes ? matchedNodes : (nodes.length + paging.itemsPerPage)} of ${matchedNodes} total` + : (nodes.length + paging.itemsPerPage) + } +

+ +
+
+
+
+ +{#if usePaging} + { + if (detail.itemsPerPage != itemsPerPage) { + updateConfiguration(detail.itemsPerPage.toString(), detail.page); + } else { + nodes = [] + paging = { itemsPerPage: detail.itemsPerPage, page: detail.page }; + } + }} + /> +{/if} + + diff --git a/web/frontend/src/systems/NodeOverview.svelte b/web/frontend/src/systems/NodeOverview.svelte new file mode 100644 index 0000000..68ccd78 --- /dev/null +++ b/web/frontend/src/systems/NodeOverview.svelte @@ -0,0 +1,155 @@ + + + + +{#if $nodesQuery.error} + + + {$nodesQuery.error.message} + + +{:else if $nodesQuery.fetching } + + + + + +{:else if filteredData?.length > 0} + + + {#each filteredData as item (item.host)} + +

+ {item.host} ({item.subCluster}) +

+ {#if item?.disabled[selectedMetric]} + Metric disabled for subcluster {selectedMetric}:{item.subCluster} + {:else} + + + {/if} + + {/each} +
+{/if} \ No newline at end of file diff --git a/web/frontend/src/systems/nodelist/NodeInfo.svelte b/web/frontend/src/systems/nodelist/NodeInfo.svelte new file mode 100644 index 0000000..ad6c98e --- /dev/null +++ b/web/frontend/src/systems/nodelist/NodeInfo.svelte @@ -0,0 +1,177 @@ + + + + + + +
+
+ Node + + {hostname} + +
+
+
+
+ {cluster} {subCluster} +
+
+
+ + {#if healthWarn} + + + + + + Status + + + + {:else if metricWarn} + + + + + + Status + + + + {:else if nodeJobsData.jobs.count == 1 && nodeJobsData.jobs.items[0].exclusive} + + + + + + Status + + + + {:else if nodeJobsData.jobs.count >= 1 && !nodeJobsData.jobs.items[0].exclusive} + + + + + + Status + + + + {:else} + + + + + + Status + + + + {/if} +
+ + + + + + + Activity + + + + + List + + + + + + + + + Users + + + + + List + + + {#if userList?.length > 0} + +
+ {userList.join(", ")} +
+
+ {/if} + + + + + + + Projects + + + + + List + + + {#if projectList?.length > 0} + +
+ {projectList.join(", ")} +
+
+ {/if} +
+
+ diff --git a/web/frontend/src/systems/nodelist/NodeListRow.svelte b/web/frontend/src/systems/nodelist/NodeListRow.svelte new file mode 100644 index 0000000..a1e4a54 --- /dev/null +++ b/web/frontend/src/systems/nodelist/NodeListRow.svelte @@ -0,0 +1,187 @@ + + + + + + + {#if $nodeJobsData.fetching} + + + + + + {:else} + + {/if} + + {#each refinedData as metricData (metricData.data.name)} + {#key metricData} + + {#if metricData?.disabled} + Metric disabled for subcluster {metricData.data.name}:{nodeData.subCluster} + {:else if !!metricData.data?.metric.statisticsSeries} + + +
+ {#key extendedLegendData} + + {/key} + {:else} + + {/if} + + {/key} + {/each} + diff --git a/web/templates/base.tmpl b/web/templates/base.tmpl index 2464d7f..358f926 100644 --- a/web/templates/base.tmpl +++ b/web/templates/base.tmpl @@ -15,10 +15,11 @@ {{end}} diff --git a/web/templates/config.tmpl b/web/templates/config.tmpl index 914dc88..0222da7 100644 --- a/web/templates/config.tmpl +++ b/web/templates/config.tmpl @@ -8,6 +8,7 @@ {{define "javascript"}} {{end}} diff --git a/web/web.go b/web/web.go index 1cfa176..45d8646 100644 --- a/web/web.go +++ b/web/web.go @@ -13,6 +13,7 @@ import ( "github.com/ClusterCockpit/cc-backend/internal/config" "github.com/ClusterCockpit/cc-backend/internal/util" + "github.com/ClusterCockpit/cc-backend/pkg/archive" "github.com/ClusterCockpit/cc-backend/pkg/log" "github.com/ClusterCockpit/cc-backend/pkg/schema" ) @@ -95,6 +96,7 @@ type Page struct { Roles map[string]schema.Role // Available roles for frontend render checks Build Build // Latest information about the application Clusters []schema.ClusterConfig // List of all clusters for use in the Header + SubClusters map[string][]string // Map per cluster of all subClusters for use in the Header FilterPresets map[string]interface{} // For pages with the Filter component, this can be used to set initial filters. Infos map[string]interface{} // For generic use (e.g. username for /monitoring/user/, job id for /monitoring/job/) Config map[string]interface{} // UI settings for the currently logged in user (e.g. line width, ...) 
@@ -114,6 +116,15 @@ func RenderTemplate(rw http.ResponseWriter, file string, page *Page) { } } + if page.SubClusters == nil { + page.SubClusters = make(map[string][]string) + for _, cluster := range archive.Clusters { + for _, sc := range cluster.SubClusters { + page.SubClusters[cluster.Name] = append(page.SubClusters[cluster.Name], sc.Name) + } + } + } + log.Debugf("Page config : %v\n", page.Config) if err := t.Execute(rw, page); err != nil { log.Errorf("Template error: %s", err.Error())