Add scopes, paging, and backend filtering to nodeList

This commit is contained in:
Christoph Kluge 2025-01-09 18:56:50 +01:00
parent e871703724
commit 2a3383e9e6
17 changed files with 2300 additions and 565 deletions

View File

@ -194,6 +194,15 @@ type NodeMetrics {
metrics: [JobMetricWithName!]! metrics: [JobMetricWithName!]!
} }
"""
Paged result wrapper for node metric queries (nodeMetricsList).
Carries the page of items plus paging metadata.
"""
type NodesResultList {
  "The node metric entries for the current page."
  items: [NodeMetrics!]!
  "Offset of the first returned item, if paging was applied."
  offset: Int
  "Requested page size, if paging was applied."
  limit: Int
  "Number of items in this page."
  count: Int
  "Total number of nodes matching the filter, across all pages."
  totalNodes: Int
  "True if more pages exist beyond the current one."
  hasNextPage: Boolean
}
type ClusterSupport { type ClusterSupport {
cluster: String! cluster: String!
subClusters: [String!]! subClusters: [String!]!
@ -241,6 +250,7 @@ type Query {
rooflineHeatmap(filter: [JobFilter!]!, rows: Int!, cols: Int!, minX: Float!, minY: Float!, maxX: Float!, maxY: Float!): [[Float!]!]! rooflineHeatmap(filter: [JobFilter!]!, rows: Int!, cols: Int!, minX: Float!, minY: Float!, maxX: Float!, maxY: Float!): [[Float!]!]!
nodeMetrics(cluster: String!, nodes: [String!], scopes: [MetricScope!], metrics: [String!], from: Time!, to: Time!): [NodeMetrics!]! nodeMetrics(cluster: String!, nodes: [String!], scopes: [MetricScope!], metrics: [String!], from: Time!, to: Time!): [NodeMetrics!]!
nodeMetricsList(cluster: String!, subCluster: String!, nodeFilter: String!, scopes: [MetricScope!], metrics: [String!], from: Time!, to: Time!, page: PageRequest, resolution: Int): NodesResultList!
} }
type Mutation { type Mutation {

File diff suppressed because it is too large Load Diff

View File

@ -148,6 +148,15 @@ type NodeMetrics struct {
Metrics []*JobMetricWithName `json:"metrics"` Metrics []*JobMetricWithName `json:"metrics"`
} }
// NodesResultList is the gqlgen model for the NodesResultList GraphQL type:
// one page of node metric entries plus paging metadata. Pointer fields are
// optional in the schema and omitted from JSON when unset.
type NodesResultList struct {
	Items       []*NodeMetrics `json:"items"`                 // node metric entries for this page
	Offset      *int           `json:"offset,omitempty"`      // offset of the first returned item
	Limit       *int           `json:"limit,omitempty"`       // requested page size
	Count       *int           `json:"count,omitempty"`       // number of items in this page
	TotalNodes  *int           `json:"totalNodes,omitempty"`  // total matching nodes across all pages
	HasNextPage *bool          `json:"hasNextPage,omitempty"` // whether a further page exists
}
type OrderByInput struct { type OrderByInput struct {
Field string `json:"field"` Field string `json:"field"`
Type string `json:"type"` Type string `json:"type"`

View File

@ -2,7 +2,7 @@ package graph
// This file will be automatically regenerated based on the schema, any resolver implementations // This file will be automatically regenerated based on the schema, any resolver implementations
// will be copied through when generating and any unknown code will be moved to the end. // will be copied through when generating and any unknown code will be moved to the end.
// Code generated by github.com/99designs/gqlgen version v0.17.49 // Code generated by github.com/99designs/gqlgen version v0.17.57
import ( import (
"context" "context"
@ -466,6 +466,68 @@ func (r *queryResolver) NodeMetrics(ctx context.Context, cluster string, nodes [
return nodeMetrics, nil return nodeMetrics, nil
} }
// NodeMetricsList is the resolver for the nodeMetricsList field.
//
// It returns a paged, backend-filtered list of per-node metric data for one
// (sub)cluster, together with the total number of matching nodes and a
// next-page flag. Admin-only: non-admin users are rejected. A nil resolution
// falls back to the configured resampling maximum (or 0, meaning the metric's
// configured timestep); a nil metrics list defaults to all metrics configured
// for the cluster.
func (r *queryResolver) NodeMetricsList(ctx context.Context, cluster string, subCluster string, nodeFilter string, scopes []schema.MetricScope, metrics []string, from time.Time, to time.Time, page *model.PageRequest, resolution *int) (*model.NodesResultList, error) {
	// Authorize first, before doing any config or data work.
	user := repository.GetUserFromContext(ctx)
	if user != nil && !user.HasRole(schema.RoleAdmin) {
		return nil, errors.New("you need to be an administrator for this query")
	}

	if resolution == nil { // Load default from config
		defaultRes := 0 // 0 loads the configured metric timestep
		if config.Keys.EnableResampling != nil {
			defaultRes = slices.Max(config.Keys.EnableResampling.Resolutions)
		}
		resolution = &defaultRes
	}

	if metrics == nil {
		for _, mc := range archive.GetCluster(cluster).MetricConfig {
			metrics = append(metrics, mc.Name)
		}
	}

	data, totalNodes, hasNextPage, err := metricDataDispatcher.LoadNodeListData(cluster, subCluster, nodeFilter, metrics, scopes, *resolution, from, to, page, ctx)
	if err != nil {
		log.Warn("error while loading node data")
		return nil, err
	}

	// Flatten the nested host -> metric -> scope map into the GraphQL model.
	nodeMetricsList := make([]*model.NodeMetrics, 0, len(data))
	for hostname, metricMap := range data { // do not shadow the metrics param
		host := &model.NodeMetrics{
			Host:    hostname,
			Metrics: make([]*model.JobMetricWithName, 0, len(metricMap)*len(scopes)),
		}
		// A missing subcluster mapping is logged but non-fatal.
		host.SubCluster, err = archive.GetSubClusterByNode(cluster, hostname)
		if err != nil {
			log.Warnf("error in nodeMetricsList resolver: %s", err)
		}

		for metricName, scopedMetrics := range metricMap {
			for scope, scopedMetric := range scopedMetrics {
				host.Metrics = append(host.Metrics, &model.JobMetricWithName{
					Name:   metricName,
					Scope:  scope,
					Metric: scopedMetric,
				})
			}
		}

		nodeMetricsList = append(nodeMetricsList, host)
	}

	return &model.NodesResultList{
		Items:       nodeMetricsList,
		TotalNodes:  &totalNodes,
		HasNextPage: &hasNextPage,
	}, nil
}
// NumberOfNodes is the resolver for the numberOfNodes field. // NumberOfNodes is the resolver for the numberOfNodes field.
func (r *subClusterResolver) NumberOfNodes(ctx context.Context, obj *schema.SubCluster) (int, error) { func (r *subClusterResolver) NumberOfNodes(ctx context.Context, obj *schema.SubCluster) (int, error) {
nodeList, err := archive.ParseNodeList(obj.Nodes) nodeList, err := archive.ParseNodeList(obj.Nodes)
@ -493,11 +555,9 @@ func (r *Resolver) Query() generated.QueryResolver { return &queryResolver{r} }
// SubCluster returns generated.SubClusterResolver implementation. // SubCluster returns generated.SubClusterResolver implementation.
func (r *Resolver) SubCluster() generated.SubClusterResolver { return &subClusterResolver{r} } func (r *Resolver) SubCluster() generated.SubClusterResolver { return &subClusterResolver{r} }
type ( type clusterResolver struct{ *Resolver }
clusterResolver struct{ *Resolver } type jobResolver struct{ *Resolver }
jobResolver struct{ *Resolver } type metricValueResolver struct{ *Resolver }
metricValueResolver struct{ *Resolver } type mutationResolver struct{ *Resolver }
mutationResolver struct{ *Resolver } type queryResolver struct{ *Resolver }
queryResolver struct{ *Resolver } type subClusterResolver struct{ *Resolver }
subClusterResolver struct{ *Resolver }
)

View File

@ -10,6 +10,7 @@ import (
"time" "time"
"github.com/ClusterCockpit/cc-backend/internal/config" "github.com/ClusterCockpit/cc-backend/internal/config"
"github.com/ClusterCockpit/cc-backend/internal/graph/model"
"github.com/ClusterCockpit/cc-backend/internal/metricdata" "github.com/ClusterCockpit/cc-backend/internal/metricdata"
"github.com/ClusterCockpit/cc-backend/pkg/archive" "github.com/ClusterCockpit/cc-backend/pkg/archive"
"github.com/ClusterCockpit/cc-backend/pkg/log" "github.com/ClusterCockpit/cc-backend/pkg/log"
@ -219,7 +220,7 @@ func LoadAverages(
return nil return nil
} }
// Used for the node/system view. Returns a map of nodes to a map of metrics. // Used for the classic node/system view. Returns a map of nodes to a map of metrics.
func LoadNodeData( func LoadNodeData(
cluster string, cluster string,
metrics, nodes []string, metrics, nodes []string,
@ -254,3 +255,40 @@ func LoadNodeData(
return data, nil return data, nil
} }
// LoadNodeListData loads paged and filtered node metric data for the node
// list view. It resolves the metric data repository configured for the
// cluster, defaults to all configured metrics when none are given, and
// forwards the query. It returns the per-host data, the total number of
// matching nodes, and whether a further page exists.
func LoadNodeListData(
	cluster, subCluster, nodeFilter string,
	metrics []string,
	scopes []schema.MetricScope,
	resolution int,
	from, to time.Time,
	page *model.PageRequest,
	ctx context.Context,
) (map[string]map[string]map[schema.MetricScope]*schema.JobMetric, int, bool, error) {
	repo, err := metricdata.GetMetricDataRepo(cluster)
	if err != nil {
		return nil, 0, false, fmt.Errorf("METRICDATA/METRICDATA > no metric data repository configured for '%s'", cluster)
	}

	// Empty metric list: query every metric configured for this cluster.
	if metrics == nil {
		for _, m := range archive.GetCluster(cluster).MetricConfig {
			metrics = append(metrics, m.Name)
		}
	}

	data, totalNodes, hasNextPage, err := repo.LoadNodeListData(cluster, subCluster, nodeFilter, metrics, scopes, resolution, from, to, page, ctx)
	if err != nil {
		if len(data) != 0 {
			// Partial results: keep the data, only log the partial error.
			log.Warnf("partial error: %s", err.Error())
		} else {
			// Include the underlying error so the failure cause is not lost.
			log.Error("Error while loading node data from metric repository: " + err.Error())
			return nil, totalNodes, hasNextPage, err
		}
	}

	if data == nil {
		return nil, totalNodes, hasNextPage, fmt.Errorf("METRICDATA/METRICDATA > the metric data repository for '%s' does not support this query", cluster)
	}

	return data, totalNodes, hasNextPage, nil
}

View File

@ -11,6 +11,7 @@ import (
"encoding/json" "encoding/json"
"fmt" "fmt"
"net/http" "net/http"
"sort"
"strconv" "strconv"
"strings" "strings"
"time" "time"
@ -44,7 +45,6 @@ type CCMetricStore struct {
type ApiQueryRequest struct { type ApiQueryRequest struct {
Cluster string `json:"cluster"` Cluster string `json:"cluster"`
Queries []ApiQuery `json:"queries"` Queries []ApiQuery `json:"queries"`
NodeQuery NodeQuery `json:"node-query"`
ForAllNodes []string `json:"for-all-nodes"` ForAllNodes []string `json:"for-all-nodes"`
From int64 `json:"from"` From int64 `json:"from"`
To int64 `json:"to"` To int64 `json:"to"`
@ -63,19 +63,6 @@ type ApiQuery struct {
Aggregate bool `json:"aggreg"` Aggregate bool `json:"aggreg"`
} }
type NodeQuery struct {
Type *string `json:"type,omitempty"`
SubType *string `json:"subtype,omitempty"`
Metrics []string `json:"metrics"`
NodeFilter string `json:"node-filter"`
Resolution int `json:"resolution"`
TypeIds []string `json:"type-ids,omitempty"`
SubTypeIds []string `json:"subtype-ids,omitempty"`
Aggregate bool `json:"aggreg"`
Page int `json:"page"`
ItemsPerPage int `json:"items-per-page"`
}
type ApiQueryResponse struct { type ApiQueryResponse struct {
Queries []ApiQuery `json:"queries,omitempty"` Queries []ApiQuery `json:"queries,omitempty"`
Results [][]ApiMetricData `json:"results"` Results [][]ApiMetricData `json:"results"`
@ -712,9 +699,13 @@ func (ccms *CCMetricStore) LoadNodeListData(
scopes []schema.MetricScope, scopes []schema.MetricScope,
resolution int, resolution int,
from, to time.Time, from, to time.Time,
page model.PageRequest, page *model.PageRequest,
ctx context.Context, ctx context.Context,
) (map[string]map[string]map[schema.MetricScope]*schema.JobMetric, error) { ) (map[string]map[string]map[schema.MetricScope]*schema.JobMetric, int, bool, error) {
// 0) Init additional vars
var totalNodes int = 0
var hasNextPage bool = false
// 1) Get list of all nodes // 1) Get list of all nodes
var nodes []string var nodes []string
@ -728,8 +719,6 @@ func (ccms *CCMetricStore) LoadNodeListData(
} }
} }
log.Debugf(">> SEE HERE: NODES (All)! %v (Len: %d)", nodes, len(nodes))
// 2) Filter nodes // 2) Filter nodes
if nodeFilter != "" { if nodeFilter != "" {
filteredNodes := []string{} filteredNodes := []string{}
@ -741,7 +730,9 @@ func (ccms *CCMetricStore) LoadNodeListData(
nodes = filteredNodes nodes = filteredNodes
} }
log.Debugf(">> SEE HERE: NODES (Filtered)! %v (Len: %d)", nodes, len(nodes)) // 2.1) Count total nodes && Sort nodes -> Sorting invalidated after ccms return ...
totalNodes = len(nodes)
sort.Strings(nodes)
// 3) Apply paging // 3) Apply paging
if len(nodes) > page.ItemsPerPage { if len(nodes) > page.ItemsPerPage {
@ -749,16 +740,17 @@ func (ccms *CCMetricStore) LoadNodeListData(
end := start + page.ItemsPerPage end := start + page.ItemsPerPage
if end > len(nodes) { if end > len(nodes) {
end = len(nodes) end = len(nodes)
hasNextPage = false
} else {
hasNextPage = true
} }
nodes = nodes[start:end] nodes = nodes[start:end]
} }
log.Debugf(">> SEE HERE: NODES (Paged)! %v (Len: %d)", nodes, len(nodes))
queries, assignedScope, err := ccms.buildNodeQueries(cluster, subCluster, nodes, metrics, scopes, resolution) queries, assignedScope, err := ccms.buildNodeQueries(cluster, subCluster, nodes, metrics, scopes, resolution)
if err != nil { if err != nil {
log.Warn("Error while building queries") log.Warn("Error while building queries")
return nil, err return nil, totalNodes, hasNextPage, err
} }
req := ApiQueryRequest{ req := ApiQueryRequest{
@ -773,7 +765,7 @@ func (ccms *CCMetricStore) LoadNodeListData(
resBody, err := ccms.doRequest(ctx, &req) resBody, err := ccms.doRequest(ctx, &req)
if err != nil { if err != nil {
log.Error(fmt.Sprintf("Error while performing request %#v\n", err)) log.Error(fmt.Sprintf("Error while performing request %#v\n", err))
return nil, err return nil, totalNodes, hasNextPage, err
} }
var errors []string var errors []string
@ -795,14 +787,27 @@ func (ccms *CCMetricStore) LoadNodeListData(
res = mc.Timestep res = mc.Timestep
} }
nodeMetric, ok := data[query.Hostname][metric][scope] // Init Nested Map Data Structures If Not Found
hostData, ok := data[query.Hostname]
if !ok { if !ok {
nodeMetric = &schema.JobMetric{ hostData = make(map[string]map[schema.MetricScope]*schema.JobMetric)
data[query.Hostname] = hostData
}
metricData, ok := hostData[metric]
if !ok {
metricData = make(map[schema.MetricScope]*schema.JobMetric)
data[query.Hostname][metric] = metricData
}
scopeData, ok := metricData[scope]
if !ok {
scopeData = &schema.JobMetric{
Unit: mc.Unit, Unit: mc.Unit,
Timestep: res, Timestep: res,
Series: make([]schema.Series, 0), Series: make([]schema.Series, 0),
} }
data[query.Hostname][metric][scope] = nodeMetric data[query.Hostname][metric][scope] = scopeData
} }
for ndx, res := range row { for ndx, res := range row {
@ -825,7 +830,7 @@ func (ccms *CCMetricStore) LoadNodeListData(
res.Max = schema.Float(0) res.Max = schema.Float(0)
} }
nodeMetric.Series = append(nodeMetric.Series, schema.Series{ scopeData.Series = append(scopeData.Series, schema.Series{
Hostname: query.Hostname, Hostname: query.Hostname,
Id: id, Id: id,
Statistics: schema.MetricStatistics{ Statistics: schema.MetricStatistics{
@ -840,12 +845,10 @@ func (ccms *CCMetricStore) LoadNodeListData(
if len(errors) != 0 { if len(errors) != 0 {
/* Returns list of "partial errors" */ /* Returns list of "partial errors" */
return data, fmt.Errorf("METRICDATA/CCMS > Errors: %s", strings.Join(errors, ", ")) return data, totalNodes, hasNextPage, fmt.Errorf("METRICDATA/CCMS > Errors: %s", strings.Join(errors, ", "))
} }
log.Debugf(">> SEE HERE: DATA (Final)! %v (Len: %d)", data, len(data)) return data, totalNodes, hasNextPage, nil
return data, nil
} }
func (ccms *CCMetricStore) buildNodeQueries( func (ccms *CCMetricStore) buildNodeQueries(

View File

@ -320,12 +320,14 @@ func (idb *InfluxDBv2DataRepository) LoadNodeListData(
scopes []schema.MetricScope, scopes []schema.MetricScope,
resolution int, resolution int,
from, to time.Time, from, to time.Time,
page model.PageRequest, page *model.PageRequest,
ctx context.Context, ctx context.Context,
) (map[string]map[string]map[schema.MetricScope]*schema.JobMetric, error) { ) (map[string]map[string]map[schema.MetricScope]*schema.JobMetric, int, bool, error) {
var totalNodes int = 0
var hasNextPage bool = false
// TODO : Implement to be used in NodeList-View // TODO : Implement to be used in NodeList-View
log.Infof("LoadNodeListData unimplemented for InfluxDBv2DataRepository, Args: cluster %s, metrics %v, nodeFilter %v, scopes %v", cluster, metrics, nodeFilter, scopes) log.Infof("LoadNodeListData unimplemented for InfluxDBv2DataRepository, Args: cluster %s, metrics %v, nodeFilter %v, scopes %v", cluster, metrics, nodeFilter, scopes)
return nil, errors.New("METRICDATA/INFLUXV2 > unimplemented for InfluxDBv2DataRepository") return nil, totalNodes, hasNextPage, errors.New("METRICDATA/INFLUXV2 > unimplemented for InfluxDBv2DataRepository")
} }

View File

@ -31,7 +31,7 @@ type MetricDataRepository interface {
LoadNodeData(cluster string, metrics, nodes []string, scopes []schema.MetricScope, from, to time.Time, ctx context.Context) (map[string]map[string][]*schema.JobMetric, error) LoadNodeData(cluster string, metrics, nodes []string, scopes []schema.MetricScope, from, to time.Time, ctx context.Context) (map[string]map[string][]*schema.JobMetric, error)
// Return a map of hosts to a map of metrics to a map of scopes for multiple nodes. // Return a map of hosts to a map of metrics to a map of scopes for multiple nodes.
LoadNodeListData(cluster, subCluster, nodeFilter string, metrics []string, scopes []schema.MetricScope, resolution int, from, to time.Time, page model.PageRequest, ctx context.Context) (map[string]map[string]map[schema.MetricScope]*schema.JobMetric, error) LoadNodeListData(cluster, subCluster, nodeFilter string, metrics []string, scopes []schema.MetricScope, resolution int, from, to time.Time, page *model.PageRequest, ctx context.Context) (map[string]map[string]map[schema.MetricScope]*schema.JobMetric, int, bool, error)
} }
var metricDataRepos map[string]MetricDataRepository = map[string]MetricDataRepository{} var metricDataRepos map[string]MetricDataRepository = map[string]MetricDataRepository{}

View File

@ -454,12 +454,14 @@ func (pdb *PrometheusDataRepository) LoadNodeListData(
scopes []schema.MetricScope, scopes []schema.MetricScope,
resolution int, resolution int,
from, to time.Time, from, to time.Time,
page model.PageRequest, page *model.PageRequest,
ctx context.Context, ctx context.Context,
) (map[string]map[string]map[schema.MetricScope]*schema.JobMetric, error) { ) (map[string]map[string]map[schema.MetricScope]*schema.JobMetric, int, bool, error) {
var totalNodes int = 0
var hasNextPage bool = false
// TODO : Implement to be used in NodeList-View // TODO : Implement to be used in NodeList-View
log.Infof("LoadNodeListData unimplemented for PrometheusDataRepository, Args: cluster %s, metrics %v, nodeFilter %v, scopes %v", cluster, metrics, nodeFilter, scopes) log.Infof("LoadNodeListData unimplemented for PrometheusDataRepository, Args: cluster %s, metrics %v, nodeFilter %v, scopes %v", cluster, metrics, nodeFilter, scopes)
return nil, errors.New("METRICDATA/INFLUXV2 > unimplemented for PrometheusDataRepository") return nil, totalNodes, hasNextPage, errors.New("METRICDATA/INFLUXV2 > unimplemented for PrometheusDataRepository")
} }

View File

@ -57,9 +57,9 @@ func (tmdr *TestMetricDataRepository) LoadNodeListData(
scopes []schema.MetricScope, scopes []schema.MetricScope,
resolution int, resolution int,
from, to time.Time, from, to time.Time,
page model.PageRequest, page *model.PageRequest,
ctx context.Context, ctx context.Context,
) (map[string]map[string]map[schema.MetricScope]*schema.JobMetric, error) { ) (map[string]map[string]map[schema.MetricScope]*schema.JobMetric, int, bool, error) {
panic("TODO") panic("TODO")
} }

View File

@ -44,6 +44,7 @@ var routes []Route = []Route{
{"/monitoring/user/{id}", "monitoring/user.tmpl", "User <ID> - ClusterCockpit", true, setupUserRoute}, {"/monitoring/user/{id}", "monitoring/user.tmpl", "User <ID> - ClusterCockpit", true, setupUserRoute},
{"/monitoring/systems/{cluster}", "monitoring/systems.tmpl", "Cluster <ID> Overview - ClusterCockpit", false, setupClusterOverviewRoute}, {"/monitoring/systems/{cluster}", "monitoring/systems.tmpl", "Cluster <ID> Overview - ClusterCockpit", false, setupClusterOverviewRoute},
{"/monitoring/systems/list/{cluster}", "monitoring/systems.tmpl", "Cluster <ID> List - ClusterCockpit", false, setupClusterListRoute}, {"/monitoring/systems/list/{cluster}", "monitoring/systems.tmpl", "Cluster <ID> List - ClusterCockpit", false, setupClusterListRoute},
{"/monitoring/systems/list/{cluster}/{subcluster}", "monitoring/systems.tmpl", "Cluster <ID> List - ClusterCockpit", false, setupClusterListRoute},
{"/monitoring/node/{cluster}/{hostname}", "monitoring/node.tmpl", "Node <ID> - ClusterCockpit", false, setupNodeRoute}, {"/monitoring/node/{cluster}/{hostname}", "monitoring/node.tmpl", "Node <ID> - ClusterCockpit", false, setupNodeRoute},
{"/monitoring/analysis/{cluster}", "monitoring/analysis.tmpl", "Analysis - ClusterCockpit", true, setupAnalysisRoute}, {"/monitoring/analysis/{cluster}", "monitoring/analysis.tmpl", "Analysis - ClusterCockpit", true, setupAnalysisRoute},
{"/monitoring/status/{cluster}", "monitoring/status.tmpl", "Status of <ID> - ClusterCockpit", false, setupClusterStatusRoute}, {"/monitoring/status/{cluster}", "monitoring/status.tmpl", "Status of <ID> - ClusterCockpit", false, setupClusterStatusRoute},
@ -142,6 +143,7 @@ func setupClusterListRoute(i InfoType, r *http.Request) InfoType {
vars := mux.Vars(r) vars := mux.Vars(r)
i["id"] = vars["cluster"] i["id"] = vars["cluster"]
i["cluster"] = vars["cluster"] i["cluster"] = vars["cluster"]
i["subCluster"] = vars["subcluster"]
i["displayType"] = "LIST" i["displayType"] = "LIST"
from, to := r.URL.Query().Get("from"), r.URL.Query().Get("to") from, to := r.URL.Query().Get("from"), r.URL.Query().Get("to")

View File

@ -10,7 +10,6 @@
<script> <script>
import { getContext } from "svelte"; import { getContext } from "svelte";
import { queryStore, gql, getContextClient } from "@urql/svelte"
import { import {
Row, Row,
Col, Col,
@ -20,10 +19,9 @@
InputGroupText, InputGroupText,
Icon, Icon,
Button, Button,
Spinner,
} from "@sveltestrap/sveltestrap"; } from "@sveltestrap/sveltestrap";
import { init, checkMetricsDisabled } from "./generic/utils.js"; import { init } from "./generic/utils.js";
import NodeOverview from "./systems/NodeOverview.svelte"; import NodeOverview from "./systems/NodeOverview.svelte";
import NodeList from "./systems/NodeList.svelte"; import NodeList from "./systems/NodeList.svelte";
import MetricSelection from "./generic/select/MetricSelection.svelte"; import MetricSelection from "./generic/select/MetricSelection.svelte";
@ -32,6 +30,7 @@
export let displayType; export let displayType;
export let cluster; export let cluster;
export let subCluster = "";
export let from = null; export let from = null;
export let to = null; export let to = null;
@ -45,7 +44,7 @@
if (from == null || to == null) { if (from == null || to == null) {
to = new Date(Date.now()); to = new Date(Date.now());
from = new Date(to.getTime()); from = new Date(to.getTime());
from.setHours(from.getHours() - 2); from.setHours(from.getHours() - 12);
} }
const initialized = getContext("initialized"); const initialized = getContext("initialized");
@ -58,79 +57,15 @@
let selectedMetrics = ccconfig[`node_list_selectedMetrics:${cluster}`] || [ccconfig.system_view_selectedMetric]; let selectedMetrics = ccconfig[`node_list_selectedMetrics:${cluster}`] || [ccconfig.system_view_selectedMetric];
let isMetricsSelectionOpen = false; let isMetricsSelectionOpen = false;
// New Jan 2025
/* /*
- Toss "add_resolution_node_systems" branch OR include/merge here if resolutions in node-overview useful Note 1: Scope Selector or Auto-Scoped?
- Add single object field for nodeData query to CCMS query: "nodeDataQuery" Note 2: "Sorting" as use-case ignored for now, probably default to alphanumerical on hostnames of cluster
- Contains following fields: Note 3: Add Idle State Filter (== No allocated Jobs) [Frontend?] : Cannot be handled by CCMS, requires secondary job query and refiltering of visible nodes
- metrics: [String] // List of metrics to query
- page: Int // Page number
- itemsPerPage: Int // Number of items per page
- resolution: Int // Requested Resolution for all returned data
- nodeFilter: String // (partial) hostname string
- With this, all use-cases except "scopes" can be handled, if nodeFilter is "" (empty) all nodes are returned by default
- Is basically a stepped up version of the "forAllNodes" property, as "these metrics for all nodes" is still the base idea
- Required: Handling in CCMS, co-develop in close contact with Aditya
- Question: How and where to handle scope queries? (e.g. "node" vs "accelerator") -> NOT handled in ccms!
- NOtes: "Sorting" as use-case ignored for now, probably default to alphanumerical on hostnames of cluster
*/ */
// Todo: Add Idle State Filter (== No allocated Jobs) [Frontend?] : Cannot be handled by CCMS, requires secondary job query and refiltering of visible nodes
// Todo: NodeList: Mindestens Accelerator Scope ... "Show Detail" Switch?
// Todo: Rework GQL Query: Add Paging (Scrollable / Paging Configbar), Add Nodes Filter (see jobs-onthefly-userfilter: ccms inkompatibel!), add scopes
// All three issues need either new features in ccms (paging, filter) or new implementation of ccms node queries with scopes (currently very job-specific)
// Todo: Review performance // observed high client-side load frequency
// Is Svelte {#each} -> <MetricPlot/> -> onMount() related : Cannot be skipped ...
// Will be solved as soon as dedicated paging, itemLimits and filtering is implemented in ccms
// ==> Skip for Q4/24 Release, build from ccms upgrade (paging/filter) up
const client = getContextClient();
const nodeQuery = gql`
query ($cluster: String!, $metrics: [String!], $from: Time!, $to: Time!) {
nodeMetrics(
cluster: $cluster
metrics: $metrics
from: $from
to: $to
) {
host
subCluster
metrics {
name
scope
metric {
timestep
unit {
base
prefix
}
series {
statistics {
min
avg
max
}
data
}
}
}
}
}
`
$: nodesQuery = queryStore({
client: client,
query: nodeQuery,
variables: {
cluster: cluster,
metrics: selectedMetrics,
from: from.toISOString(),
to: to.toISOString(),
},
});
let systemMetrics = []; let systemMetrics = [];
let systemUnits = {}; let systemUnits = {};
function loadMetrics(isInitialized) { function loadMetrics(isInitialized) {
if (!isInitialized) return if (!isInitialized) return
systemMetrics = [...globalMetrics.filter((gm) => gm?.availability.find((av) => av.cluster == cluster))] systemMetrics = [...globalMetrics.filter((gm) => gm?.availability.find((av) => av.cluster == cluster))]
@ -145,43 +80,6 @@
selectedMetrics = [selectedMetric] selectedMetrics = [selectedMetric]
} }
let rawData = []
$: if ($initq.data && $nodesQuery?.data) {
rawData = $nodesQuery?.data?.nodeMetrics.filter((h) => {
if (h.subCluster === '') { // Exclude nodes with empty subCluster field
console.warn('subCluster not configured for node', h.host)
return false
} else {
return h.metrics.some(
(m) => selectedMetrics.includes(m.name) && m.scope == "node",
)
}
})
}
let mappedData = []
$: if (rawData?.length > 0) {
mappedData = rawData.map((h) => ({
host: h.host,
subCluster: h.subCluster,
data: h.metrics.filter(
(m) => selectedMetrics.includes(m.name) && m.scope == "node",
),
disabled: checkMetricsDisabled(
selectedMetrics,
cluster,
h.subCluster,
),
}))
.sort((a, b) => a.host.localeCompare(b.host))
}
let filteredData = []
$: if (mappedData?.length > 0) {
filteredData = mappedData.filter((h) =>
h.host.includes(hostnameFilter)
)
}
</script> </script>
<!-- ROW1: Tools--> <!-- ROW1: Tools-->
@ -255,25 +153,13 @@
<Card body color="danger">Unknown displayList type! </Card> <Card body color="danger">Unknown displayList type! </Card>
</Col> </Col>
</Row> </Row>
{:else if $nodesQuery.error} {:else}
<Row>
<Col>
<Card body color="danger">{$nodesQuery.error.message}</Card>
</Col>
</Row>
{:else if $nodesQuery.fetching }
<Row>
<Col>
<Spinner />
</Col>
</Row>
{:else if filteredData?.length > 0}
{#if displayNodeOverview} {#if displayNodeOverview}
<!-- ROW2-1: Node Overview (Grid Included)--> <!-- ROW2-1: Node Overview (Grid Included)-->
<NodeOverview {cluster} {ccconfig} data={filteredData}/> <NodeOverview {cluster} {subCluster} {ccconfig} {selectedMetrics} {from} {to} {hostnameFilter}/>
{:else} {:else}
<!-- ROW2-2: Node List (Grid Included)--> <!-- ROW2-2: Node List (Grid Included)-->
<NodeList {cluster} {selectedMetrics} {systemUnits} data={filteredData} bind:selectedMetric/> <NodeList {cluster} {subCluster} {ccconfig} {selectedMetrics} {hostnameFilter} {from} {to} {systemUnits}/>
{/if} {/if}
{/if} {/if}

View File

@ -553,7 +553,7 @@
</script> </script>
<!-- Define $width Wrapper and NoData Card --> <!-- Define $width Wrapper and NoData Card -->
{#if series[0].data.length > 0} {#if series[0]?.data && series[0].data.length > 0}
<div bind:this={plotWrapper} bind:clientWidth={width} <div bind:this={plotWrapper} bind:clientWidth={width}
style="background-color: {backgroundColor()};" class={forNode ? 'py-2 rounded' : 'rounded'} style="background-color: {backgroundColor()};" class={forNode ? 'py-2 rounded' : 'rounded'}
/> />

View File

@ -6,6 +6,7 @@ new Systems({
props: { props: {
displayType: displayType, displayType: displayType,
cluster: infos.cluster, cluster: infos.cluster,
subCluster: infos.subCluster,
from: infos.from, from: infos.from,
to: infos.to to: infos.to
}, },

View File

@ -3,68 +3,168 @@
Properties: Properties:
- `cluster String`: The nodes' cluster - `cluster String`: The nodes' cluster
- `data [Object]`: The node data array for all nodes - `subCluster String`: The nodes' subCluster
- `ccconfig Object?`: The ClusterCockpit Config Context [Default: null]
- `selectedMetrics [String]`: The array of selected metrics - `selectedMetrics [String]`: The array of selected metrics
- `selectedMetrics Object`: The object of metric units - `systemUnits Object`: The object of metric units
--> -->
<script> <script>
import { Row, Table } from "@sveltestrap/sveltestrap"; import { queryStore, gql, getContextClient } from "@urql/svelte";
import { import { Row, Col, Card, Table, Spinner } from "@sveltestrap/sveltestrap";
stickyHeader import { init, stickyHeader } from "../generic/utils.js";
} from "../generic/utils.js";
import NodeListRow from "./nodelist/NodeListRow.svelte"; import NodeListRow from "./nodelist/NodeListRow.svelte";
import Pagination from "../generic/joblist/Pagination.svelte";
export let cluster; export let cluster;
export let data = null; export let subCluster = "";
export const ccconfig = null;
export let selectedMetrics = []; export let selectedMetrics = [];
export let hostnameFilter = "";
export let systemUnits = null; export let systemUnits = null;
export let from = null;
export let to = null;
// Always use ONE BIG list, but: Make copyable markers -> Nodeinfo ! (like in markdown) // let usePaging = ccconfig.node_list_usePaging
let itemsPerPage = 10 // usePaging ? ccconfig.node_list_jobsPerPage : 10;
let page = 1;
let paging = { itemsPerPage, page };
let headerPaddingTop = 0; let headerPaddingTop = 0;
stickyHeader( stickyHeader(
".cc-table-wrapper > table.table >thead > tr > th.position-sticky:nth-child(1)", ".cc-table-wrapper > table.table >thead > tr > th.position-sticky:nth-child(1)",
(x) => (headerPaddingTop = x), (x) => (headerPaddingTop = x),
); );
const { query: initq } = init();
const client = getContextClient();
const nodeListQuery = gql`
query ($cluster: String!, $subCluster: String!, $nodeFilter: String!, $metrics: [String!], $scopes: [MetricScope!]!, $from: Time!, $to: Time!, $paging: PageRequest!) {
nodeMetricsList(
cluster: $cluster
subCluster: $subCluster
nodeFilter: $nodeFilter
scopes: $scopes
metrics: $metrics
from: $from
to: $to
page: $paging
) {
items {
host
subCluster
metrics {
name
scope
metric {
timestep
unit {
base
prefix
}
series {
statistics {
min
avg
max
}
data
}
}
}
}
totalNodes
hasNextPage
}
}
`
$: nodesQuery = queryStore({
client: client,
query: nodeListQuery,
variables: {
cluster: cluster,
subCluster: subCluster,
nodeFilter: hostnameFilter,
scopes: ["core", "accelerator"],
metrics: selectedMetrics,
from: from.toISOString(),
to: to.toISOString(),
paging: paging,
},
});
$: matchedNodes = $nodesQuery.data?.nodeMetricsList.totalNodes || 0;
</script> </script>
<Row> {#if $nodesQuery.error}
<div class="col cc-table-wrapper"> <Row>
<Table cellspacing="0px" cellpadding="0px"> <Col>
<thead> <Card body color="danger">{$nodesQuery.error.message}</Card>
<tr> </Col>
<th </Row>
class="position-sticky top-0 text-capitalize" {:else if $nodesQuery.fetching }
scope="col" <Row>
style="padding-top: {headerPaddingTop}px;" <Col>
> <Spinner />
{cluster} Node Info </Col>
</th> </Row>
{:else if $initq?.data && $nodesQuery?.data}
{#each selectedMetrics as metric (metric)} <Row>
<th <div class="col cc-table-wrapper">
class="position-sticky top-0 text-center" <Table cellspacing="0px" cellpadding="0px">
scope="col" <thead>
style="padding-top: {headerPaddingTop}px"
>
{metric} ({systemUnits[metric]})
</th>
{/each}
</tr>
</thead>
<tbody>
{#each data as nodeData (nodeData.host)}
<NodeListRow {nodeData} {cluster} {selectedMetrics}/>
{:else}
<tr> <tr>
<td>No nodes found </td> <th
class="position-sticky top-0 text-capitalize"
scope="col"
style="padding-top: {headerPaddingTop}px;"
>
{cluster} Node Info
</th>
{#each selectedMetrics as metric (metric)}
<th
class="position-sticky top-0 text-center"
scope="col"
style="padding-top: {headerPaddingTop}px"
>
{metric} ({systemUnits[metric]})
</th>
{/each}
</tr> </tr>
{/each} </thead>
</tbody> <tbody>
</Table> {#each $nodesQuery.data.nodeMetricsList.items as nodeData (nodeData.host)}
</div> <NodeListRow {nodeData} {cluster} {selectedMetrics}/>
</Row> {:else}
<tr>
<td>No nodes found </td>
</tr>
{/each}
</tbody>
</Table>
</div>
</Row>
{/if}
{#if true} <!-- usePaging -->
<Pagination
bind:page
{itemsPerPage}
itemText="Nodes"
totalItems={matchedNodes}
on:update-paging={({ detail }) => {
paging = { itemsPerPage: detail.itemsPerPage, page: detail.page }
// if (detail.itemsPerPage != itemsPerPage) {
// updateConfiguration(detail.itemsPerPage.toString(), detail.page);
// } else {
// // nodes = []
// paging = { itemsPerPage: detail.itemsPerPage, page: detail.page };
// }
}}
/>
{/if}
<style> <style>
.cc-table-wrapper { .cc-table-wrapper {

View File

@ -3,50 +3,153 @@
Properties: Properties:
- `ccconfig Object?`: The ClusterCockpit Config Context [Default: null] - `ccconfig Object?`: The ClusterCockpit Config Context [Default: null]
- `data Object?`: The GQL nodeMetrics data [Default: null]
- `cluster String`: The cluster to show status information for - `cluster String`: The cluster to show status information for
- `selectedMetric String?`: The selectedMetric input [Default: ""] - `selectedMetric String?`: The selectedMetric input [Default: ""]
--> -->
<script> <script>
import { Row, Col, Card } from "@sveltestrap/sveltestrap"; import { queryStore, gql, getContextClient } from "@urql/svelte";
import { Row, Col, Card, Spinner } from "@sveltestrap/sveltestrap";
import { init, checkMetricsDisabled } from "../generic/utils.js";
import MetricPlot from "../generic/plots/MetricPlot.svelte"; import MetricPlot from "../generic/plots/MetricPlot.svelte";
export let ccconfig = null; export let ccconfig = null;
export let data = null;
export let cluster = ""; export let cluster = "";
export let selectedMetric = ""; export const subCluster = "";
export let selectedMetrics = null;
export let hostnameFilter = "";
export let from = null;
export let to = null;
const { query: initq } = init();
const client = getContextClient();
const nodeQuery = gql`
query ($cluster: String!, $metrics: [String!], $from: Time!, $to: Time!) {
nodeMetrics(
cluster: $cluster
metrics: $metrics
from: $from
to: $to
) {
host
subCluster
metrics {
name
scope
metric {
timestep
unit {
base
prefix
}
series {
statistics {
min
avg
max
}
data
}
}
}
}
}
`
$: selectedMetric = selectedMetrics[0] ? selectedMetrics[0] : "";
$: nodesQuery = queryStore({
client: client,
query: nodeQuery,
variables: {
cluster: cluster,
metrics: selectedMetrics,
from: from.toISOString(),
to: to.toISOString(),
},
});
let rawData = []
$: if ($initq.data && $nodesQuery?.data) {
rawData = $nodesQuery?.data?.nodeMetrics.filter((h) => {
if (h.subCluster === '') { // Exclude nodes with empty subCluster field
console.warn('subCluster not configured for node', h.host)
return false
} else {
return h.metrics.some(
(m) => selectedMetrics.includes(m.name) && m.scope == "node",
)
}
})
}
let mappedData = []
$: if (rawData?.length > 0) {
mappedData = rawData.map((h) => ({
host: h.host,
subCluster: h.subCluster,
data: h.metrics.filter(
(m) => selectedMetrics.includes(m.name) && m.scope == "node",
),
disabled: checkMetricsDisabled(
selectedMetrics,
cluster,
h.subCluster,
),
}))
.sort((a, b) => a.host.localeCompare(b.host))
}
let filteredData = []
$: if (mappedData?.length > 0) {
filteredData = mappedData.filter((h) =>
h.host.includes(hostnameFilter)
)
}
</script> </script>
<!-- PlotGrid flattened into this component --> {#if $nodesQuery.error}
<Row cols={{ xs: 1, sm: 2, md: 3, lg: ccconfig.plot_view_plotsPerRow}}> <Row>
{#each data as item (item.host)} <Col>
<Col class="px-1"> <Card body color="danger">{$nodesQuery.error.message}</Card>
<h4 style="width: 100%; text-align: center;">
<a
style="display: block;padding-top: 15px;"
href="/monitoring/node/{cluster}/{item.host}"
>{item.host} ({item.subCluster})</a
>
</h4>
{#if item?.disabled[selectedMetric]}
<Card body class="mx-3" color="info"
>Metric disabled for subcluster <code
>{selectedMetric}:{item.subCluster}</code
></Card
>
{:else}
<!-- "No Data"-Warning included in MetricPlot-Component -->
<MetricPlot
timestep={item.data[0].metric.timestep}
series={item.data[0].metric.series}
metric={item.data[0].name}
{cluster}
subCluster={item.subCluster}
forNode
/>
{/if}
</Col> </Col>
{/each} </Row>
</Row> {:else if $nodesQuery.fetching }
<Row>
<Col>
<Spinner />
</Col>
</Row>
{:else if filteredData?.length > 0}
<!-- PlotGrid flattened into this component -->
<Row cols={{ xs: 1, sm: 2, md: 3, lg: ccconfig.plot_view_plotsPerRow}}>
{#each filteredData as item (item.host)}
<Col class="px-1">
<h4 style="width: 100%; text-align: center;">
<a
style="display: block;padding-top: 15px;"
href="/monitoring/node/{cluster}/{item.host}"
>{item.host} ({item.subCluster})</a
>
</h4>
{#if item?.disabled[selectedMetric]}
<Card body class="mx-3" color="info"
>Metric disabled for subcluster <code
>{selectedMetric}:{item.subCluster}</code
></Card
>
{:else}
<!-- "No Data"-Warning included in MetricPlot-Component -->
<MetricPlot
timestep={item.data[0].metric.timestep}
series={item.data[0].metric.series}
metric={item.data[0].name}
{cluster}
subCluster={item.subCluster}
forNode
/>
{/if}
</Col>
{/each}
</Row>
{/if}

View File

@ -9,6 +9,7 @@
<script> <script>
import { Card } from "@sveltestrap/sveltestrap"; import { Card } from "@sveltestrap/sveltestrap";
import { maxScope, checkMetricDisabled } from "../../generic/utils.js";
import MetricPlot from "../../generic/plots/MetricPlot.svelte"; import MetricPlot from "../../generic/plots/MetricPlot.svelte";
import NodeInfo from "./NodeInfo.svelte"; import NodeInfo from "./NodeInfo.svelte";
@ -16,28 +17,55 @@
export let nodeData; export let nodeData;
export let selectedMetrics; export let selectedMetrics;
const sortOrder = (nodeMetrics) => // Helper
selectedMetrics.map((name) => nodeMetrics.find((nodeMetric) => nodeMetric.name == name)); const selectScope = (nodeMetrics) =>
nodeMetrics.reduce(
(a, b) =>
maxScope([a.scope, b.scope]) == a.scope ? b : a,
nodeMetrics[0],
);
const sortAndSelectScope = (allNodeMetrics) =>
selectedMetrics
.map((selectedName) => allNodeMetrics.filter((nodeMetric) => nodeMetric.name == selectedName))
.map((matchedNodeMetrics) => ({
disabled: false,
data: matchedNodeMetrics.length > 0 ? selectScope(matchedNodeMetrics) : null,
}))
.map((scopedNodeMetric) => {
if (scopedNodeMetric?.data) {
return {
disabled: checkMetricDisabled(
scopedNodeMetric.data.name,
cluster,
nodeData.subCluster,
),
data: scopedNodeMetric.data,
};
} else {
return scopedNodeMetric;
}
});
</script> </script>
<tr> <tr>
<td> <td>
<NodeInfo {cluster} subCluster={nodeData.subCluster} hostname={nodeData.host} /> <NodeInfo {cluster} subCluster={nodeData.subCluster} hostname={nodeData.host} />
</td> </td>
{#each sortOrder(nodeData?.data) as metricData (metricData.name)} {#each sortAndSelectScope(nodeData?.metrics) as metricData (metricData.data.name)}
<td> <td>
{#if nodeData?.disabled[metricData.name]} {#if metricData?.disabled}
<Card body class="mx-3" color="info" <Card body class="mx-3" color="info"
>Metric disabled for subcluster <code >Metric disabled for subcluster <code
>{metricData.name}:{nodeData.subCluster}</code >{metricData.data.name}:{nodeData.subCluster}</code
></Card ></Card
> >
{:else} {:else}
<!-- "No Data"-Warning included in MetricPlot-Component --> <!-- "No Data"-Warning included in MetricPlot-Component -->
<MetricPlot <MetricPlot
timestep={metricData.metric.timestep} timestep={metricData.data.metric.timestep}
series={metricData.metric.series} series={metricData.data.metric.series}
metric={metricData.name} metric={metricData.data.name}
{cluster} {cluster}
subCluster={nodeData.subCluster} subCluster={nodeData.subCluster}
forNode forNode