	add scopes, paging and backend filtering to nodeList
@@ -194,6 +194,15 @@ type NodeMetrics {
  metrics:    [JobMetricWithName!]!
}

type NodesResultList {
  items:  [NodeMetrics!]!
  offset: Int
  limit:  Int
  count:  Int
  totalNodes: Int
  hasNextPage: Boolean
}

type ClusterSupport {
  cluster: String!
  subClusters: [String!]!
@@ -241,6 +250,7 @@ type Query {
  rooflineHeatmap(filter: [JobFilter!]!, rows: Int!, cols: Int!, minX: Float!, minY: Float!, maxX: Float!, maxY: Float!): [[Float!]!]!

  nodeMetrics(cluster: String!, nodes: [String!], scopes: [MetricScope!], metrics: [String!], from: Time!, to: Time!): [NodeMetrics!]!
  nodeMetricsList(cluster: String!, subCluster: String!, nodeFilter: String!, scopes: [MetricScope!], metrics: [String!], from: Time!, to: Time!, page: PageRequest, resolution: Int): NodesResultList!
}

type Mutation {
File diff suppressed because it is too large
@@ -148,6 +148,15 @@ type NodeMetrics struct {
    Metrics    []*JobMetricWithName `json:"metrics"`
}

type NodesResultList struct {
    Items       []*NodeMetrics `json:"items"`
    Offset      *int           `json:"offset,omitempty"`
    Limit       *int           `json:"limit,omitempty"`
    Count       *int           `json:"count,omitempty"`
    TotalNodes  *int           `json:"totalNodes,omitempty"`
    HasNextPage *bool          `json:"hasNextPage,omitempty"`
}

type OrderByInput struct {
    Field string            `json:"field"`
    Type  string            `json:"type"`
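The optional GraphQL fields become pointer fields in the generated Go model, so a resolver has to pass addresses when filling them in (as NodeMetricsList below does with &totalNodes and &hasNextPage). A minimal sketch of that pattern; ptrInt, ptrBool and newNodesResultList are illustrative helpers, not part of the generated code:

```go
package graph

import "github.com/ClusterCockpit/cc-backend/internal/graph/model"

// ptrInt and ptrBool are illustrative helpers for the optional,
// pointer-typed fields of the generated model.
func ptrInt(v int) *int    { return &v }
func ptrBool(v bool) *bool { return &v }

// newNodesResultList wraps resolver items together with the paging metadata;
// pointer fields left nil are omitted from the JSON response.
func newNodesResultList(items []*model.NodeMetrics, total int, hasNext bool) *model.NodesResultList {
	return &model.NodesResultList{
		Items:       items,
		TotalNodes:  ptrInt(total),
		HasNextPage: ptrBool(hasNext),
	}
}
```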
@@ -2,7 +2,7 @@ package graph

// This file will be automatically regenerated based on the schema, any resolver implementations
// will be copied through when generating and any unknown code will be moved to the end.
// Code generated by github.com/99designs/gqlgen version v0.17.49
// Code generated by github.com/99designs/gqlgen version v0.17.57

import (
    "context"
@@ -466,6 +466,68 @@ func (r *queryResolver) NodeMetrics(ctx context.Context, cluster string, nodes [
    return nodeMetrics, nil
}

// NodeMetricsList is the resolver for the nodeMetricsList field.
func (r *queryResolver) NodeMetricsList(ctx context.Context, cluster string, subCluster string, nodeFilter string, scopes []schema.MetricScope, metrics []string, from time.Time, to time.Time, page *model.PageRequest, resolution *int) (*model.NodesResultList, error) {
    if resolution == nil { // Load from Config
        if config.Keys.EnableResampling != nil {
            defaultRes := slices.Max(config.Keys.EnableResampling.Resolutions)
            resolution = &defaultRes
        } else { // Set 0 (Loads configured metric timestep)
            defaultRes := 0
            resolution = &defaultRes
        }
    }

    user := repository.GetUserFromContext(ctx)
    if user != nil && !user.HasRole(schema.RoleAdmin) {
        return nil, errors.New("you need to be an administrator for this query")
    }

    if metrics == nil {
        for _, mc := range archive.GetCluster(cluster).MetricConfig {
            metrics = append(metrics, mc.Name)
        }
    }

    data, totalNodes, hasNextPage, err := metricDataDispatcher.LoadNodeListData(cluster, subCluster, nodeFilter, metrics, scopes, *resolution, from, to, page, ctx)
    if err != nil {
        log.Warn("error while loading node data")
        return nil, err
    }

    nodeMetricsList := make([]*model.NodeMetrics, 0, len(data))
    for hostname, metrics := range data {
        host := &model.NodeMetrics{
            Host:    hostname,
            Metrics: make([]*model.JobMetricWithName, 0, len(metrics)*len(scopes)),
        }
        host.SubCluster, err = archive.GetSubClusterByNode(cluster, hostname)
        if err != nil {
            log.Warnf("error in nodeMetrics resolver: %s", err)
        }

        for metric, scopedMetrics := range metrics {
            for scope, scopedMetric := range scopedMetrics {
                host.Metrics = append(host.Metrics, &model.JobMetricWithName{
                    Name:   metric,
                    Scope:  scope,
                    Metric: scopedMetric,
                })
            }
        }

        nodeMetricsList = append(nodeMetricsList, host)
    }

    nodeMetricsListResult := &model.NodesResultList{
        Items:       nodeMetricsList,
        TotalNodes:  &totalNodes,
        HasNextPage: &hasNextPage,
    }

    return nodeMetricsListResult, nil
}

// NumberOfNodes is the resolver for the numberOfNodes field.
func (r *subClusterResolver) NumberOfNodes(ctx context.Context, obj *schema.SubCluster) (int, error) {
    nodeList, err := archive.ParseNodeList(obj.Nodes)
@@ -493,11 +555,9 @@ func (r *Resolver) Query() generated.QueryResolver { return &queryResolver{r} }
// SubCluster returns generated.SubClusterResolver implementation.
func (r *Resolver) SubCluster() generated.SubClusterResolver { return &subClusterResolver{r} }

type (
    clusterResolver     struct{ *Resolver }
    jobResolver         struct{ *Resolver }
    metricValueResolver struct{ *Resolver }
    mutationResolver    struct{ *Resolver }
    queryResolver       struct{ *Resolver }
    subClusterResolver  struct{ *Resolver }
)
type clusterResolver struct{ *Resolver }
type jobResolver struct{ *Resolver }
type metricValueResolver struct{ *Resolver }
type mutationResolver struct{ *Resolver }
type queryResolver struct{ *Resolver }
type subClusterResolver struct{ *Resolver }
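The resolver opens with a small default-resolution fallback. A standalone sketch of the same idea, assuming (as slices.Max in the resolver suggests) that the resampling config exposes its resolutions as a []int; pickResolution is an illustrative name:

```go
package graph

import "slices"

// pickResolution mirrors the fallback at the top of NodeMetricsList:
// an explicit resolution wins, otherwise the coarsest configured resampling
// resolution is used, otherwise 0 (which loads the configured metric timestep).
func pickResolution(requested *int, resamplingResolutions []int) int {
	if requested != nil {
		return *requested
	}
	if len(resamplingResolutions) > 0 {
		return slices.Max(resamplingResolutions)
	}
	return 0
}
```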
@@ -10,6 +10,7 @@ import (
    "time"

    "github.com/ClusterCockpit/cc-backend/internal/config"
    "github.com/ClusterCockpit/cc-backend/internal/graph/model"
    "github.com/ClusterCockpit/cc-backend/internal/metricdata"
    "github.com/ClusterCockpit/cc-backend/pkg/archive"
    "github.com/ClusterCockpit/cc-backend/pkg/log"
@@ -219,7 +220,7 @@ func LoadAverages(
    return nil
}

// Used for the node/system view. Returns a map of nodes to a map of metrics.
// Used for the classic node/system view. Returns a map of nodes to a map of metrics.
func LoadNodeData(
    cluster string,
    metrics, nodes []string,
@@ -254,3 +255,40 @@ func LoadNodeData(

    return data, nil
}

func LoadNodeListData(
    cluster, subCluster, nodeFilter string,
    metrics []string,
    scopes []schema.MetricScope,
    resolution int,
    from, to time.Time,
    page *model.PageRequest,
    ctx context.Context,
) (map[string]map[string]map[schema.MetricScope]*schema.JobMetric, int, bool, error) {
    repo, err := metricdata.GetMetricDataRepo(cluster)
    if err != nil {
        return nil, 0, false, fmt.Errorf("METRICDATA/METRICDATA > no metric data repository configured for '%s'", cluster)
    }

    if metrics == nil {
        for _, m := range archive.GetCluster(cluster).MetricConfig {
            metrics = append(metrics, m.Name)
        }
    }

    data, totalNodes, hasNextPage, err := repo.LoadNodeListData(cluster, subCluster, nodeFilter, metrics, scopes, resolution, from, to, page, ctx)
    if err != nil {
        if len(data) != 0 {
            log.Warnf("partial error: %s", err.Error())
        } else {
            log.Error("Error while loading node data from metric repository")
            return nil, totalNodes, hasNextPage, err
        }
    }

    if data == nil {
        return nil, totalNodes, hasNextPage, fmt.Errorf("METRICDATA/METRICDATA > the metric data repository for '%s' does not support this query", cluster)
    }

    return data, totalNodes, hasNextPage, nil
}
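The dispatcher hands back a three-level map keyed by hostname, metric name, and scope, together with the paging metadata. A minimal sketch of how a caller walks that structure, roughly what the resolver does before flattening it into JobMetricWithName entries (printNodeListData is illustrative only):

```go
package main

import (
	"fmt"

	"github.com/ClusterCockpit/cc-backend/pkg/schema"
)

// printNodeListData walks the result shape of LoadNodeListData:
// hostname -> metric name -> scope -> *schema.JobMetric.
func printNodeListData(data map[string]map[string]map[schema.MetricScope]*schema.JobMetric) {
	for hostname, metricsByName := range data {
		for metricName, byScope := range metricsByName {
			for scope, jm := range byScope {
				fmt.Printf("%s: %s@%v -> %d series, timestep %ds\n",
					hostname, metricName, scope, len(jm.Series), jm.Timestep)
			}
		}
	}
}
```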
@@ -11,6 +11,7 @@ import (
    "encoding/json"
    "fmt"
    "net/http"
    "sort"
    "strconv"
    "strings"
    "time"
@@ -44,7 +45,6 @@ type CCMetricStore struct {
type ApiQueryRequest struct {
    Cluster     string     `json:"cluster"`
    Queries     []ApiQuery `json:"queries"`
    NodeQuery   NodeQuery  `json:"node-query"`
    ForAllNodes []string   `json:"for-all-nodes"`
    From        int64      `json:"from"`
    To          int64      `json:"to"`
@@ -63,19 +63,6 @@ type ApiQuery struct {
    Aggregate  bool     `json:"aggreg"`
}

type NodeQuery struct {
    Type         *string  `json:"type,omitempty"`
    SubType      *string  `json:"subtype,omitempty"`
    Metrics      []string `json:"metrics"`
    NodeFilter   string   `json:"node-filter"`
    Resolution   int      `json:"resolution"`
    TypeIds      []string `json:"type-ids,omitempty"`
    SubTypeIds   []string `json:"subtype-ids,omitempty"`
    Aggregate    bool     `json:"aggreg"`
    Page         int      `json:"page"`
    ItemsPerPage int      `json:"items-per-page"`
}

type ApiQueryResponse struct {
    Queries []ApiQuery        `json:"queries,omitempty"`
    Results [][]ApiMetricData `json:"results"`
@@ -712,9 +699,13 @@ func (ccms *CCMetricStore) LoadNodeListData(
    scopes []schema.MetricScope,
    resolution int,
    from, to time.Time,
    page model.PageRequest,
    page *model.PageRequest,
    ctx context.Context,
) (map[string]map[string]map[schema.MetricScope]*schema.JobMetric, error) {
) (map[string]map[string]map[schema.MetricScope]*schema.JobMetric, int, bool, error) {

    // 0) Init additional vars
    var totalNodes int = 0
    var hasNextPage bool = false

    // 1) Get list of all nodes
    var nodes []string
@@ -728,8 +719,6 @@ func (ccms *CCMetricStore) LoadNodeListData(
        }
    }

    log.Debugf(">> SEE HERE: NODES (All)! %v (Len: %d)", nodes, len(nodes))

    // 2) Filter nodes
    if nodeFilter != "" {
        filteredNodes := []string{}
@@ -741,7 +730,9 @@ func (ccms *CCMetricStore) LoadNodeListData(
        nodes = filteredNodes
    }

    log.Debugf(">> SEE HERE: NODES (Filtered)! %v (Len: %d)", nodes, len(nodes))
    // 2.1) Count total nodes && Sort nodes -> Sorting invalidated after ccms return ...
    totalNodes = len(nodes)
    sort.Strings(nodes)

    // 3) Apply paging
    if len(nodes) > page.ItemsPerPage {
@@ -749,16 +740,17 @@ func (ccms *CCMetricStore) LoadNodeListData(
        end := start + page.ItemsPerPage
        if end > len(nodes) {
            end = len(nodes)
            hasNextPage = false
        } else {
            hasNextPage = true
        }
        nodes = nodes[start:end]
    }

    log.Debugf(">> SEE HERE: NODES (Paged)! %v (Len: %d)", nodes, len(nodes))

    queries, assignedScope, err := ccms.buildNodeQueries(cluster, subCluster, nodes, metrics, scopes, resolution)
    if err != nil {
        log.Warn("Error while building queries")
        return nil, err
        return nil, totalNodes, hasNextPage, err
    }

    req := ApiQueryRequest{
@@ -773,7 +765,7 @@ func (ccms *CCMetricStore) LoadNodeListData(
    resBody, err := ccms.doRequest(ctx, &req)
    if err != nil {
        log.Error(fmt.Sprintf("Error while performing request %#v\n", err))
        return nil, err
        return nil, totalNodes, hasNextPage, err
    }

    var errors []string
@@ -795,14 +787,27 @@ func (ccms *CCMetricStore) LoadNodeListData(
            res = mc.Timestep
        }

        nodeMetric, ok := data[query.Hostname][metric][scope]
        // Init Nested Map Data Structures If Not Found
        hostData, ok := data[query.Hostname]
        if !ok {
            nodeMetric = &schema.JobMetric{
            hostData = make(map[string]map[schema.MetricScope]*schema.JobMetric)
            data[query.Hostname] = hostData
        }

        metricData, ok := hostData[metric]
        if !ok {
            metricData = make(map[schema.MetricScope]*schema.JobMetric)
            data[query.Hostname][metric] = metricData
        }

        scopeData, ok := metricData[scope]
        if !ok {
            scopeData = &schema.JobMetric{
                Unit:     mc.Unit,
                Timestep: res,
                Series:   make([]schema.Series, 0),
            }
            data[query.Hostname][metric][scope] = nodeMetric
            data[query.Hostname][metric][scope] = scopeData
        }

        for ndx, res := range row {
@@ -825,7 +830,7 @@ func (ccms *CCMetricStore) LoadNodeListData(
                res.Max = schema.Float(0)
            }

            nodeMetric.Series = append(nodeMetric.Series, schema.Series{
            scopeData.Series = append(scopeData.Series, schema.Series{
                Hostname: query.Hostname,
                Id:       id,
                Statistics: schema.MetricStatistics{
@@ -840,12 +845,10 @@ func (ccms *CCMetricStore) LoadNodeListData(

    if len(errors) != 0 {
        /* Returns list of "partial errors" */
        return data, fmt.Errorf("METRICDATA/CCMS > Errors: %s", strings.Join(errors, ", "))
        return data, totalNodes, hasNextPage, fmt.Errorf("METRICDATA/CCMS > Errors: %s", strings.Join(errors, ", "))
    }

    log.Debugf(">> SEE HERE: DATA (Final)! %v (Len: %d)", data, len(data))

    return data, nil
    return data, totalNodes, hasNextPage, nil
}

func (ccms *CCMetricStore) buildNodeQueries(
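The paging step in the cc-metric-store backend boils down to sorting the node names and cutting out one window. A standalone sketch of that idea, assuming 1-based page numbers as used by the frontend's PageRequest; pageNodes is illustrative and not a line-for-line copy of the code above (the boundary handling here simply reports whether any nodes remain after the returned window):

```go
package main

import "sort"

// pageNodes sorts the node names, counts them, and returns the requested
// page window plus whether any nodes remain after it.
func pageNodes(nodes []string, page, itemsPerPage int) (window []string, totalNodes int, hasNextPage bool) {
	totalNodes = len(nodes)
	sort.Strings(nodes)
	if totalNodes <= itemsPerPage {
		return nodes, totalNodes, false
	}
	start := (page - 1) * itemsPerPage
	if start >= totalNodes {
		return []string{}, totalNodes, false
	}
	end := start + itemsPerPage
	if end > totalNodes {
		end = totalNodes
	}
	return nodes[start:end], totalNodes, end < totalNodes
}
```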
@@ -320,12 +320,14 @@ func (idb *InfluxDBv2DataRepository) LoadNodeListData(
    scopes []schema.MetricScope,
    resolution int,
    from, to time.Time,
    page model.PageRequest,
    page *model.PageRequest,
    ctx context.Context,
) (map[string]map[string]map[schema.MetricScope]*schema.JobMetric, error) {
) (map[string]map[string]map[schema.MetricScope]*schema.JobMetric, int, bool, error) {

    var totalNodes int = 0
    var hasNextPage bool = false
    // TODO : Implement to be used in NodeList-View
    log.Infof("LoadNodeListData unimplemented for InfluxDBv2DataRepository, Args: cluster %s, metrics %v, nodeFilter %v, scopes %v", cluster, metrics, nodeFilter, scopes)

    return nil, errors.New("METRICDATA/INFLUXV2 > unimplemented for InfluxDBv2DataRepository")
    return nil, totalNodes, hasNextPage, errors.New("METRICDATA/INFLUXV2 > unimplemented for InfluxDBv2DataRepository")
}
@@ -31,7 +31,7 @@ type MetricDataRepository interface {
    LoadNodeData(cluster string, metrics, nodes []string, scopes []schema.MetricScope, from, to time.Time, ctx context.Context) (map[string]map[string][]*schema.JobMetric, error)

    // Return a map of hosts to a map of metrics to a map of scopes for multiple nodes.
    LoadNodeListData(cluster, subCluster, nodeFilter string, metrics []string, scopes []schema.MetricScope, resolution int, from, to time.Time, page model.PageRequest, ctx context.Context) (map[string]map[string]map[schema.MetricScope]*schema.JobMetric, error)
    LoadNodeListData(cluster, subCluster, nodeFilter string, metrics []string, scopes []schema.MetricScope, resolution int, from, to time.Time, page *model.PageRequest, ctx context.Context) (map[string]map[string]map[schema.MetricScope]*schema.JobMetric, int, bool, error)
}

var metricDataRepos map[string]MetricDataRepository = map[string]MetricDataRepository{}
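With this interface change, every metric data repository has to provide the extended LoadNodeListData signature, even if it only reports "unimplemented" the way the InfluxDB and Prometheus backends do. A hedged stub for a hypothetical backend (only this one method is shown, not the full interface):

```go
package metricdata

import (
	"context"
	"errors"
	"time"

	"github.com/ClusterCockpit/cc-backend/internal/graph/model"
	"github.com/ClusterCockpit/cc-backend/pkg/schema"
)

// exampleRepository is a hypothetical backend used only to illustrate
// the extended method signature.
type exampleRepository struct{}

// LoadNodeListData now returns the nested data map, the total node count,
// and whether further pages exist, in addition to the error.
func (r *exampleRepository) LoadNodeListData(
	cluster, subCluster, nodeFilter string,
	metrics []string,
	scopes []schema.MetricScope,
	resolution int,
	from, to time.Time,
	page *model.PageRequest,
	ctx context.Context,
) (map[string]map[string]map[schema.MetricScope]*schema.JobMetric, int, bool, error) {
	// Not wired to a data source: zero matching nodes, no further pages.
	return nil, 0, false, errors.New("METRICDATA/EXAMPLE > unimplemented")
}
```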
@@ -454,12 +454,14 @@ func (pdb *PrometheusDataRepository) LoadNodeListData(
    scopes []schema.MetricScope,
    resolution int,
    from, to time.Time,
    page model.PageRequest,
    page *model.PageRequest,
    ctx context.Context,
) (map[string]map[string]map[schema.MetricScope]*schema.JobMetric, error) {
) (map[string]map[string]map[schema.MetricScope]*schema.JobMetric, int, bool, error) {

    var totalNodes int = 0
    var hasNextPage bool = false
    // TODO : Implement to be used in NodeList-View
    log.Infof("LoadNodeListData unimplemented for PrometheusDataRepository, Args: cluster %s, metrics %v, nodeFilter %v, scopes %v", cluster, metrics, nodeFilter, scopes)

    return nil, errors.New("METRICDATA/INFLUXV2 > unimplemented for PrometheusDataRepository")
    return nil, totalNodes, hasNextPage, errors.New("METRICDATA/INFLUXV2 > unimplemented for PrometheusDataRepository")
}
@@ -57,9 +57,9 @@ func (tmdr *TestMetricDataRepository) LoadNodeListData(
    scopes []schema.MetricScope,
    resolution int,
    from, to time.Time,
    page model.PageRequest,
    page *model.PageRequest,
    ctx context.Context,
) (map[string]map[string]map[schema.MetricScope]*schema.JobMetric, error) {
) (map[string]map[string]map[schema.MetricScope]*schema.JobMetric, int, bool, error) {

    panic("TODO")
}
@@ -44,6 +44,7 @@ var routes []Route = []Route{
    {"/monitoring/user/{id}", "monitoring/user.tmpl", "User <ID> - ClusterCockpit", true, setupUserRoute},
    {"/monitoring/systems/{cluster}", "monitoring/systems.tmpl", "Cluster <ID> Overview - ClusterCockpit", false, setupClusterOverviewRoute},
    {"/monitoring/systems/list/{cluster}", "monitoring/systems.tmpl", "Cluster <ID> List - ClusterCockpit", false, setupClusterListRoute},
    {"/monitoring/systems/list/{cluster}/{subcluster}", "monitoring/systems.tmpl", "Cluster <ID> List - ClusterCockpit", false, setupClusterListRoute},
    {"/monitoring/node/{cluster}/{hostname}", "monitoring/node.tmpl", "Node <ID> - ClusterCockpit", false, setupNodeRoute},
    {"/monitoring/analysis/{cluster}", "monitoring/analysis.tmpl", "Analysis - ClusterCockpit", true, setupAnalysisRoute},
    {"/monitoring/status/{cluster}", "monitoring/status.tmpl", "Status of <ID> - ClusterCockpit", false, setupClusterStatusRoute},
@@ -142,6 +143,7 @@ func setupClusterListRoute(i InfoType, r *http.Request) InfoType {
    vars := mux.Vars(r)
    i["id"] = vars["cluster"]
    i["cluster"] = vars["cluster"]
    i["subCluster"] = vars["subcluster"]
    i["displayType"] = "LIST"

    from, to := r.URL.Query().Get("from"), r.URL.Query().Get("to")
@@ -10,7 +10,6 @@

<script>
  import { getContext } from "svelte";
  import { queryStore, gql, getContextClient } from "@urql/svelte"
  import {
    Row,
    Col,
@@ -20,10 +19,9 @@
    InputGroupText,
    Icon,
    Button,
    Spinner,
  } from "@sveltestrap/sveltestrap";

  import { init, checkMetricsDisabled } from "./generic/utils.js";
  import { init } from "./generic/utils.js";
  import NodeOverview from "./systems/NodeOverview.svelte";
  import NodeList from "./systems/NodeList.svelte";
  import MetricSelection from "./generic/select/MetricSelection.svelte";
@@ -32,6 +30,7 @@

  export let displayType;
  export let cluster;
  export let subCluster = "";
  export let from = null;
  export let to = null;

@@ -45,7 +44,7 @@
  if (from == null || to == null) {
    to = new Date(Date.now());
    from = new Date(to.getTime());
    from.setHours(from.getHours() - 2);
    from.setHours(from.getHours() - 12);
  }

  const initialized = getContext("initialized");
@@ -58,79 +57,15 @@
  let selectedMetrics = ccconfig[`node_list_selectedMetrics:${cluster}`] || [ccconfig.system_view_selectedMetric];
  let isMetricsSelectionOpen = false;

  // New Jan 2025
  /*
  - Toss "add_resolution_node_systems" branch OR include/merge here if resolutions in node-overview useful
  - Add single object field for nodeData query to CCMS query: "nodeDataQuery"
    - Contains following fields:
      - metrics: [String]  // List of metrics to query
      - page: Int          // Page number
      - itemsPerPage: Int  // Number of items per page
      - resolution: Int    // Requested Resolution for all returned data
      - nodeFilter: String // (partial) hostname string
    - With this, all use-cases except "scopes" can be handled, if nodeFilter is "" (empty) all nodes are returned by default
    - Is basically a stepped up version of the "forAllNodes" property, as "these metrics for all nodes" is still the base idea
  - Required: Handling in CCMS, co-develop in close contact with Aditya
  - Question: How and where to handle scope queries? (e.g. "node" vs "accelerator") -> NOT handled in ccms!
  - Notes: "Sorting" as use-case ignored for now, probably default to alphanumerical on hostnames of cluster
    Note 1: Scope Selector or Auto-Scoped?
    Note 2: "Sorting" as use-case ignored for now, probably default to alphanumerical on hostnames of cluster
    Note 3: Add Idle State Filter (== No allocated Jobs) [Frontend?] : Cannot be handled by CCMS, requires secondary job query and refiltering of visible nodes
  */

  // Todo: Add Idle State Filter (== No allocated Jobs) [Frontend?] : Cannot be handled by CCMS, requires secondary job query and refiltering of visible nodes
  // Todo: NodeList: At least accelerator scope ... "Show Detail" switch?
  // Todo: Rework GQL Query: Add Paging (Scrollable / Paging Configbar), Add Nodes Filter (see jobs-onthefly-userfilter: incompatible with ccms!), add scopes
  //       All three issues need either new features in ccms (paging, filter) or new implementation of ccms node queries with scopes (currently very job-specific)
  // Todo: Review performance // observed high client-side load frequency
  //       Is Svelte {#each} -> <MetricPlot/> -> onMount() related : Cannot be skipped ...
  //       Will be solved as soon as dedicated paging, itemLimits and filtering is implemented in ccms
  // ==> Skip for Q4/24 Release, build from ccms upgrade (paging/filter) up

  const client = getContextClient();
  const nodeQuery = gql`
    query ($cluster: String!, $metrics: [String!], $from: Time!, $to: Time!) {
      nodeMetrics(
        cluster: $cluster
        metrics: $metrics
        from: $from
        to: $to
      ) {
        host
        subCluster
        metrics {
          name
          scope
          metric {
            timestep
            unit {
              base
              prefix
            }
            series {
              statistics {
                min
                avg
                max
              }
              data
            }
          }
        }
      }
    }
  `

  $: nodesQuery = queryStore({
    client: client,
    query: nodeQuery,
    variables: {
      cluster: cluster,
      metrics: selectedMetrics,
      from: from.toISOString(),
      to: to.toISOString(),
    },
  });

  let systemMetrics = [];
  let systemUnits = {};

  function loadMetrics(isInitialized) {
    if (!isInitialized) return
    systemMetrics = [...globalMetrics.filter((gm) => gm?.availability.find((av) => av.cluster == cluster))]
@@ -145,43 +80,6 @@
    selectedMetrics = [selectedMetric]
  }

  let rawData = []
  $: if ($initq.data && $nodesQuery?.data) {
    rawData = $nodesQuery?.data?.nodeMetrics.filter((h) => {
      if (h.subCluster === '') { // Exclude nodes with empty subCluster field
        console.warn('subCluster not configured for node', h.host)
        return false
      } else {
        return h.metrics.some(
          (m) => selectedMetrics.includes(m.name) && m.scope == "node",
        )
      }
    })
  }

  let mappedData = []
  $: if (rawData?.length > 0) {
    mappedData = rawData.map((h) => ({
      host: h.host,
      subCluster: h.subCluster,
      data: h.metrics.filter(
        (m) => selectedMetrics.includes(m.name) && m.scope == "node",
      ),
      disabled: checkMetricsDisabled(
        selectedMetrics,
        cluster,
        h.subCluster,
      ),
    }))
    .sort((a, b) => a.host.localeCompare(b.host))
  }

  let filteredData = []
  $: if (mappedData?.length > 0) {
    filteredData = mappedData.filter((h) =>
      h.host.includes(hostnameFilter)
    )
  }
</script>

<!-- ROW1: Tools-->
@@ -255,25 +153,13 @@
      <Card body color="danger">Unknown displayList type! </Card>
    </Col>
  </Row>
{:else if $nodesQuery.error}
  <Row>
    <Col>
      <Card body color="danger">{$nodesQuery.error.message}</Card>
    </Col>
  </Row>
{:else if $nodesQuery.fetching }
  <Row>
    <Col>
      <Spinner />
    </Col>
  </Row>
{:else if filteredData?.length > 0}
{:else}
  {#if displayNodeOverview}
    <!-- ROW2-1: Node Overview (Grid Included)-->
    <NodeOverview {cluster} {ccconfig} data={filteredData}/>
    <NodeOverview {cluster} {subCluster} {ccconfig} {selectedMetrics} {from} {to} {hostnameFilter}/>
  {:else}
    <!-- ROW2-2: Node List (Grid Included)-->
    <NodeList {cluster} {selectedMetrics} {systemUnits} data={filteredData} bind:selectedMetric/>
    <NodeList {cluster} {subCluster} {ccconfig} {selectedMetrics} {hostnameFilter} {from} {to} {systemUnits}/>
  {/if}
{/if}
@@ -553,7 +553,7 @@
</script>

<!-- Define $width Wrapper and NoData Card -->
{#if series[0].data.length > 0}
{#if series[0]?.data && series[0].data.length > 0}
  <div bind:this={plotWrapper} bind:clientWidth={width}
        style="background-color: {backgroundColor()};" class={forNode ? 'py-2 rounded' : 'rounded'}
  />
@@ -6,6 +6,7 @@ new Systems({
    props: {
        displayType: displayType,
        cluster: infos.cluster,
        subCluster: infos.subCluster,
        from: infos.from,
        to: infos.to
    },
@@ -3,33 +3,114 @@

    Properties:
    - `cluster String`: The nodes' cluster
    - `data [Object]`: The node data array for all nodes
    - `subCluster String`: The nodes' subCluster
    - `ccconfig Object?`: The ClusterCockpit Config Context [Default: null]
    - `selectedMetrics [String]`: The array of selected metrics
    - `selectedMetrics Object`: The object of metric units
    - `systemUnits Object`: The object of metric units
 -->

<script>
  import { Row, Table } from "@sveltestrap/sveltestrap";
  import {
    stickyHeader
  } from "../generic/utils.js";
  import { queryStore, gql, getContextClient } from "@urql/svelte";
  import { Row, Col, Card, Table, Spinner } from "@sveltestrap/sveltestrap";
  import { init, stickyHeader } from "../generic/utils.js";
  import NodeListRow from "./nodelist/NodeListRow.svelte";
  import Pagination from "../generic/joblist/Pagination.svelte";

  export let cluster;
  export let data = null;
  export let subCluster = "";
  export const ccconfig = null;
  export let selectedMetrics = [];
  export let hostnameFilter = "";
  export let systemUnits = null;
  export let from = null;
  export let to = null;

  // Always use ONE BIG list, but: Make copyable markers -> Nodeinfo ! (like in markdown)
  // let usePaging = ccconfig.node_list_usePaging
  let itemsPerPage = 10 // usePaging ? ccconfig.node_list_jobsPerPage : 10;
  let page = 1;
  let paging = { itemsPerPage, page };

  let headerPaddingTop = 0;
  stickyHeader(
    ".cc-table-wrapper > table.table >thead > tr > th.position-sticky:nth-child(1)",
    (x) => (headerPaddingTop = x),
  );

  const { query: initq } = init();
  const client = getContextClient();
  const nodeListQuery = gql`
    query ($cluster: String!, $subCluster: String!, $nodeFilter: String!, $metrics: [String!], $scopes: [MetricScope!]!, $from: Time!, $to: Time!, $paging: PageRequest!) {
      nodeMetricsList(
        cluster: $cluster
        subCluster: $subCluster
        nodeFilter: $nodeFilter
        scopes: $scopes
        metrics: $metrics
        from: $from
        to: $to
        page: $paging
      ) {
        items {
          host
          subCluster
          metrics {
            name
            scope
            metric {
              timestep
              unit {
                base
                prefix
              }
              series {
                statistics {
                  min
                  avg
                  max
                }
                data
              }
            }
          }
        }
        totalNodes
        hasNextPage
      }
    }
  `

  $: nodesQuery = queryStore({
    client: client,
    query: nodeListQuery,
    variables: {
      cluster: cluster,
      subCluster: subCluster,
      nodeFilter: hostnameFilter,
      scopes: ["core", "accelerator"],
      metrics: selectedMetrics,
      from: from.toISOString(),
      to: to.toISOString(),
      paging: paging,
    },
  });

  $: matchedNodes = $nodesQuery.data?.nodeMetricsList.totalNodes || 0;
</script>

<Row>
{#if $nodesQuery.error}
  <Row>
    <Col>
      <Card body color="danger">{$nodesQuery.error.message}</Card>
    </Col>
  </Row>
{:else if $nodesQuery.fetching }
  <Row>
    <Col>
      <Spinner />
    </Col>
  </Row>
{:else if $initq?.data && $nodesQuery?.data}
  <Row>
    <div class="col cc-table-wrapper">
      <Table cellspacing="0px" cellpadding="0px">
        <thead>
@@ -54,7 +135,7 @@
          </tr>
        </thead>
        <tbody>
        {#each data as nodeData (nodeData.host)}
          {#each $nodesQuery.data.nodeMetricsList.items as nodeData (nodeData.host)}
            <NodeListRow {nodeData} {cluster} {selectedMetrics}/>
          {:else}
            <tr>
@@ -64,7 +145,26 @@
        </tbody>
      </Table>
    </div>
</Row>
  </Row>
{/if}

{#if true} <!-- usePaging -->
  <Pagination
    bind:page
    {itemsPerPage}
    itemText="Nodes"
    totalItems={matchedNodes}
    on:update-paging={({ detail }) => {
      paging = { itemsPerPage: detail.itemsPerPage, page: detail.page }
      // if (detail.itemsPerPage != itemsPerPage) {
      //   updateConfiguration(detail.itemsPerPage.toString(), detail.page);
      // } else {
      //   // nodes = []
      //   paging = { itemsPerPage: detail.itemsPerPage, page: detail.page };
      // }
    }}
  />
{/if}

<style>
  .cc-table-wrapper {
@@ -3,25 +3,127 @@

    Properties:
    - `ccconfig Object?`: The ClusterCockpit Config Context [Default: null]
    - `data Object?`: The GQL nodeMetrics data [Default: null]
    - `cluster String`: The cluster to show status information for
    - `selectedMetric String?`: The selectedMetric input [Default: ""]
 -->

 <script>
  import { Row, Col, Card } from "@sveltestrap/sveltestrap";
  import { queryStore, gql, getContextClient } from "@urql/svelte";
  import { Row, Col, Card, Spinner } from "@sveltestrap/sveltestrap";
  import { init, checkMetricsDisabled } from "../generic/utils.js";
  import MetricPlot from "../generic/plots/MetricPlot.svelte";

  export let ccconfig = null;
  export let data = null;
  export let cluster = "";
  export let selectedMetric = "";
  export const subCluster = "";
  export let selectedMetrics = null;
  export let hostnameFilter = "";
  export let from = null;
  export let to = null;

  const { query: initq } = init();
  const client = getContextClient();
  const nodeQuery = gql`
    query ($cluster: String!, $metrics: [String!], $from: Time!, $to: Time!) {
      nodeMetrics(
        cluster: $cluster
        metrics: $metrics
        from: $from
        to: $to
      ) {
        host
        subCluster
        metrics {
          name
          scope
          metric {
            timestep
            unit {
              base
              prefix
            }
            series {
              statistics {
                min
                avg
                max
              }
              data
            }
          }
        }
      }
    }
  `

  $: selectedMetric = selectedMetrics[0] ? selectedMetrics[0] : "";

  $: nodesQuery = queryStore({
    client: client,
    query: nodeQuery,
    variables: {
      cluster: cluster,
      metrics: selectedMetrics,
      from: from.toISOString(),
      to: to.toISOString(),
    },
  });

  let rawData = []
  $: if ($initq.data && $nodesQuery?.data) {
    rawData = $nodesQuery?.data?.nodeMetrics.filter((h) => {
      if (h.subCluster === '') { // Exclude nodes with empty subCluster field
        console.warn('subCluster not configured for node', h.host)
        return false
      } else {
        return h.metrics.some(
          (m) => selectedMetrics.includes(m.name) && m.scope == "node",
        )
      }
    })
  }

  let mappedData = []
  $: if (rawData?.length > 0) {
    mappedData = rawData.map((h) => ({
      host: h.host,
      subCluster: h.subCluster,
      data: h.metrics.filter(
        (m) => selectedMetrics.includes(m.name) && m.scope == "node",
      ),
      disabled: checkMetricsDisabled(
        selectedMetrics,
        cluster,
        h.subCluster,
      ),
    }))
    .sort((a, b) => a.host.localeCompare(b.host))
  }

  let filteredData = []
  $: if (mappedData?.length > 0) {
    filteredData = mappedData.filter((h) =>
      h.host.includes(hostnameFilter)
    )
  }
</script>

<!-- PlotGrid flattened into this component -->
<Row cols={{ xs: 1, sm: 2, md: 3, lg: ccconfig.plot_view_plotsPerRow}}>
  {#each data as item (item.host)}
{#if $nodesQuery.error}
  <Row>
    <Col>
      <Card body color="danger">{$nodesQuery.error.message}</Card>
    </Col>
  </Row>
{:else if $nodesQuery.fetching }
  <Row>
    <Col>
      <Spinner />
    </Col>
  </Row>
{:else if filteredData?.length > 0}
  <!-- PlotGrid flattened into this component -->
  <Row cols={{ xs: 1, sm: 2, md: 3, lg: ccconfig.plot_view_plotsPerRow}}>
    {#each filteredData as item (item.host)}
      <Col class="px-1">
        <h4 style="width: 100%; text-align: center;">
          <a
@@ -49,4 +151,5 @@
        {/if}
      </Col>
    {/each}
</Row>
  </Row>
{/if}
@@ -9,6 +9,7 @@

<script>
  import { Card } from "@sveltestrap/sveltestrap";
  import { maxScope, checkMetricDisabled } from "../../generic/utils.js";
  import MetricPlot from "../../generic/plots/MetricPlot.svelte";
  import NodeInfo from "./NodeInfo.svelte";

@@ -16,28 +17,55 @@
  export let nodeData;
  export let selectedMetrics;

  const sortOrder = (nodeMetrics) =>
    selectedMetrics.map((name) => nodeMetrics.find((nodeMetric) => nodeMetric.name == name));
  // Helper
  const selectScope = (nodeMetrics) =>
    nodeMetrics.reduce(
      (a, b) =>
        maxScope([a.scope, b.scope]) == a.scope ? b : a,
      nodeMetrics[0],
    );

  const sortAndSelectScope = (allNodeMetrics) =>
    selectedMetrics
      .map((selectedName) => allNodeMetrics.filter((nodeMetric) => nodeMetric.name == selectedName))
      .map((matchedNodeMetrics) => ({
        disabled: false,
        data: matchedNodeMetrics.length > 0 ? selectScope(matchedNodeMetrics) : null,
      }))
      .map((scopedNodeMetric) => {
        if (scopedNodeMetric?.data) {
          return {
            disabled: checkMetricDisabled(
              scopedNodeMetric.data.name,
              cluster,
              nodeData.subCluster,
            ),
            data: scopedNodeMetric.data,
          };
        } else {
          return scopedNodeMetric;
        }
      });
</script>

<tr>
  <td>
    <NodeInfo {cluster} subCluster={nodeData.subCluster} hostname={nodeData.host} />
  </td>
  {#each sortOrder(nodeData?.data) as metricData (metricData.name)}
  {#each sortAndSelectScope(nodeData?.metrics) as metricData (metricData.data.name)}
    <td>
      {#if nodeData?.disabled[metricData.name]}
      {#if metricData?.disabled}
        <Card body class="mx-3" color="info"
          >Metric disabled for subcluster <code
            >{metricData.name}:{nodeData.subCluster}</code
            >{metricData.data.name}:{nodeData.subCluster}</code
          ></Card
        >
      {:else}
        <!-- "No Data"-Warning included in MetricPlot-Component -->
        <MetricPlot
          timestep={metricData.metric.timestep}
          series={metricData.metric.series}
          metric={metricData.name}
          timestep={metricData.data.metric.timestep}
          series={metricData.data.metric.series}
          metric={metricData.data.name}
          {cluster}
          subCluster={nodeData.subCluster}
          forNode