Merge branch 'dev' of github.com:ClusterCockpit/cc-backend into dev

2025-10-16 13:01:35 +02:00
7 changed files with 271 additions and 143 deletions

View File

@@ -820,22 +820,12 @@ func (r *Resolver) Query() generated.QueryResolver { return &queryResolver{r} }
 // SubCluster returns generated.SubClusterResolver implementation.
 func (r *Resolver) SubCluster() generated.SubClusterResolver { return &subClusterResolver{r} }
 
-type clusterResolver struct{ *Resolver }
-type jobResolver struct{ *Resolver }
-type metricValueResolver struct{ *Resolver }
-type mutationResolver struct{ *Resolver }
-type nodeResolver struct{ *Resolver }
-type queryResolver struct{ *Resolver }
-type subClusterResolver struct{ *Resolver }
-
-// !!! WARNING !!!
-// The code below was going to be deleted when updating resolvers. It has been copied here so you have
-// one last chance to move it out of harms way if you want. There are two reasons this happens:
-//  - When renaming or deleting a resolver the old code will be put in here. You can safely delete
-//    it when you're done.
-//  - You have helper methods in this file. Move them out to keep these resolver files clean.
-/*
-	func (r *nodeResolver) NodeState(ctx context.Context, obj *model.Node) (string, error) {
-		panic(fmt.Errorf("not implemented: NodeState - nodeState"))
-	}
-*/
+type (
+	clusterResolver     struct{ *Resolver }
+	jobResolver         struct{ *Resolver }
+	metricValueResolver struct{ *Resolver }
+	mutationResolver    struct{ *Resolver }
+	nodeResolver        struct{ *Resolver }
+	queryResolver       struct{ *Resolver }
+	subClusterResolver  struct{ *Resolver }
+)
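
For orientation: these per-type resolver structs are gqlgen's standard pattern. Each one embeds the root *Resolver, so every generated resolver method can reach the root's shared dependencies. A minimal, self-contained sketch of that pattern (the RepositoryRegistry field and the SomeQuery method are hypothetical illustrations, not taken from this diff):

package graph

import "context"

// RepositoryRegistry stands in for whatever shared dependencies the root
// resolver holds (hypothetical, for illustration only).
type RepositoryRegistry struct{}

// Resolver is the shared root; gqlgen wires all sub-resolvers through it.
type Resolver struct {
	repos *RepositoryRegistry
}

// queryResolver embeds *Resolver, exactly like the grouped declarations above.
type queryResolver struct{ *Resolver }

// Any generated resolver method can reach shared state via the embedding.
func (r *queryResolver) SomeQuery(ctx context.Context) (string, error) {
	_ = r.repos // available through the embedded *Resolver
	return "ok", nil
}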

View File

@@ -27,10 +27,105 @@ CREATE TABLE "node_state" (
     FOREIGN KEY (node_id) REFERENCES node (id)
 );
 
--- Add Indices For New Node Table VARCHAR Fields
+-- DROP indices using old column name "cluster"
+DROP INDEX IF EXISTS jobs_cluster;
+DROP INDEX IF EXISTS jobs_cluster_user;
+DROP INDEX IF EXISTS jobs_cluster_project;
+DROP INDEX IF EXISTS jobs_cluster_subcluster;
+DROP INDEX IF EXISTS jobs_cluster_starttime;
+DROP INDEX IF EXISTS jobs_cluster_duration;
+DROP INDEX IF EXISTS jobs_cluster_numnodes;
+DROP INDEX IF EXISTS jobs_cluster_numhwthreads;
+DROP INDEX IF EXISTS jobs_cluster_numacc;
+DROP INDEX IF EXISTS jobs_cluster_energy;
+DROP INDEX IF EXISTS jobs_cluster_partition;
+DROP INDEX IF EXISTS jobs_cluster_partition_starttime;
+DROP INDEX IF EXISTS jobs_cluster_partition_duration;
+DROP INDEX IF EXISTS jobs_cluster_partition_numnodes;
+DROP INDEX IF EXISTS jobs_cluster_partition_numhwthreads;
+DROP INDEX IF EXISTS jobs_cluster_partition_numacc;
+DROP INDEX IF EXISTS jobs_cluster_partition_energy;
+DROP INDEX IF EXISTS jobs_cluster_partition_jobstate;
+DROP INDEX IF EXISTS jobs_cluster_partition_jobstate_user;
+DROP INDEX IF EXISTS jobs_cluster_partition_jobstate_project;
+DROP INDEX IF EXISTS jobs_cluster_partition_jobstate_starttime;
+DROP INDEX IF EXISTS jobs_cluster_partition_jobstate_duration;
+DROP INDEX IF EXISTS jobs_cluster_partition_jobstate_numnodes;
+DROP INDEX IF EXISTS jobs_cluster_partition_jobstate_numhwthreads;
+DROP INDEX IF EXISTS jobs_cluster_partition_jobstate_numacc;
+DROP INDEX IF EXISTS jobs_cluster_partition_jobstate_energy;
+DROP INDEX IF EXISTS jobs_cluster_jobstate;
+DROP INDEX IF EXISTS jobs_cluster_jobstate_user;
+DROP INDEX IF EXISTS jobs_cluster_jobstate_project;
+DROP INDEX IF EXISTS jobs_cluster_jobstate_starttime;
+DROP INDEX IF EXISTS jobs_cluster_jobstate_duration;
+DROP INDEX IF EXISTS jobs_cluster_jobstate_numnodes;
+DROP INDEX IF EXISTS jobs_cluster_jobstate_numhwthreads;
+DROP INDEX IF EXISTS jobs_cluster_jobstate_numacc;
+DROP INDEX IF EXISTS jobs_cluster_jobstate_energy;
+
+-- CREATE UPDATED indices with new column names
+-- Cluster Filter
+CREATE INDEX IF NOT EXISTS jobs_cluster ON job (hpc_cluster);
+CREATE INDEX IF NOT EXISTS jobs_cluster_user ON job (hpc_cluster, hpc_user);
+CREATE INDEX IF NOT EXISTS jobs_cluster_project ON job (hpc_cluster, project);
+CREATE INDEX IF NOT EXISTS jobs_cluster_subcluster ON job (hpc_cluster, subcluster);
+-- Cluster Filter Sorting
+CREATE INDEX IF NOT EXISTS jobs_cluster_starttime ON job (hpc_cluster, start_time);
+CREATE INDEX IF NOT EXISTS jobs_cluster_duration ON job (hpc_cluster, duration);
+CREATE INDEX IF NOT EXISTS jobs_cluster_numnodes ON job (hpc_cluster, num_nodes);
+CREATE INDEX IF NOT EXISTS jobs_cluster_numhwthreads ON job (hpc_cluster, num_hwthreads);
+CREATE INDEX IF NOT EXISTS jobs_cluster_numacc ON job (hpc_cluster, num_acc);
+CREATE INDEX IF NOT EXISTS jobs_cluster_energy ON job (hpc_cluster, energy);
+-- Cluster+Partition Filter
+CREATE INDEX IF NOT EXISTS jobs_cluster_partition ON job (hpc_cluster, cluster_partition);
+-- Cluster+Partition Filter Sorting
+CREATE INDEX IF NOT EXISTS jobs_cluster_partition_starttime ON job (hpc_cluster, cluster_partition, start_time);
+CREATE INDEX IF NOT EXISTS jobs_cluster_partition_duration ON job (hpc_cluster, cluster_partition, duration);
+CREATE INDEX IF NOT EXISTS jobs_cluster_partition_numnodes ON job (hpc_cluster, cluster_partition, num_nodes);
+CREATE INDEX IF NOT EXISTS jobs_cluster_partition_numhwthreads ON job (hpc_cluster, cluster_partition, num_hwthreads);
+CREATE INDEX IF NOT EXISTS jobs_cluster_partition_numacc ON job (hpc_cluster, cluster_partition, num_acc);
+CREATE INDEX IF NOT EXISTS jobs_cluster_partition_energy ON job (hpc_cluster, cluster_partition, energy);
+-- Cluster+Partition+Jobstate Filter
+CREATE INDEX IF NOT EXISTS jobs_cluster_partition_jobstate ON job (hpc_cluster, cluster_partition, job_state);
+CREATE INDEX IF NOT EXISTS jobs_cluster_partition_jobstate_user ON job (hpc_cluster, cluster_partition, job_state, hpc_user);
+CREATE INDEX IF NOT EXISTS jobs_cluster_partition_jobstate_project ON job (hpc_cluster, cluster_partition, job_state, project);
+-- Cluster+Partition+Jobstate Filter Sorting
+CREATE INDEX IF NOT EXISTS jobs_cluster_partition_jobstate_starttime ON job (hpc_cluster, cluster_partition, job_state, start_time);
+CREATE INDEX IF NOT EXISTS jobs_cluster_partition_jobstate_duration ON job (hpc_cluster, cluster_partition, job_state, duration);
+CREATE INDEX IF NOT EXISTS jobs_cluster_partition_jobstate_numnodes ON job (hpc_cluster, cluster_partition, job_state, num_nodes);
+CREATE INDEX IF NOT EXISTS jobs_cluster_partition_jobstate_numhwthreads ON job (hpc_cluster, cluster_partition, job_state, num_hwthreads);
+CREATE INDEX IF NOT EXISTS jobs_cluster_partition_jobstate_numacc ON job (hpc_cluster, cluster_partition, job_state, num_acc);
+CREATE INDEX IF NOT EXISTS jobs_cluster_partition_jobstate_energy ON job (hpc_cluster, cluster_partition, job_state, energy);
+-- Cluster+JobState Filter
+CREATE INDEX IF NOT EXISTS jobs_cluster_jobstate ON job (hpc_cluster, job_state);
+CREATE INDEX IF NOT EXISTS jobs_cluster_jobstate_user ON job (hpc_cluster, job_state, hpc_user);
+CREATE INDEX IF NOT EXISTS jobs_cluster_jobstate_project ON job (hpc_cluster, job_state, project);
+-- Cluster+JobState Filter Sorting
+CREATE INDEX IF NOT EXISTS jobs_cluster_jobstate_starttime ON job (hpc_cluster, job_state, start_time);
+CREATE INDEX IF NOT EXISTS jobs_cluster_jobstate_duration ON job (hpc_cluster, job_state, duration);
+CREATE INDEX IF NOT EXISTS jobs_cluster_jobstate_numnodes ON job (hpc_cluster, job_state, num_nodes);
+CREATE INDEX IF NOT EXISTS jobs_cluster_jobstate_numhwthreads ON job (hpc_cluster, job_state, num_hwthreads);
+CREATE INDEX IF NOT EXISTS jobs_cluster_jobstate_numacc ON job (hpc_cluster, job_state, num_acc);
+CREATE INDEX IF NOT EXISTS jobs_cluster_jobstate_energy ON job (hpc_cluster, job_state, energy);
+-- END UPDATE existing indices
+
+-- Add NEW Indices For New Job Table Columns
+CREATE INDEX IF NOT EXISTS jobs_cluster_submittime ON job (hpc_cluster, submit_time);
+CREATE INDEX IF NOT EXISTS jobs_cluster_partition_submittime ON job (hpc_cluster, cluster_partition, submit_time);
+CREATE INDEX IF NOT EXISTS jobs_cluster_partition_jobstate_submittime ON job (hpc_cluster, cluster_partition, job_state, submit_time);
+CREATE INDEX IF NOT EXISTS jobs_cluster_jobstate_submittime ON job (hpc_cluster, job_state, submit_time);
+
+-- Add NEW Indices For New Node Table VARCHAR Fields
 CREATE INDEX IF NOT EXISTS nodes_cluster ON node (cluster);
 CREATE INDEX IF NOT EXISTS nodes_cluster_subcluster ON node (cluster, subcluster);
 
--- Add Indices For Increased Amounts of Tags
+-- Add NEW Indices For New Node_State Table Fields
+CREATE INDEX IF NOT EXISTS nodeStates_state ON node_state (node_state);
+CREATE INDEX IF NOT EXISTS nodeStates_health ON node_state (health_state);
+CREATE INDEX IF NOT EXISTS nodeStates_nodeid_state ON node_state (node_id, node_state);
+CREATE INDEX IF NOT EXISTS nodeStates_nodeid_health ON node_state (node_id, health_state);
+
+-- Add NEW Indices For Increased Amounts of Tags
 CREATE INDEX IF NOT EXISTS tags_jobid ON jobtag (job_id);
 CREATE INDEX IF NOT EXISTS tags_tagid ON jobtag (tag_id);
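
Whether these composite indices actually serve the UI's filter-plus-sort queries can be checked against SQLite's query planner. A minimal sketch, assuming the SQLite backend and the mattn/go-sqlite3 driver (database path and filter values are placeholders):

package main

import (
	"database/sql"
	"fmt"
	"log"

	_ "github.com/mattn/go-sqlite3"
)

func main() {
	db, err := sql.Open("sqlite3", "./job.db") // path is an assumption
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	// For an equality filter on hpc_cluster and job_state plus an ORDER BY on
	// start_time, the planner should report a SEARCH using
	// jobs_cluster_jobstate_starttime rather than a SCAN of job.
	rows, err := db.Query(`EXPLAIN QUERY PLAN
		SELECT id FROM job
		WHERE hpc_cluster = ? AND job_state = ?
		ORDER BY start_time DESC`, "cluster1", "running")
	if err != nil {
		log.Fatal(err)
	}
	defer rows.Close()
	for rows.Next() {
		var id, parent, notused int
		var detail string
		if err := rows.Scan(&id, &parent, &notused, &detail); err != nil {
			log.Fatal(err)
		}
		fmt.Println(detail)
	}
}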

View File

@@ -250,6 +250,10 @@ func (r *NodeRepository) QueryNodes(
 		return nil, qerr
 	}
 
+	// Get latest Info aka closest Timestamp to $now
+	now := time.Now().Unix()
+	query = query.Join("node_state ON node_state.node_id = node.id").Where(sq.Gt{"node_state.time_stamp": (now - 60)}) // .Distinct()
+
 	for _, f := range filters {
 		if f.Hostname != nil {
 			query = buildStringCondition("node.hostname", f.Hostname, query)
@@ -291,104 +295,140 @@ func (r *NodeRepository) QueryNodes(
 	return nodes, nil
 }
 
-//
-// func (r *NodeRepository) CountNodeStates(ctx context.Context, filters []*model.NodeFilter) ([]*model.NodeStates, error) {
-// 	query, qerr := AccessCheck(ctx, sq.Select("node_state AS state", "count(*) AS count").From("node"))
-// 	if qerr != nil {
-// 		return nil, qerr
-// 	}
-//
-// 	for _, f := range filters {
-// 		if f.Hostname != nil {
-// 			query = buildStringCondition("node.hostname", f.Hostname, query)
-// 		}
-// 		if f.Cluster != nil {
-// 			query = buildStringCondition("node.cluster", f.Cluster, query)
-// 		}
-// 		if f.Subcluster != nil {
-// 			query = buildStringCondition("node.subcluster", f.Subcluster, query)
-// 		}
-// 		if f.NodeState != nil {
-// 			query = query.Where("node.node_state = ?", f.NodeState)
-// 		}
-// 		if f.HealthState != nil {
-// 			query = query.Where("node.health_state = ?", f.HealthState)
-// 		}
-// 	}
-//
-// 	// Add Group and Order
-// 	query = query.GroupBy("state").OrderBy("count DESC")
-//
-// 	rows, err := query.RunWith(r.stmtCache).Query()
-// 	if err != nil {
-// 		queryString, queryVars, _ := query.ToSql()
-// 		cclog.Errorf("Error while running query '%s' %v: %v", queryString, queryVars, err)
-// 		return nil, err
-// 	}
-//
-// 	nodes := make([]*model.NodeStates, 0)
-// 	for rows.Next() {
-// 		node := model.NodeStates{}
-//
-// 		if err := rows.Scan(&node.State, &node.Count); err != nil {
-// 			rows.Close()
-// 			cclog.Warn("Error while scanning rows (NodeStates)")
-// 			return nil, err
-// 		}
-// 		nodes = append(nodes, &node)
-// 	}
-//
-// 	return nodes, nil
-// }
-//
-// func (r *NodeRepository) CountHealthStates(ctx context.Context, filters []*model.NodeFilter) ([]*model.NodeStates, error) {
-// 	query, qerr := AccessCheck(ctx, sq.Select("health_state AS state", "count(*) AS count").From("node"))
-// 	if qerr != nil {
-// 		return nil, qerr
-// 	}
-//
-// 	for _, f := range filters {
-// 		if f.Hostname != nil {
-// 			query = buildStringCondition("node.hostname", f.Hostname, query)
-// 		}
-// 		if f.Cluster != nil {
-// 			query = buildStringCondition("node.cluster", f.Cluster, query)
-// 		}
-// 		if f.Subcluster != nil {
-// 			query = buildStringCondition("node.subcluster", f.Subcluster, query)
-// 		}
-// 		if f.NodeState != nil {
-// 			query = query.Where("node.node_state = ?", f.NodeState)
-// 		}
-// 		if f.HealthState != nil {
-// 			query = query.Where("node.health_state = ?", f.HealthState)
-// 		}
-// 	}
-//
-// 	// Add Group and Order
-// 	query = query.GroupBy("state").OrderBy("count DESC")
-//
-// 	rows, err := query.RunWith(r.stmtCache).Query()
-// 	if err != nil {
-// 		queryString, queryVars, _ := query.ToSql()
-// 		cclog.Errorf("Error while running query '%s' %v: %v", queryString, queryVars, err)
-// 		return nil, err
-// 	}
-//
-// 	nodes := make([]*model.NodeStates, 0)
-// 	for rows.Next() {
-// 		node := model.NodeStates{}
-//
-// 		if err := rows.Scan(&node.State, &node.Count); err != nil {
-// 			rows.Close()
-// 			cclog.Warn("Error while scanning rows (NodeStates)")
-// 			return nil, err
-// 		}
-// 		nodes = append(nodes, &node)
-// 	}
-//
-// 	return nodes, nil
-// }
+func (r *NodeRepository) ListNodes(cluster string) ([]*schema.Node, error) {
+	// Get latest Info aka closest Timestamp to $now
+	now := time.Now().Unix()
+	q := sq.Select("hostname", "cluster", "subcluster", "node_state", "health_state").
+		From("node").
+		Join("node_state ON node_state.node_id = node.id").Where(sq.Gt{"node_state.time_stamp": (now - 60)}).
+		Where("node.cluster = ?", cluster).OrderBy("node.hostname ASC")
+
+	rows, err := q.RunWith(r.DB).Query()
+	if err != nil {
+		cclog.Warn("Error while querying node list")
+		return nil, err
+	}
+	nodeList := make([]*schema.Node, 0, 100)
+	defer rows.Close()
+	for rows.Next() {
+		node := &schema.Node{}
+		if err := rows.Scan(&node.Hostname, &node.Cluster,
+			&node.SubCluster, &node.NodeState, &node.HealthState); err != nil {
+			cclog.Warn("Error while scanning node list")
+			return nil, err
+		}
+
+		nodeList = append(nodeList, node)
+	}
+
+	return nodeList, nil
+}
+
+func (r *NodeRepository) CountNodeStates(ctx context.Context, filters []*model.NodeFilter) ([]*model.NodeStates, error) {
+	query, qerr := AccessCheck(ctx, sq.Select("node_state", "count(*) AS count").From("node"))
+	if qerr != nil {
+		return nil, qerr
+	}
+
+	// Get latest Info aka closest Timestamp to $now
+	now := time.Now().Unix()
+	query = query.Join("node_state ON node_state.node_id = node.id").Where(sq.Gt{"node_state.time_stamp": (now - 60)}) // .Distinct()
+
+	for _, f := range filters {
+		if f.Hostname != nil {
+			query = buildStringCondition("node.hostname", f.Hostname, query)
+		}
+		if f.Cluster != nil {
+			query = buildStringCondition("node.cluster", f.Cluster, query)
+		}
+		if f.Subcluster != nil {
+			query = buildStringCondition("node.subcluster", f.Subcluster, query)
+		}
+		if f.SchedulerState != nil {
+			query = query.Where("node.node_state = ?", f.SchedulerState)
+		}
+		if f.HealthState != nil {
+			query = query.Where("node.health_state = ?", f.HealthState)
+		}
+	}
+
+	// Add Group and Order
+	query = query.GroupBy("node_state").OrderBy("count DESC")
+
+	rows, err := query.RunWith(r.stmtCache).Query()
+	if err != nil {
+		queryString, queryVars, _ := query.ToSql()
+		cclog.Errorf("Error while running query '%s' %v: %v", queryString, queryVars, err)
+		return nil, err
+	}
+
+	nodes := make([]*model.NodeStates, 0)
+	for rows.Next() {
+		node := model.NodeStates{}
+
+		if err := rows.Scan(&node.State, &node.Count); err != nil {
+			rows.Close()
+			cclog.Warn("Error while scanning rows (NodeStates)")
+			return nil, err
+		}
+		nodes = append(nodes, &node)
+	}
+
+	return nodes, nil
+}
+
+func (r *NodeRepository) CountHealthStates(ctx context.Context, filters []*model.NodeFilter) ([]*model.NodeStates, error) {
+	query, qerr := AccessCheck(ctx, sq.Select("health_state", "count(*) AS count").From("node"))
+	if qerr != nil {
+		return nil, qerr
+	}
+
+	// Get latest Info aka closest Timestamp to $now
+	now := time.Now().Unix()
+	query = query.Join("node_state ON node_state.node_id = node.id").Where(sq.Gt{"node_state.time_stamp": (now - 60)}) // .Distinct()
+
+	for _, f := range filters {
+		if f.Hostname != nil {
+			query = buildStringCondition("node.hostname", f.Hostname, query)
+		}
+		if f.Cluster != nil {
+			query = buildStringCondition("node.cluster", f.Cluster, query)
+		}
+		if f.Subcluster != nil {
+			query = buildStringCondition("node.subcluster", f.Subcluster, query)
+		}
+		if f.SchedulerState != nil {
+			query = query.Where("node.node_state = ?", f.SchedulerState)
+		}
+		if f.HealthState != nil {
+			query = query.Where("node.health_state = ?", f.HealthState)
+		}
+	}
+
+	// Add Group and Order
+	query = query.GroupBy("health_state").OrderBy("count DESC")
+
+	rows, err := query.RunWith(r.stmtCache).Query()
+	if err != nil {
+		queryString, queryVars, _ := query.ToSql()
+		cclog.Errorf("Error while running query '%s' %v: %v", queryString, queryVars, err)
+		return nil, err
+	}
+
+	nodes := make([]*model.NodeStates, 0)
+	for rows.Next() {
+		node := model.NodeStates{}
+
+		if err := rows.Scan(&node.State, &node.Count); err != nil {
+			rows.Close()
+			cclog.Warn("Error while scanning rows (NodeStates)")
+			return nil, err
+		}
+		nodes = append(nodes, &node)
+	}
+
+	return nodes, nil
+}
 
 func AccessCheck(ctx context.Context, query sq.SelectBuilder) (sq.SelectBuilder, error) {
 	user := GetUserFromContext(ctx)
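
All three functions above share the same join pattern: only node_state rows stamped within the last 60 seconds are considered, which approximates "latest state per node". A minimal builder-only sketch of the SQL this squirrel chain renders (no database needed; table and column names as in the diff):

package main

import (
	"fmt"
	"time"

	sq "github.com/Masterminds/squirrel"
)

func main() {
	now := time.Now().Unix()

	// The same chain CountNodeStates builds: count current node states,
	// considering only node_state rows from the last 60 seconds.
	query := sq.Select("node_state", "count(*) AS count").
		From("node").
		Join("node_state ON node_state.node_id = node.id").
		Where(sq.Gt{"node_state.time_stamp": now - 60}).
		GroupBy("node_state").
		OrderBy("count DESC")

	sqlStr, args, err := query.ToSql()
	if err != nil {
		panic(err)
	}
	fmt.Println(sqlStr)
	// SELECT node_state, count(*) AS count FROM node
	// JOIN node_state ON node_state.node_id = node.id
	// WHERE node_state.time_stamp > ? GROUP BY node_state ORDER BY count DESC
	fmt.Println(args) // [<now-60>]
}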

View File

@@ -24,6 +24,7 @@ var groupBy2column = map[model.Aggregate]string{
 	model.AggregateUser:    "job.hpc_user",
 	model.AggregateProject: "job.project",
 	model.AggregateCluster: "job.hpc_cluster",
+	model.AggregateSubcluster: "job.subcluster",
 }
 
 var sortBy2column = map[model.SortByAggregate]string{
@@ -176,7 +177,7 @@ func (r *JobRepository) JobsStatsGrouped(
 		var name sql.NullString
 		var jobs, users, walltime, nodes, nodeHours, cores, coreHours, accs, accHours sql.NullInt64
 		if err := rows.Scan(&id, &name, &jobs, &users, &walltime, &nodes, &nodeHours, &cores, &coreHours, &accs, &accHours); err != nil {
-			cclog.Warn("Error while scanning rows")
+			cclog.Warnf("Error while scanning rows: %s", err.Error())
 			return nil, err
 		}

View File

@@ -43,8 +43,6 @@
 	let cluster = $state(presetCluster);
 	// Histogram
 	let isHistogramSelectionOpen = $state(false);
-	let from = $state(new Date(Date.now() - (30 * 24 * 60 * 60 * 1000))); // Simple way to retrigger GQL: Jobs Started last Month
-	let to = $state(new Date(Date.now()));
 
 	/* Derived */
 	let selectedHistograms = $derived(cluster
@@ -74,11 +72,11 @@
 		}
 	`,
 		variables: {
-			filter: [{ state: ["running"] }, { cluster: { eq: cluster}}, {startTime: { from, to }}],
-			selectedHistograms: selectedHistograms,
+			filter: [{ state: ["running"] }, { cluster: { eq: cluster} }],
+			selectedHistograms: selectedHistograms
 		},
+		requestPolicy: "network-only"
 	}));
 </script>
 
 <!-- Loading indicators & Metric Sleect -->
@@ -96,8 +94,7 @@
 		<Refresher
 			initially={120}
 			onRefresh={() => {
-				from = new Date(Date.now() - (30 * 24 * 60 * 60 * 1000)); // Triggers GQL
-				to = new Date(Date.now());
+				selectedHistograms = [...$state.snapshot(selectedHistograms)]
 			}}
 		/>
 	</Col>

View File

@@ -185,6 +185,7 @@
 			paging: { itemsPerPage: -1, page: 1 }, // Get all: -1
 			sorting: { field: "startTime", type: "col", order: "DESC" }
 		},
+		requestPolicy: "network-only"
 	}));
/* Effects */ /* Effects */
@@ -363,6 +364,7 @@
 		<Refresher
 			initially={120}
 			onRefresh={() => {
+				console.log('Trigger Refresh StatusTab')
 				from = new Date(Date.now() - 5 * 60 * 1000);
 				to = new Date(Date.now());
 			}}

View File

@@ -47,8 +47,8 @@
 	/* State Init */
 	let cluster = $state(presetCluster)
-	let from = $state(new Date(Date.now() - (30 * 24 * 60 * 60 * 1000))); // Simple way to retrigger GQL: Jobs Started last Month
-	let to = $state(new Date(Date.now()));
+	let pagingState = $state({page: 1, itemsPerPage: 10}) // Top 10
+	let selectedHistograms = $state([]) // Dummy For Refresh
 	let colWidthJobs = $state(0);
 	let colWidthNodes = $state(0);
 	let colWidthAccs = $state(0);
@@ -84,9 +84,10 @@
 		}
 	`,
 		variables: {
-			filter: [{ state: ["running"] }, { cluster: { eq: cluster}}, {startTime: { from, to }}],
-			paging: { itemsPerPage: 10, page: 1 } // Top 10
+			filter: [{ state: ["running"] }, { cluster: { eq: cluster} }],
+			paging: pagingState // Top 10
 		},
+		requestPolicy: "network-only"
 	}));
 
 	const topNodesQuery = $derived(queryStore({
@@ -118,9 +119,10 @@
 		}
 	`,
 		variables: {
-			filter: [{ state: ["running"] }, { cluster: { eq: cluster }}, {startTime: { from, to }}],
-			paging: { itemsPerPage: 10, page: 1 } // Top 10
+			filter: [{ state: ["running"] }, { cluster: { eq: cluster } }],
+			paging: pagingState
 		},
+		requestPolicy: "network-only"
 	}));
 
 	const topAccsQuery = $derived(queryStore({
@@ -152,9 +154,10 @@
 		}
 	`,
 		variables: {
-			filter: [{ state: ["running"] }, { cluster: { eq: cluster }}, {startTime: { from, to }}],
-			paging: { itemsPerPage: 10, page: 1 } // Top 10
+			filter: [{ state: ["running"] }, { cluster: { eq: cluster } }],
+			paging: pagingState
 		},
+		requestPolicy: "network-only"
 	}));
// Note: nodeMetrics are requested on configured $timestep resolution // Note: nodeMetrics are requested on configured $timestep resolution
@@ -183,10 +186,11 @@
 		}
 	`,
 		variables: {
-			filter: [{ state: ["running"] }, { cluster: { eq: cluster }}, {startTime: { from, to }}],
-			selectedHistograms: [], // No Metrics requested for node hardware stats
+			filter: [{ state: ["running"] }, { cluster: { eq: cluster } }],
+			selectedHistograms: selectedHistograms, // No Metrics requested for node hardware stats
 			numDurationBins: numDurationBins,
 		},
+		requestPolicy: "network-only"
 	}));
 
/* Functions */ /* Functions */
@@ -202,7 +206,6 @@
 		}
 		return c[(c.length + targetIdx) % c.length];
 	}
-
 </script>
 
 <!-- Refresher and space for other options -->
@@ -226,8 +229,8 @@
 		<Refresher
 			initially={120}
 			onRefresh={() => {
-				from = new Date(Date.now() - (30 * 24 * 60 * 60 * 1000)); // Triggers GQL
-				to = new Date(Date.now());
+				pagingState = { page:1, itemsPerPage: 10 };
+				selectedHistograms = [...$state.snapshot(selectedHistograms)];
 			}}
 		/>
 	</Col>