diff --git a/internal/graph/schema.resolvers.go b/internal/graph/schema.resolvers.go index 9e93b4b..85d13a2 100644 --- a/internal/graph/schema.resolvers.go +++ b/internal/graph/schema.resolvers.go @@ -820,22 +820,12 @@ func (r *Resolver) Query() generated.QueryResolver { return &queryResolver{r} } // SubCluster returns generated.SubClusterResolver implementation. func (r *Resolver) SubCluster() generated.SubClusterResolver { return &subClusterResolver{r} } -type clusterResolver struct{ *Resolver } -type jobResolver struct{ *Resolver } -type metricValueResolver struct{ *Resolver } -type mutationResolver struct{ *Resolver } -type nodeResolver struct{ *Resolver } -type queryResolver struct{ *Resolver } -type subClusterResolver struct{ *Resolver } - -// !!! WARNING !!! -// The code below was going to be deleted when updating resolvers. It has been copied here so you have -// one last chance to move it out of harms way if you want. There are two reasons this happens: -// - When renaming or deleting a resolver the old code will be put in here. You can safely delete -// it when you're done. -// - You have helper methods in this file. Move them out to keep these resolver files clean. 
-/* - func (r *nodeResolver) NodeState(ctx context.Context, obj *model.Node) (string, error) { - panic(fmt.Errorf("not implemented: NodeState - nodeState")) -} -*/ +type ( + clusterResolver struct{ *Resolver } + jobResolver struct{ *Resolver } + metricValueResolver struct{ *Resolver } + mutationResolver struct{ *Resolver } + nodeResolver struct{ *Resolver } + queryResolver struct{ *Resolver } + subClusterResolver struct{ *Resolver } +) diff --git a/internal/repository/migrations/sqlite3/10_node-table.up.sql b/internal/repository/migrations/sqlite3/10_node-table.up.sql index 39656f9..372669f 100644 --- a/internal/repository/migrations/sqlite3/10_node-table.up.sql +++ b/internal/repository/migrations/sqlite3/10_node-table.up.sql @@ -27,10 +27,105 @@ CREATE TABLE "node_state" ( FOREIGN KEY (node_id) REFERENCES node (id) ); --- Add Indices For New Node Table VARCHAR Fields +-- DROP indices using old column name "cluster" +DROP INDEX IF EXISTS jobs_cluster; +DROP INDEX IF EXISTS jobs_cluster_user; +DROP INDEX IF EXISTS jobs_cluster_project; +DROP INDEX IF EXISTS jobs_cluster_subcluster; +DROP INDEX IF EXISTS jobs_cluster_starttime; +DROP INDEX IF EXISTS jobs_cluster_duration; +DROP INDEX IF EXISTS jobs_cluster_numnodes; +DROP INDEX IF EXISTS jobs_cluster_numhwthreads; +DROP INDEX IF EXISTS jobs_cluster_numacc; +DROP INDEX IF EXISTS jobs_cluster_energy; +DROP INDEX IF EXISTS jobs_cluster_partition; +DROP INDEX IF EXISTS jobs_cluster_partition_starttime; +DROP INDEX IF EXISTS jobs_cluster_partition_duration; +DROP INDEX IF EXISTS jobs_cluster_partition_numnodes; +DROP INDEX IF EXISTS jobs_cluster_partition_numhwthreads; +DROP INDEX IF EXISTS jobs_cluster_partition_numacc; +DROP INDEX IF EXISTS jobs_cluster_partition_energy; +DROP INDEX IF EXISTS jobs_cluster_partition_jobstate; +DROP INDEX IF EXISTS jobs_cluster_partition_jobstate_user; +DROP INDEX IF EXISTS jobs_cluster_partition_jobstate_project; +DROP INDEX IF EXISTS jobs_cluster_partition_jobstate_starttime; +DROP 
INDEX IF EXISTS jobs_cluster_partition_jobstate_duration; +DROP INDEX IF EXISTS jobs_cluster_partition_jobstate_numnodes; +DROP INDEX IF EXISTS jobs_cluster_partition_jobstate_numhwthreads; +DROP INDEX IF EXISTS jobs_cluster_partition_jobstate_numacc; +DROP INDEX IF EXISTS jobs_cluster_partition_jobstate_energy; +DROP INDEX IF EXISTS jobs_cluster_jobstate; +DROP INDEX IF EXISTS jobs_cluster_jobstate_user; +DROP INDEX IF EXISTS jobs_cluster_jobstate_project; +DROP INDEX IF EXISTS jobs_cluster_jobstate_starttime; +DROP INDEX IF EXISTS jobs_cluster_jobstate_duration; +DROP INDEX IF EXISTS jobs_cluster_jobstate_numnodes; +DROP INDEX IF EXISTS jobs_cluster_jobstate_numhwthreads; +DROP INDEX IF EXISTS jobs_cluster_jobstate_numacc; +DROP INDEX IF EXISTS jobs_cluster_jobstate_energy; + +-- -- CREATE UPDATED indices with new column names +-- Cluster Filter +CREATE INDEX IF NOT EXISTS jobs_cluster ON job (hpc_cluster); +CREATE INDEX IF NOT EXISTS jobs_cluster_user ON job (hpc_cluster, hpc_user); +CREATE INDEX IF NOT EXISTS jobs_cluster_project ON job (hpc_cluster, project); +CREATE INDEX IF NOT EXISTS jobs_cluster_subcluster ON job (hpc_cluster, subcluster); +-- Cluster Filter Sorting +CREATE INDEX IF NOT EXISTS jobs_cluster_starttime ON job (hpc_cluster, start_time); +CREATE INDEX IF NOT EXISTS jobs_cluster_duration ON job (hpc_cluster, duration); +CREATE INDEX IF NOT EXISTS jobs_cluster_numnodes ON job (hpc_cluster, num_nodes); +CREATE INDEX IF NOT EXISTS jobs_cluster_numhwthreads ON job (hpc_cluster, num_hwthreads); +CREATE INDEX IF NOT EXISTS jobs_cluster_numacc ON job (hpc_cluster, num_acc); +CREATE INDEX IF NOT EXISTS jobs_cluster_energy ON job (hpc_cluster, energy); +-- Cluster+Partition Filter +CREATE INDEX IF NOT EXISTS jobs_cluster_partition ON job (hpc_cluster, cluster_partition); +-- Cluster+Partition Filter Sorting +CREATE INDEX IF NOT EXISTS jobs_cluster_partition_starttime ON job (hpc_cluster, cluster_partition, start_time); +CREATE INDEX IF NOT EXISTS 
jobs_cluster_partition_duration ON job (hpc_cluster, cluster_partition, duration); +CREATE INDEX IF NOT EXISTS jobs_cluster_partition_numnodes ON job (hpc_cluster, cluster_partition, num_nodes); +CREATE INDEX IF NOT EXISTS jobs_cluster_partition_numhwthreads ON job (hpc_cluster, cluster_partition, num_hwthreads); +CREATE INDEX IF NOT EXISTS jobs_cluster_partition_numacc ON job (hpc_cluster, cluster_partition, num_acc); +CREATE INDEX IF NOT EXISTS jobs_cluster_partition_energy ON job (hpc_cluster, cluster_partition, energy); +-- Cluster+Partition+Jobstate Filter +CREATE INDEX IF NOT EXISTS jobs_cluster_partition_jobstate ON job (hpc_cluster, cluster_partition, job_state); +CREATE INDEX IF NOT EXISTS jobs_cluster_partition_jobstate_user ON job (hpc_cluster, cluster_partition, job_state, hpc_user); +CREATE INDEX IF NOT EXISTS jobs_cluster_partition_jobstate_project ON job (hpc_cluster, cluster_partition, job_state, project); +-- Cluster+Partition+Jobstate Filter Sorting +CREATE INDEX IF NOT EXISTS jobs_cluster_partition_jobstate_starttime ON job (hpc_cluster, cluster_partition, job_state, start_time); +CREATE INDEX IF NOT EXISTS jobs_cluster_partition_jobstate_duration ON job (hpc_cluster, cluster_partition, job_state, duration); +CREATE INDEX IF NOT EXISTS jobs_cluster_partition_jobstate_numnodes ON job (hpc_cluster, cluster_partition, job_state, num_nodes); +CREATE INDEX IF NOT EXISTS jobs_cluster_partition_jobstate_numhwthreads ON job (hpc_cluster, cluster_partition, job_state, num_hwthreads); +CREATE INDEX IF NOT EXISTS jobs_cluster_partition_jobstate_numacc ON job (hpc_cluster, cluster_partition, job_state, num_acc); +CREATE INDEX IF NOT EXISTS jobs_cluster_partition_jobstate_energy ON job (hpc_cluster, cluster_partition, job_state, energy); +-- Cluster+JobState Filter +CREATE INDEX IF NOT EXISTS jobs_cluster_jobstate ON job (hpc_cluster, job_state); +CREATE INDEX IF NOT EXISTS jobs_cluster_jobstate_user ON job (hpc_cluster, job_state, hpc_user); +CREATE INDEX IF 
NOT EXISTS jobs_cluster_jobstate_project ON job (hpc_cluster, job_state, project); +-- Cluster+JobState Filter Sorting +CREATE INDEX IF NOT EXISTS jobs_cluster_jobstate_starttime ON job (hpc_cluster, job_state, start_time); +CREATE INDEX IF NOT EXISTS jobs_cluster_jobstate_duration ON job (hpc_cluster, job_state, duration); +CREATE INDEX IF NOT EXISTS jobs_cluster_jobstate_numnodes ON job (hpc_cluster, job_state, num_nodes); +CREATE INDEX IF NOT EXISTS jobs_cluster_jobstate_numhwthreads ON job (hpc_cluster, job_state, num_hwthreads); +CREATE INDEX IF NOT EXISTS jobs_cluster_jobstate_numacc ON job (hpc_cluster, job_state, num_acc); +CREATE INDEX IF NOT EXISTS jobs_cluster_jobstate_energy ON job (hpc_cluster, job_state, energy); +--- --- END UPDATE existing indices + +-- Add NEW Indices For New Job Table Columns +CREATE INDEX IF NOT EXISTS jobs_cluster_submittime ON job (hpc_cluster, submit_time); +CREATE INDEX IF NOT EXISTS jobs_cluster_partition_submittime ON job (hpc_cluster, cluster_partition, submit_time); +CREATE INDEX IF NOT EXISTS jobs_cluster_partition_jobstate_submittime ON job (hpc_cluster, cluster_partition, job_state, submit_time); +CREATE INDEX IF NOT EXISTS jobs_cluster_jobstate_submittime ON job (hpc_cluster, job_state, submit_time); + +-- Add NEW Indices For New Node Table VARCHAR Fields CREATE INDEX IF NOT EXISTS nodes_cluster ON node (cluster); CREATE INDEX IF NOT EXISTS nodes_cluster_subcluster ON node (cluster, subcluster); --- Add Indices For Increased Amounts of Tags +-- Add NEW Indices For New Node_State Table Fields +CREATE INDEX IF NOT EXISTS nodeStates_state ON node_state (node_state); +CREATE INDEX IF NOT EXISTS nodeStates_health ON node_state (health_state); +CREATE INDEX IF NOT EXISTS nodeStates_nodeid_state ON node_state (node_id, node_state); +CREATE INDEX IF NOT EXISTS nodeStates_nodeid_health ON node_state (node_id, health_state); + +-- Add NEW Indices For Increased Amounts of Tags CREATE INDEX IF NOT EXISTS tags_jobid ON jobtag (job_id); CREATE 
INDEX IF NOT EXISTS tags_tagid ON jobtag (tag_id); diff --git a/internal/repository/node.go b/internal/repository/node.go index ae82fb1..b4fac9e 100644 --- a/internal/repository/node.go +++ b/internal/repository/node.go @@ -250,6 +250,10 @@ func (r *NodeRepository) QueryNodes( return nil, qerr } + // Get latest Info aka closest Timestamp to $now + now := time.Now().Unix() + query = query.Join("node_state ON node_state.node_id = node.id").Where(sq.Gt{"node_state.time_stamp": (now - 60)}) // .Distinct() + for _, f := range filters { if f.Hostname != nil { query = buildStringCondition("node.hostname", f.Hostname, query) @@ -291,104 +295,140 @@ func (r *NodeRepository) QueryNodes( return nodes, nil } -// -// func (r *NodeRepository) CountNodeStates(ctx context.Context, filters []*model.NodeFilter) ([]*model.NodeStates, error) { -// query, qerr := AccessCheck(ctx, sq.Select("node_state AS state", "count(*) AS count").From("node")) -// if qerr != nil { -// return nil, qerr -// } -// -// for _, f := range filters { -// if f.Hostname != nil { -// query = buildStringCondition("node.hostname", f.Hostname, query) -// } -// if f.Cluster != nil { -// query = buildStringCondition("node.cluster", f.Cluster, query) -// } -// if f.Subcluster != nil { -// query = buildStringCondition("node.subcluster", f.Subcluster, query) -// } -// if f.NodeState != nil { -// query = query.Where("node.node_state = ?", f.NodeState) -// } -// if f.HealthState != nil { -// query = query.Where("node.health_state = ?", f.HealthState) -// } -// } -// -// // Add Group and Order -// query = query.GroupBy("state").OrderBy("count DESC") -// -// rows, err := query.RunWith(r.stmtCache).Query() -// if err != nil { -// queryString, queryVars, _ := query.ToSql() -// cclog.Errorf("Error while running query '%s' %v: %v", queryString, queryVars, err) -// return nil, err -// } -// -// nodes := make([]*model.NodeStates, 0) -// for rows.Next() { -// node := model.NodeStates{} -// -// if err := rows.Scan(&node.State, 
&node.Count); err != nil { -// rows.Close() -// cclog.Warn("Error while scanning rows (NodeStates)") -// return nil, err -// } -// nodes = append(nodes, &node) -// } -// -// return nodes, nil -// } -// -// func (r *NodeRepository) CountHealthStates(ctx context.Context, filters []*model.NodeFilter) ([]*model.NodeStates, error) { -// query, qerr := AccessCheck(ctx, sq.Select("health_state AS state", "count(*) AS count").From("node")) -// if qerr != nil { -// return nil, qerr -// } -// -// for _, f := range filters { -// if f.Hostname != nil { -// query = buildStringCondition("node.hostname", f.Hostname, query) -// } -// if f.Cluster != nil { -// query = buildStringCondition("node.cluster", f.Cluster, query) -// } -// if f.Subcluster != nil { -// query = buildStringCondition("node.subcluster", f.Subcluster, query) -// } -// if f.NodeState != nil { -// query = query.Where("node.node_state = ?", f.NodeState) -// } -// if f.HealthState != nil { -// query = query.Where("node.health_state = ?", f.HealthState) -// } -// } -// -// // Add Group and Order -// query = query.GroupBy("state").OrderBy("count DESC") -// -// rows, err := query.RunWith(r.stmtCache).Query() -// if err != nil { -// queryString, queryVars, _ := query.ToSql() -// cclog.Errorf("Error while running query '%s' %v: %v", queryString, queryVars, err) -// return nil, err -// } -// -// nodes := make([]*model.NodeStates, 0) -// for rows.Next() { -// node := model.NodeStates{} -// -// if err := rows.Scan(&node.State, &node.Count); err != nil { -// rows.Close() -// cclog.Warn("Error while scanning rows (NodeStates)") -// return nil, err -// } -// nodes = append(nodes, &node) -// } -// -// return nodes, nil -// } +func (r *NodeRepository) ListNodes(cluster string) ([]*schema.Node, error) { + // Get latest Info aka closest Timestamp to $now + now := time.Now().Unix() + q := sq.Select("hostname", "cluster", "subcluster", "node_state", "health_state"). + From("node"). 
+ Join("node_state ON node_state.node_id = node.id").Where(sq.Gt{"node_state.time_stamp": (now - 60)}). + Where("node.cluster = ?", cluster).OrderBy("node.hostname ASC") + + rows, err := q.RunWith(r.DB).Query() + if err != nil { + cclog.Warn("Error while querying node list") + return nil, err + } + nodeList := make([]*schema.Node, 0, 100) + defer rows.Close() + for rows.Next() { + node := &schema.Node{} + if err := rows.Scan(&node.Hostname, &node.Cluster, + &node.SubCluster, &node.NodeState, &node.HealthState); err != nil { + cclog.Warn("Error while scanning node list") + return nil, err + } + + nodeList = append(nodeList, node) + } + + return nodeList, nil +} + +func (r *NodeRepository) CountNodeStates(ctx context.Context, filters []*model.NodeFilter) ([]*model.NodeStates, error) { + query, qerr := AccessCheck(ctx, sq.Select("node_state", "count(*) AS count").From("node")) + if qerr != nil { + return nil, qerr + } + + // Get latest Info aka closest Timestamp to $now + now := time.Now().Unix() + query = query.Join("node_state ON node_state.node_id = node.id").Where(sq.Gt{"node_state.time_stamp": (now - 60)}) // .Distinct() + + for _, f := range filters { + if f.Hostname != nil { + query = buildStringCondition("node.hostname", f.Hostname, query) + } + if f.Cluster != nil { + query = buildStringCondition("node.cluster", f.Cluster, query) + } + if f.Subcluster != nil { + query = buildStringCondition("node.subcluster", f.Subcluster, query) + } + if f.SchedulerState != nil { + query = query.Where("node.node_state = ?", f.SchedulerState) + } + if f.HealthState != nil { + query = query.Where("node.health_state = ?", f.HealthState) + } + } + + // Add Group and Order + query = query.GroupBy("node_state").OrderBy("count DESC") + + rows, err := query.RunWith(r.stmtCache).Query() + if err != nil { + queryString, queryVars, _ := query.ToSql() + cclog.Errorf("Error while running query '%s' %v: %v", queryString, queryVars, err) + return nil, err + } + + nodes := 
make([]*model.NodeStates, 0) + for rows.Next() { + node := model.NodeStates{} + + if err := rows.Scan(&node.State, &node.Count); err != nil { + rows.Close() + cclog.Warn("Error while scanning rows (NodeStates)") + return nil, err + } + nodes = append(nodes, &node) + } + + return nodes, nil +} + +func (r *NodeRepository) CountHealthStates(ctx context.Context, filters []*model.NodeFilter) ([]*model.NodeStates, error) { + query, qerr := AccessCheck(ctx, sq.Select("health_state", "count(*) AS count").From("node")) + if qerr != nil { + return nil, qerr + } + + // Get latest Info aka closest Timestamp to $now + now := time.Now().Unix() + query = query.Join("node_state ON node_state.node_id = node.id").Where(sq.Gt{"node_state.time_stamp": (now - 60)}) // .Distinct() + + for _, f := range filters { + if f.Hostname != nil { + query = buildStringCondition("node.hostname", f.Hostname, query) + } + if f.Cluster != nil { + query = buildStringCondition("node.cluster", f.Cluster, query) + } + if f.Subcluster != nil { + query = buildStringCondition("node.subcluster", f.Subcluster, query) + } + if f.SchedulerState != nil { + query = query.Where("node.node_state = ?", f.SchedulerState) + } + if f.HealthState != nil { + query = query.Where("node.health_state = ?", f.HealthState) + } + } + + // Add Group and Order + query = query.GroupBy("health_state").OrderBy("count DESC") + + rows, err := query.RunWith(r.stmtCache).Query() + if err != nil { + queryString, queryVars, _ := query.ToSql() + cclog.Errorf("Error while running query '%s' %v: %v", queryString, queryVars, err) + return nil, err + } + + nodes := make([]*model.NodeStates, 0) + for rows.Next() { + node := model.NodeStates{} + + if err := rows.Scan(&node.State, &node.Count); err != nil { + rows.Close() + cclog.Warn("Error while scanning rows (NodeStates)") + return nil, err + } + nodes = append(nodes, &node) + } + + return nodes, nil +} func AccessCheck(ctx context.Context, query sq.SelectBuilder) (sq.SelectBuilder, error) { 
user := GetUserFromContext(ctx) diff --git a/internal/repository/stats.go b/internal/repository/stats.go index 19d17bd..825033d 100644 --- a/internal/repository/stats.go +++ b/internal/repository/stats.go @@ -21,9 +21,10 @@ import ( // GraphQL validation should make sure that no unkown values can be specified. var groupBy2column = map[model.Aggregate]string{ - model.AggregateUser: "job.hpc_user", - model.AggregateProject: "job.project", - model.AggregateCluster: "job.hpc_cluster", + model.AggregateUser: "job.hpc_user", + model.AggregateProject: "job.project", + model.AggregateCluster: "job.hpc_cluster", + model.AggregateSubcluster: "job.subcluster", } var sortBy2column = map[model.SortByAggregate]string{ @@ -176,7 +177,7 @@ func (r *JobRepository) JobsStatsGrouped( var name sql.NullString var jobs, users, walltime, nodes, nodeHours, cores, coreHours, accs, accHours sql.NullInt64 if err := rows.Scan(&id, &name, &jobs, &users, &walltime, &nodes, &nodeHours, &cores, &coreHours, &accs, &accHours); err != nil { - cclog.Warn("Error while scanning rows") + cclog.Warnf("Error while scanning rows: %s", err.Error()) return nil, err } diff --git a/web/frontend/src/status/StatisticsDash.svelte b/web/frontend/src/status/StatisticsDash.svelte index f299439..fb2161b 100644 --- a/web/frontend/src/status/StatisticsDash.svelte +++ b/web/frontend/src/status/StatisticsDash.svelte @@ -43,8 +43,6 @@ let cluster = $state(presetCluster); // Histogram let isHistogramSelectionOpen = $state(false); - let from = $state(new Date(Date.now() - (30 * 24 * 60 * 60 * 1000))); // Simple way to retrigger GQL: Jobs Started last Month - let to = $state(new Date(Date.now())); /* Derived */ let selectedHistograms = $derived(cluster @@ -74,11 +72,11 @@ } `, variables: { - filter: [{ state: ["running"] }, { cluster: { eq: cluster}}, {startTime: { from, to }}], - selectedHistograms: selectedHistograms, + filter: [{ state: ["running"] }, { cluster: { eq: cluster} }], + selectedHistograms: selectedHistograms 
}, + requestPolicy: "network-only" })); - @@ -96,8 +94,7 @@ { - from = new Date(Date.now() - (30 * 24 * 60 * 60 * 1000)); // Triggers GQL - to = new Date(Date.now()); + selectedHistograms = [...$state.snapshot(selectedHistograms)] }} /> diff --git a/web/frontend/src/status/StatusDash.svelte b/web/frontend/src/status/StatusDash.svelte index 280b04b..9579c0f 100644 --- a/web/frontend/src/status/StatusDash.svelte +++ b/web/frontend/src/status/StatusDash.svelte @@ -185,6 +185,7 @@ paging: { itemsPerPage: -1, page: 1 }, // Get all: -1 sorting: { field: "startTime", type: "col", order: "DESC" } }, + requestPolicy: "network-only" })); /* Effects */ @@ -363,6 +364,7 @@ { + console.log('Trigger Refresh StatusTab') from = new Date(Date.now() - 5 * 60 * 1000); to = new Date(Date.now()); }} diff --git a/web/frontend/src/status/UsageDash.svelte b/web/frontend/src/status/UsageDash.svelte index 16575e4..3b39e55 100644 --- a/web/frontend/src/status/UsageDash.svelte +++ b/web/frontend/src/status/UsageDash.svelte @@ -47,8 +47,8 @@ /* State Init */ let cluster = $state(presetCluster) - let from = $state(new Date(Date.now() - (30 * 24 * 60 * 60 * 1000))); // Simple way to retrigger GQL: Jobs Started last Month - let to = $state(new Date(Date.now())); + let pagingState = $state({page: 1, itemsPerPage: 10}) // Top 10 + let selectedHistograms = $state([]) // Dummy For Refresh let colWidthJobs = $state(0); let colWidthNodes = $state(0); let colWidthAccs = $state(0); @@ -84,9 +84,10 @@ } `, variables: { - filter: [{ state: ["running"] }, { cluster: { eq: cluster}}, {startTime: { from, to }}], - paging: { itemsPerPage: 10, page: 1 } // Top 10 + filter: [{ state: ["running"] }, { cluster: { eq: cluster} }], + paging: pagingState // Top 10 }, + requestPolicy: "network-only" })); const topNodesQuery = $derived(queryStore({ @@ -118,9 +119,10 @@ } `, variables: { - filter: [{ state: ["running"] }, { cluster: { eq: cluster }}, {startTime: { from, to }}], - paging: { itemsPerPage: 10, page: 1 } // 
Top 10 + filter: [{ state: ["running"] }, { cluster: { eq: cluster } }], + paging: pagingState }, + requestPolicy: "network-only" })); const topAccsQuery = $derived(queryStore({ @@ -152,9 +154,10 @@ } `, variables: { - filter: [{ state: ["running"] }, { cluster: { eq: cluster }}, {startTime: { from, to }}], - paging: { itemsPerPage: 10, page: 1 } // Top 10 + filter: [{ state: ["running"] }, { cluster: { eq: cluster } }], + paging: pagingState }, + requestPolicy: "network-only" })); // Note: nodeMetrics are requested on configured $timestep resolution @@ -183,10 +186,11 @@ } `, variables: { - filter: [{ state: ["running"] }, { cluster: { eq: cluster }}, {startTime: { from, to }}], - selectedHistograms: [], // No Metrics requested for node hardware stats + filter: [{ state: ["running"] }, { cluster: { eq: cluster } }], + selectedHistograms: selectedHistograms, // No Metrics requested for node hardware stats numDurationBins: numDurationBins, }, + requestPolicy: "network-only" })); /* Functions */ @@ -202,7 +206,6 @@ } return c[(c.length + targetIdx) % c.length]; } - @@ -226,8 +229,8 @@ { - from = new Date(Date.now() - (30 * 24 * 60 * 60 * 1000)); // Triggers GQL - to = new Date(Date.now()); + pagingState = { page:1, itemsPerPage: 10 }; + selectedHistograms = [...$state.snapshot(selectedHistograms)]; }} />