Mirror of https://github.com/ClusterCockpit/cc-backend (synced 2025-11-20 16:57:22 +01:00)
switch nodeList logic to SQLite as source of truth, fix nodeList continuous scroll
- keep notindb logic for now
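The heart of the change is a responsibility split: the metric-data repositories no longer discover, filter, sort, and page the node list themselves (previously derived from archive.NodeLists plus a preFiltered state array); the caller now resolves the finished page of hostnames against the SQLite node table and passes it in. A minimal caller-side sketch of that flow follows; the nodeRepo parameter and its QueryNodePage helper are hypothetical stand-ins, only the LoadNodeListData signature is taken from this commit (assumed imports: context, time, model, schema, plus the MetricDataRepository interface changed below).

// Sketch only: nodeRepo, QueryNodePage, and this wiring are assumptions,
// not part of this commit.
func loadNodeListPage(
	ctx context.Context,
	mdr MetricDataRepository, // e.g. CCMetricStore, see interface change below
	nodeRepo interface { // assumed SQLite-backed node repository
		QueryNodePage(ctx context.Context, cluster, subCluster, nodeFilter string,
			page *model.PageRequest) (nodes []string, total int, hasNextPage bool, err error)
	},
	cluster, subCluster, nodeFilter string,
	metrics []string,
	scopes []schema.MetricScope,
	resolution int,
	from, to time.Time,
	page *model.PageRequest,
) (map[string]schema.JobData, int, bool, error) {
	// 1) SQLite is the source of truth: resolve, filter, and page nodes there.
	nodes, totalNodes, hasNextPage, err := nodeRepo.QueryNodePage(ctx, cluster, subCluster, nodeFilter, page)
	if err != nil {
		return nil, 0, false, err
	}
	// 2) The metric-data repository only fetches data for the page it was handed.
	data, err := mdr.LoadNodeListData(cluster, subCluster, nodes, metrics, scopes, resolution, from, to, ctx)
	return data, totalNodes, hasNextPage, err
}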
@@ -9,13 +9,10 @@ import (
 	"context"
 	"encoding/json"
 	"fmt"
-	"slices"
-	"sort"
 	"strconv"
 	"strings"
 	"time"
 
-	"github.com/ClusterCockpit/cc-backend/internal/graph/model"
 	"github.com/ClusterCockpit/cc-backend/internal/memorystore"
 	"github.com/ClusterCockpit/cc-backend/pkg/archive"
 	cclog "github.com/ClusterCockpit/cc-lib/ccLogger"
@@ -678,84 +675,20 @@ func (ccms *CCMetricStoreInternal) LoadNodeData(
 
 // Used for Systems-View Node-List
 func (ccms *CCMetricStoreInternal) LoadNodeListData(
-	cluster, subCluster, nodeFilter string,
-	preFiltered []string,
+	cluster, subCluster string,
+	nodes []string,
 	metrics []string,
 	scopes []schema.MetricScope,
 	resolution int,
 	from, to time.Time,
-	page *model.PageRequest,
 	ctx context.Context,
-) (map[string]schema.JobData, int, bool, error) {
-	// 0) Init additional vars
-	var totalNodes int = 0
-	var hasNextPage bool = false
-
-	// 1) Get list of all nodes
-	var nodes []string
-	if subCluster != "" {
-		scNodes := archive.NodeLists[cluster][subCluster]
-		nodes = scNodes.PrintList()
-	} else {
-		subClusterNodeLists := archive.NodeLists[cluster]
-		for _, nodeList := range subClusterNodeLists {
-			nodes = append(nodes, nodeList.PrintList()...)
-		}
-	}
-
-	// 2.1) Filter nodes by name
-	if nodeFilter != "" {
-		filteredNodesByName := []string{}
-		for _, node := range nodes {
-			if strings.Contains(node, nodeFilter) {
-				filteredNodesByName = append(filteredNodesByName, node)
-			}
-		}
-		nodes = filteredNodesByName
-	}
-
-	// 2.2) Filter nodes by state using prefiltered match array
-	if len(preFiltered) > 0 {
-		filteredNodesByState := []string{}
-		if preFiltered[0] == "exclude" { // Backwards: PreFiltered contains all Nodes in DB > Return Missing Nodes
-			for _, node := range nodes {
-				if !slices.Contains(preFiltered, node) {
-					filteredNodesByState = append(filteredNodesByState, node)
-				}
-			}
-		} else { // Forwards: Prefiltered contains specific nodeState > Return Matches
-			for _, node := range nodes {
-				if slices.Contains(preFiltered, node) {
-					filteredNodesByState = append(filteredNodesByState, node)
-				}
-			}
-		}
-		nodes = filteredNodesByState
-	}
-
-	// 2.3) Count total nodes && Sort nodes -> Sorting invalidated after return ...
-	totalNodes = len(nodes)
-	sort.Strings(nodes)
-
-	// 3) Apply paging
-	if len(nodes) > page.ItemsPerPage {
-		start := (page.Page - 1) * page.ItemsPerPage
-		end := start + page.ItemsPerPage
-		if end >= len(nodes) {
-			end = len(nodes)
-			hasNextPage = false
-		} else {
-			hasNextPage = true
-		}
-		nodes = nodes[start:end]
-	}
-
-	// Note: Order of node data is not guaranteed after this point, but contents match page and filter criteria
+) (map[string]schema.JobData, error) {
+
+	// Note: Order of node data is not guaranteed after this point
 	queries, assignedScope, err := ccms.buildNodeQueries(cluster, subCluster, nodes, metrics, scopes, int64(resolution))
 	if err != nil {
 		cclog.Errorf("Error while building node queries for Cluster %s, SubCLuster %s, Metrics %v, Scopes %v: %s", cluster, subCluster, metrics, scopes, err.Error())
-		return nil, totalNodes, hasNextPage, err
+		return nil, err
 	}
 
 	req := memorystore.APIQueryRequest{
@@ -770,7 +703,7 @@ func (ccms *CCMetricStoreInternal) LoadNodeListData(
 	resBody, err := memorystore.FetchData(req)
 	if err != nil {
 		cclog.Errorf("Error while fetching data : %s", err.Error())
-		return nil, totalNodes, hasNextPage, err
+		return nil, err
 	}
 
 	var errors []string
@@ -850,10 +783,10 @@ func (ccms *CCMetricStoreInternal) LoadNodeListData(
 
 	if len(errors) != 0 {
 		/* Returns list of "partial errors" */
-		return data, totalNodes, hasNextPage, fmt.Errorf("METRICDATA/CCMS > Errors: %s", strings.Join(errors, ", "))
+		return data, fmt.Errorf("METRICDATA/CCMS > Errors: %s", strings.Join(errors, ", "))
	}
 
-	return data, totalNodes, hasNextPage, nil
+	return data, nil
 }
 
 func (ccms *CCMetricStoreInternal) buildNodeQueries(
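Worth noting why the removed in-repository paging is tied to the continuous-scroll fix: the copies had drifted apart. The hunk above tests end >= len(nodes), while the CCMetricStore copy below tests end > len(nodes); with the > variant, a page that ends exactly at the last node never enters the clamping branch and keeps hasNextPage == true, so an infinite-scroll client requests one empty page past the end, which is presumably the continuous-scroll symptom named in the commit message. Centralizing the logic in one SQLite-side query removes that drift. For reference, consistent boundary handling looks like the sketch below (illustrative only; the new implementation presumably pages via SQL LIMIT/OFFSET plus a COUNT rather than in Go):

// pageWindow computes the slice window for one page and whether more pages follow.
func pageWindow(total, page, itemsPerPage int) (start, end int, hasNextPage bool) {
	start = (page - 1) * itemsPerPage
	if start > total {
		start = total // a page beyond the end yields an empty window
	}
	end = start + itemsPerPage
	if end > total {
		end = total
	}
	// More pages exist only if items remain strictly beyond this window.
	return start, end, end < total
}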
@@ -11,12 +11,9 @@ import (
 	"encoding/json"
 	"fmt"
 	"net/http"
-	"slices"
-	"sort"
 	"strings"
 	"time"
 
-	"github.com/ClusterCockpit/cc-backend/internal/graph/model"
 	"github.com/ClusterCockpit/cc-backend/pkg/archive"
 	cclog "github.com/ClusterCockpit/cc-lib/ccLogger"
 	"github.com/ClusterCockpit/cc-lib/schema"
@@ -800,85 +797,20 @@ func (ccms *CCMetricStore) LoadNodeData(
 
 // Used for Systems-View Node-List
 func (ccms *CCMetricStore) LoadNodeListData(
-	cluster, subCluster, nodeFilter string,
-	preFiltered []string,
+	cluster, subCluster string,
+	nodes []string,
 	metrics []string,
 	scopes []schema.MetricScope,
 	resolution int,
 	from, to time.Time,
-	page *model.PageRequest,
 	ctx context.Context,
-) (map[string]schema.JobData, int, bool, error) {
-	// 0) Init additional vars
-	var totalNodes int = 0
-	var hasNextPage bool = false
-
-	// 1) Get list of all nodes
-	var nodes []string
-	if subCluster != "" {
-		scNodes := archive.NodeLists[cluster][subCluster]
-		nodes = scNodes.PrintList()
-	} else {
-		subClusterNodeLists := archive.NodeLists[cluster]
-		for _, nodeList := range subClusterNodeLists {
-			nodes = append(nodes, nodeList.PrintList()...)
-		}
-	}
-
-	// 2.1) Filter nodes by name
-	if nodeFilter != "" {
-		filteredNodesByName := []string{}
-		for _, node := range nodes {
-			if strings.Contains(node, nodeFilter) {
-				filteredNodesByName = append(filteredNodesByName, node)
-			}
-		}
-		nodes = filteredNodesByName
-	}
-
-	// 2.2) Filter nodes by state using prefiltered match array
-	if len(preFiltered) > 0 {
-		filteredNodesByState := []string{}
-		if preFiltered[0] == "exclude" { // Backwards: PreFiltered contains all Nodes in DB > Return Missing Nodes
-			for _, node := range nodes {
-				if !slices.Contains(preFiltered, node) {
-					filteredNodesByState = append(filteredNodesByState, node)
-				}
-			}
-		} else { // Forwards: Prefiltered contains specific nodeState > Return Matches
-			for _, node := range nodes {
-				if slices.Contains(preFiltered, node) {
-					filteredNodesByState = append(filteredNodesByState, node)
-				}
-			}
-		}
-		nodes = filteredNodesByState
-	}
-
-	// 2.3) Count total nodes && Sort nodes -> Sorting invalidated after return ...
-	totalNodes = len(nodes)
-	sort.Strings(nodes)
-
-	// 3) Apply paging
-	if len(nodes) > page.ItemsPerPage {
-		start := (page.Page - 1) * page.ItemsPerPage
-		end := start + page.ItemsPerPage
-		if end > len(nodes) {
-			end = len(nodes)
-			hasNextPage = false
-		} else {
-			hasNextPage = true
-		}
-		nodes = nodes[start:end]
-	}
-
-	// Note: Order of node data is not guaranteed after this point, but contents match page and filter criteria
+) (map[string]schema.JobData, error) {
+
+	// Note: Order of node data is not guaranteed after this point
 	queries, assignedScope, err := ccms.buildNodeQueries(cluster, subCluster, nodes, metrics, scopes, resolution)
 	if err != nil {
 		cclog.Errorf("Error while building node queries for Cluster %s, SubCLuster %s, Metrics %v, Scopes %v: %s", cluster, subCluster, metrics, scopes, err.Error())
-		return nil, totalNodes, hasNextPage, err
+		return nil, err
 	}
 
 	req := ApiQueryRequest{
@@ -893,7 +825,7 @@ func (ccms *CCMetricStore) LoadNodeListData(
 	resBody, err := ccms.doRequest(ctx, &req)
 	if err != nil {
 		cclog.Errorf("Error while performing request: %s", err.Error())
-		return nil, totalNodes, hasNextPage, err
+		return nil, err
 	}
 
 	var errors []string
@@ -973,10 +905,10 @@ func (ccms *CCMetricStore) LoadNodeListData(
 
 	if len(errors) != 0 {
 		/* Returns list of "partial errors" */
-		return data, totalNodes, hasNextPage, fmt.Errorf("METRICDATA/CCMS > Errors: %s", strings.Join(errors, ", "))
+		return data, fmt.Errorf("METRICDATA/CCMS > Errors: %s", strings.Join(errors, ", "))
 	}
 
-	return data, totalNodes, hasNextPage, nil
+	return data, nil
 }
 
 func (ccms *CCMetricStore) buildNodeQueries(
@@ -12,7 +12,6 @@ import (
 	"time"
 
 	"github.com/ClusterCockpit/cc-backend/internal/config"
-	"github.com/ClusterCockpit/cc-backend/internal/graph/model"
 	"github.com/ClusterCockpit/cc-backend/internal/memorystore"
 	cclog "github.com/ClusterCockpit/cc-lib/ccLogger"
 	"github.com/ClusterCockpit/cc-lib/schema"
@@ -36,7 +35,7 @@ type MetricDataRepository interface {
 	LoadNodeData(cluster string, metrics, nodes []string, scopes []schema.MetricScope, from, to time.Time, ctx context.Context) (map[string]map[string][]*schema.JobMetric, error)
 
 	// Return a map of hosts to a map of metrics to a map of scopes for multiple nodes.
-	LoadNodeListData(cluster, subCluster, nodeFilter string, preFiltered []string, metrics []string, scopes []schema.MetricScope, resolution int, from, to time.Time, page *model.PageRequest, ctx context.Context) (map[string]schema.JobData, int, bool, error)
+	LoadNodeListData(cluster, subCluster string, nodes, metrics []string, scopes []schema.MetricScope, resolution int, from, to time.Time, ctx context.Context) (map[string]schema.JobData, error)
 }
 
 var metricDataRepos map[string]MetricDataRepository = map[string]MetricDataRepository{}
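Under the slimmed-down interface, a backend's LoadNodeListData reduces to a pure data fetch over the hostnames it is given. A minimal sketch of the one changed method (illustrative stub, not from the commit; a real repository must implement the interface's other methods as well):

// stubRepo illustrates the new contract: no filtering, sorting, or paging inside.
type stubRepo struct{}

func (s *stubRepo) LoadNodeListData(
	cluster, subCluster string,
	nodes, metrics []string,
	scopes []schema.MetricScope,
	resolution int,
	from, to time.Time,
	ctx context.Context,
) (map[string]schema.JobData, error) {
	data := make(map[string]schema.JobData, len(nodes))
	for _, host := range nodes {
		// A real backend queries its metric store for exactly these hosts.
		data[host] = schema.JobData{}
	}
	return data, nil
}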
@@ -14,14 +14,12 @@ import (
 	"net/http"
 	"os"
 	"regexp"
-	"slices"
 	"sort"
 	"strings"
 	"sync"
 	"text/template"
 	"time"
 
-	"github.com/ClusterCockpit/cc-backend/internal/graph/model"
 	"github.com/ClusterCockpit/cc-backend/pkg/archive"
 	cclog "github.com/ClusterCockpit/cc-lib/ccLogger"
 	"github.com/ClusterCockpit/cc-lib/schema"
@@ -495,82 +493,17 @@ func (pdb *PrometheusDataRepository) LoadScopedStats(
 
 // Implemented by NHR@FAU; Used in NodeList-View
 func (pdb *PrometheusDataRepository) LoadNodeListData(
-	cluster, subCluster, nodeFilter string,
-	preFiltered []string,
+	cluster, subCluster string,
+	nodes []string,
 	metrics []string,
 	scopes []schema.MetricScope,
 	resolution int,
 	from, to time.Time,
-	page *model.PageRequest,
 	ctx context.Context,
-) (map[string]schema.JobData, int, bool, error) {
+) (map[string]schema.JobData, error) {
 	// Assumption: pdb.loadData() only returns series node-scope - use node scope for NodeList
 
-	// 0) Init additional vars
-	var totalNodes int = 0
-	var hasNextPage bool = false
-
-	// 1) Get list of all nodes
-	var nodes []string
-	if subCluster != "" {
-		scNodes := archive.NodeLists[cluster][subCluster]
-		nodes = scNodes.PrintList()
-	} else {
-		subClusterNodeLists := archive.NodeLists[cluster]
-		for _, nodeList := range subClusterNodeLists {
-			nodes = append(nodes, nodeList.PrintList()...)
-		}
-	}
-
-	// 2.1) Filter nodes by name
-	if nodeFilter != "" {
-		filteredNodesByName := []string{}
-		for _, node := range nodes {
-			if strings.Contains(node, nodeFilter) {
-				filteredNodesByName = append(filteredNodesByName, node)
-			}
-		}
-		nodes = filteredNodesByName
-	}
-
-	// 2.2) Filter nodes by state using prefiltered match array
-	if len(preFiltered) > 0 {
-		filteredNodesByState := []string{}
-		if preFiltered[0] == "exclude" { // Backwards: PreFiltered contains all Nodes in DB > Return Missing Nodes
-			for _, node := range nodes {
-				if !slices.Contains(preFiltered, node) {
-					filteredNodesByState = append(filteredNodesByState, node)
-				}
-			}
-		} else { // Forwards: Prefiltered contains specific nodeState > Return Matches
-			for _, node := range nodes {
-				if slices.Contains(preFiltered, node) {
-					filteredNodesByState = append(filteredNodesByState, node)
-				}
-			}
-		}
-		nodes = filteredNodesByState
-	}
-
-	// 2.3) Count total nodes && Sort nodes -> Sorting invalidated after return ...
-	totalNodes = len(nodes)
-	sort.Strings(nodes)
-
-	// 3) Apply paging
-	if len(nodes) > page.ItemsPerPage {
-		start := (page.Page - 1) * page.ItemsPerPage
-		end := start + page.ItemsPerPage
-		if end >= len(nodes) {
-			end = len(nodes)
-			hasNextPage = false
-		} else {
-			hasNextPage = true
-		}
-		nodes = nodes[start:end]
-	}
-
-	// 4) Fetch Data, based on pdb.LoadNodeData()
-
+	// Fetch Data, based on pdb.LoadNodeData()
 	t0 := time.Now()
 	// Map of hosts of jobData
 	data := make(map[string]schema.JobData)
@@ -593,12 +526,12 @@ func (pdb *PrometheusDataRepository) LoadNodeListData(
 	metricConfig := archive.GetMetricConfig(cluster, metric)
 	if metricConfig == nil {
 		cclog.Warnf("Error in LoadNodeListData: Metric %s for cluster %s not configured", metric, cluster)
-		return nil, totalNodes, hasNextPage, errors.New("Prometheus config error")
+		return nil, errors.New("Prometheus config error")
 	}
 	query, err := pdb.FormatQuery(metric, scope, nodes, cluster)
 	if err != nil {
 		cclog.Warn("Error while formatting prometheus query")
-		return nil, totalNodes, hasNextPage, err
+		return nil, err
 	}
 
 	// ranged query over all nodes
@@ -610,7 +543,7 @@ func (pdb *PrometheusDataRepository) LoadNodeListData(
 	result, warnings, err := pdb.queryClient.QueryRange(ctx, query, r)
 	if err != nil {
 		cclog.Errorf("Prometheus query error in LoadNodeData: %v\n", err)
-		return nil, totalNodes, hasNextPage, errors.New("Prometheus query error")
+		return nil, errors.New("Prometheus query error")
 	}
 	if len(warnings) > 0 {
 		cclog.Warnf("Warnings: %v\n", warnings)
@@ -650,5 +583,5 @@ func (pdb *PrometheusDataRepository) LoadNodeListData(
 	}
 	t1 := time.Since(t0)
 	cclog.Debugf("LoadNodeListData of %v nodes took %s", len(data), t1)
-	return data, totalNodes, hasNextPage, nil
+	return data, nil
 }
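For orientation, the ranged query this Prometheus path issues per metric maps onto the upstream Go client roughly as in the standalone sketch below (using github.com/prometheus/client_golang; the address and query string are placeholders, since the real query is rendered by pdb.FormatQuery per metric and scope):

package main

import (
	"context"
	"fmt"
	"time"

	"github.com/prometheus/client_golang/api"
	v1 "github.com/prometheus/client_golang/api/prometheus/v1"
)

func main() {
	client, err := api.NewClient(api.Config{Address: "http://localhost:9090"}) // placeholder address
	if err != nil {
		panic(err)
	}
	queryAPI := v1.NewAPI(client)

	// One ranged query over all requested nodes, stepped at the requested resolution.
	r := v1.Range{
		Start: time.Now().Add(-1 * time.Hour),
		End:   time.Now(),
		Step:  60 * time.Second, // corresponds to the resolution parameter
	}
	// Placeholder query matching a fixed node list.
	query := `cpu_load{hostname=~"node0001|node0002"}`

	result, warnings, err := queryAPI.QueryRange(context.Background(), query, r)
	if err != nil {
		panic(err)
	}
	if len(warnings) > 0 {
		fmt.Println("warnings:", warnings)
	}
	fmt.Println(result.Type(), result.String())
}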
@@ -10,7 +10,6 @@ import (
 	"encoding/json"
 	"time"
 
-	"github.com/ClusterCockpit/cc-backend/internal/graph/model"
 	"github.com/ClusterCockpit/cc-lib/schema"
 )
 
@@ -63,15 +62,14 @@ func (tmdr *TestMetricDataRepository) LoadNodeData(
 }
 
 func (tmdr *TestMetricDataRepository) LoadNodeListData(
-	cluster, subCluster, nodeFilter string,
-	preFiltered []string,
+	cluster, subCluster string,
+	nodes []string,
 	metrics []string,
 	scopes []schema.MetricScope,
 	resolution int,
 	from, to time.Time,
-	page *model.PageRequest,
 	ctx context.Context,
-) (map[string]schema.JobData, int, bool, error) {
+) (map[string]schema.JobData, error) {
 	panic("TODO")
 }