From 363e839c49c9013a764a7da1653c637e82e09769 Mon Sep 17 00:00:00 2001 From: Jan Eitzinger Date: Sat, 7 Feb 2026 07:05:33 +0100 Subject: [PATCH 1/9] Add simple log viewer in web frontend --- cmd/cc-backend/server.go | 1 + internal/api/log.go | 167 ++++++++++++++++++++ internal/config/config.go | 3 + internal/routerConfig/routes.go | 1 + web/frontend/package-lock.json | 26 ---- web/frontend/rollup.config.mjs | 3 +- web/frontend/src/Header.svelte | 10 ++ web/frontend/src/Logs.root.svelte | 228 ++++++++++++++++++++++++++++ web/frontend/src/logs.entrypoint.js | 10 ++ web/templates/monitoring/logs.tmpl | 13 ++ 10 files changed, 435 insertions(+), 27 deletions(-) create mode 100644 internal/api/log.go create mode 100644 web/frontend/src/Logs.root.svelte create mode 100644 web/frontend/src/logs.entrypoint.js create mode 100644 web/templates/monitoring/logs.tmpl diff --git a/cmd/cc-backend/server.go b/cmd/cc-backend/server.go index 4035c430..68cc4736 100644 --- a/cmd/cc-backend/server.go +++ b/cmd/cc-backend/server.go @@ -245,6 +245,7 @@ func (s *Server) init() error { s.restAPIHandle.MountAPIRoutes(securedapi) s.restAPIHandle.MountUserAPIRoutes(userapi) s.restAPIHandle.MountConfigAPIRoutes(configapi) + s.restAPIHandle.MountLogAPIRoutes(configapi) s.restAPIHandle.MountFrontendAPIRoutes(frontendapi) if config.Keys.APISubjects != nil { diff --git a/internal/api/log.go b/internal/api/log.go new file mode 100644 index 00000000..4ad07589 --- /dev/null +++ b/internal/api/log.go @@ -0,0 +1,167 @@ +// Copyright (C) NHR@FAU, University Erlangen-Nuremberg. +// All rights reserved. This file is part of cc-backend. +// Use of this source code is governed by a MIT-style +// license that can be found in the LICENSE file. 
+package api + +import ( + "bufio" + "encoding/json" + "fmt" + "net/http" + "os/exec" + "regexp" + "strconv" + + "github.com/ClusterCockpit/cc-backend/internal/config" + "github.com/ClusterCockpit/cc-backend/internal/repository" + cclog "github.com/ClusterCockpit/cc-lib/v2/ccLogger" + "github.com/ClusterCockpit/cc-lib/v2/schema" + "github.com/gorilla/mux" +) + +type LogEntry struct { + Timestamp string `json:"timestamp"` + Priority int `json:"priority"` + Message string `json:"message"` + Unit string `json:"unit"` +} + +var safePattern = regexp.MustCompile(`^[a-zA-Z0-9 :\-\.]+$`) + +func (api *RestAPI) getJournalLog(rw http.ResponseWriter, r *http.Request) { + user := repository.GetUserFromContext(r.Context()) + if !user.HasRole(schema.RoleAdmin) { + handleError(fmt.Errorf("only admins are allowed to view logs"), http.StatusForbidden, rw) + return + } + + since := r.URL.Query().Get("since") + if since == "" { + since = "1 hour ago" + } + if !safePattern.MatchString(since) { + handleError(fmt.Errorf("invalid 'since' parameter"), http.StatusBadRequest, rw) + return + } + + lines := 200 + if l := r.URL.Query().Get("lines"); l != "" { + n, err := strconv.Atoi(l) + if err != nil || n < 1 { + handleError(fmt.Errorf("invalid 'lines' parameter"), http.StatusBadRequest, rw) + return + } + if n > 1000 { + n = 1000 + } + lines = n + } + + unit := config.Keys.SystemdUnit + if unit == "" { + unit = "clustercockpit" + } + + args := []string{ + "--output=json", + "--no-pager", + fmt.Sprintf("--lines=%d", lines), + fmt.Sprintf("--since=%s", since), + fmt.Sprintf("--unit=%s", unit), + } + + if level := r.URL.Query().Get("level"); level != "" { + n, err := strconv.Atoi(level) + if err != nil || n < 0 || n > 7 { + handleError(fmt.Errorf("invalid 'level' parameter (must be 0-7)"), http.StatusBadRequest, rw) + return + } + args = append(args, fmt.Sprintf("--priority=%d", n)) + } + + if search := r.URL.Query().Get("search"); search != "" { + if !safePattern.MatchString(search) { + 
handleError(fmt.Errorf("invalid 'search' parameter"), http.StatusBadRequest, rw) + return + } + args = append(args, fmt.Sprintf("--grep=%s", search)) + } + + cmd := exec.CommandContext(r.Context(), "journalctl", args...) + stdout, err := cmd.StdoutPipe() + if err != nil { + handleError(fmt.Errorf("failed to create pipe: %w", err), http.StatusInternalServerError, rw) + return + } + + if err := cmd.Start(); err != nil { + handleError(fmt.Errorf("failed to start journalctl: %w", err), http.StatusInternalServerError, rw) + return + } + + entries := make([]LogEntry, 0, lines) + scanner := bufio.NewScanner(stdout) + for scanner.Scan() { + var raw map[string]any + if err := json.Unmarshal(scanner.Bytes(), &raw); err != nil { + continue + } + + priority := 6 // default info + if p, ok := raw["PRIORITY"]; ok { + switch v := p.(type) { + case string: + if n, err := strconv.Atoi(v); err == nil { + priority = n + } + case float64: + priority = int(v) + } + } + + msg := "" + if m, ok := raw["MESSAGE"]; ok { + if s, ok := m.(string); ok { + msg = s + } + } + + ts := "" + if t, ok := raw["__REALTIME_TIMESTAMP"]; ok { + if s, ok := t.(string); ok { + ts = s + } + } + + unitName := "" + if u, ok := raw["_SYSTEMD_UNIT"]; ok { + if s, ok := u.(string); ok { + unitName = s + } + } + + entries = append(entries, LogEntry{ + Timestamp: ts, + Priority: priority, + Message: msg, + Unit: unitName, + }) + } + + if err := cmd.Wait(); err != nil { + // journalctl returns exit code 1 when --grep matches nothing + if len(entries) == 0 { + cclog.Debugf("journalctl exited with: %v", err) + } + } + + rw.Header().Set("Content-Type", "application/json") + if err := json.NewEncoder(rw).Encode(entries); err != nil { + cclog.Errorf("Failed to encode log entries: %v", err) + } +} + +func (api *RestAPI) MountLogAPIRoutes(r *mux.Router) { + r.HandleFunc("/logs/", api.getJournalLog).Methods(http.MethodGet) +} diff --git a/internal/config/config.go b/internal/config/config.go index 4e6fe975..d5a4df48 100644 
--- a/internal/config/config.go +++ b/internal/config/config.go @@ -71,6 +71,9 @@ type ProgramConfig struct { // If exists, will enable dynamic zoom in frontend metric plots using the configured values EnableResampling *ResampleConfig `json:"resampling"` + + // Systemd unit name for log viewer (default: "clustercockpit") + SystemdUnit string `json:"systemd-unit"` } type ResampleConfig struct { diff --git a/internal/routerConfig/routes.go b/internal/routerConfig/routes.go index b8f6de95..59491297 100644 --- a/internal/routerConfig/routes.go +++ b/internal/routerConfig/routes.go @@ -50,6 +50,7 @@ var routes []Route = []Route{ {"/monitoring/status/{cluster}", "monitoring/status.tmpl", " Dashboard - ClusterCockpit", false, setupClusterStatusRoute}, {"/monitoring/status/detail/{cluster}", "monitoring/status.tmpl", "Status of - ClusterCockpit", false, setupClusterDetailRoute}, {"/monitoring/dashboard/{cluster}", "monitoring/dashboard.tmpl", " Dashboard - ClusterCockpit", false, setupDashboardRoute}, + {"/monitoring/logs", "monitoring/logs.tmpl", "Logs - ClusterCockpit", false, func(i InfoType, r *http.Request) InfoType { return i }}, } func setupHomeRoute(i InfoType, r *http.Request) InfoType { diff --git a/web/frontend/package-lock.json b/web/frontend/package-lock.json index e3451242..6962dc1b 100644 --- a/web/frontend/package-lock.json +++ b/web/frontend/package-lock.json @@ -250,7 +250,6 @@ "cpu": [ "arm" ], - "dev": true, "license": "MIT", "optional": true, "os": [ @@ -264,7 +263,6 @@ "cpu": [ "arm64" ], - "dev": true, "license": "MIT", "optional": true, "os": [ @@ -278,7 +276,6 @@ "cpu": [ "arm64" ], - "dev": true, "license": "MIT", "optional": true, "os": [ @@ -292,7 +289,6 @@ "cpu": [ "x64" ], - "dev": true, "license": "MIT", "optional": true, "os": [ @@ -306,7 +302,6 @@ "cpu": [ "arm64" ], - "dev": true, "license": "MIT", "optional": true, "os": [ @@ -320,7 +315,6 @@ "cpu": [ "x64" ], - "dev": true, "license": "MIT", "optional": true, "os": [ @@ -334,7 +328,6 @@ 
"cpu": [ "arm" ], - "dev": true, "license": "MIT", "optional": true, "os": [ @@ -348,7 +341,6 @@ "cpu": [ "arm" ], - "dev": true, "license": "MIT", "optional": true, "os": [ @@ -362,7 +354,6 @@ "cpu": [ "arm64" ], - "dev": true, "license": "MIT", "optional": true, "os": [ @@ -376,7 +367,6 @@ "cpu": [ "arm64" ], - "dev": true, "license": "MIT", "optional": true, "os": [ @@ -390,7 +380,6 @@ "cpu": [ "loong64" ], - "dev": true, "license": "MIT", "optional": true, "os": [ @@ -404,7 +393,6 @@ "cpu": [ "loong64" ], - "dev": true, "license": "MIT", "optional": true, "os": [ @@ -418,7 +406,6 @@ "cpu": [ "ppc64" ], - "dev": true, "license": "MIT", "optional": true, "os": [ @@ -432,7 +419,6 @@ "cpu": [ "ppc64" ], - "dev": true, "license": "MIT", "optional": true, "os": [ @@ -446,7 +432,6 @@ "cpu": [ "riscv64" ], - "dev": true, "license": "MIT", "optional": true, "os": [ @@ -460,7 +445,6 @@ "cpu": [ "riscv64" ], - "dev": true, "license": "MIT", "optional": true, "os": [ @@ -474,7 +458,6 @@ "cpu": [ "s390x" ], - "dev": true, "license": "MIT", "optional": true, "os": [ @@ -488,7 +471,6 @@ "cpu": [ "x64" ], - "dev": true, "license": "MIT", "optional": true, "os": [ @@ -502,7 +484,6 @@ "cpu": [ "x64" ], - "dev": true, "license": "MIT", "optional": true, "os": [ @@ -516,7 +497,6 @@ "cpu": [ "x64" ], - "dev": true, "license": "MIT", "optional": true, "os": [ @@ -530,7 +510,6 @@ "cpu": [ "arm64" ], - "dev": true, "license": "MIT", "optional": true, "os": [ @@ -544,7 +523,6 @@ "cpu": [ "arm64" ], - "dev": true, "license": "MIT", "optional": true, "os": [ @@ -558,7 +536,6 @@ "cpu": [ "ia32" ], - "dev": true, "license": "MIT", "optional": true, "os": [ @@ -572,7 +549,6 @@ "cpu": [ "x64" ], - "dev": true, "license": "MIT", "optional": true, "os": [ @@ -586,7 +562,6 @@ "cpu": [ "x64" ], - "dev": true, "license": "MIT", "optional": true, "os": [ @@ -837,7 +812,6 @@ "version": "2.3.3", "resolved": "https://registry.npmjs.org/fsevents/-/fsevents-2.3.3.tgz", "integrity": 
"sha512-5xoDfX+fL7faATnagmWPpbFtwh/R77WmMMqqHGS65C3vvB0YHrgF+B1YmZ3441tMj5n63k0212XNoJwzlhffQw==", - "dev": true, "hasInstallScript": true, "license": "MIT", "optional": true, diff --git a/web/frontend/rollup.config.mjs b/web/frontend/rollup.config.mjs index 6b7cf884..8aca6161 100644 --- a/web/frontend/rollup.config.mjs +++ b/web/frontend/rollup.config.mjs @@ -75,5 +75,6 @@ export default [ entrypoint('analysis', 'src/analysis.entrypoint.js'), entrypoint('status', 'src/status.entrypoint.js'), entrypoint('dashpublic', 'src/dashpublic.entrypoint.js'), - entrypoint('config', 'src/config.entrypoint.js') + entrypoint('config', 'src/config.entrypoint.js'), + entrypoint('logs', 'src/logs.entrypoint.js') ]; diff --git a/web/frontend/src/Header.svelte b/web/frontend/src/Header.svelte index c173a9f4..862981fd 100644 --- a/web/frontend/src/Header.svelte +++ b/web/frontend/src/Header.svelte @@ -135,6 +135,16 @@ listOptions: true, menu: "Info", }, + { + title: "Logs", + // svelte-ignore state_referenced_locally + requiredRole: roles.admin, + href: "/monitoring/logs", + icon: "journal-text", + perCluster: false, + listOptions: false, + menu: "Info", + }, ]; /* State Init */ diff --git a/web/frontend/src/Logs.root.svelte b/web/frontend/src/Logs.root.svelte new file mode 100644 index 00000000..3f23b297 --- /dev/null +++ b/web/frontend/src/Logs.root.svelte @@ -0,0 +1,228 @@ + + + + +{#if !isAdmin} + + +

Access denied. Admin privileges required.

+
+
+{:else} + + +
+ + + {#each timeRanges as tr} + + {/each} + + + + + + {#each levels as lv} + + {/each} + + + + + Lines + + + + + + + + + + { if (e.key === "Enter") fetchLogs(); }} + /> + + + + + + Auto + + {#each refreshIntervals as ri} + + {/each} + + + + {#if entries.length > 0} + {entries.length} entries + {/if} +
+
+ + {#if error} +
{error}
+ {/if} + +
+ + + + + + + + + + {#each entries as entry} + + + + + + {:else} + {#if !loading && !error} + + {/if} + {/each} + +
TimestampLevelMessage
{formatTimestamp(entry.timestamp)}{levelName(entry.priority)}{entry.message}
No log entries found
+
+
+
+{/if} diff --git a/web/frontend/src/logs.entrypoint.js b/web/frontend/src/logs.entrypoint.js new file mode 100644 index 00000000..5eb3c0c8 --- /dev/null +++ b/web/frontend/src/logs.entrypoint.js @@ -0,0 +1,10 @@ +import { mount } from 'svelte'; +import {} from './header.entrypoint.js' +import Logs from './Logs.root.svelte' + +mount(Logs, { + target: document.getElementById('svelte-app'), + props: { + isAdmin: isAdmin, + } +}) diff --git a/web/templates/monitoring/logs.tmpl b/web/templates/monitoring/logs.tmpl new file mode 100644 index 00000000..1613edc1 --- /dev/null +++ b/web/templates/monitoring/logs.tmpl @@ -0,0 +1,13 @@ +{{define "content"}} +
+{{end}} + +{{define "stylesheets"}} + +{{end}} +{{define "javascript"}} + + +{{end}} From 48729b172df47e56aff753cf4084a7fe792ad36e Mon Sep 17 00:00:00 2001 From: Christoph Kluge Date: Thu, 12 Feb 2026 14:27:41 +0100 Subject: [PATCH 2/9] improve nodeList loading indicator, streamlining --- web/frontend/src/generic/JobList.svelte | 2 +- web/frontend/src/systems/NodeList.svelte | 20 +-- .../src/systems/nodelist/NodeListRow.svelte | 134 ++++++++++-------- 3 files changed, 83 insertions(+), 73 deletions(-) diff --git a/web/frontend/src/generic/JobList.svelte b/web/frontend/src/generic/JobList.svelte index 278f189e..3ccbb560 100644 --- a/web/frontend/src/generic/JobList.svelte +++ b/web/frontend/src/generic/JobList.svelte @@ -305,7 +305,7 @@ {#if $jobsStore.fetching || !$jobsStore.data} -
+
diff --git a/web/frontend/src/systems/NodeList.svelte b/web/frontend/src/systems/NodeList.svelte index da196b82..2e342168 100644 --- a/web/frontend/src/systems/NodeList.svelte +++ b/web/frontend/src/systems/NodeList.svelte @@ -104,7 +104,7 @@ let itemsPerPage = $derived(usePaging ? (ccconfig?.nodeList_nodesPerPage || 10) : 10); let paging = $derived({ itemsPerPage, page }); - const nodesQuery = $derived(queryStore({ + const nodesStore = $derived(queryStore({ client: client, query: nodeListQuery, variables: { @@ -122,7 +122,7 @@ requestPolicy: "network-only", // Resolution queries are cached, but how to access them? For now: reload on every change })); - const matchedNodes = $derived($nodesQuery?.data?.nodeMetricsList?.totalNodes || 0); + const matchedNodes = $derived($nodesStore?.data?.nodeMetricsList?.totalNodes || 0); /* Effects */ $effect(() => { @@ -135,7 +135,7 @@ } = document.documentElement; // Add 100 px offset to trigger load earlier - if (scrollTop + clientHeight >= scrollHeight - 100 && $nodesQuery?.data?.nodeMetricsList?.hasNextPage) { + if (scrollTop + clientHeight >= scrollHeight - 100 && $nodesStore?.data?.nodeMetricsList?.hasNextPage) { page += 1 }; }); @@ -143,9 +143,9 @@ }); $effect(() => { - if ($nodesQuery?.data) { + if ($nodesStore?.data) { untrack(() => { - handleNodes($nodesQuery?.data?.nodeMetricsList?.items); + handleNodes($nodesStore?.data?.nodeMetricsList?.items); }); selectedMetrics = [...pendingSelectedMetrics]; // Trigger Rerender in NodeListRow Only After Data is Fetched }; @@ -228,7 +228,7 @@ style="padding-top: {headerPaddingTop}px;" > {cluster} Node Info - {#if $nodesQuery.fetching} + {#if $nodesStore.fetching} {/if} @@ -245,22 +245,22 @@ - {#if $nodesQuery.error} + {#if $nodesStore.error} - {$nodesQuery.error.message} + {$nodesStore.error.message} {:else} {#each nodes as nodeData (nodeData.host)} - + {:else} No nodes found {/each} {/if} - {#if $nodesQuery.fetching || !$nodesQuery.data} + {#if $nodesStore.fetching || 
!$nodesStore.data}
diff --git a/web/frontend/src/systems/nodelist/NodeListRow.svelte b/web/frontend/src/systems/nodelist/NodeListRow.svelte index 46f8c4a4..1fca83f2 100644 --- a/web/frontend/src/systems/nodelist/NodeListRow.svelte +++ b/web/frontend/src/systems/nodelist/NodeListRow.svelte @@ -4,6 +4,7 @@ Properties: - `cluster String`: The nodes' cluster - `nodeData Object`: The node data object including metric data + - `nodeDataFetching Bool`: Whether the metric query still runs - `selectedMetrics [String]`: The array of selected metrics - `globalMetrics [Obj]`: Includes the backend supplied availabilities for cluster and subCluster --> @@ -24,6 +25,7 @@ let { cluster, nodeData, + nodeDataFetching, selectedMetrics, globalMetrics } = $props(); @@ -72,7 +74,7 @@ ); const extendedLegendData = $derived($nodeJobsData?.data ? buildExtendedLegend() : null); - const refinedData = $derived(nodeData?.metrics ? sortAndSelectScope(selectedMetrics, nodeData.metrics) : []); + const refinedData = $derived(!nodeDataFetching ? sortAndSelectScope(selectedMetrics, nodeData.metrics) : []); const dataHealth = $derived(refinedData.filter((rd) => rd.availability == "configured").map((enabled) => (enabled?.data?.metric?.series?.length > 0))); /* Functions */ @@ -150,65 +152,73 @@ hoststate={nodeData?.state? nodeData.state: 'notindb'}/> {/if} - {#each refinedData as metricData, i (metricData?.data?.name || i)} - {#key metricData} - - {#if metricData?.availability == "none"} - -

No dataset(s) returned for {selectedMetrics[i]}

-

Metric is not configured for cluster {cluster}.

-
- {:else if metricData?.availability == "disabled"} - -

No dataset(s) returned for {selectedMetrics[i]}

-

Metric has been disabled for subcluster {nodeData.subCluster}.

-
- {:else if !metricData?.data} - -

No dataset(s) returned for {selectedMetrics[i]}

-

Metric or host was not found in metric store for cluster {cluster}.

-
- {:else if !!metricData.data?.metric.statisticsSeries} - - -
- - {:else} - - {/if} - - {/key} - {/each} + {#if nodeDataFetching} + +
+ +
+ + {:else} + {#each refinedData as metricData, i (metricData?.data?.name || i)} + {#key metricData} + + {#if metricData?.availability == "none"} + +

No dataset(s) returned for {selectedMetrics[i]}

+

Metric is not configured for cluster {cluster}.

+
+ {:else if metricData?.availability == "disabled"} + +

No dataset(s) returned for {selectedMetrics[i]}

+

Metric has been disabled for subcluster {nodeData.subCluster}.

+
+ {:else if !metricData?.data} + +

No dataset(s) returned for {selectedMetrics[i]}

+

Metric or host was not found in metric store for cluster {cluster}.

+
+ {:else if !!metricData.data?.metric.statisticsSeries} + + +
+ + {:else} + + {/if} + + {/key} + {/each} + {/if} From c15f1117f553010e6e9c55331f45e3dcd2ab1c71 Mon Sep 17 00:00:00 2001 From: Jan Eitzinger Date: Thu, 12 Feb 2026 15:45:15 +0100 Subject: [PATCH 3/9] Review and improve node repo queries --- .../migrations/sqlite3/10_node-table.up.sql | 1 + internal/repository/node.go | 226 +++++++----------- 2 files changed, 83 insertions(+), 144 deletions(-) diff --git a/internal/repository/migrations/sqlite3/10_node-table.up.sql b/internal/repository/migrations/sqlite3/10_node-table.up.sql index fd118f5d..b788a8a9 100644 --- a/internal/repository/migrations/sqlite3/10_node-table.up.sql +++ b/internal/repository/migrations/sqlite3/10_node-table.up.sql @@ -38,6 +38,7 @@ CREATE INDEX IF NOT EXISTS nodestates_state_timestamp ON node_state (node_state, CREATE INDEX IF NOT EXISTS nodestates_health_timestamp ON node_state (health_state, time_stamp); CREATE INDEX IF NOT EXISTS nodestates_nodeid_state ON node_state (node_id, node_state); CREATE INDEX IF NOT EXISTS nodestates_nodeid_health ON node_state (node_id, health_state); +CREATE INDEX IF NOT EXISTS nodestates_nodeid_timestamp ON node_state (node_id, time_stamp DESC); -- Add NEW Indices For Increased Amounts of Tags CREATE INDEX IF NOT EXISTS tags_jobid ON jobtag (job_id); diff --git a/internal/repository/node.go b/internal/repository/node.go index 08a694c6..2ffe6698 100644 --- a/internal/repository/node.go +++ b/internal/repository/node.go @@ -52,6 +52,38 @@ func GetNodeRepository() *NodeRepository { return nodeRepoInstance } +// latestStateCondition returns a squirrel expression that restricts node_state +// rows to the latest per node_id using a correlated subquery. +// Requires the query to join node and node_state tables. 
+func latestStateCondition() sq.Sqlizer { + return sq.Expr( + "node_state.id = (SELECT ns2.id FROM node_state ns2 WHERE ns2.node_id = node.id ORDER BY ns2.time_stamp DESC LIMIT 1)", + ) +} + +// applyNodeFilters applies common NodeFilter conditions to a query that joins +// the node and node_state tables with latestStateCondition. +func applyNodeFilters(query sq.SelectBuilder, filters []*model.NodeFilter) sq.SelectBuilder { + for _, f := range filters { + if f.Cluster != nil { + query = buildStringCondition("node.cluster", f.Cluster, query) + } + if f.SubCluster != nil { + query = buildStringCondition("node.subcluster", f.SubCluster, query) + } + if f.Hostname != nil { + query = buildStringCondition("node.hostname", f.Hostname, query) + } + if f.SchedulerState != nil { + query = query.Where("node_state.node_state = ?", f.SchedulerState) + } + if f.HealthState != nil { + query = query.Where("node_state.health_state = ?", f.HealthState) + } + } + return query +} + func (r *NodeRepository) FetchMetadata(hostname string, cluster string) (map[string]string, error) { start := time.Now() @@ -82,17 +114,16 @@ func (r *NodeRepository) FetchMetadata(hostname string, cluster string) (map[str func (r *NodeRepository) GetNode(hostname string, cluster string, withMeta bool) (*schema.Node, error) { node := &schema.Node{} - var timestamp int - if err := sq.Select("node.hostname", "node.cluster", "node.subcluster", "node_state.node_state", - "node_state.health_state", "MAX(node_state.time_stamp) as time"). - From("node_state"). - Join("node ON node_state.node_id = node.id"). + if err := sq.Select("node.hostname", "node.cluster", "node.subcluster", + "node_state.node_state", "node_state.health_state"). + From("node"). + Join("node_state ON node_state.node_id = node.id"). + Where(latestStateCondition()). Where("node.hostname = ?", hostname). Where("node.cluster = ?", cluster). - GroupBy("node_state.node_id"). RunWith(r.DB). 
- QueryRow().Scan(&node.Hostname, &node.Cluster, &node.SubCluster, &node.NodeState, &node.HealthState, ×tamp); err != nil { - cclog.Warnf("Error while querying node '%s' at time '%d' from database: %v", hostname, timestamp, err) + QueryRow().Scan(&node.Hostname, &node.Cluster, &node.SubCluster, &node.NodeState, &node.HealthState); err != nil { + cclog.Warnf("Error while querying node '%s' from database: %v", hostname, err) return nil, err } @@ -111,16 +142,15 @@ func (r *NodeRepository) GetNode(hostname string, cluster string, withMeta bool) func (r *NodeRepository) GetNodeByID(id int64, withMeta bool) (*schema.Node, error) { node := &schema.Node{} - var timestamp int - if err := sq.Select("node.hostname", "node.cluster", "node.subcluster", "node_state.node_state", - "node_state.health_state", "MAX(node_state.time_stamp) as time"). - From("node_state"). - Join("node ON node_state.node_id = node.id"). + if err := sq.Select("node.hostname", "node.cluster", "node.subcluster", + "node_state.node_state", "node_state.health_state"). + From("node"). + Join("node_state ON node_state.node_id = node.id"). + Where(latestStateCondition()). Where("node.id = ?", id). - GroupBy("node_state.node_id"). RunWith(r.DB). - QueryRow().Scan(&node.Hostname, &node.Cluster, &node.SubCluster, &node.NodeState, &node.HealthState, ×tamp); err != nil { - cclog.Warnf("Error while querying node ID '%d' at time '%d' from database: %v", id, timestamp, err) + QueryRow().Scan(&node.Hostname, &node.Cluster, &node.SubCluster, &node.NodeState, &node.HealthState); err != nil { + cclog.Warnf("Error while querying node ID '%d' from database: %v", id, err) return nil, err } @@ -313,40 +343,17 @@ func (r *NodeRepository) QueryNodes( order *model.OrderByInput, // Currently unused! ) ([]*schema.Node, error) { query, qerr := AccessCheck(ctx, - sq.Select("hostname", "cluster", "subcluster", "node_state", "health_state", "MAX(time_stamp) as time"). 
+ sq.Select("node.hostname", "node.cluster", "node.subcluster", + "node_state.node_state", "node_state.health_state"). From("node"). - Join("node_state ON node_state.node_id = node.id")) + Join("node_state ON node_state.node_id = node.id"). + Where(latestStateCondition())) if qerr != nil { return nil, qerr } - for _, f := range filters { - if f.Cluster != nil { - query = buildStringCondition("cluster", f.Cluster, query) - } - if f.SubCluster != nil { - query = buildStringCondition("subcluster", f.SubCluster, query) - } - if f.Hostname != nil { - query = buildStringCondition("hostname", f.Hostname, query) - } - if f.SchedulerState != nil { - query = query.Where("node_state = ?", f.SchedulerState) - // Requires Additional time_stamp Filter: Else the last (past!) time_stamp with queried state will be returned - // TODO: Hardcoded TimeDiff Suboptimal - Use Config Option? - now := time.Now().Unix() - query = query.Where(sq.Gt{"time_stamp": (now - 300)}) - } - if f.HealthState != nil { - query = query.Where("health_state = ?", f.HealthState) - // Requires Additional time_stamp Filter: Else the last (past!) time_stamp with queried state will be returned - // TODO: Hardcoded TimeDiff Suboptimal - Use Config Option? 
- now := time.Now().Unix() - query = query.Where(sq.Gt{"time_stamp": (now - 300)}) - } - } - - query = query.GroupBy("node_id").OrderBy("hostname ASC") + query = applyNodeFilters(query, filters) + query = query.OrderBy("node.hostname ASC") if page != nil && page.ItemsPerPage != -1 { limit := uint64(page.ItemsPerPage) @@ -363,11 +370,10 @@ func (r *NodeRepository) QueryNodes( nodes := make([]*schema.Node, 0) for rows.Next() { node := schema.Node{} - var timestamp int if err := rows.Scan(&node.Hostname, &node.Cluster, &node.SubCluster, - &node.NodeState, &node.HealthState, ×tamp); err != nil { + &node.NodeState, &node.HealthState); err != nil { rows.Close() - cclog.Warnf("Error while scanning rows (QueryNodes) at time '%d'", timestamp) + cclog.Warn("Error while scanning rows (QueryNodes)") return nil, err } nodes = append(nodes, &node) @@ -377,74 +383,39 @@ func (r *NodeRepository) QueryNodes( } // CountNodes returns the total matched nodes based on a node filter. It always operates -// on the last state (largest timestamp). +// on the last state (largest timestamp) per node. func (r *NodeRepository) CountNodes( ctx context.Context, filters []*model.NodeFilter, ) (int, error) { query, qerr := AccessCheck(ctx, - sq.Select("time_stamp", "count(*) as countRes"). + sq.Select("COUNT(*)"). From("node"). - Join("node_state ON node_state.node_id = node.id")) + Join("node_state ON node_state.node_id = node.id"). + Where(latestStateCondition())) if qerr != nil { return 0, qerr } - for _, f := range filters { - if f.Cluster != nil { - query = buildStringCondition("cluster", f.Cluster, query) - } - if f.SubCluster != nil { - query = buildStringCondition("subcluster", f.SubCluster, query) - } - if f.Hostname != nil { - query = buildStringCondition("hostname", f.Hostname, query) - } - if f.SchedulerState != nil { - query = query.Where("node_state = ?", f.SchedulerState) - // Requires Additional time_stamp Filter: Else the last (past!) 
time_stamp with queried state will be returned - // TODO: Hardcoded TimeDiff Suboptimal - Use Config Option? - now := time.Now().Unix() - query = query.Where(sq.Gt{"time_stamp": (now - 300)}) - } - if f.HealthState != nil { - query = query.Where("health_state = ?", f.HealthState) - // Requires Additional time_stamp Filter: Else the last (past!) time_stamp with queried state will be returned - // TODO: Hardcoded TimeDiff Suboptimal - Use Config Option? - now := time.Now().Unix() - query = query.Where(sq.Gt{"time_stamp": (now - 300)}) - } - } + query = applyNodeFilters(query, filters) - query = query.GroupBy("time_stamp").OrderBy("time_stamp DESC").Limit(1) - - rows, err := query.RunWith(r.stmtCache).Query() - if err != nil { + var count int + if err := query.RunWith(r.stmtCache).QueryRow().Scan(&count); err != nil { queryString, queryVars, _ := query.ToSql() cclog.Errorf("Error while running query '%s' %v: %v", queryString, queryVars, err) return 0, err } - var totalNodes int - for rows.Next() { - var timestamp int - if err := rows.Scan(×tamp, &totalNodes); err != nil { - rows.Close() - cclog.Warnf("Error while scanning rows (CountNodes) at time '%d'", timestamp) - return 0, err - } - } - - return totalNodes, nil + return count, nil } func (r *NodeRepository) ListNodes(cluster string) ([]*schema.Node, error) { - q := sq.Select("node.hostname", "node.cluster", "node.subcluster", "node_state.node_state", - "node_state.health_state", "MAX(node_state.time_stamp) as time"). + q := sq.Select("node.hostname", "node.cluster", "node.subcluster", + "node_state.node_state", "node_state.health_state"). From("node"). Join("node_state ON node_state.node_id = node.id"). + Where(latestStateCondition()). Where("node.cluster = ?", cluster). - GroupBy("node_state.node_id"). 
OrderBy("node.hostname ASC") rows, err := q.RunWith(r.DB).Query() @@ -456,10 +427,9 @@ func (r *NodeRepository) ListNodes(cluster string) ([]*schema.Node, error) { defer rows.Close() for rows.Next() { node := &schema.Node{} - var timestamp int if err := rows.Scan(&node.Hostname, &node.Cluster, - &node.SubCluster, &node.NodeState, &node.HealthState, ×tamp); err != nil { - cclog.Warnf("Error while scanning node list (ListNodes) at time '%d'", timestamp) + &node.SubCluster, &node.NodeState, &node.HealthState); err != nil { + cclog.Warn("Error while scanning node list (ListNodes)") return nil, err } @@ -470,11 +440,11 @@ func (r *NodeRepository) ListNodes(cluster string) ([]*schema.Node, error) { } func (r *NodeRepository) MapNodes(cluster string) (map[string]string, error) { - q := sq.Select("node.hostname", "node_state.node_state", "MAX(node_state.time_stamp) as time"). + q := sq.Select("node.hostname", "node_state.node_state"). From("node"). Join("node_state ON node_state.node_id = node.id"). + Where(latestStateCondition()). Where("node.cluster = ?", cluster). - GroupBy("node_state.node_id"). 
OrderBy("node.hostname ASC") rows, err := q.RunWith(r.DB).Query() @@ -487,9 +457,8 @@ func (r *NodeRepository) MapNodes(cluster string) (map[string]string, error) { defer rows.Close() for rows.Next() { var hostname, nodestate string - var timestamp int - if err := rows.Scan(&hostname, &nodestate, ×tamp); err != nil { - cclog.Warnf("Error while scanning node list (MapNodes) at time '%d'", timestamp) + if err := rows.Scan(&hostname, &nodestate); err != nil { + cclog.Warn("Error while scanning node list (MapNodes)") return nil, err } @@ -500,33 +469,16 @@ func (r *NodeRepository) MapNodes(cluster string) (map[string]string, error) { } func (r *NodeRepository) CountStates(ctx context.Context, filters []*model.NodeFilter, column string) ([]*model.NodeStates, error) { - query, qerr := AccessCheck(ctx, sq.Select("hostname", column, "MAX(time_stamp) as time").From("node")) + query, qerr := AccessCheck(ctx, + sq.Select(column). + From("node"). + Join("node_state ON node_state.node_id = node.id"). 
+ Where(latestStateCondition())) if qerr != nil { return nil, qerr } - query = query.Join("node_state ON node_state.node_id = node.id") - - for _, f := range filters { - if f.Hostname != nil { - query = buildStringCondition("hostname", f.Hostname, query) - } - if f.Cluster != nil { - query = buildStringCondition("cluster", f.Cluster, query) - } - if f.SubCluster != nil { - query = buildStringCondition("subcluster", f.SubCluster, query) - } - if f.SchedulerState != nil { - query = query.Where("node_state = ?", f.SchedulerState) - } - if f.HealthState != nil { - query = query.Where("health_state = ?", f.HealthState) - } - } - - // Add Group and Order - query = query.GroupBy("hostname").OrderBy("hostname DESC") + query = applyNodeFilters(query, filters) rows, err := query.RunWith(r.stmtCache).Query() if err != nil { @@ -537,12 +489,10 @@ func (r *NodeRepository) CountStates(ctx context.Context, filters []*model.NodeF stateMap := map[string]int{} for rows.Next() { - var hostname, state string - var timestamp int - - if err := rows.Scan(&hostname, &state, ×tamp); err != nil { + var state string + if err := rows.Scan(&state); err != nil { rows.Close() - cclog.Warnf("Error while scanning rows (CountStates) at time '%d'", timestamp) + cclog.Warn("Error while scanning rows (CountStates)") return nil, err } @@ -735,26 +685,14 @@ func (r *NodeRepository) GetNodesForList( } } else { - // DB Nodes: Count and Find Next Page + // DB Nodes: Count and derive hasNextPage from count var cerr error countNodes, cerr = r.CountNodes(ctx, queryFilters) if cerr != nil { cclog.Warn("error while counting node database data (Resolver.NodeMetricsList)") return nil, nil, 0, false, cerr } - - // Example Page 4 @ 10 IpP : Does item 41 exist? - // Minimal Page 41 @ 1 IpP : If len(result) is 1, Page 5 exists. 
- nextPage := &model.PageRequest{ - ItemsPerPage: 1, - Page: ((page.Page * page.ItemsPerPage) + 1), - } - nextNodes, err := r.QueryNodes(ctx, queryFilters, nextPage, nil) // Order not Used - if err != nil { - cclog.Warn("Error while querying next nodes") - return nil, nil, 0, false, err - } - hasNextPage = len(nextNodes) == 1 + hasNextPage = page.Page*page.ItemsPerPage < countNodes } // Fallback for non-init'd node table in DB; Ignores stateFilter From 3215bc3de0a09888db800770da8d59a718a166e3 Mon Sep 17 00:00:00 2001 From: Christoph Kluge Date: Fri, 13 Feb 2026 11:58:52 +0100 Subject: [PATCH 4/9] review loading indicators in nodeList --- web/frontend/src/systems/NodeList.svelte | 21 ++- .../src/systems/nodelist/NodeInfo.svelte | 15 +- .../src/systems/nodelist/NodeListRow.svelte | 138 +++++++++--------- 3 files changed, 95 insertions(+), 79 deletions(-) diff --git a/web/frontend/src/systems/NodeList.svelte b/web/frontend/src/systems/NodeList.svelte index 2e342168..403a8030 100644 --- a/web/frontend/src/systems/NodeList.svelte +++ b/web/frontend/src/systems/NodeList.svelte @@ -152,12 +152,21 @@ }); $effect(() => { - // Triggers (Except Paging) + // Update NodeListRows metrics only: Keep ordered nodes on page 1 from, to pendingSelectedMetrics, selectedResolution + // Continous Scroll: Paging if parameters change: Existing entries will not match new selections + if (!usePaging) { + nodes = []; + page = 1; + } + }); + + $effect(() => { + // Update NodeListRows metrics only: Keep ordered nodes on page 1 hostnameFilter, hoststateFilter // Continous Scroll: Paging if parameters change: Existing entries will not match new selections - // Nodes Array Reset in HandleNodes func + nodes = []; if (!usePaging) { page = 1; } @@ -255,9 +264,11 @@ {#each nodes as nodeData (nodeData.host)} {:else} - - No nodes found - + {#if !$nodesStore.fetching} + + No nodes found + + {/if} {/each} {/if} {#if $nodesStore.fetching || !$nodesStore.data} diff --git 
a/web/frontend/src/systems/nodelist/NodeInfo.svelte b/web/frontend/src/systems/nodelist/NodeInfo.svelte index 39716ca2..4b616f10 100644 --- a/web/frontend/src/systems/nodelist/NodeInfo.svelte +++ b/web/frontend/src/systems/nodelist/NodeInfo.svelte @@ -51,6 +51,8 @@ /* Derived */ // Not at least one returned, selected metric: NodeHealth warning + const fetchInfo = $derived(dataHealth.includes('fetching')); + // Not at least one returned, selected metric: NodeHealth warning const healthWarn = $derived(!dataHealth.includes(true)); // At least one non-returned selected metric: Metric config error? const metricWarn = $derived(dataHealth.includes(false)); @@ -84,10 +86,17 @@ - {#if healthWarn} + {#if fetchInfo} + + + + + {:else if healthWarn} - Jobs + Info