Merge branch 'dev' into rework_status_view

Christoph Kluge
2025-08-13 14:29:19 +02:00
50 changed files with 1489 additions and 1912 deletions

View File

@@ -74,7 +74,7 @@ func scanJob(row interface{ Scan(...any) error }) (*schema.Job, error) {
 	if err := row.Scan(
 		&job.ID, &job.JobID, &job.User, &job.Project, &job.Cluster, &job.SubCluster,
 		&job.StartTime, &job.Partition, &job.ArrayJobId, &job.NumNodes, &job.NumHWThreads,
-		&job.NumAcc, &job.Exclusive, &job.MonitoringStatus, &job.SMT, &job.State,
+		&job.NumAcc, &job.Shared, &job.MonitoringStatus, &job.SMT, &job.State,
 		&job.Duration, &job.Walltime, &job.RawResources, &job.RawFootprint, &job.Energy); err != nil {
 		cclog.Warnf("Error while scanning rows (Job): %v", err)
 		return nil, err
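A minimal sketch, not from this commit, of the scanner pattern visible in the hunk header above: because both *sql.Row and *sql.Rows satisfy interface{ Scan(...any) error }, a single scanJob helper serves one-row and many-row queries. The job struct here is trimmed to two fields for illustration; the real schema.Job carries the full column set.

package jobexample

import "database/sql"

// job is a trimmed stand-in for schema.Job; Shared replaces the old
// Exclusive TINYINT with a string enum backed by the SQL CHECK constraint.
type job struct {
	ID     int64
	Shared string // 'none', 'single_user' or 'multi_user'
}

// scanJob accepts anything with a Scan method, so the same helper works
// for db.QueryRow (*sql.Row) and for iterating rows (*sql.Rows).
func scanJob(row interface{ Scan(...any) error }) (*job, error) {
	var j job
	if err := row.Scan(&j.ID, &j.Shared); err != nil {
		return nil, err
	}
	return &j, nil
}

func loadOne(db *sql.DB) (*job, error) {
	return scanJob(db.QueryRow("SELECT id, shared FROM job LIMIT 1"))
}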

View File

@@ -12,6 +12,7 @@ import (
"strings"
"time"
"github.com/ClusterCockpit/cc-backend/internal/config"
"github.com/ClusterCockpit/cc-backend/internal/graph/model"
cclog "github.com/ClusterCockpit/cc-lib/ccLogger"
"github.com/ClusterCockpit/cc-lib/schema"
@@ -216,7 +217,7 @@ func BuildWhereClause(filter *model.JobFilter, query sq.SelectBuilder) sq.Select
 	return query
 }

-func buildIntCondition(field string, cond *schema.IntRange, query sq.SelectBuilder) sq.SelectBuilder {
+func buildIntCondition(field string, cond *config.IntRange, query sq.SelectBuilder) sq.SelectBuilder {
 	return query.Where(field+" BETWEEN ? AND ?", cond.From, cond.To)
 }
@@ -224,7 +225,7 @@ func buildFloatCondition(field string, cond *model.FloatRange, query sq.SelectBu
 	return query.Where(field+" BETWEEN ? AND ?", cond.From, cond.To)
 }

-func buildTimeCondition(field string, cond *schema.TimeRange, query sq.SelectBuilder) sq.SelectBuilder {
+func buildTimeCondition(field string, cond *config.TimeRange, query sq.SelectBuilder) sq.SelectBuilder {
 	if cond.From != nil && cond.To != nil {
 		return query.Where(field+" BETWEEN ? AND ?", cond.From.Unix(), cond.To.Unix())
 	} else if cond.From != nil {
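A hedged usage sketch, not from the commit, of how these helpers compose with the Masterminds/squirrel builder: each call appends one WHERE predicate plus its bind arguments. The intRange type mimics the From/To shape that buildIntCondition expects; the concrete config.IntRange definition is assumed, not quoted.

package main

import (
	"fmt"

	sq "github.com/Masterminds/squirrel"
)

// intRange stands in for config.IntRange (field types assumed).
type intRange struct {
	From int64
	To   int64
}

func buildIntCondition(field string, cond *intRange, query sq.SelectBuilder) sq.SelectBuilder {
	return query.Where(field+" BETWEEN ? AND ?", cond.From, cond.To)
}

func main() {
	query := sq.Select("job.id").From("job")
	query = buildIntCondition("job.num_nodes", &intRange{From: 1, To: 64}, query)

	sql, args, err := query.ToSql()
	if err != nil {
		panic(err)
	}
	// Prints: SELECT job.id FROM job WHERE job.num_nodes BETWEEN ? AND ? [1 64]
	fmt.Println(sql, args)
}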

View File

@@ -1,9 +1,10 @@
CREATE TABLE "job_cache" (
id INTEGER PRIMARY KEY,
job_id BIGINT NOT NULL,
cluster VARCHAR(255) NOT NULL,
hpc_cluster VARCHAR(255) NOT NULL,
subcluster VARCHAR(255) NOT NULL,
start_time BIGINT NOT NULL, -- Unix timestamp
submit_time BIGINT NOT NULL, -- Unix timestamp
start_time BIGINT NOT NULL DEFAULT 0, -- Unix timestamp
hpc_user VARCHAR(255) NOT NULL,
project VARCHAR(255) NOT NULL,
cluster_partition VARCHAR(255),
@@ -12,8 +13,9 @@ CREATE TABLE "job_cache" (
     walltime INT NOT NULL,
     job_state VARCHAR(255) NOT NULL
     CHECK (job_state IN (
-        'running', 'completed', 'failed', 'cancelled',
-        'stopped', 'timeout', 'preempted', 'out_of_memory'
+        'boot_fail', 'cancelled', 'completed', 'deadline',
+        'failed', 'node_fail', 'out_of_memory', 'pending',
+        'preempted', 'running', 'suspended', 'timeout'
     )),
     meta_data TEXT, -- JSON
     resources TEXT NOT NULL, -- JSON
@@ -21,7 +23,8 @@ CREATE TABLE "job_cache" (
     num_hwthreads INT,
     num_acc INT,
     smt TINYINT NOT NULL DEFAULT 1 CHECK (smt IN (0, 1)),
-    exclusive TINYINT NOT NULL DEFAULT 1 CHECK (exclusive IN (0, 1, 2)),
+    shared TEXT NOT NULL
+    CHECK (shared IN ('none', 'single_user', 'multi_user')),
     monitoring_status TINYINT NOT NULL DEFAULT 1
     CHECK (monitoring_status IN (0, 1, 2, 3)),
     energy REAL NOT NULL DEFAULT 0.0,
@@ -29,3 +32,43 @@ CREATE TABLE "job_cache" (
     footprint TEXT DEFAULT NULL,
     UNIQUE (job_id, hpc_cluster, start_time)
 );
CREATE TABLE "job_new" (
id INTEGER PRIMARY KEY,
job_id BIGINT NOT NULL,
hpc_cluster TEXT NOT NULL,
subcluster TEXT NOT NULL,
submit_time BIGINT NOT NULL DEFAULT 0, -- Unix timestamp
start_time BIGINT NOT NULL DEFAULT 0, -- Unix timestamp
hpc_user TEXT NOT NULL,
project TEXT NOT NULL,
cluster_partition TEXT,
array_job_id BIGINT,
duration INT NOT NULL,
walltime INT NOT NULL,
job_state TEXT NOT NULL
CHECK (job_state IN (
'boot_fail', 'cancelled', 'completed', 'deadline',
'failed', 'node_fail', 'out-of-memory', 'pending',
'preempted', 'running', 'suspended', 'timeout'
)),
meta_data TEXT, -- JSON
resources TEXT NOT NULL, -- JSON
num_nodes INT NOT NULL,
num_hwthreads INT,
num_acc INT,
smt INT NOT NULL DEFAULT 1,
shared TEXT NOT NULL
CHECK (shared IN ("none", "single_user", "multi_user")),
monitoring_status TINYINT NOT NULL DEFAULT 1
CHECK (monitoring_status IN (0, 1, 2, 3)),
energy REAL NOT NULL DEFAULT 0.0,
energy_footprint TEXT DEFAULT NULL,
footprint TEXT DEFAULT NULL,
UNIQUE (job_id, cluster, start_time)
);
ALTER TABLE job RENAME COLUMN cluster TO hpc_cluster;
INSERT INTO job_new SELECT * FROM job;
DROP TABLE job;
ALTER TABLE job_new RENAME TO job;
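A sketch, not part of the migration file, of why the statements above take this shape: SQLite cannot change a column's type or CHECK constraints in place, so the usual recipe is create-copy-drop-rename. Wrapping all steps in one transaction keeps a failed rebuild from leaving both tables behind; the function and variable names here are illustrative.

package migrate

import (
	"database/sql"

	_ "github.com/mattn/go-sqlite3" // the driver this repo already uses
)

// rebuildJobTable applies the create-copy-drop-rename recipe atomically.
// createJobNew is the CREATE TABLE "job_new" (...) DDL from the migration.
func rebuildJobTable(db *sql.DB, createJobNew string) error {
	tx, err := db.Begin()
	if err != nil {
		return err
	}
	defer tx.Rollback() // no-op once Commit succeeds

	steps := []string{
		createJobNew,
		`ALTER TABLE job RENAME COLUMN cluster TO hpc_cluster`,
		// SELECT * relies on job and job_new declaring columns in the same order.
		`INSERT INTO job_new SELECT * FROM job`,
		`DROP TABLE job`,
		`ALTER TABLE job_new RENAME TO job`,
	}
	for _, stmt := range steps {
		if _, err := tx.Exec(stmt); err != nil {
			return err
		}
	}
	return tx.Commit()
}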

View File

@@ -1,5 +1,6 @@
CREATE TABLE "node" (
id INTEGER PRIMARY KEY,
time_stamp INTEGER NOT NULL,
hostname VARCHAR(255) NOT NULL,
cluster VARCHAR(255) NOT NULL,
subcluster VARCHAR(255) NOT NULL,
@@ -33,4 +34,4 @@ CREATE INDEX IF NOT EXISTS nodes_cluster_health ON node (cluster, health_state);
 -- Add Indices For Increased Amounts of Tags
 CREATE INDEX IF NOT EXISTS tags_jobid ON jobtag (job_id);
-CREATE INDEX IF NOT EXISTS tags_tagid ON jobtag (tag_id);
\ No newline at end of file
+CREATE INDEX IF NOT EXISTS tags_tagid ON jobtag (tag_id);

View File

@@ -6,6 +6,7 @@ package repository
 import (
 	"encoding/json"
+	"maps"
 	"sync"
 	"time"
@@ -24,7 +25,7 @@ var (
 type UserCfgRepo struct {
 	DB         *sqlx.DB
 	Lookup     *sqlx.Stmt
-	uiDefaults map[string]interface{}
+	uiDefaults map[string]any
 	cache      *lrucache.Cache
 	lock       sync.RWMutex
 }
@@ -51,22 +52,18 @@ func GetUserCfgRepo() *UserCfgRepo {
 // Return the personalised UI config for the currently authenticated
 // user or return the plain default config.
-func (uCfg *UserCfgRepo) GetUIConfig(user *schema.User) (map[string]interface{}, error) {
+func (uCfg *UserCfgRepo) GetUIConfig(user *schema.User) (map[string]any, error) {
 	if user == nil {
 		uCfg.lock.RLock()
-		copy := make(map[string]interface{}, len(uCfg.uiDefaults))
-		for k, v := range uCfg.uiDefaults {
-			copy[k] = v
-		}
+		copy := make(map[string]any, len(uCfg.uiDefaults))
+		maps.Copy(copy, uCfg.uiDefaults)
 		uCfg.lock.RUnlock()

 		return copy, nil
 	}
-	data := uCfg.cache.Get(user.Username, func() (interface{}, time.Duration, int) {
-		uiconfig := make(map[string]interface{}, len(uCfg.uiDefaults))
-		for k, v := range uCfg.uiDefaults {
-			uiconfig[k] = v
-		}
+	data := uCfg.cache.Get(user.Username, func() (any, time.Duration, int) {
+		uiconfig := make(map[string]any, len(uCfg.uiDefaults))
+		maps.Copy(uiconfig, uCfg.uiDefaults)

 		rows, err := uCfg.Lookup.Query(user.Username)
 		if err != nil {
@@ -83,7 +80,7 @@ func (uCfg *UserCfgRepo) GetUIConfig(user *schema.User) (map[string]interface{},
 			return err, 0, 0
 		}

-		var val interface{}
+		var val any
 		if err := json.Unmarshal([]byte(rawval), &val); err != nil {
 			cclog.Warn("Error while unmarshaling raw user uiconfig json")
 			return err, 0, 0
@@ -104,7 +101,7 @@ func (uCfg *UserCfgRepo) GetUIConfig(user *schema.User) (map[string]interface{},
 		return nil, err
 	}

-	return data.(map[string]interface{}), nil
+	return data.(map[string]any), nil
 }

 // If the context does not have a user, update the global ui configuration
@@ -115,7 +112,7 @@ func (uCfg *UserCfgRepo) UpdateConfig(
 	user *schema.User,
 ) error {
 	if user == nil {
-		var val interface{}
+		var val any
 		if err := json.Unmarshal([]byte(value), &val); err != nil {
 			cclog.Warn("Error while unmarshaling raw user config json")
 			return err
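A small sketch, separate from the commit, of the go1.21 maps.Copy call that replaces the hand-written loops above: it inserts every key/value pair of the source map into the destination, overwriting existing keys, and, like the loops it replaces, copies values shallowly. The example keys are made up.

package main

import (
	"fmt"
	"maps"
)

func main() {
	defaults := map[string]any{
		"plot_general_colorscheme": "viridis", // hypothetical UI defaults
		"job_list_use_paging":      false,
	}

	// Equivalent to: for k, v := range defaults { uiconfig[k] = v }
	uiconfig := make(map[string]any, len(defaults))
	maps.Copy(uiconfig, defaults)

	uiconfig["job_list_use_paging"] = true // per-user override; defaults stays untouched
	fmt.Println(defaults, uiconfig)
}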

View File

@@ -10,6 +10,7 @@ import (
"testing"
"github.com/ClusterCockpit/cc-backend/internal/config"
ccconf "github.com/ClusterCockpit/cc-lib/ccConfig"
cclog "github.com/ClusterCockpit/cc-lib/ccLogger"
"github.com/ClusterCockpit/cc-lib/schema"
_ "github.com/mattn/go-sqlite3"
@@ -17,17 +18,16 @@ import (
 func setupUserTest(t *testing.T) *UserCfgRepo {
 	const testconfig = `{
-	"addr": "0.0.0.0:8080",
+	"main": {
+		"addr": "0.0.0.0:8080",
+		"apiAllowedIPs": [
+			"*"
+		]
+	},
 	"archive": {
 		"kind": "file",
 		"path": "./var/job-archive"
 	},
-	"jwts": {
-		"max-age": "2m"
-	},
-	"apiAllowedIPs": [
-		"*"
-	],
 	"clusters": [
 		{
 			"name": "testcluster",
@@ -36,7 +36,8 @@ func setupUserTest(t *testing.T) *UserCfgRepo {
"numNodes": { "from": 1, "to": 64 },
"duration": { "from": 0, "to": 86400 },
"startTime": { "from": "2022-01-01T00:00:00Z", "to": null }
} } ]
}
}]
}`
cclog.Init("info", true)
@@ -53,7 +54,19 @@ func setupUserTest(t *testing.T) *UserCfgRepo {
 		t.Fatal(err)
 	}

-	config.Init(cfgFilePath)
+	ccconf.Init(cfgFilePath)
+
+	// Load and check main configuration
+	if cfg := ccconf.GetPackageConfig("main"); cfg != nil {
+		if clustercfg := ccconf.GetPackageConfig("clusters"); clustercfg != nil {
+			config.Init(cfg, clustercfg)
+		} else {
+			t.Fatal("Cluster configuration must be present")
+		}
+	} else {
+		t.Fatal("Main configuration must be present")
+	}
 	return GetUserCfgRepo()
 }
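A toy re-implementation, not cc-lib code, of the lookup pattern the test now relies on: Init parses the config file once into named top-level sections, and GetPackageConfig returns one section's raw JSON, nil when the section is absent. Only the Init/GetPackageConfig names and the nil checks are taken from the diff; everything else is assumed for illustration.

package main

import (
	"encoding/json"
	"fmt"
)

// sections holds each top-level key of the config file as raw JSON,
// mirroring how the test expects "main" and "clusters" to be retrievable.
var sections map[string]json.RawMessage

func initConfig(raw []byte) error {
	return json.Unmarshal(raw, &sections)
}

func getPackageConfig(name string) json.RawMessage {
	return sections[name] // nil if the section is missing
}

func main() {
	cfg := []byte(`{
		"main": {"addr": "0.0.0.0:8080", "apiAllowedIPs": ["*"]},
		"clusters": [{"name": "testcluster"}]
	}`)
	if err := initConfig(cfg); err != nil {
		panic(err)
	}
	if mainCfg := getPackageConfig("main"); mainCfg != nil {
		fmt.Println("main section:", string(mainCfg))
	} else {
		panic("Main configuration must be present")
	}
}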