Port to new job structs

Backup commit: Does not build.
2025-08-05 10:23:54 +02:00
parent 44cd8d258d
commit 86453e7e11
11 changed files with 195 additions and 1312 deletions

View File

@@ -297,7 +297,6 @@ func TestRestApi(t *testing.T) {
 		job.NumNodes != 1 ||
 		job.NumHWThreads != 8 ||
 		job.NumAcc != 0 ||
-		job.Exclusive != 1 ||
 		job.MonitoringStatus != 1 ||
 		job.SMT != 1 ||
 		!reflect.DeepEqual(job.Resources, []*schema.Resource{{Hostname: "host123", HWThreads: []int{0, 1, 2, 3, 4, 5, 6, 7}}}) ||
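The test no longer checks the dropped `Exclusive` tri-state integer. For orientation, a minimal sketch of how the replacement field can be modeled as a string-backed enum; the value set comes from the `shared` CHECK constraint in the migration below, while the type and constant names are illustrative, not taken from this commit:

```go
// Illustrative sketch only; names are not from this commit.
type JobShared string

const (
	SharedNone       JobShared = "none"        // default, replaces Exclusive: 1
	SharedSingleUser JobShared = "single_user"
	SharedMultiUser  JobShared = "multi_user"
)
```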

View File

@@ -647,7 +647,7 @@ func (api *RestApi) removeTags(rw http.ResponseWriter, r *http.Request) {
 // @router /api/jobs/start_job/ [post]
 func (api *RestApi) startJob(rw http.ResponseWriter, r *http.Request) {
 	req := schema.Job{
-		Exclusive: 1,
+		Shared: "none",
 		MonitoringStatus: schema.MonitoringStatusRunningOrArchiving,
 	}
 	if err := decode(r.Body, &req); err != nil {
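Seeding `req` with `Shared: "none"` works because `encoding/json` only overwrites fields that actually appear in the request body. A self-contained illustration of that default-then-decode pattern (struct and payload invented for the example):

```go
package main

import (
	"encoding/json"
	"fmt"
	"strings"
)

type job struct {
	JobID  int64  `json:"jobId"`
	Shared string `json:"shared"`
}

func main() {
	// Defaults set before decoding act as fallbacks: Decode only
	// touches fields present in the request body.
	req := job{Shared: "none"}
	body := strings.NewReader(`{"jobId": 123}`)
	if err := json.NewDecoder(body).Decode(&req); err != nil {
		panic(err)
	}
	fmt.Println(req.Shared, req.JobID) // "none 123": the default survived
}
```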

File diff suppressed because it is too large

View File

@@ -2,7 +2,7 @@ package graph
 // This file will be automatically regenerated based on the schema, any resolver implementations
 // will be copied through when generating and any unknown code will be moved to the end.
-// Code generated by github.com/99designs/gqlgen version v0.17.76
+// Code generated by github.com/99designs/gqlgen version v0.17.78
 import (
 	"context"
@@ -35,6 +35,11 @@ func (r *jobResolver) StartTime(ctx context.Context, obj *schema.Job) (*time.Tim
 	return &timestamp, nil
 }
 
+// Exclusive is the resolver for the exclusive field.
+func (r *jobResolver) Exclusive(ctx context.Context, obj *schema.Job) (int, error) {
+	panic(fmt.Errorf("not implemented: Exclusive - exclusive"))
+}
+
 // Tags is the resolver for the tags field.
 func (r *jobResolver) Tags(ctx context.Context, obj *schema.Job) ([]*schema.Tag, error) {
 	return r.Repo.GetTags(repository.GetUserFromContext(ctx), obj.ID)
@@ -43,7 +48,7 @@ func (r *jobResolver) Tags(ctx context.Context, obj *schema.Job) ([]*schema.Tag,
 // ConcurrentJobs is the resolver for the concurrentJobs field.
 func (r *jobResolver) ConcurrentJobs(ctx context.Context, obj *schema.Job) (*model.JobLinkResultList, error) {
 	// FIXME: Make the hardcoded duration configurable
-	if obj.Exclusive != 1 && obj.Duration > 600 {
+	if obj.Shared != "none" && obj.Duration > 600 {
 		return r.Repo.FindConcurrentJobs(ctx, obj)
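The generated `Exclusive` resolver above panics because the GraphQL schema still declares the field while the Go model no longer carries it. If the legacy field were kept for existing API clients, its value could be derived from the new string. A hypothetical helper; only the 1 ↔ "none" pair is confirmed by this diff, the other two mappings are assumptions:

```go
// Hypothetical helper (not in this commit): serve the legacy exclusive
// int from the new shared string. Only 1 <-> "none" is confirmed by the
// surrounding diff; the 2 and 0 cases are assumed.
func sharedToExclusive(shared string) (int, error) {
	switch shared {
	case "none":
		return 1, nil
	case "single_user":
		return 2, nil
	case "multi_user":
		return 0, nil
	default:
		return 0, fmt.Errorf("unknown shared value %q", shared)
	}
}
```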
}

View File

@@ -43,7 +43,7 @@ func HandleImportFlag(flag string) error {
 	dec := json.NewDecoder(bytes.NewReader(raw))
 	dec.DisallowUnknownFields()
 	job := schema.Job{
-		Exclusive: 1,
+		Shared: "none",
 		MonitoringStatus: schema.MonitoringStatusRunningOrArchiving,
 	}
 	if err = dec.Decode(&job); err != nil {
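Because this decoder calls `DisallowUnknownFields`, old import files that still carry an `exclusive` key now fail hard instead of being silently ignored. A compact, runnable illustration (stand-in struct and payload invented for the example):

```go
package main

import (
	"encoding/json"
	"fmt"
	"strings"
)

// Minimal stand-in for the ported job struct: "exclusive" is gone.
type job struct {
	Shared string `json:"shared"`
}

func main() {
	dec := json.NewDecoder(strings.NewReader(`{"exclusive": 1}`))
	dec.DisallowUnknownFields()
	var j job
	// A leftover "exclusive" key now becomes a hard decode error.
	fmt.Println(dec.Decode(&j)) // json: unknown field "exclusive"
}
```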

View File

@@ -74,7 +74,7 @@ func scanJob(row interface{ Scan(...any) error }) (*schema.Job, error) {
 	if err := row.Scan(
 		&job.ID, &job.JobID, &job.User, &job.Project, &job.Cluster, &job.SubCluster,
 		&job.StartTime, &job.Partition, &job.ArrayJobId, &job.NumNodes, &job.NumHWThreads,
-		&job.NumAcc, &job.Exclusive, &job.MonitoringStatus, &job.SMT, &job.State,
+		&job.NumAcc, &job.Shared, &job.MonitoringStatus, &job.SMT, &job.State,
 		&job.Duration, &job.Walltime, &job.RawResources, &job.RawFootprint, &job.Energy); err != nil {
 		cclog.Warnf("Error while scanning rows (Job): %v", err)
 		return nil, err
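`row.Scan` binds purely by position, so replacing `&job.Exclusive` with `&job.Shared` only works if the SELECT column list changes in lockstep. A rough sketch of a matching column list; names not visible in this diff (e.g. `hpc_user`) are inferred from the migration below and should be treated as assumptions:

```go
// Assumed SELECT column list matching the Scan order above; any name
// not visible in this commit's diff is a guess.
var jobColumns = []string{
	"job.id", "job.job_id", "job.hpc_user", "job.project", "job.hpc_cluster",
	"job.subcluster", "job.start_time", "job.cluster_partition", "job.array_job_id",
	"job.num_nodes", "job.num_hwthreads", "job.num_acc", "job.shared",
	"job.monitoring_status", "job.smt", "job.job_state", "job.duration",
	"job.walltime", "job.resources", "job.footprint", "job.energy",
}
```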

View File

@@ -1,9 +1,10 @@
 CREATE TABLE "job_cache" (
 	id INTEGER PRIMARY KEY,
 	job_id BIGINT NOT NULL,
-	cluster VARCHAR(255) NOT NULL,
+	hpc_cluster VARCHAR(255) NOT NULL,
 	subcluster VARCHAR(255) NOT NULL,
-	start_time BIGINT NOT NULL, -- Unix timestamp
+	submit_time BIGINT NOT NULL, -- Unix timestamp
+	start_time BIGINT NOT NULL DEFAULT 0, -- Unix timestamp
 	hpc_user VARCHAR(255) NOT NULL,
 	project VARCHAR(255) NOT NULL,
 	cluster_partition VARCHAR(255),
@@ -12,8 +13,9 @@ CREATE TABLE "job_cache" (
 	walltime INT NOT NULL,
 	job_state VARCHAR(255) NOT NULL
 	CHECK (job_state IN (
-		'running', 'completed', 'failed', 'cancelled',
-		'stopped', 'timeout', 'preempted', 'out_of_memory'
+		'boot_fail', 'cancelled', 'completed', 'deadline',
+		'failed', 'node_fail', 'out-of-memory', 'pending',
+		'preempted', 'running', 'suspended', 'timeout'
 	)),
 	meta_data TEXT, -- JSON
 	resources TEXT NOT NULL, -- JSON
@@ -21,7 +23,8 @@ CREATE TABLE "job_cache" (
 	num_hwthreads INT,
 	num_acc INT,
 	smt TINYINT NOT NULL DEFAULT 1 CHECK (smt IN (0, 1)),
-	exclusive TINYINT NOT NULL DEFAULT 1 CHECK (exclusive IN (0, 1, 2)),
+	shared TEXT NOT NULL
+	CHECK (shared IN ("none", "single_user", "multi_user")),
 	monitoring_status TINYINT NOT NULL DEFAULT 1
 	CHECK (monitoring_status IN (0, 1, 2, 3)),
 	energy REAL NOT NULL DEFAULT 0.0,
@@ -29,3 +32,43 @@ CREATE TABLE "job_cache" (
 	footprint TEXT DEFAULT NULL,
 	UNIQUE (job_id, cluster, start_time)
 );
+
+CREATE TABLE "job_new" (
+	id INTEGER PRIMARY KEY,
+	job_id BIGINT NOT NULL,
+	hpc_cluster TEXT NOT NULL,
+	subcluster TEXT NOT NULL,
+	submit_time BIGINT NOT NULL DEFAULT 0, -- Unix timestamp
+	start_time BIGINT NOT NULL DEFAULT 0, -- Unix timestamp
+	hpc_user TEXT NOT NULL,
+	project TEXT NOT NULL,
+	cluster_partition TEXT,
+	array_job_id BIGINT,
+	duration INT NOT NULL,
+	walltime INT NOT NULL,
+	job_state TEXT NOT NULL
+	CHECK (job_state IN (
+		'boot_fail', 'cancelled', 'completed', 'deadline',
+		'failed', 'node_fail', 'out-of-memory', 'pending',
+		'preempted', 'running', 'suspended', 'timeout'
+	)),
+	meta_data TEXT, -- JSON
+	resources TEXT NOT NULL, -- JSON
+	num_nodes INT NOT NULL,
+	num_hwthreads INT,
+	num_acc INT,
+	smt INT NOT NULL DEFAULT 1,
+	shared TEXT NOT NULL
+	CHECK (shared IN ("none", "single_user", "multi_user")),
+	monitoring_status TINYINT NOT NULL DEFAULT 1
+	CHECK (monitoring_status IN (0, 1, 2, 3)),
+	energy REAL NOT NULL DEFAULT 0.0,
+	energy_footprint TEXT DEFAULT NULL,
+	footprint TEXT DEFAULT NULL,
+	UNIQUE (job_id, cluster, start_time)
+);
+
+ALTER TABLE job RENAME COLUMN cluster TO hpc_cluster;
+INSERT INTO job_new SELECT * FROM job;
+DROP TABLE job;
+ALTER TABLE job_new RENAME TO job;
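The create-copy-drop-rename dance is the usual SQLite workaround for constraints that cannot be altered in place. Note, though, that the bare `INSERT INTO job_new SELECT * FROM job` assumes identical column order and would feed old `exclusive` integers into the CHECKed `shared` column. A sketch of an explicit copy that also supplies the new `submit_time`, assuming the old `job` table mirrors the pre-migration `job_cache` layout and using the same assumed int-to-string mapping as above:

```go
// Hypothetical explicit copy (not in this commit); the old table's
// column layout and the exclusive -> shared mapping are assumptions.
const copyJobs = `
INSERT INTO job_new (
	id, job_id, hpc_cluster, subcluster, submit_time, start_time,
	hpc_user, project, cluster_partition, array_job_id, duration, walltime,
	job_state, meta_data, resources, num_nodes, num_hwthreads, num_acc,
	smt, shared, monitoring_status, energy, energy_footprint, footprint)
SELECT
	id, job_id, hpc_cluster, subcluster, 0, start_time,
	hpc_user, project, cluster_partition, array_job_id, duration, walltime,
	job_state, meta_data, resources, num_nodes, num_hwthreads, num_acc,
	smt,
	CASE exclusive WHEN 1 THEN 'none' WHEN 2 THEN 'single_user' ELSE 'multi_user' END,
	monitoring_status, energy, energy_footprint, footprint
FROM job;`
```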

View File

@@ -240,13 +240,13 @@ func (t *JobClassTagger) Match(job *schema.Job) {
 	// Initialize environment
 	env["job"] = map[string]any{
-		"exclusive": job.Exclusive,
-		"duration":  job.Duration,
-		"numCores":  job.NumHWThreads,
-		"numNodes":  job.NumNodes,
-		"jobState":  job.State,
-		"numAcc":    job.NumAcc,
-		"smt":       job.SMT,
+		"shared":   job.Shared,
+		"duration": job.Duration,
+		"numCores": job.NumHWThreads,
+		"numNodes": job.NumNodes,
+		"jobState": job.State,
+		"numAcc":   job.NumAcc,
+		"smt":      job.SMT,
 	}
// add metrics to env
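With the env key renamed from `exclusive` to `shared`, classifier rules have to compare against the new string values. A self-contained example of evaluating such a rule, assuming the tagger hands this map to github.com/expr-lang/expr (the rule text itself is invented):

```go
package main

import (
	"fmt"

	"github.com/expr-lang/expr"
)

func main() {
	// Same shape as the tagger's env["job"] map above.
	env := map[string]any{
		"job": map[string]any{
			"shared":   "none",
			"duration": 7200,
			"numNodes": 1,
		},
	}

	// A rule written against the new field name instead of job.exclusive.
	program, err := expr.Compile(`job.shared == "none" && job.duration > 3600`, expr.Env(env))
	if err != nil {
		panic(err)
	}
	out, err := expr.Run(program, env)
	if err != nil {
		panic(err)
	}
	fmt.Println(out) // true
}
```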