// Copyright (C) 2022 NHR@FAU, University Erlangen-Nuremberg.
// All rights reserved.
// Use of this source code is governed by a MIT-style
// license that can be found in the LICENSE file.
package repository

import (
	"bytes"
	"database/sql"
	"encoding/json"
	"fmt"
	"os"
	"strings"
	"time"

	"github.com/ClusterCockpit/cc-backend/internal/config"
	"github.com/ClusterCockpit/cc-backend/pkg/archive"
	"github.com/ClusterCockpit/cc-backend/pkg/log"
	"github.com/ClusterCockpit/cc-backend/pkg/schema"
)

// `AUTO_INCREMENT` is in a comment because of this hack:
// https://stackoverflow.com/a/41028314 (sqlite creates unique ids automatically)
const JobsDBSchema string = `
	DROP TABLE IF EXISTS jobtag;
	DROP TABLE IF EXISTS job;
	DROP TABLE IF EXISTS tag;

	CREATE TABLE job (
		id         INTEGER PRIMARY KEY /*!40101 AUTO_INCREMENT */,
		job_id     BIGINT NOT NULL,
		cluster    VARCHAR(255) NOT NULL,
		subcluster VARCHAR(255) NOT NULL,
		start_time BIGINT NOT NULL, -- Unix timestamp

		user    VARCHAR(255) NOT NULL,
		project VARCHAR(255) NOT NULL,
		` + "`partition`" + ` VARCHAR(255) NOT NULL, -- partition is a keyword in mysql -.-
		array_job_id BIGINT NOT NULL,
		duration     INT NOT NULL DEFAULT 0,
		walltime     INT NOT NULL DEFAULT 0,
		job_state    VARCHAR(255) NOT NULL CHECK(job_state IN ('running', 'completed', 'failed', 'cancelled', 'stopped', 'timeout', 'preempted', 'out_of_memory')),
		meta_data    TEXT,          -- JSON
		resources    TEXT NOT NULL, -- JSON

		num_nodes         INT NOT NULL,
		num_hwthreads     INT NOT NULL,
		num_acc           INT NOT NULL,
		smt               TINYINT NOT NULL DEFAULT 1 CHECK(smt               IN (0, 1   )),
		exclusive         TINYINT NOT NULL DEFAULT 1 CHECK(exclusive         IN (0, 1, 2)),
		monitoring_status TINYINT NOT NULL DEFAULT 1 CHECK(monitoring_status IN (0, 1, 2, 3)),

		mem_used_max        REAL NOT NULL DEFAULT 0.0,
		flops_any_avg       REAL NOT NULL DEFAULT 0.0,
		mem_bw_avg          REAL NOT NULL DEFAULT 0.0,
		load_avg            REAL NOT NULL DEFAULT 0.0,
		net_bw_avg          REAL NOT NULL DEFAULT 0.0,
		net_data_vol_total  REAL NOT NULL DEFAULT 0.0,
		file_bw_avg         REAL NOT NULL DEFAULT 0.0,
		file_data_vol_total REAL NOT NULL DEFAULT 0.0);

	CREATE TABLE tag (
		id       INTEGER PRIMARY KEY,
		tag_type VARCHAR(255) NOT NULL,
		tag_name VARCHAR(255) NOT NULL,
		CONSTRAINT be_unique UNIQUE (tag_type, tag_name));

	CREATE TABLE jobtag (
		job_id INTEGER,
		tag_id INTEGER,
		PRIMARY KEY (job_id, tag_id),
		FOREIGN KEY (job_id) REFERENCES job (id) ON DELETE CASCADE,
		FOREIGN KEY (tag_id) REFERENCES tag (id) ON DELETE CASCADE);
`
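
// A note on the /*!40101 AUTO_INCREMENT */ trick above: MySQL executes the
// contents of such version-gated comments (here: for MySQL >= 4.1.1), while
// SQLite treats them as plain comments and instead uses its implicit rowid
// auto-increment for INTEGER PRIMARY KEY columns.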

// Indexes are created after the job-archive is traversed for faster inserts.
const JobsDbIndexes string = `
	CREATE INDEX job_stats        ON job (cluster,subcluster,user);
	CREATE INDEX job_by_user      ON job (user);
	CREATE INDEX job_by_starttime ON job (start_time);
	CREATE INDEX job_by_job_id    ON job (job_id);
	CREATE INDEX job_by_state     ON job (job_state);
`
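
// Like most B-tree indexes, the composite job_stats index also serves queries
// that filter on a leftmost prefix of its columns, i.e. on (cluster) or on
// (cluster, subcluster) alone; filters on user alone fall back to job_by_user.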

const NamedJobInsert string = `INSERT INTO job (
	job_id, user, project, cluster, subcluster, ` + "`partition`" + `, array_job_id, num_nodes, num_hwthreads, num_acc,
	exclusive, monitoring_status, smt, job_state, start_time, duration, walltime, resources, meta_data,
	mem_used_max, flops_any_avg, mem_bw_avg, load_avg, net_bw_avg, net_data_vol_total, file_bw_avg, file_data_vol_total
) VALUES (
	:job_id, :user, :project, :cluster, :subcluster, :partition, :array_job_id, :num_nodes, :num_hwthreads, :num_acc,
	:exclusive, :monitoring_status, :smt, :job_state, :start_time, :duration, :walltime, :resources, :meta_data,
	:mem_used_max, :flops_any_avg, :mem_bw_avg, :load_avg, :net_bw_avg, :net_data_vol_total, :file_bw_avg, :file_data_vol_total
);`
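
// The :name placeholders are sqlx named parameters: NamedExec and PrepareNamed
// bind them to the corresponding fields of schema.Job (via its `db` struct
// tags), so this column list has to stay in sync with that struct.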

// Import all jobs specified as `<path-to-meta.json>:<path-to-data.json>,...`
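// For example (hypothetical file names):
// "job1-meta.json:job1-data.json,job2-meta.json:job2-data.json"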
func HandleImportFlag(flag string) error {
	for _, pair := range strings.Split(flag, ",") {
		files := strings.Split(pair, ":")
		if len(files) != 2 {
			return fmt.Errorf("REPOSITORY/INIT > invalid import flag format")
		}

		raw, err := os.ReadFile(files[0])
		if err != nil {
			log.Warn("Error while reading metadata file for import")
			return err
		}

		if config.Keys.Validate {
			if err := schema.Validate(schema.Meta, bytes.NewReader(raw)); err != nil {
				return fmt.Errorf("REPOSITORY/INIT > validate job meta: %v", err)
			}
		}
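		// DisallowUnknownFields makes decoding fail on JSON keys that are not
		// part of schema.JobMeta, catching typos in hand-written import files.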
		dec := json.NewDecoder(bytes.NewReader(raw))
		dec.DisallowUnknownFields()
		jobMeta := schema.JobMeta{BaseJob: schema.JobDefaults}
		if err := dec.Decode(&jobMeta); err != nil {
			log.Warn("Error while decoding raw json metadata for import")
			return err
		}

		raw, err = os.ReadFile(files[1])
		if err != nil {
			log.Warn("Error while reading jobdata file for import")
			return err
		}

		if config.Keys.Validate {
			if err := schema.Validate(schema.Data, bytes.NewReader(raw)); err != nil {
				return fmt.Errorf("REPOSITORY/INIT > validate job data: %v", err)
			}
		}
		dec = json.NewDecoder(bytes.NewReader(raw))
		dec.DisallowUnknownFields()
		jobData := schema.JobData{}
		if err := dec.Decode(&jobData); err != nil {
			log.Warn("Error while decoding raw json jobdata for import")
			return err
		}

		if err := SanityChecks(&jobMeta.BaseJob); err != nil {
			log.Warn("BaseJob SanityChecks failed")
			return err
		}
		jobMeta.MonitoringStatus = schema.MonitoringStatusArchivingSuccessful

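		// Find is expected to return sql.ErrNoRows when no matching job
		// exists; any other outcome (an unexpected error, or an actual hit)
		// aborts the import.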
		if job, err := GetJobRepository().Find(&jobMeta.JobID, &jobMeta.Cluster, &jobMeta.StartTime); err != sql.ErrNoRows {
			if err != nil {
				log.Warn("Error while finding job in jobRepository")
				return err
			}

			return fmt.Errorf("REPOSITORY/INIT > a job with that jobId, cluster and startTime already exists (dbid: %d)", job.ID)
		}

		job := schema.Job{
			BaseJob:       jobMeta.BaseJob,
			StartTime:     time.Unix(jobMeta.StartTime, 0),
			StartTimeUnix: jobMeta.StartTime,
		}

		// TODO: Other metrics...
		job.FlopsAnyAvg = loadJobStat(&jobMeta, "flops_any")
		job.MemBwAvg = loadJobStat(&jobMeta, "mem_bw")
		job.NetBwAvg = loadJobStat(&jobMeta, "net_bw")
		job.FileBwAvg = loadJobStat(&jobMeta, "file_bw")
		job.RawResources, err = json.Marshal(job.Resources)
		if err != nil {
			log.Warn("Error while marshaling job resources")
			return err
		}
		job.RawMetaData, err = json.Marshal(job.MetaData)
		if err != nil {
			log.Warn("Error while marshaling job metadata")
			return err
		}

		if err := SanityChecks(&job.BaseJob); err != nil {
			log.Warn("BaseJob SanityChecks failed")
			return err
		}

		if err := archive.GetHandle().ImportJob(&jobMeta, &jobData); err != nil {
			log.Error("Error while importing job")
			return err
		}

		res, err := GetConnection().DB.NamedExec(NamedJobInsert, job)
		if err != nil {
			log.Warn("Error while NamedJobInsert")
			return err
		}

		id, err := res.LastInsertId()
		if err != nil {
			log.Warn("Error while getting last insert ID")
			return err
		}

		for _, tag := range job.Tags {
			if _, err := GetJobRepository().AddTagOrCreate(id, tag.Type, tag.Name); err != nil {
				log.Error("Error while adding or creating tag")
				return err
			}
		}

		log.Infof("successfully imported a new job (jobId: %d, cluster: %s, dbid: %d)", job.JobID, job.Cluster, id)
	}
	return nil
}
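
// A hypothetical call site (the actual flag wiring lives in the caller):
//
//	if importFlag != "" {
//		if err := repository.HandleImportFlag(importFlag); err != nil {
//			log.Errorf("import failed: %v", err)
//		}
//	}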

// InitDB deletes the tables "job", "tag" and "jobtag" from the database and
// repopulates them using the jobs found in `archive`.
func InitDB() error {
	db := GetConnection()
	starttime := time.Now()
	log.Print("Building job table...")

	// Basic database structure:
	_, err := db.DB.Exec(JobsDBSchema)
	if err != nil {
		log.Error("Error while initializing basic DB structure")
		return err
	}

	// Inserts are bundled into transactions because in sqlite,
	// that speeds up inserts A LOT.
	tx, err := db.DB.Beginx()
	if err != nil {
		log.Warn("Error while starting transaction")
		return err
	}

	stmt, err := tx.PrepareNamed(NamedJobInsert)
	if err != nil {
		log.Warn("Error while preparing namedJobInsert")
		return err
	}
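	// tags maps "<name>:<type>" to the tag's database id so that each
	// distinct tag is inserted only once.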
	tags := make(map[string]int64)

	// Not using log.Print because we want the line to end with `\r` and
	// this function is only ever called when a special command line flag
	// is passed anyway.
	fmt.Printf("%d jobs inserted...\r", 0)

	ar := archive.GetHandle()
	i := 0
	errorsOccurred := 0

	for jobMeta := range ar.Iter() {

		// Bundle 10 inserts into one transaction for better performance:
		if i%10 == 0 {
			if tx != nil {
				if err := tx.Commit(); err != nil {
					log.Warn("Error while committing transactions for jobMeta")
					return err
				}
			}

			tx, err = db.DB.Beginx()
			if err != nil {
				log.Warn("Error while starting transaction for jobMeta")
				return err
			}
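			// Re-bind the prepared statement to the new transaction: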
			stmt = tx.NamedStmt(stmt)
			fmt.Printf("%d jobs inserted...\r", i)
		}

		jobMeta.MonitoringStatus = schema.MonitoringStatusArchivingSuccessful
		job := schema.Job{
			BaseJob:       jobMeta.BaseJob,
			StartTime:     time.Unix(jobMeta.StartTime, 0),
			StartTimeUnix: jobMeta.StartTime,
		}

		// TODO: Other metrics...
		job.FlopsAnyAvg = loadJobStat(jobMeta, "flops_any")
		job.MemBwAvg = loadJobStat(jobMeta, "mem_bw")
		job.NetBwAvg = loadJobStat(jobMeta, "net_bw")
		job.FileBwAvg = loadJobStat(jobMeta, "file_bw")

		job.RawResources, err = json.Marshal(job.Resources)
		if err != nil {
			log.Errorf("repository initDB(): %v", err)
			errorsOccurred++
			continue
		}

		job.RawMetaData, err = json.Marshal(job.MetaData)
		if err != nil {
			log.Errorf("repository initDB(): %v", err)
			errorsOccurred++
			continue
		}

		if err := SanityChecks(&job.BaseJob); err != nil {
			log.Errorf("repository initDB(): %v", err)
			errorsOccurred++
			continue
		}

		res, err := stmt.Exec(job)
		if err != nil {
			log.Errorf("repository initDB(): %v", err)
			errorsOccurred++
			continue
		}

		id, err := res.LastInsertId()
		if err != nil {
			log.Errorf("repository initDB(): %v", err)
			errorsOccurred++
			continue
		}

		for _, tag := range job.Tags {
			tagstr := tag.Name + ":" + tag.Type
			tagId, ok := tags[tagstr]
			if !ok {
				res, err := tx.Exec(`INSERT INTO tag (tag_name, tag_type) VALUES (?, ?)`, tag.Name, tag.Type)
				if err != nil {
					log.Errorf("Error while inserting tag into tag table: %v (Type %v)", tag.Name, tag.Type)
					return err
				}
				tagId, err = res.LastInsertId()
				if err != nil {
					log.Warn("Error while getting last insert ID")
					return err
				}
				tags[tagstr] = tagId
			}

			if _, err := tx.Exec(`INSERT INTO jobtag (job_id, tag_id) VALUES (?, ?)`, id, tagId); err != nil {
				log.Errorf("Error while inserting jobtag into jobtag table: %v (TagID %v)", id, tagId)
				return err
			}
		}

		if err == nil {
			i++
		}
	}

	if errorsOccurred > 0 {
		log.Warnf("Errors in import of %d jobs!", errorsOccurred)
	}

	if err := tx.Commit(); err != nil {
		log.Warn("Error while committing SQL transactions")
		return err
	}

	// Create indexes after inserts so that they do not
	// need to be continually updated.
	if _, err := db.DB.Exec(JobsDbIndexes); err != nil {
		log.Warn("Error while creating indices after inserts")
		return err
	}

	log.Printf("A total of %d jobs have been registered in %.3f seconds.\n", i, time.Since(starttime).Seconds())
	return nil
}

// SanityChecks validates a job before insertion: the cluster must exist, the
// job state must be valid, and the resource counts must be consistent.
// This function also sets the subcluster if necessary!
func SanityChecks(job *schema.BaseJob) error {
	if c := archive.GetCluster(job.Cluster); c == nil {
		return fmt.Errorf("no such cluster: %v", job.Cluster)
	}
	if err := archive.AssignSubCluster(job); err != nil {
		log.Warn("Error while assigning subcluster to job")
		return err
	}
	if !job.State.Valid() {
		return fmt.Errorf("not a valid job state: %v", job.State)
	}
	if len(job.Resources) == 0 || len(job.User) == 0 {
		return fmt.Errorf("'resources' and 'user' should not be empty")
	}
	if job.NumAcc < 0 || job.NumHWThreads < 0 || job.NumNodes < 1 {
		return fmt.Errorf("'numNodes', 'numAcc' or 'numHWThreads' invalid")
	}
	if len(job.Resources) != int(job.NumNodes) {
		return fmt.Errorf("len(resources) does not equal numNodes (%d vs %d)", len(job.Resources), job.NumNodes)
	}

	return nil
}
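
// loadJobStat returns the archived average value of the given metric for a
// job, or 0.0 if that metric is not part of the job's statistics.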
func loadJobStat(job *schema.JobMeta, metric string) float64 {
	if stats, ok := job.Statistics[metric]; ok {
		return stats.Avg
	}

	return 0.0
}