// Copyright (C) NHR@FAU, University Erlangen-Nuremberg.
// All rights reserved.
// Use of this source code is governed by a MIT-style
// license that can be found in the LICENSE file.
package importer

import (
	"bytes"
	"encoding/json"
	"fmt"
	"os"
	"strings"
	"time"

	"github.com/ClusterCockpit/cc-backend/internal/config"
	"github.com/ClusterCockpit/cc-backend/internal/repository"
	"github.com/ClusterCockpit/cc-backend/pkg/archive"
	"github.com/ClusterCockpit/cc-backend/pkg/log"
	"github.com/ClusterCockpit/cc-backend/pkg/schema"
)

// HandleImportFlag imports all jobs specified as
// `<path-to-meta.json>:<path-to-data.json>,...`.
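//
// For example (hypothetical file names):
//
//	HandleImportFlag("meta.json:data.json,meta2.json:data2.json")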
func HandleImportFlag(flag string) error {
	r := repository.GetJobRepository()

	for _, pair := range strings.Split(flag, ",") {
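		// Each pair names the job's meta file and its metric-data file.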
		files := strings.Split(pair, ":")
		if len(files) != 2 {
			return fmt.Errorf("REPOSITORY/INIT > invalid import flag format")
		}

		raw, err := os.ReadFile(files[0])
		if err != nil {
			log.Warn("Error while reading metadata file for import")
			return err
		}
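
		// Optionally validate the raw metadata against the JSON schema before decoding.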
		if config.Keys.Validate {
			if err = schema.Validate(schema.Meta, bytes.NewReader(raw)); err != nil {
				return fmt.Errorf("REPOSITORY/INIT > validate job meta: %v", err)
			}
		}
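		// Decode strictly: unknown fields in the meta file are an error.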
		dec := json.NewDecoder(bytes.NewReader(raw))
		dec.DisallowUnknownFields()
		jobMeta := schema.JobMeta{BaseJob: schema.JobDefaults}
		if err = dec.Decode(&jobMeta); err != nil {
			log.Warn("Error while decoding raw json metadata for import")
			return err
		}
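
		// The second file of the pair holds the job's metric data.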
		raw, err = os.ReadFile(files[1])
		if err != nil {
			log.Warn("Error while reading jobdata file for import")
			return err
		}

		if config.Keys.Validate {
			if err = schema.Validate(schema.Data, bytes.NewReader(raw)); err != nil {
				return fmt.Errorf("REPOSITORY/INIT > validate job data: %v", err)
			}
		}
		dec = json.NewDecoder(bytes.NewReader(raw))
		dec.DisallowUnknownFields()
		jobData := schema.JobData{}
		if err = dec.Decode(&jobData); err != nil {
			log.Warn("Error while decoding raw json jobdata for import")
			return err
		}

		// checkJobData(&jobData)
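
		// Imported jobs are treated as already archived, so mark them as
		// successfully archived up front.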
		jobMeta.MonitoringStatus = schema.MonitoringStatusArchivingSuccessful

		// if _, err = r.Find(&jobMeta.JobID, &jobMeta.Cluster, &jobMeta.StartTime); err != sql.ErrNoRows {
		// 	if err != nil {
		// 		log.Warn("Error while finding job in jobRepository")
		// 		return err
		// 	}
		//
		// 	return fmt.Errorf("REPOSITORY/INIT > a job with that jobId, cluster and startTime does already exist")
		// }
		//
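
		// Build the database record from the decoded metadata; the start time
		// is kept both as time.Time and as the raw Unix timestamp.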
		job := schema.Job{
			BaseJob:       jobMeta.BaseJob,
			StartTime:     time.Unix(jobMeta.StartTime, 0),
			StartTimeUnix: jobMeta.StartTime,
		}

		// TODO: Do loop for new sub structure for stats
		// job.LoadAvg = loadJobStat(&jobMeta, "cpu_load")
		// job.FlopsAnyAvg = loadJobStat(&jobMeta, "flops_any")
		// job.MemUsedMax = loadJobStat(&jobMeta, "mem_used")
		// job.MemBwAvg = loadJobStat(&jobMeta, "mem_bw")
		// job.NetBwAvg = loadJobStat(&jobMeta, "net_bw")
		// job.FileBwAvg = loadJobStat(&jobMeta, "file_bw")
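
		// Pre-marshal resources and metadata into the raw JSON columns stored
		// with the job row.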
		job.RawResources, err = json.Marshal(job.Resources)
		if err != nil {
			log.Warn("Error while marshaling job resources")
			return err
		}
		job.RawMetaData, err = json.Marshal(job.MetaData)
		if err != nil {
			log.Warn("Error while marshaling job metadata")
			return err
		}
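
		// Run basic plausibility checks on the job record before anything
		// is persisted.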
		if err = SanityChecks(&job.BaseJob); err != nil {
			log.Warn("BaseJob SanityChecks failed")
			return err
		}
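
		// Write the job to the file archive first, then insert it into the database.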
		if err = archive.GetHandle().ImportJob(&jobMeta, &jobData); err != nil {
			log.Error("Error while importing job")
			return err
		}

		id, err := r.InsertJob(&job)
		if err != nil {
			log.Warn("Error while inserting job into database")
			return err
		}
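
		// Recreate the job's tag links, creating missing tags on the fly.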
		for _, tag := range job.Tags {
			if _, err := r.AddTagOrCreate(id, tag.Type, tag.Name); err != nil {
				log.Error("Error while adding or creating tag")
				return err
			}
		}

		log.Infof("successfully imported a new job (jobId: %d, cluster: %s, dbid: %d)", job.JobID, job.Cluster, id)
	}
	return nil
}