cc-backend/tools/archive-migration/main.go


// Copyright (C) 2022 NHR@FAU, University Erlangen-Nuremberg.
// All rights reserved.
// Use of this source code is governed by a MIT-style
// license that can be found in the LICENSE file.
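
// archive-migration converts a ClusterCockpit job archive from the old,
// unversioned on-disk format into the versioned format (Version 1): cluster
// configurations, job metadata and job metric data are rewritten with
// structured units, and the destination archive is stamped with version.txt.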
package main

import (
	"bufio"
	"encoding/json"
	"errors"
	"flag"
	"fmt"
	"os"
	"path/filepath"
	"sync"

	"github.com/ClusterCockpit/cc-backend/internal/config"
	"github.com/ClusterCockpit/cc-backend/pkg/log"
	"github.com/ClusterCockpit/cc-backend/pkg/schema"
	ccunits "github.com/ClusterCockpit/cc-units"
)
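
// Version is the archive schema version produced by this tool; it is written
// to version.txt in the destination archive once the migration finishes.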
const Version = 1

var ar FsArchive
var srcPath string
var dstPath string
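
// loadJobData opens a data.json file from the source archive and decodes it
// into the old (pre-migration) JobData representation.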
func loadJobData(filename string) (*JobData, error) {
	f, err := os.Open(filename)
	if err != nil {
		return &JobData{}, fmt.Errorf("fsBackend loadJobData() - %v", err)
	}
	defer f.Close()

	return DecodeJobData(bufio.NewReader(f))
}
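
// ConvertUnitString parses a unit string from the old archive format into the
// structured schema.Unit (prefix + base) used by the new format.
// Dimensionless strings such as "CPI", "IPC", "load" and "" are passed
// through unchanged. As an illustration, "MB/s" is expected to map to
// schema.Unit{Prefix: "M", Base: "B/s"} (assuming cc-units recognizes the
// prefix and the denominator).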
func ConvertUnitString(us string) schema.Unit {
	var nu schema.Unit

	if us == "CPI" ||
		us == "IPC" ||
		us == "load" ||
		us == "" {
		nu.Base = us
		return nu
	}

	u := ccunits.NewUnit(us)
	p := u.GetPrefix()
	if p.Prefix() != "" {
		prefix := p.Prefix()
		nu.Prefix = prefix
	}
	m := u.GetMeasure()
	d := u.GetUnitDenominator()
	if d.Short() != "inval" {
		nu.Base = fmt.Sprintf("%s/%s", m.Short(), d.Short())
	} else {
		nu.Base = m.Short()
	}

	return nu
}
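
// deepCopyJobMeta copies a job's metadata from the old JobMeta schema into a
// fresh schema.JobMeta, converting the unit strings of all job statistics and
// deep-copying slices and maps so the result shares no memory with its input.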
func deepCopyJobMeta(j *JobMeta) schema.JobMeta {
	var jn schema.JobMeta

	//required properties
	jn.JobID = j.JobID
	jn.User = j.User
	jn.Project = j.Project
	jn.Cluster = j.Cluster
	jn.SubCluster = j.SubCluster
	jn.NumNodes = j.NumNodes
	jn.Exclusive = j.Exclusive
	jn.StartTime = j.StartTime
	jn.State = schema.JobState(j.State)
	jn.Duration = j.Duration

	for _, ro := range j.Resources {
		var rn schema.Resource
		rn.Hostname = ro.Hostname
		rn.Configuration = ro.Configuration
		hwt := make([]int, len(ro.HWThreads))
		if ro.HWThreads != nil {
			copy(hwt, ro.HWThreads)
		}
		rn.HWThreads = hwt
		acc := make([]string, len(ro.Accelerators))
		if ro.Accelerators != nil {
			copy(acc, ro.Accelerators)
		}
		rn.Accelerators = acc
		jn.Resources = append(jn.Resources, &rn)
	}

	jn.MetaData = make(map[string]string)
	for k, v := range j.MetaData {
		jn.MetaData[k] = v
	}

	jn.Statistics = make(map[string]schema.JobStatistics)
	for k, v := range j.Statistics {
		var sn schema.JobStatistics
		sn.Avg = v.Avg
		sn.Max = v.Max
		sn.Min = v.Min
		tmpUnit := ConvertUnitString(v.Unit)
		if tmpUnit.Base == "inval" {
			sn.Unit = schema.Unit{Base: ""}
		} else {
			sn.Unit = tmpUnit
		}
		jn.Statistics[k] = sn
	}

	//optional properties
	jn.Partition = j.Partition
	jn.ArrayJobId = j.ArrayJobId
	jn.NumHWThreads = j.NumHWThreads
	jn.NumAcc = j.NumAcc
	jn.MonitoringStatus = j.MonitoringStatus
	jn.SMT = j.SMT
	jn.Walltime = j.Walltime
	jn.Tags = append(jn.Tags, j.Tags...)

	return jn
}
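
// deepCopyJobData copies a job's metric data into the new schema, converting
// metric unit strings and translating accelerator series IDs via the
// (sub)cluster topology.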
func deepCopyJobData(d *JobData, cluster string, subCluster string) *schema.JobData {
	dn := make(schema.JobData)

	for k, v := range *d {
		// fmt.Printf("Metric %s\n", k)
		dn[k] = make(map[schema.MetricScope]*schema.JobMetric)
		for mk, mv := range v {
			// fmt.Printf("Scope %s\n", mk)
			var mn schema.JobMetric
			tmpUnit := ConvertUnitString(mv.Unit)
			if tmpUnit.Base == "inval" {
				mn.Unit = schema.Unit{Base: ""}
			} else {
				mn.Unit = tmpUnit
			}
			mn.Timestep = mv.Timestep

			for _, v := range mv.Series {
				var sn schema.Series
				sn.Hostname = v.Hostname
				if v.Id != nil {
					id := new(string)
					if mk == schema.MetricScopeAccelerator {
						s := GetSubCluster(cluster, subCluster)
						var err error
						*id, err = s.Topology.GetAcceleratorID(*v.Id)
						if err != nil {
							log.Fatal(err)
						}
					} else {
						*id = fmt.Sprint(*v.Id)
					}
					sn.Id = id
				}
				if v.Statistics != nil {
					sn.Statistics = schema.MetricStatistics{
						Avg: v.Statistics.Avg,
						Min: v.Statistics.Min,
						Max: v.Statistics.Max}
				}

				sn.Data = make([]schema.Float, len(v.Data))
				copy(sn.Data, v.Data)
				mn.Series = append(mn.Series, sn)
			}

			dn[k][mk] = &mn
		}
		// fmt.Printf("FINISH %s\n", k)
	}

	return &dn
}
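
// deepCopyClusterConfig converts a cluster.json configuration into the new
// schema, attaching explicit units (GF/s, GB/s) to the subcluster performance
// numbers and defaulting a missing metric aggregation to "sum".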
func deepCopyClusterConfig(co *Cluster) schema.Cluster {
	var cn schema.Cluster
	cn.Name = co.Name
	for _, sco := range co.SubClusters {
		var scn schema.SubCluster
		scn.Name = sco.Name
		scn.Nodes = sco.Nodes
		scn.ProcessorType = sco.ProcessorType
		scn.SocketsPerNode = sco.SocketsPerNode
		scn.CoresPerSocket = sco.CoresPerSocket
		scn.ThreadsPerCore = sco.ThreadsPerCore
		scn.FlopRateScalar = schema.MetricValue{
			Unit:  schema.Unit{Base: "F/s", Prefix: "G"},
			Value: float64(sco.FlopRateScalar)}
		scn.FlopRateSimd = schema.MetricValue{
			Unit:  schema.Unit{Base: "F/s", Prefix: "G"},
			Value: float64(sco.FlopRateSimd)}
		scn.MemoryBandwidth = schema.MetricValue{
			Unit:  schema.Unit{Base: "B/s", Prefix: "G"},
			Value: float64(sco.MemoryBandwidth)}
		scn.Topology = *sco.Topology
		cn.SubClusters = append(cn.SubClusters, &scn)
	}

	for _, mco := range co.MetricConfig {
		var mcn schema.MetricConfig
		mcn.Name = mco.Name
		mcn.Scope = mco.Scope
		if mco.Aggregation == "" {
			fmt.Println("cluster.json: property 'aggregation' is missing, defaulting to 'sum'. Please review the file!")
			mcn.Aggregation = "sum"
		} else {
			mcn.Aggregation = mco.Aggregation
		}
		mcn.Timestep = mco.Timestep
		tmpUnit := ConvertUnitString(mco.Unit)
		if tmpUnit.Base == "inval" {
			mcn.Unit = schema.Unit{Base: ""}
		} else {
			mcn.Unit = tmpUnit
		}
		mcn.Peak = mco.Peak
		mcn.Normal = mco.Normal
		mcn.Caution = mco.Caution
		mcn.Alert = mco.Alert
		mcn.SubClusters = mco.SubClusters

		cn.MetricConfig = append(cn.MetricConfig, &mcn)
	}

	return cn
}
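
// convertJob migrates a single job: it skips jobs whose source data.json is
// empty, then writes the converted meta.json and data.json into the
// destination archive. Any error aborts the migration.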
func convertJob(job *JobMeta) {
	// check if source data is available, otherwise skip job
	srcDataPath := getPath(job, srcPath, "data.json")
	info, err := os.Stat(srcDataPath)
	if err != nil {
		log.Fatal(err)
	}
	if info.Size() == 0 {
		fmt.Printf("Skip path %s, file size is 0 bytes.\n", srcDataPath)
		return
	}

	path := getPath(job, dstPath, "meta.json")
	err = os.MkdirAll(filepath.Dir(path), 0750)
	if err != nil {
		log.Fatal(err)
	}
	f, err := os.Create(path)
	if err != nil {
		log.Fatal(err)
	}
	jmn := deepCopyJobMeta(job)
	if err = EncodeJobMeta(f, &jmn); err != nil {
		log.Fatal(err)
	}
	if err = f.Close(); err != nil {
		log.Fatal(err)
	}

	f, err = os.Create(getPath(job, dstPath, "data.json"))
	if err != nil {
		log.Fatal(err)
	}
	var jd *JobData
	jd, err = loadJobData(srcDataPath)
	if err != nil {
		log.Fatal(err)
	}
	jdn := deepCopyJobData(jd, job.Cluster, job.SubCluster)
	if err := EncodeJobData(f, jdn); err != nil {
		log.Fatal(err)
	}
	if err := f.Close(); err != nil {
		log.Fatal(err)
	}
}
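
// main drives the migration: it refuses to run if the source archive already
// carries a version.txt, recreates the cluster configurations in the
// destination, converts all jobs (concurrently unless -debug is set) and
// finally stamps the destination archive with version.txt.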
func main() {
	var flagLogLevel, flagConfigFile string
	var flagLogDateTime, debug bool

	flag.BoolVar(&flagLogDateTime, "logdate", false, "Set this flag to add date and time to log messages")
	flag.BoolVar(&debug, "debug", false, "Set this flag to force sequential execution for debugging")
	flag.StringVar(&flagLogLevel, "loglevel", "warn", "Sets the logging level: `[debug,info,warn (default),err,fatal,crit]`")
	flag.StringVar(&flagConfigFile, "config", "./config.json", "Specify alternative path to `config.json`")
	flag.StringVar(&srcPath, "src", "./var/job-archive", "Specify the source job archive path")
	flag.StringVar(&dstPath, "dst", "./var/job-archive-new", "Specify the destination job archive path")
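
	// Typical invocation (binary name and paths are illustrative; the paths
	// shown are just the flag defaults):
	//   archive-migration -src ./var/job-archive -dst ./var/job-archive-new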

	flag.Parse()

	log.Init(flagLogLevel, flagLogDateTime)
	config.Init(flagConfigFile)

	if _, err := os.Stat(filepath.Join(srcPath, "version.txt")); !errors.Is(err, os.ErrNotExist) {
		log.Fatal("Archive version file exists, source archive is already migrated!")
	}

	srcConfig := fmt.Sprintf("{\"path\": \"%s\"}", srcPath)
	err := ar.Init(json.RawMessage(srcConfig))
	if err != nil {
		log.Fatal(err)
	}

	err = initClusterConfig()
	if err != nil {
		log.Fatal(err)
	}

	// setup new job archive
	err = os.Mkdir(dstPath, 0750)
	if err != nil {
		log.Fatal(err)
	}

	for _, c := range Clusters {
		path := filepath.Join(dstPath, c.Name)
		fmt.Println(path)
		err = os.Mkdir(path, 0750)
		if err != nil {
			log.Fatal(err)
		}
		cn := deepCopyClusterConfig(c)

		f, err := os.Create(filepath.Join(path, "cluster.json"))
		if err != nil {
			log.Fatal(err)
		}
		if err := EncodeCluster(f, &cn); err != nil {
			log.Fatal(err)
		}
		if err := f.Close(); err != nil {
			log.Fatal(err)
		}
	}
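
	// Convert all jobs found in the source archive. By default one goroutine
	// is spawned per job; the -debug flag forces sequential processing.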
	var wg sync.WaitGroup

	for job := range ar.Iter() {
		if debug {
			fmt.Printf("Job %d\n", job.JobID)
			convertJob(job)
		} else {
			job := job // capture loop variable for the goroutine
			wg.Add(1)

			go func() {
				defer wg.Done()
				convertJob(job)
			}()
		}
	}

	wg.Wait()

	if err := os.WriteFile(filepath.Join(dstPath, "version.txt"), []byte(fmt.Sprintf("%d", Version)), 0644); err != nil {
		log.Fatal(err)
	}
}