// Copyright (C) NHR@FAU, University Erlangen-Nuremberg.
// All rights reserved.
// Use of this source code is governed by a MIT-style
// license that can be found in the LICENSE file.
package archive

import (
    "bufio"
    "bytes"
    "compress/gzip"
    "encoding/json"
    "errors"
    "fmt"
    "io"
    "math"
    "os"
    "path"
    "path/filepath"
    "strconv"
    "strings"
    "text/tabwriter"
    "time"

    "github.com/ClusterCockpit/cc-backend/internal/config"
    "github.com/ClusterCockpit/cc-backend/internal/util"
    "github.com/ClusterCockpit/cc-backend/pkg/log"
    "github.com/ClusterCockpit/cc-backend/pkg/schema"
    "github.com/santhosh-tekuri/jsonschema/v5"
)

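// FsArchiveConfig is the configuration decoded from the raw JSON passed
// to Init; Path points at the root directory of the job archive.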
type FsArchiveConfig struct {
    Path string `json:"path"`
}

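// FsArchive implements the job archive on a POSIX file system, laid out
// as <path>/<cluster>/<jobid/1000>/<jobid%1000>/<starttime>/.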
type FsArchive struct {
    path     string
    clusters []string
}

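// clusterInfo accumulates per-cluster statistics while Info walks the
// archive tree.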
type clusterInfo struct {
    numJobs   int
    dateFirst int64
    dateLast  int64
    diskSize  float64
}

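// getDirectory maps a job to its directory below rootPath. The job ID is
// split into two levels (quotient and zero-padded remainder of 1000) to
// keep the directory fan-out manageable.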
func getDirectory(
    job *schema.Job,
    rootPath string,
) string {
    lvl1, lvl2 := fmt.Sprintf("%d", job.JobID/1000), fmt.Sprintf("%03d", job.JobID%1000)

    return filepath.Join(
        rootPath,
        job.Cluster,
        lvl1, lvl2,
        strconv.FormatInt(job.StartTime.Unix(), 10))
}

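// getPath returns the full path of the named file within a job's directory.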
func getPath(
    job *schema.Job,
    rootPath string,
    file string) string {

    return filepath.Join(
        getDirectory(job, rootPath), file)
}

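// loadJobMeta reads a meta.json file, validates it against the job-meta
// schema if validation is enabled, and decodes it.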
func loadJobMeta(filename string) (*schema.JobMeta, error) {
    b, err := os.ReadFile(filename)
    if err != nil {
        log.Errorf("loadJobMeta() > open file error: %v", err)
        return &schema.JobMeta{}, err
    }
    if config.Keys.Validate {
        if err := schema.Validate(schema.Meta, bytes.NewReader(b)); err != nil {
            return &schema.JobMeta{}, fmt.Errorf("validate job meta: %v", err)
        }
    }

    return DecodeJobMeta(bytes.NewReader(b))
}

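// loadJobData reads a data.json or data.json.gz file, transparently
// decompressing it, validating it against the job-data schema if
// validation is enabled, and decoding it.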
func loadJobData(filename string, isCompressed bool) (schema.JobData, error) {
    f, err := os.Open(filename)
    if err != nil {
        log.Errorf("fsBackend loadJobData() - %v", err)
        return nil, err
    }
    defer f.Close()

    var r io.Reader = bufio.NewReader(f)
    if isCompressed {
        gr, err := gzip.NewReader(r)
        if err != nil {
            log.Errorf("fsBackend loadJobData() - gzip error: %v", err)
            return nil, err
        }
        defer gr.Close()
        r = gr
    }

    if config.Keys.Validate {
        // schema.Validate consumes the reader, so buffer the payload once
        // and decode from a fresh reader afterwards.
        b, err := io.ReadAll(r)
        if err != nil {
            log.Errorf("fsBackend loadJobData() - read error: %v", err)
            return nil, err
        }
        if err := schema.Validate(schema.Data, bytes.NewReader(b)); err != nil {
            return schema.JobData{}, fmt.Errorf("validate job data: %v", err)
        }
        return DecodeJobData(bytes.NewReader(b), filename)
    }

    return DecodeJobData(r, filename)
}

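// Init decodes the backend configuration, for example
// {"path": "./var/job-archive"}, verifies that version.txt at the
// archive root matches the supported Version, and records all cluster
// subdirectories. It returns the archive version found on disk.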
func (fsa *FsArchive) Init(rawConfig json.RawMessage) (uint64, error) {
    var cfg FsArchiveConfig
    if err := json.Unmarshal(rawConfig, &cfg); err != nil {
        log.Warnf("Init() > Unmarshal error: %#v", err)
        return 0, err
    }
    if cfg.Path == "" {
        err := fmt.Errorf("Init(): empty config.Path")
        log.Errorf("Init() > config.Path error: %v", err)
        return 0, err
    }
    fsa.path = cfg.Path

    b, err := os.ReadFile(filepath.Join(fsa.path, "version.txt"))
    if err != nil {
        log.Warnf("fsBackend Init() - %v", err)
        return 0, err
    }

    version, err := strconv.ParseUint(strings.TrimSuffix(string(b), "\n"), 10, 64)
    if err != nil {
        log.Errorf("fsBackend Init() - %v", err)
        return 0, err
    }

    if version != Version {
        return version, fmt.Errorf("unsupported version %d, need %d", version, Version)
    }

    entries, err := os.ReadDir(fsa.path)
    if err != nil {
        log.Errorf("Init() > ReadDir() error: %v", err)
        return 0, err
    }

    for _, de := range entries {
        if !de.IsDir() {
            continue
        }
        fsa.clusters = append(fsa.clusters, de.Name())
    }

    return version, nil
}

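// Info walks the complete archive and prints a per-cluster summary
// (job count, first and last start time, disk usage) as a table to stdout.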
func (fsa *FsArchive) Info() {
    fmt.Printf("Job archive %s\n", fsa.path)
    clusters, err := os.ReadDir(fsa.path)
    if err != nil {
        log.Fatalf("Reading clusters failed: %s", err.Error())
    }

    ci := make(map[string]*clusterInfo)

    for _, cluster := range clusters {
        if !cluster.IsDir() {
            continue
        }

        cc := cluster.Name()
        ci[cc] = &clusterInfo{dateFirst: time.Now().Unix()}
        lvl1Dirs, err := os.ReadDir(filepath.Join(fsa.path, cc))
        if err != nil {
            log.Fatalf("Reading jobs failed @ lvl1 dirs: %s", err.Error())
        }

        for _, lvl1Dir := range lvl1Dirs {
            if !lvl1Dir.IsDir() {
                continue
            }
            lvl2Dirs, err := os.ReadDir(filepath.Join(fsa.path, cc, lvl1Dir.Name()))
            if err != nil {
                log.Fatalf("Reading jobs failed @ lvl2 dirs: %s", err.Error())
            }

            for _, lvl2Dir := range lvl2Dirs {
                dirpath := filepath.Join(fsa.path, cc, lvl1Dir.Name(), lvl2Dir.Name())
                startTimeDirs, err := os.ReadDir(dirpath)
                if err != nil {
                    log.Fatalf("Reading jobs failed @ starttime dirs: %s", err.Error())
                }

                for _, startTimeDir := range startTimeDirs {
                    if startTimeDir.IsDir() {
                        ci[cc].numJobs++
                        startTime, err := strconv.ParseInt(startTimeDir.Name(), 10, 64)
                        if err != nil {
                            log.Fatalf("Cannot parse starttime: %s", err.Error())
                        }
                        ci[cc].dateFirst = util.Min(ci[cc].dateFirst, startTime)
                        ci[cc].dateLast = util.Max(ci[cc].dateLast, startTime)
                        ci[cc].diskSize += util.DiskUsage(filepath.Join(dirpath, startTimeDir.Name()))
                    }
                }
            }
        }
    }

    cit := clusterInfo{dateFirst: time.Now().Unix()}
    w := tabwriter.NewWriter(os.Stdout, 0, 0, 2, ' ', tabwriter.Debug)
    fmt.Fprintln(w, "cluster\t#jobs\tfrom\tto\tdu (MB)")
    for cluster, info := range ci {
        fmt.Fprintf(w, "%s\t%d\t%s\t%s\t%.2f\n", cluster,
            info.numJobs,
            time.Unix(info.dateFirst, 0),
            time.Unix(info.dateLast, 0),
            info.diskSize)

        cit.numJobs += info.numJobs
        cit.dateFirst = util.Min(cit.dateFirst, info.dateFirst)
        cit.dateLast = util.Max(cit.dateLast, info.dateLast)
        cit.diskSize += info.diskSize
    }

    fmt.Fprintf(w, "TOTAL\t%d\t%s\t%s\t%.2f\n",
        cit.numJobs, time.Unix(cit.dateFirst, 0), time.Unix(cit.dateLast, 0), cit.diskSize)
    w.Flush()
}

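// Exists reports whether the job's directory is present in the archive.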
func (fsa *FsArchive) Exists(job *schema.Job) bool {
    dir := getDirectory(job, fsa.path)
    _, err := os.Stat(dir)
    return !errors.Is(err, os.ErrNotExist)
}

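// Clean deletes all jobs that started before 'before' or after 'after'
// (Unix timestamps); an 'after' of 0 disables the upper bound. Emptied
// lvl2 directories are removed as well.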
func (fsa *FsArchive) Clean(before int64, after int64) {
    if after == 0 {
        after = math.MaxInt64
    }

    clusters, err := os.ReadDir(fsa.path)
    if err != nil {
        log.Fatalf("Reading clusters failed: %s", err.Error())
    }

    for _, cluster := range clusters {
        if !cluster.IsDir() {
            continue
        }

        lvl1Dirs, err := os.ReadDir(filepath.Join(fsa.path, cluster.Name()))
        if err != nil {
            log.Fatalf("Reading jobs failed @ lvl1 dirs: %s", err.Error())
        }

        for _, lvl1Dir := range lvl1Dirs {
            if !lvl1Dir.IsDir() {
                continue
            }
            lvl2Dirs, err := os.ReadDir(filepath.Join(fsa.path, cluster.Name(), lvl1Dir.Name()))
            if err != nil {
                log.Fatalf("Reading jobs failed @ lvl2 dirs: %s", err.Error())
            }

            for _, lvl2Dir := range lvl2Dirs {
                dirpath := filepath.Join(fsa.path, cluster.Name(), lvl1Dir.Name(), lvl2Dir.Name())
                startTimeDirs, err := os.ReadDir(dirpath)
                if err != nil {
                    log.Fatalf("Reading jobs failed @ starttime dirs: %s", err.Error())
                }

                for _, startTimeDir := range startTimeDirs {
                    if startTimeDir.IsDir() {
                        startTime, err := strconv.ParseInt(startTimeDir.Name(), 10, 64)
                        if err != nil {
                            log.Fatalf("Cannot parse starttime: %s", err.Error())
                        }

                        if startTime < before || startTime > after {
                            if err := os.RemoveAll(filepath.Join(dirpath, startTimeDir.Name())); err != nil {
                                log.Errorf("JobArchive Clean() error: %v", err)
                            }
                        }
                    }
                }
                if util.GetFilecount(dirpath) == 0 {
                    if err := os.Remove(dirpath); err != nil {
                        log.Errorf("JobArchive Clean() error: %v", err)
                    }
                }
            }
        }
    }
}

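// Move relocates the given jobs into the archive tree rooted at path and
// prunes source directories that become empty.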
func (fsa *FsArchive) Move(jobs []*schema.Job, path string) {
    for _, job := range jobs {
        source := getDirectory(job, fsa.path)
        target := getDirectory(job, path)

        if err := os.MkdirAll(filepath.Clean(filepath.Join(target, "..")), 0777); err != nil {
            log.Errorf("JobArchive Move MkDir error: %v", err)
        }
        if err := os.Rename(source, target); err != nil {
            log.Errorf("JobArchive Move() error: %v", err)
        }

        parent := filepath.Clean(filepath.Join(source, ".."))
        if util.GetFilecount(parent) == 0 {
            if err := os.Remove(parent); err != nil {
                log.Errorf("JobArchive Move() error: %v", err)
            }
        }
    }
}

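// CleanUp removes the directories of the given jobs and prunes emptied
// parent directories; it is used by the retention service.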
func (fsa *FsArchive) CleanUp(jobs []*schema.Job) {
    start := time.Now()
    for _, job := range jobs {
        dir := getDirectory(job, fsa.path)
        if err := os.RemoveAll(dir); err != nil {
            log.Errorf("JobArchive CleanUp() error: %v", err)
        }

        parent := filepath.Clean(filepath.Join(dir, ".."))
        if util.GetFilecount(parent) == 0 {
            if err := os.Remove(parent); err != nil {
                log.Errorf("JobArchive CleanUp() error: %v", err)
            }
        }
    }

    log.Infof("Retention Service - Removed %d jobs in %s", len(jobs), time.Since(start))
}

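// Compress replaces data.json with a gzip-compressed data.json.gz for
// every given job whose data file is larger than 2000 bytes.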
func (fsa *FsArchive) Compress(jobs []*schema.Job) {
    var cnt int
    start := time.Now()

    for _, job := range jobs {
        fileIn := getPath(job, fsa.path, "data.json")
        if util.CheckFileExists(fileIn) && util.GetFilesize(fileIn) > 2000 {
            util.CompressFile(fileIn, getPath(job, fsa.path, "data.json.gz"))
            cnt++
        }
    }

    log.Infof("Compression Service - %d files took %s", cnt, time.Since(start))
}

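// CompressLast stores starttime in compress.txt and returns the value
// written by the previous run, so callers can restrict compression to
// jobs archived since then. If the file is missing or unreadable,
// starttime itself is returned.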
func (fsa *FsArchive) CompressLast(starttime int64) int64 {
    filename := filepath.Join(fsa.path, "compress.txt")
    b, err := os.ReadFile(filename)
    if err != nil {
        log.Errorf("fsBackend CompressLast() - %v", err)
        if err := os.WriteFile(filename, []byte(fmt.Sprintf("%d", starttime)), 0644); err != nil {
            log.Errorf("fsBackend CompressLast() - %v", err)
        }
        return starttime
    }
    last, err := strconv.ParseInt(strings.TrimSuffix(string(b), "\n"), 10, 64)
    if err != nil {
        log.Errorf("fsBackend CompressLast() - %v", err)
        return starttime
    }

    log.Infof("fsBackend CompressLast() - start %d last %d", starttime, last)
    if err := os.WriteFile(filename, []byte(fmt.Sprintf("%d", starttime)), 0644); err != nil {
        log.Errorf("fsBackend CompressLast() - %v", err)
    }
    return last
}

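// LoadJobData returns the metric data of a job, preferring the
// compressed data.json.gz over the plain data.json.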
func (fsa *FsArchive) LoadJobData(job *schema.Job) (schema.JobData, error) {
    isCompressed := true
    filename := getPath(job, fsa.path, "data.json.gz")

    if !util.CheckFileExists(filename) {
        filename = getPath(job, fsa.path, "data.json")
        isCompressed = false
    }

    return loadJobData(filename, isCompressed)
}

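// LoadJobMeta returns the decoded meta.json of a job.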
func (fsa *FsArchive) LoadJobMeta(job *schema.Job) (*schema.JobMeta, error) {
    filename := getPath(job, fsa.path, "meta.json")
    return loadJobMeta(filename)
}

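// LoadClusterCfg reads, validates, and decodes the cluster.json of the
// named cluster.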
func (fsa *FsArchive) LoadClusterCfg(name string) (*schema.Cluster, error) {
    b, err := os.ReadFile(filepath.Join(fsa.path, name, "cluster.json"))
    if err != nil {
        log.Errorf("LoadClusterCfg() > open file error: %v", err)
        return &schema.Cluster{}, err
    }
    // Cluster configs are validated unconditionally, independent of
    // config.Keys.Validate.
    if err := schema.Validate(schema.ClusterCfg, bytes.NewReader(b)); err != nil {
        log.Warnf("Validate cluster config: %v\n", err)
        return &schema.Cluster{}, fmt.Errorf("validate cluster config: %v", err)
    }
    return DecodeCluster(bytes.NewReader(b))
}

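// Iter walks the whole archive in a background goroutine and streams
// every job through the returned channel, optionally including its
// metric data; the channel is closed when the walk finishes. A minimal
// usage sketch:
//
//    for jc := range fsa.Iter(false) {
//        fmt.Println(jc.Meta.JobID)
//    }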
func (fsa *FsArchive) Iter(loadMetricData bool) <-chan JobContainer {
    ch := make(chan JobContainer)
    go func() {
        clustersDir, err := os.ReadDir(fsa.path)
        if err != nil {
            log.Fatalf("Reading clusters failed @ cluster dirs: %s", err.Error())
        }

        for _, clusterDir := range clustersDir {
            if !clusterDir.IsDir() {
                continue
            }
            lvl1Dirs, err := os.ReadDir(filepath.Join(fsa.path, clusterDir.Name()))
            if err != nil {
                log.Fatalf("Reading jobs failed @ lvl1 dirs: %s", err.Error())
            }

            for _, lvl1Dir := range lvl1Dirs {
                if !lvl1Dir.IsDir() {
                    // Could be the cluster.json file
                    continue
                }

                lvl2Dirs, err := os.ReadDir(filepath.Join(fsa.path, clusterDir.Name(), lvl1Dir.Name()))
                if err != nil {
                    log.Fatalf("Reading jobs failed @ lvl2 dirs: %s", err.Error())
                }

                for _, lvl2Dir := range lvl2Dirs {
                    dirpath := filepath.Join(fsa.path, clusterDir.Name(), lvl1Dir.Name(), lvl2Dir.Name())
                    startTimeDirs, err := os.ReadDir(dirpath)
                    if err != nil {
                        log.Fatalf("Reading jobs failed @ starttime dirs: %s", err.Error())
                    }

                    for _, startTimeDir := range startTimeDirs {
                        if startTimeDir.IsDir() {
                            job, err := loadJobMeta(filepath.Join(dirpath, startTimeDir.Name(), "meta.json"))
                            if err != nil && !errors.Is(err, &jsonschema.ValidationError{}) {
                                log.Errorf("in %s: %s", filepath.Join(dirpath, startTimeDir.Name()), err.Error())
                            }

                            if loadMetricData {
                                isCompressed := true
                                filename := filepath.Join(dirpath, startTimeDir.Name(), "data.json.gz")

                                if !util.CheckFileExists(filename) {
                                    filename = filepath.Join(dirpath, startTimeDir.Name(), "data.json")
                                    isCompressed = false
                                }

                                data, err := loadJobData(filename, isCompressed)
                                if err != nil && !errors.Is(err, &jsonschema.ValidationError{}) {
                                    log.Errorf("in %s: %s", filepath.Join(dirpath, startTimeDir.Name()), err.Error())
                                }
                                ch <- JobContainer{Meta: job, Data: &data}
                            } else {
                                ch <- JobContainer{Meta: job, Data: nil}
                            }
                        }
                    }
                }
            }
        }
        close(ch)
    }()
    return ch
}

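// StoreJobMeta writes the given metadata to the meta.json of the job's
// existing archive directory, overwriting any previous file.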
func (fsa *FsArchive) StoreJobMeta(jobMeta *schema.JobMeta) error {
    job := schema.Job{
        BaseJob:       jobMeta.BaseJob,
        StartTime:     time.Unix(jobMeta.StartTime, 0),
        StartTimeUnix: jobMeta.StartTime,
    }
    f, err := os.Create(getPath(&job, fsa.path, "meta.json"))
    if err != nil {
        log.Error("Error while creating filepath for meta.json")
        return err
    }
    if err := EncodeJobMeta(f, jobMeta); err != nil {
        log.Error("Error while encoding job metadata to meta.json file")
        return err
    }
    if err := f.Close(); err != nil {
        log.Warn("Error while closing meta.json file")
        return err
    }

    return nil
}

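// GetClusters returns the names of all clusters found in the archive.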
func (fsa *FsArchive) GetClusters() []string {
    return fsa.clusters
}

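// ImportJob creates the archive directory for a job and writes both its
// meta.json and data.json.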
func (fsa *FsArchive) ImportJob(
    jobMeta *schema.JobMeta,
    jobData *schema.JobData) error {

    job := schema.Job{
        BaseJob:       jobMeta.BaseJob,
        StartTime:     time.Unix(jobMeta.StartTime, 0),
        StartTimeUnix: jobMeta.StartTime,
    }
    dir := getPath(&job, fsa.path, "")
    if err := os.MkdirAll(dir, 0777); err != nil {
        log.Error("Error while creating job archive path")
        return err
    }

    f, err := os.Create(path.Join(dir, "meta.json"))
    if err != nil {
        log.Error("Error while creating filepath for meta.json")
        return err
    }
    if err := EncodeJobMeta(f, jobMeta); err != nil {
        log.Error("Error while encoding job metadata to meta.json file")
        return err
    }
    if err := f.Close(); err != nil {
        log.Warn("Error while closing meta.json file")
        return err
    }

    // var isCompressed bool = true
    // // TODO Use shortJob Config for check
    // if jobMeta.Duration < 300 {
    //     isCompressed = false
    //     f, err = os.Create(path.Join(dir, "data.json"))
    // } else {
    //     f, err = os.Create(path.Join(dir, "data.json.gz"))
    // }
    // if err != nil {
    //     return err
    // }
    //
    // if isCompressed {
    //     if err := EncodeJobData(gzip.NewWriter(f), jobData); err != nil {
    //         return err
    //     }
    // } else {
    //     if err := EncodeJobData(f, jobData); err != nil {
    //         return err
    //     }
    // }

    f, err = os.Create(path.Join(dir, "data.json"))
    if err != nil {
        log.Error("Error while creating filepath for data.json")
        return err
    }
    if err := EncodeJobData(f, jobData); err != nil {
        log.Error("Error while encoding job metricdata to data.json file")
        return err
    }
    if err := f.Close(); err != nil {
        log.Warn("Error while closing data.json file")
        return err
    }
    return nil
}