Add S3 loadJobMeta

This commit is contained in:
2024-01-25 15:43:06 +01:00
parent 578a270932
commit 3bb5ae696a
6 changed files with 267 additions and 89 deletions

View File

@@ -59,19 +59,13 @@ func getDirectory(
func getPath(
job *schema.Job,
rootPath string,
file string) string {
file string,
) string {
return filepath.Join(
getDirectory(job, rootPath), file)
}
func loadJobMeta(filename string) (*schema.JobMeta, error) {
b, err := os.ReadFile(filename)
if err != nil {
log.Errorf("loadJobMeta() > open file error: %v", err)
return &schema.JobMeta{}, err
}
func loadJobMeta(b []byte) (*schema.JobMeta, error) {
if config.Keys.Validate {
if err := schema.Validate(schema.Meta, bytes.NewReader(b)); err != nil {
return &schema.JobMeta{}, fmt.Errorf("validate job meta: %v", err)
@@ -83,7 +77,6 @@ func loadJobMeta(filename string) (*schema.JobMeta, error) {
func loadJobData(filename string, isCompressed bool) (schema.JobData, error) {
f, err := os.Open(filename)
if err != nil {
log.Errorf("fsBackend LoadJobData()- %v", err)
return nil, err
@@ -116,7 +109,6 @@ func loadJobData(filename string, isCompressed bool) (schema.JobData, error) {
}
func (fsa *FsArchive) Init(rawConfig json.RawMessage) (uint64, error) {
var config FsArchiveConfig
if err := json.Unmarshal(rawConfig, &config); err != nil {
log.Warnf("Init() > Unmarshal error: %#v", err)
@@ -242,7 +234,6 @@ func (fsa *FsArchive) Exists(job *schema.Job) bool {
}
func (fsa *FsArchive) Clean(before int64, after int64) {
if after == 0 {
after = math.MaxInt64
}
@@ -358,7 +349,6 @@ func (fsa *FsArchive) Compress(jobs []*schema.Job) {
}
func (fsa *FsArchive) CompressLast(starttime int64) int64 {
filename := filepath.Join(fsa.path, "compress.txt")
b, err := os.ReadFile(filename)
if err != nil {
@@ -391,11 +381,15 @@ func (fsa *FsArchive) LoadJobData(job *schema.Job) (schema.JobData, error) {
// LoadJobMeta reads the job's meta.json from the filesystem archive
// and parses it via loadJobMeta.
//
// On a read error it returns an empty JobMeta together with the error
// (matching the existing callers' expectations).
func (fsa *FsArchive) LoadJobMeta(job *schema.Job) (*schema.JobMeta, error) {
	filename := getPath(job, fsa.path, "meta.json")
	// Removed leftover `return loadJobMeta(filename)` from the old
	// version of this function: it made everything below unreachable.
	b, err := os.ReadFile(filename)
	if err != nil {
		// Message fixed to name this function, not the helper.
		log.Errorf("LoadJobMeta() > open file error: %v", err)
		return &schema.JobMeta{}, err
	}
	return loadJobMeta(b)
}
func (fsa *FsArchive) LoadClusterCfg(name string) (*schema.Cluster, error) {
b, err := os.ReadFile(filepath.Join(fsa.path, name, "cluster.json"))
if err != nil {
log.Errorf("LoadClusterCfg() > open file error: %v", err)
@@ -410,7 +404,6 @@ func (fsa *FsArchive) LoadClusterCfg(name string) (*schema.Cluster, error) {
}
func (fsa *FsArchive) Iter(loadMetricData bool) <-chan JobContainer {
ch := make(chan JobContainer)
go func() {
clustersDir, err := os.ReadDir(fsa.path)
@@ -447,7 +440,11 @@ func (fsa *FsArchive) Iter(loadMetricData bool) <-chan JobContainer {
for _, startTimeDir := range startTimeDirs {
if startTimeDir.IsDir() {
job, err := loadJobMeta(filepath.Join(dirpath, startTimeDir.Name(), "meta.json"))
b, err := os.ReadFile(filepath.Join(dirpath, startTimeDir.Name(), "meta.json"))
if err != nil {
log.Errorf("loadJobMeta() > open file error: %v", err)
}
job, err := loadJobMeta(b)
if err != nil && !errors.Is(err, &jsonschema.ValidationError{}) {
log.Errorf("in %s: %s", filepath.Join(dirpath, startTimeDir.Name()), err.Error())
}
@@ -481,7 +478,6 @@ func (fsa *FsArchive) Iter(loadMetricData bool) <-chan JobContainer {
}
func (fsa *FsArchive) StoreJobMeta(jobMeta *schema.JobMeta) error {
job := schema.Job{
BaseJob: jobMeta.BaseJob,
StartTime: time.Unix(jobMeta.StartTime, 0),
@@ -510,8 +506,8 @@ func (fsa *FsArchive) GetClusters() []string {
func (fsa *FsArchive) ImportJob(
jobMeta *schema.JobMeta,
jobData *schema.JobData) error {
jobData *schema.JobData,
) error {
job := schema.Job{
BaseJob: jobMeta.BaseJob,
StartTime: time.Unix(jobMeta.StartTime, 0),

View File

@@ -7,6 +7,7 @@ package archive
import (
"encoding/json"
"fmt"
"os"
"path/filepath"
"testing"
"time"
@@ -30,6 +31,7 @@ func TestInitNoJson(t *testing.T) {
t.Fatal(err)
}
}
func TestInitNotExists(t *testing.T) {
var fsa FsArchive
_, err := fsa.Init(json.RawMessage("{\"path\":\"testdata/job-archive\"}"))
@@ -61,8 +63,12 @@ func TestLoadJobMetaInternal(t *testing.T) {
if err != nil {
t.Fatal(err)
}
b, err := os.ReadFile("testdata/archive/emmy/1404/397/1609300556/meta.json")
if err != nil {
t.Fatalf("loadJobMeta() > open file error: %v", err)
}
job, err := loadJobMeta("testdata/archive/emmy/1404/397/1609300556/meta.json")
job, err := loadJobMeta(b)
if err != nil {
t.Fatal(err)
}
@@ -133,7 +139,6 @@ func TestLoadJobData(t *testing.T) {
}
func BenchmarkLoadJobData(b *testing.B) {
tmpdir := b.TempDir()
jobarchive := filepath.Join(tmpdir, "job-archive")
util.CopyDir("./testdata/archive/", jobarchive)
@@ -157,7 +162,6 @@ func BenchmarkLoadJobData(b *testing.B) {
}
func BenchmarkLoadJobDataCompressed(b *testing.B) {
tmpdir := b.TempDir()
jobarchive := filepath.Join(tmpdir, "job-archive")
util.CopyDir("./testdata/archive/", jobarchive)

View File

@@ -1,11 +1,17 @@
// Copyright (C) 2022 NHR@FAU, University Erlangen-Nuremberg.
// Copyright (C) 2023 NHR@FAU, University Erlangen-Nuremberg.
// All rights reserved.
// Use of this source code is governed by a MIT-style
// license that can be found in the LICENSE file.
package archive
import (
"context"
"encoding/json"
"fmt"
"io"
"strconv"
"strings"
"time"
"github.com/ClusterCockpit/cc-backend/pkg/log"
"github.com/ClusterCockpit/cc-backend/pkg/schema"
@@ -17,60 +23,171 @@ type S3ArchiveConfig struct {
Endpoint string `json:"endpoint"`
AccessKeyID string `json:"accessKeyID"`
SecretAccessKey string `json:"secretAccessKey"`
Bucket string `json:"bucket"`
UseSSL bool `json:"useSSL"`
}
type S3Archive struct {
path string
client *minio.Client
bucket string
clusters []string
}
func (s3a *S3Archive) Init(rawConfig json.RawMessage) (uint64, error) {
var config S3ArchiveConfig
if err := json.Unmarshal(rawConfig, &config); err != nil {
var err error
if err = json.Unmarshal(rawConfig, &config); err != nil {
log.Warnf("Init() > Unmarshal error: %#v", err)
return 0, err
}
client, err := minio.New(config.Endpoint, &minio.Options{
fmt.Printf("Endpoint: %s Bucket: %s\n", config.Endpoint, config.Bucket)
s3a.client, err = minio.New(config.Endpoint, &minio.Options{
Creds: credentials.NewStaticV4(config.AccessKeyID, config.SecretAccessKey, ""),
Secure: config.UseSSL,
})
if err != nil {
log.Fatalln(err)
err = fmt.Errorf("Init() : Initialize minio client failed")
return 0, err
}
return 0, err
s3a.bucket = config.Bucket
found, err := s3a.client.BucketExists(context.Background(), s3a.bucket)
if err != nil {
err = fmt.Errorf("Init() : %v", err)
return 0, err
}
if found {
log.Infof("Bucket found.")
} else {
log.Infof("Bucket not found.")
}
r, err := s3a.client.GetObject(context.Background(),
s3a.bucket, "version.txt", minio.GetObjectOptions{})
if err != nil {
err = fmt.Errorf("Init() : Get version object failed")
return 0, err
}
defer r.Close()
b, err := io.ReadAll(r)
if err != nil {
log.Errorf("Init() : %v", err)
return 0, err
}
version, err := strconv.ParseUint(strings.TrimSuffix(string(b), "\n"), 10, 64)
if err != nil {
log.Errorf("Init() : %v", err)
return 0, err
}
if version != Version {
return 0, fmt.Errorf("unsupported version %d, need %d", version, Version)
}
for object := range s3a.client.ListObjects(
context.Background(),
s3a.bucket, minio.ListObjectsOptions{
Recursive: false,
}) {
if object.Err != nil {
log.Errorf("listObject: %v", object.Err)
}
if strings.HasSuffix(object.Key, "/") {
s3a.clusters = append(s3a.clusters, strings.TrimSuffix(object.Key, "/"))
}
}
return version, err
}
// Info summarizes the S3 job archive: it prints the bucket name and
// accumulates per-cluster statistics.
//
// NOTE(review): the per-cluster stats gathered in ci (first date,
// accumulated object size) are never printed or returned — output is
// presumably still TODO; confirm against the filesystem backend's
// Info(). Also, log.Errorf below is used to dump every object key,
// which looks like leftover debug output — consider Debugf.
func (s3a *S3Archive) Info() {
	fmt.Printf("Job archive %s\n", s3a.bucket)
	var clusters []string
	// Top-level prefixes (keys ending in "/") are the clusters.
	for object := range s3a.client.ListObjects(
		context.Background(),
		s3a.bucket, minio.ListObjectsOptions{
			Recursive: false,
		}) {
		if object.Err != nil {
			log.Errorf("listObject: %v", object.Err)
		}
		if strings.HasSuffix(object.Key, "/") {
			// Kept with the trailing "/" so it can be reused as a
			// listing prefix below (unlike Init, which trims it).
			clusters = append(clusters, object.Key)
		}
	}
	ci := make(map[string]*clusterInfo)
	for _, cluster := range clusters {
		ci[cluster] = &clusterInfo{dateFirst: time.Now().Unix()}
		// Recursive listing of every object under the cluster prefix.
		for d := range s3a.client.ListObjects(
			context.Background(),
			s3a.bucket, minio.ListObjectsOptions{
				Recursive: true,
				Prefix:    cluster,
			}) {
			// NOTE(review): debug dump of each key at error level.
			log.Errorf("%s", d.Key)
			// Size in MB (bytes * 1e-6).
			ci[cluster].diskSize += (float64(d.Size) * 1e-6)
		}
	}
}
// Exists reports whether the archive holds an entry for the given job.
//
// TODO(review): stub — always reports true (taken from the previously
// commented-out draft); the original uncommented version had no
// return statement at all and did not compile.
func (s3a *S3Archive) Exists(job *schema.Job) bool {
	return true
}
// LoadJobMeta fetches the job's meta.json object from the S3 bucket
// and parses it via loadJobMeta.
func (s3a *S3Archive) LoadJobMeta(job *schema.Job) (*schema.JobMeta, error) {
	filename := getPath(job, "/", "meta.json")
	// Log message fixed: this is LoadJobMeta, not Init.
	log.Infof("LoadJobMeta() : %s", filename)

	r, err := s3a.client.GetObject(context.Background(),
		s3a.bucket, filename, minio.GetObjectOptions{})
	if err != nil {
		return nil, fmt.Errorf("LoadJobMeta() > get object %s: %w", filename, err)
	}
	// The object reader was previously leaked; close it when done.
	defer r.Close()

	b, err := io.ReadAll(r)
	if err != nil {
		log.Errorf("LoadJobMeta() : %v", err)
		return nil, err
	}

	return loadJobMeta(b)
}
// LoadJobData is not implemented for the S3 backend yet; it returns
// an empty JobData and no error.
func (s3a *S3Archive) LoadJobData(job *schema.Job) (schema.JobData, error) {
	return schema.JobData{}, nil
}
// LoadClusterCfg is not yet implemented for the S3 backend.
//
// The original left an uncommented, bodiless copy of this signature
// above its commented-out draft — a merge artifact that did not
// compile; only the commented draft is kept.
//
// func (s3a *S3Archive) LoadClusterCfg(name string) (*schema.Cluster, error) {
// 	var err error
// 	return &schema.Cluster{}, err
// }
//
// func (s3a *S3Archive) StoreJobMeta(jobMeta *schema.JobMeta) error
// ImportJob is not implemented for the S3 backend yet; it is a no-op
// that always reports success.
func (s3a *S3Archive) ImportJob(jobMeta *schema.JobMeta, jobData *schema.JobData) error {
	return nil
}
// The remaining ArchiveBackend methods are not yet implemented for
// S3. The original kept uncommented, bodiless declarations alongside
// these commented copies — bodiless func declarations are invalid Go
// (they require an assembly implementation) — so only the commented
// list is kept.
//
// func (s3a *S3Archive) StoreJobMeta(jobMeta *schema.JobMeta) error
//
// func (s3a *S3Archive) GetClusters() []string
//
// func (s3a *S3Archive) CleanUp(jobs []*schema.Job)
//
// func (s3a *S3Archive) Move(jobs []*schema.Job, path string)
//
// func (s3a *S3Archive) Clean(before int64, after int64)
//
// func (s3a *S3Archive) Compress(jobs []*schema.Job)
//
// func (s3a *S3Archive) CompressLast(starttime int64) int64
//
// func (s3a *S3Archive) Iter(loadMetricData bool) <-chan JobContainer

View File

@@ -0,0 +1,59 @@
// Copyright (C) 2023 NHR@FAU, University Erlangen-Nuremberg.
// All rights reserved.
// Use of this source code is governed by a MIT-style
// license that can be found in the LICENSE file.
package archive
import (
"encoding/json"
"testing"
"time"
"github.com/ClusterCockpit/cc-backend/pkg/schema"
)
// TestS3Init is an integration test: it needs a reachable MinIO/S3
// endpoint populated with the cc-archive test bucket.
//
// NOTE(review): endpoint and credentials are hard-coded — move them
// to environment variables and t.Skip() when unset, and never commit
// real secrets.
func TestS3Init(t *testing.T) {
	var s3a S3Archive
	version, err := s3a.Init(json.RawMessage("{\"endpoint\":\"192.168.1.10:9100\",\"accessKeyID\":\"uACSaCN2Chiotpnr4bBS\",\"secretAccessKey\":\"MkEbBsFvMii1K5GreUriTJZxH359B1n28Au9Kaml\",\"bucket\":\"cc-archive\",\"useSSL\":false}"))
	if err != nil {
		t.Fatal(err)
	}
	if s3a.bucket != "cc-archive" {
		t.Errorf("S3 bucket \ngot: %s \nwant: cc-archive", s3a.bucket)
	}
	if version != 1 {
		// t.Errorf already marks the test failed; the redundant
		// t.Fail() that followed it has been removed.
		t.Errorf("S3 archive version \ngot: %d \nwant: 1", version)
	}
	if len(s3a.clusters) != 2 || s3a.clusters[0] != "alex" {
		// Bare t.Fail() replaced with a diagnostic message.
		t.Errorf("clusters \ngot: %v \nwant: 2 clusters starting with alex", s3a.clusters)
	}
}
// TestS3LoadJobMeta is an integration test: it loads the meta.json of
// a known job (fritz/398764 @ 1675954353) from a live S3 archive.
//
// NOTE(review): endpoint and credentials are hard-coded — move them
// to environment variables and t.Skip() when unset.
func TestS3LoadJobMeta(t *testing.T) {
	var s3a S3Archive
	_, err := s3a.Init(json.RawMessage("{\"endpoint\":\"192.168.1.10:9100\",\"accessKeyID\":\"uACSaCN2Chiotpnr4bBS\",\"secretAccessKey\":\"MkEbBsFvMii1K5GreUriTJZxH359B1n28Au9Kaml\",\"bucket\":\"cc-archive\",\"useSSL\":false}"))
	if err != nil {
		t.Fatal(err)
	}

	jobIn := schema.Job{BaseJob: schema.JobDefaults}
	jobIn.StartTime = time.Unix(1675954353, 0)
	jobIn.JobID = 398764
	jobIn.Cluster = "fritz"

	job, err := s3a.LoadJobMeta(&jobIn)
	if err != nil {
		t.Fatal(err)
	}

	// Bare t.Fail() calls replaced with diagnostic messages so a
	// failure says which field was wrong.
	if job.JobID != 398764 {
		t.Errorf("JobID \ngot: %d \nwant: 398764", job.JobID)
	}
	if int(job.NumNodes) != len(job.Resources) {
		t.Errorf("NumNodes %d does not match len(Resources) %d", job.NumNodes, len(job.Resources))
	}
	if job.StartTime != 1675954353 {
		t.Errorf("StartTime \ngot: %d \nwant: 1675954353", job.StartTime)
	}
}