Mirror of https://github.com/ClusterCockpit/cc-backend, synced 2026-01-15 09:11:45 +01:00
Reformat with gofumpt
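This commit applies gofumpt, a stricter superset of gofmt, across the tree. The hunks below are mechanical rewrites: octal file-mode literals gain the explicit 0o prefix, the empty line directly after a function's opening brace is removed, and the paired blank-line edits in the S3 test hunks are whitespace-only cleanups. A minimal before/after sketch, modeled on the updateVersion hunk at the end of this diff (writeVersionOld/writeVersionNew are hypothetical names used only for illustration, not functions from cc-backend):

package main

import "os"

// writeVersionOld shows the pre-gofumpt shape: a blank line right after the
// opening brace and a bare octal literal.
func writeVersionOld(path string) error {

    return os.WriteFile(path, []byte("3\n"), 0644)
}

// writeVersionNew is what gofumpt produces from the above: the leading blank
// line is gone and the octal literal carries the explicit 0o prefix.
func writeVersionNew(path string) error {
    return os.WriteFile(path, []byte("3\n"), 0o644)
}

func main() {
    // Demo: write a version file using the formatted variant.
    _ = writeVersionNew("version.txt")
}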
@@ -561,7 +561,6 @@ func (r *NodeRepository) GetNodesForList(
     nodeFilter string,
     page *model.PageRequest,
 ) ([]string, map[string]string, int, bool, error) {
-
     // Init Return Vars
     nodes := make([]string, 0)
     stateMap := make(map[string]string)
@@ -188,7 +188,7 @@ func (fsa *FsArchive) Init(rawConfig json.RawMessage) (uint64, error) {
     if isEmpty {
         cclog.Infof("fsBackend Init() > Bootstrapping new archive at %s", fsa.path)
         versionStr := fmt.Sprintf("%d\n", Version)
-        if err := os.WriteFile(filepath.Join(fsa.path, "version.txt"), []byte(versionStr), 0644); err != nil {
+        if err := os.WriteFile(filepath.Join(fsa.path, "version.txt"), []byte(versionStr), 0o644); err != nil {
             cclog.Errorf("fsBackend Init() > failed to create version.txt: %v", err)
             return 0, err
         }
@@ -674,7 +674,7 @@ func (fsa *FsArchive) ImportJob(
 
 func (fsa *FsArchive) StoreClusterCfg(name string, config *schema.Cluster) error {
     dir := filepath.Join(fsa.path, name)
-    if err := os.MkdirAll(dir, 0777); err != nil {
+    if err := os.MkdirAll(dir, 0o777); err != nil {
         cclog.Errorf("StoreClusterCfg() > mkdir error: %v", err)
         return err
     }
@@ -41,7 +41,7 @@ func (m *MockS3Client) GetObject(ctx context.Context, params *s3.GetObjectInput,
     if !exists {
         return nil, fmt.Errorf("NoSuchKey: object not found")
     }
-
+
     contentLength := int64(len(data))
     return &s3.GetObjectOutput{
         Body: io.NopCloser(bytes.NewReader(data)),
@@ -65,7 +65,7 @@ func (m *MockS3Client) HeadObject(ctx context.Context, params *s3.HeadObjectInpu
     if !exists {
         return nil, fmt.Errorf("NotFound")
     }
-
+
     contentLength := int64(len(data))
     return &s3.HeadObjectOutput{
         ContentLength: &contentLength,
@@ -86,12 +86,12 @@ func (m *MockS3Client) CopyObject(ctx context.Context, params *s3.CopyObjectInpu
         return nil, fmt.Errorf("invalid CopySource")
     }
     sourceKey := parts[1]
-
+
     data, exists := m.objects[sourceKey]
     if !exists {
         return nil, fmt.Errorf("source not found")
     }
-
+
     destKey := aws.ToString(params.Key)
     m.objects[destKey] = data
     return &s3.CopyObjectOutput{}, nil
@@ -100,15 +100,15 @@ func (m *MockS3Client) CopyObject(ctx context.Context, params *s3.CopyObjectInpu
 func (m *MockS3Client) ListObjectsV2(ctx context.Context, params *s3.ListObjectsV2Input, optFns ...func(*s3.Options)) (*s3.ListObjectsV2Output, error) {
     prefix := aws.ToString(params.Prefix)
     delimiter := aws.ToString(params.Delimiter)
-
+
     var contents []types.Object
     commonPrefixes := make(map[string]bool)
-
+
     for key, data := range m.objects {
         if !strings.HasPrefix(key, prefix) {
             continue
         }
-
+
         if delimiter != "" {
             // Check if there's a delimiter after the prefix
             remainder := strings.TrimPrefix(key, prefix)
@@ -120,21 +120,21 @@ func (m *MockS3Client) ListObjectsV2(ctx context.Context, params *s3.ListObjects
                 continue
             }
         }
-
+
         size := int64(len(data))
         contents = append(contents, types.Object{
             Key:  aws.String(key),
             Size: &size,
         })
     }
-
+
     var prefixList []types.CommonPrefix
     for p := range commonPrefixes {
         prefixList = append(prefixList, types.CommonPrefix{
             Prefix: aws.String(p),
         })
     }
-
+
     return &s3.ListObjectsV2Output{
         Contents:       contents,
         CommonPrefixes: prefixList,
@@ -144,10 +144,10 @@ func (m *MockS3Client) ListObjectsV2(ctx context.Context, params *s3.ListObjects
 // Test helper to create a mock S3 archive with test data
 func setupMockS3Archive(t *testing.T) *MockS3Client {
     mock := NewMockS3Client()
-
+
     // Add version.txt
     mock.objects["version.txt"] = []byte("2\n")
-
+
     // Add a test cluster directory
     mock.objects["emmy/cluster.json"] = []byte(`{
         "name": "emmy",
@@ -165,7 +165,7 @@ func setupMockS3Archive(t *testing.T) *MockS3Client {
             }
         ]
     }`)
-
+
     // Add a test job
     mock.objects["emmy/1403/244/1608923076/meta.json"] = []byte(`{
         "jobId": 1403244,
@@ -174,7 +174,7 @@ func setupMockS3Archive(t *testing.T) *MockS3Client {
         "numNodes": 1,
         "resources": [{"hostname": "node001"}]
     }`)
-
+
     mock.objects["emmy/1403/244/1608923076/data.json"] = []byte(`{
         "mem_used": {
             "node": {
@@ -184,7 +184,7 @@ func setupMockS3Archive(t *testing.T) *MockS3Client {
             }
         }
     }`)
-
+
     return mock
 }
 
@@ -213,7 +213,7 @@ func TestGetS3Key(t *testing.T) {
         Cluster:   "emmy",
         StartTime: 1608923076,
     }
-
+
     key := getS3Key(job, "meta.json")
     expected := "emmy/1403/244/1608923076/meta.json"
     if key != expected {
@@ -227,7 +227,7 @@ func TestGetS3Directory(t *testing.T) {
         Cluster:   "emmy",
         StartTime: 1608923076,
     }
-
+
     dir := getS3Directory(job)
     expected := "emmy/1403/244/1608923076/"
     if dir != expected {
@@ -247,13 +247,13 @@ func TestS3ArchiveConfigParsing(t *testing.T) {
         "region": "us-east-1",
         "usePathStyle": true
     }`)
-
+
     var cfg S3ArchiveConfig
     err := json.Unmarshal(rawConfig, &cfg)
     if err != nil {
         t.Fatalf("failed to parse config: %v", err)
     }
-
+
     if cfg.Bucket != "test-bucket" {
         t.Errorf("expected bucket 'test-bucket', got '%s'", cfg.Bucket)
     }
@@ -277,14 +277,14 @@ func TestS3KeyGeneration(t *testing.T) {
         {1404397, "emmy", 1609300556, "data.json.gz", "emmy/1404/397/1609300556/data.json.gz"},
         {42, "fritz", 1234567890, "meta.json", "fritz/0/042/1234567890/meta.json"},
     }
-
+
     for _, tt := range tests {
         job := &schema.Job{
             JobID:     tt.jobID,
             Cluster:   tt.cluster,
             StartTime: tt.startTime,
         }
-
+
         key := getS3Key(job, tt.file)
         if key != tt.expected {
             t.Errorf("for job %d: expected %s, got %s", tt.jobID, tt.expected, key)
@@ -71,7 +71,6 @@ func countJobsNative(archivePath string) (int, error) {
         }
         return nil
     })
-
     if err != nil {
         return 0, fmt.Errorf("failed to walk directory: %w", err)
     }
@@ -70,7 +70,6 @@ func main() {
 
     // Run migration
     migrated, failed, err := migrateArchive(archivePath, dryRun, numWorkers)
-
    if err != nil {
         cclog.Errorf("Migration completed with errors: %s", err.Error())
         if failed > 0 {
@@ -104,5 +103,5 @@ func checkVersion(archivePath string) error {
 
 func updateVersion(archivePath string) error {
     versionFile := filepath.Join(archivePath, "version.txt")
-    return os.WriteFile(versionFile, []byte("3\n"), 0644)
+    return os.WriteFile(versionFile, []byte("3\n"), 0o644)
 }
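For reference, a reformat like this can presumably be reproduced with the upstream tool; a typical invocation (assuming gofumpt is installed and the module targets Go 1.13+, so the 0o rewrite applies) would be:

# Install gofumpt, then rewrite every nonconforming Go file in place;
# -l lists the files that would change, -w writes the result.
go install mvdan.cc/gofumpt@latest
gofumpt -l -w .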