mirror of
https://github.com/ClusterCockpit/cc-backend
synced 2026-01-28 15:01:46 +01:00
Unify JSON attribute naming to use kebab-case style. Clean up configuration.
This commit is contained in:
@@ -18,7 +18,7 @@ var configSchema = `
|
||||
"description": "Path to job archive for file backend",
|
||||
"type": "string"
|
||||
},
|
||||
"dbPath": {
|
||||
"db-path": {
|
||||
"description": "Path to SQLite database file for sqlite backend",
|
||||
"type": "string"
|
||||
},
|
||||
@@ -26,11 +26,11 @@ var configSchema = `
|
||||
"description": "S3 endpoint URL (for S3-compatible services like MinIO)",
|
||||
"type": "string"
|
||||
},
|
||||
"accessKey": {
|
||||
"access-key": {
|
||||
"description": "S3 access key ID",
|
||||
"type": "string"
|
||||
},
|
||||
"secretKey": {
|
||||
"secret-key": {
|
||||
"description": "S3 secret access key",
|
||||
"type": "string"
|
||||
},
|
||||
@@ -42,7 +42,7 @@ var configSchema = `
|
||||
"description": "AWS region for S3 bucket",
|
||||
"type": "string"
|
||||
},
|
||||
"usePathStyle": {
|
||||
"use-path-style": {
|
||||
"description": "Use path-style S3 URLs (required for MinIO and some S3-compatible services)",
|
||||
"type": "boolean"
|
||||
},
|
||||
@@ -59,7 +59,7 @@ var configSchema = `
|
||||
"type": "string",
|
||||
"enum": ["none", "delete", "move"]
|
||||
},
|
||||
"includeDB": {
|
||||
"include-db": {
|
||||
"description": "Also remove jobs from database",
|
||||
"type": "boolean"
|
||||
},
|
||||
|
||||
@@ -35,12 +35,12 @@ import (
|
||||
|
||||
// S3ArchiveConfig holds the configuration for the S3 archive backend.
|
||||
type S3ArchiveConfig struct {
|
||||
Endpoint string `json:"endpoint"` // S3 endpoint URL (optional, for MinIO/localstack)
|
||||
AccessKey string `json:"accessKey"` // AWS access key ID
|
||||
SecretKey string `json:"secretKey"` // AWS secret access key
|
||||
Bucket string `json:"bucket"` // S3 bucket name
|
||||
Region string `json:"region"` // AWS region
|
||||
UsePathStyle bool `json:"usePathStyle"` // Use path-style URLs (required for MinIO)
|
||||
Endpoint string `json:"endpoint"` // S3 endpoint URL (optional, for MinIO/localstack)
|
||||
AccessKey string `json:"access-key"` // AWS access key ID
|
||||
SecretKey string `json:"secret-key"` // AWS secret access key
|
||||
Bucket string `json:"bucket"` // S3 bucket name
|
||||
Region string `json:"region"` // AWS region
|
||||
UsePathStyle bool `json:"use-path-style"` // Use path-style URLs (required for MinIO)
|
||||
}
|
||||
|
||||
// S3Archive implements ArchiveBackend using AWS S3 or S3-compatible object storage.
|
||||
|
||||
@@ -241,11 +241,11 @@ func TestGetS3Directory(t *testing.T) {
|
||||
func TestS3ArchiveConfigParsing(t *testing.T) {
|
||||
rawConfig := json.RawMessage(`{
|
||||
"endpoint": "http://localhost:9000",
|
||||
"accessKey": "minioadmin",
|
||||
"secretKey": "minioadmin",
|
||||
"access-key": "minioadmin",
|
||||
"secret-key": "minioadmin",
|
||||
"bucket": "test-bucket",
|
||||
"region": "us-east-1",
|
||||
"usePathStyle": true
|
||||
"use-path-style": true
|
||||
}`)
|
||||
|
||||
var cfg S3ArchiveConfig
|
||||
|
||||
@@ -29,7 +29,7 @@ import (
|
||||
|
||||
// SqliteArchiveConfig holds the configuration for the SQLite archive backend.
|
||||
type SqliteArchiveConfig struct {
|
||||
DBPath string `json:"dbPath"` // Path to SQLite database file
|
||||
DBPath string `json:"db-path"` // Path to SQLite database file
|
||||
}
|
||||
|
||||
// SqliteArchive implements ArchiveBackend using a SQLite database with BLOB storage.
|
||||
|
||||
@@ -22,7 +22,7 @@ func TestSqliteInitEmptyPath(t *testing.T) {
|
||||
|
||||
func TestSqliteInitInvalidConfig(t *testing.T) {
|
||||
var sa SqliteArchive
|
||||
_, err := sa.Init(json.RawMessage(`"dbPath":"/tmp/test.db"`))
|
||||
_, err := sa.Init(json.RawMessage(`"db-path":"/tmp/test.db"`))
|
||||
if err == nil {
|
||||
t.Fatal("expected error for invalid config")
|
||||
}
|
||||
@@ -33,7 +33,7 @@ func TestSqliteInit(t *testing.T) {
|
||||
defer os.Remove(tmpfile)
|
||||
|
||||
var sa SqliteArchive
|
||||
version, err := sa.Init(json.RawMessage(`{"dbPath":"` + tmpfile + `"}`))
|
||||
version, err := sa.Init(json.RawMessage(`{"db-path":"` + tmpfile + `"}`))
|
||||
if err != nil {
|
||||
t.Fatalf("init failed: %v", err)
|
||||
}
|
||||
@@ -51,7 +51,7 @@ func TestSqliteStoreAndLoadJobMeta(t *testing.T) {
|
||||
defer os.Remove(tmpfile)
|
||||
|
||||
var sa SqliteArchive
|
||||
_, err := sa.Init(json.RawMessage(`{"dbPath":"` + tmpfile + `"}`))
|
||||
_, err := sa.Init(json.RawMessage(`{"db-path":"` + tmpfile + `"}`))
|
||||
if err != nil {
|
||||
t.Fatalf("init failed: %v", err)
|
||||
}
|
||||
@@ -97,7 +97,7 @@ func TestSqliteImportJob(t *testing.T) {
|
||||
defer os.Remove(tmpfile)
|
||||
|
||||
var sa SqliteArchive
|
||||
_, err := sa.Init(json.RawMessage(`{"dbPath":"` + tmpfile + `"}`))
|
||||
_, err := sa.Init(json.RawMessage(`{"db-path":"` + tmpfile + `"}`))
|
||||
if err != nil {
|
||||
t.Fatalf("init failed: %v", err)
|
||||
}
|
||||
@@ -114,7 +114,7 @@ func TestSqliteGetClusters(t *testing.T) {
|
||||
defer os.Remove(tmpfile)
|
||||
|
||||
var sa SqliteArchive
|
||||
_, err := sa.Init(json.RawMessage(`{"dbPath":"` + tmpfile + `"}`))
|
||||
_, err := sa.Init(json.RawMessage(`{"db-path":"` + tmpfile + `"}`))
|
||||
if err != nil {
|
||||
t.Fatalf("init failed: %v", err)
|
||||
}
|
||||
@@ -141,7 +141,7 @@ func TestSqliteGetClusters(t *testing.T) {
|
||||
|
||||
// Reinitialize to refresh cluster list
|
||||
sa.db.Close()
|
||||
_, err = sa.Init(json.RawMessage(`{"dbPath":"` + tmpfile + `"}`))
|
||||
_, err = sa.Init(json.RawMessage(`{"db-path":"` + tmpfile + `"}`))
|
||||
if err != nil {
|
||||
t.Fatalf("reinit failed: %v", err)
|
||||
}
|
||||
@@ -158,7 +158,7 @@ func TestSqliteCleanUp(t *testing.T) {
|
||||
defer os.Remove(tmpfile)
|
||||
|
||||
var sa SqliteArchive
|
||||
_, err := sa.Init(json.RawMessage(`{"dbPath":"` + tmpfile + `"}`))
|
||||
_, err := sa.Init(json.RawMessage(`{"db-path":"` + tmpfile + `"}`))
|
||||
if err != nil {
|
||||
t.Fatalf("init failed: %v", err)
|
||||
}
|
||||
@@ -193,7 +193,7 @@ func TestSqliteClean(t *testing.T) {
|
||||
defer os.Remove(tmpfile)
|
||||
|
||||
var sa SqliteArchive
|
||||
_, err := sa.Init(json.RawMessage(`{"dbPath":"` + tmpfile + `"}`))
|
||||
_, err := sa.Init(json.RawMessage(`{"db-path":"` + tmpfile + `"}`))
|
||||
if err != nil {
|
||||
t.Fatalf("init failed: %v", err)
|
||||
}
|
||||
@@ -237,7 +237,7 @@ func TestSqliteIter(t *testing.T) {
|
||||
defer os.Remove(tmpfile)
|
||||
|
||||
var sa SqliteArchive
|
||||
_, err := sa.Init(json.RawMessage(`{"dbPath":"` + tmpfile + `"}`))
|
||||
_, err := sa.Init(json.RawMessage(`{"db-path":"` + tmpfile + `"}`))
|
||||
if err != nil {
|
||||
t.Fatalf("init failed: %v", err)
|
||||
}
|
||||
@@ -276,7 +276,7 @@ func TestSqliteCompress(t *testing.T) {
|
||||
defer os.Remove(tmpfile)
|
||||
|
||||
var sa SqliteArchive
|
||||
_, err := sa.Init(json.RawMessage(`{"dbPath":"` + tmpfile + `"}`))
|
||||
_, err := sa.Init(json.RawMessage(`{"db-path":"` + tmpfile + `"}`))
|
||||
if err != nil {
|
||||
t.Fatalf("init failed: %v", err)
|
||||
}
|
||||
@@ -299,7 +299,7 @@ func TestSqliteCompress(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestSqliteConfigParsing(t *testing.T) {
|
||||
rawConfig := json.RawMessage(`{"dbPath": "/tmp/test.db"}`)
|
||||
rawConfig := json.RawMessage(`{"db-path": "/tmp/test.db"}`)
|
||||
|
||||
var cfg SqliteArchiveConfig
|
||||
err := json.Unmarshal(rawConfig, &cfg)
|
||||
@@ -317,7 +317,7 @@ func TestSqliteIterChunking(t *testing.T) {
|
||||
defer os.Remove(tmpfile)
|
||||
|
||||
var sa SqliteArchive
|
||||
_, err := sa.Init(json.RawMessage(`{"dbPath":"` + tmpfile + `"}`))
|
||||
_, err := sa.Init(json.RawMessage(`{"db-path":"` + tmpfile + `"}`))
|
||||
if err != nil {
|
||||
t.Fatalf("init failed: %v", err)
|
||||
}
|
||||
|
||||
Reference in New Issue
Block a user