Mirror of https://github.com/ClusterCockpit/cc-backend, synced 2024-11-10 08:57:25 +01:00
Synchronize DB Schema with json schema
Rework tests
This commit is contained in:
parent a8980c7a5e
commit 500ae29d25
@@ -116,7 +116,10 @@ func main() {
     }
 
     if flagMigrateDB {
-        repository.MigrateDB(config.Keys.DBDriver, config.Keys.DB)
+        err := repository.MigrateDB(config.Keys.DBDriver, config.Keys.DB)
+        if err != nil {
+            log.Fatal(err)
+        }
         os.Exit(0)
     }
 
@@ -56,7 +56,10 @@ func Connect(driver string, db string) {
         }
 
         dbConnInstance = &DBConnection{DB: dbHandle, Driver: driver}
-        checkDBVersion(driver, dbHandle.DB)
+        err = checkDBVersion(driver, dbHandle.DB)
+        if err != nil {
+            log.Fatal(err)
+        }
     })
 }
 
@@ -9,6 +9,7 @@ import (
     "database/sql"
     "encoding/json"
     "fmt"
+    "math"
     "os"
     "strings"
     "time"
@@ -319,31 +320,71 @@ func loadJobStat(job *schema.JobMeta, metric string) float64 {
     return 0.0
 }
 
+func getNormalizationFactor(v float64) (float64, int) {
+    count := 0
+    scale := -3
+
+    if v > 1000.0 {
+        for v > 1000.0 {
+            v *= 1e-3
+            count++
+        }
+    } else {
+        for v < 1.0 {
+            v *= 1e3
+            count++
+        }
+        scale = 3
+    }
+    return math.Pow10(count * scale), count * scale
+}
+
+func normalize(avg float64, u schema.Unit) (float64, schema.Unit) {
+    f, e := getNormalizationFactor(avg)
+
+    if e != 0 {
+
+        p := units.NewPrefixFromFactor(units.NewPrefix(*u.Prefix), e)
+        np := p.Prefix()
+        return f, schema.Unit{Prefix: &np, Base: u.Base}
+    }
+
+    return f, u
+}
+
 func checkJobData(d *schema.JobData) error {
     for _, scopes := range *d {
-        var newUnit string
-        // Add node scope if missing
+        // var newUnit schema.Unit
+        // TODO Add node scope if missing
         for _, metric := range scopes {
             if strings.Contains(metric.Unit.Base, "B/s") ||
                 strings.Contains(metric.Unit.Base, "F/s") ||
                 strings.Contains(metric.Unit.Base, "B") {
 
-                // First get overall avg
+                // get overall avg
                 sum := 0.0
                 for _, s := range metric.Series {
                     sum += s.Statistics.Avg
                 }
 
                 avg := sum / float64(len(metric.Series))
+                f, u := normalize(avg, metric.Unit)
 
-                for _, s := range metric.Series {
-                    fp := schema.ConvertFloatToFloat64(s.Data)
-                    // Normalize values with new unit prefix
-                    oldUnit := metric.Unit.Base
-                    units.NormalizeSeries(fp, avg, oldUnit, &newUnit)
-                    s.Data = schema.GetFloat64ToFloat(fp)
-                }
-                metric.Unit.Base = newUnit
+                if u.Prefix != metric.Unit.Prefix {
+
+                    for _, s := range metric.Series {
+                        fp := schema.ConvertFloatToFloat64(s.Data)
+
+                        for i := 0; i < len(fp); i++ {
+                            fp[i] *= f
+                            fp[i] = math.Ceil(fp[i])
+                        }
+
+                        s.Data = schema.GetFloat64ToFloat(fp)
+                    }
+
+                    metric.Unit.Prefix = u.Prefix
+                }
             }
         }
     }
 }
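Note (illustration, not part of the diff): a standalone sketch of the new normalization helper, re-implementing the getNormalizationFactor loop from the hunk above so it can be run in isolation; the sample input roughly matches the average used in the new TestNormalizeFactor below, and the mapping of the exponent to the "G" prefix is the one exercised by those tests.

package main

import (
    "fmt"
    "math"
)

// Standalone copy of the getNormalizationFactor loop from the diff above.
func getNormalizationFactor(v float64) (float64, int) {
    count := 0
    scale := -3

    if v > 1000.0 {
        for v > 1000.0 {
            v *= 1e-3
            count++
        }
    } else {
        for v < 1.0 {
            v *= 1e3
            count++
        }
        scale = 3
    }
    return math.Pow10(count * scale), count * scale
}

func main() {
    // An average around 2.0e11 is reduced three times by 1e-3, so the
    // result is factor 1e-9 and exponent -9, which the tests below map
    // to the "G" prefix via pkg/units.
    f, e := getNormalizationFactor(2.0e11)
    fmt.Println(f, e) // 1e-09 -9
}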
internal/repository/init_test.go (new file, 101 lines)
@@ -0,0 +1,101 @@
+// Copyright (C) 2022 NHR@FAU, University Erlangen-Nuremberg.
+// All rights reserved.
+// Use of this source code is governed by a MIT-style
+// license that can be found in the LICENSE file.
+package repository
+
+import (
+    "fmt"
+    "path/filepath"
+    "testing"
+
+    "github.com/ClusterCockpit/cc-backend/pkg/log"
+    "github.com/ClusterCockpit/cc-backend/pkg/units"
+    _ "github.com/mattn/go-sqlite3"
+)
+
+func setupRepo(t *testing.T) *JobRepository {
+    log.Init("info", true)
+    tmpdir := t.TempDir()
+    dbfilepath := filepath.Join(tmpdir, "test.db")
+    err := MigrateDB("sqlite3", dbfilepath)
+    if err != nil {
+        t.Fatal(err)
+    }
+    Connect("sqlite3", dbfilepath)
+    return GetJobRepository()
+}
+
+func TestNormalizeFactor(t *testing.T) {
+    // var us string
+    s := []float64{2890031237, 23998994567, 389734042344, 390349424345}
+    // r := []float64{3, 24, 390, 391}
+
+    total := 0.0
+    for _, number := range s {
+        total += number
+    }
+    avg := total / float64(len(s))
+
+    fmt.Printf("AVG: %e\n", avg)
+    f, e := getNormalizationFactor(avg)
+
+    fmt.Printf("Factor %e Count %d\n", f, e)
+
+    np := units.NewPrefix("")
+
+    fmt.Printf("Prefix %e Short %s\n", float64(np), np.Prefix())
+
+    p := units.NewPrefixFromFactor(np, e)
+
+    if p.Prefix() != "G" {
+        t.Errorf("Failed Prefix or unit: Want G, Got %s", p.Prefix())
+    }
+}
+
+func TestNormalizeKeep(t *testing.T) {
+    s := []float64{3.0, 24.0, 390.0, 391.0}
+
+    total := 0.0
+    for _, number := range s {
+        total += number
+    }
+    avg := total / float64(len(s))
+
+    fmt.Printf("AVG: %e\n", avg)
+    f, e := getNormalizationFactor(avg)
+
+    fmt.Printf("Factor %e Count %d\n", f, e)
+
+    np := units.NewPrefix("G")
+
+    fmt.Printf("Prefix %e Short %s\n", float64(np), np.Prefix())
+
+    p := units.NewPrefixFromFactor(np, e)
+
+    if p.Prefix() != "G" {
+        t.Errorf("Failed Prefix or unit: Want G, Got %s", p.Prefix())
+    }
+}
+
+//
+// func TestHandleImportFlag(t *testing.T) {
+// 	t.Error("wrong summary for diagnostic ")
+// 	r := setupRepo(t)
+//
+// 	s := "../../test/repo/meta1.json:../../test/repo/data1.json"
+// 	err := HandleImportFlag(s)
+// 	if err != nil {
+// 		t.Fatal(err)
+// 	}
+//
+// 	jobId, cluster, startTime := int64(1404396), "emmy", int64(1609299584)
+// 	job, err := r.Find(&jobId, &cluster, &startTime)
+// 	if err != nil {
+// 		t.Fatal(err)
+// 	}
+//
+// 	if job.ID != 1366 {
+// 		t.Errorf("wrong summary for diagnostic 3\ngot: %d \nwant: 1366", job.JobID)
+// 	}
+// }
@@ -12,19 +12,21 @@ import (
     _ "github.com/mattn/go-sqlite3"
 )
 
-func init() {
-    log.Init("info", true)
-    Connect("sqlite3", "../../test/test.db")
-}
-
 func setup(t *testing.T) *JobRepository {
+    log.Init("info", true)
+    dbfilepath := "../../test/test.db"
+    err := MigrateDB("sqlite3", dbfilepath)
+    if err != nil {
+        t.Fatal(err)
+    }
+    Connect("sqlite3", dbfilepath)
     return GetJobRepository()
 }
 
 func TestFind(t *testing.T) {
     r := setup(t)
 
-    jobId, cluster, startTime := int64(1404396), "emmy", int64(1609299584)
+    jobId, cluster, startTime := int64(398998), "fritz", int64(1675957496)
     job, err := r.Find(&jobId, &cluster, &startTime)
     if err != nil {
         t.Fatal(err)
@@ -32,7 +34,7 @@ func TestFind(t *testing.T) {
 
     // fmt.Printf("%+v", job)
 
-    if job.ID != 1366 {
+    if job.ID != 5 {
         t.Errorf("wrong summary for diagnostic 3\ngot: %d \nwant: 1366", job.JobID)
     }
 }
@@ -40,14 +42,14 @@
 func TestFindById(t *testing.T) {
     r := setup(t)
 
-    job, err := r.FindById(1366)
+    job, err := r.FindById(5)
     if err != nil {
         t.Fatal(err)
     }
 
     // fmt.Printf("%+v", job)
 
-    if job.JobID != 1404396 {
+    if job.JobID != 398998 {
         t.Errorf("wrong summary for diagnostic 3\ngot: %d \nwant: 1404396", job.JobID)
     }
 }
@@ -63,7 +65,7 @@ func TestGetTags(t *testing.T) {
     fmt.Printf("TAGS %+v \n", tags)
     // fmt.Printf("COUNTS %+v \n", counts)
 
-    if counts["bandwidth"] != 6 {
-        t.Errorf("wrong summary for diagnostic 3\ngot: %d \nwant: 6", counts["load-imbalance"])
+    if counts["bandwidth"] != 3 {
+        t.Errorf("wrong tag count \ngot: %d \nwant: 3", counts["bandwidth"])
     }
 }
@@ -8,7 +8,6 @@ import (
     "database/sql"
     "embed"
     "fmt"
-    "os"
 
     "github.com/ClusterCockpit/cc-backend/pkg/log"
     "github.com/golang-migrate/migrate/v4"
@@ -22,37 +21,37 @@ const Version uint = 3
 //go:embed migrations/*
 var migrationFiles embed.FS
 
-func checkDBVersion(backend string, db *sql.DB) {
+func checkDBVersion(backend string, db *sql.DB) error {
     var m *migrate.Migrate
 
     if backend == "sqlite3" {
 
         driver, err := sqlite3.WithInstance(db, &sqlite3.Config{})
         if err != nil {
-            log.Fatal(err)
+            return err
         }
         d, err := iofs.New(migrationFiles, "migrations/sqlite3")
         if err != nil {
-            log.Fatal(err)
+            return err
         }
 
         m, err = migrate.NewWithInstance("iofs", d, "sqlite3", driver)
         if err != nil {
-            log.Fatal(err)
+            return err
         }
     } else if backend == "mysql" {
         driver, err := mysql.WithInstance(db, &mysql.Config{})
         if err != nil {
-            log.Fatal(err)
+            return err
         }
         d, err := iofs.New(migrationFiles, "migrations/mysql")
         if err != nil {
-            log.Fatal(err)
+            return err
         }
 
         m, err = migrate.NewWithInstance("iofs", d, "mysql", driver)
         if err != nil {
-            log.Fatal(err)
+            return err
         }
     }
 
@@ -61,22 +60,22 @@ func checkDBVersion(backend string, db *sql.DB) {
         if err == migrate.ErrNilVersion {
             log.Warn("Legacy database without version or missing database file!")
         } else {
-            log.Fatal(err)
+            return err
         }
     }
 
     if v < Version {
-        log.Warnf("Unsupported database version %d, need %d.\nPlease backup your database file and run cc-backend --migrate-db", v, Version)
-        os.Exit(0)
+        return fmt.Errorf("Unsupported database version %d, need %d.\nPlease backup your database file and run cc-backend --migrate-db", v, Version)
     }
 
     if v > Version {
-        log.Warnf("Unsupported database version %d, need %d.\nPlease refer to documentation how to downgrade db with external migrate tool!", v, Version)
-        os.Exit(0)
+        return fmt.Errorf("Unsupported database version %d, need %d.\nPlease refer to documentation how to downgrade db with external migrate tool!", v, Version)
     }
 
+    return nil
 }
 
-func MigrateDB(backend string, db string) {
+func MigrateDB(backend string, db string) error {
     var m *migrate.Migrate
 
     if backend == "sqlite3" {
@@ -87,17 +86,17 @@ func MigrateDB(backend string, db string) {
 
         m, err = migrate.NewWithSourceInstance("iofs", d, fmt.Sprintf("sqlite3://%s?_foreign_keys=on", db))
         if err != nil {
-            log.Fatal(err)
+            return err
         }
     } else if backend == "mysql" {
         d, err := iofs.New(migrationFiles, "migrations/mysql")
         if err != nil {
-            log.Fatal(err)
+            return err
         }
 
         m, err = migrate.NewWithSourceInstance("iofs", d, fmt.Sprintf("mysql://%s?multiStatements=true", db))
         if err != nil {
-            log.Fatal(err)
+            return err
         }
     }
 
@@ -105,9 +104,10 @@ func MigrateDB(backend string, db string) {
         if err == migrate.ErrNoChange {
             log.Info("DB already up to date!")
         } else {
-            log.Fatal(err)
+            return err
         }
     }
 
     m.Close()
+    return nil
 }
@@ -7,19 +7,19 @@ CREATE TABLE IF NOT EXISTS job (
 
     user VARCHAR(255) NOT NULL,
     project VARCHAR(255) NOT NULL,
-    partition VARCHAR(255) NOT NULL,
-    array_job_id BIGINT NOT NULL,
-    duration INT NOT NULL DEFAULT 0,
-    walltime INT NOT NULL DEFAULT 0,
-    job_state VARCHAR(255) NOT NULL
+    partition VARCHAR(255),
+    array_job_id BIGINT,
+    duration INT NOT NULL,
+    walltime INT NOT NULL,
+    job_state VARCHAR(255) NOT NULL
     CHECK(job_state IN ('running', 'completed', 'failed', 'cancelled',
     'stopped', 'timeout', 'preempted', 'out_of_memory')),
     meta_data TEXT, -- JSON
     resources TEXT NOT NULL, -- JSON
 
     num_nodes INT NOT NULL,
-    num_hwthreads INT NOT NULL,
-    num_acc INT NOT NULL,
+    num_hwthreads INT,
+    num_acc INT,
     smt TINYINT NOT NULL DEFAULT 1 CHECK(smt IN (0, 1 )),
     exclusive TINYINT NOT NULL DEFAULT 1 CHECK(exclusive IN (0, 1, 2)),
     monitoring_status TINYINT NOT NULL DEFAULT 1 CHECK(monitoring_status IN (0, 1, 2, 3)),
@@ -11,12 +11,10 @@ import (
 
     "github.com/ClusterCockpit/cc-backend/internal/auth"
     "github.com/ClusterCockpit/cc-backend/internal/config"
+    "github.com/ClusterCockpit/cc-backend/pkg/log"
     _ "github.com/mattn/go-sqlite3"
 )
 
-func init() {
-    Connect("sqlite3", "../../test/test.db")
-}
-
 func setupUserTest(t *testing.T) *UserCfgRepo {
     const testconfig = `{
     "addr": "0.0.0.0:8080",
@@ -34,6 +32,15 @@ func setupUserTest(t *testing.T) *UserCfgRepo {
     "startTime": { "from": "2022-01-01T00:00:00Z", "to": null }
     } } ]
 }`
+
+    log.Init("info", true)
+    dbfilepath := "../../test/test.db"
+    err := MigrateDB("sqlite3", dbfilepath)
+    if err != nil {
+        t.Fatal(err)
+    }
+    Connect("sqlite3", dbfilepath)
+
     tmpdir := t.TempDir()
     cfgFilePath := filepath.Join(tmpdir, "config.json")
     if err := os.WriteFile(cfgFilePath, []byte(testconfig), 0666); err != nil {
@@ -43,9 +50,10 @@ func setupUserTest(t *testing.T) *UserCfgRepo {
     config.Init(cfgFilePath)
     return GetUserCfgRepo()
 }
 
 func TestGetUIConfig(t *testing.T) {
     r := setupUserTest(t)
-    u := auth.User{Username: "jan"}
+    u := auth.User{Username: "demo"}
 
     cfg, err := r.GetUIConfig(&u)
     if err != nil {
@@ -53,10 +61,9 @@ func TestGetUIConfig(t *testing.T) {
     }
 
     tmp := cfg["plot_list_selectedMetrics"]
-    metrics := tmp.([]interface{})
-
-    str := metrics[2].(string)
-    if str != "mem_bw" {
+    metrics := tmp.([]string)
+    str := metrics[2]
+    if str != "mem_used" {
         t.Errorf("wrong config\ngot: %s \nwant: mem_bw", str)
     }
 }
@@ -11,8 +11,9 @@ import (
     "time"
 )
 
-// Non-Swaggered Comment: BaseJob
-// Non-Swaggered Comment: Common subset of Job and JobMeta. Use one of those, not this type directly.
+// BaseJob is the common part of the job metadata structs
+//
+// Common subset of Job and JobMeta. Use one of those, not this type directly.
 
 type BaseJob struct {
     // The unique identifier of a job
|
||||
MetaData map[string]string `json:"metaData"` // Additional information about the job
|
||||
}
|
||||
|
||||
// Non-Swaggered Comment: Job
|
||||
// Non-Swaggered Comment: This type is used as the GraphQL interface and using sqlx as a table row.
|
||||
|
||||
// Job struct type
|
||||
//
|
||||
// This type is used as the GraphQL interface and using sqlx as a table row.
|
||||
//
|
||||
// Job model
|
||||
// @Description Information of a HPC job.
|
||||
type Job struct {
|
||||
@@ -60,13 +62,16 @@ type Job struct {
     FileDataVolTotal float64 `json:"-" db:"file_data_vol_total"` // FileDataVolTotal as Float64
 }
 
-// Non-Swaggered Comment: JobMeta
-// Non-Swaggered Comment: When reading from the database or sending data via GraphQL, the start time can be in the much more
-// Non-Swaggered Comment: convenient time.Time type. In the `meta.json` files, the start time is encoded as a unix epoch timestamp.
-// Non-Swaggered Comment: This is why there is this struct, which contains all fields from the regular job struct, but "overwrites"
-// Non-Swaggered Comment: the StartTime field with one of type int64.
-// Non-Swaggered Comment: ID *int64 `json:"id,omitempty"` >> never used in the job-archive, only available via REST-API
-
+// JobMeta struct type
+//
+// When reading from the database or sending data via GraphQL, the start time
+// can be in the much more convenient time.Time type. In the `meta.json`
+// files, the start time is encoded as a unix epoch timestamp. This is why
+// there is this struct, which contains all fields from the regular job
+// struct, but "overwrites" the StartTime field with one of type int64. ID
+// *int64 `json:"id,omitempty"` >> never used in the job-archive, only
+// available via REST-API
+//
 // JobMeta model
 // @Description Meta data information of a HPC job.
 type JobMeta struct {
@@ -74,7 +79,7 @@ type JobMeta struct {
     ID *int64 `json:"id,omitempty"`
     BaseJob
     StartTime int64 `json:"startTime" db:"start_time" example:"1649723812" minimum:"1"` // Start epoch time stamp in seconds (Min > 0)
-    Statistics map[string]JobStatistics `json:"statistics,omitempty"` // Metric statistics of job
+    Statistics map[string]JobStatistics `json:"statistics"` // Metric statistics of job
 }
 
 const (
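Note (illustration, not part of the diff): the effect of dropping `omitempty` from the Statistics tag can be seen with local stand-in structs (names and the simplified map value type are hypothetical); with the new tag the statistics key is always emitted, even for an empty or nil map.

package main

import (
    "encoding/json"
    "fmt"
)

// Local stand-ins for the JobMeta tag change above.
type withOmitempty struct {
    Statistics map[string]float64 `json:"statistics,omitempty"`
}

type withoutOmitempty struct {
    Statistics map[string]float64 `json:"statistics"`
}

func main() {
    a, _ := json.Marshal(withOmitempty{})    // statistics key dropped when empty
    b, _ := json.Marshal(withoutOmitempty{}) // statistics key always present
    fmt.Println(string(a)) // {}
    fmt.Println(string(b)) // {"statistics":null}
}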
@@ -112,12 +112,12 @@ func getNormalizationFactor(v float64) (float64, int) {
     return math.Pow10(count * scale), count * scale
 }
 
-func NormalizeValue(v *float64, us string, nu *string) {
+func NormalizeValue(v *float64, us string) string {
     u := NewUnit(us)
     f, e := getNormalizationFactor((*v))
     *v = math.Ceil(*v * f)
-    u.setPrefix(NewPrefixFromFactor(u.getPrefix(), e))
-    *nu = u.Short()
+    p := NewPrefixFromFactor(u.getPrefix(), e)
+    return p.Prefix()
 }
 
 func NormalizeSeries(s []float64, avg float64, us string, nu *string) {
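Note (sketch, not part of the diff): the new NormalizeValue calling convention from a caller's point of view, assuming the cc-backend module is on the import path and that a unit string like "F/s" parses via NewUnit; the expected output is derived from the factor/prefix mapping shown in the hunk above, not from running the code.

package main

import (
    "fmt"

    "github.com/ClusterCockpit/cc-backend/pkg/units"
)

func main() {
    // Old style: units.NormalizeValue(&v, "F/s", &newUnitString)
    // New style: the value is still scaled in place, but the new prefix is
    // returned instead of being written through a string pointer.
    v := 2.5e9
    p := units.NormalizeValue(&v, "F/s")
    fmt.Println(v, p) // expected: 3 G
}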
@@ -348,7 +348,10 @@ func setup(t *testing.T) *api.RestApi {
         t.Fatal(err)
     }
     dbfilepath := filepath.Join(tmpdir, "test.db")
-    repository.MigrateDB("sqlite3", dbfilepath)
+    err := repository.MigrateDB("sqlite3", dbfilepath)
+    if err != nil {
+        t.Fatal(err)
+    }
 
     cfgFilePath := filepath.Join(tmpdir, "config.json")
     if err := os.WriteFile(cfgFilePath, []byte(testconfig), 0666); err != nil {
@@ -566,13 +569,116 @@ func TestRestApi(t *testing.T) {
         }
     })
 
-    // t.Run("FailedJob", func(t *testing.T) {
-    // 	subtestLetJobFail(t, restapi, r)
-    // })
-
-    // t.Run("ImportJob", func(t *testing.T) {
-    // 	testImportFlag(t)
-    // })
+    t.Run("FailedJob", func(t *testing.T) {
+        const startJobBody string = `{
+        "jobId": 12345,
+        "user": "testuser",
+        "project": "testproj",
+        "cluster": "testcluster",
+        "partition": "default",
+        "walltime": 3600,
+        "numNodes": 1,
+        "exclusive": 1,
+        "monitoringStatus": 1,
+        "smt": 1,
+        "resources": [
+            {
+                "hostname": "host123"
+            }
+        ],
+        "startTime": 12345678
+        }`
+
+        ok := t.Run("StartJob", func(t *testing.T) {
+            req := httptest.NewRequest(http.MethodPost, "/api/jobs/start_job/", bytes.NewBuffer([]byte(startJobBody)))
+            recorder := httptest.NewRecorder()
+
+            r.ServeHTTP(recorder, req)
+            response := recorder.Result()
+            if response.StatusCode != http.StatusCreated {
+                t.Fatal(response.Status, recorder.Body.String())
+            }
+        })
+        if !ok {
+            t.Fatal("subtest failed")
+        }
+
+        const stopJobBody string = `{
+        "jobId": 12345,
+        "cluster": "testcluster",
+
+        "jobState": "failed",
+        "stopTime": 12355678
+        }`
+
+        ok = t.Run("StopJob", func(t *testing.T) {
+            req := httptest.NewRequest(http.MethodPost, "/api/jobs/stop_job/", bytes.NewBuffer([]byte(stopJobBody)))
+            recorder := httptest.NewRecorder()
+
+            r.ServeHTTP(recorder, req)
+            response := recorder.Result()
+            if response.StatusCode != http.StatusOK {
+                t.Fatal(response.Status, recorder.Body.String())
+            }
+
+            restapi.JobRepository.WaitForArchiving()
+            jobid, cluster := int64(12345), "testcluster"
+            job, err := restapi.JobRepository.Find(&jobid, &cluster, nil)
+            if err != nil {
+                t.Fatal(err)
+            }
+
+            if job.State != schema.JobStateFailed {
+                t.Fatal("expected job to be failed")
+            }
+        })
+        if !ok {
+            t.Fatal("subtest failed")
+        }
+    })
+
+    t.Run("ImportJob", func(t *testing.T) {
+        if err := repository.HandleImportFlag("meta.json:data.json"); err != nil {
+            t.Fatal(err)
+        }
+
+        repo := repository.GetJobRepository()
+        jobId := int64(20639587)
+        cluster := "taurus"
+        startTime := int64(1635856524)
+        job, err := repo.Find(&jobId, &cluster, &startTime)
+        if err != nil {
+            t.Fatal(err)
+        }
+
+        if job.NumNodes != 2 {
+            t.Errorf("NumNode: Received %d, expected 2", job.NumNodes)
+        }
+
+        ar := archive.GetHandle()
+        data, err := ar.LoadJobData(job)
+        if err != nil {
+            t.Fatal(err)
+        }
+
+        if len(data) != 8 {
+            t.Errorf("Job data length: Got %d, want 8", len(data))
+        }
+
+        r := map[string]string{"mem_used": "GB", "net_bw": "KB/s",
+            "cpu_power": "W", "cpu_used": "",
+            "file_bw": "KB/s", "flops_any": "F/s",
+            "mem_bw": "GB/s", "ipc": "IPC"}
+
+        for name, scopes := range data {
+            for _, metric := range scopes {
+                if metric.Unit.Base != r[name] {
+                    t.Errorf("Metric %s unit: Got %s, want %s", name, metric.Unit.Base, r[name])
+                }
+            }
+        }
+    })
 }
 
 func subtestLetJobFail(t *testing.T, restapi *api.RestApi, r *mux.Router) {
test/repo/data1.json (new file, 1 line)
File diff suppressed because one or more lines are too long
test/repo/data2.json (new file, 1 line)
File diff suppressed because one or more lines are too long
test/repo/meta1.json (new file, 1 line)
@@ -0,0 +1 @@
+{"jobId":398764,"user":"k106eb10","project":"k106eb","cluster":"fritz","subCluster":"main","partition":"singlenode","arrayJobId":0,"numNodes":1,"numHwthreads":72,"numAcc":0,"exclusive":1,"monitoringStatus":1,"smt":0,"jobState":"completed","duration":177,"walltime":86340,"resources":[{"hostname":"f0649"}],"metaData":{"jobName":"ams_pipeline","jobScript":"#!/bin/bash -l\n#SBATCH --job-name=ams_pipeline\n#SBATCH --time=23:59:00\n#SBATCH --partition=singlenode\n#SBATCH --ntasks=72\n#SBATCH --hint=multithread\n#SBATCH --chdir=/home/atuin/k106eb/k106eb10/ACE/Ni-Al/DFT/VASP_PBE_500_0.125_0.1_NM/AlNi/binaries/bulk/moreprototypesYury/gen_131_CuTe/cfg/Ni2Al2\n#SBATCH --export=NONE\nunset SLURM_EXPORT_ENV\nuss=$(whoami)\nfind /dev/shm/ -user $uss -type f -mmin +30 -delete\ncd \"/home/atuin/k106eb/k106eb10/ACE/Ni-Al/DFT/VASP_PBE_500_0.125_0.1_NM/AlNi/binaries/bulk/moreprototypesYury/gen_131_CuTe/cfg/Ni2Al2\"\nams_pipeline pipeline.json \u003e \"/home/atuin/k106eb/k106eb10/ACE/Ni-Al/DFT/VASP_PBE_500_0.125_0.1_NM/AlNi/binaries/bulk/moreprototypesYury/gen_131_CuTe/cfg/Ni2Al2/ams_pipeline_job.sh.out\" 2\u003e \"/home/atuin/k106eb/k106eb10/ACE/Ni-Al/DFT/VASP_PBE_500_0.125_0.1_NM/AlNi/binaries/bulk/moreprototypesYury/gen_131_CuTe/cfg/Ni2Al2/ams_pipeline_job.sh.err\"\n","slurmInfo":"\nJobId=398764 JobName=ams_pipeline\n UserId=k106eb10(210387) GroupId=80111\n Account=k106eb QOS=normal \n Requeue=False Restarts=0 BatchFlag=True \n TimeLimit=1439\n SubmitTime=2023-02-09T14:10:20\n Partition=singlenode \n NodeList=f0649\n NumNodes=1 NumCPUs=72 NumTasks=72 CPUs/Task=1\n NTasksPerNode:Socket:Core=0:None:None\n TRES_req=cpu=72,mem=250000M,node=1,billing=72\n TRES_alloc=cpu=72,node=1,billing=72\n Command=/home/atuin/k106eb/k106eb10/ACE/Ni-Al/DFT/VASP_PBE_500_0.125_0.1_NM/AlNi/binaries/bulk/moreprototypesYury/gen_131_CuTe/cfg/Ni2Al2/ams_pipeline_job.sh\n WorkDir=/home/atuin/k106eb/k106eb10/ACE/Ni-Al/DFT/VASP_PBE_500_0.125_0.1_NM/AlNi/binaries/bulk/moreprototypesYury/gen_131_CuTe/cfg/Ni2Al2\n StdErr=\n StdOut=ams_pipeline.o%j\n"},"startTime":1675954353,"statistics":{"clock":{"unit":{"base":"Hz","prefix":"M"},"avg":1336.519,"min":801.564,"max":2348.215},"cpu_load":{"unit":{"base":""},"avg":31.64,"min":17.36,"max":45.54},"cpu_power":{"unit":{"base":"W"},"avg":150.018,"min":93.672,"max":261.592},"cpu_user":{"unit":{"base":""},"avg":28.518,"min":0.09,"max":57.343},"flops_any":{"unit":{"base":"F/s","prefix":"G"},"avg":45.012,"min":0,"max":135.037},"flops_dp":{"unit":{"base":"F/s","prefix":"G"},"avg":22.496,"min":0,"max":67.488},"flops_sp":{"unit":{"base":"F/s","prefix":"G"},"avg":0.02,"min":0,"max":0.061},"ib_recv":{"unit":{"base":"B/s"},"avg":14442.82,"min":219.998,"max":42581.368},"ib_recv_pkts":{"unit":{"base":"packets/s"},"avg":201.532,"min":1.25,"max":601.345},"ib_xmit":{"unit":{"base":"B/s"},"avg":282.098,"min":56.2,"max":569.363},"ib_xmit_pkts":{"unit":{"base":"packets/s"},"avg":1.228,"min":0.433,"max":2},"ipc":{"unit":{"base":"IPC"},"avg":0.77,"min":0.564,"max":0.906},"mem_bw":{"unit":{"base":"B/s","prefix":"G"},"avg":4.872,"min":0.025,"max":14.552},"mem_power":{"unit":{"base":"W"},"avg":7.725,"min":6.286,"max":10.556},"mem_used":{"unit":{"base":"B","prefix":"G"},"avg":6.162,"min":6.103,"max":6.226},"nfs4_read":{"unit":{"base":"B/s","prefix":"M"},"avg":1045.333,"min":311,"max":1525},"nfs4_total":{"unit":{"base":"B/s","prefix":"M"},"avg":6430,"min":2796,"max":11518},"nfs4_write":{"unit":{"base":"B/s","prefix":"M"},"avg":24.333,"min":0,"max":38},"vectorization_ratio":{"unit":{"base":"%"},"avg":25.528,"min":0,"max":76.585}}}
test/repo/meta2.json (new file, 1 line)
@@ -0,0 +1 @@
+{"jobId":398955,"user":"k106eb10","project":"k106eb","cluster":"fritz","subCluster":"main","partition":"singlenode","arrayJobId":0,"numNodes":1,"numHwthreads":72,"numAcc":0,"exclusive":1,"monitoringStatus":1,"smt":0,"jobState":"completed","duration":260,"walltime":86340,"resources":[{"hostname":"f0720"}],"metaData":{"jobName":"ams_pipeline","jobScript":"#!/bin/bash -l\n#SBATCH --job-name=ams_pipeline\n#SBATCH --time=23:59:00\n#SBATCH --partition=singlenode\n#SBATCH --ntasks=72\n#SBATCH --hint=multithread\n#SBATCH --chdir=/home/atuin/k106eb/k106eb10/ACE/Ni-Al/DFT/VASP_PBE_500_0.125_0.1_NM/AlNi/binaries/bulk/base-hcp/occ-shaken/hcp16.occ.4.shake.0/cfg/NiAl3NiAl11\n#SBATCH --export=NONE\nunset SLURM_EXPORT_ENV\nuss=$(whoami)\nfind /dev/shm/ -user $uss -type f -mmin +30 -delete\ncd \"/home/atuin/k106eb/k106eb10/ACE/Ni-Al/DFT/VASP_PBE_500_0.125_0.1_NM/AlNi/binaries/bulk/base-hcp/occ-shaken/hcp16.occ.4.shake.0/cfg/NiAl3NiAl11\"\nams_pipeline pipeline.json \u003e \"/home/atuin/k106eb/k106eb10/ACE/Ni-Al/DFT/VASP_PBE_500_0.125_0.1_NM/AlNi/binaries/bulk/base-hcp/occ-shaken/hcp16.occ.4.shake.0/cfg/NiAl3NiAl11/ams_pipeline_job.sh.out\" 2\u003e \"/home/atuin/k106eb/k106eb10/ACE/Ni-Al/DFT/VASP_PBE_500_0.125_0.1_NM/AlNi/binaries/bulk/base-hcp/occ-shaken/hcp16.occ.4.shake.0/cfg/NiAl3NiAl11/ams_pipeline_job.sh.err\"\n","slurmInfo":"\nJobId=398955 JobName=ams_pipeline\n UserId=k106eb10(210387) GroupId=80111\n Account=k106eb QOS=normal \n Requeue=False Restarts=0 BatchFlag=True \n TimeLimit=1439\n SubmitTime=2023-02-09T14:11:22\n Partition=singlenode \n NodeList=f0720\n NumNodes=1 NumCPUs=72 NumTasks=72 CPUs/Task=1\n NTasksPerNode:Socket:Core=0:None:None\n TRES_req=cpu=72,mem=250000M,node=1,billing=72\n TRES_alloc=cpu=72,node=1,billing=72\n Command=/home/atuin/k106eb/k106eb10/ACE/Ni-Al/DFT/VASP_PBE_500_0.125_0.1_NM/AlNi/binaries/bulk/base-hcp/occ-shaken/hcp16.occ.4.shake.0/cfg/NiAl3NiAl11/ams_pipeline_job.sh\n WorkDir=/home/atuin/k106eb/k106eb10/ACE/Ni-Al/DFT/VASP_PBE_500_0.125_0.1_NM/AlNi/binaries/bulk/base-hcp/occ-shaken/hcp16.occ.4.shake.0/cfg/NiAl3NiAl11\n StdErr=\n StdOut=ams_pipeline.o%j\n"},"startTime":1675956725,"statistics":{"clock":{"unit":{"base":"Hz","prefix":"M"},"avg":2335.254,"min":800.418,"max":2734.922},"cpu_load":{"unit":{"base":""},"avg":52.72,"min":34.46,"max":71.91},"cpu_power":{"unit":{"base":"W"},"avg":407.767,"min":93.932,"max":497.636},"cpu_user":{"unit":{"base":""},"avg":63.678,"min":19.872,"max":96.633},"flops_any":{"unit":{"base":"F/s","prefix":"G"},"avg":635.672,"min":0,"max":1332.874},"flops_dp":{"unit":{"base":"F/s","prefix":"G"},"avg":261.006,"min":0,"max":382.294},"flops_sp":{"unit":{"base":"F/s","prefix":"G"},"avg":113.659,"min":0,"max":568.286},"ib_recv":{"unit":{"base":"B/s"},"avg":27981.111,"min":69.4,"max":48084.589},"ib_recv_pkts":{"unit":{"base":"packets/s"},"avg":398.939,"min":0.5,"max":693.817},"ib_xmit":{"unit":{"base":"B/s"},"avg":188.513,"min":39.597,"max":724.568},"ib_xmit_pkts":{"unit":{"base":"packets/s"},"avg":0.867,"min":0.2,"max":2.933},"ipc":{"unit":{"base":"IPC"},"avg":0.944,"min":0.564,"max":1.291},"mem_bw":{"unit":{"base":"B/s","prefix":"G"},"avg":79.565,"min":0.021,"max":116.02},"mem_power":{"unit":{"base":"W"},"avg":24.692,"min":7.883,"max":31.318},"mem_used":{"unit":{"base":"B","prefix":"G"},"avg":22.566,"min":8.225,"max":27.613},"nfs4_read":{"unit":{"base":"B/s","prefix":"M"},"avg":647,"min":0,"max":1946},"nfs4_total":{"unit":{"base":"B/s","prefix":"M"},"avg":6181.6,"min":1270,"max":11411},"nfs4_write":{"unit":{"base":"B/s","prefix":"M"},"avg":22.4,"min":11,"max":29},"vectorization_ratio":{"unit":{"base":"%"},"avg":77.351,"min":0,"max":98.837}}}
BIN test/test.db (binary file not shown)