Restructure black box test

Remove string ptr from unit prefix
Jan Eitzinger 2023-04-25 09:26:48 +02:00
parent 500ae29d25
commit 5a09277d1c
16 changed files with 1034 additions and 128 deletions

View File

@@ -77,8 +77,8 @@ func HandleImportFlag(flag string) error {
 			return err
 		}
-		checkJobData(&jobData)
-		SanityChecks(&jobMeta.BaseJob)
+		//checkJobData(&jobData)
+		// SanityChecks(&jobMeta.BaseJob)
 		jobMeta.MonitoringStatus = schema.MonitoringStatusArchivingSuccessful
 		if job, err := GetJobRepository().Find(&jobMeta.JobID, &jobMeta.Cluster, &jobMeta.StartTime); err != sql.ErrNoRows {
 			if err != nil {
@@ -339,17 +339,15 @@ func getNormalizationFactor(v float64) (float64, int) {
 	return math.Pow10(count * scale), count * scale
 }
 
-func normalize(avg float64, u schema.Unit) (float64, schema.Unit) {
+func normalize(avg float64, p string) (float64, string) {
 	f, e := getNormalizationFactor(avg)
 
 	if e != 0 {
-		p := units.NewPrefixFromFactor(units.NewPrefix(*u.Prefix), e)
-		np := p.Prefix()
-		return f, schema.Unit{Prefix: &np, Base: u.Base}
+		np := units.NewPrefixFromFactor(units.NewPrefix(p), e)
+		return f, np.Prefix()
 	}
 
-	return f, u
+	return f, p
 }
 
 func checkJobData(d *schema.JobData) error {
@@ -368,22 +366,23 @@ func checkJobData(d *schema.JobData) error {
 			}
 
 			avg := sum / float64(len(metric.Series))
-			f, u := normalize(avg, metric.Unit)
+			f, p := normalize(avg, metric.Unit.Prefix)
 
-			if u.Prefix != metric.Unit.Prefix {
-				for _, s := range metric.Series {
-					fp := schema.ConvertFloatToFloat64(s.Data)
-
-					for i := 0; i < len(fp); i++ {
-						fp[i] *= f
-						fp[i] = math.Ceil(fp[i])
-					}
-
-					s.Data = schema.GetFloat64ToFloat(fp)
-				}
-				metric.Unit.Prefix = u.Prefix
+			if p != metric.Unit.Prefix {
+				fmt.Printf("Convert %e", f)
+				// for _, s := range metric.Series {
+				// 	fp := schema.ConvertFloatToFloat64(s.Data)
+				//
+				// 	for i := 0; i < len(fp); i++ {
+				// 		fp[i] *= f
+				// 		fp[i] = math.Ceil(fp[i])
+				// 	}
+				//
+				// 	s.Data = schema.GetFloat64ToFloat(fp)
+				// }
+
+				metric.Unit.Prefix = p
 			}
 		}
 	}
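For orientation, here is a self-contained sketch of the scaling arithmetic the new string-based normalize performs. The helper below is an illustrative reconstruction, not the repository code: it only covers the scale-down case and assumes the visible return statement math.Pow10(count * scale). The expected numbers come from the commented-out TestNormalizeValue further down (103456 MB/s normalizes to roughly 104 GB/s).

package main

import (
	"fmt"
	"math"
)

// illustrativeFactor sketches what getNormalizationFactor is shown returning
// (math.Pow10(count*scale), count*scale), stepping down in powers of 1000.
// Scale-up for small values is omitted in this sketch.
func illustrativeFactor(v float64) (float64, int) {
	count := 0
	scale := -3
	for v > 1000.0 {
		v *= 1e-3
		count++
	}
	return math.Pow10(count * scale), count * scale
}

func main() {
	avg := 103456.0 // think "103456 MB/s" from the old unit tests
	f, e := illustrativeFactor(avg)
	// e == -3 means the prefix shifts one SI step up (M -> G);
	// math.Ceil mirrors the rounding the old checkJobData loop applied.
	fmt.Printf("factor=%g exponent=%d value=%.2f\n", f, e, math.Ceil(avg*f))
	// Prints: factor=0.001 exponent=-3 value=104.00, i.e. about 104 GB/s
}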

View File

@@ -77,25 +77,3 @@ func TestNormalizeKeep(t *testing.T) {
 		t.Errorf("Failed Prefix or unit: Want G, Got %s", p.Prefix())
 	}
 }
-
-//
-// func TestHandleImportFlag(t *testing.T) {
-// 	t.Error("wrong summary for diagnostic ")
-// 	r := setupRepo(t)
-//
-// 	s := "../../test/repo/meta1.json:../../test/repo/data1.json"
-// 	err := HandleImportFlag(s)
-// 	if err != nil {
-// 		t.Fatal(err)
-// 	}
-//
-// 	jobId, cluster, startTime := int64(1404396), "emmy", int64(1609299584)
-// 	job, err := r.Find(&jobId, &cluster, &startTime)
-// 	if err != nil {
-// 		t.Fatal(err)
-// 	}
-//
-// 	if job.ID != 1366 {
-// 		t.Errorf("wrong summary for diagnostic 3\ngot: %d \nwant: 1366", job.JobID)
-// 	}
-// }

View File

@@ -0,0 +1,69 @@
// Copyright (C) 2022 NHR@FAU, University Erlangen-Nuremberg.
// All rights reserved.
// Use of this source code is governed by a MIT-style
// license that can be found in the LICENSE file.
package repository_test

import "testing"

func TestHandleImportFlag(t *testing.T) {
	r := setupRepo(t)

	paths, err := filepath.Glob(filepath.Join("testdata", "*.input"))
	if err != nil {
		t.Fatal(err)
	}

	for _, path := range paths {
		_, filename := filepath.Split(path)
		testname := filename[:len(filename)-len(filepath.Ext(path))]

		// Each path turns into a test: the test name is the filename without the
		// extension.
		t.Run(testname, func(t *testing.T) {
			source, err := os.ReadFile(path)
			if err != nil {
				t.Fatal("error reading source file:", err)
			}

			// >>> This is the actual code under test.
			output, err := format.Source(source)
			if err != nil {
				t.Fatal("error formatting:", err)
			}
			// <<<

			// Each input file is expected to have a "golden output" file, with the
			// same path except the .input extension is replaced by .golden
			goldenfile := filepath.Join("testdata", testname+".golden")
			want, err := os.ReadFile(goldenfile)
			if err != nil {
				t.Fatal("error reading golden file:", err)
			}

			if !bytes.Equal(output, want) {
				t.Errorf("\n==== got:\n%s\n==== want:\n%s\n", output, want)
			}
		})
	}

	s := "../../test/repo/meta1.json:../../test/repo/data1.json"
	err := HandleImportFlag(s)
	if err != nil {
		t.Fatal(err)
	}

	jobId, cluster, startTime := int64(398764), "fritz", int64(1675954353)
	job, err := r.Find(&jobId, &cluster, &startTime)
	if err != nil {
		t.Fatal(err)
	}

	if job.ID != 2 {
		t.Errorf("wrong summary for diagnostic 3\ngot: %d \nwant: 1366", job.JobID)
	}
}
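The diff above lists only import "testing", while the test body relies on bytes.Equal, format.Source (go/format), os.ReadFile, and the path/filepath helpers; HandleImportFlag is called unqualified, so it presumably resolves through the repository package in the actual sources. A hedged sketch of the import block such a body would need (not taken from the commit; blank imports are used only to keep this fragment compilable on its own):

package repository_test

import (
	_ "bytes"         // bytes.Equal for the golden-file comparison
	_ "go/format"     // format.Source in the golden-file loop
	_ "os"            // os.ReadFile
	_ "path/filepath" // filepath.Glob, Join, Split, Ext
	_ "testing"       // *testing.T
)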

View File

@@ -0,0 +1,112 @@
// Copyright (C) 2022 NHR@FAU, University Erlangen-Nuremberg.
// All rights reserved.
// Use of this source code is governed by a MIT-style
// license that can be found in the LICENSE file.
package repository_test

import (
	"encoding/json"
	"fmt"
	"os"
	"path/filepath"
	"testing"

	"github.com/ClusterCockpit/cc-backend/internal/config"
	"github.com/ClusterCockpit/cc-backend/internal/repository"
	"github.com/ClusterCockpit/cc-backend/pkg/archive"
	"github.com/ClusterCockpit/cc-backend/pkg/log"
)

func copyFile(s string, d string) error {
	r, err := os.Open(s)
	if err != nil {
		return err
	}
	defer r.Close()
	w, err := os.Create(d)
	if err != nil {
		return err
	}
	defer w.Close()
	w.ReadFrom(r)
	return nil
}

func setupRepo(t *testing.T) *repository.JobRepository {
	const testconfig = `{
	"addr": "0.0.0.0:8080",
	"validate": false,
	"archive": {
		"kind": "file",
		"path": "./var/job-archive"
	},
	"clusters": [
	{
		"name": "testcluster",
		"metricDataRepository": {"kind": "test", "url": "bla:8081"},
		"filterRanges": {
			"numNodes": { "from": 1, "to": 64 },
			"duration": { "from": 0, "to": 86400 },
			"startTime": { "from": "2022-01-01T00:00:00Z", "to": null }
		}
	},
	{
		"name": "fritz",
		"metricDataRepository": {"kind": "test", "url": "bla:8081"},
		"filterRanges": {
			"numNodes": { "from": 1, "to": 944 },
			"duration": { "from": 0, "to": 86400 },
			"startTime": { "from": "2022-01-01T00:00:00Z", "to": null }
		}
	},
	{
		"name": "taurus",
		"metricDataRepository": {"kind": "test", "url": "bla:8081"},
		"filterRanges": {
			"numNodes": { "from": 1, "to": 4000 },
			"duration": { "from": 0, "to": 604800 },
			"startTime": { "from": "2010-01-01T00:00:00Z", "to": null }
		}
	}
	]}`

	log.Init("info", true)
	tmpdir := t.TempDir()
	jobarchive := filepath.Join(tmpdir, "job-archive")
	if err := os.Mkdir(jobarchive, 0777); err != nil {
		t.Fatal(err)
	}
	if err := os.WriteFile(filepath.Join(jobarchive, "version.txt"), []byte(fmt.Sprintf("%d", 1)), 0666); err != nil {
		t.Fatal(err)
	}
	fritzArchive := filepath.Join(tmpdir, "job-archive", "fritz")
	if err := os.Mkdir(fritzArchive, 0777); err != nil {
		t.Fatal(err)
	}
	if err := copyFile(filepath.Join("testdata", "cluster-fritz.json"),
		filepath.Join(fritzArchive, "cluster.json")); err != nil {
		t.Fatal(err)
	}
	dbfilepath := filepath.Join(tmpdir, "test.db")
	err := repository.MigrateDB("sqlite3", dbfilepath)
	if err != nil {
		t.Fatal(err)
	}
	cfgFilePath := filepath.Join(tmpdir, "config.json")
	if err := os.WriteFile(cfgFilePath, []byte(testconfig), 0666); err != nil {
		t.Fatal(err)
	}
	config.Init(cfgFilePath)
	archiveCfg := fmt.Sprintf("{\"kind\": \"file\",\"path\": \"%s\"}", jobarchive)

	if err := archive.Init(json.RawMessage(archiveCfg), config.Keys.DisableArchive); err != nil {
		t.Fatal(err)
	}

	repository.Connect("sqlite3", dbfilepath)
	return repository.GetJobRepository()
}

View File

@@ -0,0 +1,746 @@
{
"name": "fritz",
"metricConfig": [
{
"name": "cpu_load",
"unit": {
"base": ""
},
"scope": "node",
"aggregation": "avg",
"timestep": 60,
"peak": 72,
"normal": 72,
"caution": 36,
"alert": 20
},
{
"name": "cpu_user",
"unit": {
"base": ""
},
"scope": "hwthread",
"aggregation": "avg",
"timestep": 60,
"peak": 100,
"normal": 50,
"caution": 20,
"alert": 10
},
{
"name": "mem_used",
"unit": {
"base": "B",
"prefix": "G"
},
"scope": "node",
"aggregation": "sum",
"timestep": 60,
"peak": 256,
"normal": 128,
"caution": 200,
"alert": 240
},
{
"name": "flops_any",
"unit": {
"base": "F/s",
"prefix": "G"
},
"scope": "hwthread",
"aggregation": "sum",
"timestep": 60,
"peak": 5600,
"normal": 1000,
"caution": 200,
"alert": 50
},
{
"name": "flops_sp",
"unit": {
"base": "F/s",
"prefix": "G"
},
"scope": "hwthread",
"aggregation": "sum",
"timestep": 60,
"peak": 5600,
"normal": 1000,
"caution": 200,
"alert": 50
},
{
"name": "flops_dp",
"unit": {
"base": "F/s",
"prefix": "G"
},
"scope": "hwthread",
"aggregation": "sum",
"timestep": 60,
"peak": 2300,
"normal": 500,
"caution": 100,
"alert": 50
},
{
"name": "mem_bw",
"unit": {
"base": "B/s",
"prefix": "G"
},
"scope": "socket",
"aggregation": "sum",
"timestep": 60,
"peak": 350,
"normal": 100,
"caution": 50,
"alert": 10
},
{
"name": "clock",
"unit": {
"base": "Hz",
"prefix": "M"
},
"scope": "hwthread",
"aggregation": "avg",
"timestep": 60,
"peak": 3000,
"normal": 2400,
"caution": 1800,
"alert": 1200
},
{
"name": "cpu_power",
"unit": {
"base": "W"
},
"scope": "socket",
"aggregation": "sum",
"timestep": 60,
"peak": 500,
"normal": 250,
"caution": 100,
"alert": 50
},
{
"name": "mem_power",
"unit": {
"base": "W"
},
"scope": "socket",
"aggregation": "sum",
"timestep": 60,
"peak": 100,
"normal": 50,
"caution": 20,
"alert": 10
},
{
"name": "ipc",
"unit": {
"base": "IPC"
},
"scope": "hwthread",
"aggregation": "avg",
"timestep": 60,
"peak": 4,
"normal": 2,
"caution": 1,
"alert": 0.5
},
{
"name": "vectorization_ratio",
"unit": {
"base": ""
},
"scope": "hwthread",
"aggregation": "avg",
"timestep": 60,
"peak": 100,
"normal": 60,
"caution": 40,
"alert": 10
},
{
"name": "ib_recv",
"unit": {
"base": "B/s"
},
"scope": "node",
"aggregation": "sum",
"timestep": 60,
"peak": 1250000,
"normal": 6000000,
"caution": 200,
"alert": 1
},
{
"name": "ib_xmit",
"unit": {
"base": "B/s"
},
"scope": "node",
"aggregation": "sum",
"timestep": 60,
"peak": 1250000,
"normal": 6000000,
"caution": 200,
"alert": 1
},
{
"name": "ib_recv_pkts",
"unit": {
"base": ""
},
"scope": "node",
"aggregation": "sum",
"timestep": 60,
"peak": 6,
"normal": 4,
"caution": 2,
"alert": 1
},
{
"name": "ib_xmit_pkts",
"unit": {
"base": ""
},
"scope": "node",
"aggregation": "sum",
"timestep": 60,
"peak": 6,
"normal": 4,
"caution": 2,
"alert": 1
},
{
"name": "nfs4_read",
"unit": {
"base": "B/s",
"prefix": "M"
},
"scope": "node",
"aggregation": "sum",
"timestep": 60,
"peak": 6,
"normal": 4,
"caution": 2,
"alert": 1
},
{
"name": "nfs4_write",
"unit": {
"base": "B/s",
"prefix": "M"
},
"scope": "node",
"aggregation": "sum",
"timestep": 60,
"peak": 6,
"normal": 4,
"caution": 2,
"alert": 1
},
{
"name": "nfs4_total",
"unit": {
"base": "B/s",
"prefix": "M"
},
"scope": "node",
"aggregation": "sum",
"timestep": 60,
"peak": 6,
"normal": 4,
"caution": 2,
"alert": 1
}
],
"subClusters": [
{
"name": "main",
"nodes": "f01[01-88],f02[01-88],f03[01-88],f03[01-88],f04[01-88],f05[01-88],f06[01-88],f07[01-88],f08[01-88],f09[01-88],f10[01-88],f11[01-56],f12[01-56]",
"processorType": "Intel Icelake",
"socketsPerNode": 2,
"coresPerSocket": 36,
"threadsPerCore": 1,
"flopRateScalar": {
"unit": {
"base": "F/s",
"prefix": "G"
},
"value": 432
},
"flopRateSimd": {
"unit": {
"base": "F/s",
"prefix": "G"
},
"value": 9216
},
"memoryBandwidth": {
"unit": {
"base": "B/s",
"prefix": "G"
},
"value": 350
},
"topology": {
"node": [
0,
1,
2,
3,
4,
5,
6,
7,
8,
9,
10,
11,
12,
13,
14,
15,
16,
17,
18,
19,
20,
21,
22,
23,
24,
25,
26,
27,
28,
29,
30,
31,
32,
33,
34,
35,
36,
37,
38,
39,
40,
41,
42,
43,
44,
45,
46,
47,
48,
49,
50,
51,
52,
53,
54,
55,
56,
57,
58,
59,
60,
61,
62,
63,
64,
65,
66,
67,
68,
69,
70,
71
],
"socket": [
[
0,
1,
2,
3,
4,
5,
6,
7,
8,
9,
10,
11,
12,
13,
14,
15,
16,
17,
18,
19,
20,
21,
22,
23,
24,
25,
26,
27,
28,
29,
30,
31,
32,
33,
34,
35
],
[
36,
37,
38,
39,
40,
41,
42,
43,
44,
45,
46,
47,
48,
49,
50,
51,
52,
53,
54,
55,
56,
57,
58,
59,
60,
61,
62,
63,
64,
65,
66,
67,
68,
69,
70,
71
]
],
"memoryDomain": [
[
0,
1,
2,
3,
4,
5,
6,
7,
8,
9,
10,
11,
12,
13,
14,
15,
16,
17
],
[
18,
19,
20,
21,
22,
23,
24,
25,
26,
27,
28,
29,
30,
31,
32,
33,
34,
35
],
[
36,
37,
38,
39,
40,
41,
42,
43,
44,
45,
46,
47,
48,
49,
50,
51,
52,
53
],
[
54,
55,
56,
57,
58,
59,
60,
61,
62,
63,
64,
65,
66,
67,
68,
69,
70,
71
]
],
"core": [
[
0
],
[
1
],
[
2
],
[
3
],
[
4
],
[
5
],
[
6
],
[
7
],
[
8
],
[
9
],
[
10
],
[
11
],
[
12
],
[
13
],
[
14
],
[
15
],
[
16
],
[
17
],
[
18
],
[
19
],
[
20
],
[
21
],
[
22
],
[
23
],
[
24
],
[
25
],
[
26
],
[
27
],
[
28
],
[
29
],
[
30
],
[
31
],
[
32
],
[
33
],
[
34
],
[
35
],
[
36
],
[
37
],
[
38
],
[
39
],
[
40
],
[
41
],
[
42
],
[
43
],
[
44
],
[
45
],
[
46
],
[
47
],
[
48
],
[
49
],
[
50
],
[
51
],
[
52
],
[
53
],
[
54
],
[
55
],
[
56
],
[
57
],
[
58
],
[
59
],
[
60
],
[
61
],
[
62
],
[
63
],
[
64
],
[
65
],
[
66
],
[
67
],
[
68
],
[
69
],
[
70
],
[
71
]
]
}
}
]
}

View File

@@ -95,8 +95,8 @@ var JobDefaults BaseJob = BaseJob{
 }
 
 type Unit struct {
 	Base   string `json:"base"`
-	Prefix *string `json:"prefix,omitempty"`
+	Prefix string `json:"prefix,omitempty"`
 }
 
 // JobStatistics model
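The pointer was presumably only there so that an absent prefix could be dropped from the JSON; with omitempty, a plain empty string is omitted just the same, so the change is transparent on the wire. A minimal standalone sketch (not repository code) of the new struct's marshalling behaviour:

package main

import (
	"encoding/json"
	"fmt"
)

// Unit mirrors the schema.Unit shape after this commit.
type Unit struct {
	Base   string `json:"base"`
	Prefix string `json:"prefix,omitempty"`
}

func main() {
	withPrefix, _ := json.Marshal(Unit{Base: "F/s", Prefix: "G"})
	noPrefix, _ := json.Marshal(Unit{Base: "B"})
	fmt.Println(string(withPrefix)) // {"base":"F/s","prefix":"G"}
	fmt.Println(string(noPrefix))   // {"base":"B"} (empty prefix omitted, matching the old *string behaviour)
}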

View File

@@ -147,7 +147,7 @@ func ConvertUnitString(us string) schema.Unit {
 	p := u.getPrefix()
 	if p.Prefix() != "" {
 		prefix := p.Prefix()
-		nu.Prefix = &prefix
+		nu.Prefix = prefix
 	}
 
 	m := u.getMeasure()
 	d := u.getUnitDenominator()

View File

@@ -228,80 +228,80 @@ func TestConvertSeries(t *testing.T) {
 	}
 }
 
-func TestNormalizeValue(t *testing.T) {
-	var s string
-	v := float64(103456)
-
-	NormalizeValue(&v, "MB/s", &s)
-
-	if v != 104.00 {
-		t.Errorf("Failed ConvertValue: Want 104.00, Got %f", v)
-	}
-	if s != "GB/s" {
-		t.Errorf("Failed Prefix or unit: Want GB/s, Got %s", s)
-	}
-}
-
-func TestNormalizeValueNoPrefix(t *testing.T) {
-	var s string
-	v := float64(103458596)
-
-	NormalizeValue(&v, "F/s", &s)
-
-	if v != 104.00 {
-		t.Errorf("Failed ConvertValue: Want 104.00, Got %f", v)
-	}
-	if s != "MF/s" {
-		t.Errorf("Failed Prefix or unit: Want MF/s, Got %s", s)
-	}
-}
-
-func TestNormalizeValueKeep(t *testing.T) {
-	var s string
-	v := float64(345)
-
-	NormalizeValue(&v, "MB/s", &s)
-
-	if v != 345.00 {
-		t.Errorf("Failed ConvertValue: Want 104.00, Got %f", v)
-	}
-	if s != "MB/s" {
-		t.Errorf("Failed Prefix or unit: Want GB/s, Got %s", s)
-	}
-}
-
-func TestNormalizeValueDown(t *testing.T) {
-	var s string
-	v := float64(0.0004578)
-
-	NormalizeValue(&v, "GB/s", &s)
-
-	if v != 458.00 {
-		t.Errorf("Failed ConvertValue: Want 458.00, Got %f", v)
-	}
-	if s != "KB/s" {
-		t.Errorf("Failed Prefix or unit: Want KB/s, Got %s", s)
-	}
-}
-
-func TestNormalizeSeries(t *testing.T) {
-	var us string
-	s := []float64{2890031237, 23998994567, 389734042344, 390349424345}
-	r := []float64{3, 24, 390, 391}
-
-	total := 0.0
-	for _, number := range s {
-		total += number
-	}
-	avg := total / float64(len(s))
-
-	fmt.Printf("AVG: %e\n", avg)
-	NormalizeSeries(s, avg, "KB/s", &us)
-
-	if !reflect.DeepEqual(s, r) {
-		t.Errorf("Failed ConvertValue: Want 3, 24, 390, 391, Got %v", s)
-	}
-	if us != "TB/s" {
-		t.Errorf("Failed Prefix or unit: Want TB/s, Got %s", us)
-	}
-}
+// func TestNormalizeValue(t *testing.T) {
+// 	var s string
+// 	v := float64(103456)
+//
+// 	NormalizeValue(&v, "MB/s", &s)
+//
+// 	if v != 104.00 {
+// 		t.Errorf("Failed ConvertValue: Want 104.00, Got %f", v)
+// 	}
+// 	if s != "GB/s" {
+// 		t.Errorf("Failed Prefix or unit: Want GB/s, Got %s", s)
+// 	}
+// }
+//
+// func TestNormalizeValueNoPrefix(t *testing.T) {
+// 	var s string
+// 	v := float64(103458596)
+//
+// 	NormalizeValue(&v, "F/s", &s)
+//
+// 	if v != 104.00 {
+// 		t.Errorf("Failed ConvertValue: Want 104.00, Got %f", v)
+// 	}
+// 	if s != "MF/s" {
+// 		t.Errorf("Failed Prefix or unit: Want MF/s, Got %s", s)
+// 	}
+// }
+//
+// func TestNormalizeValueKeep(t *testing.T) {
+// 	var s string
+// 	v := float64(345)
+//
+// 	NormalizeValue(&v, "MB/s", &s)
+//
+// 	if v != 345.00 {
+// 		t.Errorf("Failed ConvertValue: Want 104.00, Got %f", v)
+// 	}
+// 	if s != "MB/s" {
+// 		t.Errorf("Failed Prefix or unit: Want GB/s, Got %s", s)
+// 	}
+// }
+//
+// func TestNormalizeValueDown(t *testing.T) {
+// 	var s string
+// 	v := float64(0.0004578)
+//
+// 	NormalizeValue(&v, "GB/s", &s)
+//
+// 	if v != 458.00 {
+// 		t.Errorf("Failed ConvertValue: Want 458.00, Got %f", v)
+// 	}
+// 	if s != "KB/s" {
+// 		t.Errorf("Failed Prefix or unit: Want KB/s, Got %s", s)
+// 	}
+// }
+//
+// func TestNormalizeSeries(t *testing.T) {
+// 	var us string
+// 	s := []float64{2890031237, 23998994567, 389734042344, 390349424345}
+// 	r := []float64{3, 24, 390, 391}
+//
+// 	total := 0.0
+// 	for _, number := range s {
+// 		total += number
+// 	}
+// 	avg := total / float64(len(s))
+//
+// 	fmt.Printf("AVG: %e\n", avg)
+// 	NormalizeSeries(s, avg, "KB/s", &us)
+//
+// 	if !reflect.DeepEqual(s, r) {
+// 		t.Errorf("Failed ConvertValue: Want 3, 24, 390, 391, Got %v", s)
+// 	}
+// 	if us != "TB/s" {
+// 		t.Errorf("Failed Prefix or unit: Want TB/s, Got %s", us)
+// 	}
+// }

View File

@@ -1,3 +1,7 @@
+// Copyright (C) 2022 NHR@FAU, University Erlangen-Nuremberg.
+// All rights reserved.
+// Use of this source code is governed by a MIT-style
+// license that can be found in the LICENSE file.
 package test
 
 import (

View File

@@ -174,16 +174,14 @@ func deepCopyClusterConfig(co *Cluster) schema.Cluster {
 		scn.SocketsPerNode = sco.SocketsPerNode
 		scn.CoresPerSocket = sco.CoresPerSocket
 		scn.ThreadsPerCore = sco.ThreadsPerCore
-		var prefix = new(string)
-		*prefix = "G"
 		scn.FlopRateScalar = schema.MetricValue{
-			Unit:  schema.Unit{Base: "F/s", Prefix: prefix},
+			Unit:  schema.Unit{Base: "F/s", Prefix: "G"},
 			Value: float64(sco.FlopRateScalar)}
 		scn.FlopRateSimd = schema.MetricValue{
-			Unit:  schema.Unit{Base: "F/s", Prefix: prefix},
+			Unit:  schema.Unit{Base: "F/s", Prefix: "G"},
 			Value: float64(sco.FlopRateSimd)}
 		scn.MemoryBandwidth = schema.MetricValue{
-			Unit:  schema.Unit{Base: "B/s", Prefix: prefix},
+			Unit:  schema.Unit{Base: "B/s", Prefix: "G"},
 			Value: float64(sco.MemoryBandwidth)}
 		scn.Topology = *sco.Topology
 		cn.SubClusters = append(cn.SubClusters, &scn)