Merge branch 'master' into 97_107_mark_and_show_shared
@@ -18,6 +18,10 @@ const Version uint64 = 1

 type ArchiveBackend interface {
 	Init(rawConfig json.RawMessage) (uint64, error)
+
+	Info()
+
 	Exists(job *schema.Job) bool

 	LoadJobMeta(job *schema.Job) (*schema.JobMeta, error)

 	LoadJobData(job *schema.Job) (schema.JobData, error)

@@ -30,6 +34,14 @@ type ArchiveBackend interface {

 	GetClusters() []string

+	CleanUp(jobs []*schema.Job)
+
+	Move(jobs []*schema.Job, path string)
+
+	Clean(before int64, after int64)
+
+	Compress(jobs []*schema.Job)
+
 	Iter(loadMetricData bool) <-chan JobContainer
 }

@@ -44,21 +56,23 @@ var useArchive bool

 func Init(rawConfig json.RawMessage, disableArchive bool) error {
 	useArchive = !disableArchive

-	var kind struct {
+	var cfg struct {
 		Kind string `json:"kind"`
 	}
-	if err := json.Unmarshal(rawConfig, &kind); err != nil {
+	if err := json.Unmarshal(rawConfig, &cfg); err != nil {
 		log.Warn("Error while unmarshaling raw config json")
 		return err
 	}

-	switch kind.Kind {
+	switch cfg.Kind {
 	case "file":
 		ar = &FsArchive{}
 	// case "s3":
 	// 	ar = &S3Archive{}
 	default:
-		return fmt.Errorf("ARCHIVE/ARCHIVE > unknown archive backend '%s'", kind.Kind)
+		return fmt.Errorf("ARCHIVE/ARCHIVE > unknown archive backend '%s'", cfg.Kind)
 	}

 	version, err := ar.Init(rawConfig)
@@ -67,6 +81,7 @@ func Init(rawConfig json.RawMessage, disableArchive bool) error {
 		return err
 	}
+	log.Infof("Load archive version %d", version)

 	return initClusterConfig()
 }
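For orientation, a minimal sketch of how a caller might drive this interface, using archive.Init and the archive.GetHandle() accessor exercised in the test file below. The config literal and job values are taken from the test data and are illustrative only:

```go
package main

import (
	"encoding/json"
	"log"
	"time"

	"github.com/ClusterCockpit/cc-backend/pkg/archive"
	"github.com/ClusterCockpit/cc-backend/pkg/schema"
)

func main() {
	// Select the file backend; the raw config is passed through to ar.Init().
	cfg := json.RawMessage(`{"kind": "file", "path": "./var/job-archive"}`)
	if err := archive.Init(cfg, false); err != nil {
		log.Fatal(err)
	}

	ar := archive.GetHandle()

	// A job is identified by the triple that also determines its directory.
	job := &schema.Job{}
	job.JobID = 1403244
	job.Cluster = "emmy"
	job.StartTime = time.Unix(1608923076, 0)

	if ar.Exists(job) {
		meta, err := ar.LoadJobMeta(job)
		if err != nil {
			log.Fatal(err)
		}
		log.Printf("job state: %s", meta.State)
	}
}
```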
pkg/archive/archive_test.go (new file, 69 lines)
@@ -0,0 +1,69 @@
// Copyright (C) 2022 NHR@FAU, University Erlangen-Nuremberg.
// All rights reserved.
// Use of this source code is governed by a MIT-style
// license that can be found in the LICENSE file.
package archive_test

import (
	"encoding/json"
	"fmt"
	"path/filepath"
	"testing"
	"time"

	"github.com/ClusterCockpit/cc-backend/internal/util"
	"github.com/ClusterCockpit/cc-backend/pkg/archive"
	"github.com/ClusterCockpit/cc-backend/pkg/schema"
)

var jobs []*schema.Job

func setup(t *testing.T) archive.ArchiveBackend {
	tmpdir := t.TempDir()
	jobarchive := filepath.Join(tmpdir, "job-archive")
	util.CopyDir("./testdata/archive/", jobarchive)
	archiveCfg := fmt.Sprintf("{\"kind\": \"file\",\"path\": \"%s\"}", jobarchive)

	if err := archive.Init(json.RawMessage(archiveCfg), false); err != nil {
		t.Fatal(err)
	}

	jobs = make([]*schema.Job, 2)
	jobs[0] = &schema.Job{}
	jobs[0].JobID = 1403244
	jobs[0].Cluster = "emmy"
	jobs[0].StartTime = time.Unix(1608923076, 0)

	jobs[1] = &schema.Job{}
	jobs[1].JobID = 1404397
	jobs[1].Cluster = "emmy"
	jobs[1].StartTime = time.Unix(1609300556, 0)

	return archive.GetHandle()
}

func TestCleanUp(t *testing.T) {
	a := setup(t)
	if !a.Exists(jobs[0]) {
		t.Error("Job does not exist")
	}

	a.CleanUp(jobs)

	if a.Exists(jobs[0]) || a.Exists(jobs[1]) {
		t.Error("Jobs still exist")
	}
}

// func TestCompress(t *testing.T) {
// 	a := setup(t)
// 	if !a.Exists(jobs[0]) {
// 		t.Error("Job does not exist")
// 	}
//
// 	a.Compress(jobs)
//
// 	if a.Exists(jobs[0]) || a.Exists(jobs[1]) {
// 		t.Error("Jobs still exist")
// 	}
// }
@@ -11,14 +11,17 @@ import (
 	"encoding/json"
 	"errors"
 	"fmt"
+	"math"
 	"os"
 	"path"
 	"path/filepath"
 	"strconv"
 	"strings"
+	"text/tabwriter"
 	"time"

 	"github.com/ClusterCockpit/cc-backend/internal/config"
+	"github.com/ClusterCockpit/cc-backend/internal/util"
 	"github.com/ClusterCockpit/cc-backend/pkg/log"
 	"github.com/ClusterCockpit/cc-backend/pkg/schema"
 	"github.com/santhosh-tekuri/jsonschema/v5"
@@ -33,9 +36,17 @@ type FsArchive struct {
 	clusters []string
 }

-func checkFileExists(filePath string) bool {
-	_, err := os.Stat(filePath)
-	return !errors.Is(err, os.ErrNotExist)
+func getDirectory(
+	job *schema.Job,
+	rootPath string,
+) string {
+	lvl1, lvl2 := fmt.Sprintf("%d", job.JobID/1000), fmt.Sprintf("%03d", job.JobID%1000)
+
+	return filepath.Join(
+		rootPath,
+		job.Cluster,
+		lvl1, lvl2,
+		strconv.FormatInt(job.StartTime.Unix(), 10))
 }

 func getPath(
@@ -43,12 +54,8 @@ func getPath(
 	rootPath string,
 	file string) string {

-	lvl1, lvl2 := fmt.Sprintf("%d", job.JobID/1000), fmt.Sprintf("%03d", job.JobID%1000)
-	return filepath.Join(
-		rootPath,
-		job.Cluster,
-		lvl1, lvl2,
-		strconv.FormatInt(job.StartTime.Unix(), 10), file)
+	return filepath.Join(
+		getDirectory(job, rootPath), file)
 }

 func loadJobMeta(filename string) (*schema.JobMeta, error) {
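As a concrete reading of the new getDirectory helper: a job's directory is derived from its cluster, its job ID split at 1000, and its unix start time. A self-contained sketch re-deriving the path for the first test job (the root path is illustrative):

```go
package main

import (
	"fmt"
	"path/filepath"
	"strconv"
)

func main() {
	// Layout rule: <root>/<cluster>/<jobID/1000>/<jobID%1000, zero-padded>/<startTime>
	jobID, cluster, startTime := int64(1403244), "emmy", int64(1608923076)
	lvl1, lvl2 := fmt.Sprintf("%d", jobID/1000), fmt.Sprintf("%03d", jobID%1000)
	fmt.Println(filepath.Join("/var/job-archive", cluster, lvl1, lvl2,
		strconv.FormatInt(startTime, 10)))
	// Output: /var/job-archive/emmy/1403/244/1608923076
}
```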
@@ -74,6 +81,7 @@ func loadJobData(filename string, isCompressed bool) (schema.JobData, error) {
 		log.Errorf("fsBackend LoadJobData()- %v", err)
 		return nil, err
 	}
+	defer f.Close()

 	if isCompressed {
 		r, err := gzip.NewReader(f)
@@ -91,7 +99,6 @@ func loadJobData(filename string, isCompressed bool) (schema.JobData, error) {

 		return DecodeJobData(r, filename)
 	} else {
-		defer f.Close()
 		if config.Keys.Validate {
 			if err := schema.Validate(schema.Data, bufio.NewReader(f)); err != nil {
 				return schema.JobData{}, fmt.Errorf("validate job data: %v", err)
@@ -147,10 +154,205 @@ func (fsa *FsArchive) Init(rawConfig json.RawMessage) (uint64, error) {
 	return version, nil
 }

+type clusterInfo struct {
+	numJobs   int
+	dateFirst int64
+	dateLast  int64
+	diskSize  float64
+}
+
+func (fsa *FsArchive) Info() {
+	fmt.Printf("Job archive %s\n", fsa.path)
+	clusters, err := os.ReadDir(fsa.path)
+	if err != nil {
+		log.Fatalf("Reading clusters failed: %s", err.Error())
+	}
+
+	ci := make(map[string]*clusterInfo)
+
+	for _, cluster := range clusters {
+		if !cluster.IsDir() {
+			continue
+		}
+
+		cc := cluster.Name()
+		ci[cc] = &clusterInfo{dateFirst: time.Now().Unix()}
+		lvl1Dirs, err := os.ReadDir(filepath.Join(fsa.path, cluster.Name()))
+		if err != nil {
+			log.Fatalf("Reading jobs failed @ lvl1 dirs: %s", err.Error())
+		}
+
+		for _, lvl1Dir := range lvl1Dirs {
+			if !lvl1Dir.IsDir() {
+				continue
+			}
+			lvl2Dirs, err := os.ReadDir(filepath.Join(fsa.path, cluster.Name(), lvl1Dir.Name()))
+			if err != nil {
+				log.Fatalf("Reading jobs failed @ lvl2 dirs: %s", err.Error())
+			}
+
+			for _, lvl2Dir := range lvl2Dirs {
+				dirpath := filepath.Join(fsa.path, cluster.Name(), lvl1Dir.Name(), lvl2Dir.Name())
+				startTimeDirs, err := os.ReadDir(dirpath)
+				if err != nil {
+					log.Fatalf("Reading jobs failed @ starttime dirs: %s", err.Error())
+				}
+
+				for _, startTimeDir := range startTimeDirs {
+					if startTimeDir.IsDir() {
+						ci[cc].numJobs++
+						startTime, err := strconv.ParseInt(startTimeDir.Name(), 10, 64)
+						if err != nil {
+							log.Fatalf("Cannot parse starttime: %s", err.Error())
+						}
+						ci[cc].dateFirst = util.Min(ci[cc].dateFirst, startTime)
+						ci[cc].dateLast = util.Max(ci[cc].dateLast, startTime)
+						ci[cc].diskSize += util.DiskUsage(filepath.Join(dirpath, startTimeDir.Name()))
+					}
+				}
+			}
+		}
+	}
+
+	cit := clusterInfo{dateFirst: time.Now().Unix()}
+	w := tabwriter.NewWriter(os.Stdout, 0, 0, 2, ' ', tabwriter.Debug)
+	fmt.Fprintln(w, "cluster\t#jobs\tfrom\tto\tdu (MB)")
+	for cluster, clusterInfo := range ci {
+		fmt.Fprintf(w, "%s\t%d\t%s\t%s\t%.2f\n", cluster,
+			clusterInfo.numJobs,
+			time.Unix(clusterInfo.dateFirst, 0),
+			time.Unix(clusterInfo.dateLast, 0),
+			clusterInfo.diskSize)
+
+		cit.numJobs += clusterInfo.numJobs
+		cit.dateFirst = util.Min(cit.dateFirst, clusterInfo.dateFirst)
+		cit.dateLast = util.Max(cit.dateLast, clusterInfo.dateLast)
+		cit.diskSize += clusterInfo.diskSize
+	}
+
+	fmt.Fprintf(w, "TOTAL\t%d\t%s\t%s\t%.2f\n",
+		cit.numJobs, time.Unix(cit.dateFirst, 0), time.Unix(cit.dateLast, 0), cit.diskSize)
+	w.Flush()
+}
+
+func (fsa *FsArchive) Exists(job *schema.Job) bool {
+	dir := getDirectory(job, fsa.path)
+	_, err := os.Stat(dir)
+	return !errors.Is(err, os.ErrNotExist)
+}
+
+func (fsa *FsArchive) Clean(before int64, after int64) {
+
+	if after == 0 {
+		after = math.MaxInt64
+	}
+
+	clusters, err := os.ReadDir(fsa.path)
+	if err != nil {
+		log.Fatalf("Reading clusters failed: %s", err.Error())
+	}
+
+	for _, cluster := range clusters {
+		if !cluster.IsDir() {
+			continue
+		}
+
+		lvl1Dirs, err := os.ReadDir(filepath.Join(fsa.path, cluster.Name()))
+		if err != nil {
+			log.Fatalf("Reading jobs failed @ lvl1 dirs: %s", err.Error())
+		}
+
+		for _, lvl1Dir := range lvl1Dirs {
+			if !lvl1Dir.IsDir() {
+				continue
+			}
+			lvl2Dirs, err := os.ReadDir(filepath.Join(fsa.path, cluster.Name(), lvl1Dir.Name()))
+			if err != nil {
+				log.Fatalf("Reading jobs failed @ lvl2 dirs: %s", err.Error())
+			}
+
+			for _, lvl2Dir := range lvl2Dirs {
+				dirpath := filepath.Join(fsa.path, cluster.Name(), lvl1Dir.Name(), lvl2Dir.Name())
+				startTimeDirs, err := os.ReadDir(dirpath)
+				if err != nil {
+					log.Fatalf("Reading jobs failed @ starttime dirs: %s", err.Error())
+				}
+
+				for _, startTimeDir := range startTimeDirs {
+					if startTimeDir.IsDir() {
+						startTime, err := strconv.ParseInt(startTimeDir.Name(), 10, 64)
+						if err != nil {
+							log.Fatalf("Cannot parse starttime: %s", err.Error())
+						}
+
+						if startTime < before || startTime > after {
+							if err := os.RemoveAll(filepath.Join(dirpath, startTimeDir.Name())); err != nil {
+								log.Errorf("JobArchive Clean() error: %v", err)
+							}
+						}
+					}
+				}
+				if util.GetFilecount(dirpath) == 0 {
+					if err := os.Remove(dirpath); err != nil {
+						log.Errorf("JobArchive Clean() error: %v", err)
+					}
+				}
+			}
+		}
+	}
+}
+
+func (fsa *FsArchive) Move(jobs []*schema.Job, path string) {
+	for _, job := range jobs {
+		source := getDirectory(job, fsa.path)
+		target := getDirectory(job, path)
+
+		if err := os.MkdirAll(filepath.Clean(filepath.Join(target, "..")), 0777); err != nil {
+			log.Errorf("JobArchive Move MkDir error: %v", err)
+		}
+		if err := os.Rename(source, target); err != nil {
+			log.Errorf("JobArchive Move() error: %v", err)
+		}
+
+		parent := filepath.Clean(filepath.Join(source, ".."))
+		if util.GetFilecount(parent) == 0 {
+			if err := os.Remove(parent); err != nil {
+				log.Errorf("JobArchive Move() error: %v", err)
+			}
+		}
+	}
+}
+
+func (fsa *FsArchive) CleanUp(jobs []*schema.Job) {
+	for _, job := range jobs {
+		dir := getDirectory(job, fsa.path)
+		if err := os.RemoveAll(dir); err != nil {
+			log.Errorf("JobArchive CleanUp() error: %v", err)
+		}
+
+		parent := filepath.Clean(filepath.Join(dir, ".."))
+		if util.GetFilecount(parent) == 0 {
+			if err := os.Remove(parent); err != nil {
+				log.Errorf("JobArchive CleanUp() error: %v", err)
+			}
+		}
+	}
+}
+
+func (fsa *FsArchive) Compress(jobs []*schema.Job) {
+	for _, job := range jobs {
+		fileIn := getPath(job, fsa.path, "data.json")
+		// Only compress files that exist and are larger than 2000 bytes.
+		if util.CheckFileExists(fileIn) && util.GetFilesize(fileIn) > 2000 {
+			util.CompressFile(fileIn, getPath(job, fsa.path, "data.json.gz"))
+		}
+	}
+}
 func (fsa *FsArchive) LoadJobData(job *schema.Job) (schema.JobData, error) {
 	var isCompressed bool = true
 	filename := getPath(job, fsa.path, "data.json.gz")

-	if !checkFileExists(filename) {
+	if !util.CheckFileExists(filename) {
 		filename = getPath(job, fsa.path, "data.json")
 		isCompressed = false
 	}
@@ -159,7 +361,6 @@ func (fsa *FsArchive) LoadJobData(job *schema.Job) (schema.JobData, error) {
 }

 func (fsa *FsArchive) LoadJobMeta(job *schema.Job) (*schema.JobMeta, error) {
-
 	filename := getPath(job, fsa.path, "meta.json")
 	return loadJobMeta(filename)
 }
@@ -226,7 +427,7 @@ func (fsa *FsArchive) Iter(loadMetricData bool) <-chan JobContainer {
 	var isCompressed bool = true
 	filename := filepath.Join(dirpath, startTimeDir.Name(), "data.json.gz")

-	if !checkFileExists(filename) {
+	if !util.CheckFileExists(filename) {
 		filename = filepath.Join(dirpath, startTimeDir.Name(), "data.json")
 		isCompressed = false
 	}
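For completeness, a minimal sketch of draining the Iter channel. It assumes the archive was initialized as shown earlier and treats JobContainer as opaque, since its fields are not part of this diff:

```go
package main

import (
	"encoding/json"
	"fmt"
	"log"

	"github.com/ClusterCockpit/cc-backend/pkg/archive"
)

func main() {
	cfg := json.RawMessage(`{"kind": "file", "path": "./var/job-archive"}`)
	if err := archive.Init(cfg, false); err != nil {
		log.Fatal(err)
	}

	// Pass true instead to also decode each job's metric data while iterating.
	count := 0
	for range archive.GetHandle().Iter(false) {
		count++
	}
	fmt.Printf("archive holds %d jobs\n", count)
}
```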
@@ -7,20 +7,17 @@ package archive

 import (
 	"encoding/json"
 	"fmt"
 	"path/filepath"
 	"testing"
 	"time"

-	"github.com/ClusterCockpit/cc-backend/pkg/log"
+	"github.com/ClusterCockpit/cc-backend/internal/util"
 	"github.com/ClusterCockpit/cc-backend/pkg/schema"
 )

-func init() {
-	log.Init("info", true)
-}
-
 func TestInitEmptyPath(t *testing.T) {
 	var fsa FsArchive
-	_, err := fsa.Init(json.RawMessage("{\"kind\":\"../../test/archive\"}"))
+	_, err := fsa.Init(json.RawMessage("{\"kind\":\"testdata/archive\"}"))
 	if err == nil {
 		t.Fatal(err)
 	}
@@ -28,14 +25,14 @@ func TestInitEmptyPath(t *testing.T) {

 func TestInitNoJson(t *testing.T) {
 	var fsa FsArchive
-	_, err := fsa.Init(json.RawMessage("\"path\":\"../../test/archive\"}"))
+	_, err := fsa.Init(json.RawMessage("\"path\":\"testdata/archive\"}"))
 	if err == nil {
 		t.Fatal(err)
 	}
 }
 func TestInitNotExists(t *testing.T) {
 	var fsa FsArchive
-	_, err := fsa.Init(json.RawMessage("{\"path\":\"../../test/job-archive\"}"))
+	_, err := fsa.Init(json.RawMessage("{\"path\":\"testdata/job-archive\"}"))
 	if err == nil {
 		t.Fatal(err)
 	}
@@ -43,11 +40,11 @@ func TestInitNotExists(t *testing.T) {

 func TestInit(t *testing.T) {
 	var fsa FsArchive
-	version, err := fsa.Init(json.RawMessage("{\"path\":\"../../test/archive\"}"))
+	version, err := fsa.Init(json.RawMessage("{\"path\":\"testdata/archive\"}"))
 	if err != nil {
 		t.Fatal(err)
 	}
-	if fsa.path != "../../test/archive" {
+	if fsa.path != "testdata/archive" {
 		t.Fail()
 	}
 	if version != 1 {
@@ -60,12 +57,12 @@ func TestInit(t *testing.T) {

 func TestLoadJobMetaInternal(t *testing.T) {
 	var fsa FsArchive
-	_, err := fsa.Init(json.RawMessage("{\"path\":\"../../test/archive\"}"))
+	_, err := fsa.Init(json.RawMessage("{\"path\":\"testdata/archive\"}"))
 	if err != nil {
 		t.Fatal(err)
 	}

-	job, err := loadJobMeta("../../test/archive/emmy/1404/397/1609300556/meta.json")
+	job, err := loadJobMeta("testdata/archive/emmy/1404/397/1609300556/meta.json")
 	if err != nil {
 		t.Fatal(err)
 	}
@@ -83,7 +80,7 @@ func TestLoadJobMetaInternal(t *testing.T) {

 func TestLoadJobMeta(t *testing.T) {
 	var fsa FsArchive
-	_, err := fsa.Init(json.RawMessage("{\"path\":\"../../test/archive\"}"))
+	_, err := fsa.Init(json.RawMessage("{\"path\":\"testdata/archive\"}"))
 	if err != nil {
 		t.Fatal(err)
 	}
@@ -111,7 +108,7 @@ func TestLoadJobMeta(t *testing.T) {

 func TestLoadJobData(t *testing.T) {
 	var fsa FsArchive
-	_, err := fsa.Init(json.RawMessage("{\"path\":\"../../test/archive\"}"))
+	_, err := fsa.Init(json.RawMessage("{\"path\": \"testdata/archive\"}"))
 	if err != nil {
 		t.Fatal(err)
 	}
@@ -126,8 +123,8 @@ func TestLoadJobData(t *testing.T) {
 		t.Fatal(err)
 	}

-	for name, scopes := range data {
-		fmt.Printf("Metric name: %s\n", name)
+	for _, scopes := range data {
+		// fmt.Printf("Metric name: %s\n", name)

 		if _, exists := scopes[schema.MetricScopeNode]; !exists {
 			t.Fail()
@@ -135,9 +132,54 @@ func TestLoadJobData(t *testing.T) {
 	}
 }

+func BenchmarkLoadJobData(b *testing.B) {
+
+	tmpdir := b.TempDir()
+	jobarchive := filepath.Join(tmpdir, "job-archive")
+	util.CopyDir("./testdata/archive/", jobarchive)
+	archiveCfg := fmt.Sprintf("{\"path\": \"%s\"}", jobarchive)
+
+	var fsa FsArchive
+	fsa.Init(json.RawMessage(archiveCfg))
+
+	jobIn := schema.Job{BaseJob: schema.JobDefaults}
+	jobIn.StartTime = time.Unix(1608923076, 0)
+	jobIn.JobID = 1403244
+	jobIn.Cluster = "emmy"
+
+	util.UncompressFile(filepath.Join(jobarchive, "emmy/1403/244/1608923076/data.json.gz"),
+		filepath.Join(jobarchive, "emmy/1403/244/1608923076/data.json"))
+
+	b.ResetTimer()
+	for i := 0; i < b.N; i++ {
+		fsa.LoadJobData(&jobIn)
+	}
+}
+
+func BenchmarkLoadJobDataCompressed(b *testing.B) {
+
+	tmpdir := b.TempDir()
+	jobarchive := filepath.Join(tmpdir, "job-archive")
+	util.CopyDir("./testdata/archive/", jobarchive)
+	archiveCfg := fmt.Sprintf("{\"path\": \"%s\"}", jobarchive)
+
+	var fsa FsArchive
+	fsa.Init(json.RawMessage(archiveCfg))
+
+	jobIn := schema.Job{BaseJob: schema.JobDefaults}
+	jobIn.StartTime = time.Unix(1608923076, 0)
+	jobIn.JobID = 1403244
+	jobIn.Cluster = "emmy"
+
+	b.ResetTimer()
+	for i := 0; i < b.N; i++ {
+		fsa.LoadJobData(&jobIn)
+	}
+}
+
 func TestLoadCluster(t *testing.T) {
 	var fsa FsArchive
-	_, err := fsa.Init(json.RawMessage("{\"path\":\"../../test/archive\"}"))
+	_, err := fsa.Init(json.RawMessage("{\"path\":\"testdata/archive\"}"))
 	if err != nil {
 		t.Fatal(err)
 	}
@@ -154,7 +196,7 @@ func TestLoadCluster(t *testing.T) {

 func TestIter(t *testing.T) {
 	var fsa FsArchive
-	_, err := fsa.Init(json.RawMessage("{\"path\":\"../../test/archive\"}"))
+	_, err := fsa.Init(json.RawMessage("{\"path\":\"testdata/archive\"}"))
 	if err != nil {
 		t.Fatal(err)
 	}
pkg/archive/testdata/archive/emmy/1403/244/1608923076/data.json.gz (vendored, new file)
Binary file not shown.
pkg/archive/testdata/archive/emmy/1403/244/1608923076/meta.json (vendored, new file, 194 lines)
@@ -0,0 +1,194 @@
{
  "exclusive": 1,
  "jobId": 1403244,
  "statistics": {
    "mem_bw": { "avg": 63.57, "min": 0, "unit": { "base": "B/s", "prefix": "G" }, "max": 74.5 },
    "rapl_power": { "avg": 228.07, "min": 0, "unit": { "base": "W" }, "max": 258.56 },
    "ipc": { "unit": { "base": "IPC" }, "max": 0.510204081632653, "avg": 1.53846153846154, "min": 0.0 },
    "clock": { "min": 1380.32, "avg": 2599.39, "unit": { "base": "Hz", "prefix": "M" }, "max": 2634.46 },
    "cpu_load": { "avg": 18.4, "min": 0, "max": 23.58, "unit": { "base": "load" } },
    "flops_any": { "max": 404.62, "unit": { "base": "F/s", "prefix": "G" }, "avg": 225.59, "min": 0 },
    "flops_dp": { "max": 0.24, "unit": { "base": "F/s", "prefix": "G" }, "min": 0, "avg": 0 },
    "mem_used": { "min": 1.55, "avg": 27.84, "unit": { "base": "B", "prefix": "G" }, "max": 37.5 },
    "flops_sp": { "min": 0, "avg": 225.59, "max": 404.62, "unit": { "base": "F/s", "prefix": "G" } }
  },
  "resources": [
    { "hostname": "e0102" }, { "hostname": "e0103" }, { "hostname": "e0105" }, { "hostname": "e0106" },
    { "hostname": "e0107" }, { "hostname": "e0108" }, { "hostname": "e0114" }, { "hostname": "e0320" },
    { "hostname": "e0321" }, { "hostname": "e0325" }, { "hostname": "e0404" }, { "hostname": "e0415" },
    { "hostname": "e0433" }, { "hostname": "e0437" }, { "hostname": "e0439" }, { "hostname": "e0501" },
    { "hostname": "e0503" }, { "hostname": "e0505" }, { "hostname": "e0506" }, { "hostname": "e0512" },
    { "hostname": "e0513" }, { "hostname": "e0514" }, { "hostname": "e0653" }, { "hostname": "e0701" },
    { "hostname": "e0716" }, { "hostname": "e0727" }, { "hostname": "e0728" }, { "hostname": "e0925" },
    { "hostname": "e0926" }, { "hostname": "e0929" }, { "hostname": "e0934" }, { "hostname": "e0951" }
  ],
  "walltime": 10,
  "jobState": "completed",
  "cluster": "emmy",
  "subCluster": "haswell",
  "stopTime": 1609009562,
  "user": "emmyUser6",
  "startTime": 1608923076,
  "partition": "work",
  "tags": [],
  "project": "no project",
  "numNodes": 32,
  "duration": 86486
}
pkg/archive/testdata/archive/emmy/1404/397/1609300556/data.json.gz (vendored, new file)
Binary file not shown.
pkg/archive/testdata/archive/emmy/1404/397/1609300556/meta.json (vendored, new file, 194 lines)
@@ -0,0 +1,194 @@
{
  "stopTime": 1609387081,
  "resources": [
    { "hostname": "e0151" }, { "hostname": "e0152" }, { "hostname": "e0153" }, { "hostname": "e0232" },
    { "hostname": "e0303" }, { "hostname": "e0314" }, { "hostname": "e0344" }, { "hostname": "e0345" },
    { "hostname": "e0348" }, { "hostname": "e0507" }, { "hostname": "e0518" }, { "hostname": "e0520" },
    { "hostname": "e0522" }, { "hostname": "e0526" }, { "hostname": "e0527" }, { "hostname": "e0528" },
    { "hostname": "e0530" }, { "hostname": "e0551" }, { "hostname": "e0604" }, { "hostname": "e0613" },
    { "hostname": "e0634" }, { "hostname": "e0639" }, { "hostname": "e0640" }, { "hostname": "e0651" },
    { "hostname": "e0653" }, { "hostname": "e0701" }, { "hostname": "e0704" }, { "hostname": "e0751" },
    { "hostname": "e0809" }, { "hostname": "e0814" }, { "hostname": "e0819" }, { "hostname": "e0908" }
  ],
  "walltime": 10,
  "cluster": "emmy",
  "subCluster": "haswell",
  "jobState": "completed",
  "statistics": {
    "clock": { "max": 2634.9, "unit": { "base": "Hz", "prefix": "M" }, "min": 0, "avg": 2597.8 },
    "cpu_load": { "max": 27.41, "min": 0, "avg": 18.39, "unit": { "base": "load" } },
    "mem_bw": { "min": 0, "avg": 63.23, "unit": { "base": "B/s", "prefix": "G" }, "max": 75.06 },
    "ipc": { "min": 0.0, "avg": 1.53846153846154, "unit": { "base": "IPC" }, "max": 0.490196078431373 },
    "rapl_power": { "min": 0, "avg": 227.32, "unit": { "base": "W" }, "max": 256.22 },
    "mem_used": { "min": 1.5, "avg": 27.77, "unit": { "base": "B", "prefix": "G" }, "max": 37.43 },
    "flops_sp": { "unit": { "base": "F/s", "prefix": "G" }, "max": 413.21, "min": 0, "avg": 224.41 },
    "flops_dp": { "max": 5.72, "unit": { "base": "F/s", "prefix": "G" }, "min": 0, "avg": 0 },
    "flops_any": { "min": 0, "avg": 224.42, "max": 413.21, "unit": { "base": "F/s", "prefix": "G" } }
  },
  "exclusive": 1,
  "jobId": 1404397,
  "tags": [],
  "partition": "work",
  "project": "no project",
  "user": "emmyUser6",
  "startTime": 1609300556,
  "duration": 86525,
  "numNodes": 32
}
pkg/archive/testdata/archive/emmy/cluster.json (vendored, new file, 974 lines)
@@ -0,0 +1,974 @@
{
  "name": "emmy",
  "subClusters": [
    {
      "name": "haswell",
      "processorType": "Intel Xeon E3-1240 v3",
      "socketsPerNode": 1,
      "coresPerSocket": 4,
      "threadsPerCore": 1,
      "flopRateScalar": { "unit": { "prefix": "G", "base": "F/s" }, "value": 14 },
      "flopRateSimd": { "unit": { "prefix": "G", "base": "F/s" }, "value": 112 },
      "memoryBandwidth": { "unit": { "prefix": "G", "base": "B/s" }, "value": 24 },
      "nodes": "w11[27-45,49-63,69-72]",
      "topology": {
        "node": [0, 1, 2, 3],
        "socket": [[0, 1, 2, 3]],
        "memoryDomain": [[0, 1, 2, 3]],
        "core": [[0], [1], [2], [3]]
      }
    },
    {
      "name": "skylake",
      "processorType": "Intel Xeon E3-1240 v5 ",
      "socketsPerNode": 1,
      "coresPerSocket": 4,
      "threadsPerCore": 1,
      "flopRateScalar": { "unit": { "prefix": "G", "base": "F/s" }, "value": 14 },
      "flopRateSimd": { "unit": { "prefix": "G", "base": "F/s" }, "value": 112 },
      "memoryBandwidth": { "unit": { "prefix": "G", "base": "B/s" }, "value": 64 },
      "nodes": "w12[01-08],w13[01-31,33-56]",
      "topology": {
        "node": [0, 1, 2, 3],
        "socket": [[0, 1, 2, 3]],
        "memoryDomain": [[0, 1, 2, 3]],
        "core": [[0], [1], [2], [3]]
      }
    },
    {
      "name": "kabylake",
      "processorType": "Intel Xeon E3-1240 v6",
      "socketsPerNode": 1,
      "coresPerSocket": 4,
      "threadsPerCore": 1,
      "flopRateScalar": { "unit": { "prefix": "G", "base": "F/s" }, "value": 14 },
      "flopRateSimd": { "unit": { "prefix": "G", "base": "F/s" }, "value": 112 },
      "memoryBandwidth": { "unit": { "prefix": "G", "base": "B/s" }, "value": 24 },
      "nodes": "w14[01-56],w15[01-05,07-56]",
      "topology": {
        "node": [0, 1, 2, 3],
        "socket": [[0, 1, 2, 3]],
        "memoryDomain": [[0, 1, 2, 3]],
        "core": [[0], [1], [2], [3]]
      }
    },
    {
      "name": "icelake",
      "processorType": "Intel Xeon Gold 6326",
      "socketsPerNode": 2,
      "coresPerSocket": 16,
      "threadsPerCore": 1,
      "flopRateScalar": { "unit": { "prefix": "G", "base": "F/s" }, "value": 432 },
      "flopRateSimd": { "unit": { "prefix": "G", "base": "F/s" }, "value": 9216 },
      "memoryBandwidth": { "unit": { "prefix": "G", "base": "B/s" }, "value": 350 },
      "nodes": "w22[01-35],w23[01-35]",
      "topology": {
        "node": [
          0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17,
          18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35,
          36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53,
          54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71
        ],
        "socket": [
          [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17,
           18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35],
          [36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53,
           54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71]
        ],
        "memoryDomain": [
          [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17],
          [18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35],
          [36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53],
          [54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71]
        ],
        "core": [
          [0], [1], [2], [3], [4], [5], [6], [7], [8], [9], [10], [11],
          [12], [13], [14], [15], [16], [17], [18], [19], [20], [21], [22], [23],
          [24], [25], [26], [27], [28], [29], [30], [31], [32], [33], [34], [35],
          [36], [37], [38], [39], [40], [41], [42], [43], [44], [45], [46], [47],
          [48], [49], [50], [51], [52], [53], [54], [55], [56], [57], [58], [59],
          [60], [61], [62], [63], [64], [65], [66], [67], [68], [69], [70], [71]
        ]
      }
    }
  ],
  "metricConfig": [
    {
      "name": "cpu_load", "scope": "node", "unit": { "base": "" },
      "aggregation": "avg", "timestep": 60,
      "peak": 4, "normal": 4, "caution": 4, "alert": 1,
      "subClusters": [
        { "name": "icelake", "peak": 32, "normal": 32, "caution": 16, "alert": 1 }
      ]
    },
    {
      "name": "cpu_user", "scope": "hwthread", "unit": { "base": "" },
      "aggregation": "avg", "timestep": 60,
      "peak": 100, "normal": 50, "caution": 20, "alert": 10,
      "subClusters": [
        { "name": "haswell", "remove": true },
        { "name": "skylake", "remove": true }
      ]
    },
    {
      "name": "ipc", "scope": "hwthread", "unit": { "base": "IPC" },
      "aggregation": "avg", "timestep": 60,
      "peak": 4, "normal": 2, "caution": 1, "alert": 0.25
    },
    {
      "name": "mem_used", "scope": "node", "unit": { "prefix": "G", "base": "B" },
      "aggregation": "sum", "timestep": 60,
      "peak": 32, "normal": 16, "caution": 28, "alert": 30,
      "subClusters": [
        { "name": "icelake", "peak": 256, "normal": 128, "caution": 245, "alert": 255 }
      ]
    },
    {
      "name": "flops_any", "scope": "hwthread", "unit": { "prefix": "G", "base": "F/s" },
      "aggregation": "sum", "timestep": 60,
      "peak": 112, "normal": 50, "caution": 20, "alert": 10,
      "subClusters": [
        { "name": "icelake", "peak": 9216, "normal": 432, "caution": 100, "alert": 50 }
      ]
    },
    {
      "name": "flops_dp", "scope": "hwthread", "unit": { "prefix": "G", "base": "F/s" },
      "aggregation": "sum", "timestep": 60,
      "peak": 56, "normal": 30, "caution": 15, "alert": 5,
      "subClusters": [
        { "name": "icelake", "peak": 4108, "normal": 220, "caution": 60, "alert": 30 }
      ]
    },
    {
      "name": "flops_sp", "scope": "hwthread", "unit": { "prefix": "G", "base": "F/s" },
      "aggregation": "sum", "timestep": 60,
      "peak": 112, "normal": 50, "caution": 20, "alert": 10,
      "subClusters": [
        { "name": "icelake", "peak": 9216, "normal": 432, "caution": 100, "alert": 50 }
      ]
    },
    {
      "name": "mem_bw", "scope": "socket", "unit": { "prefix": "G", "base": "B/s" },
      "aggregation": "sum", "timestep": 60,
      "peak": 24, "normal": 10, "caution": 5, "alert": 2,
      "subClusters": [
        { "name": "icelake", "peak": 350, "normal": 100, "caution": 50, "alert": 25 }
      ]
    },
    {
      "name": "clock", "scope": "hwthread", "unit": { "prefix": "M", "base": "Hz" },
      "aggregation": "avg", "timestep": 60,
      "peak": 2900, "normal": 2900, "caution": 1500, "alert": 1200,
      "subClusters": [
        { "name": "haswell", "peak": 3500, "normal": 3500, "caution": 1500, "alert": 1200 },
        { "name": "skylake", "peak": 3500, "normal": 3500, "caution": 1500, "alert": 1200 },
        { "name": "kabylake", "peak": 3700, "normal": 3700, "caution": 1500, "alert": 1200 }
      ]
    },
    {
      "name": "vectorization_ratio", "scope": "hwthread", "unit": { "base": "" },
      "aggregation": "avg", "timestep": 60,
      "peak": 100, "normal": 60, "caution": 40, "alert": 10
    },
    {
      "name": "nfs4_read", "scope": "node", "unit": { "prefix": "M", "base": "B/s" },
      "aggregation": "sum", "timestep": 60,
      "peak": 6, "normal": 4, "caution": 2, "alert": 1,
      "subClusters": [ { "name": "haswell", "remove": true } ]
    },
    {
      "name": "nfs4_write", "scope": "node", "unit": { "prefix": "M", "base": "B/s" },
      "aggregation": "sum", "timestep": 60,
      "peak": 6, "normal": 4, "caution": 2, "alert": 1,
      "subClusters": [ { "name": "haswell", "remove": true } ]
    },
    {
      "name": "nfs4_total", "scope": "node", "unit": { "prefix": "M", "base": "B/s" },
      "aggregation": "sum", "timestep": 60,
      "peak": 6, "normal": 4, "caution": 2, "alert": 1,
      "subClusters": [ { "name": "haswell", "remove": true } ]
    }
  ]
}
pkg/archive/testdata/archive/version.txt (vendored, new file, 1 line)
@@ -0,0 +1 @@
+1
@@ -34,11 +34,11 @@ var (
 )

 var (
-	DebugLog *log.Logger
-	InfoLog  *log.Logger
-	WarnLog  *log.Logger
-	ErrLog   *log.Logger
-	CritLog  *log.Logger
+	DebugLog *log.Logger = log.New(DebugWriter, DebugPrefix, log.LstdFlags)
+	InfoLog  *log.Logger = log.New(InfoWriter, InfoPrefix, log.LstdFlags|log.Lshortfile)
+	WarnLog  *log.Logger = log.New(WarnWriter, WarnPrefix, log.LstdFlags|log.Lshortfile)
+	ErrLog   *log.Logger = log.New(ErrWriter, ErrPrefix, log.LstdFlags|log.Llongfile)
+	CritLog  *log.Logger = log.New(CritWriter, CritPrefix, log.LstdFlags|log.Llongfile)
 )

 /* CONFIG */

@@ -70,12 +70,6 @@ func Init(lvl string, logdate bool) {
 		WarnLog = log.New(WarnWriter, WarnPrefix, log.Lshortfile)
 		ErrLog = log.New(ErrWriter, ErrPrefix, log.Llongfile)
 		CritLog = log.New(CritWriter, CritPrefix, log.Llongfile)
-	} else {
-		DebugLog = log.New(DebugWriter, DebugPrefix, log.LstdFlags)
-		InfoLog = log.New(InfoWriter, InfoPrefix, log.LstdFlags|log.Lshortfile)
-		WarnLog = log.New(WarnWriter, WarnPrefix, log.LstdFlags|log.Lshortfile)
-		ErrLog = log.New(ErrWriter, ErrPrefix, log.LstdFlags|log.Llongfile)
-		CritLog = log.New(CritWriter, CritPrefix, log.LstdFlags|log.Llongfile)
 	}
 }
@@ -57,6 +57,13 @@ type ClusterConfig struct {
 	MetricDataRepository json.RawMessage `json:"metricDataRepository"`
 }

+type Retention struct {
+	Age       int    `json:"age"`
+	IncludeDB bool   `json:"includeDB"`
+	Policy    string `json:"policy"`
+	Location  string `json:"location"`
+}
+
 // Format of the configuration (file). See below for the defaults.
 type ProgramConfig struct {
 	// Address where the http (or https) server will listen on (for example: 'localhost:80').
@@ -11,8 +11,9 @@ import (
 	"time"
 )

-// Non-Swaggered Comment: BaseJob
-// Non-Swaggered Comment: Common subset of Job and JobMeta. Use one of those, not this type directly.
+// BaseJob is the common part of the job metadata structs
+//
+// Common subset of Job and JobMeta. Use one of those, not this type directly.

 type BaseJob struct {
 	// The unique identifier of a job
@@ -21,17 +22,17 @@ type BaseJob struct {
 	Project    string `json:"project" db:"project" example:"abcd200"`    // The unique identifier of a project
 	Cluster    string `json:"cluster" db:"cluster" example:"fritz"`      // The unique identifier of a cluster
 	SubCluster string `json:"subCluster" db:"subcluster" example:"main"` // The unique identifier of a sub cluster
-	Partition  *string `json:"partition,omitempty" db:"partition" example:"main"`       // The Slurm partition to which the job was submitted
-	ArrayJobId *int64  `json:"arrayJobId,omitempty" db:"array_job_id" example:"123000"` // The unique identifier of an array job
+	Partition  string `json:"partition,omitempty" db:"partition" example:"main"`       // The Slurm partition to which the job was submitted
+	ArrayJobId int64  `json:"arrayJobId,omitempty" db:"array_job_id" example:"123000"` // The unique identifier of an array job
 	NumNodes   int32  `json:"numNodes" db:"num_nodes" example:"2" minimum:"1"`         // Number of nodes used (Min > 0)
-	NumHWThreads *int32 `json:"numHwthreads,omitempty" db:"num_hwthreads" example:"20" minimum:"1"` // Number of HWThreads used (Min > 0)
-	NumAcc       *int32 `json:"numAcc,omitempty" db:"num_acc" example:"2" minimum:"1"`              // Number of accelerators used (Min > 0)
+	NumHWThreads int32 `json:"numHwthreads,omitempty" db:"num_hwthreads" example:"20" minimum:"1"` // Number of HWThreads used (Min > 0)
+	NumAcc       int32 `json:"numAcc,omitempty" db:"num_acc" example:"2" minimum:"1"`              // Number of accelerators used (Min > 0)
 	Exclusive        int32 `json:"exclusive" db:"exclusive" example:"1" minimum:"0" maximum:"2"` // Specifies how nodes are shared: 0 - Shared among multiple jobs of multiple users, 1 - Job exclusive (Default), 2 - Shared among multiple jobs of same user
 	MonitoringStatus int32 `json:"monitoringStatus,omitempty" db:"monitoring_status" example:"1" minimum:"0" maximum:"3"` // State of monitoring system during job run: 0 - Disabled, 1 - Running or Archiving (Default), 2 - Archiving Failed, 3 - Archiving Successfull
-	SMT      *int32   `json:"smt,omitempty" db:"smt" example:"4"` // SMT threads used by job
+	SMT      int32    `json:"smt,omitempty" db:"smt" example:"4"` // SMT threads used by job
 	State    JobState `json:"jobState" db:"job_state" example:"completed" enums:"completed,failed,cancelled,stopped,timeout,out_of_memory"` // Final state of job
 	Duration int32    `json:"duration" db:"duration" example:"43200" minimum:"1"` // Duration of job in seconds (Min > 0)
-	Walltime *int64   `json:"walltime,omitempty" db:"walltime" example:"86400" minimum:"1"` // Requested walltime of job in seconds (Min > 0)
+	Walltime int64    `json:"walltime,omitempty" db:"walltime" example:"86400" minimum:"1"` // Requested walltime of job in seconds (Min > 0)
 	Tags         []*Tag      `json:"tags,omitempty"`   // List of tags
 	RawResources []byte      `json:"-" db:"resources"` // Resources used by job [As Bytes]
 	Resources    []*Resource `json:"resources"`        // Resources used by job
@@ -40,9 +41,10 @@ type BaseJob struct {
 	ConcurrentJobs JobLinkResultList `json:"concurrentJobs"`
 }

-// Non-Swaggered Comment: Job
-// Non-Swaggered Comment: This type is used as the GraphQL interface and using sqlx as a table row.
+// Job struct type
+//
+// This type is used as the GraphQL interface and using sqlx as a table row.
+//
 // Job model
 // @Description Information of a HPC job.
 type Job struct {
@@ -61,6 +63,17 @@ type Job struct {
 	FileDataVolTotal float64 `json:"-" db:"file_data_vol_total"` // FileDataVolTotal as Float64
 }

+// JobMeta struct type
+//
+// When reading from the database or sending data via GraphQL, the start time
+// can be in the much more convenient time.Time type. In the `meta.json`
+// files, the start time is encoded as a unix epoch timestamp. This is why
+// there is this struct, which contains all fields from the regular job
+// struct, but "overwrites" the StartTime field with one of type int64. ID
+// *int64 `json:"id,omitempty"` >> never used in the job-archive, only
+// available via REST-API
+//
 type JobLink struct {
 	ID    int64 `json:"id"`
 	JobID int64 `json:"jobId"`
@@ -71,13 +84,6 @@ type JobLinkResultList struct {
 	Count int `json:"count"`
 }

-// Non-Swaggered Comment: JobMeta
-// Non-Swaggered Comment: When reading from the database or sending data via GraphQL, the start time can be in the much more
-// Non-Swaggered Comment: convenient time.Time type. In the `meta.json` files, the start time is encoded as a unix epoch timestamp.
-// Non-Swaggered Comment: This is why there is this struct, which contains all fields from the regular job struct, but "overwrites"
-// Non-Swaggered Comment: the StartTime field with one of type int64.
-// Non-Swaggered Comment: ID *int64 `json:"id,omitempty"` >> never used in the job-archive, only available via REST-API
-
 // JobMeta model
 // @Description Meta data information of a HPC job.
 type JobMeta struct {
@@ -85,7 +91,7 @@ type JobMeta struct {
 	ID *int64 `json:"id,omitempty"`
 	BaseJob
 	StartTime int64 `json:"startTime" db:"start_time" example:"1649723812" minimum:"1"` // Start epoch time stamp in seconds (Min > 0)
-	Statistics map[string]JobStatistics `json:"statistics,omitempty"` // Metric statistics of job
+	Statistics map[string]JobStatistics `json:"statistics"` // Metric statistics of job
 }

 const (
@@ -101,8 +107,8 @@ var JobDefaults BaseJob = BaseJob{
 }

 type Unit struct {
-	Base   string  `json:"base"`
-	Prefix *string `json:"prefix,omitempty"`
+	Base   string `json:"base"`
+	Prefix string `json:"prefix,omitempty"`
 }

 // JobStatistics model
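To make the StartTime distinction in the comment above concrete, a small self-contained sketch with reduced stand-in types (not the real schema structs, which carry many more fields):

```go
package main

import (
	"encoding/json"
	"fmt"
	"time"
)

// Stand-ins for the idea only: Job carries a time.Time, JobMeta an epoch.
type miniJob struct {
	JobID     int64     `json:"jobId"`
	StartTime time.Time `json:"startTime"`
}

type miniJobMeta struct {
	JobID     int64 `json:"jobId"`
	StartTime int64 `json:"startTime"`
}

func main() {
	j := miniJob{JobID: 1403244, StartTime: time.Unix(1608923076, 0)}
	m := miniJobMeta{JobID: j.JobID, StartTime: j.StartTime.Unix()}

	a, _ := json.Marshal(j)
	b, _ := json.Marshal(m)
	fmt.Println(string(a)) // RFC 3339 timestamp, convenient in Go/GraphQL
	fmt.Println(string(b)) // unix epoch, as stored in meta.json
}
```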
@@ -3,7 +3,7 @@
 	"$id": "embedfs://config.schema.json",
 	"title": "cc-backend configuration file schema",
 	"type": "object",
-	"properties":{
+	"properties": {
 		"addr": {
 			"description": "Address where the http (or https) server will listen on (for example: 'localhost:80').",
 			"type": "string"
@@ -41,8 +41,59 @@
 			"type": "string"
 		},
 		"job-archive": {
-			"description": "Path to the job-archive.",
-			"type": "string"
+			"description": "Configuration keys for job-archive",
+			"type": "object",
+			"properties": {
+				"kind": {
+					"description": "Backend type for job-archive",
+					"type": "string",
+					"enum": ["file", "s3"]
+				},
+				"path": {
+					"description": "Path to job archive for file backend",
+					"type": "string"
+				},
+				"compression": {
+					"description": "Setup automatic compression for jobs older than number of days",
+					"type": "integer"
+				},
+				"retention": {
+					"description": "Configuration keys for retention",
+					"type": "object",
+					"properties": {
+						"policy": {
+							"description": "Retention policy",
+							"type": "string",
+							"enum": ["none", "delete", "move"]
+						},
+						"includeDB": {
+							"description": "Also remove jobs from database",
+							"type": "boolean"
+						},
+						"age": {
+							"description": "Act on jobs with startTime older than age (in days)",
+							"type": "integer"
+						},
+						"location": {
+							"description": "The target directory for retention. Only applicable for retention move.",
+							"type": "string"
+						}
+					},
+					"required": ["policy"]
+				}
+			},
+			"required": ["kind"]
 		},
 		"disable-archive": {
 			"description": "Keep all metric data in the metric data repositories, do not write to the job-archive.",
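Putting the extended schema together, a hypothetical `job-archive` block in a config file could look like this (all values illustrative; only the shape follows the schema above):

```json
{
  "job-archive": {
    "kind": "file",
    "path": "./var/job-archive",
    "compression": 7,
    "retention": {
      "policy": "move",
      "age": 365,
      "location": "/data/job-archive-retired",
      "includeDB": false
    }
  }
}
```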
@@ -1,175 +0,0 @@
# cc-units - A unit system for ClusterCockpit

When working with metrics, the problem comes up that they may use different unit names but have the same unit in fact. There are a lot of real-world examples like 'kB' and 'Kbyte'. In [cc-metric-collector](https://github.com/ClusterCockpit/cc-metric-collector), the collectors read data from different sources which may use different units, or the programmer specifies a unit for a metric by hand. The cc-units system is not comparable with the SI unit system. If you are looking for a package for the SI units, see [here](https://pkg.go.dev/github.com/gurre/si).

In order to enable unit comparison and conversion, the ccUnits package provides some helpers:
```go
NewUnit(unit string) Unit // create a new unit from some string like 'GHz', 'Mbyte' or 'kevents/s'
func GetUnitUnitFactor(in Unit, out Unit) (func(value float64) float64, error) // Get conversion function between two units
func GetPrefixFactor(in Prefix, out Prefix) func(value float64) float64 // Get conversion function between two prefixes
func GetUnitPrefixFactor(in Unit, out Prefix) (func(value float64) float64, Unit) // Get conversion function for prefix changes and the new unit for further use

type Unit interface {
	Valid() bool
	String() string
	Short() string
	AddUnitDenominator(div Measure)
}
```

In order to get the "normalized" string unit back or test for validity, you can use:
```go
u := NewUnit("MB")
fmt.Println(u.Valid())                   // true
fmt.Printf("Long string %q", u.String()) // MegaBytes
fmt.Printf("Short string %q", u.Short()) // MBytes
v := NewUnit("foo")
fmt.Println(v.Valid()) // false
```

If you have two units or other components and need the conversion function:
```go
// Get conversion functions for 'kB' to 'MBytes'
u1 := NewUnit("kB")
u2 := NewUnit("MBytes")
convFunc, err := GetUnitUnitFactor(u1, u2) // Returns an error if the units have different measures
if err == nil {
	v2 := convFunc(v1)
	fmt.Printf("%f %s\n", v2, u2.Short())
}
// Get conversion function for 'kB' -> 'G' prefix.
// Returns the function and the new unit 'GBytes'
p1 := NewPrefix("G")
convFunc, u_p1 := GetUnitPrefixFactor(u1, p1)
// or
// convFunc, u_p1 := GetUnitPrefixStringFactor(u1, "G")
if convFunc != nil {
	v2 := convFunc(v1)
	fmt.Printf("%f %s\n", v2, u_p1.Short())
}
// Get conversion function for two prefixes: 'G' -> 'T'
p2 := NewPrefix("T")
convFunc = GetPrefixPrefixFactor(p1, p2)
if convFunc != nil {
	v2 := convFunc(v1)
	fmt.Printf("%f %s -> %f %s\n", v1, p1.Prefix(), v2, p2.Prefix())
}
```

(In the ClusterCockpit ecosystem the separation between values and units is useful since they are commonly not stored as a single entity: the value is a field in the CCMetric, while the unit is a tag or a meta information.)

If you have a metric and want the derivation to a bandwidth or events per second, you can use the original unit:

```go
in_unit, err := metric.GetMeta("unit")
if err == nil {
	value, ok := metric.GetField("value")
	if ok {
		out_unit := NewUnit(in_unit)
		out_unit.AddUnitDenominator("seconds")
		seconds := timeDiff.Seconds()
		y, err := lp.New(metric.Name()+"_bw",
			metric.Tags(),
			metric.Meta(),
			map[string]interface{}{"value": value / seconds},
			metric.Time())
		if err == nil {
			y.AddMeta("unit", out_unit.Short())
		}
	}
}
```

## Special unit detection

Some measures like Bytes and Flops are non-dividable, so prefixes like Milli, Micro and Nano are not useful for them. This is quite handy, since a unit `mb` for `MBytes` is not uncommon but would by default be parsed as "MilliBytes".

Special parsing rule for the following measures: iff `prefix == Milli`, use `prefix == Mega`
- `Bytes`
- `Flops`
- `Packets`
- `Events`
- `Cycles`
- `Requests`

This means the prefixes `Micro` (like `ubytes`) and `Nano` (like `nflops/sec`) are not allowed and return an invalid unit. But you can specify `mflops` and `mb`, as the sketch below shows.

Prefixes for `%` or `percent` are ignored.
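A minimal sketch of that rule in action, reusing `NewUnit` from above (the expected outcomes follow the README's own claims, not a verified run):

```go
u := NewUnit("mb")     // "m" would normally parse as Milli ...
fmt.Println(u.Valid()) // true
fmt.Println(u.Short()) // MBytes: Bytes is non-dividable, so Milli is promoted to Mega
w := NewUnit("ubytes") // Micro stays forbidden for Bytes ...
fmt.Println(w.Valid()) // false: parsing yields an invalid unit
```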
||||
## Supported prefixes
|
||||
|
||||
```go
|
||||
const (
|
||||
Base Prefix = 1
|
||||
Exa = 1e18
|
||||
Peta = 1e15
|
||||
Tera = 1e12
|
||||
Giga = 1e9
|
||||
Mega = 1e6
|
||||
Kilo = 1e3
|
||||
Milli = 1e-3
|
||||
Micro = 1e-6
|
||||
Nano = 1e-9
|
||||
Kibi = 1024
|
||||
Mebi = 1024 * 1024
|
||||
Gibi = 1024 * 1024 * 1024
|
||||
Tebi = 1024 * 1024 * 1024 * 1024
|
||||
)
|
||||
```
|
||||
|
||||
The prefixes are detected using a regular expression `^([kKmMgGtTpP]?[i]?)(.*)` that splits the prefix from the measure. You probably don't need to deal with the prefixes in the code.
|
||||
|
||||
## Supported measures

```go
const (
	InvalidMeasure Measure = iota
	Bytes
	Flops
	Percentage
	TemperatureC
	TemperatureF
	Rotation
	Frequency
	Time
	Watt
	Joule
	Cycles
	Requests
	Packets
	Events
)
```

There is a regular expression for each measure, like `^([bB][yY]?[tT]?[eE]?[sS]?)` for the `Bytes` measure.
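
A small sketch of the matching; the outputs assume the regular expressions from the implementation:

```go
m := NewMeasure("bytes")
fmt.Println(m.Short()) // "B"

f := NewMeasure("flop")
fmt.Println(f.String()) // "Flops"
```
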
## New units

If the supported units are not suitable for your metric, feel free to send a PR.

### New prefix

For a new prefix, add it to the big `const` in `ccUnitPrefix.go` and adjust the prefix-unit-splitting regular expression. Afterwards, add an entry to `PrefixDataMap` with the long name, the short notation, and the matching regular expression. `NewPrefix()` uses the regex for parsing (`k` or `K` -> `Kilo`), while the other two functions are used for output: `String()` outputs the long version of the prefix (`Kilo`), and `Prefix()` returns only the short notation (`K`).
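
A minimal sketch of such an entry, using a hypothetical `Deca` prefix; the constant value, names and regex are illustrative only, and the split regular expression would also need the new characters added to its character class:

```go
// In the Prefix const block:
Deca = 1e1

// In PrefixDataMap:
Deca: {
	Long:  "Deca",
	Short: "da",
	Regex: "^[dD][aA]$",
},
```
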
### New measure

Adding a new prefix is probably rare, but adding a new measure is a more common task. First, add it to the big `const` in `ccUnitMeasure.go`. Then create a regular expression matching the measure (and pre-compile it like the others) and add an entry to `MeasuresMap`, which `NewMeasure()` iterates for matching. The `String()` and `Short()` functions return descriptive strings for the measure in long form (like `Hertz`) and short form (like `Hz`).

If there are special conversion rules between measures and you want to convert one measure to another, like temperatures in Celsius to Fahrenheit, a special case in `GetUnitUnitFactor()` is required.
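
A sketch of such an entry with a hypothetical `Volts` measure; the name and regular expression are illustrative only:

```go
// In the Measure const block:
Volts

// In MeasuresMap:
Volts: {
	Long:  "Volts",
	Short: "V",
	Regex: "^([vV][oO]?[lL]?[tT]?[sS]?)",
},
```

Since `NewMeasure()` iterates a map, make the expression unambiguous: detection order is undefined, so a pattern overlapping an existing one could be shadowed by another measure.
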
### Special parsing rules

The two parsers for prefix and measure are called under the hood by `NewUnit()`, and some special rules may apply. As in the section about special unit detection above, special rules for your new measure might be required. Currently there are two special cases (see the sketch after this list):

- Measures that are non-divisible, like Flops, Bytes, Events, ..., cannot use `Milli`, `Micro` and `Nano`; the prefix `m` is forced to `M` for these measures.
- If the prefix is `p`/`P` (`Peta`) or `e`/`E` (`Exa`) and the measure is not detectable, detection is retried with the prefix included. In the first round it tries, for example, prefix `p` and measure `ackets`, which fails, so it retries with measure `packets` and an empty prefix (which resolves to the `Base` prefix).
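
For illustration, the effect of the retry rule; the outputs follow from the `NewUnit()` implementation shown further below:

```go
u := NewUnit("packets") // first try: prefix 'p' (Peta) + measure 'ackets' fails
fmt.Println(u.Short())  // "packets" -- re-parsed with Base prefix

e := NewUnit("events/s") // same retry for 'e' (Exa)
fmt.Println(e.Short())   // "events/s"
```
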
## Limitations

The `ccUnits` package is a simple implementation of a unit system and comes with some limitations:

- The unit denominator (like `s` in `Mbyte/s`) can only have the `Base` prefix; you cannot specify `Byte/ms` for "Bytes per millisecond".
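
A short sketch of this limitation; the output follows from the parser shown further below:

```go
u := NewUnit("Byte/ms") // 'ms' is not recognized as a denominator measure
fmt.Println(u.Short())  // "B" -- the denominator is silently dropped
```
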
@@ -1,134 +0,0 @@
package units

import "regexp"

type Measure int

const (
	InvalidMeasure Measure = iota
	Bytes
	Flops
	Percentage
	TemperatureC
	TemperatureF
	Rotation
	Frequency
	Time
	Watt
	Joule
	Cycles
	Requests
	Packets
	Events
)

type MeasureData struct {
	Long  string
	Short string
	Regex string
}

// Different names and regex used for input and output
var InvalidMeasureLong string = "Invalid"
var InvalidMeasureShort string = "inval"
var MeasuresMap map[Measure]MeasureData = map[Measure]MeasureData{
	Bytes: {
		Long:  "byte",
		Short: "B",
		Regex: "^([bB][yY]?[tT]?[eE]?[sS]?)",
	},
	Flops: {
		Long:  "Flops",
		Short: "F",
		Regex: "^([fF][lL]?[oO]?[pP]?[sS]?)",
	},
	Percentage: {
		Long:  "Percent",
		Short: "%",
		Regex: "^(%|[pP]ercent)",
	},
	TemperatureC: {
		Long:  "DegreeC",
		Short: "degC",
		Regex: "^(deg[Cc]|°[cC])",
	},
	TemperatureF: {
		Long:  "DegreeF",
		Short: "degF",
		Regex: "^(deg[fF]|°[fF])",
	},
	Rotation: {
		Long:  "RPM",
		Short: "RPM",
		Regex: "^([rR][pP][mM])",
	},
	Frequency: {
		Long:  "Hertz",
		Short: "Hz",
		Regex: "^([hH][eE]?[rR]?[tT]?[zZ])",
	},
	Time: {
		Long:  "Seconds",
		Short: "s",
		Regex: "^([sS][eE]?[cC]?[oO]?[nN]?[dD]?[sS]?)",
	},
	Cycles: {
		Long:  "Cycles",
		Short: "cyc",
		Regex: "^([cC][yY][cC]?[lL]?[eE]?[sS]?)",
	},
	Watt: {
		Long:  "Watts",
		Short: "W",
		Regex: "^([wW][aA]?[tT]?[tT]?[sS]?)",
	},
	Joule: {
		Long:  "Joules",
		Short: "J",
		Regex: "^([jJ][oO]?[uU]?[lL]?[eE]?[sS]?)",
	},
	Requests: {
		Long:  "Requests",
		Short: "requests",
		Regex: "^([rR][eE][qQ][uU]?[eE]?[sS]?[tT]?[sS]?)",
	},
	Packets: {
		Long:  "Packets",
		Short: "packets",
		Regex: "^([pP][aA]?[cC]?[kK][eE]?[tT][sS]?)",
	},
	Events: {
		Long:  "Events",
		Short: "events",
		Regex: "^([eE][vV]?[eE]?[nN][tT][sS]?)",
	},
}

// String returns the long string for the measure like 'Percent' or 'Seconds'
func (m *Measure) String() string {
	if data, ok := MeasuresMap[*m]; ok {
		return data.Long
	}
	return InvalidMeasureLong
}

// Short returns the short string for the measure like 'B' (Bytes), 's' (Time) or 'W' (Watt).
// It is recommended to use Short() over String().
func (m *Measure) Short() string {
	if data, ok := MeasuresMap[*m]; ok {
		return data.Short
	}
	return InvalidMeasureShort
}

// NewMeasure creates a new measure out of a string representing a measure like 'Bytes', 'Flops' and 'percent'.
// It uses regular expressions for matching.
func NewMeasure(unit string) Measure {
	for m, data := range MeasuresMap {
		regex := regexp.MustCompile(data.Regex)
		match := regex.FindStringSubmatch(unit)
		if match != nil {
			return m
		}
	}
	return InvalidMeasure
}
@@ -1,192 +0,0 @@
package units

import (
	"math"
	"regexp"
)

type Prefix float64

const (
	InvalidPrefix Prefix = iota
	Base                 = 1
	Yotta                = 1e24
	Zetta                = 1e21
	Exa                  = 1e18
	Peta                 = 1e15
	Tera                 = 1e12
	Giga                 = 1e9
	Mega                 = 1e6
	Kilo                 = 1e3
	Milli                = 1e-3
	Micro                = 1e-6
	Nano                 = 1e-9
	Kibi                 = 1024
	Mebi                 = 1024 * 1024
	Gibi                 = 1024 * 1024 * 1024
	Tebi                 = 1024 * 1024 * 1024 * 1024
	Pebi                 = 1024 * 1024 * 1024 * 1024 * 1024
	Exbi                 = 1024 * 1024 * 1024 * 1024 * 1024 * 1024
	Zebi                 = 1024 * 1024 * 1024 * 1024 * 1024 * 1024 * 1024
	Yobi                 = 1024 * 1024 * 1024 * 1024 * 1024 * 1024 * 1024 * 1024
)

const PrefixUnitSplitRegexStr = `^([kKmMgGtTpPeEzZyY]?[i]?)(.*)`

var prefixUnitSplitRegex = regexp.MustCompile(PrefixUnitSplitRegexStr)

type PrefixData struct {
	Long  string
	Short string
	Regex string
}

// Different names and regex used for input and output
var InvalidPrefixLong string = "Invalid"
var InvalidPrefixShort string = "inval"
var PrefixDataMap map[Prefix]PrefixData = map[Prefix]PrefixData{
	Base: {
		Long:  "",
		Short: "",
		Regex: "^$",
	},
	Kilo: {
		Long:  "Kilo",
		Short: "K",
		Regex: "^[kK]$",
	},
	Mega: {
		Long:  "Mega",
		Short: "M",
		Regex: "^[M]$",
	},
	Giga: {
		Long:  "Giga",
		Short: "G",
		Regex: "^[gG]$",
	},
	Tera: {
		Long:  "Tera",
		Short: "T",
		Regex: "^[tT]$",
	},
	Peta: {
		Long:  "Peta",
		Short: "P",
		Regex: "^[pP]$",
	},
	Exa: {
		Long:  "Exa",
		Short: "E",
		Regex: "^[eE]$",
	},
	Zetta: {
		Long:  "Zetta",
		Short: "Z",
		Regex: "^[zZ]$",
	},
	Yotta: {
		Long:  "Yotta",
		Short: "Y",
		Regex: "^[yY]$",
	},
	Milli: {
		Long:  "Milli",
		Short: "m",
		Regex: "^[m]$",
	},
	Micro: {
		Long:  "Micro",
		Short: "u",
		Regex: "^[u]$",
	},
	Nano: {
		Long:  "Nano",
		Short: "n",
		Regex: "^[n]$",
	},
	Kibi: {
		Long:  "Kibi",
		Short: "Ki",
		Regex: "^[kK][i]$",
	},
	Mebi: {
		Long:  "Mebi",
		Short: "Mi",
		Regex: "^[M][i]$",
	},
	Gibi: {
		Long:  "Gibi",
		Short: "Gi",
		Regex: "^[gG][i]$",
	},
	Tebi: {
		Long:  "Tebi",
		Short: "Ti",
		Regex: "^[tT][i]$",
	},
	Pebi: {
		Long:  "Pebi",
		Short: "Pi",
		Regex: "^[pP][i]$",
	},
	Exbi: {
		Long:  "Exbi",
		Short: "Ei",
		Regex: "^[eE][i]$",
	},
	Zebi: {
		Long:  "Zebi",
		Short: "Zi",
		Regex: "^[zZ][i]$",
	},
	Yobi: {
		Long:  "Yobi",
		Short: "Yi",
		Regex: "^[yY][i]$",
	},
}

// String returns the long string for the prefix like 'Kilo' or 'Mega'
func (p *Prefix) String() string {
	if data, ok := PrefixDataMap[*p]; ok {
		return data.Long
	}
	return InvalidPrefixLong
}

// Prefix returns the short string for the prefix like 'K', 'M' or 'G'.
// It is recommended to use Prefix() over String().
func (p *Prefix) Prefix() string {
	if data, ok := PrefixDataMap[*p]; ok {
		return data.Short
	}
	return InvalidPrefixShort
}

// NewPrefix creates a new prefix out of a string representing a unit like 'k', 'K', 'M' or 'G'.
func NewPrefix(prefix string) Prefix {
	for p, data := range PrefixDataMap {
		regex := regexp.MustCompile(data.Regex)
		match := regex.FindStringSubmatch(prefix)
		if match != nil {
			return p
		}
	}
	return InvalidPrefix
}

// getExponent returns the decimal exponent of a scaling factor, e.g. 1e6 -> 6
func getExponent(p float64) int {
	count := 0

	for p > 1.0 {
		p = p / 1000.0
		count++
	}

	return count * 3
}

// NewPrefixFromFactor returns the prefix that remains after removing e decimal
// orders of magnitude from op, e.g. (Giga, 6) resolves to Kilo.
func NewPrefixFromFactor(op Prefix, e int) Prefix {
	f := float64(op)
	exp := math.Pow10(getExponent(f) - e)
	return Prefix(exp)
}
@@ -1,339 +0,0 @@
// Unit system for cluster monitoring metrics like bytes, flops and events
package units

import (
	"fmt"
	"math"
	"strings"

	"github.com/ClusterCockpit/cc-backend/pkg/schema"
)

type unit struct {
	prefix     Prefix
	measure    Measure
	divMeasure Measure
}

type Unit interface {
	Valid() bool
	String() string
	Short() string
	AddUnitDenominator(div Measure)
	getPrefix() Prefix
	getMeasure() Measure
	getUnitDenominator() Measure
	setPrefix(p Prefix)
}

var INVALID_UNIT = NewUnit("foobar")

// Valid checks whether a unit is a valid unit.
// A unit is valid if it has at least a prefix and a measure.
// The unit denominator is optional.
func (u *unit) Valid() bool {
	return u.prefix != InvalidPrefix && u.measure != InvalidMeasure
}

// String returns the long string for the unit like 'KiloHertz' or 'MegaBytes'
func (u *unit) String() string {
	if u.divMeasure != InvalidMeasure {
		return fmt.Sprintf("%s%s/%s", u.prefix.String(), u.measure.String(), u.divMeasure.String())
	} else {
		return fmt.Sprintf("%s%s", u.prefix.String(), u.measure.String())
	}
}

// Short returns the short string for the unit like 'kHz' or 'MByte'.
// It is recommended to use Short() over String().
func (u *unit) Short() string {
	if u.divMeasure != InvalidMeasure {
		return fmt.Sprintf("%s%s/%s", u.prefix.Prefix(), u.measure.Short(), u.divMeasure.Short())
	} else {
		return fmt.Sprintf("%s%s", u.prefix.Prefix(), u.measure.Short())
	}
}

// AddUnitDenominator adds a unit denominator to an existing unit. It can be used to derive e.g. a bandwidth
// from a data volume: the data volume is in a Byte unit like 'kByte', and by dividing it by the runtime in
// seconds we get the bandwidth, so we can reuse the data volume unit and add 'Second' as unit denominator.
func (u *unit) AddUnitDenominator(div Measure) {
	u.divMeasure = div
}

func (u *unit) getPrefix() Prefix {
	return u.prefix
}

func (u *unit) setPrefix(p Prefix) {
	u.prefix = p
}

func (u *unit) getMeasure() Measure {
	return u.measure
}

func (u *unit) getUnitDenominator() Measure {
	return u.divMeasure
}

// ConvertValue converts the value v in place from unit 'from' to unit 'to',
// rounding the result up to the next integer.
func ConvertValue(v *float64, from string, to string) {
	uf := NewUnit(from)
	ut := NewUnit(to)
	factor := float64(uf.getPrefix()) / float64(ut.getPrefix())
	*v = math.Ceil(*v * factor)
}

// ConvertSeries converts the series s in place from unit 'from' to unit 'to',
// rounding each value up to the next integer.
func ConvertSeries(s []float64, from string, to string) {
	uf := NewUnit(from)
	ut := NewUnit(to)
	factor := float64(uf.getPrefix()) / float64(ut.getPrefix())

	for i := 0; i < len(s); i++ {
		s[i] = math.Ceil(s[i] * factor)
	}
}

// getNormalizationFactor returns the scaling factor and the decimal exponent
// that bring v into the range [1, 1000).
func getNormalizationFactor(v float64) (float64, int) {
	count := 0
	scale := -3

	if v > 1000.0 {
		for v > 1000.0 {
			v *= 1e-3
			count++
		}
	} else {
		for v < 1.0 {
			v *= 1e3
			count++
		}
		scale = 3
	}
	return math.Pow10(count * scale), count * scale
}

// NormalizeValue scales the value v into the range [1, 1000), rounds it up and
// stores the adjusted unit string in nu.
func NormalizeValue(v *float64, us string, nu *string) {
	u := NewUnit(us)
	f, e := getNormalizationFactor((*v))
	*v = math.Ceil(*v * f)
	u.setPrefix(NewPrefixFromFactor(u.getPrefix(), e))
	*nu = u.Short()
}

// NormalizeSeries scales the series s based on its average avg and stores the
// adjusted unit string in nu.
func NormalizeSeries(s []float64, avg float64, us string, nu *string) {
	u := NewUnit(us)
	f, e := getNormalizationFactor(avg)

	for i := 0; i < len(s); i++ {
		s[i] *= f
		s[i] = math.Ceil(s[i])
	}
	u.setPrefix(NewPrefixFromFactor(u.getPrefix(), e))
	fmt.Printf("Prefix: %e \n", u.getPrefix())
	*nu = u.Short()
}

// ConvertUnitString converts a unit string like 'GB/s' into the schema.Unit
// format with separate prefix and base.
func ConvertUnitString(us string) schema.Unit {
	var nu schema.Unit

	if us == "CPI" ||
		us == "IPC" ||
		us == "load" ||
		us == "" {
		nu.Base = us
		return nu
	}
	u := NewUnit(us)
	p := u.getPrefix()
	if p.Prefix() != "" {
		prefix := p.Prefix()
		nu.Prefix = &prefix
	}
	m := u.getMeasure()
	d := u.getUnitDenominator()
	if d.Short() != "inval" {
		nu.Base = fmt.Sprintf("%s/%s", m.Short(), d.Short())
	} else {
		nu.Base = m.Short()
	}

	return nu
}

// GetPrefixPrefixFactor creates the default conversion function between two prefixes.
// It returns a conversion function for the value.
func GetPrefixPrefixFactor(in Prefix, out Prefix) func(value interface{}) interface{} {
	var factor = 1.0
	var in_prefix = float64(in)
	var out_prefix = float64(out)
	factor = in_prefix / out_prefix
	conv := func(value interface{}) interface{} {
		switch v := value.(type) {
		case float64:
			return v * factor
		case float32:
			return float32(float64(v) * factor)
		case int:
			return int(float64(v) * factor)
		case int32:
			return int32(float64(v) * factor)
		case int64:
			return int64(float64(v) * factor)
		case uint:
			return uint(float64(v) * factor)
		case uint32:
			return uint32(float64(v) * factor)
		case uint64:
			return uint64(float64(v) * factor)
		}
		return value
	}
	return conv
}

// convertTempC2TempF is the conversion function from temperatures in Celsius to Fahrenheit
func convertTempC2TempF(value interface{}) interface{} {
	switch v := value.(type) {
	case float64:
		return (v * 1.8) + 32
	case float32:
		return (v * 1.8) + 32
	case int:
		return int((float64(v) * 1.8) + 32)
	case int32:
		return int32((float64(v) * 1.8) + 32)
	case int64:
		return int64((float64(v) * 1.8) + 32)
	case uint:
		return uint((float64(v) * 1.8) + 32)
	case uint32:
		return uint32((float64(v) * 1.8) + 32)
	case uint64:
		return uint64((float64(v) * 1.8) + 32)
	}
	return value
}

// convertTempF2TempC is the conversion function from temperatures in Fahrenheit to Celsius
func convertTempF2TempC(value interface{}) interface{} {
	switch v := value.(type) {
	case float64:
		return (v - 32) / 1.8
	case float32:
		return (v - 32) / 1.8
	case int:
		return int(((float64(v) - 32) / 1.8))
	case int32:
		return int32(((float64(v) - 32) / 1.8))
	case int64:
		return int64(((float64(v) - 32) / 1.8))
	case uint:
		return uint(((float64(v) - 32) / 1.8))
	case uint32:
		return uint32(((float64(v) - 32) / 1.8))
	case uint64:
		return uint64(((float64(v) - 32) / 1.8))
	}
	return value
}

// GetPrefixStringPrefixStringFactor is a wrapper for GetPrefixPrefixFactor with string inputs instead
// of prefixes. It also returns a conversion function for the value.
func GetPrefixStringPrefixStringFactor(in string, out string) func(value interface{}) interface{} {
	var i Prefix = NewPrefix(in)
	var o Prefix = NewPrefix(out)
	return GetPrefixPrefixFactor(i, o)
}

// GetUnitPrefixFactor gets the conversion function and resulting unit for a unit and a prefix. This is
// the most common case where you have some input unit and want to convert it to the same unit but with
// a different prefix. The returned unit represents the value after conversion.
func GetUnitPrefixFactor(in Unit, out Prefix) (func(value interface{}) interface{}, Unit) {
	outUnit := NewUnit(in.Short())
	if outUnit.Valid() {
		outUnit.setPrefix(out)
		conv := GetPrefixPrefixFactor(in.getPrefix(), out)
		return conv, outUnit
	}
	return nil, INVALID_UNIT
}

// GetUnitPrefixStringFactor gets the conversion function and resulting unit for a unit and a prefix as string.
// It is a wrapper for GetUnitPrefixFactor.
func GetUnitPrefixStringFactor(in Unit, out string) (func(value interface{}) interface{}, Unit) {
	var o Prefix = NewPrefix(out)
	return GetUnitPrefixFactor(in, o)
}

// GetUnitStringPrefixStringFactor gets the conversion function and resulting unit for a unit and a prefix when both are only string representations.
// This is just a wrapper for GetUnitPrefixFactor with the given input unit and the desired output prefix.
func GetUnitStringPrefixStringFactor(in string, out string) (func(value interface{}) interface{}, Unit) {
	var i = NewUnit(in)
	return GetUnitPrefixStringFactor(i, out)
}

// GetUnitUnitFactor gets the conversion function and (maybe) an error for unit to unit conversion.
// It is basically a wrapper for GetPrefixPrefixFactor with some special cases for temperature
// conversion between Fahrenheit and Celsius.
func GetUnitUnitFactor(in Unit, out Unit) (func(value interface{}) interface{}, error) {
	if in.getMeasure() == TemperatureC && out.getMeasure() == TemperatureF {
		return convertTempC2TempF, nil
	} else if in.getMeasure() == TemperatureF && out.getMeasure() == TemperatureC {
		return convertTempF2TempC, nil
	} else if in.getMeasure() != out.getMeasure() || in.getUnitDenominator() != out.getUnitDenominator() {
		return func(value interface{}) interface{} { return 1.0 }, fmt.Errorf("UNITS/UNITS > invalid measures in in and out Unit")
	}
	return GetPrefixPrefixFactor(in.getPrefix(), out.getPrefix()), nil
}

// NewUnit creates a new unit out of a string representing a unit like 'Mbyte/s' or 'GHz'.
// It uses regular expressions to detect the prefix, the measure and (maybe) the unit denominator.
func NewUnit(unitStr string) Unit {
	u := &unit{
		prefix:     InvalidPrefix,
		measure:    InvalidMeasure,
		divMeasure: InvalidMeasure,
	}
	matches := prefixUnitSplitRegex.FindStringSubmatch(unitStr)
	if len(matches) > 2 {
		pre := NewPrefix(matches[1])
		measures := strings.Split(matches[2], "/")
		m := NewMeasure(measures[0])
		// Special case for prefix 'p' or 'P' (Peta) and measures starting with 'p' or 'P'
		// like 'packets' or 'percent'. Same for 'e' or 'E' (Exa) for measures starting with
		// 'e' or 'E' like 'events'.
		if m == InvalidMeasure {
			switch pre {
			case Peta, Exa:
				t := NewMeasure(matches[1] + measures[0])
				if t != InvalidMeasure {
					m = t
					pre = Base
				}
			}
		}
		div := InvalidMeasure
		if len(measures) > 1 {
			div = NewMeasure(measures[1])
		}

		switch m {
		// Special case for 'm' as prefix for Bytes and some others as there is no unit like MilliBytes
		case Bytes, Flops, Packets, Events, Cycles, Requests:
			if pre == Milli {
				pre = Mega
			}
		// Special case for percentage: ignore any prefix
		case Percentage:
			pre = Base
		}
		if pre != InvalidPrefix && m != InvalidMeasure {
			u.prefix = pre
			u.measure = m
			if div != InvalidMeasure {
				u.divMeasure = div
			}
		}
	}
	return u
}
@@ -1,307 +0,0 @@
package units

import (
	"fmt"
	"reflect"
	"regexp"
	"testing"
)

func TestUnitsExact(t *testing.T) {
	testCases := []struct {
		in   string
		want Unit
	}{
		{"b", NewUnit("Bytes")},
		{"B", NewUnit("Bytes")},
		{"byte", NewUnit("Bytes")},
		{"bytes", NewUnit("Bytes")},
		{"BYtes", NewUnit("Bytes")},
		{"Mb", NewUnit("MBytes")},
		{"MB", NewUnit("MBytes")},
		{"Mbyte", NewUnit("MBytes")},
		{"Mbytes", NewUnit("MBytes")},
		{"MbYtes", NewUnit("MBytes")},
		{"Gb", NewUnit("GBytes")},
		{"GB", NewUnit("GBytes")},
		{"Hz", NewUnit("Hertz")},
		{"MHz", NewUnit("MHertz")},
		{"GHz", NewUnit("GHertz")},
		{"pkts", NewUnit("Packets")},
		{"packets", NewUnit("Packets")},
		{"packet", NewUnit("Packets")},
		{"flop", NewUnit("Flops")},
		{"flops", NewUnit("Flops")},
		{"floPS", NewUnit("Flops")},
		{"Mflop", NewUnit("MFlops")},
		{"Gflop", NewUnit("GFlops")},
		{"gflop", NewUnit("GFlops")},
		{"%", NewUnit("Percent")},
		{"percent", NewUnit("Percent")},
		{"degc", NewUnit("degC")},
		{"degC", NewUnit("degC")},
		{"degf", NewUnit("degF")},
		{"°f", NewUnit("degF")},
		{"events", NewUnit("events")},
		{"event", NewUnit("events")},
		{"EveNts", NewUnit("events")},
		{"reqs", NewUnit("requests")},
		{"reQuEsTs", NewUnit("requests")},
		{"Requests", NewUnit("requests")},
		{"cyc", NewUnit("cycles")},
		{"cy", NewUnit("cycles")},
		{"Cycles", NewUnit("cycles")},
		{"J", NewUnit("Joules")},
		{"Joule", NewUnit("Joules")},
		{"joule", NewUnit("Joules")},
		{"W", NewUnit("Watt")},
		{"Watts", NewUnit("Watt")},
		{"watt", NewUnit("Watt")},
		{"s", NewUnit("seconds")},
		{"sec", NewUnit("seconds")},
		{"secs", NewUnit("seconds")},
		{"RPM", NewUnit("rpm")},
		{"rPm", NewUnit("rpm")},
		{"watt/byte", NewUnit("W/B")},
		{"watts/bytes", NewUnit("W/B")},
		{"flop/byte", NewUnit("flops/Bytes")},
		{"F/B", NewUnit("flops/Bytes")},
	}
	compareUnitExact := func(in, out Unit) bool {
		if in.getMeasure() == out.getMeasure() && in.getUnitDenominator() == out.getUnitDenominator() && in.getPrefix() == out.getPrefix() {
			return true
		}
		return false
	}
	for _, c := range testCases {
		u := NewUnit(c.in)
		if (!u.Valid()) || (!compareUnitExact(u, c.want)) {
			t.Errorf("func NewUnit(%q) == %q, want %q", c.in, u.String(), c.want.String())
		} else {
			t.Logf("NewUnit(%q) == %q", c.in, u.String())
		}
	}
}

func TestUnitUnitConversion(t *testing.T) {
	testCases := []struct {
		in           string
		want         Unit
		prefixFactor float64
	}{
		{"kb", NewUnit("Bytes"), 1000},
		{"Mb", NewUnit("Bytes"), 1000000},
		{"Mb/s", NewUnit("Bytes/s"), 1000000},
		{"Flops/s", NewUnit("MFlops/s"), 1e-6},
		{"Flops/s", NewUnit("GFlops/s"), 1e-9},
		{"MHz", NewUnit("Hertz"), 1e6},
		{"kb", NewUnit("Kib"), 1000.0 / 1024},
		{"Mib", NewUnit("MBytes"), (1024 * 1024.0) / (1e6)},
		{"mb", NewUnit("MBytes"), 1.0},
	}
	compareUnitWithPrefix := func(in, out Unit, factor float64) bool {
		if in.getMeasure() == out.getMeasure() && in.getUnitDenominator() == out.getUnitDenominator() {
			if f := GetPrefixPrefixFactor(in.getPrefix(), out.getPrefix()); f(1.0) == factor {
				return true
			} else {
				fmt.Println(f(1.0))
			}
		}
		return false
	}
	for _, c := range testCases {
		u := NewUnit(c.in)
		if (!u.Valid()) || (!compareUnitWithPrefix(u, c.want, c.prefixFactor)) {
			t.Errorf("GetPrefixPrefixFactor(%q, %q) invalid, want %q with factor %g", c.in, u.String(), c.want.String(), c.prefixFactor)
		} else {
			t.Logf("GetPrefixPrefixFactor(%q, %q) = %g", c.in, c.want.String(), c.prefixFactor)
		}
	}
}

func TestUnitPrefixConversion(t *testing.T) {
	testCases := []struct {
		in           string
		want         string
		prefixFactor float64
		wantUnit     Unit
	}{
		{"KBytes", "", 1000, NewUnit("Bytes")},
		{"MBytes", "", 1e6, NewUnit("Bytes")},
		{"MBytes", "G", 1e-3, NewUnit("GBytes")},
		{"mb", "M", 1, NewUnit("MBytes")},
	}
	compareUnitPrefix := func(in Unit, out Prefix, factor float64, outUnit Unit) bool {
		if in.Valid() {
			conv, unit := GetUnitPrefixFactor(in, out)
			value := conv(1.0)
			if value == factor && unit.String() == outUnit.String() {
				return true
			}
		}
		return false
	}
	for _, c := range testCases {
		u := NewUnit(c.in)
		p := NewPrefix(c.want)
		if (!u.Valid()) || (!compareUnitPrefix(u, p, c.prefixFactor, c.wantUnit)) {
			t.Errorf("GetUnitPrefixFactor(%q, %q) invalid, want %q with factor %g", c.in, p.Prefix(), c.wantUnit.String(), c.prefixFactor)
		} else {
			t.Logf("GetUnitPrefixFactor(%q, %q) = %g", c.in, c.wantUnit.String(), c.prefixFactor)
		}
	}
}

func TestPrefixPrefixConversion(t *testing.T) {
	testCases := []struct {
		in           string
		want         string
		prefixFactor float64
	}{
		{"K", "", 1000},
		{"M", "", 1e6},
		{"M", "G", 1e-3},
		{"", "M", 1e-6},
		{"", "m", 1e3},
		{"m", "n", 1e6},
		//{"", "n", 1e9}, //does not work because of IEEE rounding problems
	}
	for _, c := range testCases {
		i := NewPrefix(c.in)
		o := NewPrefix(c.want)
		if i != InvalidPrefix && o != InvalidPrefix {
			conv := GetPrefixPrefixFactor(i, o)
			value := conv(1.0)
			if value != c.prefixFactor {
				t.Errorf("GetPrefixPrefixFactor(%q, %q) invalid, want %q with factor %g but got %g", c.in, c.want, o.Prefix(), c.prefixFactor, value)
			} else {
				t.Logf("GetPrefixPrefixFactor(%q, %q) = %g", c.in, c.want, c.prefixFactor)
			}
		}
	}
}

func TestMeasureRegex(t *testing.T) {
	for _, data := range MeasuresMap {
		_, err := regexp.Compile(data.Regex)
		if err != nil {
			t.Errorf("failed to compile regex '%s': %s", data.Regex, err.Error())
		}
		t.Logf("successfully compiled regex '%s' for measure %s", data.Regex, data.Long)
	}
}

func TestPrefixRegex(t *testing.T) {
	for _, data := range PrefixDataMap {
		_, err := regexp.Compile(data.Regex)
		if err != nil {
			t.Errorf("failed to compile regex '%s': %s", data.Regex, err.Error())
		}
		t.Logf("successfully compiled regex '%s' for prefix %s", data.Regex, data.Long)
	}
}

func TestConvertValue(t *testing.T) {
	v := float64(103456)
	ConvertValue(&v, "MB/s", "GB/s")

	if v != 104.00 {
		t.Errorf("Failed ConvertValue: Want 104.00, Got %f", v)
	}
}

func TestConvertValueUp(t *testing.T) {
	v := float64(10.3456)
	ConvertValue(&v, "GB/s", "MB/s")

	if v != 10346.00 {
		t.Errorf("Failed ConvertValue: Want 10346.00, Got %f", v)
	}
}

func TestConvertSeries(t *testing.T) {
	s := []float64{2890031237, 23998994567, 389734042344, 390349424345}
	r := []float64{3, 24, 390, 391}
	ConvertSeries(s, "F/s", "GF/s")

	if !reflect.DeepEqual(s, r) {
		t.Errorf("Failed ConvertValue: Want 3, 24, 390, 391, Got %v", s)
	}
}

func TestNormalizeValue(t *testing.T) {
	var s string
	v := float64(103456)

	NormalizeValue(&v, "MB/s", &s)

	if v != 104.00 {
		t.Errorf("Failed ConvertValue: Want 104.00, Got %f", v)
	}
	if s != "GB/s" {
		t.Errorf("Failed Prefix or unit: Want GB/s, Got %s", s)
	}
}

func TestNormalizeValueNoPrefix(t *testing.T) {
	var s string
	v := float64(103458596)

	NormalizeValue(&v, "F/s", &s)

	if v != 104.00 {
		t.Errorf("Failed ConvertValue: Want 104.00, Got %f", v)
	}
	if s != "MF/s" {
		t.Errorf("Failed Prefix or unit: Want MF/s, Got %s", s)
	}
}

func TestNormalizeValueKeep(t *testing.T) {
	var s string
	v := float64(345)

	NormalizeValue(&v, "MB/s", &s)

	if v != 345.00 {
		t.Errorf("Failed ConvertValue: Want 345.00, Got %f", v)
	}
	if s != "MB/s" {
		t.Errorf("Failed Prefix or unit: Want MB/s, Got %s", s)
	}
}

func TestNormalizeValueDown(t *testing.T) {
	var s string
	v := float64(0.0004578)

	NormalizeValue(&v, "GB/s", &s)

	if v != 458.00 {
		t.Errorf("Failed ConvertValue: Want 458.00, Got %f", v)
	}
	if s != "KB/s" {
		t.Errorf("Failed Prefix or unit: Want KB/s, Got %s", s)
	}
}

func TestNormalizeSeries(t *testing.T) {
	var us string
	s := []float64{2890031237, 23998994567, 389734042344, 390349424345}
	r := []float64{3, 24, 390, 391}

	total := 0.0
	for _, number := range s {
		total += number
	}
	avg := total / float64(len(s))

	fmt.Printf("AVG: %e\n", avg)
	NormalizeSeries(s, avg, "KB/s", &us)

	if !reflect.DeepEqual(s, r) {
		t.Errorf("Failed ConvertValue: Want 3, 24, 390, 391, Got %v", s)
	}
	if us != "TB/s" {
		t.Errorf("Failed Prefix or unit: Want TB/s, Got %s", us)
	}
}