Merge remote 'origin/103-add-retention-support'

- Into update-web-frontend-env
- package.json from update-web-frontend-env
Christoph Kluge 2023-05-12 13:21:22 +02:00
commit 7c7dc8da45
43 changed files with 644 additions and 1879 deletions

View File

@ -5,11 +5,11 @@ jobs:
runs-on: ubuntu-latest
steps:
- name: Install Go
uses: actions/setup-go@v2
uses: actions/setup-go@v4
with:
go-version: 1.17.x
go-version: 1.19.x
- name: Checkout code
uses: actions/checkout@v2
uses: actions/checkout@v3
- name: Build, Vet & Test
run: |
go build ./...

View File

@ -1,5 +1,6 @@
TARGET = ./cc-backend
VAR = ./var
CFG = config.json .env
FRONTEND = ./web/frontend
VERSION = 1
GIT_HASH := $(shell git rev-parse --short HEAD || echo 'development')
@ -31,7 +32,7 @@ SVELTE_SRC = $(wildcard $(FRONTEND)/src/*.svelte) \
.NOTPARALLEL:
$(TARGET): $(VAR) $(SVELTE_TARGETS)
$(TARGET): $(VAR) $(CFG) $(SVELTE_TARGETS)
$(info ===> BUILD cc-backend)
@go build -ldflags=${LD_FLAGS} ./cmd/cc-backend
@ -59,11 +60,18 @@ tags:
$(VAR):
@mkdir $(VAR)
cd web/frontend && npm install
config.json:
$(info ===> Initialize config.json file)
@cp configs/config.json config.json
.env:
$(info ===> Initialize .env file)
@cp configs/env-template.txt .env
$(SVELTE_TARGETS): $(SVELTE_SRC)
$(info ===> BUILD frontend)
cd web/frontend && npm run build
cd web/frontend && npm install && npm run build
install: $(TARGET)
@WORKSPACE=$(PREFIX)

View File

@ -42,7 +42,7 @@ versions of third party packages.
## Demo Setup
We provide a shell script that downloads demo data and automatically builds and starts cc-backend.
You need `wget`, `go`, and `yarn` in your path to start the demo. The demo will download 32MB of data (223MB on disk).
You need `wget`, `go`, `node`, `rollup` and `yarn` in your path to start the demo. The demo will download 32MB of data (223MB on disk).
```sh
git clone https://github.com/ClusterCockpit/cc-backend.git

View File

@ -7,6 +7,7 @@ package main
import (
"context"
"crypto/tls"
"encoding/json"
"errors"
"flag"
"fmt"
@ -36,7 +37,9 @@ import (
"github.com/ClusterCockpit/cc-backend/internal/runtimeEnv"
"github.com/ClusterCockpit/cc-backend/pkg/archive"
"github.com/ClusterCockpit/cc-backend/pkg/log"
"github.com/ClusterCockpit/cc-backend/pkg/schema"
"github.com/ClusterCockpit/cc-backend/web"
"github.com/go-co-op/gocron"
"github.com/google/gops/agent"
"github.com/gorilla/handlers"
"github.com/gorilla/mux"
@ -416,18 +419,66 @@ func main() {
api.JobRepository.WaitForArchiving()
}()
s := gocron.NewScheduler(time.Local)
if config.Keys.StopJobsExceedingWalltime > 0 {
go func() {
for range time.Tick(30 * time.Minute) {
s.Every(1).Day().At("3:00").Do(func() {
err := jobRepo.StopJobsExceedingWalltimeBy(config.Keys.StopJobsExceedingWalltime)
if err != nil {
log.Warnf("Error while looking for jobs exceeding their walltime: %s", err.Error())
}
runtime.GC()
})
}
}()
var cfg struct {
Compression int `json:"compression"`
Retention schema.Retention `json:"retention"`
}
if err := json.Unmarshal(config.Keys.Archive, &cfg); err != nil {
log.Warn("Error while unmarshaling raw config json")
}
switch cfg.Retention.Policy {
case "delete":
s.Every(1).Day().At("4:00").Do(func() {
startTime := time.Now().Unix() - int64(cfg.Retention.Age*24*3600)
jobs, err := jobRepo.FindJobsBefore(startTime)
if err != nil {
log.Warnf("Error while looking for retention jobs: %s", err.Error())
}
archive.GetHandle().CleanUp(jobs)
if cfg.Retention.IncludeDB {
cnt, err := jobRepo.DeleteJobsBefore(startTime)
if err != nil {
log.Errorf("Error while deleting retention jobs from db: %s", err.Error())
} else {
log.Infof("Retention: Removed %d jobs from db", cnt)
}
if err = jobRepo.Optimize(); err != nil {
log.Errorf("Error occured in db optimization: %s", err.Error())
}
}
})
case "move":
log.Warn("Retention policy move not implemented")
}
if cfg.Compression > 0 {
s.Every(1).Day().At("5:00").Do(func() {
startTime := time.Now().Unix() - int64(cfg.Compression*24*3600)
jobs, err := jobRepo.FindJobsBefore(startTime)
if err != nil {
log.Warnf("Error while looking for retention jobs: %s", err.Error())
}
archive.GetHandle().Compress(jobs)
})
}
s.StartAsync()
if os.Getenv("GOGC") == "" {
debug.SetGCPercent(25)
}
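For orientation, here is a minimal, self-contained sketch of how the retention settings above can be read from the `archive` configuration block. The struct mirrors the anonymous `cfg` struct in `main()` and the `schema.Retention` fields introduced in this commit; all JSON values are purely illustrative:

```go
package main

import (
	"encoding/json"
	"fmt"
)

// Illustrative archive configuration fragment: "compression" and
// "retention" correspond to the anonymous cfg struct in main();
// the concrete values are made up for this example.
const archiveCfg = `{
	"kind": "file",
	"path": "./var/job-archive",
	"compression": 7,
	"retention": {
		"policy": "delete",
		"includeDB": true,
		"age": 180
	}
}`

func main() {
	var cfg struct {
		Compression int `json:"compression"`
		Retention   struct {
			Age       int    `json:"age"`
			IncludeDB bool   `json:"includeDB"`
			Policy    string `json:"policy"`
			Location  string `json:"location"`
		} `json:"retention"`
	}
	if err := json.Unmarshal([]byte(archiveCfg), &cfg); err != nil {
		fmt.Println("unmarshal:", err)
		return
	}
	// With this config, jobs older than 180 days are deleted (also from
	// the database) and jobs older than 7 days are compressed.
	fmt.Printf("policy=%s age=%d includeDB=%v compression=%d\n",
		cfg.Retention.Policy, cfg.Retention.Age,
		cfg.Retention.IncludeDB, cfg.Compression)
}
```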

go.mod
View File

@ -4,6 +4,7 @@ go 1.18
require (
github.com/99designs/gqlgen v0.17.24
github.com/ClusterCockpit/cc-units v0.4.0
github.com/Masterminds/squirrel v1.5.3
github.com/go-ldap/ldap/v3 v3.4.4
github.com/go-sql-driver/mysql v1.7.0
@ -39,6 +40,7 @@ require (
github.com/felixge/httpsnoop v1.0.3 // indirect
github.com/ghodss/yaml v1.0.0 // indirect
github.com/go-asn1-ber/asn1-ber v1.5.4 // indirect
github.com/go-co-op/gocron v1.25.0 // indirect
github.com/go-openapi/jsonpointer v0.19.6 // indirect
github.com/go-openapi/jsonreference v0.20.2 // indirect
github.com/go-openapi/spec v0.20.8 // indirect
@ -54,6 +56,7 @@ require (
github.com/josharian/intern v1.0.0 // indirect
github.com/jpillora/backoff v1.0.0 // indirect
github.com/json-iterator/go v1.1.12 // indirect
github.com/kr/pretty v0.3.0 // indirect
github.com/lann/builder v0.0.0-20180802200727-47ae307949d0 // indirect
github.com/lann/ps v0.0.0-20150810152359-62de8c46ede0 // indirect
github.com/mailru/easyjson v0.7.7 // indirect
@ -66,7 +69,10 @@ require (
github.com/pkg/errors v0.9.1 // indirect
github.com/prometheus/client_model v0.3.0 // indirect
github.com/prometheus/procfs v0.9.0 // indirect
github.com/robfig/cron/v3 v3.0.1 // indirect
github.com/rogpeppe/go-internal v1.8.1 // indirect
github.com/russross/blackfriday/v2 v2.1.0 // indirect
github.com/stretchr/testify v1.8.2 // indirect
github.com/swaggo/files v1.0.0 // indirect
github.com/urfave/cli/v2 v2.24.4 // indirect
github.com/xrash/smetrics v0.0.0-20201216005158-039620a65673 // indirect

go.sum
View File

@ -81,6 +81,8 @@ github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03
github.com/BurntSushi/toml v1.1.0/go.mod h1:CxXYINrC8qIiEnFrOxCa7Jy5BFHlXnUU2pbicEuybxQ=
github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo=
github.com/ClickHouse/clickhouse-go v1.4.3/go.mod h1:EaI/sW7Azgz9UATzd5ZdZHRUhHgv5+JMS9NSr2smCJI=
github.com/ClusterCockpit/cc-units v0.4.0 h1:zP5DOu99GmErW0tCDf0gcLrlWt42RQ9dpoONEOh4cI0=
github.com/ClusterCockpit/cc-units v0.4.0/go.mod h1:3S3PAhAayS3pbgcT4q9Vn9VJw22Op51X0YimtG77zBw=
github.com/KyleBanks/depth v1.2.1 h1:5h8fQADFrWtarTdtDudMmGsC7GPbOAu6RVB3ffsVFHc=
github.com/KyleBanks/depth v1.2.1/go.mod h1:jzSb9d0L43HxTQfT+oSA1EEp2q+ne2uh6XgeJcm8brE=
github.com/Masterminds/squirrel v1.5.3 h1:YPpoceAcxuzIljlr5iWpNKaql7hLeG1KLSrhvdHpkZc=
@ -444,6 +446,8 @@ github.com/ghodss/yaml v1.0.0 h1:wQHKEahhL6wmXdzwWG11gIVCkOv05bNOh+Rxn0yngAk=
github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04=
github.com/go-asn1-ber/asn1-ber v1.5.4 h1:vXT6d/FNDiELJnLb6hGNa309LMsrCoYFvpwHDF0+Y1A=
github.com/go-asn1-ber/asn1-ber v1.5.4/go.mod h1:hEBeB/ic+5LoWskz+yKT7vGhhPYkProFKoKdwZRWMe0=
github.com/go-co-op/gocron v1.25.0 h1:pzAdtily1JVIf6lGby6K0JKzhishgLOllQgNxoYbR+8=
github.com/go-co-op/gocron v1.25.0/go.mod h1:JHrQDY4iE1HZPkgTyoccY4xtDgLbrUwL+xODIbEQdnc=
github.com/go-fonts/dejavu v0.1.0/go.mod h1:4Wt4I4OU2Nq9asgDCteaAaWZOV24E+0/Pwo0gppep4g=
github.com/go-fonts/latin-modern v0.2.0/go.mod h1:rQVLdDMK+mK1xscDwsqM5J8U2jrRa3T0ecnM9pNujks=
github.com/go-fonts/liberation v0.1.1/go.mod h1:K6qoJYypsmfVjWg8KOVDQhLc8UDgIK2HYqyqAO9z7GY=
@ -822,8 +826,9 @@ github.com/kr/fs v0.1.0/go.mod h1:FFnZGqtBN9Gxj7eW1uZ42v5BccTP0vu6NEaFoC2HwRg=
github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc=
github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=
github.com/kr/pretty v0.2.0/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI=
github.com/kr/pretty v0.2.1 h1:Fmg33tUaq4/8ym9TJN1x7sLJnHVwhP33CNkpYV/7rwI=
github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI=
github.com/kr/pretty v0.3.0 h1:WgNl7dwNpEZ6jJ9k1snq4pZsg7DOEN8hP9Xw0Tsjwk0=
github.com/kr/pretty v0.3.0/go.mod h1:640gp4NfQd8pI5XOwp5fnNeVWj67G7CFk/SaSQn7NBk=
github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
github.com/kr/pty v1.1.5/go.mod h1:9r2w37qlBe7rQ6e1fg1S/9xpWHSnaqNdHD3WcMdbPDA=
github.com/kr/pty v1.1.8/go.mod h1:O1sed60cT9XZ5uDucP5qwvh+TE3NnUj51EiZO/lmSfw=
@ -1005,6 +1010,7 @@ github.com/pierrec/lz4 v2.0.5+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi
github.com/pierrec/lz4/v4 v4.1.8/go.mod h1:gZWDp/Ze/IJXGXf23ltt2EXimqmTUXEy0GFuRQyBid4=
github.com/pkg/browser v0.0.0-20210706143420-7d21f8c997e2/go.mod h1:HKlIX3XHQyzLZPlr7++PzdhaXEj94dEiJgZDTsxEqUI=
github.com/pkg/browser v0.0.0-20210911075715-681adbf594b8/go.mod h1:HKlIX3XHQyzLZPlr7++PzdhaXEj94dEiJgZDTsxEqUI=
github.com/pkg/diff v0.0.0-20210226163009-20ebb0f2a09e/go.mod h1:pJLUxLENpZxwdsKMEsNbx1VGcRFpLqf3715MtcvvzbA=
github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pkg/errors v0.8.1-0.20171018195549-f15c970de5b7/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
@ -1060,11 +1066,16 @@ github.com/qustavo/sqlhooks/v2 v2.1.0 h1:54yBemHnGHp/7xgT+pxwmIlMSDNYKx5JW5dfRAi
github.com/qustavo/sqlhooks/v2 v2.1.0/go.mod h1:aMREyKo7fOKTwiLuWPsaHRXEmtqG4yREztO0idF83AU=
github.com/remyoudompheng/bigfft v0.0.0-20190728182440-6a916e37a237/go.mod h1:qqbHyh8v60DhA7CoWK5oRCqLrMHRGoxYCSS9EjAz6Eo=
github.com/remyoudompheng/bigfft v0.0.0-20200410134404-eec4a21b6bb0/go.mod h1:qqbHyh8v60DhA7CoWK5oRCqLrMHRGoxYCSS9EjAz6Eo=
github.com/robfig/cron/v3 v3.0.1 h1:WdRxkvbJztn8LMz/QEvLN5sBU+xKpSqwwUO1Pjr4qDs=
github.com/robfig/cron/v3 v3.0.1/go.mod h1:eQICP3HwyT7UooqI/z+Ov+PtYAWygg1TEWWzGIFLtro=
github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6SoW27p1b0cqNHllgS5HIMJraePCO15w5zCzIWYg=
github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ=
github.com/rogpeppe/go-internal v1.1.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4=
github.com/rogpeppe/go-internal v1.2.2/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4=
github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4=
github.com/rogpeppe/go-internal v1.6.1/go.mod h1:xXDCJY+GAPziupqXw64V24skbSoqbTEfhy4qGm1nDQc=
github.com/rogpeppe/go-internal v1.8.1 h1:geMPLpDpQOgVyCg5z5GoRwLHepNdb71NXb67XFkP+Eg=
github.com/rogpeppe/go-internal v1.8.1/go.mod h1:JeRgkft04UBgHMgCIwADu4Pn6Mtm5d4nPKWu0nJ5d+o=
github.com/rs/xid v1.2.1/go.mod h1:+uKXf+4Djp6Md1KODXJxgGQPKngRmWyn10oCKFzNHOQ=
github.com/rs/zerolog v1.13.0/go.mod h1:YbFCdg8HfsridGWAh22vktObvhZbQsZXe4/zB0OKkWU=
github.com/rs/zerolog v1.15.0/go.mod h1:xYTKnLHcpfU2225ny5qZjxnj9NvkumZYjJHlAThCjNc=
@ -1142,8 +1153,9 @@ github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/
github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
github.com/stretchr/testify v1.7.2/go.mod h1:R6va5+xMeoiuVRoj+gSkQ7d3FALtqAAGI1FQKckRals=
github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU=
github.com/stretchr/testify v1.8.1 h1:w7B6lhMri9wdJUVmEZPGGhZzrYTPvgJArz7wNPgYKsk=
github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4=
github.com/stretchr/testify v1.8.2 h1:+h33VjcLVPDHtOdpUCuF+7gSuG3yGIftsP1YvFihtJ8=
github.com/stretchr/testify v1.8.2/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4=
github.com/subosito/gotenv v1.2.0/go.mod h1:N0PQaV/YGNqwC0u51sEeR/aUtSLEXKX9iv69rRypqCw=
github.com/swaggo/files v1.0.0 h1:1gGXVIeUFCS/dta17rnP0iOpr6CXFwKD7EO5ID233e4=
github.com/swaggo/files v1.0.0/go.mod h1:N59U6URJLyU1PQgFqPM7wXLMhJx7QAolnvfQkqO13kc=

View File

@ -6,7 +6,6 @@ package importer
import (
"bytes"
"database/sql"
"encoding/json"
"fmt"
"os"
@ -69,17 +68,18 @@ func HandleImportFlag(flag string) error {
}
// checkJobData(&jobData)
// SanityChecks(&jobMeta.BaseJob)
jobMeta.MonitoringStatus = schema.MonitoringStatusArchivingSuccessful
if _, err = r.Find(&jobMeta.JobID, &jobMeta.Cluster, &jobMeta.StartTime); err != sql.ErrNoRows {
if err != nil {
log.Warn("Error while finding job in jobRepository")
return err
}
return fmt.Errorf("REPOSITORY/INIT > a job with that jobId, cluster and startTime does already exist")
}
// if _, err = r.Find(&jobMeta.JobID, &jobMeta.Cluster, &jobMeta.StartTime); err != sql.ErrNoRows {
// if err != nil {
// log.Warn("Error while finding job in jobRepository")
// return err
// }
//
// return fmt.Errorf("REPOSITORY/INIT > a job with that jobId, cluster and startTime does already exist")
// }
//
job := schema.Job{
BaseJob: jobMeta.BaseJob,
StartTime: time.Unix(jobMeta.StartTime, 0),

View File

@ -7,7 +7,6 @@ package importer
import (
"encoding/json"
"fmt"
"math"
"strings"
"time"
@ -15,13 +14,16 @@ import (
"github.com/ClusterCockpit/cc-backend/pkg/archive"
"github.com/ClusterCockpit/cc-backend/pkg/log"
"github.com/ClusterCockpit/cc-backend/pkg/schema"
"github.com/ClusterCockpit/cc-backend/pkg/units"
)
// Delete the tables "job", "tag" and "jobtag" from the database and
// repopulate them using the jobs found in `archive`.
func InitDB() error {
r := repository.GetJobRepository()
if err := r.Flush(); err != nil {
log.Errorf("repository initDB(): %v", err)
return err
}
starttime := time.Now()
log.Print("Building job table...")
@ -154,36 +156,6 @@ func loadJobStat(job *schema.JobMeta, metric string) float64 {
return 0.0
}
func getNormalizationFactor(v float64) (float64, int) {
count := 0
scale := -3
if v > 1000.0 {
for v > 1000.0 {
v *= 1e-3
count++
}
} else {
for v < 1.0 {
v *= 1e3
count++
}
scale = 3
}
return math.Pow10(count * scale), count * scale
}
func normalize(avg float64, p string) (float64, string) {
f, e := getNormalizationFactor(avg)
if e != 0 {
np := units.NewPrefixFromFactor(units.NewPrefix(p), e)
return f, np.Prefix()
}
return f, p
}
func checkJobData(d *schema.JobData) error {
for _, scopes := range *d {
// var newUnit schema.Unit
@ -200,7 +172,7 @@ func checkJobData(d *schema.JobData) error {
}
avg := sum / float64(len(metric.Series))
f, p := normalize(avg, metric.Unit.Prefix)
f, p := Normalize(avg, metric.Unit.Prefix)
if p != metric.Unit.Prefix {

View File

@ -0,0 +1,58 @@
// Copyright (C) 2022 NHR@FAU, University Erlangen-Nuremberg.
// All rights reserved.
// Use of this source code is governed by a MIT-style
// license that can be found in the LICENSE file.
package importer
import (
"math"
ccunits "github.com/ClusterCockpit/cc-units"
)
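// getNormalizationFactor returns the scaling factor that moves v into the range [1, 1000) together with the base-10 exponent of that factor.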
func getNormalizationFactor(v float64) (float64, int) {
count := 0
scale := -3
if v > 1000.0 {
for v > 1000.0 {
v *= 1e-3
count++
}
} else {
for v < 1.0 {
v *= 1e3
count++
}
scale = 3
}
return math.Pow10(count * scale), count * scale
}
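// getExponent returns the decimal exponent of a prefix factor p, e.g. 9 for Giga (1e9).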
func getExponent(p float64) int {
count := 0
for p > 1.0 {
p = p / 1000.0
count++
}
return count * 3
}
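// newPrefixFromFactor adjusts the prefix op by the exponent e, compensating for a value that was scaled by 10^e.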
func newPrefixFromFactor(op ccunits.Prefix, e int) ccunits.Prefix {
f := float64(op)
exp := math.Pow10(getExponent(f) - e)
return ccunits.Prefix(exp)
}
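// Normalize returns the scaling factor and the adjusted unit prefix so that avg times the factor falls into the range [1, 1000).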
func Normalize(avg float64, p string) (float64, string) {
f, e := getNormalizationFactor(avg)
if e != 0 {
np := newPrefixFromFactor(ccunits.NewPrefix(p), e)
return f, np.Prefix()
}
return f, p
}

View File

@ -8,7 +8,7 @@ import (
"fmt"
"testing"
"github.com/ClusterCockpit/cc-backend/pkg/units"
ccunits "github.com/ClusterCockpit/cc-units"
)
func TestNormalizeFactor(t *testing.T) {
@ -27,11 +27,11 @@ func TestNormalizeFactor(t *testing.T) {
fmt.Printf("Factor %e Count %d\n", f, e)
np := units.NewPrefix("")
np := ccunits.NewPrefix("")
fmt.Printf("Prefix %e Short %s\n", float64(np), np.Prefix())
p := units.NewPrefixFromFactor(np, e)
p := newPrefixFromFactor(np, e)
if p.Prefix() != "G" {
t.Errorf("Failed Prefix or unit: Want G, Got %s", p.Prefix())
@ -52,11 +52,11 @@ func TestNormalizeKeep(t *testing.T) {
fmt.Printf("Factor %e Count %d\n", f, e)
np := units.NewPrefix("G")
np := ccunits.NewPrefix("G")
fmt.Printf("Prefix %e Short %s\n", float64(np), np.Prefix())
p := units.NewPrefixFromFactor(np, e)
p := newPrefixFromFactor(np, e)
if p.Prefix() != "G" {
t.Errorf("Failed Prefix or unit: Want G, Got %s", p.Prefix())

View File

@ -1,496 +0,0 @@
{
"cpu_used": {
"core": {
"unit": {
"base": ""
},
"timestep": 30,
"series": [
{
"hostname": "taurusi6489",
"id": "0",
"statistics": {
"min": 0.09090909090909093,
"avg": 0.9173553719008265,
"max": 1.0000000000000002
},
"data": [
0.09090909090909093,
0.9999999999999999,
1.0,
1.0000000000000002,
1.0,
1.0000000000000002,
0.9999999999999999,
1.0,
1.0,
1.0,
1.0
]
},
{
"hostname": "taurusi6489",
"id": "1",
"statistics": {
"min": 0.03694102397926118,
"avg": 0.045968409230268584,
"max": 0.08809840425531917
},
"data": [
0.08809840425531917,
0.05710659898477157,
0.04034861200774694,
0.037962362102530824,
0.03976721629485936,
0.04163976759199483,
0.03694102397926118,
0.03821243523316062,
0.03851132686084142,
0.044752092723760455,
0.04231266149870802
]
},
{
"hostname": "taurusi6490",
"id": "10",
"statistics": {
"min": 0.10505319148936171,
"avg": 0.9186411992263056,
"max": 1.0000000000000002
},
"data": [
0.10505319148936171,
1.0000000000000002,
1.0,
1.0,
1.0,
0.9999999999999999,
1.0,
0.9999999999999999,
1.0,
1.0,
1.0
]
},
{
"hostname": "taurusi6490",
"id": "11",
"statistics": {
"min": 0.05286048845767815,
"avg": 0.07053823838706144,
"max": 0.075148113501715
},
"data": [
0.05286048845767815,
0.06936597614563718,
0.07254534083802376,
0.075148113501715,
0.06909547738693468,
0.07372696032489846,
0.07077983088005012,
0.07082419304293325,
0.07424812030075188,
0.07285803627267043,
0.07446808510638298
]
}
],
"statisticsSeries": null
}
},
"ipc": {
"core": {
"unit": {
"base": "IPC"
},
"timestep": 60,
"series": [
{
"hostname": "taurusi6489",
"id": "0",
"statistics": {
"min": 1.3808406263195592,
"avg": 1.3960848578375105,
"max": 1.4485575599350569
},
"data": [
1.4485575599350569,
1.3808406263195592,
1.3830284413690626,
1.3836692663348698,
1.3843283952290035
]
},
{
"hostname": "taurusi6489",
"id": "1",
"statistics": {
"min": 0.30469640475234366,
"avg": 0.8816944294664065,
"max": 1.797623522191001
},
"data": [
1.797623522191001,
0.954395633726228,
1.0019972349956185,
0.30469640475234366,
0.3497593516668412
]
},
{
"hostname": "taurusi6490",
"id": "10",
"statistics": {
"min": 1.3791232173760588,
"avg": 1.3850247295506815,
"max": 1.386710405495511
},
"data": [
1.3791232173760588,
1.38619977419787,
1.386397917938246,
1.3866923327457215,
1.386710405495511
]
},
{
"hostname": "taurusi6490",
"id": "11",
"statistics": {
"min": 0.6424094604392216,
"avg": 0.9544442638400293,
"max": 1.2706704244636826
},
"data": [
1.2706704244636826,
0.6424094604392216,
0.9249973908234796,
0.6940110823242276,
1.2401329611495353
]
}
],
"statisticsSeries": null
}
},
"flops_any": {
"core": {
"unit": {
"base": "F/s"
},
"timestep": 60,
"series": [
{
"hostname": "taurusi6489",
"id": "0",
"statistics": {
"min": 0.0,
"avg": 184.2699002412084,
"max": 921.3495012060421
},
"data": [
921.3495012060421,
0.0,
0.0,
0.0,
0.0
]
},
{
"hostname": "taurusi6489",
"id": "1",
"statistics": {
"min": 0.13559227208748068,
"avg": 273.2997868356056,
"max": 1355.9227390817396
},
"data": [
1355.9227390817396,
8.94908797747172,
0.6779613312519499,
0.13559227208748068,
0.8135535154771758
]
},
{
"hostname": "taurusi6490",
"id": "10",
"statistics": {
"min": 0.0,
"avg": 1678.8419461262179,
"max": 4346.591400350933
},
"data": [
4346.591400350933,
0.0,
578.4248288199713,
0.0,
3469.193501460185
]
},
{
"hostname": "taurusi6490",
"id": "11",
"statistics": {
"min": 45.28689133054866,
"avg": 609.6644949204072,
"max": 2582.7080822873186
},
"data": [
2582.7080822873186,
45.28689133054866,
48.67663233623293,
47.591911855555026,
324.0589567923803
]
}
],
"statisticsSeries": null
}
},
"mem_bw": {
"socket": {
"unit": {
"base": "B/s"
},
"timestep": 60,
"series": [
{
"hostname": "taurusi6489",
"id": "0",
"statistics": {
"min": 653671812.1661415,
"avg": 1637585527.5854635,
"max": 2614718291.9554267
},
"data": [
653671812.1661415,
2614718291.9554267,
1732453371.7073724,
1612865229.8704093,
1574218932.2279677
]
},
{
"hostname": "taurusi6490",
"id": "0",
"statistics": {
"min": 1520190251.61048,
"avg": 1572477682.3850098,
"max": 1688960732.2760606
},
"data": [
1688960732.2760606,
1580140679.8216474,
1520190251.61048,
1541841829.6250021,
1531254918.591859
]
}
],
"statisticsSeries": null
}
},
"file_bw": {
"node": {
"unit": {
"base": "B/s"
},
"timestep": 30,
"series": [
{
"hostname": "taurusi6489",
"statistics": {
"min": 0.0,
"avg": 190352.6328851857,
"max": 2093878.361723524
},
"data": [
0.0,
0.0,
0.0,
0.6000135186380174,
0.0,
0.0,
2093878.361723524,
0.0,
0.0,
0.0,
0.0
]
},
{
"hostname": "taurusi6490",
"statistics": {
"min": 0.0,
"avg": 1050832.4509396513,
"max": 11559156.360352296
},
"data": [
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
11559156.360352296,
0.0,
0.5999838690326298,
0.0,
0.0
]
}
],
"statisticsSeries": null
}
},
"net_bw": {
"node": {
"unit": {
"base": "B/s"
},
"timestep": 30,
"series": [
{
"hostname": "taurusi6489",
"statistics": {
"min": 126779.89655880642,
"avg": 653834.5091507058,
"max": 1285639.5107541133
},
"data": [
1158202.7403032137,
126779.89655880642,
419017.91939583793,
345766.3974972795,
645419.3296982117,
644667.7333333333,
1285639.5107541133,
643481.2108874657,
640025.3562553325,
643241.4875354709,
639938.0184386979
]
},
{
"hostname": "taurusi6490",
"statistics": {
"min": 640156.9862985397,
"avg": 872367.6551257868,
"max": 1916309.7075416835
},
"data": [
1774843.146788355,
643218.3646426039,
641681.1031071587,
644690.1512268113,
647183.5650609672,
644439.3303402043,
1916309.7075416835,
643748.3241006166,
757189.8273227927,
642583.6999539217,
640156.9862985397
]
}
],
"statisticsSeries": null
}
},
"mem_used": {
"node": {
"unit": {
"base": "B"
},
"timestep": 30,
"series": [
{
"hostname": "taurusi6489",
"statistics": {
"min": 2779066368.0,
"avg": 9282117259.636364,
"max": 10202595328.0
},
"data": [
2779066368.0,
8518217728.0,
9852760064.0,
9979805696.0,
10039619584.0,
10087104512.0,
10136084480.0,
10202595328.0,
10154196992.0,
10177409024.0,
10176430080.0
]
},
{
"hostname": "taurusi6490",
"statistics": {
"min": 9993277440.0,
"avg": 10013080110.545454,
"max": 10039676928.0
},
"data": [
10001317888.0,
10013028352.0,
10006728704.0,
10039676928.0,
10035838976.0,
10033356800.0,
10006577152.0,
10005659648.0,
9993277440.0,
9993564160.0,
10014855168.0
]
}
],
"statisticsSeries": null
}
},
"cpu_power": {
"socket": {
"unit": {
"base": "W"
},
"timestep": 60,
"series": [
{
"hostname": "taurusi6489",
"id": "0",
"statistics": {
"min": 35.50647456742635,
"avg": 72.08313211552377,
"max": 83.33799371150049
},
"data": [
35.50647456742635,
75.65022009482759,
83.33799371150049,
83.00405043233219,
82.9169217715322
]
},
{
"hostname": "taurusi6490",
"id": "0",
"statistics": {
"min": 83.8466923147859,
"avg": 85.18572681122097,
"max": 85.83909286117324
},
"data": [
83.8466923147859,
85.58816979864088,
85.31266819129794,
85.83909286117324,
85.34201089020692
]
}
],
"statisticsSeries": null
}
}
}

View File

@ -0,0 +1,6 @@
{
"jobId": 398955,
"cluster": "fritz",
"startTime": 1675956725,
"duration": 260
}

View File

@ -1,6 +1,6 @@
{
"jobId": 398764,
"cluster": "fritz",
"startTime": 1675954353
"duration": 3400
"startTime": 1675954353,
"duration": 177
}

View File

@ -1 +1 @@
{"jobId":398764,"user":"k106eb10","project":"k106eb","cluster":"fritz","subCluster":"main","partition":"singlenode","arrayJobId":0,"numNodes":1,"numHwthreads":72,"numAcc":0,"exclusive":1,"monitoringStatus":1,"smt":0,"jobState":"completed","duration":177,"walltime":86340,"resources":[{"hostname":"f0649"}],"metaData":{"jobName":"ams_pipeline","jobScript":"#!/bin/bash -l\n#SBATCH --job-name=ams_pipeline\n#SBATCH --time=23:59:00\n#SBATCH --partition=singlenode\n#SBATCH --ntasks=72\n#SBATCH --hint=multithread\n#SBATCH --chdir=/home/atuin/k106eb/k106eb10/ACE/Ni-Al/DFT/VASP_PBE_500_0.125_0.1_NM/AlNi/binaries/bulk/moreprototypesYury/gen_131_CuTe/cfg/Ni2Al2\n#SBATCH --export=NONE\nunset SLURM_EXPORT_ENV\nuss=$(whoami)\nfind /dev/shm/ -user $uss -type f -mmin +30 -delete\ncd \"/home/atuin/k106eb/k106eb10/ACE/Ni-Al/DFT/VASP_PBE_500_0.125_0.1_NM/AlNi/binaries/bulk/moreprototypesYury/gen_131_CuTe/cfg/Ni2Al2\"\nams_pipeline pipeline.json \u003e \"/home/atuin/k106eb/k106eb10/ACE/Ni-Al/DFT/VASP_PBE_500_0.125_0.1_NM/AlNi/binaries/bulk/moreprototypesYury/gen_131_CuTe/cfg/Ni2Al2/ams_pipeline_job.sh.out\" 2\u003e \"/home/atuin/k106eb/k106eb10/ACE/Ni-Al/DFT/VASP_PBE_500_0.125_0.1_NM/AlNi/binaries/bulk/moreprototypesYury/gen_131_CuTe/cfg/Ni2Al2/ams_pipeline_job.sh.err\"\n","slurmInfo":"\nJobId=398764 JobName=ams_pipeline\n UserId=k106eb10(210387) GroupId=80111\n Account=k106eb QOS=normal \n Requeue=False Restarts=0 BatchFlag=True \n TimeLimit=1439\n SubmitTime=2023-02-09T14:10:20\n Partition=singlenode \n NodeList=f0649\n NumNodes=1 NumCPUs=72 NumTasks=72 CPUs/Task=1\n NTasksPerNode:Socket:Core=0:None:None\n TRES_req=cpu=72,mem=250000M,node=1,billing=72\n TRES_alloc=cpu=72,node=1,billing=72\n Command=/home/atuin/k106eb/k106eb10/ACE/Ni-Al/DFT/VASP_PBE_500_0.125_0.1_NM/AlNi/binaries/bulk/moreprototypesYury/gen_131_CuTe/cfg/Ni2Al2/ams_pipeline_job.sh\n WorkDir=/home/atuin/k106eb/k106eb10/ACE/Ni-Al/DFT/VASP_PBE_500_0.125_0.1_NM/AlNi/binaries/bulk/moreprototypesYury/gen_131_CuTe/cfg/Ni2Al2\n StdErr=\n 
StdOut=ams_pipeline.o%j\n"},"startTime":1675954353,"statistics":{"clock":{"unit":{"base":"Hz","prefix":"M"},"avg":1336.519,"min":801.564,"max":2348.215},"cpu_load":{"unit":{"base":""},"avg":31.64,"min":17.36,"max":45.54},"cpu_power":{"unit":{"base":"W"},"avg":150.018,"min":93.672,"max":261.592},"cpu_user":{"unit":{"base":""},"avg":28.518,"min":0.09,"max":57.343},"flops_any":{"unit":{"base":"F/s","prefix":"G"},"avg":45.012,"min":0,"max":135.037},"flops_dp":{"unit":{"base":"F/s","prefix":"G"},"avg":22.496,"min":0,"max":67.488},"flops_sp":{"unit":{"base":"F/s","prefix":"G"},"avg":0.02,"min":0,"max":0.061},"ib_recv":{"unit":{"base":"B/s"},"avg":14442.82,"min":219.998,"max":42581.368},"ib_recv_pkts":{"unit":{"base":"packets/s"},"avg":201.532,"min":1.25,"max":601.345},"ib_xmit":{"unit":{"base":"B/s"},"avg":282.098,"min":56.2,"max":569.363},"ib_xmit_pkts":{"unit":{"base":"packets/s"},"avg":1.228,"min":0.433,"max":2},"ipc":{"unit":{"base":"IPC"},"avg":0.77,"min":0.564,"max":0.906},"mem_bw":{"unit":{"base":"B/s","prefix":"G"},"avg":4.872,"min":0.025,"max":14.552},"mem_power":{"unit":{"base":"W"},"avg":7.725,"min":6.286,"max":10.556},"mem_used":{"unit":{"base":"B","prefix":"G"},"avg":6.162,"min":6.103,"max":6.226},"nfs4_read":{"unit":{"base":"B/s","prefix":"M"},"avg":1045.333,"min":311,"max":1525},"nfs4_total":{"unit":{"base":"B/s","prefix":"M"},"avg":6430,"min":2796,"max":11518},"nfs4_write":{"unit":{"base":"B/s","prefix":"M"},"avg":24.333,"min":0,"max":38},"vectorization_ratio":{"unit":{"base":"%"},"avg":25.528,"min":0,"max":76.585}}}
{"jobId":398764,"user":"k106eb10","project":"k106eb","cluster":"fritz","subCluster":"main","numNodes":1,"exclusive":1,"jobState":"completed","duration":177,"resources":[{"hostname":"f0649"}],"startTime":1675954353,"statistics":{"clock":{"unit":{"base":"Hz","prefix":"M"},"avg":1336.519,"min":801.564,"max":2348.215},"cpu_load":{"unit":{"base":""},"avg":31.64,"min":17.36,"max":45.54},"cpu_power":{"unit":{"base":"W"},"avg":150.018,"min":93.672,"max":261.592},"cpu_user":{"unit":{"base":""},"avg":28.518,"min":0.09,"max":57.343},"flops_any":{"unit":{"base":"F/s","prefix":"G"},"avg":45.012,"min":0,"max":135.037},"flops_dp":{"unit":{"base":"F/s","prefix":"G"},"avg":22.496,"min":0,"max":67.488},"flops_sp":{"unit":{"base":"F/s","prefix":"G"},"avg":0.02,"min":0,"max":0.061},"ib_recv":{"unit":{"base":"B/s"},"avg":14442.82,"min":219.998,"max":42581.368},"ib_recv_pkts":{"unit":{"base":"packets/s"},"avg":201.532,"min":1.25,"max":601.345},"ib_xmit":{"unit":{"base":"B/s"},"avg":282.098,"min":56.2,"max":569.363},"ib_xmit_pkts":{"unit":{"base":"packets/s"},"avg":1.228,"min":0.433,"max":2},"ipc":{"unit":{"base":"IPC"},"avg":0.77,"min":0.564,"max":0.906},"mem_bw":{"unit":{"base":"B/s","prefix":"G"},"avg":4.872,"min":0.025,"max":14.552},"mem_power":{"unit":{"base":"W"},"avg":7.725,"min":6.286,"max":10.556},"mem_used":{"unit":{"base":"B","prefix":"G"},"avg":6.162,"min":6.103,"max":6.226},"nfs4_read":{"unit":{"base":"B/s","prefix":"M"},"avg":1045.333,"min":311,"max":1525},"nfs4_total":{"unit":{"base":"B/s","prefix":"M"},"avg":6430,"min":2796,"max":11518},"nfs4_write":{"unit":{"base":"B/s","prefix":"M"},"avg":24.333,"min":0,"max":38},"vectorization_ratio":{"unit":{"base":"%"},"avg":25.528,"min":0,"max":76.585}}}

View File

@ -1,98 +0,0 @@
{
"jobId": 20639587,
"user": "s3804552",
"project": "p_speichersysteme",
"cluster": "taurus",
"subCluster": "haswell",
"partition": "haswell64",
"numNodes": 2,
"numHwthreads": 4,
"exclusive": 0,
"startTime": 1635856524,
"jobState": "completed",
"duration": 310,
"walltime": 3600,
"smt": 0,
"resources": [
{
"hostname": "taurusi6489",
"hwthreads": [
0,
1
]
},
{
"hostname": "taurusi6490",
"hwthreads": [
10,
11
]
}
],
"statistics": {
"cpu_used": {
"min": 0.03694102397926118,
"avg": 0.48812580468611544,
"max": 1.0000000000000002,
"unit": {
"base": ""
}
},
"ipc": {
"min": 0.30469640475234366,
"avg": 1.154312070173657,
"max": 1.797623522191001,
"unit": {
"base": "IPC"
}
},
"flops_any": {
"min": 0.0,
"avg": 686.5190320308598,
"max": 4346.591400350933,
"unit": {
"base": "F/s"
}
},
"mem_bw": {
"min": 653671812.1661415,
"avg": 1605031604.9852366,
"max": 2614718291.9554267,
"unit": {
"base": "B/s"
}
},
"file_bw": {
"min": 0.0,
"avg": 620592.5419124186,
"max": 11559156.360352296,
"unit": {
"base": "B/s"
}
},
"net_bw": {
"min": 126779.89655880642,
"avg": 763101.082138246,
"max": 1916309.7075416835,
"unit": {
"base": "B/s"
}
},
"mem_used": {
"min": 2779066368.0,
"avg": 9647598685.09091,
"max": 10202595328.0,
"unit": {
"base": "B"
}
},
"cpu_power": {
"min": 35.50647456742635,
"avg": 78.63442946337237,
"max": 85.83909286117324,
"unit": {
"base": "W"
}
}
}
}

View File

@ -96,6 +96,50 @@ func scanJob(row interface{ Scan(...interface{}) error }) (*schema.Job, error) {
return job, nil
}
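// Optimize runs database housekeeping: VACUUM for sqlite3; currently a no-op for the mysql driver.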
func (r *JobRepository) Optimize() error {
var err error
switch r.driver {
case "sqlite3":
if _, err = r.DB.Exec(`VACUUM`); err != nil {
return err
}
case "mysql":
log.Info("Optimize currently not supported for mysql driver")
}
return nil
}
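// Flush deletes all jobs, tags, and job-tag links from the database.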
func (r *JobRepository) Flush() error {
var err error
switch r.driver {
case "sqlite3":
if _, err = r.DB.Exec(`DELETE FROM jobtag`); err != nil {
return err
}
if _, err = r.DB.Exec(`DELETE FROM tag`); err != nil {
return err
}
if _, err = r.DB.Exec(`DELETE FROM job`); err != nil {
return err
}
case "mysql":
if _, err = r.DB.Exec(`TRUNCATE TABLE jobtag`); err != nil {
return err
}
if _, err = r.DB.Exec(`TRUNCATE TABLE tag`); err != nil {
return err
}
if _, err = r.DB.Exec(`TRUNCATE TABLE job`); err != nil {
return err
}
}
return nil
}
func (r *JobRepository) FetchJobName(job *schema.Job) (*string, error) {
start := time.Now()
cachekey := fmt.Sprintf("metadata:%d", job.ID)
@ -537,7 +581,7 @@ func (r *JobRepository) FindUserOrProjectOrJobname(ctx context.Context, searchte
func (r *JobRepository) FindColumnValue(user *auth.User, searchterm string, table string, selectColumn string, whereColumn string, isLike bool) (result string, err error) {
compareStr := " = ?"
query := searchterm
if isLike == true {
if isLike {
compareStr = " LIKE ?"
query = "%" + searchterm + "%"
}
@ -678,6 +722,38 @@ func (r *JobRepository) StopJobsExceedingWalltimeBy(seconds int) error {
return nil
}
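// FindJobsBefore returns all jobs with a start time before the given Unix timestamp.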
func (r *JobRepository) FindJobsBefore(startTime int64) ([]*schema.Job, error) {
query := sq.Select(jobColumns...).From("job").Where(fmt.Sprintf(
"job.start_time < %d", startTime))
sql, args, err := query.ToSql()
if err != nil {
log.Warn("Error while converting query to sql")
return nil, err
}
log.Debugf("SQL query: `%s`, args: %#v", sql, args)
rows, err := query.RunWith(r.stmtCache).Query()
if err != nil {
log.Error("Error while running query")
return nil, err
}
jobs := make([]*schema.Job, 0, 50)
for rows.Next() {
job, err := scanJob(rows)
if err != nil {
rows.Close()
log.Warn("Error while scanning rows")
return nil, err
}
jobs = append(jobs, job)
}
return jobs, nil
}
// GraphQL validation should make sure that no unknown values can be specified.
var groupBy2column = map[model.Aggregate]string{
model.AggregateUser: "job.user",
@ -695,9 +771,10 @@ func (r *JobRepository) JobsStatistics(ctx context.Context,
stats := map[string]*model.JobsStatistics{}
var castType string
if r.driver == "sqlite3" {
switch r.driver {
case "sqlite3":
castType = "int"
} else if r.driver == "mysql" {
case "mysql":
castType = "unsigned"
}
@ -879,7 +956,6 @@ func (r *JobRepository) jobsStatisticsHistogram(ctx context.Context,
value string, filters []*model.JobFilter, id, col string) ([]*model.HistoPoint, error) {
start := time.Now()
query := sq.Select(value, "COUNT(job.id) AS count").From("job")
query, qerr := SecurityCheck(ctx, sq.Select(value, "COUNT(job.id) AS count").From("job"))
if qerr != nil {

View File

@ -14,7 +14,7 @@ import (
func setup(t *testing.T) *JobRepository {
log.Init("info", true)
dbfilepath := "../../test/test.db"
dbfilepath := "testdata/test.db"
err := MigrateDB("sqlite3", dbfilepath)
if err != nil {
t.Fatal(err)

View File

@ -24,8 +24,8 @@ var migrationFiles embed.FS
func checkDBVersion(backend string, db *sql.DB) error {
var m *migrate.Migrate
if backend == "sqlite3" {
switch backend {
case "sqlite3":
driver, err := sqlite3.WithInstance(db, &sqlite3.Config{})
if err != nil {
return err
@ -39,7 +39,7 @@ func checkDBVersion(backend string, db *sql.DB) error {
if err != nil {
return err
}
} else if backend == "mysql" {
case "mysql":
driver, err := mysql.WithInstance(db, &mysql.Config{})
if err != nil {
return err
@ -65,11 +65,11 @@ func checkDBVersion(backend string, db *sql.DB) error {
}
if v < Version {
return fmt.Errorf("Unsupported database version %d, need %d.\nPlease backup your database file and run cc-backend --migrate-db", v, Version)
return fmt.Errorf("unsupported database version %d, need %d.\nPlease backup your database file and run cc-backend --migrate-db", v, Version)
}
if v > Version {
return fmt.Errorf("Unsupported database version %d, need %d.\nPlease refer to documentation how to downgrade db with external migrate tool!", v, Version)
return fmt.Errorf("unsupported database version %d, need %d.\nPlease refer to documentation how to downgrade db with external migrate tool", v, Version)
}
return nil
@ -78,7 +78,8 @@ func checkDBVersion(backend string, db *sql.DB) error {
func MigrateDB(backend string, db string) error {
var m *migrate.Migrate
if backend == "sqlite3" {
switch backend {
case "sqlite3":
d, err := iofs.New(migrationFiles, "migrations/sqlite3")
if err != nil {
log.Fatal(err)
@ -88,7 +89,7 @@ func MigrateDB(backend string, db string) error {
if err != nil {
return err
}
} else if backend == "mysql" {
case "mysql":
d, err := iofs.New(migrationFiles, "migrations/mysql")
if err != nil {
return err

View File

@ -31,13 +31,15 @@ CREATE TABLE IF NOT EXISTS job (
net_bw_avg REAL NOT NULL DEFAULT 0.0,
net_data_vol_total REAL NOT NULL DEFAULT 0.0,
file_bw_avg REAL NOT NULL DEFAULT 0.0,
file_data_vol_total REAL NOT NULL DEFAULT 0.0);
file_data_vol_total REAL NOT NULL DEFAULT 0.0,
UNIQUE (job_id, cluster, start_time)
);
CREATE TABLE IF NOT EXISTS tag (
id INTEGER PRIMARY KEY,
tag_type VARCHAR(255) NOT NULL,
tag_name VARCHAR(255) NOT NULL,
CONSTRAINT be_unique UNIQUE (tag_type, tag_name));
UNIQUE (tag_type, tag_name));
CREATE TABLE IF NOT EXISTS jobtag (
job_id INTEGER,

View File

@ -31,13 +31,15 @@ CREATE TABLE IF NOT EXISTS job (
net_bw_avg REAL NOT NULL DEFAULT 0.0,
net_data_vol_total REAL NOT NULL DEFAULT 0.0,
file_bw_avg REAL NOT NULL DEFAULT 0.0,
file_data_vol_total REAL NOT NULL DEFAULT 0.0);
file_data_vol_total REAL NOT NULL DEFAULT 0.0,
UNIQUE (job_id, cluster, start_time)
);
CREATE TABLE IF NOT EXISTS tag (
id INTEGER PRIMARY KEY,
tag_type VARCHAR(255) NOT NULL,
tag_name VARCHAR(255) NOT NULL,
CONSTRAINT be_unique UNIQUE (tag_type, tag_name));
UNIQUE (tag_type, tag_name));
CREATE TABLE IF NOT EXISTS jobtag (
job_id INTEGER,

View File

@ -34,11 +34,13 @@ func (r *JobRepository) QueryJobs(
if order != nil {
field := toSnakeCase(order.Field)
if order.Order == model.SortDirectionEnumAsc {
switch order.Order {
case model.SortDirectionEnumAsc:
query = query.OrderBy(fmt.Sprintf("job.%s ASC", field))
} else if order.Order == model.SortDirectionEnumDesc {
case model.SortDirectionEnumDesc:
query = query.OrderBy(fmt.Sprintf("job.%s DESC", field))
} else {
default:
return nil, errors.New("REPOSITORY/QUERY > invalid sorting order")
}
}
@ -117,7 +119,7 @@ func SecurityCheck(ctx context.Context, query sq.SelectBuilder) (queryOut sq.Sel
return query.Where("job.user = ?", user.Username), nil
} else { // Unauthorized : Error
var qnil sq.SelectBuilder
return qnil, errors.New(fmt.Sprintf("User '%s' with unknown roles! [%#v]\n", user.Username, user.Roles))
return qnil, fmt.Errorf("user '%s' with unknown roles [%#v]", user.Username, user.Roles)
}
}

View File

@ -34,7 +34,7 @@ func setupUserTest(t *testing.T) *UserCfgRepo {
}`
log.Init("info", true)
dbfilepath := "../../test/test.db"
dbfilepath := "testdata/test.db"
err := MigrateDB("sqlite3", dbfilepath)
if err != nil {
t.Fatal(err)

View File

@ -24,7 +24,7 @@ import (
func LoadEnv(file string) error {
f, err := os.Open(file)
if err != nil {
log.Error("Error while opening file")
log.Error("Error while opening .env file")
return err
}

View File

@ -18,6 +18,8 @@ const Version uint64 = 1
type ArchiveBackend interface {
Init(rawConfig json.RawMessage) (uint64, error)
Exists(job *schema.Job) bool
LoadJobMeta(job *schema.Job) (*schema.JobMeta, error)
LoadJobData(job *schema.Job) (schema.JobData, error)
@ -30,6 +32,10 @@ type ArchiveBackend interface {
GetClusters() []string
CleanUp(jobs []*schema.Job)
Compress(jobs []*schema.Job)
Iter(loadMetricData bool) <-chan JobContainer
}
@ -44,21 +50,23 @@ var useArchive bool
func Init(rawConfig json.RawMessage, disableArchive bool) error {
useArchive = !disableArchive
var kind struct {
var cfg struct {
Kind string `json:"kind"`
}
if err := json.Unmarshal(rawConfig, &kind); err != nil {
if err := json.Unmarshal(rawConfig, &cfg); err != nil {
log.Warn("Error while unmarshaling raw config json")
return err
}
switch kind.Kind {
switch cfg.Kind {
case "file":
ar = &FsArchive{}
// case "s3":
// ar = &S3Archive{}
default:
return fmt.Errorf("ARCHIVE/ARCHIVE > unkown archive backend '%s''", kind.Kind)
return fmt.Errorf("ARCHIVE/ARCHIVE > unkown archive backend '%s''", cfg.Kind)
}
version, err := ar.Init(rawConfig)
@ -67,6 +75,7 @@ func Init(rawConfig json.RawMessage, disableArchive bool) error {
return err
}
log.Infof("Load archive version %d", version)
return initClusterConfig()
}

pkg/archive/archive_test.go
View File

@ -0,0 +1,168 @@
// Copyright (C) 2022 NHR@FAU, University Erlangen-Nuremberg.
// All rights reserved.
// Use of this source code is governed by a MIT-style
// license that can be found in the LICENSE file.
package archive_test
import (
"encoding/json"
"fmt"
"io"
"io/ioutil"
"os"
"path/filepath"
"testing"
"time"
"github.com/ClusterCockpit/cc-backend/pkg/archive"
"github.com/ClusterCockpit/cc-backend/pkg/schema"
)
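// CopyFile copies the file src to dst, preserving the file mode.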
func CopyFile(src, dst string) (err error) {
in, err := os.Open(src)
if err != nil {
return
}
defer in.Close()
out, err := os.Create(dst)
if err != nil {
return
}
defer func() {
if e := out.Close(); e != nil {
err = e
}
}()
_, err = io.Copy(out, in)
if err != nil {
return
}
err = out.Sync()
if err != nil {
return
}
si, err := os.Stat(src)
if err != nil {
return
}
err = os.Chmod(dst, si.Mode())
if err != nil {
return
}
return
}
// CopyDir recursively copies a directory tree, attempting to preserve permissions.
// Source directory must exist, destination directory must *not* exist.
// Symlinks are ignored and skipped.
func CopyDir(src string, dst string) (err error) {
src = filepath.Clean(src)
dst = filepath.Clean(dst)
si, err := os.Stat(src)
if err != nil {
return err
}
if !si.IsDir() {
return fmt.Errorf("source is not a directory")
}
_, err = os.Stat(dst)
if err != nil && !os.IsNotExist(err) {
return
}
if err == nil {
return fmt.Errorf("destination already exists")
}
err = os.MkdirAll(dst, si.Mode())
if err != nil {
return
}
entries, err := ioutil.ReadDir(src)
if err != nil {
return
}
for _, entry := range entries {
srcPath := filepath.Join(src, entry.Name())
dstPath := filepath.Join(dst, entry.Name())
if entry.IsDir() {
err = CopyDir(srcPath, dstPath)
if err != nil {
return
}
} else {
// Skip symlinks.
if entry.Mode()&os.ModeSymlink != 0 {
continue
}
err = CopyFile(srcPath, dstPath)
if err != nil {
return
}
}
}
return
}
var jobs []*schema.Job
func setup(t *testing.T) archive.ArchiveBackend {
tmpdir := t.TempDir()
jobarchive := filepath.Join(tmpdir, "job-archive")
CopyDir("./testdata/archive/", jobarchive)
archiveCfg := fmt.Sprintf("{\"kind\": \"file\",\"path\": \"%s\"}", jobarchive)
if err := archive.Init(json.RawMessage(archiveCfg), false); err != nil {
t.Fatal(err)
}
jobs = make([]*schema.Job, 2)
jobs[0] = &schema.Job{}
jobs[0].JobID = 1403244
jobs[0].Cluster = "emmy"
jobs[0].StartTime = time.Unix(1608923076, 0)
jobs[1] = &schema.Job{}
jobs[1].JobID = 1404397
jobs[1].Cluster = "emmy"
jobs[1].StartTime = time.Unix(1609300556, 0)
return archive.GetHandle()
}
func TestCleanUp(t *testing.T) {
a := setup(t)
if !a.Exists(jobs[0]) {
t.Error("Job does not exist")
}
a.CleanUp(jobs)
if a.Exists(jobs[0]) || a.Exists(jobs[1]) {
t.Error("Jobs still exist")
}
}
// func TestCompress(t *testing.T) {
// a := setup(t)
// if !a.Exists(jobs[0]) {
// t.Error("Job does not exist")
// }
//
// a.Compress(jobs)
//
// if a.Exists(jobs[0]) || a.Exists(jobs[1]) {
// t.Error("Jobs still exist")
// }
// }

View File

@ -11,6 +11,7 @@ import (
"encoding/json"
"errors"
"fmt"
"io"
"os"
"path"
"path/filepath"
@ -38,17 +39,26 @@ func checkFileExists(filePath string) bool {
return !errors.Is(err, os.ErrNotExist)
}
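// getDirectory returns the archive directory of a job: <rootPath>/<cluster>/<jobid/1000>/<jobid%1000>/<starttime>.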
func getDirectory(
job *schema.Job,
rootPath string,
) string {
lvl1, lvl2 := fmt.Sprintf("%d", job.JobID/1000), fmt.Sprintf("%03d", job.JobID%1000)
return filepath.Join(
rootPath,
job.Cluster,
lvl1, lvl2,
strconv.FormatInt(job.StartTime.Unix(), 10))
}
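// getPath returns the path of the given file inside the job's archive directory.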
func getPath(
job *schema.Job,
rootPath string,
file string) string {
lvl1, lvl2 := fmt.Sprintf("%d", job.JobID/1000), fmt.Sprintf("%03d", job.JobID%1000)
return filepath.Join(
rootPath,
job.Cluster,
lvl1, lvl2,
strconv.FormatInt(job.StartTime.Unix(), 10), file)
getDirectory(job, rootPath), file)
}
func loadJobMeta(filename string) (*schema.JobMeta, error) {
@ -74,6 +84,7 @@ func loadJobData(filename string, isCompressed bool) (schema.JobData, error) {
log.Errorf("fsBackend LoadJobData()- %v", err)
return nil, err
}
defer f.Close()
if isCompressed {
r, err := gzip.NewReader(f)
@ -91,7 +102,6 @@ func loadJobData(filename string, isCompressed bool) (schema.JobData, error) {
return DecodeJobData(r, filename)
} else {
defer f.Close()
if config.Keys.Validate {
if err := schema.Validate(schema.Data, bufio.NewReader(f)); err != nil {
return schema.JobData{}, fmt.Errorf("validate job data: %v", err)
@ -147,6 +157,55 @@ func (fsa *FsArchive) Init(rawConfig json.RawMessage) (uint64, error) {
return version, nil
}
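// Exists reports whether the archive directory for the given job exists.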
func (fsa *FsArchive) Exists(job *schema.Job) bool {
dir := getDirectory(job, fsa.path)
_, err := os.Stat(dir)
return !errors.Is(err, os.ErrNotExist)
}
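// CleanUp removes the archive directories of the given jobs.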
func (fsa *FsArchive) CleanUp(jobs []*schema.Job) {
for _, job := range jobs {
dir := getDirectory(job, fsa.path)
if err := os.RemoveAll(dir); err != nil {
log.Errorf("JobArchive Cleanup() error: %v", err)
}
}
}
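// Compress gzips data.json into data.json.gz for each eligible job and removes the uncompressed file.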
func (fsa *FsArchive) Compress(jobs []*schema.Job) {
for _, job := range jobs {
fileIn := getPath(job, fsa.path, "data.json")
if checkFileExists(fileIn) && (job.Duration > 600 || job.NumNodes > 4) {
originalFile, err := os.Open(fileIn)
if err != nil {
log.Errorf("JobArchive Compress() error: %v", err)
}
defer originalFile.Close()
fileOut := getPath(job, fsa.path, "data.json.gz")
gzippedFile, err := os.Create(fileOut)
if err != nil {
log.Errorf("JobArchive Compress() error: %v", err)
}
defer gzippedFile.Close()
gzipWriter := gzip.NewWriter(gzippedFile)
defer gzipWriter.Close()
_, err = io.Copy(gzipWriter, originalFile)
if err != nil {
log.Errorf("JobArchive Compress() error: %v", err)
}
gzipWriter.Flush()
if err := os.Remove(fileIn); err != nil {
log.Errorf("JobArchive Compress() error: %v", err)
}
}
}
}
func (fsa *FsArchive) LoadJobData(job *schema.Job) (schema.JobData, error) {
var isCompressed bool = true
filename := getPath(job, fsa.path, "data.json.gz")

View File

@ -10,17 +10,12 @@ import (
"testing"
"time"
"github.com/ClusterCockpit/cc-backend/pkg/log"
"github.com/ClusterCockpit/cc-backend/pkg/schema"
)
func init() {
log.Init("info", true)
}
func TestInitEmptyPath(t *testing.T) {
var fsa FsArchive
_, err := fsa.Init(json.RawMessage("{\"kind\":\"../../test/archive\"}"))
_, err := fsa.Init(json.RawMessage("{\"kind\":\"testdata/archive\"}"))
if err == nil {
t.Fatal(err)
}
@ -28,14 +23,14 @@ func TestInitEmptyPath(t *testing.T) {
func TestInitNoJson(t *testing.T) {
var fsa FsArchive
_, err := fsa.Init(json.RawMessage("\"path\":\"../../test/archive\"}"))
_, err := fsa.Init(json.RawMessage("\"path\":\"testdata/archive\"}"))
if err == nil {
t.Fatal(err)
}
}
func TestInitNotExists(t *testing.T) {
var fsa FsArchive
_, err := fsa.Init(json.RawMessage("{\"path\":\"../../test/job-archive\"}"))
_, err := fsa.Init(json.RawMessage("{\"path\":\"testdata/job-archive\"}"))
if err == nil {
t.Fatal(err)
}
@ -43,11 +38,11 @@ func TestInitNotExists(t *testing.T) {
func TestInit(t *testing.T) {
var fsa FsArchive
version, err := fsa.Init(json.RawMessage("{\"path\":\"../../test/archive\"}"))
version, err := fsa.Init(json.RawMessage("{\"path\":\"testdata/archive\"}"))
if err != nil {
t.Fatal(err)
}
if fsa.path != "../../test/archive" {
if fsa.path != "testdata/archive" {
t.Fail()
}
if version != 1 {
@ -60,12 +55,12 @@ func TestInit(t *testing.T) {
func TestLoadJobMetaInternal(t *testing.T) {
var fsa FsArchive
_, err := fsa.Init(json.RawMessage("{\"path\":\"../../test/archive\"}"))
_, err := fsa.Init(json.RawMessage("{\"path\":\"testdata/archive\"}"))
if err != nil {
t.Fatal(err)
}
job, err := loadJobMeta("../../test/archive/emmy/1404/397/1609300556/meta.json")
job, err := loadJobMeta("testdata/archive/emmy/1404/397/1609300556/meta.json")
if err != nil {
t.Fatal(err)
}
@ -83,7 +78,7 @@ func TestLoadJobMetaInternal(t *testing.T) {
func TestLoadJobMeta(t *testing.T) {
var fsa FsArchive
_, err := fsa.Init(json.RawMessage("{\"path\":\"../../test/archive\"}"))
_, err := fsa.Init(json.RawMessage("{\"path\":\"testdata/archive\"}"))
if err != nil {
t.Fatal(err)
}
@ -111,7 +106,7 @@ func TestLoadJobMeta(t *testing.T) {
func TestLoadJobData(t *testing.T) {
var fsa FsArchive
_, err := fsa.Init(json.RawMessage("{\"path\":\"../../test/archive\"}"))
_, err := fsa.Init(json.RawMessage("{\"path\":\"testdata/archive\"}"))
if err != nil {
t.Fatal(err)
}
@ -137,7 +132,7 @@ func TestLoadJobData(t *testing.T) {
func TestLoadCluster(t *testing.T) {
var fsa FsArchive
_, err := fsa.Init(json.RawMessage("{\"path\":\"../../test/archive\"}"))
_, err := fsa.Init(json.RawMessage("{\"path\":\"testdata/archive\"}"))
if err != nil {
t.Fatal(err)
}
@ -154,7 +149,7 @@ func TestLoadCluster(t *testing.T) {
func TestIter(t *testing.T) {
var fsa FsArchive
_, err := fsa.Init(json.RawMessage("{\"path\":\"../../test/archive\"}"))
_, err := fsa.Init(json.RawMessage("{\"path\":\"testdata/archive\"}"))
if err != nil {
t.Fatal(err)
}

View File

@ -34,11 +34,11 @@ var (
)
var (
DebugLog *log.Logger
InfoLog *log.Logger
WarnLog *log.Logger
ErrLog *log.Logger
CritLog *log.Logger
DebugLog *log.Logger = log.New(DebugWriter, DebugPrefix, log.LstdFlags)
InfoLog *log.Logger = log.New(InfoWriter, InfoPrefix, log.LstdFlags|log.Lshortfile)
WarnLog *log.Logger = log.New(WarnWriter, WarnPrefix, log.LstdFlags|log.Lshortfile)
ErrLog *log.Logger = log.New(ErrWriter, ErrPrefix, log.LstdFlags|log.Llongfile)
CritLog *log.Logger = log.New(CritWriter, CritPrefix, log.LstdFlags|log.Llongfile)
)
/* CONFIG */
@ -70,12 +70,6 @@ func Init(lvl string, logdate bool) {
WarnLog = log.New(WarnWriter, WarnPrefix, log.Lshortfile)
ErrLog = log.New(ErrWriter, ErrPrefix, log.Llongfile)
CritLog = log.New(CritWriter, CritPrefix, log.Llongfile)
} else {
DebugLog = log.New(DebugWriter, DebugPrefix, log.LstdFlags)
InfoLog = log.New(InfoWriter, InfoPrefix, log.LstdFlags|log.Lshortfile)
WarnLog = log.New(WarnWriter, WarnPrefix, log.LstdFlags|log.Lshortfile)
ErrLog = log.New(ErrWriter, ErrPrefix, log.LstdFlags|log.Llongfile)
CritLog = log.New(CritWriter, CritPrefix, log.LstdFlags|log.Llongfile)
}
}

View File

@ -57,6 +57,13 @@ type ClusterConfig struct {
MetricDataRepository json.RawMessage `json:"metricDataRepository"`
}
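// Retention defines how and when jobs are removed from the job archive (and optionally from the database).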
type Retention struct {
Age int `json:"age"`
IncludeDB bool `json:"includeDB"`
Policy string `json:"policy"`
Location string `json:"location"`
}
// Format of the configuration (file). See below for the defaults.
type ProgramConfig struct {
// Address where the http (or https) server will listen on (for example: 'localhost:80').

View File

@ -41,9 +41,60 @@
"type": "string"
},
"job-archive": {
"description": "Path to the job-archive.",
"description": "Configuration keys for job-archive",
"type": "object",
"properties": {
"kind": {
"description": "Backend type for job-archive",
"type": "string",
"enum": [
"file",
"s3"
]
},
"path": {
"description": "Path to job archive for file backend",
"type": "string"
},
"compression": {
"description": "Setup automatic compression for jobs older than number of days",
"type": "integer"
},
"retention": {
"description": "Configuration keys for retention",
"type": "object",
"properties": {
"policy": {
"description": "Retention policy",
"type": "string",
"enum": [
"none",
"delete",
"move"
]
},
"includeDB": {
"description": "Also remove jobs from database",
"type": "boolean"
},
"age": {
"description": "Act on jobs with startTime older than age (in days)",
"type": "integer"
},
"location": {
"description": "The target directory for retention. Only applicable for retention move.",
"type": "string"
}
},
"required": [
"policy"
]
}
},
"required": [
"kind"
]
},
"disable-archive": {
"description": "Keep all metric data in the metric data repositories, do not write to the job-archive.",
"type": "boolean"

View File

@ -1,175 +0,0 @@
# cc-units - A unit system for ClusterCockpit
When working with metrics, the problem comes up that they may use different unit names but in fact have the same unit.
There are a lot of real-world examples like 'kB' and 'Kbyte'. In [cc-metric-collector](https://github.com/ClusterCockpit/cc-metric-collector), the collectors read data from different sources which may use different units, or the programmer specifies a unit for a metric by hand. The cc-units system is not comparable with the SI unit system. If you are looking for a package for the SI units, see [here](https://pkg.go.dev/github.com/gurre/si).
In order to enable unit comparison and conversion, the ccUnits package provides some helpers:
```go
NewUnit(unit string) Unit // create a new unit from some string like 'GHz', 'Mbyte' or 'kevents/s'
func GetUnitUnitFactor(in Unit, out Unit) (func(value float64) float64, error) // Get conversion function between two units
func GetPrefixFactor(in Prefix, out Prefix) func(value float64) float64 // Get conversion function between two prefixes
func GetUnitPrefixFactor(in Unit, out Prefix) (func(value float64) float64, Unit) // Get conversion function for prefix changes and the new unit for further use
type Unit interface {
Valid() bool
String() string
Short() string
AddUnitDenominator(div Measure)
}
```
In order to get the "normalized" string unit back or test for validity, you can use:
```go
u := NewUnit("MB")
fmt.Println(u.Valid()) // true
fmt.Printf("Long string %q", u.String()) // MegaBytes
fmt.Printf("Short string %q", u.Short()) // MBytes
v := NewUnit("foo")
fmt.Println(v.Valid()) // false
```
If you have two units or other components and need the conversion function:
```go
// Get conversion functions for 'kB' to 'MBytes'
u1 := NewUnit("kB")
u2 := NewUnit("MBytes")
convFunc, err := GetUnitUnitFactor(u1, u2) // Returns an error if the units have different measures
if err == nil {
v2 := convFunc(v1)
fmt.Printf("%f %s\n", v2, u2.Short())
}
// Get conversion function for 'kB' -> 'G' prefix.
// Returns the function and the new unit 'GBytes'
p1 := NewPrefix("G")
convFunc, u_p1 := GetUnitPrefixFactor(u1, p1)
// or
// convFunc, u_p1 := GetUnitPrefixStringFactor(u1, "G")
if convFunc != nil {
v2 := convFunc(v1)
fmt.Printf("%f %s\n", v2, u_p1.Short())
}
// Get conversion function for two prefixes: 'G' -> 'T'
p2 := NewPrefix("T")
convFunc = GetPrefixPrefixFactor(p1, p2)
if convFunc != nil {
v2 := convFunc(v1)
fmt.Printf("%f %s -> %f %s\n", v1, p1.Prefix(), v2, p2.Prefix())
}
```
(In the ClusterCockpit ecosystem the separation between values and units is useful since they are commonly not stored as a single entity; the value is a field in the CCMetric while the unit is a tag or a meta information.)
If you have a metric and want to derive a bandwidth or an events-per-second rate from it, you can reuse the original unit:
```go
in_unit, err := metric.GetMeta("unit")
if err == nil {
value, ok := metric.GetField("value")
if ok {
out_unit = NewUnit(in_unit)
out_unit.AddUnitDenominator("seconds")
seconds := timeDiff.Seconds()
y, err := lp.New(metric.Name()+"_bw",
metric.Tags(),
metric.Meta(),
map[string]interface{}{"value": value / seconds},
metric.Time())
if err == nil {
y.AddMeta("unit", out_unit.Short())
}
}
}
```
## Special unit detection
Some used measures like Bytes and Flops are non-dividable. Consequently, prefixes like Milli, Micro, and Nano are not useful for them. This is quite handy, since a unit `mb` for `MBytes` is not uncommon but would by default be parsed as "MilliBytes".
Special parsing rule for the following measures: iff `prefix==Milli`, use `prefix==Mega`:
- `Bytes`
- `Flops`
- `Packets`
- `Events`
- `Cycles`
- `Requests`
This means the prefixes `Micro` (like `ubytes`) and `Nano` (like `nflops/sec`) are not allowed and return an invalid unit. But you can specify `mflops` and `mb`.
Prefixes for `%` or `percent` are ignored.
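A minimal sketch of this rule, assuming the standalone `cc-units` module (imported as `ccunits` elsewhere in this repository) keeps the API documented in this README:

```go
package main

import (
	"fmt"

	ccunits "github.com/ClusterCockpit/cc-units"
)

func main() {
	// For non-dividable measures, a Milli prefix is forced to Mega,
	// so "mb" parses as MegaBytes rather than MilliBytes.
	u := ccunits.NewUnit("mb")
	fmt.Println(u.Valid(), u.Short()) // true MBytes

	// Micro and Nano prefixes on such measures yield an invalid unit.
	v := ccunits.NewUnit("ubytes")
	fmt.Println(v.Valid()) // false
}
```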
## Supported prefixes
```go
const (
Base Prefix = 1
Exa = 1e18
Peta = 1e15
Tera = 1e12
Giga = 1e9
Mega = 1e6
Kilo = 1e3
Milli = 1e-3
Micro = 1e-6
Nano = 1e-9
Kibi = 1024
Mebi = 1024 * 1024
Gibi = 1024 * 1024 * 1024
Tebi = 1024 * 1024 * 1024 * 1024
)
```
The prefixes are detected using a regular expression `^([kKmMgGtTpP]?[i]?)(.*)` that splits the prefix from the measure. You probably don't need to deal with the prefixes in the code.
## Supported measures
```go
const (
None Measure = iota
Bytes
Flops
Percentage
TemperatureC
TemperatureF
Rotation
Hertz
Time
Watt
Joule
Cycles
Requests
Packets
Events
)
```
There is a regular expression for each of the measures, like `^([bB][yY]?[tT]?[eE]?[sS]?)` for the `Bytes` measure.
## New units
If the selected units are not suitable for your metric, feel free to send a PR.
### New prefix
For a new prefix, add it to the big `const` in `ccUnitPrefix.go` and adjust the prefix-unit-splitting regular expression. Afterwards, you have to add cases to the three functions `String()`, `Prefix()` and `NewPrefix()`. `NewPrefix()` contains the parser (`k` or `K` -> `Kilo`). The other two are used for output. `String()` outputs a longer version of the prefix (`Kilo`), while `Prefix()` returns only the short notation (`K`).
### New measure
Adding new prefixes is probably rare but adding a new measure is a more common task. At first, add it to the big `const` in `ccUnitMeasure.go`. Moreover, create a regular expression matching the measure (and pre-compile it like the others). Add the expression matching to `NewMeasure()`. The `String()` and `Short()` functions return descriptive strings for the measure in long form (like `Hertz`) and short form (like `Hz`).
If there are special conversion rules between measures and you want to convert one measure to another, like temperatures in Celsius to Fahrenheit, a special case in `GetUnitPrefixFactor()` is required.
### Special parsing rules
The two parsers for prefix and measure are called under the hood by `NewUnit()`, and some special rules might apply. As in the above section about 'special unit detection', special rules for your new measure might be required. Currently there are two special cases:
- Measures that are non-dividable like Flops, Bytes, Events, ... cannot use `Milli`, `Micro` and `Nano`. The prefix `m` is forced to `M` for these measures
- If the prefix is `p`/`P` (`Peta`) or `e`/`E` (`Exa`) and the measure is not detectable, detection is retried with the prefix included: in the first round it tries, for example, prefix `p` and measure `ackets`, which fails; it then retries with measure `packets` and an `<empty>` prefix (which resolves to the `Base` prefix).
## Limitations
The `ccUnits` package is a simple implementation of a unit system and comes with some limitations:
- The unit denominator (like `s` in `Mbyte/s`) can only have the `Base` prefix; you cannot specify `Byte/ms` for "Bytes per millisecond" (see the sketch below).
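A sketch of the denominator limitation, given the parser shown later in this diff:
```go
u := units.NewUnit("MByte/s")
fmt.Println(u.Short()) // MB/s

v := units.NewUnit("Byte/ms") // "ms" is not a valid denominator measure
fmt.Println(v.Short())        // B -- the denominator is silently dropped
```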

View File

@ -1,134 +0,0 @@
package units
import "regexp"
type Measure int
const (
InvalidMeasure Measure = iota
Bytes
Flops
Percentage
TemperatureC
TemperatureF
Rotation
Frequency
Time
Watt
Joule
Cycles
Requests
Packets
Events
)
type MeasureData struct {
Long string
Short string
Regex string
}
// Different names and regex used for input and output
var InvalidMeasureLong string = "Invalid"
var InvalidMeasureShort string = "inval"
var MeasuresMap map[Measure]MeasureData = map[Measure]MeasureData{
Bytes: {
Long: "byte",
Short: "B",
Regex: "^([bB][yY]?[tT]?[eE]?[sS]?)",
},
Flops: {
Long: "Flops",
Short: "F",
Regex: "^([fF][lL]?[oO]?[pP]?[sS]?)",
},
Percentage: {
Long: "Percent",
Short: "%",
Regex: "^(%|[pP]ercent)",
},
TemperatureC: {
Long: "DegreeC",
Short: "degC",
Regex: "^(deg[Cc]|°[cC])",
},
TemperatureF: {
Long: "DegreeF",
Short: "degF",
Regex: "^(deg[fF]|°[fF])",
},
Rotation: {
Long: "RPM",
Short: "RPM",
Regex: "^([rR][pP][mM])",
},
Frequency: {
Long: "Hertz",
Short: "Hz",
Regex: "^([hH][eE]?[rR]?[tT]?[zZ])",
},
Time: {
Long: "Seconds",
Short: "s",
Regex: "^([sS][eE]?[cC]?[oO]?[nN]?[dD]?[sS]?)",
},
Cycles: {
Long: "Cycles",
Short: "cyc",
Regex: "^([cC][yY][cC]?[lL]?[eE]?[sS]?)",
},
Watt: {
Long: "Watts",
Short: "W",
Regex: "^([wW][aA]?[tT]?[tT]?[sS]?)",
},
Joule: {
Long: "Joules",
Short: "J",
Regex: "^([jJ][oO]?[uU]?[lL]?[eE]?[sS]?)",
},
Requests: {
Long: "Requests",
Short: "requests",
Regex: "^([rR][eE][qQ][uU]?[eE]?[sS]?[tT]?[sS]?)",
},
Packets: {
Long: "Packets",
Short: "packets",
Regex: "^([pP][aA]?[cC]?[kK][eE]?[tT][sS]?)",
},
Events: {
Long: "Events",
Short: "events",
Regex: "^([eE][vV]?[eE]?[nN][tT][sS]?)",
},
}
// String returns the long string for the measure like 'Percent' or 'Seconds'
func (m *Measure) String() string {
if data, ok := MeasuresMap[*m]; ok {
return data.Long
}
return InvalidMeasureLong
}
// Short returns the short string for the measure like 'B' (Bytes), 's' (Time) or 'W' (Watt). It is recommended to use Short() over String().
func (m *Measure) Short() string {
if data, ok := MeasuresMap[*m]; ok {
return data.Short
}
return InvalidMeasureShort
}
// NewMeasure creates a new measure out of a string representing a measure like 'Bytes', 'Flops' and 'percent'.
// It uses regular expressions for matching.
func NewMeasure(unit string) Measure {
for m, data := range MeasuresMap {
regex := regexp.MustCompile(data.Regex)
match := regex.FindStringSubmatch(unit)
if match != nil {
return m
}
}
return InvalidMeasure
}

View File

@ -1,192 +0,0 @@
package units
import (
"math"
"regexp"
)
type Prefix float64
const (
InvalidPrefix Prefix = iota
Base = 1
Yotta = 1e24
Zetta = 1e21
Exa = 1e18
Peta = 1e15
Tera = 1e12
Giga = 1e9
Mega = 1e6
Kilo = 1e3
Milli = 1e-3
Micro = 1e-6
Nano = 1e-9
Kibi = 1024
Mebi = 1024 * 1024
Gibi = 1024 * 1024 * 1024
Tebi = 1024 * 1024 * 1024 * 1024
Pebi = 1024 * 1024 * 1024 * 1024 * 1024
Exbi = 1024 * 1024 * 1024 * 1024 * 1024 * 1024
Zebi = 1024 * 1024 * 1024 * 1024 * 1024 * 1024 * 1024
Yobi = 1024 * 1024 * 1024 * 1024 * 1024 * 1024 * 1024 * 1024
)
const PrefixUnitSplitRegexStr = `^([kKmMgGtTpPeEzZyY]?[i]?)(.*)`
var prefixUnitSplitRegex = regexp.MustCompile(PrefixUnitSplitRegexStr)
type PrefixData struct {
Long string
Short string
Regex string
}
// Different names and regex used for input and output
var InvalidPrefixLong string = "Invalid"
var InvalidPrefixShort string = "inval"
var PrefixDataMap map[Prefix]PrefixData = map[Prefix]PrefixData{
Base: {
Long: "",
Short: "",
Regex: "^$",
},
Kilo: {
Long: "Kilo",
Short: "K",
Regex: "^[kK]$",
},
Mega: {
Long: "Mega",
Short: "M",
Regex: "^[M]$",
},
Giga: {
Long: "Giga",
Short: "G",
Regex: "^[gG]$",
},
Tera: {
Long: "Tera",
Short: "T",
Regex: "^[tT]$",
},
Peta: {
Long: "Peta",
Short: "P",
Regex: "^[pP]$",
},
Exa: {
Long: "Exa",
Short: "E",
Regex: "^[eE]$",
},
Zetta: {
Long: "Zetta",
Short: "Z",
Regex: "^[zZ]$",
},
Yotta: {
Long: "Yotta",
Short: "Y",
Regex: "^[yY]$",
},
Milli: {
Long: "Milli",
Short: "m",
Regex: "^[m]$",
},
Micro: {
Long: "Micro",
Short: "u",
Regex: "^[u]$",
},
Nano: {
Long: "Nano",
Short: "n",
Regex: "^[n]$",
},
Kibi: {
Long: "Kibi",
Short: "Ki",
Regex: "^[kK][i]$",
},
Mebi: {
Long: "Mebi",
Short: "Mi",
Regex: "^[M][i]$",
},
Gibi: {
Long: "Gibi",
Short: "Gi",
Regex: "^[gG][i]$",
},
Tebi: {
Long: "Tebi",
Short: "Ti",
Regex: "^[tT][i]$",
},
Pebi: {
Long: "Pebi",
Short: "Pi",
Regex: "^[pP][i]$",
},
Exbi: {
Long: "Exbi",
Short: "Ei",
Regex: "^[eE][i]$",
},
Zebi: {
Long: "Zebi",
Short: "Zi",
Regex: "^[zZ][i]$",
},
Yobi: {
Long: "Yobi",
Short: "Yi",
Regex: "^[yY][i]$",
},
}
// String returns the long string for the prefix like 'Kilo' or 'Mega'
func (p *Prefix) String() string {
if data, ok := PrefixDataMap[*p]; ok {
return data.Long
}
return InvalidPrefixLong
}
// Prefix returns the short string for the prefix like 'K', 'M' or 'G'. It is recommended to use Prefix() over String().
func (p *Prefix) Prefix() string {
if data, ok := PrefixDataMap[*p]; ok {
return data.Short
}
return InvalidPrefixShort
}
// NewPrefix creates a new prefix out of a string representing a unit like 'k', 'K', 'M' or 'G'.
func NewPrefix(prefix string) Prefix {
for p, data := range PrefixDataMap {
regex := regexp.MustCompile(data.Regex)
match := regex.FindStringSubmatch(prefix)
if match != nil {
return p
}
}
return InvalidPrefix
}
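// getExponent returns the decimal exponent of a power-of-1000 factor,
// e.g. getExponent(1e6) returns 6.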
func getExponent(p float64) int {
count := 0
for p > 1.0 {
p = p / 1000.0
count++
}
return count * 3
}
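// NewPrefixFromFactor returns the prefix op scaled down by 10^e,
// e.g. Mega with e=3 yields Kilo.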
func NewPrefixFromFactor(op Prefix, e int) Prefix {
f := float64(op)
exp := math.Pow10(getExponent(f) - e)
return Prefix(exp)
}

View File

@ -1,339 +0,0 @@
// Unit system for cluster monitoring metrics like bytes, flops and events
package units
import (
"fmt"
"math"
"strings"
"github.com/ClusterCockpit/cc-backend/pkg/schema"
)
type unit struct {
prefix Prefix
measure Measure
divMeasure Measure
}
type Unit interface {
Valid() bool
String() string
Short() string
AddUnitDenominator(div Measure)
getPrefix() Prefix
getMeasure() Measure
getUnitDenominator() Measure
setPrefix(p Prefix)
}
var INVALID_UNIT = NewUnit("foobar")
// Valid checks whether a unit is a valid unit.
// A unit is valid if it has at least a prefix and a measure.
// The unit denominator is optional.
func (u *unit) Valid() bool {
return u.prefix != InvalidPrefix && u.measure != InvalidMeasure
}
// String returns the long string for the unit like 'KiloHertz' or 'MegaBytes'
func (u *unit) String() string {
if u.divMeasure != InvalidMeasure {
return fmt.Sprintf("%s%s/%s", u.prefix.String(), u.measure.String(), u.divMeasure.String())
} else {
return fmt.Sprintf("%s%s", u.prefix.String(), u.measure.String())
}
}
// Short returns the short string for the unit like 'kHz' or 'MByte'. It is recommended to use Short() over String().
func (u *unit) Short() string {
if u.divMeasure != InvalidMeasure {
return fmt.Sprintf("%s%s/%s", u.prefix.Prefix(), u.measure.Short(), u.divMeasure.Short())
} else {
return fmt.Sprintf("%s%s", u.prefix.Prefix(), u.measure.Short())
}
}
// AddUnitDenominator adds a unit denominator to an existing unit. It can be used if you want to derive, e.g., a bandwidth from a data volume:
// the data volume is in a Byte unit like 'kByte', and by dividing it by the runtime in seconds we get the bandwidth. We can use the
// data volume unit and add 'Second' as the unit denominator.
func (u *unit) AddUnitDenominator(div Measure) {
u.divMeasure = div
}
func (u *unit) getPrefix() Prefix {
return u.prefix
}
func (u *unit) setPrefix(p Prefix) {
u.prefix = p
}
func (u *unit) getMeasure() Measure {
return u.measure
}
func (u *unit) getUnitDenominator() Measure {
return u.divMeasure
}
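// ConvertValue converts v in place from unit 'from' to unit 'to',
// rounding the result up to the next integer.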
func ConvertValue(v *float64, from string, to string) {
uf := NewUnit(from)
ut := NewUnit(to)
factor := float64(uf.getPrefix()) / float64(ut.getPrefix())
*v = math.Ceil(*v * factor)
}
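// ConvertSeries converts all values of s in place from unit 'from' to unit 'to',
// rounding each result up to the next integer.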
func ConvertSeries(s []float64, from string, to string) {
uf := NewUnit(from)
ut := NewUnit(to)
factor := float64(uf.getPrefix()) / float64(ut.getPrefix())
for i := 0; i < len(s); i++ {
s[i] = math.Ceil(s[i] * factor)
}
}
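// getNormalizationFactor returns a multiplicative factor (a power of 1000)
// and its decimal exponent that scale v into the range [1, 1000).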
func getNormalizationFactor(v float64) (float64, int) {
count := 0
scale := -3
if v > 1000.0 {
for v > 1000.0 {
v *= 1e-3
count++
}
} else {
for v < 1.0 {
v *= 1e3
count++
}
scale = 3
}
return math.Pow10(count * scale), count * scale
}
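// NormalizeValue scales v in place into the range [1, 1000) and returns
// the short string of the resulting prefix.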
func NormalizeValue(v *float64, us string) string {
u := NewUnit(us)
f, e := getNormalizationFactor((*v))
*v = math.Ceil(*v * f)
p := NewPrefixFromFactor(u.getPrefix(), e)
return p.Prefix()
}
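// NormalizeSeries scales all values of s in place based on the series
// average avg and stores the resulting unit's short string in nu.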
func NormalizeSeries(s []float64, avg float64, us string, nu *string) {
u := NewUnit(us)
f, e := getNormalizationFactor(avg)
for i := 0; i < len(s); i++ {
s[i] *= f
s[i] = math.Ceil(s[i])
}
u.setPrefix(NewPrefixFromFactor(u.getPrefix(), e))
fmt.Printf("Prefix: %e \n", u.getPrefix())
*nu = u.Short()
}
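// ConvertUnitString converts a unit string like 'Mbyte/s' into a
// schema.Unit with separate prefix and base.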
func ConvertUnitString(us string) schema.Unit {
var nu schema.Unit
if us == "CPI" ||
us == "IPC" ||
us == "load" ||
us == "" {
nu.Base = us
return nu
}
u := NewUnit(us)
p := u.getPrefix()
if p.Prefix() != "" {
prefix := p.Prefix()
nu.Prefix = prefix
}
m := u.getMeasure()
d := u.getUnitDenominator()
if d.Short() != "inval" {
nu.Base = fmt.Sprintf("%s/%s", m.Short(), d.Short())
} else {
nu.Base = m.Short()
}
return nu
}
// GetPrefixPrefixFactor creates the default conversion function between two prefixes.
// It returns a conversion function for the value.
func GetPrefixPrefixFactor(in Prefix, out Prefix) func(value interface{}) interface{} {
var factor = 1.0
var in_prefix = float64(in)
var out_prefix = float64(out)
factor = in_prefix / out_prefix
conv := func(value interface{}) interface{} {
switch v := value.(type) {
case float64:
return v * factor
case float32:
return float32(float64(v) * factor)
case int:
return int(float64(v) * factor)
case int32:
return int32(float64(v) * factor)
case int64:
return int64(float64(v) * factor)
case uint:
return uint(float64(v) * factor)
case uint32:
return uint32(float64(v) * factor)
case uint64:
return uint64(float64(v) * factor)
}
return value
}
return conv
}
// This is the conversion function between temperatures in Celsius to Fahrenheit
func convertTempC2TempF(value interface{}) interface{} {
switch v := value.(type) {
case float64:
return (v * 1.8) + 32
case float32:
return (v * 1.8) + 32
case int:
return int((float64(v) * 1.8) + 32)
case int32:
return int32((float64(v) * 1.8) + 32)
case int64:
return int64((float64(v) * 1.8) + 32)
case uint:
return uint((float64(v) * 1.8) + 32)
case uint32:
return uint32((float64(v) * 1.8) + 32)
case uint64:
return uint64((float64(v) * 1.8) + 32)
}
return value
}
// This is the conversion function between temperatures in Fahrenheit to Celsius
func convertTempF2TempC(value interface{}) interface{} {
switch v := value.(type) {
case float64:
return (v - 32) / 1.8
case float32:
return (v - 32) / 1.8
case int:
return int(((float64(v) - 32) / 1.8))
case int32:
return int32(((float64(v) - 32) / 1.8))
case int64:
return int64(((float64(v) - 32) / 1.8))
case uint:
return uint(((float64(v) - 32) / 1.8))
case uint32:
return uint32(((float64(v) - 32) / 1.8))
case uint64:
return uint64(((float64(v) - 32) / 1.8))
}
return value
}
// GetPrefixStringPrefixStringFactor is a wrapper for GetPrefixPrefixFactor with string inputs instead
// of prefixes. It also returns a conversion function for the value.
func GetPrefixStringPrefixStringFactor(in string, out string) func(value interface{}) interface{} {
var i Prefix = NewPrefix(in)
var o Prefix = NewPrefix(out)
return GetPrefixPrefixFactor(i, o)
}
// GetUnitPrefixFactor gets the conversion function and resulting unit for a unit and a prefix. This is
// the most common case where you have some input unit and want to convert it to the same unit but with
// a different prefix. The returned unit represents the value after conversion.
func GetUnitPrefixFactor(in Unit, out Prefix) (func(value interface{}) interface{}, Unit) {
outUnit := NewUnit(in.Short())
if outUnit.Valid() {
outUnit.setPrefix(out)
conv := GetPrefixPrefixFactor(in.getPrefix(), out)
return conv, outUnit
}
return nil, INVALID_UNIT
}
// GetUnitPrefixStringFactor gets the conversion function and resulting unit for a unit and a prefix as string.
// It is a wrapper for GetUnitPrefixFactor
func GetUnitPrefixStringFactor(in Unit, out string) (func(value interface{}) interface{}, Unit) {
var o Prefix = NewPrefix(out)
return GetUnitPrefixFactor(in, o)
}
// GetUnitStringPrefixStringFactor gets the conversion function and resulting unit for a unit and a prefix when both are only string representations.
// This is just a wrapper for GetUnitPrefixFactor with the given input unit and the desired output prefix.
func GetUnitStringPrefixStringFactor(in string, out string) (func(value interface{}) interface{}, Unit) {
var i = NewUnit(in)
return GetUnitPrefixStringFactor(i, out)
}
// GetUnitUnitFactor gets the conversion function and (maybe) error for unit to unit conversion.
// It is basically a wrapper for GetPrefixPrefixFactor with some special cases for temperature
// conversion between Fahrenheit and Celsius.
func GetUnitUnitFactor(in Unit, out Unit) (func(value interface{}) interface{}, error) {
if in.getMeasure() == TemperatureC && out.getMeasure() == TemperatureF {
return convertTempC2TempF, nil
} else if in.getMeasure() == TemperatureF && out.getMeasure() == TemperatureC {
return convertTempF2TempC, nil
} else if in.getMeasure() != out.getMeasure() || in.getUnitDenominator() != out.getUnitDenominator() {
return func(value interface{}) interface{} { return 1.0 }, fmt.Errorf("UNITS/UNITS > invalid measures in in and out Unit")
}
return GetPrefixPrefixFactor(in.getPrefix(), out.getPrefix()), nil
}
// NewUnit creates a new unit out of a string representing a unit like 'Mbyte/s' or 'GHz'.
// It uses regular expressions to detect the prefix, unit and (maybe) unit denominator.
func NewUnit(unitStr string) Unit {
u := &unit{
prefix: InvalidPrefix,
measure: InvalidMeasure,
divMeasure: InvalidMeasure,
}
matches := prefixUnitSplitRegex.FindStringSubmatch(unitStr)
if len(matches) > 2 {
pre := NewPrefix(matches[1])
measures := strings.Split(matches[2], "/")
m := NewMeasure(measures[0])
// Special case for prefix 'p' or 'P' (Peta) and measures starting with 'p' or 'P'
// like 'packets' or 'percent'. Same for 'e' or 'E' (Exa) for measures starting with
// 'e' or 'E' like 'events'
if m == InvalidMeasure {
switch pre {
case Peta, Exa:
t := NewMeasure(matches[1] + measures[0])
if t != InvalidMeasure {
m = t
pre = Base
}
}
}
div := InvalidMeasure
if len(measures) > 1 {
div = NewMeasure(measures[1])
}
switch m {
// Special case for 'm' as prefix for Bytes and some others, as there is no unit like MilliBytes
case Bytes, Flops, Packets, Events, Cycles, Requests:
if pre == Milli {
pre = Mega
}
// Special case for percentage. No/ignore prefix
case Percentage:
pre = Base
}
if pre != InvalidPrefix && m != InvalidMeasure {
u.prefix = pre
u.measure = m
if div != InvalidMeasure {
u.divMeasure = div
}
}
}
return u
}

View File

@ -1,307 +0,0 @@
package units
import (
"fmt"
"reflect"
"regexp"
"testing"
)
func TestUnitsExact(t *testing.T) {
testCases := []struct {
in string
want Unit
}{
{"b", NewUnit("Bytes")},
{"B", NewUnit("Bytes")},
{"byte", NewUnit("Bytes")},
{"bytes", NewUnit("Bytes")},
{"BYtes", NewUnit("Bytes")},
{"Mb", NewUnit("MBytes")},
{"MB", NewUnit("MBytes")},
{"Mbyte", NewUnit("MBytes")},
{"Mbytes", NewUnit("MBytes")},
{"MbYtes", NewUnit("MBytes")},
{"Gb", NewUnit("GBytes")},
{"GB", NewUnit("GBytes")},
{"Hz", NewUnit("Hertz")},
{"MHz", NewUnit("MHertz")},
{"GHz", NewUnit("GHertz")},
{"pkts", NewUnit("Packets")},
{"packets", NewUnit("Packets")},
{"packet", NewUnit("Packets")},
{"flop", NewUnit("Flops")},
{"flops", NewUnit("Flops")},
{"floPS", NewUnit("Flops")},
{"Mflop", NewUnit("MFlops")},
{"Gflop", NewUnit("GFlops")},
{"gflop", NewUnit("GFlops")},
{"%", NewUnit("Percent")},
{"percent", NewUnit("Percent")},
{"degc", NewUnit("degC")},
{"degC", NewUnit("degC")},
{"degf", NewUnit("degF")},
{"°f", NewUnit("degF")},
{"events", NewUnit("events")},
{"event", NewUnit("events")},
{"EveNts", NewUnit("events")},
{"reqs", NewUnit("requests")},
{"reQuEsTs", NewUnit("requests")},
{"Requests", NewUnit("requests")},
{"cyc", NewUnit("cycles")},
{"cy", NewUnit("cycles")},
{"Cycles", NewUnit("cycles")},
{"J", NewUnit("Joules")},
{"Joule", NewUnit("Joules")},
{"joule", NewUnit("Joules")},
{"W", NewUnit("Watt")},
{"Watts", NewUnit("Watt")},
{"watt", NewUnit("Watt")},
{"s", NewUnit("seconds")},
{"sec", NewUnit("seconds")},
{"secs", NewUnit("seconds")},
{"RPM", NewUnit("rpm")},
{"rPm", NewUnit("rpm")},
{"watt/byte", NewUnit("W/B")},
{"watts/bytes", NewUnit("W/B")},
{"flop/byte", NewUnit("flops/Bytes")},
{"F/B", NewUnit("flops/Bytes")},
}
compareUnitExact := func(in, out Unit) bool {
if in.getMeasure() == out.getMeasure() && in.getUnitDenominator() == out.getUnitDenominator() && in.getPrefix() == out.getPrefix() {
return true
}
return false
}
for _, c := range testCases {
u := NewUnit(c.in)
if (!u.Valid()) || (!compareUnitExact(u, c.want)) {
t.Errorf("func NewUnit(%q) == %q, want %q", c.in, u.String(), c.want.String())
} else {
t.Logf("NewUnit(%q) == %q", c.in, u.String())
}
}
}
func TestUnitUnitConversion(t *testing.T) {
testCases := []struct {
in string
want Unit
prefixFactor float64
}{
{"kb", NewUnit("Bytes"), 1000},
{"Mb", NewUnit("Bytes"), 1000000},
{"Mb/s", NewUnit("Bytes/s"), 1000000},
{"Flops/s", NewUnit("MFlops/s"), 1e-6},
{"Flops/s", NewUnit("GFlops/s"), 1e-9},
{"MHz", NewUnit("Hertz"), 1e6},
{"kb", NewUnit("Kib"), 1000.0 / 1024},
{"Mib", NewUnit("MBytes"), (1024 * 1024.0) / (1e6)},
{"mb", NewUnit("MBytes"), 1.0},
}
compareUnitWithPrefix := func(in, out Unit, factor float64) bool {
if in.getMeasure() == out.getMeasure() && in.getUnitDenominator() == out.getUnitDenominator() {
if f := GetPrefixPrefixFactor(in.getPrefix(), out.getPrefix()); f(1.0) == factor {
return true
} else {
fmt.Println(f(1.0))
}
}
return false
}
for _, c := range testCases {
u := NewUnit(c.in)
if (!u.Valid()) || (!compareUnitWithPrefix(u, c.want, c.prefixFactor)) {
t.Errorf("GetPrefixPrefixFactor(%q, %q) invalid, want %q with factor %g", c.in, u.String(), c.want.String(), c.prefixFactor)
} else {
t.Logf("GetPrefixPrefixFactor(%q, %q) = %g", c.in, c.want.String(), c.prefixFactor)
}
}
}
func TestUnitPrefixConversion(t *testing.T) {
testCases := []struct {
in string
want string
prefixFactor float64
wantUnit Unit
}{
{"KBytes", "", 1000, NewUnit("Bytes")},
{"MBytes", "", 1e6, NewUnit("Bytes")},
{"MBytes", "G", 1e-3, NewUnit("GBytes")},
{"mb", "M", 1, NewUnit("MBytes")},
}
compareUnitPrefix := func(in Unit, out Prefix, factor float64, outUnit Unit) bool {
if in.Valid() {
conv, unit := GetUnitPrefixFactor(in, out)
value := conv(1.0)
if value == factor && unit.String() == outUnit.String() {
return true
}
}
return false
}
for _, c := range testCases {
u := NewUnit(c.in)
p := NewPrefix(c.want)
if (!u.Valid()) || (!compareUnitPrefix(u, p, c.prefixFactor, c.wantUnit)) {
t.Errorf("GetUnitPrefixFactor(%q, %q) invalid, want %q with factor %g", c.in, p.Prefix(), c.wantUnit.String(), c.prefixFactor)
} else {
t.Logf("GetUnitPrefixFactor(%q, %q) = %g", c.in, c.wantUnit.String(), c.prefixFactor)
}
}
}
func TestPrefixPrefixConversion(t *testing.T) {
testCases := []struct {
in string
want string
prefixFactor float64
}{
{"K", "", 1000},
{"M", "", 1e6},
{"M", "G", 1e-3},
{"", "M", 1e-6},
{"", "m", 1e3},
{"m", "n", 1e6},
//{"", "n", 1e9}, //does not work because of IEEE rounding problems
}
for _, c := range testCases {
i := NewPrefix(c.in)
o := NewPrefix(c.want)
if i != InvalidPrefix && o != InvalidPrefix {
conv := GetPrefixPrefixFactor(i, o)
value := conv(1.0)
if value != c.prefixFactor {
t.Errorf("GetPrefixPrefixFactor(%q, %q) invalid, want %q with factor %g but got %g", c.in, c.want, o.Prefix(), c.prefixFactor, value)
} else {
t.Logf("GetPrefixPrefixFactor(%q, %q) = %g", c.in, c.want, c.prefixFactor)
}
}
}
}
func TestMeasureRegex(t *testing.T) {
for _, data := range MeasuresMap {
_, err := regexp.Compile(data.Regex)
if err != nil {
t.Errorf("failed to compile regex '%s': %s", data.Regex, err.Error())
}
t.Logf("succussfully compiled regex '%s' for measure %s", data.Regex, data.Long)
}
}
func TestPrefixRegex(t *testing.T) {
for _, data := range PrefixDataMap {
_, err := regexp.Compile(data.Regex)
if err != nil {
t.Errorf("failed to compile regex '%s': %s", data.Regex, err.Error())
}
t.Logf("succussfully compiled regex '%s' for prefix %s", data.Regex, data.Long)
}
}
func TestConvertValue(t *testing.T) {
v := float64(103456)
ConvertValue(&v, "MB/s", "GB/s")
if v != 104.00 {
t.Errorf("Failed ConvertValue: Want 103.456, Got %f", v)
}
}
func TestConvertValueUp(t *testing.T) {
v := float64(10.3456)
ConvertValue(&v, "GB/s", "MB/s")
if v != 10346.00 {
t.Errorf("Failed ConvertValue: Want 10346.00, Got %f", v)
}
}
func TestConvertSeries(t *testing.T) {
s := []float64{2890031237, 23998994567, 389734042344, 390349424345}
r := []float64{3, 24, 390, 391}
ConvertSeries(s, "F/s", "GF/s")
if !reflect.DeepEqual(s, r) {
t.Errorf("Failed ConvertValue: Want 3, 24, 390, 391, Got %v", s)
}
}
// func TestNormalizeValue(t *testing.T) {
// var s string
// v := float64(103456)
//
// NormalizeValue(&v, "MB/s", &s)
//
// if v != 104.00 {
// t.Errorf("Failed ConvertValue: Want 104.00, Got %f", v)
// }
// if s != "GB/s" {
// t.Errorf("Failed Prefix or unit: Want GB/s, Got %s", s)
// }
// }
//
// func TestNormalizeValueNoPrefix(t *testing.T) {
// var s string
// v := float64(103458596)
//
// NormalizeValue(&v, "F/s", &s)
//
// if v != 104.00 {
// t.Errorf("Failed ConvertValue: Want 104.00, Got %f", v)
// }
// if s != "MF/s" {
// t.Errorf("Failed Prefix or unit: Want MF/s, Got %s", s)
// }
// }
//
// func TestNormalizeValueKeep(t *testing.T) {
// var s string
// v := float64(345)
//
// NormalizeValue(&v, "MB/s", &s)
//
// if v != 345.00 {
// t.Errorf("Failed ConvertValue: Want 104.00, Got %f", v)
// }
// if s != "MB/s" {
// t.Errorf("Failed Prefix or unit: Want GB/s, Got %s", s)
// }
// }
//
// func TestNormalizeValueDown(t *testing.T) {
// var s string
// v := float64(0.0004578)
//
// NormalizeValue(&v, "GB/s", &s)
//
// if v != 458.00 {
// t.Errorf("Failed ConvertValue: Want 458.00, Got %f", v)
// }
// if s != "KB/s" {
// t.Errorf("Failed Prefix or unit: Want KB/s, Got %s", s)
// }
// }
//
// func TestNormalizeSeries(t *testing.T) {
// var us string
// s := []float64{2890031237, 23998994567, 389734042344, 390349424345}
// r := []float64{3, 24, 390, 391}
//
// total := 0.0
// for _, number := range s {
// total += number
// }
// avg := total / float64(len(s))
//
// fmt.Printf("AVG: %e\n", avg)
// NormalizeSeries(s, avg, "KB/s", &us)
//
// if !reflect.DeepEqual(s, r) {
// t.Errorf("Failed ConvertValue: Want 3, 24, 390, 391, Got %v", s)
// }
// if us != "TB/s" {
// t.Errorf("Failed Prefix or unit: Want TB/s, Got %s", us)
// }
// }

View File

@ -17,7 +17,7 @@ import (
"github.com/ClusterCockpit/cc-backend/internal/config"
"github.com/ClusterCockpit/cc-backend/pkg/log"
"github.com/ClusterCockpit/cc-backend/pkg/schema"
"github.com/ClusterCockpit/cc-backend/pkg/units"
ccunits "github.com/ClusterCockpit/cc-units"
)
const Version = 1
@ -35,6 +35,33 @@ func loadJobData(filename string) (*JobData, error) {
return DecodeJobData(bufio.NewReader(f))
}
func ConvertUnitString(us string) schema.Unit {
var nu schema.Unit
if us == "CPI" ||
us == "IPC" ||
us == "load" ||
us == "" {
nu.Base = us
return nu
}
u := ccunits.NewUnit(us)
p := u.GetPrefix()
if p.Prefix() != "" {
prefix := p.Prefix()
nu.Prefix = prefix
}
m := u.GetMeasure()
d := u.GetUnitDenominator()
if d.Short() != "inval" {
nu.Base = fmt.Sprintf("%s/%s", m.Short(), d.Short())
} else {
nu.Base = m.Short()
}
return nu
}
func deepCopyJobMeta(j *JobMeta) schema.JobMeta {
var jn schema.JobMeta
@ -78,7 +105,7 @@ func deepCopyJobMeta(j *JobMeta) schema.JobMeta {
sn.Avg = v.Avg
sn.Max = v.Max
sn.Min = v.Min
tmpUnit := units.ConvertUnitString(v.Unit)
tmpUnit := ConvertUnitString(v.Unit)
if tmpUnit.Base == "inval" {
sn.Unit = schema.Unit{Base: ""}
} else {
@ -113,7 +140,7 @@ func deepCopyJobData(d *JobData, cluster string, subCluster string) *schema.JobD
for mk, mv := range v {
// fmt.Printf("Scope %s\n", mk)
var mn schema.JobMetric
tmpUnit := units.ConvertUnitString(mv.Unit)
tmpUnit := ConvertUnitString(mv.Unit)
if tmpUnit.Base == "inval" {
mn.Unit = schema.Unit{Base: ""}
} else {
@ -192,13 +219,13 @@ func deepCopyClusterConfig(co *Cluster) schema.Cluster {
mcn.Name = mco.Name
mcn.Scope = mco.Scope
if mco.Aggregation == "" {
fmt.Println("Property aggregation missing! Please review file!")
fmt.Println("cluster.json - Property aggregation missing! Please review file!")
mcn.Aggregation = "sum"
} else {
mcn.Aggregation = mco.Aggregation
}
mcn.Timestep = mco.Timestep
tmpUnit := units.ConvertUnitString(mco.Unit)
tmpUnit := ConvertUnitString(mco.Unit)
if tmpUnit.Base == "inval" {
mcn.Unit = schema.Unit{Base: ""}
} else {
@ -225,8 +252,8 @@ func main() {
flag.BoolVar(&flagLogDateTime, "logdate", false, "Set this flag to add date and time to log messages")
flag.StringVar(&flagLogLevel, "loglevel", "warn", "Sets the logging level: `[debug,info,warn (default),err,fatal,crit]`")
flag.StringVar(&flagConfigFile, "config", "./config.json", "Specify alternative path to `config.json`")
flag.StringVar(&srcPath, "s", "./var/job-archive", "Specify the source job archive path. Default is ./var/job-archive")
flag.StringVar(&dstPath, "d", "./var/job-archive-new", "Specify the destination job archive path. Default is ./var/job-archive-new")
flag.StringVar(&srcPath, "s", "./var/job-archive", "Specify the source job archive path")
flag.StringVar(&dstPath, "d", "./var/job-archive-new", "Specify the destination job archive path")
flag.Parse()
if _, err := os.Stat(filepath.Join(srcPath, "version.txt")); !errors.Is(err, os.ErrNotExist) {