Merge branch 'master' into 97_107_mark_and_show_shared

Christoph Kluge, 2023-06-01 15:24:26 +02:00
commit 37d5db443f
85 changed files with 4317 additions and 3839 deletions


@@ -5,11 +5,11 @@ jobs:
     runs-on: ubuntu-latest
     steps:
     - name: Install Go
-      uses: actions/setup-go@v2
+      uses: actions/setup-go@v4
       with:
-        go-version: 1.17.x
+        go-version: 1.19.x
     - name: Checkout code
-      uses: actions/checkout@v2
+      uses: actions/checkout@v3
     - name: Build, Vet & Test
      run: |
        go build ./...


@@ -1,12 +1,16 @@
 TARGET = ./cc-backend
 VAR = ./var
-DB = ./var/job.db
+CFG = config.json .env
 FRONTEND = ./web/frontend
 VERSION = 1
 GIT_HASH := $(shell git rev-parse --short HEAD || echo 'development')
 CURRENT_TIME = $(shell date +"%Y-%m-%d:T%H:%M:%S")
 LD_FLAGS = '-s -X main.buildTime=${CURRENT_TIME} -X main.version=${VERSION} -X main.hash=${GIT_HASH}'
+
+EXECUTABLES = go npm
+K := $(foreach exec,$(EXECUTABLES),\
+	$(if $(shell which $(exec)),some string,$(error "No $(exec) in PATH")))
+
 SVELTE_COMPONENTS = status \
 	analysis \
 	node \
@@ -28,17 +32,24 @@ SVELTE_SRC = $(wildcard $(FRONTEND)/src/*.svelte) \
 .NOTPARALLEL:

-$(TARGET): $(VAR) $(DB) $(SVELTE_TARGETS)
+$(TARGET): $(VAR) $(CFG) $(SVELTE_TARGETS)
 	$(info ===> BUILD cc-backend)
 	@go build -ldflags=${LD_FLAGS} ./cmd/cc-backend

 clean:
 	$(info ===> CLEAN)
 	@go clean
-	@rm $(TARGET)
+	@rm -f $(TARGET)

+distclean:
+	@$(MAKE) clean
+	$(info ===> DISTCLEAN)
+	@rm -rf $(FRONTEND)/node_modules
+	@rm -rf $(VAR)
+
 test:
 	$(info ===> TESTING)
+	@go clean -testcache
 	@go build ./...
 	@go vet ./...
 	@go test ./...
@@ -49,15 +60,18 @@ tags:
 $(VAR):
 	@mkdir $(VAR)
-	@touch ./var/job.db
-	cd web/frontend && yarn install

-$(DB):
-	./cc-backend --migrate-db
+config.json:
+	$(info ===> Initialize config.json file)
+	@cp configs/config.json config.json
+
+.env:
+	$(info ===> Initialize .env file)
+	@cp configs/env-template.txt .env

 $(SVELTE_TARGETS): $(SVELTE_SRC)
 	$(info ===> BUILD frontend)
-	cd web/frontend && yarn build
+	cd web/frontend && npm install && npm run build

 install: $(TARGET)
 	@WORKSPACE=$(PREFIX)
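
For orientation, this is roughly how the updated targets are used from a fresh checkout (a sketch based on the Makefile above; `config.json` and `.env` are seeded from the templates in `configs/`):

```sh
make             # verifies go and npm are in PATH, seeds config.json/.env, builds frontend and binary
./cc-backend --migrate-db   # initialize or migrate the database (previously the $(DB) target)
make distclean   # remove the binary, ./var and web/frontend/node_modules
```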


@@ -42,7 +42,7 @@ versions of third party packages.
 ## Demo Setup

 We provide a shell script that downloads demo data and automatically builds and starts cc-backend.
-You need `wget`, `go`, and `yarn` in your path to start the demo. The demo will download 32MB of data (223MB on disk).
+You need `wget`, `go`, `node`, `rollup` and `yarn` in your path to start the demo. The demo will download 32MB of data (223MB on disk).

 ```sh
 git clone https://github.com/ClusterCockpit/cc-backend.git


@@ -7,6 +7,7 @@ package main
 import (
 	"context"
 	"crypto/tls"
+	"encoding/json"
 	"errors"
 	"flag"
 	"fmt"
@@ -29,13 +30,16 @@ import (
 	"github.com/ClusterCockpit/cc-backend/internal/config"
 	"github.com/ClusterCockpit/cc-backend/internal/graph"
 	"github.com/ClusterCockpit/cc-backend/internal/graph/generated"
+	"github.com/ClusterCockpit/cc-backend/internal/importer"
 	"github.com/ClusterCockpit/cc-backend/internal/metricdata"
 	"github.com/ClusterCockpit/cc-backend/internal/repository"
 	"github.com/ClusterCockpit/cc-backend/internal/routerConfig"
 	"github.com/ClusterCockpit/cc-backend/internal/runtimeEnv"
 	"github.com/ClusterCockpit/cc-backend/pkg/archive"
 	"github.com/ClusterCockpit/cc-backend/pkg/log"
+	"github.com/ClusterCockpit/cc-backend/pkg/schema"
 	"github.com/ClusterCockpit/cc-backend/web"
+	"github.com/go-co-op/gocron"
 	"github.com/google/gops/agent"
 	"github.com/gorilla/handlers"
 	"github.com/gorilla/mux"
@@ -116,7 +120,10 @@ func main() {
 	}

 	if flagMigrateDB {
-		repository.MigrateDB(config.Keys.DBDriver, config.Keys.DB)
+		err := repository.MigrateDB(config.Keys.DBDriver, config.Keys.DB)
+		if err != nil {
+			log.Fatal(err)
+		}
 		os.Exit(0)
 	}
@@ -196,13 +203,13 @@ func main() {
 	}

 	if flagReinitDB {
-		if err := repository.InitDB(); err != nil {
+		if err := importer.InitDB(); err != nil {
 			log.Fatalf("failed to re-initialize repository DB: %s", err.Error())
 		}
 	}

 	if flagImportJob != "" {
-		if err := repository.HandleImportFlag(flagImportJob); err != nil {
+		if err := importer.HandleImportFlag(flagImportJob); err != nil {
 			log.Fatalf("job import failed: %s", err.Error())
 		}
 	}
@@ -412,18 +419,95 @@ func main() {
 		api.JobRepository.WaitForArchiving()
 	}()

+	s := gocron.NewScheduler(time.Local)
+
 	if config.Keys.StopJobsExceedingWalltime > 0 {
-		go func() {
-			for range time.Tick(30 * time.Minute) {
-				err := jobRepo.StopJobsExceedingWalltimeBy(config.Keys.StopJobsExceedingWalltime)
-				if err != nil {
-					log.Warnf("Error while looking for jobs exceeding their walltime: %s", err.Error())
-				}
-				runtime.GC()
-			}
-		}()
+		log.Info("Register undead jobs service")
+
+		s.Every(1).Day().At("3:00").Do(func() {
+			err := jobRepo.StopJobsExceedingWalltimeBy(config.Keys.StopJobsExceedingWalltime)
+			if err != nil {
+				log.Warnf("Error while looking for jobs exceeding their walltime: %s", err.Error())
+			}
+			runtime.GC()
+		})
 	}

+	var cfg struct {
+		Compression int              `json:"compression"`
+		Retention   schema.Retention `json:"retention"`
+	}
+
+	cfg.Retention.IncludeDB = true
+
+	if err := json.Unmarshal(config.Keys.Archive, &cfg); err != nil {
+		log.Warn("Error while unmarshaling raw config json")
+	}
+
+	switch cfg.Retention.Policy {
+	case "delete":
+		log.Info("Register retention delete service")
+
+		s.Every(1).Day().At("4:00").Do(func() {
+			startTime := time.Now().Unix() - int64(cfg.Retention.Age*24*3600)
+			jobs, err := jobRepo.FindJobsBefore(startTime)
+			if err != nil {
+				log.Warnf("Error while looking for retention jobs: %s", err.Error())
+			}
+			archive.GetHandle().CleanUp(jobs)
+
+			if cfg.Retention.IncludeDB {
+				cnt, err := jobRepo.DeleteJobsBefore(startTime)
+				if err != nil {
+					log.Errorf("Error while deleting retention jobs from db: %s", err.Error())
+				} else {
+					log.Infof("Retention: Removed %d jobs from db", cnt)
+				}
+				if err = jobRepo.Optimize(); err != nil {
+					log.Errorf("Error occurred in db optimization: %s", err.Error())
+				}
+			}
+		})
+	case "move":
+		log.Info("Register retention move service")
+
+		s.Every(1).Day().At("4:00").Do(func() {
+			startTime := time.Now().Unix() - int64(cfg.Retention.Age*24*3600)
+			jobs, err := jobRepo.FindJobsBefore(startTime)
+			if err != nil {
+				log.Warnf("Error while looking for retention jobs: %s", err.Error())
+			}
+			archive.GetHandle().Move(jobs, cfg.Retention.Location)
+
+			if cfg.Retention.IncludeDB {
+				cnt, err := jobRepo.DeleteJobsBefore(startTime)
+				if err != nil {
+					log.Errorf("Error while deleting retention jobs from db: %s", err.Error())
+				} else {
+					log.Infof("Retention: Removed %d jobs from db", cnt)
+				}
+				if err = jobRepo.Optimize(); err != nil {
+					log.Errorf("Error occurred in db optimization: %s", err.Error())
+				}
+			}
+		})
+	}
+
+	if cfg.Compression > 0 {
+		log.Info("Register compression service")
+
+		s.Every(1).Day().At("5:00").Do(func() {
+			startTime := time.Now().Unix() - int64(cfg.Compression*24*3600)
+			jobs, err := jobRepo.FindJobsBefore(startTime)
+			if err != nil {
+				log.Warnf("Error while looking for retention jobs: %s", err.Error())
+			}
+			archive.GetHandle().Compress(jobs)
+		})
+	}
+
+	s.StartAsync()
+
 	if os.Getenv("GOGC") == "" {
 		debug.SetGCPercent(25)
 	}
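
For reference, the `archive` configuration block that this scheduler code unmarshals could look like the sketch below. The `kind` and `path` keys appear elsewhere in this commit; the `retention` keys are an assumption derived from the `schema.Retention` fields used above:

```json
{
  "archive": {
    "kind": "file",
    "path": "./var/job-archive",
    "compression": 7,
    "retention": {
      "policy": "move",
      "age": 365,
      "location": "/archive/retired-jobs",
      "includeDB": true
    }
  }
}
```

With such a configuration, jobs older than `age` days are moved (or deleted) daily at 4:00, and job archives older than `compression` days are compressed at 5:00.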

docs/dev-frontend.md (new file, 33 lines)

@@ -0,0 +1,33 @@
## Tips for frontend development

The frontend assets, including the Svelte JS files, are embedded in the Go
binary by default. To enable a quick turnaround cycle during frontend web
development, disable embedding of static assets in `config.json`:
```
"embed-static-files": false,
"static-files": "./web/frontend/public/",
```

Start the node build process (in directory `./web/frontend`) in development mode:
```
$ npm run dev
```

This starts the build process in listen mode: whenever you change a source
file, the dependent JavaScript targets are rebuilt automatically.

In case the JavaScript files are minified, you may need to set the production
flag to false by hand in `./web/frontend/rollup.config.mjs`:
```
const production = false
```
Usually this should work automatically.

Because the files are still served by ./cc-backend, you have to reload the view
explicitly in your browser.

A common setup is to have three terminals open:
* One running cc-backend (working directory repository root): `./cc-backend -server -dev`
* Another running npm in developer mode (working directory `./web/frontend`): `npm run dev`
* And the last one editing the frontend source files

docs/dev-testing.md (new file, 34 lines)

@@ -0,0 +1,34 @@
## Overview

We use the standard Go testing environment.

The following conventions are used (a minimal black box example is sketched
after this list):

* *White box unit tests*: Tests for internal functionality are placed in files
within the package itself.
* *Black box unit tests*: Tests for public interfaces are placed in files
named `<package name>_test.go` and belong to the package `<package_name>_test`.
There exists only one package test file per package.
* *Integration tests*: Tests that use multiple components are placed in a
package test file. These are named `<package name>_test.go` and belong to the
package `<package_name>_test`.
* *Test assets*: Any required files are placed in a directory `./testdata`
within each package directory.
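
To make the black box convention concrete, here is a minimal sketch. It exercises the exported `Normalize` function from the `internal/importer` package introduced in this commit; the file name and test name are illustrative, not part of the commit:

```go
// internal/importer/importer_test.go (illustrative, black box style)
package importer_test

import (
	"testing"

	"github.com/ClusterCockpit/cc-backend/internal/importer"
)

// Only the exported API of the importer package is visible from this package.
func TestNormalizeBlackBox(t *testing.T) {
	// An average of 2.5e10 with an empty prefix should be reported with prefix "G".
	_, prefix := importer.Normalize(2.5e10, "")
	if prefix != "G" {
		t.Errorf("want prefix G, got %s", prefix)
	}
}
```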
## Executing tests

Visual Studio Code has very good Go test integration and is the recommended
solution for debugging a single test.

The provided Makefile has a `test` target that executes:
```
$ go clean -testcache
$ go build ./...
$ go vet ./...
$ go test ./...
```
The commands can, of course, also be run directly on the command line.

For details about testing in Go refer to the standard documentation:
* [Testing package](https://pkg.go.dev/testing)
* [go test command](https://pkg.go.dev/cmd/go#hdr-Test_packages)

go.mod (7 lines changed)

@@ -4,6 +4,7 @@ go 1.18
 require (
 	github.com/99designs/gqlgen v0.17.24
+	github.com/ClusterCockpit/cc-units v0.4.0
 	github.com/Masterminds/squirrel v1.5.3
 	github.com/go-ldap/ldap/v3 v3.4.4
 	github.com/go-sql-driver/mysql v1.7.0
@@ -39,6 +40,7 @@ require (
 	github.com/felixge/httpsnoop v1.0.3 // indirect
 	github.com/ghodss/yaml v1.0.0 // indirect
 	github.com/go-asn1-ber/asn1-ber v1.5.4 // indirect
+	github.com/go-co-op/gocron v1.25.0 // indirect
 	github.com/go-openapi/jsonpointer v0.19.6 // indirect
 	github.com/go-openapi/jsonreference v0.20.2 // indirect
 	github.com/go-openapi/spec v0.20.8 // indirect
@@ -54,6 +56,7 @@ require (
 	github.com/josharian/intern v1.0.0 // indirect
 	github.com/jpillora/backoff v1.0.0 // indirect
 	github.com/json-iterator/go v1.1.12 // indirect
+	github.com/kr/pretty v0.3.0 // indirect
 	github.com/lann/builder v0.0.0-20180802200727-47ae307949d0 // indirect
 	github.com/lann/ps v0.0.0-20150810152359-62de8c46ede0 // indirect
 	github.com/mailru/easyjson v0.7.7 // indirect
@@ -66,11 +69,15 @@ require (
 	github.com/pkg/errors v0.9.1 // indirect
 	github.com/prometheus/client_model v0.3.0 // indirect
 	github.com/prometheus/procfs v0.9.0 // indirect
+	github.com/robfig/cron/v3 v3.0.1 // indirect
+	github.com/rogpeppe/go-internal v1.8.1 // indirect
 	github.com/russross/blackfriday/v2 v2.1.0 // indirect
+	github.com/stretchr/testify v1.8.2 // indirect
 	github.com/swaggo/files v1.0.0 // indirect
 	github.com/urfave/cli/v2 v2.24.4 // indirect
 	github.com/xrash/smetrics v0.0.0-20201216005158-039620a65673 // indirect
 	go.uber.org/atomic v1.10.0 // indirect
+	golang.org/x/exp v0.0.0-20230510235704-dd950f8aeaea // indirect
 	golang.org/x/mod v0.8.0 // indirect
 	golang.org/x/net v0.7.0 // indirect
 	golang.org/x/oauth2 v0.5.0 // indirect

go.sum (18 lines changed)

@@ -81,6 +81,8 @@ github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03
 github.com/BurntSushi/toml v1.1.0/go.mod h1:CxXYINrC8qIiEnFrOxCa7Jy5BFHlXnUU2pbicEuybxQ=
 github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo=
 github.com/ClickHouse/clickhouse-go v1.4.3/go.mod h1:EaI/sW7Azgz9UATzd5ZdZHRUhHgv5+JMS9NSr2smCJI=
+github.com/ClusterCockpit/cc-units v0.4.0 h1:zP5DOu99GmErW0tCDf0gcLrlWt42RQ9dpoONEOh4cI0=
+github.com/ClusterCockpit/cc-units v0.4.0/go.mod h1:3S3PAhAayS3pbgcT4q9Vn9VJw22Op51X0YimtG77zBw=
 github.com/KyleBanks/depth v1.2.1 h1:5h8fQADFrWtarTdtDudMmGsC7GPbOAu6RVB3ffsVFHc=
 github.com/KyleBanks/depth v1.2.1/go.mod h1:jzSb9d0L43HxTQfT+oSA1EEp2q+ne2uh6XgeJcm8brE=
 github.com/Masterminds/squirrel v1.5.3 h1:YPpoceAcxuzIljlr5iWpNKaql7hLeG1KLSrhvdHpkZc=
@@ -444,6 +446,8 @@ github.com/ghodss/yaml v1.0.0 h1:wQHKEahhL6wmXdzwWG11gIVCkOv05bNOh+Rxn0yngAk=
 github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04=
 github.com/go-asn1-ber/asn1-ber v1.5.4 h1:vXT6d/FNDiELJnLb6hGNa309LMsrCoYFvpwHDF0+Y1A=
 github.com/go-asn1-ber/asn1-ber v1.5.4/go.mod h1:hEBeB/ic+5LoWskz+yKT7vGhhPYkProFKoKdwZRWMe0=
+github.com/go-co-op/gocron v1.25.0 h1:pzAdtily1JVIf6lGby6K0JKzhishgLOllQgNxoYbR+8=
+github.com/go-co-op/gocron v1.25.0/go.mod h1:JHrQDY4iE1HZPkgTyoccY4xtDgLbrUwL+xODIbEQdnc=
 github.com/go-fonts/dejavu v0.1.0/go.mod h1:4Wt4I4OU2Nq9asgDCteaAaWZOV24E+0/Pwo0gppep4g=
 github.com/go-fonts/latin-modern v0.2.0/go.mod h1:rQVLdDMK+mK1xscDwsqM5J8U2jrRa3T0ecnM9pNujks=
 github.com/go-fonts/liberation v0.1.1/go.mod h1:K6qoJYypsmfVjWg8KOVDQhLc8UDgIK2HYqyqAO9z7GY=
@@ -822,8 +826,9 @@ github.com/kr/fs v0.1.0/go.mod h1:FFnZGqtBN9Gxj7eW1uZ42v5BccTP0vu6NEaFoC2HwRg=
 github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc=
 github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=
 github.com/kr/pretty v0.2.0/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI=
-github.com/kr/pretty v0.2.1 h1:Fmg33tUaq4/8ym9TJN1x7sLJnHVwhP33CNkpYV/7rwI=
 github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI=
+github.com/kr/pretty v0.3.0 h1:WgNl7dwNpEZ6jJ9k1snq4pZsg7DOEN8hP9Xw0Tsjwk0=
+github.com/kr/pretty v0.3.0/go.mod h1:640gp4NfQd8pI5XOwp5fnNeVWj67G7CFk/SaSQn7NBk=
 github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
 github.com/kr/pty v1.1.5/go.mod h1:9r2w37qlBe7rQ6e1fg1S/9xpWHSnaqNdHD3WcMdbPDA=
 github.com/kr/pty v1.1.8/go.mod h1:O1sed60cT9XZ5uDucP5qwvh+TE3NnUj51EiZO/lmSfw=
@@ -1005,6 +1010,7 @@ github.com/pierrec/lz4 v2.0.5+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi
 github.com/pierrec/lz4/v4 v4.1.8/go.mod h1:gZWDp/Ze/IJXGXf23ltt2EXimqmTUXEy0GFuRQyBid4=
 github.com/pkg/browser v0.0.0-20210706143420-7d21f8c997e2/go.mod h1:HKlIX3XHQyzLZPlr7++PzdhaXEj94dEiJgZDTsxEqUI=
 github.com/pkg/browser v0.0.0-20210911075715-681adbf594b8/go.mod h1:HKlIX3XHQyzLZPlr7++PzdhaXEj94dEiJgZDTsxEqUI=
+github.com/pkg/diff v0.0.0-20210226163009-20ebb0f2a09e/go.mod h1:pJLUxLENpZxwdsKMEsNbx1VGcRFpLqf3715MtcvvzbA=
 github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
 github.com/pkg/errors v0.8.1-0.20171018195549-f15c970de5b7/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
 github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
@@ -1060,11 +1066,16 @@ github.com/qustavo/sqlhooks/v2 v2.1.0 h1:54yBemHnGHp/7xgT+pxwmIlMSDNYKx5JW5dfRAi
 github.com/qustavo/sqlhooks/v2 v2.1.0/go.mod h1:aMREyKo7fOKTwiLuWPsaHRXEmtqG4yREztO0idF83AU=
 github.com/remyoudompheng/bigfft v0.0.0-20190728182440-6a916e37a237/go.mod h1:qqbHyh8v60DhA7CoWK5oRCqLrMHRGoxYCSS9EjAz6Eo=
 github.com/remyoudompheng/bigfft v0.0.0-20200410134404-eec4a21b6bb0/go.mod h1:qqbHyh8v60DhA7CoWK5oRCqLrMHRGoxYCSS9EjAz6Eo=
+github.com/robfig/cron/v3 v3.0.1 h1:WdRxkvbJztn8LMz/QEvLN5sBU+xKpSqwwUO1Pjr4qDs=
+github.com/robfig/cron/v3 v3.0.1/go.mod h1:eQICP3HwyT7UooqI/z+Ov+PtYAWygg1TEWWzGIFLtro=
 github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6SoW27p1b0cqNHllgS5HIMJraePCO15w5zCzIWYg=
 github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ=
 github.com/rogpeppe/go-internal v1.1.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4=
 github.com/rogpeppe/go-internal v1.2.2/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4=
 github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4=
+github.com/rogpeppe/go-internal v1.6.1/go.mod h1:xXDCJY+GAPziupqXw64V24skbSoqbTEfhy4qGm1nDQc=
+github.com/rogpeppe/go-internal v1.8.1 h1:geMPLpDpQOgVyCg5z5GoRwLHepNdb71NXb67XFkP+Eg=
+github.com/rogpeppe/go-internal v1.8.1/go.mod h1:JeRgkft04UBgHMgCIwADu4Pn6Mtm5d4nPKWu0nJ5d+o=
 github.com/rs/xid v1.2.1/go.mod h1:+uKXf+4Djp6Md1KODXJxgGQPKngRmWyn10oCKFzNHOQ=
 github.com/rs/zerolog v1.13.0/go.mod h1:YbFCdg8HfsridGWAh22vktObvhZbQsZXe4/zB0OKkWU=
 github.com/rs/zerolog v1.15.0/go.mod h1:xYTKnLHcpfU2225ny5qZjxnj9NvkumZYjJHlAThCjNc=
@@ -1142,8 +1153,9 @@ github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/
 github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
 github.com/stretchr/testify v1.7.2/go.mod h1:R6va5+xMeoiuVRoj+gSkQ7d3FALtqAAGI1FQKckRals=
 github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU=
-github.com/stretchr/testify v1.8.1 h1:w7B6lhMri9wdJUVmEZPGGhZzrYTPvgJArz7wNPgYKsk=
 github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4=
+github.com/stretchr/testify v1.8.2 h1:+h33VjcLVPDHtOdpUCuF+7gSuG3yGIftsP1YvFihtJ8=
+github.com/stretchr/testify v1.8.2/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4=
 github.com/subosito/gotenv v1.2.0/go.mod h1:N0PQaV/YGNqwC0u51sEeR/aUtSLEXKX9iv69rRypqCw=
 github.com/swaggo/files v1.0.0 h1:1gGXVIeUFCS/dta17rnP0iOpr6CXFwKD7EO5ID233e4=
 github.com/swaggo/files v1.0.0/go.mod h1:N59U6URJLyU1PQgFqPM7wXLMhJx7QAolnvfQkqO13kc=
@@ -1302,6 +1314,8 @@ golang.org/x/exp v0.0.0-20191227195350-da58074b4299/go.mod h1:2RIsYlXP63K8oxa1u0
 golang.org/x/exp v0.0.0-20200119233911-0405dc783f0a/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4=
 golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EHIKF9dgMWnmCNThgcyBT1FY9mM=
 golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6/go.mod h1:3jZMyOhIsHpP37uCMkUooju7aAi5cS1Q23tOzKc+0MU=
+golang.org/x/exp v0.0.0-20230510235704-dd950f8aeaea h1:vLCWI/yYrdEHyN2JzIzPO3aaQJHQdp89IZBA/+azVC4=
+golang.org/x/exp v0.0.0-20230510235704-dd950f8aeaea/go.mod h1:V1LtkGg67GoY2N1AnLN78QLrzxkLyJw7RJb1gzOOz9w=
 golang.org/x/image v0.0.0-20180708004352-c73c2afc3b81/go.mod h1:ux5Hcp/YLpHSI86hEcLt0YII63i6oz57MZXIpbrjZUs=
 golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js=
 golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0=


@@ -1,4 +1,8 @@
-package test
+// Copyright (C) 2022 NHR@FAU, University Erlangen-Nuremberg.
+// All rights reserved.
+// Use of this source code is governed by a MIT-style
+// license that can be found in the LICENSE file.
+package api_test

 import (
 	"bytes"
@@ -44,16 +48,7 @@ func setup(t *testing.T) *api.RestApi {
 				"duration": { "from": 0, "to": 86400 },
 				"startTime": { "from": "2022-01-01T00:00:00Z", "to": null }
 			}
-		},
-		{
-			"name": "taurus",
-			"metricDataRepository": {"kind": "test", "url": "bla:8081"},
-			"filterRanges": {
-				"numNodes": { "from": 1, "to": 4000 },
-				"duration": { "from": 0, "to": 604800 },
-				"startTime": { "from": "2010-01-01T00:00:00Z", "to": null }
-			}
-		}
+		}
 	]
 }`
 const testclusterJson = `{
const testclusterJson = `{ const testclusterJson = `{
@@ -111,215 +106,6 @@ func setup(t *testing.T) *api.RestApi {
 		}
 	]
 }`
const taurusclusterJson = `{
"name": "taurus",
"subClusters": [
{
"name": "haswell",
"processorType": "Intel Haswell",
"socketsPerNode": 2,
"coresPerSocket": 12,
"threadsPerCore": 1,
"flopRateScalar": {
"unit": {
"prefix": "G",
"base": "F/s"
},
"value": 14
},
"flopRateSimd": {
"unit": {
"prefix": "G",
"base": "F/s"
},
"value": 112
},
"memoryBandwidth": {
"unit": {
"prefix": "G",
"base": "B/s"
},
"value": 24
},
"numberOfNodes": 70,
"nodes": "w11[27-45,49-63,69-72]",
"topology": {
"node": [ 0, 1 ],
"socket": [
[ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11 ],
[ 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23 ]
],
"memoryDomain": [
[ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11 ],
[ 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23 ]
],
"core": [ [ 0 ], [ 1 ], [ 2 ], [ 3 ], [ 4 ], [ 5 ], [ 6 ], [ 7 ], [ 8 ], [ 9 ], [ 10 ], [ 11 ], [ 12 ], [ 13 ], [ 14 ], [ 15 ], [ 16 ], [ 17 ], [ 18 ], [ 19 ], [ 20 ], [ 21 ], [ 22 ], [ 23 ] ]
}
}
],
"metricConfig": [
{
"name": "cpu_used",
"scope": "core",
"unit": {"base": ""},
"aggregation": "avg",
"timestep": 30,
"peak": 1,
"normal": 0.5,
"caution": 2e-07,
"alert": 1e-07,
"subClusters": [
{
"name": "haswell",
"peak": 1,
"normal": 0.5,
"caution": 2e-07,
"alert": 1e-07
}
]
},
{
"name": "ipc",
"scope": "core",
"unit": { "base": "IPC"},
"aggregation": "avg",
"timestep": 60,
"peak": 2,
"normal": 1,
"caution": 0.1,
"alert": 0.5,
"subClusters": [
{
"name": "haswell",
"peak": 2,
"normal": 1,
"caution": 0.1,
"alert": 0.5
}
]
},
{
"name": "flops_any",
"scope": "core",
"unit": { "base": "F/s"},
"aggregation": "sum",
"timestep": 60,
"peak": 40000000000,
"normal": 20000000000,
"caution": 30000000000,
"alert": 35000000000,
"subClusters": [
{
"name": "haswell",
"peak": 40000000000,
"normal": 20000000000,
"caution": 30000000000,
"alert": 35000000000
}
]
},
{
"name": "mem_bw",
"scope": "socket",
"unit": { "base": "B/s"},
"aggregation": "sum",
"timestep": 60,
"peak": 58800000000,
"normal": 28800000000,
"caution": 38800000000,
"alert": 48800000000,
"subClusters": [
{
"name": "haswell",
"peak": 58800000000,
"normal": 28800000000,
"caution": 38800000000,
"alert": 48800000000
}
]
},
{
"name": "file_bw",
"scope": "node",
"unit": { "base": "B/s"},
"aggregation": "sum",
"timestep": 30,
"peak": 20000000000,
"normal": 5000000000,
"caution": 9000000000,
"alert": 19000000000,
"subClusters": [
{
"name": "haswell",
"peak": 20000000000,
"normal": 5000000000,
"caution": 9000000000,
"alert": 19000000000
}
]
},
{
"name": "net_bw",
"scope": "node",
"unit": { "base": "B/s"},
"timestep": 30,
"aggregation": "sum",
"peak": 7000000000,
"normal": 5000000000,
"caution": 6000000000,
"alert": 6500000000,
"subClusters": [
{
"name": "haswell",
"peak": 7000000000,
"normal": 5000000000,
"caution": 6000000000,
"alert": 6500000000
}
]
},
{
"name": "mem_used",
"scope": "node",
"unit": {"base": "B"},
"aggregation": "sum",
"timestep": 30,
"peak": 32000000000,
"normal": 2000000000,
"caution": 31000000000,
"alert": 30000000000,
"subClusters": [
{
"name": "haswell",
"peak": 32000000000,
"normal": 2000000000,
"caution": 31000000000,
"alert": 30000000000
}
]
},
{
"name": "cpu_power",
"scope": "socket",
"unit": {"base": "W"},
"aggregation": "sum",
"timestep": 60,
"peak": 100,
"normal": 80,
"caution": 90,
"alert": 90,
"subClusters": [
{
"name": "haswell",
"peak": 100,
"normal": 80,
"caution": 90,
"alert": 90
}
]
}
]
}`
	log.Init("info", true)
	tmpdir := t.TempDir()
@@ -340,15 +126,11 @@ func setup(t *testing.T) *api.RestApi {
 		t.Fatal(err)
 	}

-	if err := os.Mkdir(filepath.Join(jobarchive, "taurus"), 0777); err != nil {
-		t.Fatal(err)
-	}
-
-	if err := os.WriteFile(filepath.Join(jobarchive, "taurus", "cluster.json"), []byte(taurusclusterJson), 0666); err != nil {
-		t.Fatal(err)
-	}
-
 	dbfilepath := filepath.Join(tmpdir, "test.db")
-	repository.MigrateDB("sqlite3", dbfilepath)
+	err := repository.MigrateDB("sqlite3", dbfilepath)
+	if err != nil {
+		t.Fatal(err)
+	}

 	cfgFilePath := filepath.Join(tmpdir, "config.json")
 	if err := os.WriteFile(cfgFilePath, []byte(testconfig), 0666); err != nil {
@@ -385,7 +167,7 @@ func cleanup() {
 /*
  * This function starts a job, stops it, and then reads its data from the job-archive.
  * Do not run sub-tests in parallel! Tests should not be run in parallel at all, because
- * at least `setup` modifies global state. Log-Output is redirected to /dev/null on purpose.
+ * at least `setup` modifies global state.
  */
 func TestRestApi(t *testing.T) {
 	restapi := setup(t)
@@ -470,15 +252,15 @@ func TestRestApi(t *testing.T) {
 		job.Project != "testproj" ||
 		job.Cluster != "testcluster" ||
 		job.SubCluster != "sc1" ||
-		*job.Partition != "default" ||
-		*job.Walltime != 3600 ||
-		*job.ArrayJobId != 0 ||
+		job.Partition != "default" ||
+		job.Walltime != 3600 ||
+		job.ArrayJobId != 0 ||
 		job.NumNodes != 1 ||
-		*job.NumHWThreads != 8 ||
-		*job.NumAcc != 0 ||
+		job.NumHWThreads != 8 ||
+		job.NumAcc != 0 ||
 		job.Exclusive != 1 ||
 		job.MonitoringStatus != 1 ||
-		*job.SMT != 1 ||
+		job.SMT != 1 ||
 		!reflect.DeepEqual(job.Resources, []*schema.Resource{{Hostname: "host123", HWThreads: []int{0, 1, 2, 3, 4, 5, 6, 7}}}) ||
 		job.StartTime.Unix() != 123456789 {
 		t.Fatalf("unexpected job properties: %#v", job)
@@ -566,17 +348,7 @@ func TestRestApi(t *testing.T) {
 		}
 	})

-	// t.Run("FailedJob", func(t *testing.T) {
-	// 	subtestLetJobFail(t, restapi, r)
-	// })
-
-	// t.Run("ImportJob", func(t *testing.T) {
-	// 	testImportFlag(t)
-	// })
-}
-
-func subtestLetJobFail(t *testing.T, restapi *api.RestApi, r *mux.Router) {
-	const startJobBody string = `{
+	const startJobBodyFailed string = `{
 		"jobId": 12345,
 		"user": "testuser",
 		"project": "testproj",
@@ -595,8 +367,8 @@ func subtestLetJobFail(t *testing.T, restapi *api.RestApi, r *mux.Router) {
 		"startTime": 12345678
 	}`

-	ok := t.Run("StartJob", func(t *testing.T) {
-		req := httptest.NewRequest(http.MethodPost, "/api/jobs/start_job/", bytes.NewBuffer([]byte(startJobBody)))
+	ok := t.Run("StartJobFailed", func(t *testing.T) {
+		req := httptest.NewRequest(http.MethodPost, "/api/jobs/start_job/", bytes.NewBuffer([]byte(startJobBodyFailed)))
 		recorder := httptest.NewRecorder()
 		r.ServeHTTP(recorder, req)
@@ -609,7 +381,7 @@ func subtestLetJobFail(t *testing.T, restapi *api.RestApi, r *mux.Router) {
 		t.Fatal("subtest failed")
 	}

-	const stopJobBody string = `{
+	const stopJobBodyFailed string = `{
 		"jobId": 12345,
 		"cluster": "testcluster",
@@ -617,8 +389,8 @@ func subtestLetJobFail(t *testing.T, restapi *api.RestApi, r *mux.Router) {
 		"stopTime": 12355678
 	}`

-	ok = t.Run("StopJob", func(t *testing.T) {
-		req := httptest.NewRequest(http.MethodPost, "/api/jobs/stop_job/", bytes.NewBuffer([]byte(stopJobBody)))
+	ok = t.Run("StopJobFailed", func(t *testing.T) {
+		req := httptest.NewRequest(http.MethodPost, "/api/jobs/stop_job/", bytes.NewBuffer([]byte(stopJobBodyFailed)))
 		recorder := httptest.NewRecorder()
 		r.ServeHTTP(recorder, req)
@@ -642,45 +414,3 @@ func subtestLetJobFail(t *testing.T, restapi *api.RestApi, r *mux.Router) {
 		t.Fatal("subtest failed")
 	}
 }
func testImportFlag(t *testing.T) {
if err := repository.HandleImportFlag("meta.json:data.json"); err != nil {
t.Fatal(err)
}
repo := repository.GetJobRepository()
jobId := int64(20639587)
cluster := "taurus"
startTime := int64(1635856524)
job, err := repo.Find(&jobId, &cluster, &startTime)
if err != nil {
t.Fatal(err)
}
if job.NumNodes != 2 {
t.Errorf("NumNode: Received %d, expected 2", job.NumNodes)
}
ar := archive.GetHandle()
data, err := ar.LoadJobData(job)
if err != nil {
t.Fatal(err)
}
if len(data) != 8 {
t.Errorf("Job data length: Got %d, want 8", len(data))
}
r := map[string]string{"mem_used": "GB", "net_bw": "KB/s",
"cpu_power": "W", "cpu_used": "",
"file_bw": "KB/s", "flops_any": "F/s",
"mem_bw": "GB/s", "ipc": "IPC"}
for name, scopes := range data {
for _, metric := range scopes {
if metric.Unit.Base != r[name] {
t.Errorf("Metric %s unit: Got %s, want %s", name, metric.Unit.Base, r[name])
}
}
}
}


@@ -22,6 +22,7 @@ import (
 	"github.com/ClusterCockpit/cc-backend/internal/auth"
 	"github.com/ClusterCockpit/cc-backend/internal/graph"
 	"github.com/ClusterCockpit/cc-backend/internal/graph/model"
+	"github.com/ClusterCockpit/cc-backend/internal/importer"
 	"github.com/ClusterCockpit/cc-backend/internal/repository"
 	"github.com/ClusterCockpit/cc-backend/pkg/archive"
 	"github.com/ClusterCockpit/cc-backend/pkg/log"
@@ -252,7 +253,7 @@ func (api *RestApi) getJobs(rw http.ResponseWriter, r *http.Request) {
 	results := make([]*schema.JobMeta, 0, len(jobs))
 	for _, job := range jobs {
 		if withMetadata {
-			if _, err := api.JobRepository.FetchMetadata(job); err != nil {
+			if _, err = api.JobRepository.FetchMetadata(job); err != nil {
 				handleError(err, http.StatusInternalServerError, rw)
 				return
 			}
@@ -396,7 +397,7 @@ func (api *RestApi) startJob(rw http.ResponseWriter, r *http.Request) {
 	if req.State == "" {
 		req.State = schema.JobStateRunning
 	}
-	if err := repository.SanityChecks(&req.BaseJob); err != nil {
+	if err := importer.SanityChecks(&req.BaseJob); err != nil {
 		handleError(err, http.StatusBadRequest, rw)
 		return
 	}


@@ -3420,9 +3420,9 @@ func (ec *executionContext) _Job_walltime(ctx context.Context, field graphql.Col
 		}
 		return graphql.Null
 	}
-	res := resTmp.(*int64)
+	res := resTmp.(int64)
 	fc.Result = res
 	return ec.marshalNInt2int64(ctx, field.Selections, res)
 }

 func (ec *executionContext) fieldContext_Job_walltime(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) {
@@ -3508,9 +3508,9 @@ func (ec *executionContext) _Job_numHWThreads(ctx context.Context, field graphql
 		}
 		return graphql.Null
 	}
-	res := resTmp.(*int32)
+	res := resTmp.(int32)
 	fc.Result = res
 	return ec.marshalNInt2int32(ctx, field.Selections, res)
 }

 func (ec *executionContext) fieldContext_Job_numHWThreads(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) {
@@ -3552,9 +3552,9 @@ func (ec *executionContext) _Job_numAcc(ctx context.Context, field graphql.Colle
 		}
 		return graphql.Null
 	}
-	res := resTmp.(*int32)
+	res := resTmp.(int32)
 	fc.Result = res
 	return ec.marshalNInt2int32(ctx, field.Selections, res)
 }

 func (ec *executionContext) fieldContext_Job_numAcc(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) {
@@ -3596,9 +3596,9 @@ func (ec *executionContext) _Job_SMT(ctx context.Context, field graphql.Collecte
 		}
 		return graphql.Null
 	}
-	res := resTmp.(*int32)
+	res := resTmp.(int32)
 	fc.Result = res
 	return ec.marshalNInt2int32(ctx, field.Selections, res)
 }

 func (ec *executionContext) fieldContext_Job_SMT(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) {
@@ -3684,9 +3684,9 @@ func (ec *executionContext) _Job_partition(ctx context.Context, field graphql.Co
 		}
 		return graphql.Null
 	}
-	res := resTmp.(*string)
+	res := resTmp.(string)
 	fc.Result = res
 	return ec.marshalNString2string(ctx, field.Selections, res)
 }

 func (ec *executionContext) fieldContext_Job_partition(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) {
@@ -3728,9 +3728,9 @@ func (ec *executionContext) _Job_arrayJobId(ctx context.Context, field graphql.C
 		}
 		return graphql.Null
 	}
-	res := resTmp.(*int64)
+	res := resTmp.(int64)
 	fc.Result = res
 	return ec.marshalNInt2int64(ctx, field.Selections, res)
 }

 func (ec *executionContext) fieldContext_Job_arrayJobId(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) {
@@ -9034,9 +9034,9 @@ func (ec *executionContext) _Unit_prefix(ctx context.Context, field graphql.Coll
 	if resTmp == nil {
 		return graphql.Null
 	}
-	res := resTmp.(*string)
+	res := resTmp.(string)
 	fc.Result = res
 	return ec.marshalOString2string(ctx, field.Selections, res)
 }

 func (ec *executionContext) fieldContext_Unit_prefix(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) {
@@ -14020,48 +14020,6 @@ func (ec *executionContext) marshalNInt2ᚖint(ctx context.Context, sel ast.Sele
 	return res
 }
func (ec *executionContext) unmarshalNInt2ᚖint32(ctx context.Context, v interface{}) (*int32, error) {
res, err := graphql.UnmarshalInt32(v)
return &res, graphql.ErrorOnPath(ctx, err)
}
func (ec *executionContext) marshalNInt2ᚖint32(ctx context.Context, sel ast.SelectionSet, v *int32) graphql.Marshaler {
if v == nil {
if !graphql.HasFieldError(ctx, graphql.GetFieldContext(ctx)) {
ec.Errorf(ctx, "the requested element is null which the schema does not allow")
}
return graphql.Null
}
res := graphql.MarshalInt32(*v)
if res == graphql.Null {
if !graphql.HasFieldError(ctx, graphql.GetFieldContext(ctx)) {
ec.Errorf(ctx, "the requested element is null which the schema does not allow")
}
}
return res
}
func (ec *executionContext) unmarshalNInt2ᚖint64(ctx context.Context, v interface{}) (*int64, error) {
res, err := graphql.UnmarshalInt64(v)
return &res, graphql.ErrorOnPath(ctx, err)
}
func (ec *executionContext) marshalNInt2ᚖint64(ctx context.Context, sel ast.SelectionSet, v *int64) graphql.Marshaler {
if v == nil {
if !graphql.HasFieldError(ctx, graphql.GetFieldContext(ctx)) {
ec.Errorf(ctx, "the requested element is null which the schema does not allow")
}
return graphql.Null
}
res := graphql.MarshalInt64(*v)
if res == graphql.Null {
if !graphql.HasFieldError(ctx, graphql.GetFieldContext(ctx)) {
ec.Errorf(ctx, "the requested element is null which the schema does not allow")
}
}
return res
}
func (ec *executionContext) marshalNJob2ᚕᚖgithubᚗcomᚋClusterCockpitᚋccᚑbackendᚋpkgᚋschemaᚐJobᚄ(ctx context.Context, sel ast.SelectionSet, v []*schema.Job) graphql.Marshaler {
	ret := make(graphql.Array, len(v))
	var wg sync.WaitGroup
@@ -14684,27 +14642,6 @@ func (ec *executionContext) marshalNString2ᚕstringᚄ(ctx context.Context, sel
 	return ret
 }
func (ec *executionContext) unmarshalNString2ᚖstring(ctx context.Context, v interface{}) (*string, error) {
res, err := graphql.UnmarshalString(v)
return &res, graphql.ErrorOnPath(ctx, err)
}
func (ec *executionContext) marshalNString2ᚖstring(ctx context.Context, sel ast.SelectionSet, v *string) graphql.Marshaler {
if v == nil {
if !graphql.HasFieldError(ctx, graphql.GetFieldContext(ctx)) {
ec.Errorf(ctx, "the requested element is null which the schema does not allow")
}
return graphql.Null
}
res := graphql.MarshalString(*v)
if res == graphql.Null {
if !graphql.HasFieldError(ctx, graphql.GetFieldContext(ctx)) {
ec.Errorf(ctx, "the requested element is null which the schema does not allow")
}
}
return res
}
func (ec *executionContext) marshalNSubCluster2ᚕᚖgithubᚗcomᚋClusterCockpitᚋccᚑbackendᚋpkgᚋschemaᚐSubClusterᚄ(ctx context.Context, sel ast.SelectionSet, v []*schema.SubCluster) graphql.Marshaler {
	ret := make(graphql.Array, len(v))
	var wg sync.WaitGroup


@@ -0,0 +1,131 @@
// Copyright (C) 2022 NHR@FAU, University Erlangen-Nuremberg.
// All rights reserved.
// Use of this source code is governed by a MIT-style
// license that can be found in the LICENSE file.
package importer
import (
"bytes"
"encoding/json"
"fmt"
"os"
"strings"
"time"
"github.com/ClusterCockpit/cc-backend/internal/config"
"github.com/ClusterCockpit/cc-backend/internal/repository"
"github.com/ClusterCockpit/cc-backend/pkg/archive"
"github.com/ClusterCockpit/cc-backend/pkg/log"
"github.com/ClusterCockpit/cc-backend/pkg/schema"
)
// Import all jobs specified as `<path-to-meta.json>:<path-to-data.json>,...`
func HandleImportFlag(flag string) error {
r := repository.GetJobRepository()
for _, pair := range strings.Split(flag, ",") {
files := strings.Split(pair, ":")
if len(files) != 2 {
return fmt.Errorf("REPOSITORY/INIT > invalid import flag format")
}
raw, err := os.ReadFile(files[0])
if err != nil {
log.Warn("Error while reading metadata file for import")
return err
}
if config.Keys.Validate {
if err = schema.Validate(schema.Meta, bytes.NewReader(raw)); err != nil {
return fmt.Errorf("REPOSITORY/INIT > validate job meta: %v", err)
}
}
dec := json.NewDecoder(bytes.NewReader(raw))
dec.DisallowUnknownFields()
jobMeta := schema.JobMeta{BaseJob: schema.JobDefaults}
if err = dec.Decode(&jobMeta); err != nil {
log.Warn("Error while decoding raw json metadata for import")
return err
}
raw, err = os.ReadFile(files[1])
if err != nil {
log.Warn("Error while reading jobdata file for import")
return err
}
if config.Keys.Validate {
if err = schema.Validate(schema.Data, bytes.NewReader(raw)); err != nil {
return fmt.Errorf("REPOSITORY/INIT > validate job data: %v", err)
}
}
dec = json.NewDecoder(bytes.NewReader(raw))
dec.DisallowUnknownFields()
jobData := schema.JobData{}
if err = dec.Decode(&jobData); err != nil {
log.Warn("Error while decoding raw json jobdata for import")
return err
}
// checkJobData(&jobData)
jobMeta.MonitoringStatus = schema.MonitoringStatusArchivingSuccessful
// if _, err = r.Find(&jobMeta.JobID, &jobMeta.Cluster, &jobMeta.StartTime); err != sql.ErrNoRows {
// if err != nil {
// log.Warn("Error while finding job in jobRepository")
// return err
// }
//
// return fmt.Errorf("REPOSITORY/INIT > a job with that jobId, cluster and startTime does already exist")
// }
//
job := schema.Job{
BaseJob: jobMeta.BaseJob,
StartTime: time.Unix(jobMeta.StartTime, 0),
StartTimeUnix: jobMeta.StartTime,
}
// TODO: Other metrics...
job.FlopsAnyAvg = loadJobStat(&jobMeta, "flops_any")
job.MemBwAvg = loadJobStat(&jobMeta, "mem_bw")
job.NetBwAvg = loadJobStat(&jobMeta, "net_bw")
job.FileBwAvg = loadJobStat(&jobMeta, "file_bw")
job.RawResources, err = json.Marshal(job.Resources)
if err != nil {
log.Warn("Error while marshaling job resources")
return err
}
job.RawMetaData, err = json.Marshal(job.MetaData)
if err != nil {
log.Warn("Error while marshaling job metadata")
return err
}
if err = SanityChecks(&job.BaseJob); err != nil {
log.Warn("BaseJob SanityChecks failed")
return err
}
if err = archive.GetHandle().ImportJob(&jobMeta, &jobData); err != nil {
log.Error("Error while importing job")
return err
}
id, err := r.InsertJob(&job)
if err != nil {
log.Warn("Error while job db insert")
return err
}
for _, tag := range job.Tags {
if _, err := r.AddTagOrCreate(id, tag.Type, tag.Name); err != nil {
log.Error("Error while adding or creating tag")
return err
}
}
log.Infof("successfully imported a new job (jobId: %d, cluster: %s, dbid: %d)", job.JobID, job.Cluster, id)
}
return nil
}
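
Given the flag format documented in the comment above, a CLI invocation could look like the following sketch (the `--import-job` flag name is an assumption based on the `flagImportJob` handling in `cmd/cc-backend`):

```sh
./cc-backend --import-job meta.json:data.json
./cc-backend --import-job meta1.json:data1.json,meta2.json:data2.json   # multiple pairs, comma-separated
```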


@@ -0,0 +1,172 @@
// Copyright (C) 2022 NHR@FAU, University Erlangen-Nuremberg.
// All rights reserved.
// Use of this source code is governed by a MIT-style
// license that can be found in the LICENSE file.
package importer_test
import (
"encoding/json"
"fmt"
"os"
"path/filepath"
"strings"
"testing"
"github.com/ClusterCockpit/cc-backend/internal/config"
"github.com/ClusterCockpit/cc-backend/internal/importer"
"github.com/ClusterCockpit/cc-backend/internal/repository"
"github.com/ClusterCockpit/cc-backend/pkg/archive"
"github.com/ClusterCockpit/cc-backend/pkg/log"
)
func copyFile(s string, d string) error {
r, err := os.Open(s)
if err != nil {
return err
}
defer r.Close()
w, err := os.Create(d)
if err != nil {
return err
}
defer w.Close()
w.ReadFrom(r)
return nil
}
func setup(t *testing.T) *repository.JobRepository {
const testconfig = `{
"addr": "0.0.0.0:8080",
"validate": false,
"archive": {
"kind": "file",
"path": "./var/job-archive"
},
"clusters": [
{
"name": "testcluster",
"metricDataRepository": {"kind": "test", "url": "bla:8081"},
"filterRanges": {
"numNodes": { "from": 1, "to": 64 },
"duration": { "from": 0, "to": 86400 },
"startTime": { "from": "2022-01-01T00:00:00Z", "to": null }
}
},
{
"name": "fritz",
"metricDataRepository": {"kind": "test", "url": "bla:8081"},
"filterRanges": {
"numNodes": { "from": 1, "to": 944 },
"duration": { "from": 0, "to": 86400 },
"startTime": { "from": "2022-01-01T00:00:00Z", "to": null }
}
},
{
"name": "taurus",
"metricDataRepository": {"kind": "test", "url": "bla:8081"},
"filterRanges": {
"numNodes": { "from": 1, "to": 4000 },
"duration": { "from": 0, "to": 604800 },
"startTime": { "from": "2010-01-01T00:00:00Z", "to": null }
}
}
]}`
log.Init("info", true)
tmpdir := t.TempDir()
jobarchive := filepath.Join(tmpdir, "job-archive")
if err := os.Mkdir(jobarchive, 0777); err != nil {
t.Fatal(err)
}
if err := os.WriteFile(filepath.Join(jobarchive, "version.txt"), []byte(fmt.Sprintf("%d", 1)), 0666); err != nil {
t.Fatal(err)
}
fritzArchive := filepath.Join(tmpdir, "job-archive", "fritz")
if err := os.Mkdir(fritzArchive, 0777); err != nil {
t.Fatal(err)
}
if err := copyFile(filepath.Join("testdata", "cluster-fritz.json"),
filepath.Join(fritzArchive, "cluster.json")); err != nil {
t.Fatal(err)
}
dbfilepath := filepath.Join(tmpdir, "test.db")
err := repository.MigrateDB("sqlite3", dbfilepath)
if err != nil {
t.Fatal(err)
}
cfgFilePath := filepath.Join(tmpdir, "config.json")
if err := os.WriteFile(cfgFilePath, []byte(testconfig), 0666); err != nil {
t.Fatal(err)
}
config.Init(cfgFilePath)
archiveCfg := fmt.Sprintf("{\"kind\": \"file\",\"path\": \"%s\"}", jobarchive)
if err := archive.Init(json.RawMessage(archiveCfg), config.Keys.DisableArchive); err != nil {
t.Fatal(err)
}
repository.Connect("sqlite3", dbfilepath)
return repository.GetJobRepository()
}
type Result struct {
JobId int64
Cluster string
StartTime int64
Duration int32
}
func readResult(t *testing.T, testname string) Result {
var r Result
content, err := os.ReadFile(filepath.Join("testdata",
fmt.Sprintf("%s-golden.json", testname)))
if err != nil {
t.Fatal("Error when opening file: ", err)
}
err = json.Unmarshal(content, &r)
if err != nil {
t.Fatal("Error during Unmarshal(): ", err)
}
return r
}
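
A golden file consumed by `readResult` is plain JSON matching the `Result` struct; a hypothetical `testdata/fritz-golden.json` with made-up values might look like this (Go's encoding/json matches these keys case-insensitively to the exported fields):

```json
{
  "jobId": 398764,
  "cluster": "fritz",
  "startTime": 1675954353,
  "duration": 4300
}
```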
func TestHandleImportFlag(t *testing.T) {
r := setup(t)
tests, err := filepath.Glob(filepath.Join("testdata", "*.input"))
if err != nil {
t.Fatal(err)
}
for _, path := range tests {
_, filename := filepath.Split(path)
str := strings.Split(strings.TrimSuffix(filename, ".input"), "-")
testname := str[1]
t.Run(testname, func(t *testing.T) {
s := fmt.Sprintf("%s:%s", filepath.Join("testdata",
fmt.Sprintf("meta-%s.input", testname)),
filepath.Join("testdata", fmt.Sprintf("data-%s.json", testname)))
err := importer.HandleImportFlag(s)
if err != nil {
t.Fatal(err)
}
result := readResult(t, testname)
job, err := r.Find(&result.JobId, &result.Cluster, &result.StartTime)
if err != nil {
t.Fatal(err)
}
if job.Duration != result.Duration {
t.Errorf("wrong duration for job\ngot: %d \nwant: %d", job.Duration, result.Duration)
}
})
}
}

internal/importer/initDB.go (new file, 197 lines)

@@ -0,0 +1,197 @@
// Copyright (C) 2022 NHR@FAU, University Erlangen-Nuremberg.
// All rights reserved.
// Use of this source code is governed by a MIT-style
// license that can be found in the LICENSE file.
package importer
import (
"encoding/json"
"fmt"
"strings"
"time"
"github.com/ClusterCockpit/cc-backend/internal/repository"
"github.com/ClusterCockpit/cc-backend/pkg/archive"
"github.com/ClusterCockpit/cc-backend/pkg/log"
"github.com/ClusterCockpit/cc-backend/pkg/schema"
)
// Delete the tables "job", "tag" and "jobtag" from the database and
// repopulate them using the jobs found in `archive`.
func InitDB() error {
r := repository.GetJobRepository()
if err := r.Flush(); err != nil {
log.Errorf("repository initDB(): %v", err)
return err
}
starttime := time.Now()
log.Print("Building job table...")
t, err := r.TransactionInit()
if err != nil {
log.Warn("Error while initializing SQL transactions")
return err
}
tags := make(map[string]int64)
// Not using log.Print because we want the line to end with `\r` and
// this function is only ever called when a special command line flag
// is passed anyways.
fmt.Printf("%d jobs inserted...\r", 0)
ar := archive.GetHandle()
i := 0
errorOccured := 0
for jobContainer := range ar.Iter(false) {
jobMeta := jobContainer.Meta
// Bundle 100 inserts into one transaction for better performance
if i%100 == 0 {
r.TransactionCommit(t)
fmt.Printf("%d jobs inserted...\r", i)
}
jobMeta.MonitoringStatus = schema.MonitoringStatusArchivingSuccessful
job := schema.Job{
BaseJob: jobMeta.BaseJob,
StartTime: time.Unix(jobMeta.StartTime, 0),
StartTimeUnix: jobMeta.StartTime,
}
// TODO: Other metrics...
job.FlopsAnyAvg = loadJobStat(jobMeta, "flops_any")
job.MemBwAvg = loadJobStat(jobMeta, "mem_bw")
job.NetBwAvg = loadJobStat(jobMeta, "net_bw")
job.FileBwAvg = loadJobStat(jobMeta, "file_bw")
job.RawResources, err = json.Marshal(job.Resources)
if err != nil {
log.Errorf("repository initDB(): %v", err)
errorOccured++
continue
}
job.RawMetaData, err = json.Marshal(job.MetaData)
if err != nil {
log.Errorf("repository initDB(): %v", err)
errorOccured++
continue
}
if err := SanityChecks(&job.BaseJob); err != nil {
log.Errorf("repository initDB(): %v", err)
errorOccured++
continue
}
id, err := r.TransactionAdd(t, job)
if err != nil {
log.Errorf("repository initDB(): %v", err)
errorOccured++
continue
}
for _, tag := range job.Tags {
tagstr := tag.Name + ":" + tag.Type
tagId, ok := tags[tagstr]
if !ok {
tagId, err = r.TransactionAddTag(t, tag)
if err != nil {
log.Errorf("Error adding tag: %v", err)
errorOccured++
continue
}
tags[tagstr] = tagId
}
r.TransactionSetTag(t, id, tagId)
}
if err == nil {
i += 1
}
}
if errorOccured > 0 {
log.Warnf("Error in import of %d jobs!", errorOccured)
}
r.TransactionEnd(t)
log.Printf("A total of %d jobs have been registered in %.3f seconds.\n", i, time.Since(starttime).Seconds())
return nil
}
// This function also sets the subcluster if necessary!
func SanityChecks(job *schema.BaseJob) error {
if c := archive.GetCluster(job.Cluster); c == nil {
return fmt.Errorf("no such cluster: %v", job.Cluster)
}
if err := archive.AssignSubCluster(job); err != nil {
log.Warn("Error while assigning subcluster to job")
return err
}
if !job.State.Valid() {
return fmt.Errorf("not a valid job state: %v", job.State)
}
if len(job.Resources) == 0 || len(job.User) == 0 {
return fmt.Errorf("'resources' and 'user' should not be empty")
}
if job.NumAcc < 0 || job.NumHWThreads < 0 || job.NumNodes < 1 {
return fmt.Errorf("'numNodes', 'numAcc' or 'numHWThreads' invalid")
}
if len(job.Resources) != int(job.NumNodes) {
return fmt.Errorf("len(resources) does not equal numNodes (%d vs %d)", len(job.Resources), job.NumNodes)
}
return nil
}
func loadJobStat(job *schema.JobMeta, metric string) float64 {
if stats, ok := job.Statistics[metric]; ok {
return stats.Avg
}
return 0.0
}
func checkJobData(d *schema.JobData) error {
for _, scopes := range *d {
// var newUnit schema.Unit
// TODO Add node scope if missing
for _, metric := range scopes {
if strings.Contains(metric.Unit.Base, "B/s") ||
strings.Contains(metric.Unit.Base, "F/s") ||
strings.Contains(metric.Unit.Base, "B") {
// get overall avg
sum := 0.0
for _, s := range metric.Series {
sum += s.Statistics.Avg
}
avg := sum / float64(len(metric.Series))
f, p := Normalize(avg, metric.Unit.Prefix)
if p != metric.Unit.Prefix {
fmt.Printf("Convert %e", f)
// for _, s := range metric.Series {
// fp := schema.ConvertFloatToFloat64(s.Data)
//
// for i := 0; i < len(fp); i++ {
// fp[i] *= f
// fp[i] = math.Ceil(fp[i])
// }
//
// s.Data = schema.GetFloat64ToFloat(fp)
// }
metric.Unit.Prefix = p
}
}
}
}
return nil
}

View File

@ -0,0 +1,58 @@
// Copyright (C) 2022 NHR@FAU, University Erlangen-Nuremberg.
// All rights reserved.
// Use of this source code is governed by a MIT-style
// license that can be found in the LICENSE file.
package importer
import (
"math"
ccunits "github.com/ClusterCockpit/cc-units"
)
func getNormalizationFactor(v float64) (float64, int) {
count := 0
scale := -3
if v > 1000.0 {
for v > 1000.0 {
v *= 1e-3
count++
}
} else {
for v < 1.0 {
v *= 1e3
count++
}
scale = 3
}
return math.Pow10(count * scale), count * scale
}
func getExponent(p float64) int {
count := 0
for p > 1.0 {
p = p / 1000.0
count++
}
return count * 3
}
func newPrefixFromFactor(op ccunits.Prefix, e int) ccunits.Prefix {
f := float64(op)
exp := math.Pow10(getExponent(f) - e)
return ccunits.Prefix(exp)
}
func Normalize(avg float64, p string) (float64, string) {
f, e := getNormalizationFactor(avg)
if e != 0 {
np := newPrefixFromFactor(ccunits.NewPrefix(p), e)
return f, np.Prefix()
}
return f, p
}
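// A worked example of how the helpers above combine (a sketch, assuming it
// runs inside this package; cc-units prefixes are powers of 1000, so
// ccunits.NewPrefix("") has the value 1):
//
//	// getNormalizationFactor(2.0e11) divides by 1000 three times and
//	// returns (math.Pow10(3*-3), -9) == (1e-9, -9); newPrefixFromFactor
//	// then computes math.Pow10(0-(-9)) == 1e9, i.e. the "G" prefix.
//	f, p := Normalize(2.0e11, "")
//	fmt.Printf("%.0f %sB/s\n", 2.0e11*f, p) // prints: 200 GB/s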

View File

@ -0,0 +1,64 @@
// Copyright (C) 2022 NHR@FAU, University Erlangen-Nuremberg.
// All rights reserved.
// Use of this source code is governed by a MIT-style
// license that can be found in the LICENSE file.
package importer
import (
"fmt"
"testing"
ccunits "github.com/ClusterCockpit/cc-units"
)
func TestNormalizeFactor(t *testing.T) {
// var us string
s := []float64{2890031237, 23998994567, 389734042344, 390349424345}
// r := []float64{3, 24, 390, 391}
total := 0.0
for _, number := range s {
total += number
}
avg := total / float64(len(s))
fmt.Printf("AVG: %e\n", avg)
f, e := getNormalizationFactor(avg)
fmt.Printf("Factor %e Count %d\n", f, e)
np := ccunits.NewPrefix("")
fmt.Printf("Prefix %e Short %s\n", float64(np), np.Prefix())
p := newPrefixFromFactor(np, e)
if p.Prefix() != "G" {
t.Errorf("Failed Prefix or unit: Want G, Got %s", p.Prefix())
}
}
func TestNormalizeKeep(t *testing.T) {
s := []float64{3.0, 24.0, 390.0, 391.0}
total := 0.0
for _, number := range s {
total += number
}
avg := total / float64(len(s))
fmt.Printf("AVG: %e\n", avg)
f, e := getNormalizationFactor(avg)
fmt.Printf("Factor %e Count %d\n", f, e)
np := ccunits.NewPrefix("G")
fmt.Printf("Prefix %e Short %s\n", float64(np), np.Prefix())
p := newPrefixFromFactor(np, e)
if p.Prefix() != "G" {
t.Errorf("Failed Prefix or unit: Want G, Got %s", p.Prefix())
}
}

View File

@ -0,0 +1,746 @@
{
"name": "fritz",
"metricConfig": [
{
"name": "cpu_load",
"unit": {
"base": ""
},
"scope": "node",
"aggregation": "avg",
"timestep": 60,
"peak": 72,
"normal": 72,
"caution": 36,
"alert": 20
},
{
"name": "cpu_user",
"unit": {
"base": ""
},
"scope": "hwthread",
"aggregation": "avg",
"timestep": 60,
"peak": 100,
"normal": 50,
"caution": 20,
"alert": 10
},
{
"name": "mem_used",
"unit": {
"base": "B",
"prefix": "G"
},
"scope": "node",
"aggregation": "sum",
"timestep": 60,
"peak": 256,
"normal": 128,
"caution": 200,
"alert": 240
},
{
"name": "flops_any",
"unit": {
"base": "F/s",
"prefix": "G"
},
"scope": "hwthread",
"aggregation": "sum",
"timestep": 60,
"peak": 5600,
"normal": 1000,
"caution": 200,
"alert": 50
},
{
"name": "flops_sp",
"unit": {
"base": "F/s",
"prefix": "G"
},
"scope": "hwthread",
"aggregation": "sum",
"timestep": 60,
"peak": 5600,
"normal": 1000,
"caution": 200,
"alert": 50
},
{
"name": "flops_dp",
"unit": {
"base": "F/s",
"prefix": "G"
},
"scope": "hwthread",
"aggregation": "sum",
"timestep": 60,
"peak": 2300,
"normal": 500,
"caution": 100,
"alert": 50
},
{
"name": "mem_bw",
"unit": {
"base": "B/s",
"prefix": "G"
},
"scope": "socket",
"aggregation": "sum",
"timestep": 60,
"peak": 350,
"normal": 100,
"caution": 50,
"alert": 10
},
{
"name": "clock",
"unit": {
"base": "Hz",
"prefix": "M"
},
"scope": "hwthread",
"aggregation": "avg",
"timestep": 60,
"peak": 3000,
"normal": 2400,
"caution": 1800,
"alert": 1200
},
{
"name": "cpu_power",
"unit": {
"base": "W"
},
"scope": "socket",
"aggregation": "sum",
"timestep": 60,
"peak": 500,
"normal": 250,
"caution": 100,
"alert": 50
},
{
"name": "mem_power",
"unit": {
"base": "W"
},
"scope": "socket",
"aggregation": "sum",
"timestep": 60,
"peak": 100,
"normal": 50,
"caution": 20,
"alert": 10
},
{
"name": "ipc",
"unit": {
"base": "IPC"
},
"scope": "hwthread",
"aggregation": "avg",
"timestep": 60,
"peak": 4,
"normal": 2,
"caution": 1,
"alert": 0.5
},
{
"name": "vectorization_ratio",
"unit": {
"base": ""
},
"scope": "hwthread",
"aggregation": "avg",
"timestep": 60,
"peak": 100,
"normal": 60,
"caution": 40,
"alert": 10
},
{
"name": "ib_recv",
"unit": {
"base": "B/s"
},
"scope": "node",
"aggregation": "sum",
"timestep": 60,
"peak": 1250000,
"normal": 6000000,
"caution": 200,
"alert": 1
},
{
"name": "ib_xmit",
"unit": {
"base": "B/s"
},
"scope": "node",
"aggregation": "sum",
"timestep": 60,
"peak": 1250000,
"normal": 6000000,
"caution": 200,
"alert": 1
},
{
"name": "ib_recv_pkts",
"unit": {
"base": ""
},
"scope": "node",
"aggregation": "sum",
"timestep": 60,
"peak": 6,
"normal": 4,
"caution": 2,
"alert": 1
},
{
"name": "ib_xmit_pkts",
"unit": {
"base": ""
},
"scope": "node",
"aggregation": "sum",
"timestep": 60,
"peak": 6,
"normal": 4,
"caution": 2,
"alert": 1
},
{
"name": "nfs4_read",
"unit": {
"base": "B/s",
"prefix": "M"
},
"scope": "node",
"aggregation": "sum",
"timestep": 60,
"peak": 6,
"normal": 4,
"caution": 2,
"alert": 1
},
{
"name": "nfs4_write",
"unit": {
"base": "B/s",
"prefix": "M"
},
"scope": "node",
"aggregation": "sum",
"timestep": 60,
"peak": 6,
"normal": 4,
"caution": 2,
"alert": 1
},
{
"name": "nfs4_total",
"unit": {
"base": "B/s",
"prefix": "M"
},
"scope": "node",
"aggregation": "sum",
"timestep": 60,
"peak": 6,
"normal": 4,
"caution": 2,
"alert": 1
}
],
"subClusters": [
{
"name": "main",
"nodes": "f01[01-88],f02[01-88],f03[01-88],f03[01-88],f04[01-88],f05[01-88],f06[01-88],f07[01-88],f08[01-88],f09[01-88],f10[01-88],f11[01-56],f12[01-56]",
"processorType": "Intel Icelake",
"socketsPerNode": 2,
"coresPerSocket": 36,
"threadsPerCore": 1,
"flopRateScalar": {
"unit": {
"base": "F/s",
"prefix": "G"
},
"value": 432
},
"flopRateSimd": {
"unit": {
"base": "F/s",
"prefix": "G"
},
"value": 9216
},
"memoryBandwidth": {
"unit": {
"base": "B/s",
"prefix": "G"
},
"value": 350
},
"topology": {
"node": [
0,
1,
2,
3,
4,
5,
6,
7,
8,
9,
10,
11,
12,
13,
14,
15,
16,
17,
18,
19,
20,
21,
22,
23,
24,
25,
26,
27,
28,
29,
30,
31,
32,
33,
34,
35,
36,
37,
38,
39,
40,
41,
42,
43,
44,
45,
46,
47,
48,
49,
50,
51,
52,
53,
54,
55,
56,
57,
58,
59,
60,
61,
62,
63,
64,
65,
66,
67,
68,
69,
70,
71
],
"socket": [
[
0,
1,
2,
3,
4,
5,
6,
7,
8,
9,
10,
11,
12,
13,
14,
15,
16,
17,
18,
19,
20,
21,
22,
23,
24,
25,
26,
27,
28,
29,
30,
31,
32,
33,
34,
35
],
[
36,
37,
38,
39,
40,
41,
42,
43,
44,
45,
46,
47,
48,
49,
50,
51,
52,
53,
54,
55,
56,
57,
58,
59,
60,
61,
62,
63,
64,
65,
66,
67,
68,
69,
70,
71
]
],
"memoryDomain": [
[
0,
1,
2,
3,
4,
5,
6,
7,
8,
9,
10,
11,
12,
13,
14,
15,
16,
17
],
[
18,
19,
20,
21,
22,
23,
24,
25,
26,
27,
28,
29,
30,
31,
32,
33,
34,
35
],
[
36,
37,
38,
39,
40,
41,
42,
43,
44,
45,
46,
47,
48,
49,
50,
51,
52,
53
],
[
54,
55,
56,
57,
58,
59,
60,
61,
62,
63,
64,
65,
66,
67,
68,
69,
70,
71
]
],
"core": [
[
0
],
[
1
],
[
2
],
[
3
],
[
4
],
[
5
],
[
6
],
[
7
],
[
8
],
[
9
],
[
10
],
[
11
],
[
12
],
[
13
],
[
14
],
[
15
],
[
16
],
[
17
],
[
18
],
[
19
],
[
20
],
[
21
],
[
22
],
[
23
],
[
24
],
[
25
],
[
26
],
[
27
],
[
28
],
[
29
],
[
30
],
[
31
],
[
32
],
[
33
],
[
34
],
[
35
],
[
36
],
[
37
],
[
38
],
[
39
],
[
40
],
[
41
],
[
42
],
[
43
],
[
44
],
[
45
],
[
46
],
[
47
],
[
48
],
[
49
],
[
50
],
[
51
],
[
52
],
[
53
],
[
54
],
[
55
],
[
56
],
[
57
],
[
58
],
[
59
],
[
60
],
[
61
],
[
62
],
[
63
],
[
64
],
[
65
],
[
66
],
[
67
],
[
68
],
[
69
],
[
70
],
[
71
]
]
}
}
]
}

File diff suppressed because one or more lines are too long

File diff suppressed because one or more lines are too long

View File

@ -0,0 +1,6 @@
{
"jobId": 398955,
"cluster": "fritz",
"startTime": 1675956725,
"duration": 260
}

View File

@ -0,0 +1,6 @@
{
"jobId": 398764,
"cluster": "fritz",
"startTime": 1675954353,
"duration": 177
}

View File

@ -0,0 +1 @@
{"jobId":398955,"user":"k106eb10","project":"k106eb","cluster":"fritz","subCluster":"main","partition":"singlenode","arrayJobId":0,"numNodes":1,"numHwthreads":72,"numAcc":0,"exclusive":1,"monitoringStatus":1,"smt":0,"jobState":"completed","duration":260,"walltime":86340,"resources":[{"hostname":"f0720"}],"metaData":{"jobName":"ams_pipeline","jobScript":"#!/bin/bash -l\n#SBATCH --job-name=ams_pipeline\n#SBATCH --time=23:59:00\n#SBATCH --partition=singlenode\n#SBATCH --ntasks=72\n#SBATCH --hint=multithread\n#SBATCH --chdir=/home/atuin/k106eb/k106eb10/ACE/Ni-Al/DFT/VASP_PBE_500_0.125_0.1_NM/AlNi/binaries/bulk/base-hcp/occ-shaken/hcp16.occ.4.shake.0/cfg/NiAl3NiAl11\n#SBATCH --export=NONE\nunset SLURM_EXPORT_ENV\nuss=$(whoami)\nfind /dev/shm/ -user $uss -type f -mmin +30 -delete\ncd \"/home/atuin/k106eb/k106eb10/ACE/Ni-Al/DFT/VASP_PBE_500_0.125_0.1_NM/AlNi/binaries/bulk/base-hcp/occ-shaken/hcp16.occ.4.shake.0/cfg/NiAl3NiAl11\"\nams_pipeline pipeline.json \u003e \"/home/atuin/k106eb/k106eb10/ACE/Ni-Al/DFT/VASP_PBE_500_0.125_0.1_NM/AlNi/binaries/bulk/base-hcp/occ-shaken/hcp16.occ.4.shake.0/cfg/NiAl3NiAl11/ams_pipeline_job.sh.out\" 2\u003e \"/home/atuin/k106eb/k106eb10/ACE/Ni-Al/DFT/VASP_PBE_500_0.125_0.1_NM/AlNi/binaries/bulk/base-hcp/occ-shaken/hcp16.occ.4.shake.0/cfg/NiAl3NiAl11/ams_pipeline_job.sh.err\"\n","slurmInfo":"\nJobId=398955 JobName=ams_pipeline\n UserId=k106eb10(210387) GroupId=80111\n Account=k106eb QOS=normal \n Requeue=False Restarts=0 BatchFlag=True \n TimeLimit=1439\n SubmitTime=2023-02-09T14:11:22\n Partition=singlenode \n NodeList=f0720\n NumNodes=1 NumCPUs=72 NumTasks=72 CPUs/Task=1\n NTasksPerNode:Socket:Core=0:None:None\n TRES_req=cpu=72,mem=250000M,node=1,billing=72\n TRES_alloc=cpu=72,node=1,billing=72\n Command=/home/atuin/k106eb/k106eb10/ACE/Ni-Al/DFT/VASP_PBE_500_0.125_0.1_NM/AlNi/binaries/bulk/base-hcp/occ-shaken/hcp16.occ.4.shake.0/cfg/NiAl3NiAl11/ams_pipeline_job.sh\n WorkDir=/home/atuin/k106eb/k106eb10/ACE/Ni-Al/DFT/VASP_PBE_500_0.125_0.1_NM/AlNi/binaries/bulk/base-hcp/occ-shaken/hcp16.occ.4.shake.0/cfg/NiAl3NiAl11\n StdErr=\n 
StdOut=ams_pipeline.o%j\n"},"startTime":1675956725,"statistics":{"clock":{"unit":{"base":"Hz","prefix":"M"},"avg":2335.254,"min":800.418,"max":2734.922},"cpu_load":{"unit":{"base":""},"avg":52.72,"min":34.46,"max":71.91},"cpu_power":{"unit":{"base":"W"},"avg":407.767,"min":93.932,"max":497.636},"cpu_user":{"unit":{"base":""},"avg":63.678,"min":19.872,"max":96.633},"flops_any":{"unit":{"base":"F/s","prefix":"G"},"avg":635.672,"min":0,"max":1332.874},"flops_dp":{"unit":{"base":"F/s","prefix":"G"},"avg":261.006,"min":0,"max":382.294},"flops_sp":{"unit":{"base":"F/s","prefix":"G"},"avg":113.659,"min":0,"max":568.286},"ib_recv":{"unit":{"base":"B/s"},"avg":27981.111,"min":69.4,"max":48084.589},"ib_recv_pkts":{"unit":{"base":"packets/s"},"avg":398.939,"min":0.5,"max":693.817},"ib_xmit":{"unit":{"base":"B/s"},"avg":188.513,"min":39.597,"max":724.568},"ib_xmit_pkts":{"unit":{"base":"packets/s"},"avg":0.867,"min":0.2,"max":2.933},"ipc":{"unit":{"base":"IPC"},"avg":0.944,"min":0.564,"max":1.291},"mem_bw":{"unit":{"base":"B/s","prefix":"G"},"avg":79.565,"min":0.021,"max":116.02},"mem_power":{"unit":{"base":"W"},"avg":24.692,"min":7.883,"max":31.318},"mem_used":{"unit":{"base":"B","prefix":"G"},"avg":22.566,"min":8.225,"max":27.613},"nfs4_read":{"unit":{"base":"B/s","prefix":"M"},"avg":647,"min":0,"max":1946},"nfs4_total":{"unit":{"base":"B/s","prefix":"M"},"avg":6181.6,"min":1270,"max":11411},"nfs4_write":{"unit":{"base":"B/s","prefix":"M"},"avg":22.4,"min":11,"max":29},"vectorization_ratio":{"unit":{"base":"%"},"avg":77.351,"min":0,"max":98.837}}}

View File

@ -0,0 +1 @@
{"jobId":398764,"user":"k106eb10","project":"k106eb","cluster":"fritz","subCluster":"main","numNodes":1,"exclusive":1,"jobState":"completed","duration":177,"resources":[{"hostname":"f0649"}],"startTime":1675954353,"statistics":{"clock":{"unit":{"base":"Hz","prefix":"M"},"avg":1336.519,"min":801.564,"max":2348.215},"cpu_load":{"unit":{"base":""},"avg":31.64,"min":17.36,"max":45.54},"cpu_power":{"unit":{"base":"W"},"avg":150.018,"min":93.672,"max":261.592},"cpu_user":{"unit":{"base":""},"avg":28.518,"min":0.09,"max":57.343},"flops_any":{"unit":{"base":"F/s","prefix":"G"},"avg":45.012,"min":0,"max":135.037},"flops_dp":{"unit":{"base":"F/s","prefix":"G"},"avg":22.496,"min":0,"max":67.488},"flops_sp":{"unit":{"base":"F/s","prefix":"G"},"avg":0.02,"min":0,"max":0.061},"ib_recv":{"unit":{"base":"B/s"},"avg":14442.82,"min":219.998,"max":42581.368},"ib_recv_pkts":{"unit":{"base":"packets/s"},"avg":201.532,"min":1.25,"max":601.345},"ib_xmit":{"unit":{"base":"B/s"},"avg":282.098,"min":56.2,"max":569.363},"ib_xmit_pkts":{"unit":{"base":"packets/s"},"avg":1.228,"min":0.433,"max":2},"ipc":{"unit":{"base":"IPC"},"avg":0.77,"min":0.564,"max":0.906},"mem_bw":{"unit":{"base":"B/s","prefix":"G"},"avg":4.872,"min":0.025,"max":14.552},"mem_power":{"unit":{"base":"W"},"avg":7.725,"min":6.286,"max":10.556},"mem_used":{"unit":{"base":"B","prefix":"G"},"avg":6.162,"min":6.103,"max":6.226},"nfs4_read":{"unit":{"base":"B/s","prefix":"M"},"avg":1045.333,"min":311,"max":1525},"nfs4_total":{"unit":{"base":"B/s","prefix":"M"},"avg":6430,"min":2796,"max":11518},"nfs4_write":{"unit":{"base":"B/s","prefix":"M"},"avg":24.333,"min":0,"max":38},"vectorization_ratio":{"unit":{"base":"%"},"avg":25.528,"min":0,"max":76.585}}}

View File

@ -293,7 +293,7 @@ func (ccms *CCMetricStore) buildQueries(
scopesLoop:
	for _, requestedScope := range scopes {
		nativeScope := mc.Scope
-		if nativeScope == schema.MetricScopeAccelerator && job.NumAcc == nil {
+		if nativeScope == schema.MetricScopeAccelerator && job.NumAcc == 0 {
			continue
		}

View File

@ -56,7 +56,10 @@ func Connect(driver string, db string) {
		}
		dbConnInstance = &DBConnection{DB: dbHandle, Driver: driver}
-		checkDBVersion(driver, dbHandle.DB)
+		err = checkDBVersion(driver, dbHandle.DB)
+		if err != nil {
+			log.Fatal(err)
+		}
	})
}

View File

@ -1,351 +0,0 @@
// Copyright (C) 2022 NHR@FAU, University Erlangen-Nuremberg.
// All rights reserved.
// Use of this source code is governed by a MIT-style
// license that can be found in the LICENSE file.
package repository
import (
"bytes"
"database/sql"
"encoding/json"
"fmt"
"os"
"strings"
"time"
"github.com/ClusterCockpit/cc-backend/internal/config"
"github.com/ClusterCockpit/cc-backend/pkg/archive"
"github.com/ClusterCockpit/cc-backend/pkg/log"
"github.com/ClusterCockpit/cc-backend/pkg/schema"
"github.com/ClusterCockpit/cc-backend/pkg/units"
)
const NamedJobInsert string = `INSERT INTO job (
job_id, user, project, cluster, subcluster, ` + "`partition`" + `, array_job_id, num_nodes, num_hwthreads, num_acc,
exclusive, monitoring_status, smt, job_state, start_time, duration, walltime, resources, meta_data,
mem_used_max, flops_any_avg, mem_bw_avg, load_avg, net_bw_avg, net_data_vol_total, file_bw_avg, file_data_vol_total
) VALUES (
:job_id, :user, :project, :cluster, :subcluster, :partition, :array_job_id, :num_nodes, :num_hwthreads, :num_acc,
:exclusive, :monitoring_status, :smt, :job_state, :start_time, :duration, :walltime, :resources, :meta_data,
:mem_used_max, :flops_any_avg, :mem_bw_avg, :load_avg, :net_bw_avg, :net_data_vol_total, :file_bw_avg, :file_data_vol_total
);`
// Import all jobs specified as `<path-to-meta.json>:<path-to-data.json>,...`
func HandleImportFlag(flag string) error {
for _, pair := range strings.Split(flag, ",") {
files := strings.Split(pair, ":")
if len(files) != 2 {
return fmt.Errorf("REPOSITORY/INIT > invalid import flag format")
}
raw, err := os.ReadFile(files[0])
if err != nil {
log.Warn("Error while reading metadata file for import")
return err
}
if config.Keys.Validate {
if err := schema.Validate(schema.Meta, bytes.NewReader(raw)); err != nil {
return fmt.Errorf("REPOSITORY/INIT > validate job meta: %v", err)
}
}
dec := json.NewDecoder(bytes.NewReader(raw))
dec.DisallowUnknownFields()
jobMeta := schema.JobMeta{BaseJob: schema.JobDefaults}
if err := dec.Decode(&jobMeta); err != nil {
log.Warn("Error while decoding raw json metadata for import")
return err
}
raw, err = os.ReadFile(files[1])
if err != nil {
log.Warn("Error while reading jobdata file for import")
return err
}
if config.Keys.Validate {
if err := schema.Validate(schema.Data, bytes.NewReader(raw)); err != nil {
return fmt.Errorf("REPOSITORY/INIT > validate job data: %v", err)
}
}
dec = json.NewDecoder(bytes.NewReader(raw))
dec.DisallowUnknownFields()
jobData := schema.JobData{}
if err := dec.Decode(&jobData); err != nil {
log.Warn("Error while decoding raw json jobdata for import")
return err
}
checkJobData(&jobData)
SanityChecks(&jobMeta.BaseJob)
jobMeta.MonitoringStatus = schema.MonitoringStatusArchivingSuccessful
if job, err := GetJobRepository().Find(&jobMeta.JobID, &jobMeta.Cluster, &jobMeta.StartTime); err != sql.ErrNoRows {
if err != nil {
log.Warn("Error while finding job in jobRepository")
return err
}
return fmt.Errorf("REPOSITORY/INIT > a job with that jobId, cluster and startTime does already exist (dbid: %d)", job.ID)
}
job := schema.Job{
BaseJob: jobMeta.BaseJob,
StartTime: time.Unix(jobMeta.StartTime, 0),
StartTimeUnix: jobMeta.StartTime,
}
// TODO: Other metrics...
job.FlopsAnyAvg = loadJobStat(&jobMeta, "flops_any")
job.MemBwAvg = loadJobStat(&jobMeta, "mem_bw")
job.NetBwAvg = loadJobStat(&jobMeta, "net_bw")
job.FileBwAvg = loadJobStat(&jobMeta, "file_bw")
job.RawResources, err = json.Marshal(job.Resources)
if err != nil {
log.Warn("Error while marshaling job resources")
return err
}
job.RawMetaData, err = json.Marshal(job.MetaData)
if err != nil {
log.Warn("Error while marshaling job metadata")
return err
}
if err := SanityChecks(&job.BaseJob); err != nil {
log.Warn("BaseJob SanityChecks failed")
return err
}
if err := archive.GetHandle().ImportJob(&jobMeta, &jobData); err != nil {
log.Error("Error while importing job")
return err
}
res, err := GetConnection().DB.NamedExec(NamedJobInsert, job)
if err != nil {
log.Warn("Error while NamedJobInsert")
return err
}
id, err := res.LastInsertId()
if err != nil {
log.Warn("Error while getting last insert ID")
return err
}
for _, tag := range job.Tags {
if _, err := GetJobRepository().AddTagOrCreate(id, tag.Type, tag.Name); err != nil {
log.Error("Error while adding or creating tag")
return err
}
}
log.Infof("successfully imported a new job (jobId: %d, cluster: %s, dbid: %d)", job.JobID, job.Cluster, id)
}
return nil
}
// Delete the tables "job", "tag" and "jobtag" from the database and
// repopulate them using the jobs found in `archive`.
func InitDB() error {
db := GetConnection()
starttime := time.Now()
log.Print("Building job table...")
// Inserts are bundled into transactions because in sqlite,
// that speeds up inserts A LOT.
tx, err := db.DB.Beginx()
if err != nil {
log.Warn("Error while bundling transactions")
return err
}
stmt, err := tx.PrepareNamed(NamedJobInsert)
if err != nil {
log.Warn("Error while preparing namedJobInsert")
return err
}
tags := make(map[string]int64)
// Not using log.Print because we want the line to end with `\r` and
// this function is only ever called when a special command line flag
// is passed anyways.
fmt.Printf("%d jobs inserted...\r", 0)
ar := archive.GetHandle()
i := 0
errorOccured := 0
for jobContainer := range ar.Iter(false) {
jobMeta := jobContainer.Meta
// // Bundle 100 inserts into one transaction for better performance:
if i%10 == 0 {
if tx != nil {
if err := tx.Commit(); err != nil {
log.Warn("Error while committing transactions for jobMeta")
return err
}
}
tx, err = db.DB.Beginx()
if err != nil {
log.Warn("Error while bundling transactions for jobMeta")
return err
}
stmt = tx.NamedStmt(stmt)
fmt.Printf("%d jobs inserted...\r", i)
}
jobMeta.MonitoringStatus = schema.MonitoringStatusArchivingSuccessful
job := schema.Job{
BaseJob: jobMeta.BaseJob,
StartTime: time.Unix(jobMeta.StartTime, 0),
StartTimeUnix: jobMeta.StartTime,
}
// TODO: Other metrics...
job.FlopsAnyAvg = loadJobStat(jobMeta, "flops_any")
job.MemBwAvg = loadJobStat(jobMeta, "mem_bw")
job.NetBwAvg = loadJobStat(jobMeta, "net_bw")
job.FileBwAvg = loadJobStat(jobMeta, "file_bw")
job.RawResources, err = json.Marshal(job.Resources)
if err != nil {
log.Errorf("repository initDB(): %v", err)
errorOccured++
continue
}
job.RawMetaData, err = json.Marshal(job.MetaData)
if err != nil {
log.Errorf("repository initDB(): %v", err)
errorOccured++
continue
}
if err := SanityChecks(&job.BaseJob); err != nil {
log.Errorf("repository initDB(): %v", err)
errorOccured++
continue
}
res, err := stmt.Exec(job)
if err != nil {
log.Errorf("repository initDB(): %v", err)
errorOccured++
continue
}
id, err := res.LastInsertId()
if err != nil {
log.Errorf("repository initDB(): %v", err)
errorOccured++
continue
}
for _, tag := range job.Tags {
tagstr := tag.Name + ":" + tag.Type
tagId, ok := tags[tagstr]
if !ok {
res, err := tx.Exec(`INSERT INTO tag (tag_name, tag_type) VALUES (?, ?)`, tag.Name, tag.Type)
if err != nil {
log.Errorf("Error while inserting tag into tag table: %v (Type %v)", tag.Name, tag.Type)
return err
}
tagId, err = res.LastInsertId()
if err != nil {
log.Warn("Error while getting last insert ID")
return err
}
tags[tagstr] = tagId
}
if _, err := tx.Exec(`INSERT INTO jobtag (job_id, tag_id) VALUES (?, ?)`, id, tagId); err != nil {
log.Errorf("Error while inserting jobtag into jobtag table: %v (TagID %v)", id, tagId)
return err
}
}
if err == nil {
i += 1
}
}
if errorOccured > 0 {
log.Warnf("Error in import of %d jobs!", errorOccured)
}
if err := tx.Commit(); err != nil {
log.Warn("Error while committing SQL transactions")
return err
}
log.Printf("A total of %d jobs have been registered in %.3f seconds.\n", i, time.Since(starttime).Seconds())
return nil
}
// This function also sets the subcluster if necessary!
func SanityChecks(job *schema.BaseJob) error {
if c := archive.GetCluster(job.Cluster); c == nil {
return fmt.Errorf("no such cluster: %v", job.Cluster)
}
if err := archive.AssignSubCluster(job); err != nil {
log.Warn("Error while assigning subcluster to job")
return err
}
if !job.State.Valid() {
return fmt.Errorf("not a valid job state: %v", job.State)
}
if len(job.Resources) == 0 || len(job.User) == 0 {
return fmt.Errorf("'resources' and 'user' should not be empty")
}
if *job.NumAcc < 0 || *job.NumHWThreads < 0 || job.NumNodes < 1 {
return fmt.Errorf("'numNodes', 'numAcc' or 'numHWThreads' invalid")
}
if len(job.Resources) != int(job.NumNodes) {
return fmt.Errorf("len(resources) does not equal numNodes (%d vs %d)", len(job.Resources), job.NumNodes)
}
return nil
}
func loadJobStat(job *schema.JobMeta, metric string) float64 {
if stats, ok := job.Statistics[metric]; ok {
return stats.Avg
}
return 0.0
}
func checkJobData(d *schema.JobData) error {
for _, scopes := range *d {
var newUnit string
// Add node scope if missing
for _, metric := range scopes {
if strings.Contains(metric.Unit.Base, "B/s") ||
strings.Contains(metric.Unit.Base, "F/s") ||
strings.Contains(metric.Unit.Base, "B") {
// First get overall avg
sum := 0.0
for _, s := range metric.Series {
sum += s.Statistics.Avg
}
avg := sum / float64(len(metric.Series))
for _, s := range metric.Series {
fp := schema.ConvertFloatToFloat64(s.Data)
// Normalize values with new unit prefix
oldUnit := metric.Unit.Base
units.NormalizeSeries(fp, avg, oldUnit, &newUnit)
s.Data = schema.GetFloat64ToFloat(fp)
}
metric.Unit.Base = newUnit
}
}
}
return nil
}

View File

@ -96,6 +96,50 @@ func scanJob(row interface{ Scan(...interface{}) error }) (*schema.Job, error) {
	return job, nil
}
func (r *JobRepository) Optimize() error {
var err error
switch r.driver {
case "sqlite3":
if _, err = r.DB.Exec(`VACUUM`); err != nil {
return err
}
case "mysql":
log.Info("Optimize currently not supported for mysql driver")
}
return nil
}
func (r *JobRepository) Flush() error {
var err error
switch r.driver {
case "sqlite3":
if _, err = r.DB.Exec(`DELETE FROM jobtag`); err != nil {
return err
}
if _, err = r.DB.Exec(`DELETE FROM tag`); err != nil {
return err
}
if _, err = r.DB.Exec(`DELETE FROM job`); err != nil {
return err
}
case "mysql":
if _, err = r.DB.Exec(`TRUNCATE TABLE jobtag`); err != nil {
return err
}
if _, err = r.DB.Exec(`TRUNCATE TABLE tag`); err != nil {
return err
}
if _, err = r.DB.Exec(`TRUNCATE TABLE job`); err != nil {
return err
}
}
return nil
}
func scanJobLink(row interface{ Scan(...interface{}) error }) (*model.JobLink, error) {
	jobLink := &model.JobLink{}
	if err := row.Scan(
@ -548,7 +592,7 @@ func (r *JobRepository) FindUserOrProjectOrJobname(ctx context.Context, searchte
func (r *JobRepository) FindColumnValue(user *auth.User, searchterm string, table string, selectColumn string, whereColumn string, isLike bool) (result string, err error) {
	compareStr := " = ?"
	query := searchterm
-	if isLike == true {
+	if isLike {
		compareStr = " LIKE ?"
		query = "%" + searchterm + "%"
	}
@ -689,6 +733,38 @@ func (r *JobRepository) StopJobsExceedingWalltimeBy(seconds int) error {
	return nil
}
func (r *JobRepository) FindJobsBefore(startTime int64) ([]*schema.Job, error) {
query := sq.Select(jobColumns...).From("job").Where(fmt.Sprintf(
"job.start_time < %d", startTime))
sql, args, err := query.ToSql()
if err != nil {
log.Warn("Error while converting query to sql")
return nil, err
}
log.Debugf("SQL query: `%s`, args: %#v", sql, args)
rows, err := query.RunWith(r.stmtCache).Query()
if err != nil {
log.Error("Error while running query")
return nil, err
}
jobs := make([]*schema.Job, 0, 50)
for rows.Next() {
job, err := scanJob(rows)
if err != nil {
rows.Close()
log.Warn("Error while scanning rows")
return nil, err
}
jobs = append(jobs, job)
}
return jobs, nil
}
// GraphQL validation should make sure that no unkown values can be specified.
var groupBy2column = map[model.Aggregate]string{
	model.AggregateUser: "job.user",
@ -706,9 +782,10 @@ func (r *JobRepository) JobsStatistics(ctx context.Context,
	stats := map[string]*model.JobsStatistics{}

	var castType string
-	if r.driver == "sqlite3" {
+	switch r.driver {
+	case "sqlite3":
		castType = "int"
-	} else if r.driver == "mysql" {
+	case "mysql":
		castType = "unsigned"
	}
@ -890,7 +967,6 @@ func (r *JobRepository) jobsStatisticsHistogram(ctx context.Context,
	value string, filters []*model.JobFilter, id, col string) ([]*model.HistoPoint, error) {
	start := time.Now()
-	query := sq.Select(value, "COUNT(job.id) AS count").From("job")
	query, qerr := SecurityCheck(ctx, sq.Select(value, "COUNT(job.id) AS count").From("job"))
	if qerr != nil {
@ -924,3 +1000,121 @@ func (r *JobRepository) jobsStatisticsHistogram(ctx context.Context,
	log.Infof("Timer jobsStatisticsHistogram %s", time.Since(start))
	return points, nil
}
const NamedJobInsert string = `INSERT INTO job (
job_id, user, project, cluster, subcluster, ` + "`partition`" + `, array_job_id, num_nodes, num_hwthreads, num_acc,
exclusive, monitoring_status, smt, job_state, start_time, duration, walltime, resources, meta_data,
mem_used_max, flops_any_avg, mem_bw_avg, load_avg, net_bw_avg, net_data_vol_total, file_bw_avg, file_data_vol_total
) VALUES (
:job_id, :user, :project, :cluster, :subcluster, :partition, :array_job_id, :num_nodes, :num_hwthreads, :num_acc,
:exclusive, :monitoring_status, :smt, :job_state, :start_time, :duration, :walltime, :resources, :meta_data,
:mem_used_max, :flops_any_avg, :mem_bw_avg, :load_avg, :net_bw_avg, :net_data_vol_total, :file_bw_avg, :file_data_vol_total
);`
func (r *JobRepository) InsertJob(job *schema.Job) (int64, error) {
res, err := r.DB.NamedExec(NamedJobInsert, job)
if err != nil {
log.Warn("Error while NamedJobInsert")
return 0, err
}
id, err := res.LastInsertId()
if err != nil {
log.Warn("Error while getting last insert ID")
return 0, err
}
return id, nil
}
type Transaction struct {
tx *sqlx.Tx
stmt *sqlx.NamedStmt
}
func (r *JobRepository) TransactionInit() (*Transaction, error) {
var err error
t := new(Transaction)
// Inserts are bundled into transactions because in sqlite,
// that speeds up inserts A LOT.
t.tx, err = r.DB.Beginx()
if err != nil {
log.Warn("Error while bundling transactions")
return nil, err
}
t.stmt, err = t.tx.PrepareNamed(NamedJobInsert)
if err != nil {
log.Warn("Error while preparing namedJobInsert")
return nil, err
}
return t, nil
}
func (r *JobRepository) TransactionCommit(t *Transaction) error {
var err error
if t.tx != nil {
if err = t.tx.Commit(); err != nil {
log.Warn("Error while committing transactions")
return err
}
}
t.tx, err = r.DB.Beginx()
if err != nil {
log.Warn("Error while bundling transactions")
return err
}
t.stmt = t.tx.NamedStmt(t.stmt)
return nil
}
func (r *JobRepository) TransactionEnd(t *Transaction) error {
if err := t.tx.Commit(); err != nil {
log.Warn("Error while committing SQL transactions")
return err
}
return nil
}
func (r *JobRepository) TransactionAdd(t *Transaction, job schema.Job) (int64, error) {
res, err := t.stmt.Exec(job)
if err != nil {
log.Errorf("repository initDB(): %v", err)
return 0, err
}
id, err := res.LastInsertId()
if err != nil {
log.Errorf("repository initDB(): %v", err)
return 0, err
}
return id, nil
}
func (r *JobRepository) TransactionAddTag(t *Transaction, tag *schema.Tag) (int64, error) {
res, err := t.tx.Exec(`INSERT INTO tag (tag_name, tag_type) VALUES (?, ?)`, tag.Name, tag.Type)
if err != nil {
log.Errorf("Error while inserting tag into tag table: %v (Type %v)", tag.Name, tag.Type)
return 0, err
}
tagId, err := res.LastInsertId()
if err != nil {
log.Warn("Error while getting last insert ID")
return 0, err
}
return tagId, nil
}
func (r *JobRepository) TransactionSetTag(t *Transaction, jobId int64, tagId int64) error {
if _, err := t.tx.Exec(`INSERT INTO jobtag (job_id, tag_id) VALUES (?, ?)`, jobId, tagId); err != nil {
log.Errorf("Error while inserting jobtag into jobtag table: %v (TagID %v)", jobId, tagId)
return err
}
return nil
}
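// A minimal usage sketch of the transaction helpers above (hypothetical
// caller; jobsToImport is assumed). This is the same batching pattern
// importer.InitDB uses:
//
//	r := repository.GetJobRepository()
//	t, err := r.TransactionInit()
//	if err != nil {
//		log.Fatal(err)
//	}
//	for i, job := range jobsToImport {
//		if i%100 == 0 {
//			r.TransactionCommit(t) // commit the batch, re-prepare stmt on a fresh tx
//		}
//		if _, err := r.TransactionAdd(t, job); err != nil {
//			continue // skip the failed insert, as InitDB does
//		}
//	}
//	r.TransactionEnd(t)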

View File

@ -12,19 +12,21 @@ import (
	_ "github.com/mattn/go-sqlite3"
)

-func init() {
-	log.Init("info", true)
-	Connect("sqlite3", "../../test/test.db")
-}
-
func setup(t *testing.T) *JobRepository {
+	log.Init("info", true)
+	dbfilepath := "testdata/test.db"
+	err := MigrateDB("sqlite3", dbfilepath)
+	if err != nil {
+		t.Fatal(err)
+	}
+	Connect("sqlite3", dbfilepath)
	return GetJobRepository()
}
func TestFind(t *testing.T) {
	r := setup(t)

-	jobId, cluster, startTime := int64(1404396), "emmy", int64(1609299584)
+	jobId, cluster, startTime := int64(398998), "fritz", int64(1675957496)
	job, err := r.Find(&jobId, &cluster, &startTime)
	if err != nil {
		t.Fatal(err)
@ -32,7 +34,7 @@ func TestFind(t *testing.T) {
	// fmt.Printf("%+v", job)
-	if job.ID != 1366 {
+	if job.ID != 5 {
		t.Errorf("wrong summary for diagnostic 3\ngot: %d \nwant: 1366", job.JobID)
	}
}
@ -40,14 +42,14 @@ func TestFind(t *testing.T) {
func TestFindById(t *testing.T) {
	r := setup(t)

-	job, err := r.FindById(1366)
+	job, err := r.FindById(5)
	if err != nil {
		t.Fatal(err)
	}

	// fmt.Printf("%+v", job)
-	if job.JobID != 1404396 {
+	if job.JobID != 398998 {
		t.Errorf("wrong summary for diagnostic 3\ngot: %d \nwant: 1404396", job.JobID)
	}
}
@ -63,7 +65,7 @@ func TestGetTags(t *testing.T) {
	fmt.Printf("TAGS %+v \n", tags)
	// fmt.Printf("COUNTS %+v \n", counts)

-	if counts["bandwidth"] != 6 {
-		t.Errorf("wrong summary for diagnostic 3\ngot: %d \nwant: 6", counts["load-imbalance"])
+	if counts["bandwidth"] != 3 {
+		t.Errorf("wrong tag count \ngot: %d \nwant: 3", counts["bandwidth"])
	}
}

View File

@ -8,7 +8,6 @@ import (
	"database/sql"
	"embed"
	"fmt"
-	"os"

	"github.com/ClusterCockpit/cc-backend/pkg/log"
	"github.com/golang-migrate/migrate/v4"
@ -22,37 +21,37 @@ const Version uint = 3
//go:embed migrations/*
var migrationFiles embed.FS

-func checkDBVersion(backend string, db *sql.DB) {
+func checkDBVersion(backend string, db *sql.DB) error {
	var m *migrate.Migrate

-	if backend == "sqlite3" {
+	switch backend {
+	case "sqlite3":
		driver, err := sqlite3.WithInstance(db, &sqlite3.Config{})
		if err != nil {
-			log.Fatal(err)
+			return err
		}
		d, err := iofs.New(migrationFiles, "migrations/sqlite3")
		if err != nil {
-			log.Fatal(err)
+			return err
		}
		m, err = migrate.NewWithInstance("iofs", d, "sqlite3", driver)
		if err != nil {
-			log.Fatal(err)
+			return err
		}
-	} else if backend == "mysql" {
+	case "mysql":
		driver, err := mysql.WithInstance(db, &mysql.Config{})
		if err != nil {
-			log.Fatal(err)
+			return err
		}
		d, err := iofs.New(migrationFiles, "migrations/mysql")
		if err != nil {
-			log.Fatal(err)
+			return err
		}
		m, err = migrate.NewWithInstance("iofs", d, "mysql", driver)
		if err != nil {
-			log.Fatal(err)
+			return err
		}
	}
@ -61,25 +60,26 @@ func checkDBVersion(backend string, db *sql.DB) {
		if err == migrate.ErrNilVersion {
			log.Warn("Legacy database without version or missing database file!")
		} else {
-			log.Fatal(err)
+			return err
		}
	}

	if v < Version {
-		log.Warnf("Unsupported database version %d, need %d.\nPlease backup your database file and run cc-backend --migrate-db", v, Version)
-		os.Exit(0)
+		return fmt.Errorf("unsupported database version %d, need %d.\nPlease backup your database file and run cc-backend --migrate-db", v, Version)
	}
	if v > Version {
-		log.Warnf("Unsupported database version %d, need %d.\nPlease refer to documentation how to downgrade db with external migrate tool!", v, Version)
-		os.Exit(0)
+		return fmt.Errorf("unsupported database version %d, need %d.\nPlease refer to documentation how to downgrade db with external migrate tool", v, Version)
	}
+	return nil
}

-func MigrateDB(backend string, db string) {
+func MigrateDB(backend string, db string) error {
	var m *migrate.Migrate

-	if backend == "sqlite3" {
+	switch backend {
+	case "sqlite3":
		d, err := iofs.New(migrationFiles, "migrations/sqlite3")
		if err != nil {
			log.Fatal(err)
@ -87,17 +87,17 @@ func MigrateDB(backend string, db string) {
		m, err = migrate.NewWithSourceInstance("iofs", d, fmt.Sprintf("sqlite3://%s?_foreign_keys=on", db))
		if err != nil {
-			log.Fatal(err)
+			return err
		}
-	} else if backend == "mysql" {
+	case "mysql":
		d, err := iofs.New(migrationFiles, "migrations/mysql")
		if err != nil {
-			log.Fatal(err)
+			return err
		}
		m, err = migrate.NewWithSourceInstance("iofs", d, fmt.Sprintf("mysql://%s?multiStatements=true", db))
		if err != nil {
-			log.Fatal(err)
+			return err
		}
	}
@ -105,9 +105,10 @@ func MigrateDB(backend string, db string) {
		if err == migrate.ErrNoChange {
			log.Info("DB already up to date!")
		} else {
-			log.Fatal(err)
+			return err
		}
	}

	m.Close()
+	return nil
}

View File

@ -31,13 +31,15 @@ CREATE TABLE IF NOT EXISTS job (
    net_bw_avg REAL NOT NULL DEFAULT 0.0,
    net_data_vol_total REAL NOT NULL DEFAULT 0.0,
    file_bw_avg REAL NOT NULL DEFAULT 0.0,
-   file_data_vol_total REAL NOT NULL DEFAULT 0.0);
+   file_data_vol_total REAL NOT NULL DEFAULT 0.0,
+   UNIQUE (job_id, cluster, start_time)
+);

CREATE TABLE IF NOT EXISTS tag (
    id INTEGER PRIMARY KEY,
    tag_type VARCHAR(255) NOT NULL,
    tag_name VARCHAR(255) NOT NULL,
-   CONSTRAINT be_unique UNIQUE (tag_type, tag_name));
+   UNIQUE (tag_type, tag_name));

CREATE TABLE IF NOT EXISTS jobtag (
    job_id INTEGER,

View File

@ -7,10 +7,10 @@ CREATE TABLE IF NOT EXISTS job (
    user VARCHAR(255) NOT NULL,
    project VARCHAR(255) NOT NULL,
-   partition VARCHAR(255) NOT NULL,
-   array_job_id BIGINT NOT NULL,
-   duration INT NOT NULL DEFAULT 0,
-   walltime INT NOT NULL DEFAULT 0,
+   partition VARCHAR(255),
+   array_job_id BIGINT,
+   duration INT NOT NULL,
+   walltime INT NOT NULL,
    job_state VARCHAR(255) NOT NULL
    CHECK(job_state IN ('running', 'completed', 'failed', 'cancelled',
    'stopped', 'timeout', 'preempted', 'out_of_memory')),
@ -18,8 +18,8 @@ CREATE TABLE IF NOT EXISTS job (
    resources TEXT NOT NULL, -- JSON
    num_nodes INT NOT NULL,
-   num_hwthreads INT NOT NULL,
-   num_acc INT NOT NULL,
+   num_hwthreads INT,
+   num_acc INT,
    smt TINYINT NOT NULL DEFAULT 1 CHECK(smt IN (0, 1 )),
    exclusive TINYINT NOT NULL DEFAULT 1 CHECK(exclusive IN (0, 1, 2)),
    monitoring_status TINYINT NOT NULL DEFAULT 1 CHECK(monitoring_status IN (0, 1, 2, 3)),
@ -31,13 +31,15 @@ CREATE TABLE IF NOT EXISTS job (
    net_bw_avg REAL NOT NULL DEFAULT 0.0,
    net_data_vol_total REAL NOT NULL DEFAULT 0.0,
    file_bw_avg REAL NOT NULL DEFAULT 0.0,
-   file_data_vol_total REAL NOT NULL DEFAULT 0.0);
+   file_data_vol_total REAL NOT NULL DEFAULT 0.0,
+   UNIQUE (job_id, cluster, start_time)
+);

CREATE TABLE IF NOT EXISTS tag (
    id INTEGER PRIMARY KEY,
    tag_type VARCHAR(255) NOT NULL,
    tag_name VARCHAR(255) NOT NULL,
-   CONSTRAINT be_unique UNIQUE (tag_type, tag_name));
+   UNIQUE (tag_type, tag_name));

CREATE TABLE IF NOT EXISTS jobtag (
    job_id INTEGER,

View File

@ -34,11 +34,13 @@ func (r *JobRepository) QueryJobs(
	if order != nil {
		field := toSnakeCase(order.Field)
-		if order.Order == model.SortDirectionEnumAsc {
+		switch order.Order {
+		case model.SortDirectionEnumAsc:
			query = query.OrderBy(fmt.Sprintf("job.%s ASC", field))
-		} else if order.Order == model.SortDirectionEnumDesc {
+		case model.SortDirectionEnumDesc:
			query = query.OrderBy(fmt.Sprintf("job.%s DESC", field))
-		} else {
+		default:
			return nil, errors.New("REPOSITORY/QUERY > invalid sorting order")
		}
	}
@ -159,7 +161,7 @@ func SecurityCheck(ctx context.Context, query sq.SelectBuilder) (queryOut sq.Sel
		return query.Where("job.user = ?", user.Username), nil
	} else { // Unauthorized : Error
		var qnil sq.SelectBuilder
-		return qnil, errors.New(fmt.Sprintf("User '%s' with unknown roles! [%#v]\n", user.Username, user.Roles))
+		return qnil, fmt.Errorf("user '%s' with unknown roles [%#v]", user.Username, user.Roles)
	}
}

BIN
internal/repository/testdata/test.db vendored Normal file

Binary file not shown.

View File

@ -11,12 +11,10 @@ import (
	"github.com/ClusterCockpit/cc-backend/internal/auth"
	"github.com/ClusterCockpit/cc-backend/internal/config"
+	"github.com/ClusterCockpit/cc-backend/pkg/log"
-	_ "github.com/mattn/go-sqlite3"
)

-func init() {
-	Connect("sqlite3", "../../test/test.db")
-}
-
func setupUserTest(t *testing.T) *UserCfgRepo {
	const testconfig = `{
	"addr": "0.0.0.0:8080",
@ -34,6 +32,15 @@ func setupUserTest(t *testing.T) *UserCfgRepo {
	"startTime": { "from": "2022-01-01T00:00:00Z", "to": null }
	} } ]
}`
+	log.Init("info", true)
+	dbfilepath := "testdata/test.db"
+	err := MigrateDB("sqlite3", dbfilepath)
+	if err != nil {
+		t.Fatal(err)
+	}
+	Connect("sqlite3", dbfilepath)
+
	tmpdir := t.TempDir()
	cfgFilePath := filepath.Join(tmpdir, "config.json")
	if err := os.WriteFile(cfgFilePath, []byte(testconfig), 0666); err != nil {
@ -43,9 +50,10 @@ func setupUserTest(t *testing.T) *UserCfgRepo {
	config.Init(cfgFilePath)
	return GetUserCfgRepo()
}

func TestGetUIConfig(t *testing.T) {
	r := setupUserTest(t)
-	u := auth.User{Username: "jan"}
+	u := auth.User{Username: "demo"}

	cfg, err := r.GetUIConfig(&u)
	if err != nil {
@ -53,10 +61,9 @@ func TestGetUIConfig(t *testing.T) {
	}

	tmp := cfg["plot_list_selectedMetrics"]
-	metrics := tmp.([]interface{})
-	str := metrics[2].(string)
-	if str != "mem_bw" {
+	metrics := tmp.([]string)
+	str := metrics[2]
+	if str != "mem_used" {
		t.Errorf("wrong config\ngot: %s \nwant: mem_bw", str)
	}
}

View File

@ -24,7 +24,7 @@ import (
func LoadEnv(file string) error {
	f, err := os.Open(file)
	if err != nil {
-		log.Error("Error while opening file")
+		log.Error("Error while opening .env file")
		return err
	}

77
internal/util/compress.go Normal file
View File

@ -0,0 +1,77 @@
// Copyright (C) 2022 NHR@FAU, University Erlangen-Nuremberg.
// All rights reserved.
// Use of this source code is governed by a MIT-style
// license that can be found in the LICENSE file.
package util
import (
"compress/gzip"
"io"
"os"
"github.com/ClusterCockpit/cc-backend/pkg/log"
)
func CompressFile(fileIn string, fileOut string) error {
originalFile, err := os.Open(fileIn)
if err != nil {
log.Errorf("CompressFile() error: %v", err)
return err
}
defer originalFile.Close()
gzippedFile, err := os.Create(fileOut)
if err != nil {
log.Errorf("CompressFile() error: %v", err)
return err
}
defer gzippedFile.Close()
gzipWriter := gzip.NewWriter(gzippedFile)
defer gzipWriter.Close()
_, err = io.Copy(gzipWriter, originalFile)
if err != nil {
log.Errorf("CompressFile() error: %v", err)
return err
}
gzipWriter.Flush()
if err := os.Remove(fileIn); err != nil {
log.Errorf("CompressFile() error: %v", err)
return err
}
return nil
}
func UncompressFile(fileIn string, fileOut string) error {
gzippedFile, err := os.Open(fileIn)
if err != nil {
log.Errorf("UncompressFile() error: %v", err)
return err
}
defer gzippedFile.Close()
gzipReader, err := gzip.NewReader(gzippedFile)
if err != nil {
log.Errorf("UncompressFile() error: %v", err)
return err
}
defer gzipReader.Close()
uncompressedFile, err := os.Create(fileOut)
if err != nil {
log.Errorf("UncompressFile() error: %v", err)
return err
}
defer uncompressedFile.Close()
_, err = io.Copy(uncompressedFile, gzipReader)
if err != nil {
log.Errorf("UncompressFile() error: %v", err)
return err
}
if err := os.Remove(fileIn); err != nil {
log.Errorf("UncompressFile() error: %v", err)
return err
}
return nil
}
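// Round-trip sketch for the two helpers above (each deletes its input
// file on success, so this is a move-style conversion, not a copy;
// the file names are hypothetical):
//
//	if err := util.CompressFile("job-data.json", "job-data.json.gz"); err != nil {
//		log.Fatal(err)
//	}
//	if err := util.UncompressFile("job-data.json.gz", "job-data.json"); err != nil {
//		log.Fatal(err)
//	}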

107
internal/util/copy.go Normal file
View File

@ -0,0 +1,107 @@
// Copyright (C) 2022 NHR@FAU, University Erlangen-Nuremberg.
// All rights reserved.
// Use of this source code is governed by a MIT-style
// license that can be found in the LICENSE file.
package util
import (
"fmt"
"io"
"io/ioutil"
"os"
"path/filepath"
)
func CopyFile(src, dst string) (err error) {
in, err := os.Open(src)
if err != nil {
return
}
defer in.Close()
out, err := os.Create(dst)
if err != nil {
return
}
defer func() {
if e := out.Close(); e != nil {
err = e
}
}()
_, err = io.Copy(out, in)
if err != nil {
return
}
err = out.Sync()
if err != nil {
return
}
si, err := os.Stat(src)
if err != nil {
return
}
err = os.Chmod(dst, si.Mode())
if err != nil {
return
}
return
}
func CopyDir(src string, dst string) (err error) {
src = filepath.Clean(src)
dst = filepath.Clean(dst)
si, err := os.Stat(src)
if err != nil {
return err
}
if !si.IsDir() {
return fmt.Errorf("source is not a directory")
}
_, err = os.Stat(dst)
if err != nil && !os.IsNotExist(err) {
return
}
if err == nil {
return fmt.Errorf("destination already exists")
}
err = os.MkdirAll(dst, si.Mode())
if err != nil {
return
}
entries, err := ioutil.ReadDir(src)
if err != nil {
return
}
for _, entry := range entries {
srcPath := filepath.Join(src, entry.Name())
dstPath := filepath.Join(dst, entry.Name())
if entry.IsDir() {
err = CopyDir(srcPath, dstPath)
if err != nil {
return
}
} else {
// Skip symlinks.
if entry.Mode()&os.ModeSymlink != 0 {
continue
}
err = CopyFile(srcPath, dstPath)
if err != nil {
return
}
}
}
return
}
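// Usage sketch: the archive tests below stage their test data this way
// (CopyDir refuses to overwrite an existing destination and skips symlinks):
//
//	jobarchive := filepath.Join(t.TempDir(), "job-archive")
//	if err := util.CopyDir("./testdata/archive/", jobarchive); err != nil {
//		t.Fatal(err)
//	}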

View File

@ -0,0 +1,34 @@
// Copyright (C) 2022 NHR@FAU, University Erlangen-Nuremberg.
// All rights reserved.
// Use of this source code is governed by a MIT-style
// license that can be found in the LICENSE file.
package util
import (
"os"
"github.com/ClusterCockpit/cc-backend/pkg/log"
)
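// DiskUsage sums the sizes of the entries directly below dirpath and
// returns the total in megabytes (size * 1e-6); it does not recurse
// into subdirectories.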
func DiskUsage(dirpath string) float64 {
var size int64
dir, err := os.Open(dirpath)
if err != nil {
log.Errorf("DiskUsage() error: %v", err)
return 0
}
defer dir.Close()
files, err := dir.Readdir(-1)
if err != nil {
log.Errorf("DiskUsage() error: %v", err)
return 0
}
for _, file := range files {
size += file.Size()
}
return float64(size) * 1e-6
}

34
internal/util/fstat.go Normal file
View File

@ -0,0 +1,34 @@
// Copyright (C) 2022 NHR@FAU, University Erlangen-Nuremberg.
// All rights reserved.
// Use of this source code is governed by a MIT-style
// license that can be found in the LICENSE file.
package util
import (
"errors"
"os"
"github.com/ClusterCockpit/cc-backend/pkg/log"
)
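// CheckFileExists reports whether filePath exists; only a
// "does not exist" stat error yields false.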
func CheckFileExists(filePath string) bool {
_, err := os.Stat(filePath)
return !errors.Is(err, os.ErrNotExist)
}
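// GetFilesize returns the size of filePath in bytes, or 0 if the
// file cannot be stat'ed.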
func GetFilesize(filePath string) int64 {
fileInfo, err := os.Stat(filePath)
if err != nil {
log.Errorf("Error on Stat %s: %v", filePath, err)
return 0
}
return fileInfo.Size()
}
func GetFilecount(path string) int {
files, err := os.ReadDir(path)
if err != nil {
log.Errorf("Error on ReadDir %s: %v", path, err)
}
return len(files)
}

View File

@ -0,0 +1,21 @@
// Copyright (C) 2022 NHR@FAU, University Erlangen-Nuremberg.
// All rights reserved.
// Use of this source code is governed by a MIT-style
// license that can be found in the LICENSE file.
package util
import "golang.org/x/exp/constraints"
func Min[T constraints.Ordered](a, b T) T {
if a < b {
return a
}
return b
}
func Max[T constraints.Ordered](a, b T) T {
if a > b {
return a
}
return b
}
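// These generic helpers are used by the archive backend to track the
// first and last job start times, e.g.:
//
//	ci[cc].dateFirst = util.Min(ci[cc].dateFirst, startTime)
//	ci[cc].dateLast = util.Max(ci[cc].dateLast, startTime)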

View File

@ -18,6 +18,10 @@ const Version uint64 = 1
type ArchiveBackend interface {
	Init(rawConfig json.RawMessage) (uint64, error)

+	Info()
+
+	Exists(job *schema.Job) bool
+
	LoadJobMeta(job *schema.Job) (*schema.JobMeta, error)

	LoadJobData(job *schema.Job) (schema.JobData, error)
@ -30,6 +34,14 @@ type ArchiveBackend interface {
	GetClusters() []string

+	CleanUp(jobs []*schema.Job)
+
+	Move(jobs []*schema.Job, path string)
+
+	Clean(before int64, after int64)
+
+	Compress(jobs []*schema.Job)
+
	Iter(loadMetricData bool) <-chan JobContainer
}
@ -44,21 +56,23 @@ var useArchive bool
func Init(rawConfig json.RawMessage, disableArchive bool) error {
	useArchive = !disableArchive

-	var kind struct {
+	var cfg struct {
		Kind string `json:"kind"`
	}
-	if err := json.Unmarshal(rawConfig, &kind); err != nil {
+	if err := json.Unmarshal(rawConfig, &cfg); err != nil {
		log.Warn("Error while unmarshaling raw config json")
		return err
	}

-	switch kind.Kind {
+	switch cfg.Kind {
	case "file":
		ar = &FsArchive{}
	// case "s3":
	// ar = &S3Archive{}
	default:
-		return fmt.Errorf("ARCHIVE/ARCHIVE > unkown archive backend '%s''", kind.Kind)
+		return fmt.Errorf("ARCHIVE/ARCHIVE > unkown archive backend '%s''", cfg.Kind)
	}

	version, err := ar.Init(rawConfig)
@ -67,6 +81,7 @@ func Init(rawConfig json.RawMessage, disableArchive bool) error {
		return err
	}
	log.Infof("Load archive version %d", version)
+
	return initClusterConfig()
}

View File

@ -0,0 +1,69 @@
// Copyright (C) 2022 NHR@FAU, University Erlangen-Nuremberg.
// All rights reserved.
// Use of this source code is governed by a MIT-style
// license that can be found in the LICENSE file.
package archive_test
import (
"encoding/json"
"fmt"
"path/filepath"
"testing"
"time"
"github.com/ClusterCockpit/cc-backend/internal/util"
"github.com/ClusterCockpit/cc-backend/pkg/archive"
"github.com/ClusterCockpit/cc-backend/pkg/schema"
)
var jobs []*schema.Job
func setup(t *testing.T) archive.ArchiveBackend {
tmpdir := t.TempDir()
jobarchive := filepath.Join(tmpdir, "job-archive")
util.CopyDir("./testdata/archive/", jobarchive)
archiveCfg := fmt.Sprintf("{\"kind\": \"file\",\"path\": \"%s\"}", jobarchive)
if err := archive.Init(json.RawMessage(archiveCfg), false); err != nil {
t.Fatal(err)
}
jobs = make([]*schema.Job, 2)
jobs[0] = &schema.Job{}
jobs[0].JobID = 1403244
jobs[0].Cluster = "emmy"
jobs[0].StartTime = time.Unix(1608923076, 0)
jobs[1] = &schema.Job{}
jobs[1].JobID = 1404397
jobs[1].Cluster = "emmy"
jobs[1].StartTime = time.Unix(1609300556, 0)
return archive.GetHandle()
}
func TestCleanUp(t *testing.T) {
a := setup(t)
if !a.Exists(jobs[0]) {
t.Error("Job does not exist")
}
a.CleanUp(jobs)
if a.Exists(jobs[0]) || a.Exists(jobs[1]) {
t.Error("Jobs still exist")
}
}
// func TestCompress(t *testing.T) {
// a := setup(t)
// if !a.Exists(jobs[0]) {
// t.Error("Job does not exist")
// }
//
// a.Compress(jobs)
//
// if a.Exists(jobs[0]) || a.Exists(jobs[1]) {
// t.Error("Jobs still exist")
// }
// }

View File

@ -11,14 +11,17 @@ import (
	"encoding/json"
	"errors"
	"fmt"
+	"math"
	"os"
	"path"
	"path/filepath"
	"strconv"
	"strings"
+	"text/tabwriter"
	"time"

	"github.com/ClusterCockpit/cc-backend/internal/config"
+	"github.com/ClusterCockpit/cc-backend/internal/util"
	"github.com/ClusterCockpit/cc-backend/pkg/log"
	"github.com/ClusterCockpit/cc-backend/pkg/schema"
	"github.com/santhosh-tekuri/jsonschema/v5"
@ -33,9 +36,17 @@ type FsArchive struct {
	clusters []string
}

-func checkFileExists(filePath string) bool {
-	_, err := os.Stat(filePath)
-	return !errors.Is(err, os.ErrNotExist)
+func getDirectory(
+	job *schema.Job,
+	rootPath string,
+) string {
+	lvl1, lvl2 := fmt.Sprintf("%d", job.JobID/1000), fmt.Sprintf("%03d", job.JobID%1000)
+
+	return filepath.Join(
+		rootPath,
+		job.Cluster,
+		lvl1, lvl2,
+		strconv.FormatInt(job.StartTime.Unix(), 10))
}

func getPath(
@ -43,12 +54,8 @@ func getPath(
	rootPath string,
	file string) string {

-	lvl1, lvl2 := fmt.Sprintf("%d", job.JobID/1000), fmt.Sprintf("%03d", job.JobID%1000)
	return filepath.Join(
-		rootPath,
-		job.Cluster,
-		lvl1, lvl2,
-		strconv.FormatInt(job.StartTime.Unix(), 10), file)
+		getDirectory(job, rootPath), file)
}

func loadJobMeta(filename string) (*schema.JobMeta, error) {
@ -74,6 +81,7 @@ func loadJobData(filename string, isCompressed bool) (schema.JobData, error) {
		log.Errorf("fsBackend LoadJobData()- %v", err)
		return nil, err
	}
+	defer f.Close()

	if isCompressed {
		r, err := gzip.NewReader(f)
@ -91,7 +99,6 @@ func loadJobData(filename string, isCompressed bool) (schema.JobData, error) {
		return DecodeJobData(r, filename)
	} else {
-		defer f.Close()
		if config.Keys.Validate {
			if err := schema.Validate(schema.Data, bufio.NewReader(f)); err != nil {
				return schema.JobData{}, fmt.Errorf("validate job data: %v", err)
@ -147,10 +154,205 @@ func (fsa *FsArchive) Init(rawConfig json.RawMessage) (uint64, error) {
	return version, nil
}
type clusterInfo struct {
numJobs int
dateFirst int64
dateLast int64
diskSize float64
}
func (fsa *FsArchive) Info() {
fmt.Printf("Job archive %s\n", fsa.path)
clusters, err := os.ReadDir(fsa.path)
if err != nil {
log.Fatalf("Reading clusters failed: %s", err.Error())
}
ci := make(map[string]*clusterInfo)
for _, cluster := range clusters {
if !cluster.IsDir() {
continue
}
cc := cluster.Name()
ci[cc] = &clusterInfo{dateFirst: time.Now().Unix()}
lvl1Dirs, err := os.ReadDir(filepath.Join(fsa.path, cluster.Name()))
if err != nil {
log.Fatalf("Reading jobs failed @ lvl1 dirs: %s", err.Error())
}
for _, lvl1Dir := range lvl1Dirs {
if !lvl1Dir.IsDir() {
continue
}
lvl2Dirs, err := os.ReadDir(filepath.Join(fsa.path, cluster.Name(), lvl1Dir.Name()))
if err != nil {
log.Fatalf("Reading jobs failed @ lvl2 dirs: %s", err.Error())
}
for _, lvl2Dir := range lvl2Dirs {
dirpath := filepath.Join(fsa.path, cluster.Name(), lvl1Dir.Name(), lvl2Dir.Name())
startTimeDirs, err := os.ReadDir(dirpath)
if err != nil {
log.Fatalf("Reading jobs failed @ starttime dirs: %s", err.Error())
}
for _, startTimeDir := range startTimeDirs {
if startTimeDir.IsDir() {
ci[cc].numJobs++
startTime, err := strconv.ParseInt(startTimeDir.Name(), 10, 64)
if err != nil {
log.Fatalf("Cannot parse starttime: %s", err.Error())
}
ci[cc].dateFirst = util.Min(ci[cc].dateFirst, startTime)
ci[cc].dateLast = util.Max(ci[cc].dateLast, startTime)
ci[cc].diskSize += util.DiskUsage(filepath.Join(dirpath, startTimeDir.Name()))
}
}
}
}
}
cit := clusterInfo{dateFirst: time.Now().Unix()}
w := tabwriter.NewWriter(os.Stdout, 0, 0, 2, ' ', tabwriter.Debug)
fmt.Fprintln(w, "cluster\t#jobs\tfrom\tto\tdu (MB)")
for cluster, clusterInfo := range ci {
fmt.Fprintf(w, "%s\t%d\t%s\t%s\t%.2f\n", cluster,
clusterInfo.numJobs,
time.Unix(clusterInfo.dateFirst, 0),
time.Unix(clusterInfo.dateLast, 0),
clusterInfo.diskSize)
cit.numJobs += clusterInfo.numJobs
cit.dateFirst = util.Min(cit.dateFirst, clusterInfo.dateFirst)
cit.dateLast = util.Max(cit.dateLast, clusterInfo.dateLast)
cit.diskSize += clusterInfo.diskSize
}
fmt.Fprintf(w, "TOTAL\t%d\t%s\t%s\t%.2f\n",
cit.numJobs, time.Unix(cit.dateFirst, 0), time.Unix(cit.dateLast, 0), cit.diskSize)
w.Flush()
}
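// Sample report (not part of this commit; numbers are made up). With the
// tabwriter.Debug flag, columns are separated by '|', roughly:
//   cluster |#jobs |from                          |to                            |du (MB)
//   emmy    |1867  |2020-11-01 12:00:00 +0100 CET |2021-01-12 09:30:00 +0100 CET |3834.21
//   TOTAL   |1867  |2020-11-01 12:00:00 +0100 CET |2021-01-12 09:30:00 +0100 CET |3834.21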
func (fsa *FsArchive) Exists(job *schema.Job) bool {
dir := getDirectory(job, fsa.path)
_, err := os.Stat(dir)
return !errors.Is(err, os.ErrNotExist)
}
func (fsa *FsArchive) Clean(before int64, after int64) {
if after == 0 {
after = math.MaxInt64
}
clusters, err := os.ReadDir(fsa.path)
if err != nil {
log.Fatalf("Reading clusters failed: %s", err.Error())
}
for _, cluster := range clusters {
if !cluster.IsDir() {
continue
}
lvl1Dirs, err := os.ReadDir(filepath.Join(fsa.path, cluster.Name()))
if err != nil {
log.Fatalf("Reading jobs failed @ lvl1 dirs: %s", err.Error())
}
for _, lvl1Dir := range lvl1Dirs {
if !lvl1Dir.IsDir() {
continue
}
lvl2Dirs, err := os.ReadDir(filepath.Join(fsa.path, cluster.Name(), lvl1Dir.Name()))
if err != nil {
log.Fatalf("Reading jobs failed @ lvl2 dirs: %s", err.Error())
}
for _, lvl2Dir := range lvl2Dirs {
dirpath := filepath.Join(fsa.path, cluster.Name(), lvl1Dir.Name(), lvl2Dir.Name())
startTimeDirs, err := os.ReadDir(dirpath)
if err != nil {
log.Fatalf("Reading jobs failed @ starttime dirs: %s", err.Error())
}
for _, startTimeDir := range startTimeDirs {
if startTimeDir.IsDir() {
startTime, err := strconv.ParseInt(startTimeDir.Name(), 10, 64)
if err != nil {
log.Fatalf("Cannot parse starttime: %s", err.Error())
}
if startTime < before || startTime > after {
if err := os.RemoveAll(filepath.Join(dirpath, startTimeDir.Name())); err != nil {
log.Errorf("JobArchive Cleanup() error: %v", err)
}
}
}
}
if util.GetFilecount(dirpath) == 0 {
if err := os.Remove(dirpath); err != nil {
log.Errorf("JobArchive Clean() error: %v", err)
}
}
}
}
}
}
func (fsa *FsArchive) Move(jobs []*schema.Job, path string) {
for _, job := range jobs {
source := getDirectory(job, fsa.path)
target := getDirectory(job, path)
if err := os.MkdirAll(filepath.Clean(filepath.Join(target, "..")), 0777); err != nil {
log.Errorf("JobArchive Move MkDir error: %v", err)
}
if err := os.Rename(source, target); err != nil {
log.Errorf("JobArchive Move() error: %v", err)
}
parent := filepath.Clean(filepath.Join(source, ".."))
if util.GetFilecount(parent) == 0 {
if err := os.Remove(parent); err != nil {
log.Errorf("JobArchive Move() error: %v", err)
}
}
}
}
func (fsa *FsArchive) CleanUp(jobs []*schema.Job) {
for _, job := range jobs {
dir := getDirectory(job, fsa.path)
if err := os.RemoveAll(dir); err != nil {
log.Errorf("JobArchive Cleanup() error: %v", err)
}
parent := filepath.Clean(filepath.Join(dir, ".."))
if util.GetFilecount(parent) == 0 {
if err := os.Remove(parent); err != nil {
log.Errorf("JobArchive Cleanup() error: %v", err)
}
}
}
}
func (fsa *FsArchive) Compress(jobs []*schema.Job) {
for _, job := range jobs {
fileIn := getPath(job, fsa.path, "data.json")
if util.CheckFileExists(fileIn) && util.GetFilesize(fileIn) > 2000 {
util.CompressFile(fileIn, getPath(job, fsa.path, "data.json.gz"))
}
}
}
func (fsa *FsArchive) LoadJobData(job *schema.Job) (schema.JobData, error) { func (fsa *FsArchive) LoadJobData(job *schema.Job) (schema.JobData, error) {
var isCompressed bool = true var isCompressed bool = true
filename := getPath(job, fsa.path, "data.json.gz") filename := getPath(job, fsa.path, "data.json.gz")
if !checkFileExists(filename) {
if !util.CheckFileExists(filename) {
filename = getPath(job, fsa.path, "data.json") filename = getPath(job, fsa.path, "data.json")
isCompressed = false isCompressed = false
} }
@ -159,7 +361,6 @@ func (fsa *FsArchive) LoadJobData(job *schema.Job) (schema.JobData, error) {
} }
func (fsa *FsArchive) LoadJobMeta(job *schema.Job) (*schema.JobMeta, error) { func (fsa *FsArchive) LoadJobMeta(job *schema.Job) (*schema.JobMeta, error) {
filename := getPath(job, fsa.path, "meta.json") filename := getPath(job, fsa.path, "meta.json")
return loadJobMeta(filename) return loadJobMeta(filename)
} }
@ -226,7 +427,7 @@ func (fsa *FsArchive) Iter(loadMetricData bool) <-chan JobContainer {
var isCompressed bool = true var isCompressed bool = true
filename := filepath.Join(dirpath, startTimeDir.Name(), "data.json.gz") filename := filepath.Join(dirpath, startTimeDir.Name(), "data.json.gz")
if !checkFileExists(filename) { if !util.CheckFileExists(filename) {
filename = filepath.Join(dirpath, startTimeDir.Name(), "data.json") filename = filepath.Join(dirpath, startTimeDir.Name(), "data.json")
isCompressed = false isCompressed = false
} }

View File

@ -7,20 +7,17 @@ package archive
import ( import (
"encoding/json" "encoding/json"
"fmt" "fmt"
"path/filepath"
"testing" "testing"
"time" "time"
"github.com/ClusterCockpit/cc-backend/pkg/log" "github.com/ClusterCockpit/cc-backend/internal/util"
"github.com/ClusterCockpit/cc-backend/pkg/schema" "github.com/ClusterCockpit/cc-backend/pkg/schema"
) )
func init() {
log.Init("info", true)
}
func TestInitEmptyPath(t *testing.T) { func TestInitEmptyPath(t *testing.T) {
var fsa FsArchive var fsa FsArchive
_, err := fsa.Init(json.RawMessage("{\"kind\":\"../../test/archive\"}")) _, err := fsa.Init(json.RawMessage("{\"kind\":\"testdata/archive\"}"))
if err == nil { if err == nil {
t.Fatal(err) t.Fatal(err)
} }
@ -28,14 +25,14 @@ func TestInitEmptyPath(t *testing.T) {
func TestInitNoJson(t *testing.T) { func TestInitNoJson(t *testing.T) {
var fsa FsArchive var fsa FsArchive
_, err := fsa.Init(json.RawMessage("\"path\":\"../../test/archive\"}")) _, err := fsa.Init(json.RawMessage("\"path\":\"testdata/archive\"}"))
if err == nil { if err == nil {
t.Fatal(err) t.Fatal(err)
} }
} }
func TestInitNotExists(t *testing.T) { func TestInitNotExists(t *testing.T) {
var fsa FsArchive var fsa FsArchive
_, err := fsa.Init(json.RawMessage("{\"path\":\"../../test/job-archive\"}")) _, err := fsa.Init(json.RawMessage("{\"path\":\"testdata/job-archive\"}"))
if err == nil { if err == nil {
t.Fatal(err) t.Fatal(err)
} }
@ -43,11 +40,11 @@ func TestInitNotExists(t *testing.T) {
func TestInit(t *testing.T) { func TestInit(t *testing.T) {
var fsa FsArchive var fsa FsArchive
version, err := fsa.Init(json.RawMessage("{\"path\":\"../../test/archive\"}")) version, err := fsa.Init(json.RawMessage("{\"path\":\"testdata/archive\"}"))
if err != nil { if err != nil {
t.Fatal(err) t.Fatal(err)
} }
if fsa.path != "../../test/archive" { if fsa.path != "testdata/archive" {
t.Fail() t.Fail()
} }
if version != 1 { if version != 1 {
@ -60,12 +57,12 @@ func TestInit(t *testing.T) {
func TestLoadJobMetaInternal(t *testing.T) { func TestLoadJobMetaInternal(t *testing.T) {
var fsa FsArchive var fsa FsArchive
_, err := fsa.Init(json.RawMessage("{\"path\":\"../../test/archive\"}")) _, err := fsa.Init(json.RawMessage("{\"path\":\"testdata/archive\"}"))
if err != nil { if err != nil {
t.Fatal(err) t.Fatal(err)
} }
job, err := loadJobMeta("../../test/archive/emmy/1404/397/1609300556/meta.json") job, err := loadJobMeta("testdata/archive/emmy/1404/397/1609300556/meta.json")
if err != nil { if err != nil {
t.Fatal(err) t.Fatal(err)
} }
@ -83,7 +80,7 @@ func TestLoadJobMetaInternal(t *testing.T) {
func TestLoadJobMeta(t *testing.T) { func TestLoadJobMeta(t *testing.T) {
var fsa FsArchive var fsa FsArchive
_, err := fsa.Init(json.RawMessage("{\"path\":\"../../test/archive\"}")) _, err := fsa.Init(json.RawMessage("{\"path\":\"testdata/archive\"}"))
if err != nil { if err != nil {
t.Fatal(err) t.Fatal(err)
} }
@ -111,7 +108,7 @@ func TestLoadJobMeta(t *testing.T) {
func TestLoadJobData(t *testing.T) { func TestLoadJobData(t *testing.T) {
var fsa FsArchive var fsa FsArchive
_, err := fsa.Init(json.RawMessage("{\"path\":\"../../test/archive\"}")) _, err := fsa.Init(json.RawMessage("{\"path\": \"testdata/archive\"}"))
if err != nil { if err != nil {
t.Fatal(err) t.Fatal(err)
} }
@ -126,8 +123,8 @@ func TestLoadJobData(t *testing.T) {
t.Fatal(err) t.Fatal(err)
} }
for name, scopes := range data { for _, scopes := range data {
fmt.Printf("Metric name: %s\n", name) // fmt.Printf("Metric name: %s\n", name)
if _, exists := scopes[schema.MetricScopeNode]; !exists { if _, exists := scopes[schema.MetricScopeNode]; !exists {
t.Fail() t.Fail()
@ -135,9 +132,54 @@ func TestLoadJobData(t *testing.T) {
} }
} }
func BenchmarkLoadJobData(b *testing.B) {
tmpdir := b.TempDir()
jobarchive := filepath.Join(tmpdir, "job-archive")
util.CopyDir("./testdata/archive/", jobarchive)
archiveCfg := fmt.Sprintf("{\"path\": \"%s\"}", jobarchive)
var fsa FsArchive
fsa.Init(json.RawMessage(archiveCfg))
jobIn := schema.Job{BaseJob: schema.JobDefaults}
jobIn.StartTime = time.Unix(1608923076, 0)
jobIn.JobID = 1403244
jobIn.Cluster = "emmy"
util.UncompressFile(filepath.Join(jobarchive, "emmy/1403/244/1608923076/data.json.gz"),
filepath.Join(jobarchive, "emmy/1403/244/1608923076/data.json"))
b.ResetTimer()
for i := 0; i < b.N; i++ {
fsa.LoadJobData(&jobIn)
}
}
func BenchmarkLoadJobDataCompressed(b *testing.B) {
tmpdir := b.TempDir()
jobarchive := filepath.Join(tmpdir, "job-archive")
util.CopyDir("./testdata/archive/", jobarchive)
archiveCfg := fmt.Sprintf("{\"path\": \"%s\"}", jobarchive)
var fsa FsArchive
fsa.Init(json.RawMessage(archiveCfg))
jobIn := schema.Job{BaseJob: schema.JobDefaults}
jobIn.StartTime = time.Unix(1608923076, 0)
jobIn.JobID = 1403244
jobIn.Cluster = "emmy"
b.ResetTimer()
for i := 0; i < b.N; i++ {
fsa.LoadJobData(&jobIn)
}
}
func TestLoadCluster(t *testing.T) { func TestLoadCluster(t *testing.T) {
var fsa FsArchive var fsa FsArchive
_, err := fsa.Init(json.RawMessage("{\"path\":\"../../test/archive\"}")) _, err := fsa.Init(json.RawMessage("{\"path\":\"testdata/archive\"}"))
if err != nil { if err != nil {
t.Fatal(err) t.Fatal(err)
} }
@ -154,7 +196,7 @@ func TestLoadCluster(t *testing.T) {
func TestIter(t *testing.T) { func TestIter(t *testing.T) {
var fsa FsArchive var fsa FsArchive
_, err := fsa.Init(json.RawMessage("{\"path\":\"../../test/archive\"}")) _, err := fsa.Init(json.RawMessage("{\"path\":\"testdata/archive\"}"))
if err != nil { if err != nil {
t.Fatal(err) t.Fatal(err)
} }

View File

@ -34,11 +34,11 @@ var (
) )
var ( var (
DebugLog *log.Logger DebugLog *log.Logger = log.New(DebugWriter, DebugPrefix, log.LstdFlags)
InfoLog *log.Logger InfoLog *log.Logger = log.New(InfoWriter, InfoPrefix, log.LstdFlags|log.Lshortfile)
WarnLog *log.Logger WarnLog *log.Logger = log.New(WarnWriter, WarnPrefix, log.LstdFlags|log.Lshortfile)
ErrLog *log.Logger ErrLog *log.Logger = log.New(ErrWriter, ErrPrefix, log.LstdFlags|log.Llongfile)
CritLog *log.Logger CritLog *log.Logger = log.New(CritWriter, CritPrefix, log.LstdFlags|log.Llongfile)
) )
/* CONFIG */ /* CONFIG */
@ -70,12 +70,6 @@ func Init(lvl string, logdate bool) {
WarnLog = log.New(WarnWriter, WarnPrefix, log.Lshortfile) WarnLog = log.New(WarnWriter, WarnPrefix, log.Lshortfile)
ErrLog = log.New(ErrWriter, ErrPrefix, log.Llongfile) ErrLog = log.New(ErrWriter, ErrPrefix, log.Llongfile)
CritLog = log.New(CritWriter, CritPrefix, log.Llongfile) CritLog = log.New(CritWriter, CritPrefix, log.Llongfile)
} else {
DebugLog = log.New(DebugWriter, DebugPrefix, log.LstdFlags)
InfoLog = log.New(InfoWriter, InfoPrefix, log.LstdFlags|log.Lshortfile)
WarnLog = log.New(WarnWriter, WarnPrefix, log.LstdFlags|log.Lshortfile)
ErrLog = log.New(ErrWriter, ErrPrefix, log.LstdFlags|log.Llongfile)
CritLog = log.New(CritWriter, CritPrefix, log.LstdFlags|log.Llongfile)
} }
} }

View File

@ -57,6 +57,13 @@ type ClusterConfig struct {
MetricDataRepository json.RawMessage `json:"metricDataRepository"` MetricDataRepository json.RawMessage `json:"metricDataRepository"`
} }
type Retention struct {
Age int `json:"age"`
IncludeDB bool `json:"includeDB"`
Policy string `json:"policy"`
Location string `json:"location"`
}
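// Sketch of a matching config.json fragment (not part of this commit; the
// keys follow config.schema.json below, the values are made up):
//
//	"job-archive": {
//	    "kind": "file",
//	    "path": "./var/job-archive",
//	    "retention": {
//	        "policy": "move",
//	        "includeDB": true,
//	        "age": 365,
//	        "location": "/data/retired-jobs"
//	    }
//	}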
// Format of the configuration (file). See below for the defaults. // Format of the configuration (file). See below for the defaults.
type ProgramConfig struct { type ProgramConfig struct {
// Address where the http (or https) server will listen on (for example: 'localhost:80'). // Address where the http (or https) server will listen on (for example: 'localhost:80').

View File

@ -11,8 +11,9 @@ import (
"time" "time"
) )
// Non-Swaggered Comment: BaseJob // BaseJob is the common part of the job metadata structs
// Non-Swaggered Comment: Common subset of Job and JobMeta. Use one of those, not this type directly. //
// Common subset of Job and JobMeta. Use one of those, not this type directly.
type BaseJob struct { type BaseJob struct {
// The unique identifier of a job // The unique identifier of a job
@ -21,17 +22,17 @@ type BaseJob struct {
Project string `json:"project" db:"project" example:"abcd200"` // The unique identifier of a project Project string `json:"project" db:"project" example:"abcd200"` // The unique identifier of a project
Cluster string `json:"cluster" db:"cluster" example:"fritz"` // The unique identifier of a cluster Cluster string `json:"cluster" db:"cluster" example:"fritz"` // The unique identifier of a cluster
SubCluster string `json:"subCluster" db:"subcluster" example:"main"` // The unique identifier of a sub cluster SubCluster string `json:"subCluster" db:"subcluster" example:"main"` // The unique identifier of a sub cluster
Partition *string `json:"partition,omitempty" db:"partition" example:"main"` // The Slurm partition to which the job was submitted Partition string `json:"partition,omitempty" db:"partition" example:"main"` // The Slurm partition to which the job was submitted
ArrayJobId *int64 `json:"arrayJobId,omitempty" db:"array_job_id" example:"123000"` // The unique identifier of an array job ArrayJobId int64 `json:"arrayJobId,omitempty" db:"array_job_id" example:"123000"` // The unique identifier of an array job
NumNodes int32 `json:"numNodes" db:"num_nodes" example:"2" minimum:"1"` // Number of nodes used (Min > 0) NumNodes int32 `json:"numNodes" db:"num_nodes" example:"2" minimum:"1"` // Number of nodes used (Min > 0)
NumHWThreads *int32 `json:"numHwthreads,omitempty" db:"num_hwthreads" example:"20" minimum:"1"` // Number of HWThreads used (Min > 0) NumHWThreads int32 `json:"numHwthreads,omitempty" db:"num_hwthreads" example:"20" minimum:"1"` // Number of HWThreads used (Min > 0)
NumAcc *int32 `json:"numAcc,omitempty" db:"num_acc" example:"2" minimum:"1"` // Number of accelerators used (Min > 0) NumAcc int32 `json:"numAcc,omitempty" db:"num_acc" example:"2" minimum:"1"` // Number of accelerators used (Min > 0)
Exclusive int32 `json:"exclusive" db:"exclusive" example:"1" minimum:"0" maximum:"2"` // Specifies how nodes are shared: 0 - Shared among multiple jobs of multiple users, 1 - Job exclusive (Default), 2 - Shared among multiple jobs of same user Exclusive int32 `json:"exclusive" db:"exclusive" example:"1" minimum:"0" maximum:"2"` // Specifies how nodes are shared: 0 - Shared among multiple jobs of multiple users, 1 - Job exclusive (Default), 2 - Shared among multiple jobs of same user
MonitoringStatus int32 `json:"monitoringStatus,omitempty" db:"monitoring_status" example:"1" minimum:"0" maximum:"3"` // State of monitoring system during job run: 0 - Disabled, 1 - Running or Archiving (Default), 2 - Archiving Failed, 3 - Archiving Successful MonitoringStatus int32 `json:"monitoringStatus,omitempty" db:"monitoring_status" example:"1" minimum:"0" maximum:"3"` // State of monitoring system during job run: 0 - Disabled, 1 - Running or Archiving (Default), 2 - Archiving Failed, 3 - Archiving Successful
SMT *int32 `json:"smt,omitempty" db:"smt" example:"4"` // SMT threads used by job SMT int32 `json:"smt,omitempty" db:"smt" example:"4"` // SMT threads used by job
State JobState `json:"jobState" db:"job_state" example:"completed" enums:"completed,failed,cancelled,stopped,timeout,out_of_memory"` // Final state of job State JobState `json:"jobState" db:"job_state" example:"completed" enums:"completed,failed,cancelled,stopped,timeout,out_of_memory"` // Final state of job
Duration int32 `json:"duration" db:"duration" example:"43200" minimum:"1"` // Duration of job in seconds (Min > 0) Duration int32 `json:"duration" db:"duration" example:"43200" minimum:"1"` // Duration of job in seconds (Min > 0)
Walltime *int64 `json:"walltime,omitempty" db:"walltime" example:"86400" minimum:"1"` // Requested walltime of job in seconds (Min > 0) Walltime int64 `json:"walltime,omitempty" db:"walltime" example:"86400" minimum:"1"` // Requested walltime of job in seconds (Min > 0)
Tags []*Tag `json:"tags,omitempty"` // List of tags Tags []*Tag `json:"tags,omitempty"` // List of tags
RawResources []byte `json:"-" db:"resources"` // Resources used by job [As Bytes] RawResources []byte `json:"-" db:"resources"` // Resources used by job [As Bytes]
Resources []*Resource `json:"resources"` // Resources used by job Resources []*Resource `json:"resources"` // Resources used by job
@ -40,9 +41,10 @@ type BaseJob struct {
ConcurrentJobs JobLinkResultList `json:"concurrentJobs"` ConcurrentJobs JobLinkResultList `json:"concurrentJobs"`
} }
// Non-Swaggered Comment: Job // Job struct type
// Non-Swaggered Comment: This type is used as the GraphQL interface and using sqlx as a table row. //
// This type is used as the GraphQL interface and using sqlx as a table row.
//
// Job model // Job model
// @Description Information of a HPC job. // @Description Information of a HPC job.
type Job struct { type Job struct {
@ -61,6 +63,17 @@ type Job struct {
FileDataVolTotal float64 `json:"-" db:"file_data_vol_total"` // FileDataVolTotal as Float64 FileDataVolTotal float64 `json:"-" db:"file_data_vol_total"` // FileDataVolTotal as Float64
} }
// JobMeta struct type
//
// When reading from the database or sending data via GraphQL, the start time
// can be in the much more convenient time.Time type. In the `meta.json`
// files, the start time is encoded as a unix epoch timestamp. This is why
// there is this struct, which contains all fields from the regular job
// struct, but "overwrites" the StartTime field with one of type int64. ID
// *int64 `json:"id,omitempty"` >> never used in the job-archive, only
// available via REST-API
//
type JobLink struct { type JobLink struct {
ID int64 `json:"id"` ID int64 `json:"id"`
JobID int64 `json:"jobId"` JobID int64 `json:"jobId"`
@ -71,13 +84,6 @@ type JobLinkResultList struct {
Count int `json:"count"` Count int `json:"count"`
} }
// Non-Swaggered Comment: JobMeta
// Non-Swaggered Comment: When reading from the database or sending data via GraphQL, the start time can be in the much more
// Non-Swaggered Comment: convenient time.Time type. In the `meta.json` files, the start time is encoded as a unix epoch timestamp.
// Non-Swaggered Comment: This is why there is this struct, which contains all fields from the regular job struct, but "overwrites"
// Non-Swaggered Comment: the StartTime field with one of type int64.
// Non-Swaggered Comment: ID *int64 `json:"id,omitempty"` >> never used in the job-archive, only available via REST-API
// JobMeta model // JobMeta model
// @Description Meta data information of a HPC job. // @Description Meta data information of a HPC job.
type JobMeta struct { type JobMeta struct {
@ -85,7 +91,7 @@ type JobMeta struct {
ID *int64 `json:"id,omitempty"` ID *int64 `json:"id,omitempty"`
BaseJob BaseJob
StartTime int64 `json:"startTime" db:"start_time" example:"1649723812" minimum:"1"` // Start epoch time stamp in seconds (Min > 0) StartTime int64 `json:"startTime" db:"start_time" example:"1649723812" minimum:"1"` // Start epoch time stamp in seconds (Min > 0)
Statistics map[string]JobStatistics `json:"statistics,omitempty"` // Metric statistics of job Statistics map[string]JobStatistics `json:"statistics"` // Metric statistics of job
} }
const ( const (
@ -101,8 +107,8 @@ var JobDefaults BaseJob = BaseJob{
} }
type Unit struct { type Unit struct {
Base string `json:"base"` Base string `json:"base"`
Prefix *string `json:"prefix,omitempty"` Prefix string `json:"prefix,omitempty"`
} }
// JobStatistics model // JobStatistics model

View File

@ -3,7 +3,7 @@
"$id": "embedfs://config.schema.json", "$id": "embedfs://config.schema.json",
"title": "cc-backend configuration file schema", "title": "cc-backend configuration file schema",
"type": "object", "type": "object",
"properties":{ "properties": {
"addr": { "addr": {
"description": "Address where the http (or https) server will listen on (for example: 'localhost:80').", "description": "Address where the http (or https) server will listen on (for example: 'localhost:80').",
"type": "string" "type": "string"
@ -41,8 +41,59 @@
"type": "string" "type": "string"
}, },
"job-archive": { "job-archive": {
"description": "Path to the job-archive.", "description": "Configuration keys for job-archive",
"type": "string" "type": "object",
"properties": {
"kind": {
"description": "Backend type for job-archive",
"type": "string",
"enum": [
"file",
"s3"
]
},
"path": {
"description": "Path to job archive for file backend",
"type": "string"
},
"compression": {
"description": "Setup automatic compression for jobs older than number of days",
"type": "integer"
},
"retention": {
"description": "Configuration keys for retention",
"type": "object",
"properties": {
"policy": {
"description": "Retention policy",
"type": "string",
"enum": [
"none",
"delete",
"move"
]
},
"includeDB": {
"description": "Also remove jobs from database",
"type": "boolean"
},
"age": {
"description": "Act on jobs with startTime older than age (in days)",
"type": "integer"
},
"location": {
"description": "The target directory for retention. Only applicable for retention move.",
"type": "string"
}
},
"required": [
"policy"
]
}
},
"required": [
"kind"
]
}, },
"disable-archive": { "disable-archive": {
"description": "Keep all metric data in the metric data repositories, do not write to the job-archive.", "description": "Keep all metric data in the metric data repositories, do not write to the job-archive.",

View File

@ -1,175 +0,0 @@
# cc-units - A unit system for ClusterCockpit
When working with metrics, the problem comes up that they may use different unit names but in fact have the same unit.
There are a lot of real world examples like 'kB' and 'Kbyte'. In [cc-metric-collector](https://github.com/ClusterCockpit/cc-metric-collector), the collectors read data from different sources which may use different units or the programmer specifies a unit for a metric by hand. The cc-units system is not comparable with the SI unit system. If you are looking for a package for the SI units, see [here](https://pkg.go.dev/github.com/gurre/si).
In order to enable unit comparison and conversion, the ccUnits package provides some helpers:
```go
NewUnit(unit string) Unit // create a new unit from some string like 'GHz', 'Mbyte' or 'kevents/s'
func GetUnitUnitFactor(in Unit, out Unit) (func(value float64) float64, error) // Get conversion function between two units
func GetPrefixFactor(in Prefix, out Prefix) func(value float64) float64 // Get conversion function between two prefixes
func GetUnitPrefixFactor(in Unit, out Prefix) (func(value float64) float64, Unit) // Get conversion function for prefix changes and the new unit for further use
type Unit interface {
Valid() bool
String() string
Short() string
AddUnitDenominator(div Measure)
}
```
In order to get the "normalized" string unit back or test for validity, you can use:
```go
u := NewUnit("MB")
fmt.Println(u.Valid()) // true
fmt.Printf("Long string %q", u.String()) // MegaBytes
fmt.Printf("Short string %q", u.Short()) // MBytes
v := NewUnit("foo")
fmt.Println(v.Valid()) // false
```
If you have two units or other components and need the conversion function:
```go
// Get conversion functions for 'kB' to 'MBytes'
u1 := NewUnit("kB")
u2 := NewUnit("MBytes")
convFunc, err := GetUnitUnitFactor(u1, u2) // Returns an error if the units have different measures
if err == nil {
v2 := convFunc(v1)
fmt.Printf("%f %s\n", v2, u2.Short())
}
// Get conversion function for 'kB' -> 'G' prefix.
// Returns the function and the new unit 'GBytes'
p1 := NewPrefix("G")
convFunc, u_p1 := GetUnitPrefixFactor(u1, p1)
// or
// convFunc, u_p1 := GetUnitPrefixStringFactor(u1, "G")
if convFunc != nil {
v2 := convFunc(v1)
fmt.Printf("%f %s\n", v2, u_p1.Short())
}
// Get conversion function for two prefixes: 'G' -> 'T'
p2 := NewPrefix("T")
convFunc = GetPrefixPrefixFactor(p1, p2)
if convFunc != nil {
v2 := convFunc(v1)
fmt.Printf("%f %s -> %f %s\n", v1, p1.Prefix(), v2, p2.Prefix())
}
```
(In the ClusterCockpit ecosystem the separation between values and units is useful, since they are commonly not stored as a single entity: the value is a field in the CCMetric, while the unit is a tag or a meta information.)
If you have a metric and want to derive a bandwidth or an events-per-second rate from it, you can use the original unit:
```go
in_unit, err := metric.GetMeta("unit")
if err == nil {
value, ok := metric.GetField("value")
if ok {
out_unit := NewUnit(in_unit)
out_unit.AddUnitDenominator("seconds")
seconds := timeDiff.Seconds()
y, err := lp.New(metric.Name()+"_bw",
metric.Tags(),
metric.Meta(),
map[string]interface{}{"value": value / seconds},
metric.Time())
if err == nil {
y.AddMeta("unit", out_unit.Short())
}
}
}
```
## Special unit detection
Some measures like Bytes and Flops are not divisible, so prefixes like Milli, Micro and Nano are not useful for them. This is quite handy, since a unit `mb` for `MBytes` is not uncommon but would by default be parsed as "MilliBytes".
Special parsing rules apply for the following measures: if `prefix==Milli`, use `prefix==Mega`:
- `Bytes`
- `Flops`
- `Packets`
- `Events`
- `Cycles`
- `Requests`
This means the prefixes `Micro` (like `ubytes`) and `Nano` (like `nflops/sec`) are not allowed and return an invalid unit, but you can specify `mflops` and `mb`.
Prefixes for `%` or `percent` are ignored.
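For illustration (a sketch; it assumes the `NewUnit` parser shown later in this diff):

```go
fmt.Println(NewUnit("mb").Valid())     // true  - parsed as MegaBytes, not MilliBytes
fmt.Println(NewUnit("ubytes").Valid()) // false - Micro is rejected for Bytes
fmt.Println(NewUnit("%").Short())      // "%"   - the prefix is ignored for percent
```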
## Supported prefixes
```go
const (
Base Prefix = 1
Exa = 1e18
Peta = 1e15
Tera = 1e12
Giga = 1e9
Mega = 1e6
Kilo = 1e3
Milli = 1e-3
Micro = 1e-6
Nano = 1e-9
Kibi = 1024
Mebi = 1024 * 1024
Gibi = 1024 * 1024 * 1024
Tebi = 1024 * 1024 * 1024 * 1024
)
```
The prefixes are detected using a regular expression `^([kKmMgGtTpP]?[i]?)(.*)` that splits the prefix from the measure. You probably don't need to deal with the prefixes in the code.
## Supported measures
```go
const (
None Measure = iota
Bytes
Flops
Percentage
TemperatureC
TemperatureF
Rotation
Hertz
Time
Watt
Joule
Cycles
Requests
Packets
Events
)
```
There is a regular expression for each of the measures, like `^([bB][yY]?[tT]?[eE]?[sS]?)` for the `Bytes` measure.
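A short sketch of how the measure parser behaves (assuming the `NewMeasure` implementation shown later in this diff):

```go
m := NewMeasure("bytes")
fmt.Println(m.String()) // "byte"
fmt.Println(m.Short())  // "B"
```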
## New units
If the selected units are not suitable for your metric, feel free to send a PR.
### New prefix
For a new prefix, add it to the big `const` in `ccUnitPrefix.go` and adjust the prefix-unit-splitting regular expression. Afterwards, you have to add cases to the three functions `String()`, `Prefix()` and `NewPrefix()`. `NewPrefix()` contains the parser (`k` or `K` -> `Kilo`). The other two are used for output: `String()` outputs a longer version of the prefix (`Kilo`), while `Prefix()` returns only the short notation (`K`).
### New measure
Adding new prefixes is probably rare but adding a new measure is a more common task. First, add it to the big `const` in `ccUnitMeasure.go`. Moreover, create a regular expression matching the measure (and pre-compile it like the others) and add the new expression to the matching in `NewMeasure()`. The `String()` and `Short()` functions return descriptive strings for the measure in long form (like `Hertz`) and short form (like `Hz`).
If there are special conversion rules between measures and you want to convert one measure to another, like temperatures in Celsius to Fahrenheit, a special case in `GetUnitUnitFactor()` is required.
### Special parsing rules
The two parsers for prefix and measure are called under the hood by `NewUnit()`, and some special rules might apply. As in the section above about 'special unit detection', special rules for your new measure might be required. Currently there are two special cases:
- Measures that are non-dividable like Flops, Bytes, Events, ... cannot use `Milli`, `Micro` and `Nano`. The prefix `m` is forced to `M` for these measures
- If the prefix is `p`/`P` (`Peta`) or `e`/`E` (`Exa`) and the measure is not detectable, detection is retried with the prefix attached. In the first round it tries, for example, prefix `p` and measure `ackets`, which fails; it then retries the detection with measure `packets` and an empty prefix (which resolves to the `Base` prefix).
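For example (a sketch based on the retry logic in `NewUnit`):

```go
u := NewUnit("packets") // round one: prefix "p" + measure "ackets" fails,
                        // retry: measure "packets" with Base prefix succeeds
fmt.Println(u.Valid())  // true
```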
## Limitations
The `ccUnits` package is a simple implementation of a unit system and comes with some limitations:
- The unit denominator (like `s` in `Mbyte/s`) can only have the `Base` prefix; you cannot specify `Byte/ms` for "Bytes per millisecond".

View File

@ -1,134 +0,0 @@
package units
import "regexp"
type Measure int
const (
InvalidMeasure Measure = iota
Bytes
Flops
Percentage
TemperatureC
TemperatureF
Rotation
Frequency
Time
Watt
Joule
Cycles
Requests
Packets
Events
)
type MeasureData struct {
Long string
Short string
Regex string
}
// Different names and regex used for input and output
var InvalidMeasureLong string = "Invalid"
var InvalidMeasureShort string = "inval"
var MeasuresMap map[Measure]MeasureData = map[Measure]MeasureData{
Bytes: {
Long: "byte",
Short: "B",
Regex: "^([bB][yY]?[tT]?[eE]?[sS]?)",
},
Flops: {
Long: "Flops",
Short: "F",
Regex: "^([fF][lL]?[oO]?[pP]?[sS]?)",
},
Percentage: {
Long: "Percent",
Short: "%",
Regex: "^(%|[pP]ercent)",
},
TemperatureC: {
Long: "DegreeC",
Short: "degC",
Regex: "^(deg[Cc]|°[cC])",
},
TemperatureF: {
Long: "DegreeF",
Short: "degF",
Regex: "^(deg[fF]|°[fF])",
},
Rotation: {
Long: "RPM",
Short: "RPM",
Regex: "^([rR][pP][mM])",
},
Frequency: {
Long: "Hertz",
Short: "Hz",
Regex: "^([hH][eE]?[rR]?[tT]?[zZ])",
},
Time: {
Long: "Seconds",
Short: "s",
Regex: "^([sS][eE]?[cC]?[oO]?[nN]?[dD]?[sS]?)",
},
Cycles: {
Long: "Cycles",
Short: "cyc",
Regex: "^([cC][yY][cC]?[lL]?[eE]?[sS]?)",
},
Watt: {
Long: "Watts",
Short: "W",
Regex: "^([wW][aA]?[tT]?[tT]?[sS]?)",
},
Joule: {
Long: "Joules",
Short: "J",
Regex: "^([jJ][oO]?[uU]?[lL]?[eE]?[sS]?)",
},
Requests: {
Long: "Requests",
Short: "requests",
Regex: "^([rR][eE][qQ][uU]?[eE]?[sS]?[tT]?[sS]?)",
},
Packets: {
Long: "Packets",
Short: "packets",
Regex: "^([pP][aA]?[cC]?[kK][eE]?[tT][sS]?)",
},
Events: {
Long: "Events",
Short: "events",
Regex: "^([eE][vV]?[eE]?[nN][tT][sS]?)",
},
}
// String returns the long string for the measure like 'Percent' or 'Seconds'
func (m *Measure) String() string {
if data, ok := MeasuresMap[*m]; ok {
return data.Long
}
return InvalidMeasureLong
}
// Short returns the short string for the measure like 'B' (Bytes), 's' (Time) or 'W' (Watt). It is recommended to use Short() over String().
func (m *Measure) Short() string {
if data, ok := MeasuresMap[*m]; ok {
return data.Short
}
return InvalidMeasureShort
}
// NewMeasure creates a new measure out of a string representing a measure like 'Bytes', 'Flops' and 'percent'.
// It uses regular expressions for matching.
func NewMeasure(unit string) Measure {
for m, data := range MeasuresMap {
regex := regexp.MustCompile(data.Regex)
match := regex.FindStringSubmatch(unit)
if match != nil {
return m
}
}
return InvalidMeasure
}

View File

@ -1,192 +0,0 @@
package units
import (
"math"
"regexp"
)
type Prefix float64
const (
InvalidPrefix Prefix = iota
Base = 1
Yotta = 1e24
Zetta = 1e21
Exa = 1e18
Peta = 1e15
Tera = 1e12
Giga = 1e9
Mega = 1e6
Kilo = 1e3
Milli = 1e-3
Micro = 1e-6
Nano = 1e-9
Kibi = 1024
Mebi = 1024 * 1024
Gibi = 1024 * 1024 * 1024
Tebi = 1024 * 1024 * 1024 * 1024
Pebi = 1024 * 1024 * 1024 * 1024 * 1024
Exbi = 1024 * 1024 * 1024 * 1024 * 1024 * 1024
Zebi = 1024 * 1024 * 1024 * 1024 * 1024 * 1024 * 1024
Yobi = 1024 * 1024 * 1024 * 1024 * 1024 * 1024 * 1024 * 1024
)
const PrefixUnitSplitRegexStr = `^([kKmMgGtTpPeEzZyY]?[i]?)(.*)`
var prefixUnitSplitRegex = regexp.MustCompile(PrefixUnitSplitRegexStr)
type PrefixData struct {
Long string
Short string
Regex string
}
// Different names and regex used for input and output
var InvalidPrefixLong string = "Invalid"
var InvalidPrefixShort string = "inval"
var PrefixDataMap map[Prefix]PrefixData = map[Prefix]PrefixData{
Base: {
Long: "",
Short: "",
Regex: "^$",
},
Kilo: {
Long: "Kilo",
Short: "K",
Regex: "^[kK]$",
},
Mega: {
Long: "Mega",
Short: "M",
Regex: "^[M]$",
},
Giga: {
Long: "Giga",
Short: "G",
Regex: "^[gG]$",
},
Tera: {
Long: "Tera",
Short: "T",
Regex: "^[tT]$",
},
Peta: {
Long: "Peta",
Short: "P",
Regex: "^[pP]$",
},
Exa: {
Long: "Exa",
Short: "E",
Regex: "^[eE]$",
},
Zetta: {
Long: "Zetta",
Short: "Z",
Regex: "^[zZ]$",
},
Yotta: {
Long: "Yotta",
Short: "Y",
Regex: "^[yY]$",
},
Milli: {
Long: "Milli",
Short: "m",
Regex: "^[m]$",
},
Micro: {
Long: "Micro",
Short: "u",
Regex: "^[u]$",
},
Nano: {
Long: "Nano",
Short: "n",
Regex: "^[n]$",
},
Kibi: {
Long: "Kibi",
Short: "Ki",
Regex: "^[kK][i]$",
},
Mebi: {
Long: "Mebi",
Short: "Mi",
Regex: "^[M][i]$",
},
Gibi: {
Long: "Gibi",
Short: "Gi",
Regex: "^[gG][i]$",
},
Tebi: {
Long: "Tebi",
Short: "Ti",
Regex: "^[tT][i]$",
},
Pebi: {
Long: "Pebi",
Short: "Pi",
Regex: "^[pP][i]$",
},
Exbi: {
Long: "Exbi",
Short: "Ei",
Regex: "^[eE][i]$",
},
Zebi: {
Long: "Zebi",
Short: "Zi",
Regex: "^[zZ][i]$",
},
Yobi: {
Long: "Yobi",
Short: "Yi",
Regex: "^[yY][i]$",
},
}
// String returns the long string for the prefix like 'Kilo' or 'Mega'
func (p *Prefix) String() string {
if data, ok := PrefixDataMap[*p]; ok {
return data.Long
}
return InvalidMeasureLong
}
// Prefix returns the short string for the prefix like 'K', 'M' or 'G'. It is recommended to use Prefix() over String().
func (p *Prefix) Prefix() string {
if data, ok := PrefixDataMap[*p]; ok {
return data.Short
}
return InvalidMeasureShort
}
// NewPrefix creates a new prefix out of a string representing a unit like 'k', 'K', 'M' or 'G'.
func NewPrefix(prefix string) Prefix {
for p, data := range PrefixDataMap {
regex := regexp.MustCompile(data.Regex)
match := regex.FindStringSubmatch(prefix)
if match != nil {
return p
}
}
return InvalidPrefix
}
func getExponent(p float64) int {
count := 0
for p > 1.0 {
p = p / 1000.0
count++
}
return count * 3
}
func NewPrefixFromFactor(op Prefix, e int) Prefix {
f := float64(op)
exp := math.Pow10(getExponent(f) - e)
return Prefix(exp)
}
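// Example (not part of the original file): getExponent(1e6) == 6, so
// NewPrefixFromFactor(Mega, 3) == Prefix(1e3) == Kilo.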

View File

@ -1,339 +0,0 @@
// Unit system for cluster monitoring metrics like bytes, flops and events
package units
import (
"fmt"
"math"
"strings"
"github.com/ClusterCockpit/cc-backend/pkg/schema"
)
type unit struct {
prefix Prefix
measure Measure
divMeasure Measure
}
type Unit interface {
Valid() bool
String() string
Short() string
AddUnitDenominator(div Measure)
getPrefix() Prefix
getMeasure() Measure
getUnitDenominator() Measure
setPrefix(p Prefix)
}
var INVALID_UNIT = NewUnit("foobar")
// Valid checks whether a unit is a valid unit.
// A unit is valid if it has at least a prefix and a measure.
// The unit denominator is optional.
func (u *unit) Valid() bool {
return u.prefix != InvalidPrefix && u.measure != InvalidMeasure
}
// String returns the long string for the unit like 'KiloHertz' or 'MegaBytes'
func (u *unit) String() string {
if u.divMeasure != InvalidMeasure {
return fmt.Sprintf("%s%s/%s", u.prefix.String(), u.measure.String(), u.divMeasure.String())
} else {
return fmt.Sprintf("%s%s", u.prefix.String(), u.measure.String())
}
}
// Short returns the short string for the unit like 'kHz' or 'MByte'. It is recommended to use Short() over String().
func (u *unit) Short() string {
if u.divMeasure != InvalidMeasure {
return fmt.Sprintf("%s%s/%s", u.prefix.Prefix(), u.measure.Short(), u.divMeasure.Short())
} else {
return fmt.Sprintf("%s%s", u.prefix.Prefix(), u.measure.Short())
}
}
// AddUnitDenominator adds a unit denominator to an existing unit. It can be used if you want to derive, e.g., a bandwidth from a data volume.
// The data volume is in a Byte unit like 'kByte', and by dividing it by the runtime in seconds, we get the bandwidth. We can use the
// data volume unit and add 'Second' as the unit denominator.
func (u *unit) AddUnitDenominator(div Measure) {
u.divMeasure = div
}
func (u *unit) getPrefix() Prefix {
return u.prefix
}
func (u *unit) setPrefix(p Prefix) {
u.prefix = p
}
func (u *unit) getMeasure() Measure {
return u.measure
}
func (u *unit) getUnitDenominator() Measure {
return u.divMeasure
}
func ConvertValue(v *float64, from string, to string) {
uf := NewUnit(from)
ut := NewUnit(to)
factor := float64(uf.getPrefix()) / float64(ut.getPrefix())
*v = math.Ceil(*v * factor)
}
func ConvertSeries(s []float64, from string, to string) {
uf := NewUnit(from)
ut := NewUnit(to)
factor := float64(uf.getPrefix()) / float64(ut.getPrefix())
for i := 0; i < len(s); i++ {
s[i] = math.Ceil(s[i] * factor)
}
}
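// Example (not part of the original file), cf. TestConvertValue below:
//   v := 103456.0
//   ConvertValue(&v, "MB/s", "GB/s") // v == 104 afterwards (math.Ceil rounds up)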
func getNormalizationFactor(v float64) (float64, int) {
count := 0
scale := -3
if v > 1000.0 {
for v > 1000.0 {
v *= 1e-3
count++
}
} else {
for v < 1.0 {
v *= 1e3
count++
}
scale = 3
}
return math.Pow10(count * scale), count * scale
}
func NormalizeValue(v *float64, us string, nu *string) {
u := NewUnit(us)
f, e := getNormalizationFactor((*v))
*v = math.Ceil(*v * f)
u.setPrefix(NewPrefixFromFactor(u.getPrefix(), e))
*nu = u.Short()
}
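// Example (not part of the original file), cf. TestNormalizeValue below:
//   v, nu := 103456.0, ""
//   NormalizeValue(&v, "MB/s", &nu) // v == 104, nu == "GB/s"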
func NormalizeSeries(s []float64, avg float64, us string, nu *string) {
u := NewUnit(us)
f, e := getNormalizationFactor(avg)
for i := 0; i < len(s); i++ {
s[i] *= f
s[i] = math.Ceil(s[i])
}
u.setPrefix(NewPrefixFromFactor(u.getPrefix(), e))
fmt.Printf("Prefix: %e \n", u.getPrefix())
*nu = u.Short()
}
func ConvertUnitString(us string) schema.Unit {
var nu schema.Unit
if us == "CPI" ||
us == "IPC" ||
us == "load" ||
us == "" {
nu.Base = us
return nu
}
u := NewUnit(us)
p := u.getPrefix()
if p.Prefix() != "" {
prefix := p.Prefix()
nu.Prefix = &prefix
}
m := u.getMeasure()
d := u.getUnitDenominator()
if d.Short() != "inval" {
nu.Base = fmt.Sprintf("%s/%s", m.Short(), d.Short())
} else {
nu.Base = m.Short()
}
return nu
}
// GetPrefixPrefixFactor creates the default conversion function between two prefixes.
// It returns a conversion function for the value.
func GetPrefixPrefixFactor(in Prefix, out Prefix) func(value interface{}) interface{} {
var factor = 1.0
var in_prefix = float64(in)
var out_prefix = float64(out)
factor = in_prefix / out_prefix
conv := func(value interface{}) interface{} {
switch v := value.(type) {
case float64:
return v * factor
case float32:
return float32(float64(v) * factor)
case int:
return int(float64(v) * factor)
case int32:
return int32(float64(v) * factor)
case int64:
return int64(float64(v) * factor)
case uint:
return uint(float64(v) * factor)
case uint32:
return uint32(float64(v) * factor)
case uint64:
return uint64(float64(v) * factor)
}
return value
}
return conv
}
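// Example (not part of the original file): converting Kilo to Base scales by
// 1000 and preserves the input type:
//   conv := GetPrefixPrefixFactor(Kilo, Base)
//   fmt.Println(conv(1.0)) // 1000 (float64 in, float64 out)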
// This is the conversion function between temperatures in Celsius to Fahrenheit
func convertTempC2TempF(value interface{}) interface{} {
switch v := value.(type) {
case float64:
return (v * 1.8) + 32
case float32:
return (v * 1.8) + 32
case int:
return int((float64(v) * 1.8) + 32)
case int32:
return int32((float64(v) * 1.8) + 32)
case int64:
return int64((float64(v) * 1.8) + 32)
case uint:
return uint((float64(v) * 1.8) + 32)
case uint32:
return uint32((float64(v) * 1.8) + 32)
case uint64:
return uint64((float64(v) * 1.8) + 32)
}
return value
}
// This is the conversion function between temperatures in Fahrenheit to Celsius
func convertTempF2TempC(value interface{}) interface{} {
switch v := value.(type) {
case float64:
return (v - 32) / 1.8
case float32:
return (v - 32) / 1.8
case int:
return int(((float64(v) - 32) / 1.8))
case int32:
return int32(((float64(v) - 32) / 1.8))
case int64:
return int64(((float64(v) - 32) / 1.8))
case uint:
return uint(((float64(v) - 32) / 1.8))
case uint32:
return uint32(((float64(v) - 32) / 1.8))
case uint64:
return uint64(((float64(v) - 32) / 1.8))
}
return value
}
// GetPrefixStringPrefixStringFactor is a wrapper for GetPrefixPrefixFactor with string inputs instead
// of prefixes. It also returns a conversion function for the value.
func GetPrefixStringPrefixStringFactor(in string, out string) func(value interface{}) interface{} {
var i Prefix = NewPrefix(in)
var o Prefix = NewPrefix(out)
return GetPrefixPrefixFactor(i, o)
}
// GetUnitPrefixFactor gets the conversion function and resulting unit for a unit and a prefix. This is
// the most common case where you have some input unit and want to convert it to the same unit but with
// a different prefix. The returned unit represents the value after conversion.
func GetUnitPrefixFactor(in Unit, out Prefix) (func(value interface{}) interface{}, Unit) {
outUnit := NewUnit(in.Short())
if outUnit.Valid() {
outUnit.setPrefix(out)
conv := GetPrefixPrefixFactor(in.getPrefix(), out)
return conv, outUnit
}
return nil, INVALID_UNIT
}
// GetUnitPrefixStringFactor gets the conversion function and resulting unit for a unit and a prefix as string.
// It is a wrapper for GetUnitPrefixFactor
func GetUnitPrefixStringFactor(in Unit, out string) (func(value interface{}) interface{}, Unit) {
var o Prefix = NewPrefix(out)
return GetUnitPrefixFactor(in, o)
}
// GetUnitStringPrefixStringFactor gets the conversion function and resulting unit for a unit and a prefix when both are only string representations.
// This is just a wrapper for GetUnitPrefixFactor with the given input unit and the desired output prefix.
func GetUnitStringPrefixStringFactor(in string, out string) (func(value interface{}) interface{}, Unit) {
var i = NewUnit(in)
return GetUnitPrefixStringFactor(i, out)
}
// GetUnitUnitFactor gets the conversion function and (maybe) error for unit to unit conversion.
// It is basically a wrapper for GetPrefixPrefixFactor with some special cases for temperature
// conversion between Fahrenheit and Celsius.
func GetUnitUnitFactor(in Unit, out Unit) (func(value interface{}) interface{}, error) {
if in.getMeasure() == TemperatureC && out.getMeasure() == TemperatureF {
return convertTempC2TempF, nil
} else if in.getMeasure() == TemperatureF && out.getMeasure() == TemperatureC {
return convertTempF2TempC, nil
} else if in.getMeasure() != out.getMeasure() || in.getUnitDenominator() != out.getUnitDenominator() {
return func(value interface{}) interface{} { return 1.0 }, fmt.Errorf("UNITS/UNITS > invalid measures in in and out Unit")
}
return GetPrefixPrefixFactor(in.getPrefix(), out.getPrefix()), nil
}
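// Example (not part of the original file): Celsius to Fahrenheit takes the
// special-case path:
//   conv, err := GetUnitUnitFactor(NewUnit("degC"), NewUnit("degF"))
//   if err == nil {
//       fmt.Println(conv(100.0)) // 212
//   }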
// NewUnit creates a new unit out of a string representing a unit like 'Mbyte/s' or 'GHz'.
// It uses regular expressions to detect the prefix, unit and (maybe) unit denominator.
func NewUnit(unitStr string) Unit {
u := &unit{
prefix: InvalidPrefix,
measure: InvalidMeasure,
divMeasure: InvalidMeasure,
}
matches := prefixUnitSplitRegex.FindStringSubmatch(unitStr)
if len(matches) > 2 {
pre := NewPrefix(matches[1])
measures := strings.Split(matches[2], "/")
m := NewMeasure(measures[0])
// Special case for prefix 'p' or 'P' (Peta) and measures starting with 'p' or 'P'
// like 'packets' or 'percent'. Same for 'e' or 'E' (Exa) for measures starting with
// 'e' or 'E' like 'events'
if m == InvalidMeasure {
switch pre {
case Peta, Exa:
t := NewMeasure(matches[1] + measures[0])
if t != InvalidMeasure {
m = t
pre = Base
}
}
}
div := InvalidMeasure
if len(measures) > 1 {
div = NewMeasure(measures[1])
}
switch m {
// Special case for 'm' as prefix for Bytes and some others as there is no unit like MilliBytes
case Bytes, Flops, Packets, Events, Cycles, Requests:
if pre == Milli {
pre = Mega
}
// Special case for percentage. No/ignore prefix
case Percentage:
pre = Base
}
if pre != InvalidPrefix && m != InvalidMeasure {
u.prefix = pre
u.measure = m
if div != InvalidMeasure {
u.divMeasure = div
}
}
}
return u
}

View File

@ -1,307 +0,0 @@
package units
import (
"fmt"
"reflect"
"regexp"
"testing"
)
func TestUnitsExact(t *testing.T) {
testCases := []struct {
in string
want Unit
}{
{"b", NewUnit("Bytes")},
{"B", NewUnit("Bytes")},
{"byte", NewUnit("Bytes")},
{"bytes", NewUnit("Bytes")},
{"BYtes", NewUnit("Bytes")},
{"Mb", NewUnit("MBytes")},
{"MB", NewUnit("MBytes")},
{"Mbyte", NewUnit("MBytes")},
{"Mbytes", NewUnit("MBytes")},
{"MbYtes", NewUnit("MBytes")},
{"Gb", NewUnit("GBytes")},
{"GB", NewUnit("GBytes")},
{"Hz", NewUnit("Hertz")},
{"MHz", NewUnit("MHertz")},
{"GHz", NewUnit("GHertz")},
{"pkts", NewUnit("Packets")},
{"packets", NewUnit("Packets")},
{"packet", NewUnit("Packets")},
{"flop", NewUnit("Flops")},
{"flops", NewUnit("Flops")},
{"floPS", NewUnit("Flops")},
{"Mflop", NewUnit("MFlops")},
{"Gflop", NewUnit("GFlops")},
{"gflop", NewUnit("GFlops")},
{"%", NewUnit("Percent")},
{"percent", NewUnit("Percent")},
{"degc", NewUnit("degC")},
{"degC", NewUnit("degC")},
{"degf", NewUnit("degF")},
{"°f", NewUnit("degF")},
{"events", NewUnit("events")},
{"event", NewUnit("events")},
{"EveNts", NewUnit("events")},
{"reqs", NewUnit("requests")},
{"reQuEsTs", NewUnit("requests")},
{"Requests", NewUnit("requests")},
{"cyc", NewUnit("cycles")},
{"cy", NewUnit("cycles")},
{"Cycles", NewUnit("cycles")},
{"J", NewUnit("Joules")},
{"Joule", NewUnit("Joules")},
{"joule", NewUnit("Joules")},
{"W", NewUnit("Watt")},
{"Watts", NewUnit("Watt")},
{"watt", NewUnit("Watt")},
{"s", NewUnit("seconds")},
{"sec", NewUnit("seconds")},
{"secs", NewUnit("seconds")},
{"RPM", NewUnit("rpm")},
{"rPm", NewUnit("rpm")},
{"watt/byte", NewUnit("W/B")},
{"watts/bytes", NewUnit("W/B")},
{"flop/byte", NewUnit("flops/Bytes")},
{"F/B", NewUnit("flops/Bytes")},
}
compareUnitExact := func(in, out Unit) bool {
if in.getMeasure() == out.getMeasure() && in.getUnitDenominator() == out.getUnitDenominator() && in.getPrefix() == out.getPrefix() {
return true
}
return false
}
for _, c := range testCases {
u := NewUnit(c.in)
if (!u.Valid()) || (!compareUnitExact(u, c.want)) {
t.Errorf("func NewUnit(%q) == %q, want %q", c.in, u.String(), c.want.String())
} else {
t.Logf("NewUnit(%q) == %q", c.in, u.String())
}
}
}
func TestUnitUnitConversion(t *testing.T) {
testCases := []struct {
in string
want Unit
prefixFactor float64
}{
{"kb", NewUnit("Bytes"), 1000},
{"Mb", NewUnit("Bytes"), 1000000},
{"Mb/s", NewUnit("Bytes/s"), 1000000},
{"Flops/s", NewUnit("MFlops/s"), 1e-6},
{"Flops/s", NewUnit("GFlops/s"), 1e-9},
{"MHz", NewUnit("Hertz"), 1e6},
{"kb", NewUnit("Kib"), 1000.0 / 1024},
{"Mib", NewUnit("MBytes"), (1024 * 1024.0) / (1e6)},
{"mb", NewUnit("MBytes"), 1.0},
}
compareUnitWithPrefix := func(in, out Unit, factor float64) bool {
if in.getMeasure() == out.getMeasure() && in.getUnitDenominator() == out.getUnitDenominator() {
if f := GetPrefixPrefixFactor(in.getPrefix(), out.getPrefix()); f(1.0) == factor {
return true
} else {
fmt.Println(f(1.0))
}
}
return false
}
for _, c := range testCases {
u := NewUnit(c.in)
if (!u.Valid()) || (!compareUnitWithPrefix(u, c.want, c.prefixFactor)) {
t.Errorf("GetPrefixPrefixFactor(%q, %q) invalid, want %q with factor %g", c.in, u.String(), c.want.String(), c.prefixFactor)
} else {
t.Logf("GetPrefixPrefixFactor(%q, %q) = %g", c.in, c.want.String(), c.prefixFactor)
}
}
}
func TestUnitPrefixConversion(t *testing.T) {
testCases := []struct {
in string
want string
prefixFactor float64
wantUnit Unit
}{
{"KBytes", "", 1000, NewUnit("Bytes")},
{"MBytes", "", 1e6, NewUnit("Bytes")},
{"MBytes", "G", 1e-3, NewUnit("GBytes")},
{"mb", "M", 1, NewUnit("MBytes")},
}
compareUnitPrefix := func(in Unit, out Prefix, factor float64, outUnit Unit) bool {
if in.Valid() {
conv, unit := GetUnitPrefixFactor(in, out)
value := conv(1.0)
if value == factor && unit.String() == outUnit.String() {
return true
}
}
return false
}
for _, c := range testCases {
u := NewUnit(c.in)
p := NewPrefix(c.want)
if (!u.Valid()) || (!compareUnitPrefix(u, p, c.prefixFactor, c.wantUnit)) {
t.Errorf("GetUnitPrefixFactor(%q, %q) invalid, want %q with factor %g", c.in, p.Prefix(), c.wantUnit.String(), c.prefixFactor)
} else {
t.Logf("GetUnitPrefixFactor(%q, %q) = %g", c.in, c.wantUnit.String(), c.prefixFactor)
}
}
}
func TestPrefixPrefixConversion(t *testing.T) {
testCases := []struct {
in string
want string
prefixFactor float64
}{
{"K", "", 1000},
{"M", "", 1e6},
{"M", "G", 1e-3},
{"", "M", 1e-6},
{"", "m", 1e3},
{"m", "n", 1e6},
//{"", "n", 1e9}, //does not work because of IEEE rounding problems
}
for _, c := range testCases {
i := NewPrefix(c.in)
o := NewPrefix(c.want)
if i != InvalidPrefix && o != InvalidPrefix {
conv := GetPrefixPrefixFactor(i, o)
value := conv(1.0)
if value != c.prefixFactor {
t.Errorf("GetPrefixPrefixFactor(%q, %q) invalid, want %q with factor %g but got %g", c.in, c.want, o.Prefix(), c.prefixFactor, value)
} else {
t.Logf("GetPrefixPrefixFactor(%q, %q) = %g", c.in, c.want, c.prefixFactor)
}
}
}
}
func TestMeasureRegex(t *testing.T) {
for _, data := range MeasuresMap {
_, err := regexp.Compile(data.Regex)
if err != nil {
t.Errorf("failed to compile regex '%s': %s", data.Regex, err.Error())
}
t.Logf("succussfully compiled regex '%s' for measure %s", data.Regex, data.Long)
}
}
func TestPrefixRegex(t *testing.T) {
for _, data := range PrefixDataMap {
_, err := regexp.Compile(data.Regex)
if err != nil {
t.Errorf("failed to compile regex '%s': %s", data.Regex, err.Error())
}
t.Logf("succussfully compiled regex '%s' for prefix %s", data.Regex, data.Long)
}
}
func TestConvertValue(t *testing.T) {
v := float64(103456)
ConvertValue(&v, "MB/s", "GB/s")
if v != 104.00 {
t.Errorf("Failed ConvertValue: Want 103.456, Got %f", v)
}
}
func TestConvertValueUp(t *testing.T) {
v := float64(10.3456)
ConvertValue(&v, "GB/s", "MB/s")
if v != 10346.00 {
t.Errorf("Failed ConvertValue: Want 10346.00, Got %f", v)
}
}
func TestConvertSeries(t *testing.T) {
s := []float64{2890031237, 23998994567, 389734042344, 390349424345}
r := []float64{3, 24, 390, 391}
ConvertSeries(s, "F/s", "GF/s")
if !reflect.DeepEqual(s, r) {
t.Errorf("Failed ConvertValue: Want 3, 24, 390, 391, Got %v", s)
}
}
func TestNormalizeValue(t *testing.T) {
var s string
v := float64(103456)
NormalizeValue(&v, "MB/s", &s)
if v != 104.00 {
t.Errorf("Failed ConvertValue: Want 104.00, Got %f", v)
}
if s != "GB/s" {
t.Errorf("Failed Prefix or unit: Want GB/s, Got %s", s)
}
}
func TestNormalizeValueNoPrefix(t *testing.T) {
var s string
v := float64(103458596)
NormalizeValue(&v, "F/s", &s)
if v != 104.00 {
t.Errorf("Failed ConvertValue: Want 104.00, Got %f", v)
}
if s != "MF/s" {
t.Errorf("Failed Prefix or unit: Want MF/s, Got %s", s)
}
}
func TestNormalizeValueKeep(t *testing.T) {
var s string
v := float64(345)
NormalizeValue(&v, "MB/s", &s)
if v != 345.00 {
t.Errorf("Failed ConvertValue: Want 104.00, Got %f", v)
}
if s != "MB/s" {
t.Errorf("Failed Prefix or unit: Want GB/s, Got %s", s)
}
}
func TestNormalizeValueDown(t *testing.T) {
var s string
v := float64(0.0004578)
NormalizeValue(&v, "GB/s", &s)
if v != 458.00 {
t.Errorf("Failed ConvertValue: Want 458.00, Got %f", v)
}
if s != "KB/s" {
t.Errorf("Failed Prefix or unit: Want KB/s, Got %s", s)
}
}
func TestNormalizeSeries(t *testing.T) {
var us string
s := []float64{2890031237, 23998994567, 389734042344, 390349424345}
r := []float64{3, 24, 390, 391}
total := 0.0
for _, number := range s {
total += number
}
avg := total / float64(len(s))
fmt.Printf("AVG: %e\n", avg)
NormalizeSeries(s, avg, "KB/s", &us)
if !reflect.DeepEqual(s, r) {
t.Errorf("Failed ConvertValue: Want 3, 24, 390, 391, Got %v", s)
}
if us != "TB/s" {
t.Errorf("Failed Prefix or unit: Want TB/s, Got %s", us)
}
}

View File

@ -1,496 +0,0 @@
{
"cpu_used": {
"core": {
"unit": {
"base": ""
},
"timestep": 30,
"series": [
{
"hostname": "taurusi6489",
"id": "0",
"statistics": {
"min": 0.09090909090909093,
"avg": 0.9173553719008265,
"max": 1.0000000000000002
},
"data": [
0.09090909090909093,
0.9999999999999999,
1.0,
1.0000000000000002,
1.0,
1.0000000000000002,
0.9999999999999999,
1.0,
1.0,
1.0,
1.0
]
},
{
"hostname": "taurusi6489",
"id": "1",
"statistics": {
"min": 0.03694102397926118,
"avg": 0.045968409230268584,
"max": 0.08809840425531917
},
"data": [
0.08809840425531917,
0.05710659898477157,
0.04034861200774694,
0.037962362102530824,
0.03976721629485936,
0.04163976759199483,
0.03694102397926118,
0.03821243523316062,
0.03851132686084142,
0.044752092723760455,
0.04231266149870802
]
},
{
"hostname": "taurusi6490",
"id": "10",
"statistics": {
"min": 0.10505319148936171,
"avg": 0.9186411992263056,
"max": 1.0000000000000002
},
"data": [
0.10505319148936171,
1.0000000000000002,
1.0,
1.0,
1.0,
0.9999999999999999,
1.0,
0.9999999999999999,
1.0,
1.0,
1.0
]
},
{
"hostname": "taurusi6490",
"id": "11",
"statistics": {
"min": 0.05286048845767815,
"avg": 0.07053823838706144,
"max": 0.075148113501715
},
"data": [
0.05286048845767815,
0.06936597614563718,
0.07254534083802376,
0.075148113501715,
0.06909547738693468,
0.07372696032489846,
0.07077983088005012,
0.07082419304293325,
0.07424812030075188,
0.07285803627267043,
0.07446808510638298
]
}
],
"statisticsSeries": null
}
},
"ipc": {
"core": {
"unit": {
"base": "IPC"
},
"timestep": 60,
"series": [
{
"hostname": "taurusi6489",
"id": "0",
"statistics": {
"min": 1.3808406263195592,
"avg": 1.3960848578375105,
"max": 1.4485575599350569
},
"data": [
1.4485575599350569,
1.3808406263195592,
1.3830284413690626,
1.3836692663348698,
1.3843283952290035
]
},
{
"hostname": "taurusi6489",
"id": "1",
"statistics": {
"min": 0.30469640475234366,
"avg": 0.8816944294664065,
"max": 1.797623522191001
},
"data": [
1.797623522191001,
0.954395633726228,
1.0019972349956185,
0.30469640475234366,
0.3497593516668412
]
},
{
"hostname": "taurusi6490",
"id": "10",
"statistics": {
"min": 1.3791232173760588,
"avg": 1.3850247295506815,
"max": 1.386710405495511
},
"data": [
1.3791232173760588,
1.38619977419787,
1.386397917938246,
1.3866923327457215,
1.386710405495511
]
},
{
"hostname": "taurusi6490",
"id": "11",
"statistics": {
"min": 0.6424094604392216,
"avg": 0.9544442638400293,
"max": 1.2706704244636826
},
"data": [
1.2706704244636826,
0.6424094604392216,
0.9249973908234796,
0.6940110823242276,
1.2401329611495353
]
}
],
"statisticsSeries": null
}
},
"flops_any": {
"core": {
"unit": {
"base": "F/s"
},
"timestep": 60,
"series": [
{
"hostname": "taurusi6489",
"id": "0",
"statistics": {
"min": 0.0,
"avg": 184.2699002412084,
"max": 921.3495012060421
},
"data": [
921.3495012060421,
0.0,
0.0,
0.0,
0.0
]
},
{
"hostname": "taurusi6489",
"id": "1",
"statistics": {
"min": 0.13559227208748068,
"avg": 273.2997868356056,
"max": 1355.9227390817396
},
"data": [
1355.9227390817396,
8.94908797747172,
0.6779613312519499,
0.13559227208748068,
0.8135535154771758
]
},
{
"hostname": "taurusi6490",
"id": "10",
"statistics": {
"min": 0.0,
"avg": 1678.8419461262179,
"max": 4346.591400350933
},
"data": [
4346.591400350933,
0.0,
578.4248288199713,
0.0,
3469.193501460185
]
},
{
"hostname": "taurusi6490",
"id": "11",
"statistics": {
"min": 45.28689133054866,
"avg": 609.6644949204072,
"max": 2582.7080822873186
},
"data": [
2582.7080822873186,
45.28689133054866,
48.67663233623293,
47.591911855555026,
324.0589567923803
]
}
],
"statisticsSeries": null
}
},
"mem_bw": {
"socket": {
"unit": {
"base": "B/s"
},
"timestep": 60,
"series": [
{
"hostname": "taurusi6489",
"id": "0",
"statistics": {
"min": 653671812.1661415,
"avg": 1637585527.5854635,
"max": 2614718291.9554267
},
"data": [
653671812.1661415,
2614718291.9554267,
1732453371.7073724,
1612865229.8704093,
1574218932.2279677
]
},
{
"hostname": "taurusi6490",
"id": "0",
"statistics": {
"min": 1520190251.61048,
"avg": 1572477682.3850098,
"max": 1688960732.2760606
},
"data": [
1688960732.2760606,
1580140679.8216474,
1520190251.61048,
1541841829.6250021,
1531254918.591859
]
}
],
"statisticsSeries": null
}
},
"file_bw": {
"node": {
"unit": {
"base": "B/s"
},
"timestep": 30,
"series": [
{
"hostname": "taurusi6489",
"statistics": {
"min": 0.0,
"avg": 190352.6328851857,
"max": 2093878.361723524
},
"data": [
0.0,
0.0,
0.0,
0.6000135186380174,
0.0,
0.0,
2093878.361723524,
0.0,
0.0,
0.0,
0.0
]
},
{
"hostname": "taurusi6490",
"statistics": {
"min": 0.0,
"avg": 1050832.4509396513,
"max": 11559156.360352296
},
"data": [
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
11559156.360352296,
0.0,
0.5999838690326298,
0.0,
0.0
]
}
],
"statisticsSeries": null
}
},
"net_bw": {
"node": {
"unit": {
"base": "B/s"
},
"timestep": 30,
"series": [
{
"hostname": "taurusi6489",
"statistics": {
"min": 126779.89655880642,
"avg": 653834.5091507058,
"max": 1285639.5107541133
},
"data": [
1158202.7403032137,
126779.89655880642,
419017.91939583793,
345766.3974972795,
645419.3296982117,
644667.7333333333,
1285639.5107541133,
643481.2108874657,
640025.3562553325,
643241.4875354709,
639938.0184386979
]
},
{
"hostname": "taurusi6490",
"statistics": {
"min": 640156.9862985397,
"avg": 872367.6551257868,
"max": 1916309.7075416835
},
"data": [
1774843.146788355,
643218.3646426039,
641681.1031071587,
644690.1512268113,
647183.5650609672,
644439.3303402043,
1916309.7075416835,
643748.3241006166,
757189.8273227927,
642583.6999539217,
640156.9862985397
]
}
],
"statisticsSeries": null
}
},
"mem_used": {
"node": {
"unit": {
"base": "B"
},
"timestep": 30,
"series": [
{
"hostname": "taurusi6489",
"statistics": {
"min": 2779066368.0,
"avg": 9282117259.636364,
"max": 10202595328.0
},
"data": [
2779066368.0,
8518217728.0,
9852760064.0,
9979805696.0,
10039619584.0,
10087104512.0,
10136084480.0,
10202595328.0,
10154196992.0,
10177409024.0,
10176430080.0
]
},
{
"hostname": "taurusi6490",
"statistics": {
"min": 9993277440.0,
"avg": 10013080110.545454,
"max": 10039676928.0
},
"data": [
10001317888.0,
10013028352.0,
10006728704.0,
10039676928.0,
10035838976.0,
10033356800.0,
10006577152.0,
10005659648.0,
9993277440.0,
9993564160.0,
10014855168.0
]
}
],
"statisticsSeries": null
}
},
"cpu_power": {
"socket": {
"unit": {
"base": "W"
},
"timestep": 60,
"series": [
{
"hostname": "taurusi6489",
"id": "0",
"statistics": {
"min": 35.50647456742635,
"avg": 72.08313211552377,
"max": 83.33799371150049
},
"data": [
35.50647456742635,
75.65022009482759,
83.33799371150049,
83.00405043233219,
82.9169217715322
]
},
{
"hostname": "taurusi6490",
"id": "0",
"statistics": {
"min": 83.8466923147859,
"avg": 85.18572681122097,
"max": 85.83909286117324
},
"data": [
83.8466923147859,
85.58816979864088,
85.31266819129794,
85.83909286117324,
85.34201089020692
]
}
],
"statisticsSeries": null
}
}
}
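
Every metric block above follows the same shape: a unit, a timestep in seconds, and one series per host (plus a hardware-thread id where the scope is finer than node) carrying min/avg/max statistics next to the raw data points. A minimal Go sketch for decoding one such block; the type names here are illustrative stand-ins, not the actual cc-backend schema types, which live in pkg/schema:

package main

import (
	"encoding/json"
	"fmt"
)

// Illustrative mirror of the JSON above; the real definitions live in pkg/schema.
type Series struct {
	Hostname   string `json:"hostname"`
	Id         string `json:"id,omitempty"` // set for core/socket scope, absent for node scope
	Statistics struct {
		Min float64 `json:"min"`
		Avg float64 `json:"avg"`
		Max float64 `json:"max"`
	} `json:"statistics"`
	Data []float64 `json:"data"`
}

type JobMetric struct {
	Unit struct {
		Base string `json:"base"`
	} `json:"unit"`
	Timestep int      `json:"timestep"` // seconds between data points
	Series   []Series `json:"series"`
}

func main() {
	raw := []byte(`{"unit":{"base":"B/s"},"timestep":60,"series":[{"hostname":"taurusi6489","id":"0","statistics":{"min":1,"avg":2,"max":3},"data":[1,2,3]}]}`)

	var m JobMetric
	if err := json.Unmarshal(raw, &m); err != nil {
		panic(err)
	}
	fmt.Println(m.Series[0].Hostname, m.Series[0].Statistics.Avg)
}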

View File

@@ -1,98 +0,0 @@
{
"jobId": 20639587,
"user": "s3804552",
"project": "p_speichersysteme",
"cluster": "taurus",
"subCluster": "haswell",
"partition": "haswell64",
"numNodes": 2,
"numHwthreads": 4,
"exclusive": 0,
"startTime": 1635856524,
"jobState": "completed",
"duration": 310,
"walltime": 3600,
"smt": 0,
"resources": [
{
"hostname": "taurusi6489",
"hwthreads": [
0,
1
]
},
{
"hostname": "taurusi6490",
"hwthreads": [
10,
11
]
}
],
"statistics": {
"cpu_used": {
"min": 0.03694102397926118,
"avg": 0.48812580468611544,
"max": 1.0000000000000002,
"unit": {
"base": ""
}
},
"ipc": {
"min": 0.30469640475234366,
"avg": 1.154312070173657,
"max": 1.797623522191001,
"unit": {
"base": "IPC"
}
},
"flops_any": {
"min": 0.0,
"avg": 686.5190320308598,
"max": 4346.591400350933,
"unit": {
"base": "F/s"
}
},
"mem_bw": {
"min": 653671812.1661415,
"avg": 1605031604.9852366,
"max": 2614718291.9554267,
"unit": {
"base": "B/s"
}
},
"file_bw": {
"min": 0.0,
"avg": 620592.5419124186,
"max": 11559156.360352296,
"unit": {
"base": "B/s"
}
},
"net_bw": {
"min": 126779.89655880642,
"avg": 763101.082138246,
"max": 1916309.7075416835,
"unit": {
"base": "B/s"
}
},
"mem_used": {
"min": 2779066368.0,
"avg": 9647598685.09091,
"max": 10202595328.0,
"unit": {
"base": "B"
}
},
"cpu_power": {
"min": 35.50647456742635,
"avg": 78.63442946337237,
"max": 85.83909286117324,
"unit": {
"base": "W"
}
}
}
}

Binary file not shown.

View File

@@ -8,33 +8,65 @@ import (
 	"encoding/json"
 	"flag"
 	"fmt"
+	"os"
+	"time"
 
 	"github.com/ClusterCockpit/cc-backend/internal/config"
 	"github.com/ClusterCockpit/cc-backend/pkg/archive"
 	"github.com/ClusterCockpit/cc-backend/pkg/log"
 )
 
+func parseDate(in string) int64 {
+	const shortForm = "2006-Jan-02"
+	loc, _ := time.LoadLocation("Local")
+	if in != "" {
+		t, err := time.ParseInLocation(shortForm, in, loc)
+		if err != nil {
+			fmt.Printf("date parse error %v", err)
+			os.Exit(0)
+		}
+		return t.Unix()
+	}
+
+	return 0
+}
+
 func main() {
-	var srcPath, flagConfigFile, flagLogLevel string
-	var flagLogDateTime bool
+	var srcPath, flagConfigFile, flagLogLevel, flagRemoveCluster, flagRemoveAfter, flagRemoveBefore string
+	var flagLogDateTime, flagValidate bool
 
 	flag.StringVar(&srcPath, "s", "./var/job-archive", "Specify the source job archive path. Default is ./var/job-archive")
 	flag.BoolVar(&flagLogDateTime, "logdate", false, "Set this flag to add date and time to log messages")
 	flag.StringVar(&flagLogLevel, "loglevel", "warn", "Sets the logging level: `[debug,info,warn (default),err,fatal,crit]`")
 	flag.StringVar(&flagConfigFile, "config", "./config.json", "Specify alternative path to `config.json`")
+	flag.StringVar(&flagRemoveCluster, "remove-cluster", "", "Remove cluster from archive and database")
+	flag.StringVar(&flagRemoveBefore, "remove-before", "", "Remove all jobs with start time before date (Format: 2006-Jan-04)")
+	flag.StringVar(&flagRemoveAfter, "remove-after", "", "Remove all jobs with start time after date (Format: 2006-Jan-04)")
+	flag.BoolVar(&flagValidate, "validate", false, "Set this flag to validate a job archive against the json schema")
 	flag.Parse()
 
 	archiveCfg := fmt.Sprintf("{\"kind\": \"file\",\"path\": \"%s\"}", srcPath)
 
 	log.Init(flagLogLevel, flagLogDateTime)
 	config.Init(flagConfigFile)
-	config.Keys.Validate = true
 
 	if err := archive.Init(json.RawMessage(archiveCfg), false); err != nil {
 		log.Fatal(err)
 	}
 	ar := archive.GetHandle()
 
-	for job := range ar.Iter(true) {
-		log.Printf("Validate %s - %d\n", job.Meta.Cluster, job.Meta.JobID)
+	if flagValidate {
+		config.Keys.Validate = true
+		for job := range ar.Iter(true) {
+			log.Printf("Validate %s - %d\n", job.Meta.Cluster, job.Meta.JobID)
+		}
+		os.Exit(0)
 	}
+
+	if flagRemoveBefore != "" || flagRemoveAfter != "" {
+		ar.Clean(parseDate(flagRemoveBefore), parseDate(flagRemoveAfter))
+		os.Exit(0)
+	}
+
+	ar.Info()
 }
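
The new -remove-before/-remove-after flags funnel through parseDate, which uses Go's reference-time layout "2006-Jan-02": the layout string spells out the reference date itself, and inputs must match that shape (e.g. 2023-Jun-01). A self-contained sketch of that parsing step; the sample input is purely illustrative:

package main

import (
	"fmt"
	"time"
)

func main() {
	// Same layout as parseDate above; Go layouts are written as the
	// reference date "Jan 2, 2006" rendered in the target format.
	const shortForm = "2006-Jan-02"

	loc, _ := time.LoadLocation("Local")
	t, err := time.ParseInLocation(shortForm, "2023-Jun-01", loc)
	if err != nil {
		panic(err)
	}
	fmt.Println(t.Unix()) // the Unix cutoff handed to ar.Clean()
}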

View File

@@ -23,17 +23,17 @@ type BaseJob struct {
 	Project          string        `json:"project" db:"project" example:"abcd200"`    // The unique identifier of a project
 	Cluster          string        `json:"cluster" db:"cluster" example:"fritz"`      // The unique identifier of a cluster
 	SubCluster       string        `json:"subCluster" db:"subcluster" example:"main"` // The unique identifier of a sub cluster
-	Partition        *string       `json:"partition" db:"partition" example:"main"`         // The Slurm partition to which the job was submitted
-	ArrayJobId       *int64        `json:"arrayJobId" db:"array_job_id" example:"123000"`   // The unique identifier of an array job
+	Partition        string        `json:"partition" db:"partition" example:"main"`         // The Slurm partition to which the job was submitted
+	ArrayJobId       int64         `json:"arrayJobId" db:"array_job_id" example:"123000"`   // The unique identifier of an array job
 	NumNodes         int32         `json:"numNodes" db:"num_nodes" example:"2" minimum:"1"` // Number of nodes used (Min > 0)
-	NumHWThreads     *int32        `json:"numHwthreads" db:"num_hwthreads" example:"20" minimum:"1"` // Number of HWThreads used (Min > 0)
-	NumAcc           *int32        `json:"numAcc" db:"num_acc" example:"2" minimum:"1"`              // Number of accelerators used (Min > 0)
+	NumHWThreads     int32         `json:"numHwthreads" db:"num_hwthreads" example:"20" minimum:"1"` // Number of HWThreads used (Min > 0)
+	NumAcc           int32         `json:"numAcc" db:"num_acc" example:"2" minimum:"1"`              // Number of accelerators used (Min > 0)
 	Exclusive        int32         `json:"exclusive" db:"exclusive" example:"1" minimum:"0" maximum:"2"`                // Specifies how nodes are shared: 0 - Shared among multiple jobs of multiple users, 1 - Job exclusive (Default), 2 - Shared among multiple jobs of same user
 	MonitoringStatus int32         `json:"monitoringStatus" db:"monitoring_status" example:"1" minimum:"0" maximum:"3"` // State of monitoring system during job run: 0 - Disabled, 1 - Running or Archiving (Default), 2 - Archiving Failed, 3 - Archiving Successfull
-	SMT              *int32        `json:"smt" db:"smt" example:"4"`                                                    // SMT threads used by job
+	SMT              int32         `json:"smt" db:"smt" example:"4"`                                                    // SMT threads used by job
 	State            JobState      `json:"jobState" db:"job_state" example:"completed" enums:"completed,failed,cancelled,stopped,timeout,out_of_memory"` // Final state of job
 	Duration         int32         `json:"duration" db:"duration" example:"43200" minimum:"1"` // Duration of job in seconds (Min > 0)
-	Walltime         *int64        `json:"walltime" db:"walltime" example:"86400" minimum:"1"` // Requested walltime of job in seconds (Min > 0)
+	Walltime         int64         `json:"walltime" db:"walltime" example:"86400" minimum:"1"` // Requested walltime of job in seconds (Min > 0)
 	Tags             []*schema.Tag `json:"tags"`             // List of tags
 	RawResources     []byte        `json:"-" db:"resources"` // Resources used by job [As Bytes]
 	Resources        []*Resource   `json:"resources"`        // Resources used by job
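
Dropping the pointers changes the decoding semantics: a missing JSON key now yields the field's zero value instead of nil, so call sites no longer need nil checks before dereferencing. A reduced sketch of the difference, using a hypothetical one-field stand-in for BaseJob:

package main

import (
	"encoding/json"
	"fmt"
)

// Reduced, hypothetical stand-ins for the struct before and after the change.
type withPtr struct {
	Walltime *int64 `json:"walltime"`
}
type withValue struct {
	Walltime int64 `json:"walltime"`
}

func main() {
	in := []byte(`{}`) // a job record without a walltime key

	var p withPtr
	var v withValue
	json.Unmarshal(in, &p)
	json.Unmarshal(in, &v)

	// Pointer field: nil must be checked before use.
	fmt.Println(p.Walltime == nil) // true
	// Value field: the missing key simply becomes the zero value.
	fmt.Println(v.Walltime) // 0
}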

View File

@@ -17,7 +17,7 @@ import (
 	"github.com/ClusterCockpit/cc-backend/internal/config"
 	"github.com/ClusterCockpit/cc-backend/pkg/log"
 	"github.com/ClusterCockpit/cc-backend/pkg/schema"
-	"github.com/ClusterCockpit/cc-backend/pkg/units"
+	ccunits "github.com/ClusterCockpit/cc-units"
 )
 
 const Version = 1
@@ -35,6 +35,33 @@ func loadJobData(filename string) (*JobData, error) {
 	return DecodeJobData(bufio.NewReader(f))
 }
 
+func ConvertUnitString(us string) schema.Unit {
+	var nu schema.Unit
+
+	if us == "CPI" ||
+		us == "IPC" ||
+		us == "load" ||
+		us == "" {
+		nu.Base = us
+		return nu
+	}
+
+	u := ccunits.NewUnit(us)
+	p := u.GetPrefix()
+	if p.Prefix() != "" {
+		prefix := p.Prefix()
+		nu.Prefix = prefix
+	}
+	m := u.GetMeasure()
+	d := u.GetUnitDenominator()
+	if d.Short() != "inval" {
+		nu.Base = fmt.Sprintf("%s/%s", m.Short(), d.Short())
+	} else {
+		nu.Base = m.Short()
+	}
+
+	return nu
+}
+
 func deepCopyJobMeta(j *JobMeta) schema.JobMeta {
 	var jn schema.JobMeta
 
@@ -78,7 +105,7 @@ func deepCopyJobMeta(j *JobMeta) schema.JobMeta {
 			sn.Avg = v.Avg
 			sn.Max = v.Max
 			sn.Min = v.Min
-			tmpUnit := units.ConvertUnitString(v.Unit)
+			tmpUnit := ConvertUnitString(v.Unit)
 			if tmpUnit.Base == "inval" {
 				sn.Unit = schema.Unit{Base: ""}
 			} else {
@@ -113,7 +140,7 @@ func deepCopyJobData(d *JobData, cluster string, subCluster string) *schema.JobD
 		for mk, mv := range v {
 			// fmt.Printf("Scope %s\n", mk)
 			var mn schema.JobMetric
-			tmpUnit := units.ConvertUnitString(mv.Unit)
+			tmpUnit := ConvertUnitString(mv.Unit)
 			if tmpUnit.Base == "inval" {
 				mn.Unit = schema.Unit{Base: ""}
 			} else {
@@ -174,16 +201,14 @@ func deepCopyClusterConfig(co *Cluster) schema.Cluster {
 		scn.SocketsPerNode = sco.SocketsPerNode
 		scn.CoresPerSocket = sco.CoresPerSocket
 		scn.ThreadsPerCore = sco.ThreadsPerCore
-		var prefix = new(string)
-		*prefix = "G"
 		scn.FlopRateScalar = schema.MetricValue{
-			Unit:  schema.Unit{Base: "F/s", Prefix: prefix},
+			Unit:  schema.Unit{Base: "F/s", Prefix: "G"},
 			Value: float64(sco.FlopRateScalar)}
 		scn.FlopRateSimd = schema.MetricValue{
-			Unit:  schema.Unit{Base: "F/s", Prefix: prefix},
+			Unit:  schema.Unit{Base: "F/s", Prefix: "G"},
 			Value: float64(sco.FlopRateSimd)}
 		scn.MemoryBandwidth = schema.MetricValue{
-			Unit:  schema.Unit{Base: "B/s", Prefix: prefix},
+			Unit:  schema.Unit{Base: "B/s", Prefix: "G"},
 			Value: float64(sco.MemoryBandwidth)}
 		scn.Topology = *sco.Topology
 		cn.SubClusters = append(cn.SubClusters, &scn)
@@ -194,13 +219,13 @@ func deepCopyClusterConfig(co *Cluster) schema.Cluster {
 		mcn.Name = mco.Name
 		mcn.Scope = mco.Scope
 		if mco.Aggregation == "" {
-			fmt.Println("Property aggregation missing! Please review file!")
+			fmt.Println("cluster.json - Property aggregation missing! Please review file!")
 			mcn.Aggregation = "sum"
 		} else {
 			mcn.Aggregation = mco.Aggregation
 		}
 		mcn.Timestep = mco.Timestep
-		tmpUnit := units.ConvertUnitString(mco.Unit)
+		tmpUnit := ConvertUnitString(mco.Unit)
 		if tmpUnit.Base == "inval" {
 			mcn.Unit = schema.Unit{Base: ""}
 		} else {
@@ -227,8 +252,8 @@ func main() {
 	flag.BoolVar(&flagLogDateTime, "logdate", false, "Set this flag to add date and time to log messages")
 	flag.StringVar(&flagLogLevel, "loglevel", "warn", "Sets the logging level: `[debug,info,warn (default),err,fatal,crit]`")
 	flag.StringVar(&flagConfigFile, "config", "./config.json", "Specify alternative path to `config.json`")
-	flag.StringVar(&srcPath, "s", "./var/job-archive", "Specify the source job archive path. Default is ./var/job-archive")
-	flag.StringVar(&dstPath, "d", "./var/job-archive-new", "Specify the destination job archive path. Default is ./var/job-archive-new")
+	flag.StringVar(&srcPath, "s", "./var/job-archive", "Specify the source job archive path")
+	flag.StringVar(&dstPath, "d", "./var/job-archive-new", "Specify the destination job archive path")
 	flag.Parse()
 
 	if _, err := os.Stat(filepath.Join(srcPath, "version.txt")); !errors.Is(err, os.ErrNotExist) {
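
The migration now normalizes unit strings inline via cc-units instead of the removed pkg/units helper. A rough sketch of what that parsing yields for a throughput unit; only the accessor calls are taken from the code above, and the expected values in the comments are assumptions about cc-units' parser, not verified output:

package main

import (
	"fmt"

	ccunits "github.com/ClusterCockpit/cc-units"
)

func main() {
	u := ccunits.NewUnit("GB/s")
	// Prefix, measure, and denominator are recombined into a
	// schema.Unit by ConvertUnitString above.
	fmt.Println(u.GetPrefix().Prefix())         // expected: "G"
	fmt.Println(u.GetMeasure().Short())         // expected: "B"
	fmt.Println(u.GetUnitDenominator().Short()) // expected: "s"
}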

web/frontend/package-lock.json generated Normal file
View File

@ -0,0 +1,712 @@
{
"name": "cc-frontend",
"version": "1.0.0",
"lockfileVersion": 3,
"requires": true,
"packages": {
"": {
"name": "cc-frontend",
"version": "1.0.0",
"license": "MIT",
"dependencies": {
"@rollup/plugin-replace": "^5.0.2",
"@urql/svelte": "^4.0.1",
"graphql": "^16.6.0",
"sveltestrap": "^5.10.0",
"uplot": "^1.6.24",
"wonka": "^6.3.2"
},
"devDependencies": {
"@rollup/plugin-commonjs": "^24.1.0",
"@rollup/plugin-node-resolve": "^15.0.2",
"@rollup/plugin-terser": "^0.4.1",
"rollup": "^3.21.0",
"rollup-plugin-css-only": "^4.3.0",
"rollup-plugin-svelte": "^7.1.4",
"svelte": "^3.58.0"
}
},
"node_modules/@0no-co/graphql.web": {
"version": "1.0.1",
"resolved": "https://registry.npmjs.org/@0no-co/graphql.web/-/graphql.web-1.0.1.tgz",
"integrity": "sha512-6Yaxyv6rOwRkLIvFaL0NrLDgfNqC/Ng9QOPmTmlqW4mORXMEKmh5NYGkIvvt5Yw8fZesnMAqkj8cIqTj8f40cQ==",
"peerDependencies": {
"graphql": "^14.0.0 || ^15.0.0 || ^16.0.0"
},
"peerDependenciesMeta": {
"graphql": {
"optional": true
}
}
},
"node_modules/@jridgewell/gen-mapping": {
"version": "0.3.2",
"resolved": "https://registry.npmjs.org/@jridgewell/gen-mapping/-/gen-mapping-0.3.2.tgz",
"integrity": "sha512-mh65xKQAzI6iBcFzwv28KVWSmCkdRBWoOh+bYQGW3+6OZvbbN3TqMGo5hqYxQniRcH9F2VZIoJCm4pa3BPDK/A==",
"dev": true,
"dependencies": {
"@jridgewell/set-array": "^1.0.1",
"@jridgewell/sourcemap-codec": "^1.4.10",
"@jridgewell/trace-mapping": "^0.3.9"
},
"engines": {
"node": ">=6.0.0"
}
},
"node_modules/@jridgewell/resolve-uri": {
"version": "3.1.0",
"resolved": "https://registry.npmjs.org/@jridgewell/resolve-uri/-/resolve-uri-3.1.0.tgz",
"integrity": "sha512-F2msla3tad+Mfht5cJq7LSXcdudKTWCVYUgw6pLFOOHSTtZlj6SWNYAp+AhuqLmWdBO2X5hPrLcu8cVP8fy28w==",
"dev": true,
"engines": {
"node": ">=6.0.0"
}
},
"node_modules/@jridgewell/set-array": {
"version": "1.1.2",
"resolved": "https://registry.npmjs.org/@jridgewell/set-array/-/set-array-1.1.2.tgz",
"integrity": "sha512-xnkseuNADM0gt2bs+BvhO0p78Mk762YnZdsuzFV018NoG1Sj1SCQvpSqa7XUaTam5vAGasABV9qXASMKnFMwMw==",
"dev": true,
"engines": {
"node": ">=6.0.0"
}
},
"node_modules/@jridgewell/source-map": {
"version": "0.3.2",
"resolved": "https://registry.npmjs.org/@jridgewell/source-map/-/source-map-0.3.2.tgz",
"integrity": "sha512-m7O9o2uR8k2ObDysZYzdfhb08VuEml5oWGiosa1VdaPZ/A6QyPkAJuwN0Q1lhULOf6B7MtQmHENS743hWtCrgw==",
"dev": true,
"dependencies": {
"@jridgewell/gen-mapping": "^0.3.0",
"@jridgewell/trace-mapping": "^0.3.9"
}
},
"node_modules/@jridgewell/sourcemap-codec": {
"version": "1.4.14",
"resolved": "https://registry.npmjs.org/@jridgewell/sourcemap-codec/-/sourcemap-codec-1.4.14.tgz",
"integrity": "sha512-XPSJHWmi394fuUuzDnGz1wiKqWfo1yXecHQMRf2l6hztTO+nPru658AyDngaBe7isIxEkRsPR3FZh+s7iVa4Uw=="
},
"node_modules/@jridgewell/trace-mapping": {
"version": "0.3.14",
"resolved": "https://registry.npmjs.org/@jridgewell/trace-mapping/-/trace-mapping-0.3.14.tgz",
"integrity": "sha512-bJWEfQ9lPTvm3SneWwRFVLzrh6nhjwqw7TUFFBEMzwvg7t7PCDenf2lDwqo4NQXzdpgBXyFgDWnQA+2vkruksQ==",
"dev": true,
"dependencies": {
"@jridgewell/resolve-uri": "^3.0.3",
"@jridgewell/sourcemap-codec": "^1.4.10"
}
},
"node_modules/@popperjs/core": {
"version": "2.11.0",
"resolved": "https://registry.npmjs.org/@popperjs/core/-/core-2.11.0.tgz",
"integrity": "sha512-zrsUxjLOKAzdewIDRWy9nsV1GQsKBCWaGwsZQlCgr6/q+vjyZhFgqedLfFBuI9anTPEUT4APq9Mu0SZBTzIcGQ==",
"funding": {
"type": "opencollective",
"url": "https://opencollective.com/popperjs"
}
},
"node_modules/@rollup/plugin-commonjs": {
"version": "24.1.0",
"resolved": "https://registry.npmjs.org/@rollup/plugin-commonjs/-/plugin-commonjs-24.1.0.tgz",
"integrity": "sha512-eSL45hjhCWI0jCCXcNtLVqM5N1JlBGvlFfY0m6oOYnLCJ6N0qEXoZql4sY2MOUArzhH4SA/qBpTxvvZp2Sc+DQ==",
"dev": true,
"dependencies": {
"@rollup/pluginutils": "^5.0.1",
"commondir": "^1.0.1",
"estree-walker": "^2.0.2",
"glob": "^8.0.3",
"is-reference": "1.2.1",
"magic-string": "^0.27.0"
},
"engines": {
"node": ">=14.0.0"
},
"peerDependencies": {
"rollup": "^2.68.0||^3.0.0"
},
"peerDependenciesMeta": {
"rollup": {
"optional": true
}
}
},
"node_modules/@rollup/plugin-node-resolve": {
"version": "15.0.2",
"resolved": "https://registry.npmjs.org/@rollup/plugin-node-resolve/-/plugin-node-resolve-15.0.2.tgz",
"integrity": "sha512-Y35fRGUjC3FaurG722uhUuG8YHOJRJQbI6/CkbRkdPotSpDj9NtIN85z1zrcyDcCQIW4qp5mgG72U+gJ0TAFEg==",
"dev": true,
"dependencies": {
"@rollup/pluginutils": "^5.0.1",
"@types/resolve": "1.20.2",
"deepmerge": "^4.2.2",
"is-builtin-module": "^3.2.1",
"is-module": "^1.0.0",
"resolve": "^1.22.1"
},
"engines": {
"node": ">=14.0.0"
},
"peerDependencies": {
"rollup": "^2.78.0||^3.0.0"
},
"peerDependenciesMeta": {
"rollup": {
"optional": true
}
}
},
"node_modules/@rollup/plugin-replace": {
"version": "5.0.2",
"resolved": "https://registry.npmjs.org/@rollup/plugin-replace/-/plugin-replace-5.0.2.tgz",
"integrity": "sha512-M9YXNekv/C/iHHK+cvORzfRYfPbq0RDD8r0G+bMiTXjNGKulPnCT9O3Ss46WfhI6ZOCgApOP7xAdmCQJ+U2LAA==",
"dependencies": {
"@rollup/pluginutils": "^5.0.1",
"magic-string": "^0.27.0"
},
"engines": {
"node": ">=14.0.0"
},
"peerDependencies": {
"rollup": "^1.20.0||^2.0.0||^3.0.0"
},
"peerDependenciesMeta": {
"rollup": {
"optional": true
}
}
},
"node_modules/@rollup/plugin-terser": {
"version": "0.4.1",
"resolved": "https://registry.npmjs.org/@rollup/plugin-terser/-/plugin-terser-0.4.1.tgz",
"integrity": "sha512-aKS32sw5a7hy+fEXVy+5T95aDIwjpGHCTv833HXVtyKMDoVS7pBr5K3L9hEQoNqbJFjfANPrNpIXlTQ7is00eA==",
"dev": true,
"dependencies": {
"serialize-javascript": "^6.0.0",
"smob": "^0.0.6",
"terser": "^5.15.1"
},
"engines": {
"node": ">=14.0.0"
},
"peerDependencies": {
"rollup": "^2.x || ^3.x"
},
"peerDependenciesMeta": {
"rollup": {
"optional": true
}
}
},
"node_modules/@rollup/plugin-terser/node_modules/serialize-javascript": {
"version": "6.0.1",
"resolved": "https://registry.npmjs.org/serialize-javascript/-/serialize-javascript-6.0.1.tgz",
"integrity": "sha512-owoXEFjWRllis8/M1Q+Cw5k8ZH40e3zhp/ovX+Xr/vi1qj6QesbyXXViFbpNvWvPNAD62SutwEXavefrLJWj7w==",
"dev": true,
"dependencies": {
"randombytes": "^2.1.0"
}
},
"node_modules/@rollup/pluginutils": {
"version": "5.0.2",
"resolved": "https://registry.npmjs.org/@rollup/pluginutils/-/pluginutils-5.0.2.tgz",
"integrity": "sha512-pTd9rIsP92h+B6wWwFbW8RkZv4hiR/xKsqre4SIuAOaOEQRxi0lqLke9k2/7WegC85GgUs9pjmOjCUi3In4vwA==",
"dependencies": {
"@types/estree": "^1.0.0",
"estree-walker": "^2.0.2",
"picomatch": "^2.3.1"
},
"engines": {
"node": ">=14.0.0"
},
"peerDependencies": {
"rollup": "^1.20.0||^2.0.0||^3.0.0"
},
"peerDependenciesMeta": {
"rollup": {
"optional": true
}
}
},
"node_modules/@types/estree": {
"version": "1.0.1",
"resolved": "https://registry.npmjs.org/@types/estree/-/estree-1.0.1.tgz",
"integrity": "sha512-LG4opVs2ANWZ1TJoKc937iMmNstM/d0ae1vNbnBvBhqCSezgVUOzcLCqbI5elV8Vy6WKwKjaqR+zO9VKirBBCA=="
},
"node_modules/@types/resolve": {
"version": "1.20.2",
"resolved": "https://registry.npmjs.org/@types/resolve/-/resolve-1.20.2.tgz",
"integrity": "sha512-60BCwRFOZCQhDncwQdxxeOEEkbc5dIMccYLwbxsS4TUNeVECQ/pBJ0j09mrHOl/JJvpRPGwO9SvE4nR2Nb/a4Q==",
"dev": true
},
"node_modules/@urql/core": {
"version": "4.0.7",
"resolved": "https://registry.npmjs.org/@urql/core/-/core-4.0.7.tgz",
"integrity": "sha512-UtZ9oSbSFODXzFydgLCXpAQz26KGT1d6uEfcylKphiRWNXSWZi8k7vhJXNceNm/Dn0MiZ+kaaJHKcnGY1jvHRQ==",
"dependencies": {
"@0no-co/graphql.web": "^1.0.1",
"wonka": "^6.3.2"
}
},
"node_modules/@urql/svelte": {
"version": "4.0.1",
"resolved": "https://registry.npmjs.org/@urql/svelte/-/svelte-4.0.1.tgz",
"integrity": "sha512-WbsVjuK7IUNlJlvXAgevjQunoso0T+AngFlb0zafDvay6HN47Zc3CSVbAlP8KjETjERUMJLuiqknmPFFm2GEFQ==",
"dependencies": {
"@urql/core": "^4.0.0",
"wonka": "^6.3.2"
},
"peerDependencies": {
"svelte": "^3.0.0"
}
},
"node_modules/acorn": {
"version": "8.8.0",
"resolved": "https://registry.npmjs.org/acorn/-/acorn-8.8.0.tgz",
"integrity": "sha512-QOxyigPVrpZ2GXT+PFyZTl6TtOFc5egxHIP9IlQ+RbupQuX4RkT/Bee4/kQuC02Xkzg84JcT7oLYtDIQxp+v7w==",
"dev": true,
"bin": {
"acorn": "bin/acorn"
},
"engines": {
"node": ">=0.4.0"
}
},
"node_modules/balanced-match": {
"version": "1.0.2",
"resolved": "https://registry.npmjs.org/balanced-match/-/balanced-match-1.0.2.tgz",
"integrity": "sha512-3oSeUO0TMV67hN1AmbXsK4yaqU7tjiHlbxRDZOpH0KW9+CeX4bRAaX0Anxt0tx2MrpRpWwQaPwIlISEJhYU5Pw==",
"dev": true
},
"node_modules/brace-expansion": {
"version": "2.0.1",
"resolved": "https://registry.npmjs.org/brace-expansion/-/brace-expansion-2.0.1.tgz",
"integrity": "sha512-XnAIvQ8eM+kC6aULx6wuQiwVsnzsi9d3WxzV3FpWTGA19F621kwdbsAcFKXgKUHZWsy+mY6iL1sHTxWEFCytDA==",
"dev": true,
"dependencies": {
"balanced-match": "^1.0.0"
}
},
"node_modules/buffer-from": {
"version": "1.1.2",
"resolved": "https://registry.npmjs.org/buffer-from/-/buffer-from-1.1.2.tgz",
"integrity": "sha512-E+XQCRwSbaaiChtv6k6Dwgc+bx+Bs6vuKJHHl5kox/BaKbhiXzqQOwK4cO22yElGp2OCmjwVhT3HmxgyPGnJfQ==",
"dev": true
},
"node_modules/builtin-modules": {
"version": "3.3.0",
"resolved": "https://registry.npmjs.org/builtin-modules/-/builtin-modules-3.3.0.tgz",
"integrity": "sha512-zhaCDicdLuWN5UbN5IMnFqNMhNfo919sH85y2/ea+5Yg9TsTkeZxpL+JLbp6cgYFS4sRLp3YV4S6yDuqVWHYOw==",
"dev": true,
"engines": {
"node": ">=6"
},
"funding": {
"url": "https://github.com/sponsors/sindresorhus"
}
},
"node_modules/commander": {
"version": "2.20.3",
"resolved": "https://registry.npmjs.org/commander/-/commander-2.20.3.tgz",
"integrity": "sha512-GpVkmM8vF2vQUkj2LvZmD35JxeJOLCwJ9cUkugyk2nuhbv3+mJvpLYYt+0+USMxE+oj+ey/lJEnhZw75x/OMcQ==",
"dev": true
},
"node_modules/commondir": {
"version": "1.0.1",
"resolved": "https://registry.npmjs.org/commondir/-/commondir-1.0.1.tgz",
"integrity": "sha1-3dgA2gxmEnOTzKWVDqloo6rxJTs= sha512-W9pAhw0ja1Edb5GVdIF1mjZw/ASI0AlShXM83UUGe2DVr5TdAPEA1OA8m/g8zWp9x6On7gqufY+FatDbC3MDQg==",
"dev": true
},
"node_modules/deepmerge": {
"version": "4.2.2",
"resolved": "https://registry.npmjs.org/deepmerge/-/deepmerge-4.2.2.tgz",
"integrity": "sha512-FJ3UgI4gIl+PHZm53knsuSFpE+nESMr7M4v9QcgB7S63Kj/6WqMiFQJpBBYz1Pt+66bZpP3Q7Lye0Oo9MPKEdg==",
"dev": true,
"engines": {
"node": ">=0.10.0"
}
},
"node_modules/estree-walker": {
"version": "2.0.2",
"resolved": "https://registry.npmjs.org/estree-walker/-/estree-walker-2.0.2.tgz",
"integrity": "sha512-Rfkk/Mp/DL7JVje3u18FxFujQlTNR2q6QfMSMB7AvCBx91NGj/ba3kCfza0f6dVDbw7YlRf/nDrn7pQrCCyQ/w=="
},
"node_modules/fs.realpath": {
"version": "1.0.0",
"resolved": "https://registry.npmjs.org/fs.realpath/-/fs.realpath-1.0.0.tgz",
"integrity": "sha512-OO0pH2lK6a0hZnAdau5ItzHPI6pUlvI7jMVnxUQRtw4owF2wk8lOSabtGDCTP4Ggrg2MbGnWO9X8K1t4+fGMDw==",
"dev": true
},
"node_modules/fsevents": {
"version": "2.3.2",
"resolved": "https://registry.npmjs.org/fsevents/-/fsevents-2.3.2.tgz",
"integrity": "sha512-xiqMQR4xAeHTuB9uWm+fFRcIOgKBMiOBP+eXiyT7jsgVCq1bkVygt00oASowB7EdtpOHaaPgKt812P9ab+DDKA==",
"dev": true,
"hasInstallScript": true,
"optional": true,
"os": [
"darwin"
],
"engines": {
"node": "^8.16.0 || ^10.6.0 || >=11.0.0"
}
},
"node_modules/function-bind": {
"version": "1.1.1",
"resolved": "https://registry.npmjs.org/function-bind/-/function-bind-1.1.1.tgz",
"integrity": "sha512-yIovAzMX49sF8Yl58fSCWJ5svSLuaibPxXQJFLmBObTuCr0Mf1KiPopGM9NiFjiYBCbfaa2Fh6breQ6ANVTI0A==",
"dev": true
},
"node_modules/glob": {
"version": "8.1.0",
"resolved": "https://registry.npmjs.org/glob/-/glob-8.1.0.tgz",
"integrity": "sha512-r8hpEjiQEYlF2QU0df3dS+nxxSIreXQS1qRhMJM0Q5NDdR386C7jb7Hwwod8Fgiuex+k0GFjgft18yvxm5XoCQ==",
"dev": true,
"dependencies": {
"fs.realpath": "^1.0.0",
"inflight": "^1.0.4",
"inherits": "2",
"minimatch": "^5.0.1",
"once": "^1.3.0"
},
"engines": {
"node": ">=12"
},
"funding": {
"url": "https://github.com/sponsors/isaacs"
}
},
"node_modules/graphql": {
"version": "16.6.0",
"resolved": "https://registry.npmjs.org/graphql/-/graphql-16.6.0.tgz",
"integrity": "sha512-KPIBPDlW7NxrbT/eh4qPXz5FiFdL5UbaA0XUNz2Rp3Z3hqBSkbj0GVjwFDztsWVauZUWsbKHgMg++sk8UX0bkw==",
"engines": {
"node": "^12.22.0 || ^14.16.0 || ^16.0.0 || >=17.0.0"
}
},
"node_modules/has": {
"version": "1.0.3",
"resolved": "https://registry.npmjs.org/has/-/has-1.0.3.tgz",
"integrity": "sha512-f2dvO0VU6Oej7RkWJGrehjbzMAjFp5/VKPp5tTpWIV4JHHZK1/BxbFRtf/siA2SWTe09caDmVtYYzWEIbBS4zw==",
"dev": true,
"dependencies": {
"function-bind": "^1.1.1"
},
"engines": {
"node": ">= 0.4.0"
}
},
"node_modules/inflight": {
"version": "1.0.6",
"resolved": "https://registry.npmjs.org/inflight/-/inflight-1.0.6.tgz",
"integrity": "sha512-k92I/b08q4wvFscXCLvqfsHCrjrF7yiXsQuIVvVE7N82W3+aqpzuUdBbfhWcy/FZR3/4IgflMgKLOsvPDrGCJA==",
"dev": true,
"dependencies": {
"once": "^1.3.0",
"wrappy": "1"
}
},
"node_modules/inherits": {
"version": "2.0.4",
"resolved": "https://registry.npmjs.org/inherits/-/inherits-2.0.4.tgz",
"integrity": "sha512-k/vGaX4/Yla3WzyMCvTQOXYeIHvqOKtnqBduzTHpzpQZzAskKMhZ2K+EnBiSM9zGSoIFeMpXKxa4dYeZIQqewQ==",
"dev": true
},
"node_modules/is-builtin-module": {
"version": "3.2.1",
"resolved": "https://registry.npmjs.org/is-builtin-module/-/is-builtin-module-3.2.1.tgz",
"integrity": "sha512-BSLE3HnV2syZ0FK0iMA/yUGplUeMmNz4AW5fnTunbCIqZi4vG3WjJT9FHMy5D69xmAYBHXQhJdALdpwVxV501A==",
"dev": true,
"dependencies": {
"builtin-modules": "^3.3.0"
},
"engines": {
"node": ">=6"
},
"funding": {
"url": "https://github.com/sponsors/sindresorhus"
}
},
"node_modules/is-core-module": {
"version": "2.12.0",
"resolved": "https://registry.npmjs.org/is-core-module/-/is-core-module-2.12.0.tgz",
"integrity": "sha512-RECHCBCd/viahWmwj6enj19sKbHfJrddi/6cBDsNTKbNq0f7VeaUkBo60BqzvPqo/W54ChS62Z5qyun7cfOMqQ==",
"dev": true,
"dependencies": {
"has": "^1.0.3"
},
"funding": {
"url": "https://github.com/sponsors/ljharb"
}
},
"node_modules/is-module": {
"version": "1.0.0",
"resolved": "https://registry.npmjs.org/is-module/-/is-module-1.0.0.tgz",
"integrity": "sha1-Mlj7afeMFNW4FdZkM2tM/7ZEFZE= sha512-51ypPSPCoTEIN9dy5Oy+h4pShgJmPCygKfyRCISBI+JoWT/2oJvK8QPxmwv7b/p239jXrm9M1mlQbyKJ5A152g==",
"dev": true
},
"node_modules/is-reference": {
"version": "1.2.1",
"resolved": "https://registry.npmjs.org/is-reference/-/is-reference-1.2.1.tgz",
"integrity": "sha512-U82MsXXiFIrjCK4otLT+o2NA2Cd2g5MLoOVXUZjIOhLurrRxpEXzI8O0KZHr3IjLvlAH1kTPYSuqer5T9ZVBKQ==",
"dev": true,
"dependencies": {
"@types/estree": "*"
}
},
"node_modules/magic-string": {
"version": "0.27.0",
"resolved": "https://registry.npmjs.org/magic-string/-/magic-string-0.27.0.tgz",
"integrity": "sha512-8UnnX2PeRAPZuN12svgR9j7M1uWMovg/CEnIwIG0LFkXSJJe4PdfUGiTGl8V9bsBHFUtfVINcSyYxd7q+kx9fA==",
"dependencies": {
"@jridgewell/sourcemap-codec": "^1.4.13"
},
"engines": {
"node": ">=12"
}
},
"node_modules/minimatch": {
"version": "5.1.6",
"resolved": "https://registry.npmjs.org/minimatch/-/minimatch-5.1.6.tgz",
"integrity": "sha512-lKwV/1brpG6mBUFHtb7NUmtABCb2WZZmm2wNiOA5hAb8VdCS4B3dtMWyvcoViccwAW/COERjXLt0zP1zXUN26g==",
"dev": true,
"dependencies": {
"brace-expansion": "^2.0.1"
},
"engines": {
"node": ">=10"
}
},
"node_modules/once": {
"version": "1.4.0",
"resolved": "https://registry.npmjs.org/once/-/once-1.4.0.tgz",
"integrity": "sha512-lNaJgI+2Q5URQBkccEKHTQOPaXdUxnZZElQTZY0MFUAuaEqe1E+Nyvgdz/aIyNi6Z9MzO5dv1H8n58/GELp3+w==",
"dev": true,
"dependencies": {
"wrappy": "1"
}
},
"node_modules/path-parse": {
"version": "1.0.7",
"resolved": "https://registry.npmjs.org/path-parse/-/path-parse-1.0.7.tgz",
"integrity": "sha512-LDJzPVEEEPR+y48z93A0Ed0yXb8pAByGWo/k5YYdYgpY2/2EsOsksJrq7lOHxryrVOn1ejG6oAp8ahvOIQD8sw==",
"dev": true
},
"node_modules/picomatch": {
"version": "2.3.1",
"resolved": "https://registry.npmjs.org/picomatch/-/picomatch-2.3.1.tgz",
"integrity": "sha512-JU3teHTNjmE2VCGFzuY8EXzCDVwEqB2a8fsIvwaStHhAWJEeVd1o1QD80CU6+ZdEXXSLbSsuLwJjkCBWqRQUVA==",
"engines": {
"node": ">=8.6"
},
"funding": {
"url": "https://github.com/sponsors/jonschlinkert"
}
},
"node_modules/randombytes": {
"version": "2.1.0",
"resolved": "https://registry.npmjs.org/randombytes/-/randombytes-2.1.0.tgz",
"integrity": "sha512-vYl3iOX+4CKUWuxGi9Ukhie6fsqXqS9FE2Zaic4tNFD2N2QQaXOMFbuKK4QmDHC0JO6B1Zp41J0LpT0oR68amQ==",
"dev": true,
"dependencies": {
"safe-buffer": "^5.1.0"
}
},
"node_modules/resolve": {
"version": "1.22.2",
"resolved": "https://registry.npmjs.org/resolve/-/resolve-1.22.2.tgz",
"integrity": "sha512-Sb+mjNHOULsBv818T40qSPeRiuWLyaGMa5ewydRLFimneixmVy2zdivRl+AF6jaYPC8ERxGDmFSiqui6SfPd+g==",
"dev": true,
"dependencies": {
"is-core-module": "^2.11.0",
"path-parse": "^1.0.7",
"supports-preserve-symlinks-flag": "^1.0.0"
},
"bin": {
"resolve": "bin/resolve"
},
"funding": {
"url": "https://github.com/sponsors/ljharb"
}
},
"node_modules/resolve.exports": {
"version": "2.0.2",
"resolved": "https://registry.npmjs.org/resolve.exports/-/resolve.exports-2.0.2.tgz",
"integrity": "sha512-X2UW6Nw3n/aMgDVy+0rSqgHlv39WZAlZrXCdnbyEiKm17DSqHX4MmQMaST3FbeWR5FTuRcUwYAziZajji0Y7mg==",
"dev": true,
"engines": {
"node": ">=10"
}
},
"node_modules/rollup": {
"version": "3.21.0",
"resolved": "https://registry.npmjs.org/rollup/-/rollup-3.21.0.tgz",
"integrity": "sha512-ANPhVcyeHvYdQMUyCbczy33nbLzI7RzrBje4uvNiTDJGIMtlKoOStmympwr9OtS1LZxiDmE2wvxHyVhoLtf1KQ==",
"devOptional": true,
"bin": {
"rollup": "dist/bin/rollup"
},
"engines": {
"node": ">=14.18.0",
"npm": ">=8.0.0"
},
"optionalDependencies": {
"fsevents": "~2.3.2"
}
},
"node_modules/rollup-plugin-css-only": {
"version": "4.3.0",
"resolved": "https://registry.npmjs.org/rollup-plugin-css-only/-/rollup-plugin-css-only-4.3.0.tgz",
"integrity": "sha512-BsiCqJJQzZh2lQiHY5irejRoJ3I1EUFHEi5PjVqsr+EmOh54YrWVwd3YZEXnQJ2+fzlhif0YM/Kf0GuH90GAdQ==",
"dev": true,
"dependencies": {
"@rollup/pluginutils": "5"
},
"engines": {
"node": ">=14"
},
"peerDependencies": {
"rollup": "<4"
}
},
"node_modules/rollup-plugin-svelte": {
"version": "7.1.4",
"resolved": "https://registry.npmjs.org/rollup-plugin-svelte/-/rollup-plugin-svelte-7.1.4.tgz",
"integrity": "sha512-Jm0FCydR7k8bBGe7wimXAes8x2zEK10Ew3f3lEZwYor/Zya3X0AZVeSAPRH7yiXB9hWQVzJu597EUeNwGDTdjQ==",
"dev": true,
"dependencies": {
"@rollup/pluginutils": "^4.1.0",
"resolve.exports": "^2.0.0"
},
"engines": {
"node": ">=10"
},
"peerDependencies": {
"rollup": ">=2.0.0",
"svelte": ">=3.5.0"
}
},
"node_modules/rollup-plugin-svelte/node_modules/@rollup/pluginutils": {
"version": "4.2.1",
"resolved": "https://registry.npmjs.org/@rollup/pluginutils/-/pluginutils-4.2.1.tgz",
"integrity": "sha512-iKnFXr7NkdZAIHiIWE+BX5ULi/ucVFYWD6TbAV+rZctiRTY2PL6tsIKhoIOaoskiWAkgu+VsbXgUVDNLHf+InQ==",
"dev": true,
"dependencies": {
"estree-walker": "^2.0.1",
"picomatch": "^2.2.2"
},
"engines": {
"node": ">= 8.0.0"
}
},
"node_modules/safe-buffer": {
"version": "5.2.1",
"resolved": "https://registry.npmjs.org/safe-buffer/-/safe-buffer-5.2.1.tgz",
"integrity": "sha512-rp3So07KcdmmKbGvgaNxQSJr7bGVSVk5S9Eq1F+ppbRo70+YeaDxkw5Dd8NPN+GD6bjnYm2VuPuCXmpuYvmCXQ==",
"dev": true,
"funding": [
{
"type": "github",
"url": "https://github.com/sponsors/feross"
},
{
"type": "patreon",
"url": "https://www.patreon.com/feross"
},
{
"type": "consulting",
"url": "https://feross.org/support"
}
]
},
"node_modules/smob": {
"version": "0.0.6",
"resolved": "https://registry.npmjs.org/smob/-/smob-0.0.6.tgz",
"integrity": "sha512-V21+XeNni+tTyiST1MHsa84AQhT1aFZipzPpOFAVB8DkHzwJyjjAmt9bgwnuZiZWnIbMo2duE29wybxv/7HWUw==",
"dev": true
},
"node_modules/source-map": {
"version": "0.6.1",
"resolved": "https://registry.npmjs.org/source-map/-/source-map-0.6.1.tgz",
"integrity": "sha512-UjgapumWlbMhkBgzT7Ykc5YXUT46F0iKu8SGXq0bcwP5dz/h0Plj6enJqjz1Zbq2l5WaqYnrVbwWOWMyF3F47g==",
"dev": true,
"engines": {
"node": ">=0.10.0"
}
},
"node_modules/source-map-support": {
"version": "0.5.21",
"resolved": "https://registry.npmjs.org/source-map-support/-/source-map-support-0.5.21.tgz",
"integrity": "sha512-uBHU3L3czsIyYXKX88fdrGovxdSCoTGDRZ6SYXtSRxLZUzHg5P/66Ht6uoUlHu9EZod+inXhKo3qQgwXUT/y1w==",
"dev": true,
"dependencies": {
"buffer-from": "^1.0.0",
"source-map": "^0.6.0"
}
},
"node_modules/supports-preserve-symlinks-flag": {
"version": "1.0.0",
"resolved": "https://registry.npmjs.org/supports-preserve-symlinks-flag/-/supports-preserve-symlinks-flag-1.0.0.tgz",
"integrity": "sha512-ot0WnXS9fgdkgIcePe6RHNk1WA8+muPa6cSjeR3V8K27q9BB1rTE3R1p7Hv0z1ZyAc8s6Vvv8DIyWf681MAt0w==",
"dev": true,
"engines": {
"node": ">= 0.4"
},
"funding": {
"url": "https://github.com/sponsors/ljharb"
}
},
"node_modules/svelte": {
"version": "3.58.0",
"resolved": "https://registry.npmjs.org/svelte/-/svelte-3.58.0.tgz",
"integrity": "sha512-brIBNNB76mXFmU/Kerm4wFnkskBbluBDCjx/8TcpYRb298Yh2dztS2kQ6bhtjMcvUhd5ynClfwpz5h2gnzdQ1A==",
"engines": {
"node": ">= 8"
}
},
"node_modules/sveltestrap": {
"version": "5.10.0",
"resolved": "https://registry.npmjs.org/sveltestrap/-/sveltestrap-5.10.0.tgz",
"integrity": "sha512-k6Ob+6G2AMYvBidXHBKM9W28fJqFHbmosqCe/NC8pv6TV7K+v47Yw+zmnLWkjqCzzmjkSLkL48SrHZrlWc9mYQ==",
"dependencies": {
"@popperjs/core": "^2.9.2"
},
"peerDependencies": {
"svelte": "^3.29.0"
}
},
"node_modules/terser": {
"version": "5.17.1",
"resolved": "https://registry.npmjs.org/terser/-/terser-5.17.1.tgz",
"integrity": "sha512-hVl35zClmpisy6oaoKALOpS0rDYLxRFLHhRuDlEGTKey9qHjS1w9GMORjuwIMt70Wan4lwsLYyWDVnWgF+KUEw==",
"dev": true,
"dependencies": {
"@jridgewell/source-map": "^0.3.2",
"acorn": "^8.5.0",
"commander": "^2.20.0",
"source-map-support": "~0.5.20"
},
"bin": {
"terser": "bin/terser"
},
"engines": {
"node": ">=10"
}
},
"node_modules/uplot": {
"version": "1.6.24",
"resolved": "https://registry.npmjs.org/uplot/-/uplot-1.6.24.tgz",
"integrity": "sha512-WpH2BsrFrqxkMu+4XBvc0eCDsRBhzoq9crttYeSI0bfxpzR5YoSVzZXOKFVWcVC7sp/aDXrdDPbDZGCtck2PVg=="
},
"node_modules/wonka": {
"version": "6.3.2",
"resolved": "https://registry.npmjs.org/wonka/-/wonka-6.3.2.tgz",
"integrity": "sha512-2xXbQ1LnwNS7egVm1HPhW2FyKrekolzhpM3mCwXdQr55gO+tAiY76rhb32OL9kKsW8taj++iP7C6hxlVzbnvrw=="
},
"node_modules/wrappy": {
"version": "1.0.2",
"resolved": "https://registry.npmjs.org/wrappy/-/wrappy-1.0.2.tgz",
"integrity": "sha512-l4Sp/DRseor9wL6EvV2+TuQn63dMkPjZ/sp9XkghTEbV9KlPS1xUsZ3u7/IQO4wxtcFB4bgpQPRcR3QCvezPcQ==",
"dev": true
}
}
}

View File

@@ -7,20 +7,20 @@
     "dev": "rollup -c -w"
   },
   "devDependencies": {
-    "@rollup/plugin-commonjs": "^17.0.0",
-    "@rollup/plugin-node-resolve": "^11.0.0",
-    "rollup": "^2.3.4",
-    "rollup-plugin-css-only": "^3.1.0",
-    "rollup-plugin-svelte": "^7.0.0",
-    "rollup-plugin-terser": "^7.0.0",
-    "svelte": "^3.49.0"
+    "@rollup/plugin-commonjs": "^24.1.0",
+    "@rollup/plugin-node-resolve": "^15.0.2",
+    "@rollup/plugin-terser": "^0.4.1",
+    "rollup": "^3.21.0",
+    "rollup-plugin-css-only": "^4.3.0",
+    "rollup-plugin-svelte": "^7.1.4",
+    "svelte": "^3.58.0"
   },
   "dependencies": {
-    "@rollup/plugin-replace": "^2.4.1",
-    "@urql/svelte": "^1.3.0",
-    "graphql": "^15.6.0",
-    "sveltestrap": "^5.6.1",
-    "uplot": "^1.6.7",
-    "wonka": "^4.0.15"
+    "@rollup/plugin-replace": "^5.0.2",
+    "@urql/svelte": "^4.0.1",
+    "graphql": "^16.6.0",
+    "sveltestrap": "^5.10.0",
+    "uplot": "^1.6.24",
+    "wonka": "^6.3.2"
   }
 }

View File

@@ -1,71 +0,0 @@
import svelte from 'rollup-plugin-svelte';
import replace from "@rollup/plugin-replace";
import commonjs from '@rollup/plugin-commonjs';
import resolve from '@rollup/plugin-node-resolve';
import { terser } from 'rollup-plugin-terser';
import css from 'rollup-plugin-css-only';
// const production = !process.env.ROLLUP_WATCH;
const production = true
const plugins = [
svelte({
compilerOptions: {
// enable run-time checks when not in production
dev: !production
}
}),
// If you have external dependencies installed from
// npm, you'll most likely need these plugins. In
// some cases you'll need additional configuration -
// consult the documentation for details:
// https://github.com/rollup/plugins/tree/master/packages/commonjs
resolve({
browser: true,
dedupe: ['svelte']
}),
commonjs(),
// If we're building for production (npm run build
// instead of npm run dev), minify
production && terser(),
replace({
"process.env.NODE_ENV": JSON.stringify("development"),
preventAssignment: true
})
];
const entrypoint = (name, path) => ({
input: path,
output: {
sourcemap: false,
format: 'iife',
name: 'app',
file: `public/build/${name}.js`
},
plugins: [
...plugins,
// we'll extract any component CSS out into
// a separate file - better for performance
css({ output: `${name}.css` }),
],
watch: {
clearScreen: false
}
});
export default [
entrypoint('header', 'src/header.entrypoint.js'),
entrypoint('jobs', 'src/jobs.entrypoint.js'),
entrypoint('user', 'src/user.entrypoint.js'),
entrypoint('list', 'src/list.entrypoint.js'),
entrypoint('job', 'src/job.entrypoint.js'),
entrypoint('systems', 'src/systems.entrypoint.js'),
entrypoint('node', 'src/node.entrypoint.js'),
entrypoint('analysis', 'src/analysis.entrypoint.js'),
entrypoint('status', 'src/status.entrypoint.js'),
entrypoint('config', 'src/config.entrypoint.js')
];

View File

@@ -0,0 +1,71 @@
import svelte from 'rollup-plugin-svelte';
import replace from "@rollup/plugin-replace";
import commonjs from '@rollup/plugin-commonjs';
import resolve from '@rollup/plugin-node-resolve';
import terser from '@rollup/plugin-terser';
import css from 'rollup-plugin-css-only';
const production = !process.env.ROLLUP_WATCH;
// const production = false
const plugins = [
svelte({
compilerOptions: {
// enable run-time checks when not in production
dev: !production
}
}),
// If you have external dependencies installed from
// npm, you'll most likely need these plugins. In
// some cases you'll need additional configuration -
// consult the documentation for details:
// https://github.com/rollup/plugins/tree/master/packages/commonjs
resolve({
browser: true,
dedupe: ['svelte']
}),
commonjs(),
// If we're building for production (npm run build
// instead of npm run dev), minify
production && terser(),
replace({
"process.env.NODE_ENV": JSON.stringify("development"),
preventAssignment: true
})
];
const entrypoint = (name, path) => ({
input: path,
output: {
sourcemap: false,
format: 'iife',
name: 'app',
file: `public/build/${name}.js`
},
plugins: [
...plugins,
// we'll extract any component CSS out into
// a separate file - better for performance
css({ output: `${name}.css` }),
],
watch: {
clearScreen: false
}
});
export default [
entrypoint('header', 'src/header.entrypoint.js'),
entrypoint('jobs', 'src/jobs.entrypoint.js'),
entrypoint('user', 'src/user.entrypoint.js'),
entrypoint('list', 'src/list.entrypoint.js'),
entrypoint('job', 'src/job.entrypoint.js'),
entrypoint('systems', 'src/systems.entrypoint.js'),
entrypoint('node', 'src/node.entrypoint.js'),
entrypoint('analysis', 'src/analysis.entrypoint.js'),
entrypoint('status', 'src/status.entrypoint.js'),
entrypoint('config', 'src/config.entrypoint.js')
];

View File

@@ -1,7 +1,7 @@
 <script>
     import { init } from './utils.js'
    import { getContext, onMount } from 'svelte'
-    import { operationStore, query } from '@urql/svelte'
+    import { queryStore, gql, getContextClient } from '@urql/svelte'
     import { Row, Col, Spinner, Card, Table } from 'sveltestrap'
     import Filters from './filters/Filters.svelte'
     import PlotSelection from './PlotSelection.svelte'
@@ -30,6 +30,7 @@
     let rooflineMaxY
     let colWidth
     let numBins = 50
+    let maxY = -1
 
     const ccconfig = getContext('cc-config')
     const metricConfig = getContext('metrics')
@@ -44,52 +45,55 @@
             console.assert(cluster != null, `This cluster could not be found: ${filterPresets.cluster}`)
 
             rooflineMaxY = cluster.subClusters.reduce((max, part) => Math.max(max, part.flopRateSimd.value), 0)
-            $rooflineQuery.variables.maxY = rooflineMaxY
-            $rooflineQuery.context.pause = false
-            $rooflineQuery.reexecute()
+            maxY = rooflineMaxY
         }
     })
 
-    const statsQuery = operationStore(`
-    query($filter: [JobFilter!]!) {
-        stats: jobsStatistics(filter: $filter) {
-            totalJobs
-            shortJobs
-            totalWalltime
-            totalCoreHours
-            histDuration { count, value }
-            histNumNodes { count, value }
-        }
-
-        topUsers: jobsCount(filter: $filter, groupBy: USER, weight: NODE_HOURS, limit: 5) { name, count }
-    }
-    `, { filter: [] }, { pause: true })
+    const client = getContextClient();
+
+    $: statsQuery = queryStore({
+        client: client,
+        query: gql`
+        query($filters: [JobFilter!]!) {
+            stats: jobsStatistics(filter: $filters) {
+                totalJobs
+                shortJobs
+                totalWalltime
+                totalCoreHours
+                histDuration { count, value }
+                histNumNodes { count, value }
+            }
+            topUsers: jobsCount(filter: $filters, groupBy: USER, weight: NODE_HOURS, limit: 5) { name, count }
+        }
+        `,
+        variables: { filters }
+    })
 
-    const footprintsQuery = operationStore(`
-    query($filter: [JobFilter!]!, $metrics: [String!]!) {
-        footprints: jobsFootprints(filter: $filter, metrics: $metrics) {
-            nodehours,
-            metrics { metric, data }
-        }
-    }
-    `, { filter: [], metrics }, { pause: true })
-    $: $footprintsQuery.variables = { ...$footprintsQuery.variables, metrics }
+    $: footprintsQuery = queryStore({
+        client: client,
+        query: gql`
+        query($filters: [JobFilter!]!, $metrics: [String!]!) {
+            footprints: jobsFootprints(filter: $filters, metrics: $metrics) {
+                nodehours,
+                metrics { metric, data }
+            }
+        }`,
+        variables: { filters, metrics }
+    })
 
-    const rooflineQuery = operationStore(`
-    query($filter: [JobFilter!]!, $rows: Int!, $cols: Int!,
-            $minX: Float!, $minY: Float!, $maxX: Float!, $maxY: Float!) {
-        rooflineHeatmap(filter: $filter, rows: $rows, cols: $cols,
-            minX: $minX, minY: $minY, maxX: $maxX, maxY: $maxY)
-    }
-    `, {
-        filter: [],
-        rows: 50, cols: 50,
-        minX: 0.01, minY: 1., maxX: 1000., maxY: -1.
-    }, { pause: true });
-
-    query(statsQuery)
-    query(footprintsQuery)
-    query(rooflineQuery)
+    $: rooflineQuery = queryStore({
+        client: client,
+        query: gql`
+        query($filters: [JobFilter!]!, $rows: Int!, $cols: Int!,
+            $minX: Float!, $minY: Float!, $maxX: Float!, $maxY: Float!) {
+            rooflineHeatmap(filter: $filters, rows: $rows, cols: $cols,
+                minX: $minX, minY: $minY, maxX: $maxX, maxY: $maxY)
+        }
+        `,
+        variables: { filters, rows: 50, cols: 50, minX: 0.01, minY: 1., maxX: 1000., maxY }
+    })
 
     onMount(() => filters.update())
 </script>
@@ -116,11 +120,7 @@
                 disableClusterSelection={true}
                 startTimeQuickSelect={true}
                 on:update={({ detail }) => {
-                    $statsQuery.context.pause = false
-                    $statsQuery.variables = { filter: detail.filters }
-                    $footprintsQuery.context.pause = false
-                    $footprintsQuery.variables = { metrics, filter: detail.filters }
-                    $rooflineQuery.variables = { ...$rooflineQuery.variables, filter: detail.filters }
+                    filters = detail.filters;
                 }} />
         </Col>
     </Row>

View File

@@ -134,8 +134,8 @@
             cluster={clusters
                 .find(c => c.name == $initq.data.job.cluster).subClusters
                 .find(sc => sc.name == $initq.data.job.subCluster)}
-            flopsAny={$jobMetrics.data.jobMetrics.find(m => m.name == 'flops_any' && m.scope == 'node').metric}
-            memBw={$jobMetrics.data.jobMetrics.find(m => m.name == 'mem_bw' && m.scope == 'node').metric} />
+            flopsAny={$jobMetrics.data.jobMetrics.find(m => m.name == 'flops_any' && m.scope == 'node')}
+            memBw={$jobMetrics.data.jobMetrics.find(m => m.name == 'mem_bw' && m.scope == 'node')} />
     </Col>
 {:else}
     <Col></Col>

View File

@@ -72,7 +72,7 @@
         <UserOrProject bind:authlevel={authlevel} bind:roles={roles} on:update={({ detail }) => filters.update(detail)}/>
     </Col>
     <Col xs="2">
-        <Refresher on:reload={() => jobList.update()} />
+        <Refresher on:reload={() => jobList.refresh()} />
     </Col>
 </Row>
 <br/>

View File

@@ -2,64 +2,78 @@
     @component List of users or projects
 -->
 <script>
-    import { onMount } from 'svelte'
-    import { init } from './utils.js'
-    import { Row, Col, Button, Icon, Table, Card, Spinner,
-             InputGroup, Input } from 'sveltestrap'
-    import Filters from './filters/Filters.svelte'
-    import { operationStore, query } from '@urql/svelte';
-    import { scramble, scrambleNames } from './joblist/JobInfo.svelte'
+    import { onMount } from "svelte";
+    import { init } from "./utils.js";
+    import {
+        Row,
+        Col,
+        Button,
+        Icon,
+        Table,
+        Card,
+        Spinner,
+        InputGroup,
+        Input,
+    } from "sveltestrap";
+    import Filters from "./filters/Filters.svelte";
+    import { queryStore, gql, getContextClient } from "@urql/svelte";
+    import { scramble, scrambleNames } from "./joblist/JobInfo.svelte";
 
-    const { } = init()
+    const {} = init();
 
-    export let type
-    export let filterPresets
+    export let type;
+    export let filterPresets;
 
-    console.assert(type == 'USER' || type == 'PROJECT', 'Invalid list type provided!')
+    console.assert(
+        type == "USER" || type == "PROJECT",
+        "Invalid list type provided!"
+    );
 
-    const stats = operationStore(`query($filter: [JobFilter!]!) {
-        rows: jobsStatistics(filter: $filter, groupBy: ${type}) {
-            id
-            name
-            totalJobs
-            totalWalltime
-            totalCoreHours
-        }
-    }`, {
-        filter: []
-    }, {
-        pause: true
-    })
+    const client = getContextClient();
+    $: stats = queryStore({
+        client: client,
+        query: gql`
+        query($filters: [JobFilter!]!) {
+            rows: jobsStatistics(filter: $filters, groupBy: ${type}) {
+                id
+                name
+                totalJobs
+                totalWalltime
+                totalCoreHours
+            }
+        }`,
+        variables: { filters }
+    });
 
-    query(stats)
-
-    let filters
-    let nameFilter = ''
-    let sorting = { field: 'totalJobs', direction: 'down' }
+    let filters;
+    let nameFilter = "";
+    let sorting = { field: "totalJobs", direction: "down" };
 
     function changeSorting(event, field) {
-        let target = event.target
-        while (target.tagName != 'BUTTON')
-            target = target.parentElement
+        let target = event.target;
+        while (target.tagName != "BUTTON") target = target.parentElement;
 
-        let direction = target.children[0].className.includes('up') ? 'down' : 'up'
-        target.children[0].className = `bi-sort-numeric-${direction}`
-        sorting = { field, direction }
+        let direction = target.children[0].className.includes("up")
+            ? "down"
+            : "up";
+        target.children[0].className = `bi-sort-numeric-${direction}`;
+        sorting = { field, direction };
     }
 
     function sort(stats, sorting, nameFilter) {
-        const cmp = sorting.field == 'id'
-            ? (sorting.direction == 'up'
-                ? (a, b) => a.id < b.id
-                : (a, b) => a.id > b.id)
-            : (sorting.direction == 'up'
+        const cmp =
+            sorting.field == "id"
+                ? sorting.direction == "up"
+                    ? (a, b) => a.id < b.id
+                    : (a, b) => a.id > b.id
+                : sorting.direction == "up"
                 ? (a, b) => a[sorting.field] - b[sorting.field]
-                : (a, b) => b[sorting.field] - a[sorting.field])
+                : (a, b) => b[sorting.field] - a[sorting.field];
 
-        return stats.filter(u => u.id.includes(nameFilter)).sort(cmp)
+        return stats.filter((u) => u.id.includes(nameFilter)).sort(cmp);
     }
 
-    onMount(() => filters.update())
+    onMount(() => filters.update());
 </script>
 
 <Row>
@@ -68,59 +82,86 @@
                 <Button disabled outline>
                     Search {type.toLowerCase()}s
                 </Button>
-                <Input bind:value={nameFilter} placeholder="Filter by {({ USER: 'username', PROJECT: 'project' })[type]}" />
+                <Input
+                    bind:value={nameFilter}
+                    placeholder="Filter by {{
+                        USER: 'username',
+                        PROJECT: 'project',
+                    }[type]}"
+                />
             </InputGroup>
         </Col>
         <Col xs="auto">
             <Filters
                 bind:this={filters}
-                filterPresets={filterPresets}
+                {filterPresets}
                 startTimeQuickSelect={true}
                 menuText="Only {type.toLowerCase()}s with jobs that match the filters will show up"
                 on:update={({ detail }) => {
-                    $stats.variables = { filter: detail.filters }
-                    $stats.context.pause = false
-                    $stats.reexecute()
-                }} />
+                    filters = detail.filters;
+                }}
+            />
         </Col>
 </Row>
 <Table>
     <thead>
         <tr>
             <th scope="col">
-                {({ USER: 'Username', PROJECT: 'Project Name' })[type]}
-                <Button color="{sorting.field == 'id' ? 'primary' : 'light'}"
-                    size="sm" on:click={e => changeSorting(e, 'id')}>
+                <!-- {({ -->
+                <!--     USER: "Username", -->
+                <!--     PROJECT: "Project Name", -->
+                <!-- })[type]} -->
+                <Button
+                    color={sorting.field == "id" ? "primary" : "light"}
+                    size="sm"
+                    on:click={(e) => changeSorting(e, "id")}
+                >
                     <Icon name="sort-numeric-down" />
                 </Button>
             </th>
-            {#if type == 'USER'}
+            {#if type == "USER"}
                 <th scope="col">
                     Name
-                    <Button color="{sorting.field == 'name' ? 'primary' : 'light'}"
-                        size="sm" on:click={e => changeSorting(e, 'name')}>
+                    <Button
+                        color={sorting.field == "name" ? "primary" : "light"}
+                        size="sm"
+                        on:click={(e) => changeSorting(e, "name")}
+                    >
                         <Icon name="sort-numeric-down" />
                     </Button>
                 </th>
             {/if}
             <th scope="col">
                 Total Jobs
-                <Button color="{sorting.field == 'totalJobs' ? 'primary' : 'light'}"
-                    size="sm" on:click={e => changeSorting(e, 'totalJobs')}>
+                <Button
+                    color={sorting.field == "totalJobs" ? "primary" : "light"}
+                    size="sm"
+                    on:click={(e) => changeSorting(e, "totalJobs")}
+                >
                     <Icon name="sort-numeric-down" />
                 </Button>
             </th>
             <th scope="col">
                 Total Walltime
-                <Button color="{sorting.field == 'totalWalltime' ? 'primary' : 'light'}"
-                    size="sm" on:click={e => changeSorting(e, 'totalWalltime')}>
+                <Button
+                    color={sorting.field == "totalWalltime"
+                        ? "primary"
+                        : "light"}
+                    size="sm"
+                    on:click={(e) => changeSorting(e, "totalWalltime")}
+                >
                     <Icon name="sort-numeric-down" />
                 </Button>
             </th>
             <th scope="col">
                 Total Core Hours
-                <Button color="{sorting.field == 'totalCoreHours' ? 'primary' : 'light'}"
-                    size="sm" on:click={e => changeSorting(e, 'totalCoreHours')}>
+                <Button
+                    color={sorting.field == "totalCoreHours"
+                        ? "primary"
+                        : "light"}
+                    size="sm"
+                    on:click={(e) => changeSorting(e, "totalCoreHours")}
+                >
                     <Icon name="sort-numeric-down" />
                 </Button>
             </th>
@@ -129,26 +170,36 @@
     <tbody>
         {#if $stats.fetching}
             <tr>
-                <td colspan="4" style="text-align: center;"><Spinner secondary/></td>
+                <td colspan="4" style="text-align: center;"
+                    ><Spinner secondary /></td
+                >
             </tr>
         {:else if $stats.error}
            <tr>
-                <td colspan="4"><Card body color="danger" class="mb-3">{$stats.error.message}</Card></td>
+                <td colspan="4"
+                    ><Card body color="danger" class="mb-3"
+                        >{$stats.error.message}</Card
+                    ></td
+                >
             </tr>
         {:else if $stats.data}
             {#each sort($stats.data.rows, sorting, nameFilter) as row (row.id)}
                 <tr>
                     <td>
-                        {#if type == 'USER'}
-                            <a href="/monitoring/user/{row.id}">{scrambleNames ? scramble(row.id) : row.id}</a>
-                        {:else if type == 'PROJECT'}
-                            <a href="/monitoring/jobs/?project={row.id}">{row.id}</a>
+                        {#if type == "USER"}
+                            <a href="/monitoring/user/{row.id}"
+                                >{scrambleNames ? scramble(row.id) : row.id}</a
+                            >
+                        {:else if type == "PROJECT"}
+                            <a href="/monitoring/jobs/?project={row.id}"
+                                >{row.id}</a
+                            >
                        {:else}
                            {row.id}
                        {/if}
                    </td>
-                    {#if type == 'USER'}
-                        <td>{row?.name ? row.name : ''}</td>
+                    {#if type == "USER"}
+                        <td>{row?.name ? row.name : ""}</td>
                    {/if}
                    <td>{row.totalJobs}</td>
                    <td>{row.totalWalltime}</td>
@@ -156,7 +207,9 @@
                </tr>
            {:else}
                <tr>
-                    <td colspan="4"><i>No {type.toLowerCase()}s/jobs found</i></td>
+                    <td colspan="4"
+                        ><i>No {type.toLowerCase()}s/jobs found</i></td
+                    >
                </tr>
            {/each}
        {/if}
{/if} {/if}

View File

@ -10,7 +10,7 @@
<script> <script>
import { Modal, ModalBody, ModalHeader, ModalFooter, Button, ListGroup } from 'sveltestrap' import { Modal, ModalBody, ModalHeader, ModalFooter, Button, ListGroup } from 'sveltestrap'
import { getContext } from 'svelte' import { getContext } from 'svelte'
import { mutation } from '@urql/svelte' import { gql, getContextClient , mutationStore } from '@urql/svelte'
export let metrics export let metrics
export let isOpen export let isOpen
@ -53,11 +53,17 @@
} }
} }
const updateConfiguration = mutation({ const client = getContextClient();
query: `mutation($name: String!, $value: String!) { const updateConfigurationMutation = ({ name, value }) => {
updateConfiguration(name: $name, value: $value) return mutationStore({
}` client: client,
}) query: gql`
mutation($name: String!, $value: String!) {
updateConfiguration(name: $name, value: $value)
}
`,
variables: { name, value }
})}
let columnHovering = null let columnHovering = null
@ -84,14 +90,15 @@
metrics = newMetricsOrder.filter(m => unorderedMetrics.includes(m)) metrics = newMetricsOrder.filter(m => unorderedMetrics.includes(m))
isOpen = false isOpen = false
updateConfiguration({ updateConfigurationMutation({
name: cluster == null ? configName : `${configName}:${cluster}`, name: cluster == null ? configName : `${configName}:${cluster}`,
value: JSON.stringify(metrics) value: JSON.stringify(metrics)
}) }).subscribe(res => {
.then(res => { if (res.fetching === false && res.error) {
if (res.error) throw res.error
console.error(res.error) // console.log('Error on subscription: ' + res.error)
}) }
})
} }
</script> </script>
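
This wrapper pattern recurs in every component that writes settings: the urql v1 mutation() helper returned a promise, while mutationStore executes as soon as it is created and returns a Svelte store, so the migrated components wrap it in a plain function and subscribe for the result. A minimal self-contained sketch of the pattern, assuming only the updateConfiguration mutation from this schema and a client already placed in context (the name save is illustrative, not from the diff):

    <script>
        import { gql, getContextClient, mutationStore } from '@urql/svelte'

        const client = getContextClient()

        // mutationStore fires immediately, so wrapping it in a function
        // restores the old call-on-demand behaviour.
        const updateConfigurationMutation = ({ name, value }) =>
            mutationStore({
                client: client,
                query: gql`
                    mutation($name: String!, $value: String!) {
                        updateConfiguration(name: $name, value: $value)
                    }
                `,
                variables: { name, value }
            })

        function save(name, value) {
            updateConfigurationMutation({ name, value }).subscribe(res => {
                // fetching === false marks completion; only then check error.
                if (res.fetching === false && res.error) console.error(res.error)
            })
        }
    </script>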

View File

@ -1,7 +1,7 @@
<script> <script>
import { init } from './utils.js' import { init } from './utils.js'
import { Row, Col, InputGroup, InputGroupText, Icon, Spinner, Card } from 'sveltestrap' import { Row, Col, InputGroup, InputGroupText, Icon, Spinner, Card } from 'sveltestrap'
import { operationStore, query } from '@urql/svelte' import { queryStore, gql, getContextClient } from '@urql/svelte'
import TimeSelection from './filters/TimeSelection.svelte' import TimeSelection from './filters/TimeSelection.svelte'
import PlotTable from './PlotTable.svelte' import PlotTable from './PlotTable.svelte'
import MetricPlot from './plots/MetricPlot.svelte' import MetricPlot from './plots/MetricPlot.svelte'
@ -22,8 +22,8 @@
const ccconfig = getContext('cc-config') const ccconfig = getContext('cc-config')
const clusters = getContext('clusters') const clusters = getContext('clusters')
const client = getContextClient();
const nodesQuery = operationStore(`query($cluster: String!, $nodes: [String!], $from: Time!, $to: Time!) { const query = gql`query($cluster: String!, $nodes: [String!], $from: Time!, $to: Time!) {
nodeMetrics(cluster: $cluster, nodes: $nodes, from: $from, to: $to) { nodeMetrics(cluster: $cluster, nodes: $nodes, from: $from, to: $to) {
host host
subCluster subCluster
@ -40,14 +40,18 @@
} }
} }
} }
}`, { }`;
cluster: cluster,
nodes: [hostname],
from: from.toISOString(),
to: to.toISOString()
})
$: $nodesQuery.variables = { cluster, nodes: [hostname], from: from.toISOString(), to: to.toISOString() } $: nodesQuery = queryStore({
client: client,
query: query,
variables: {
cluster: cluster,
nodes: [hostname],
from: from.toISOString(),
to: to.toISOString(),
}
});
let metricUnits = {} let metricUnits = {}
$: if ($nodesQuery.data) { $: if ($nodesQuery.data) {
@ -59,9 +63,6 @@
} }
} }
} }
query(nodesQuery)
// $: console.log($nodesQuery?.data?.nodeMetrics[0].metrics) // $: console.log($nodesQuery?.data?.nodeMetrics[0].metrics)
</script> </script>
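
The query side follows the same shape: operationStore plus imperative variable assignment is replaced by a queryStore declared inside a reactive statement, so changing any input re-creates the store and re-runs the request. A reduced sketch of the pattern; the component props and the trimmed-down query are illustrative:

    <script>
        import { queryStore, gql, getContextClient } from '@urql/svelte'

        export let cluster
        export let hostname

        const client = getContextClient()

        let from = new Date(Date.now() - 4 * 3600 * 1000)
        let to = new Date()

        // Re-created whenever cluster, hostname, from or to change; no manual
        // "$nodesQuery.variables = ..." assignment is needed any more.
        $: nodesQuery = queryStore({
            client: client,
            query: gql`query($cluster: String!, $nodes: [String!], $from: Time!, $to: Time!) {
                nodeMetrics(cluster: $cluster, nodes: $nodes, from: $from, to: $to) {
                    host
                }
            }`,
            variables: {
                cluster: cluster,
                nodes: [hostname],
                from: from.toISOString(),
                to: to.toISOString()
            }
        })
    </script>

    {#if $nodesQuery.fetching}
        <p>Loading...</p>
    {:else if $nodesQuery.error}
        <p>{$nodesQuery.error.message}</p>
    {:else}
        <pre>{JSON.stringify($nodesQuery.data, null, 2)}</pre>
    {/if}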

View File

@ -1,17 +1,22 @@
<script> <script>
import { Modal, ModalBody, ModalHeader, ModalFooter, InputGroup, import { Modal, ModalBody, ModalHeader, ModalFooter, InputGroup,
Button, ListGroup, ListGroupItem, Icon } from 'sveltestrap' Button, ListGroup, ListGroupItem, Icon } from 'sveltestrap'
import { mutation } from '@urql/svelte' import { gql, getContextClient , mutationStore } from '@urql/svelte'
export let availableMetrics export let availableMetrics
export let metricsInHistograms export let metricsInHistograms
export let metricsInScatterplots export let metricsInScatterplots
const updateConfigurationMutation = mutation({ const client = getContextClient();
query: `mutation($name: String!, $value: String!) { const updateConfigurationMutation = ({ name, value }) => {
updateConfiguration(name: $name, value: $value) return mutationStore({
}` client: client,
}) query: gql`mutation($name: String!, $value: String!) {
updateConfiguration(name: $name, value: $value)
}`,
variables: { name, value }
})
}
let isHistogramConfigOpen = false, isScatterPlotConfigOpen = false let isHistogramConfigOpen = false, isScatterPlotConfigOpen = false
let selectedMetric1 = null, selectedMetric2 = null let selectedMetric1 = null, selectedMetric2 = null
@ -20,11 +25,12 @@
updateConfigurationMutation({ updateConfigurationMutation({
name: data.name, name: data.name,
value: JSON.stringify(data.value) value: JSON.stringify(data.value)
}) }).subscribe(res => {
.then(res => { if (res.fetching === false && res.error) {
if (res.error) throw res.error
console.error(res.error) // console.log('Error on subscription: ' + res.error)
}); }
})
} }
</script> </script>

View File

@ -4,16 +4,19 @@
import Histogram from './plots/Histogram.svelte' import Histogram from './plots/Histogram.svelte'
import { Row, Col, Spinner, Card, CardHeader, CardTitle, CardBody, Table, Progress, Icon } from 'sveltestrap' import { Row, Col, Spinner, Card, CardHeader, CardTitle, CardBody, Table, Progress, Icon } from 'sveltestrap'
import { init } from './utils.js' import { init } from './utils.js'
import { operationStore, query } from '@urql/svelte' import { queryStore, gql, getContextClient } from '@urql/svelte'
const { query: initq } = init() const { query: initq } = init()
export let cluster export let cluster
let plotWidths = [], colWidth1 = 0, colWidth2 let plotWidths = [], colWidth1 = 0, colWidth2
let from = new Date(Date.now() - 5 * 60 * 1000), to = new Date(Date.now()) let from = new Date(Date.now() - 5 * 60 * 1000), to = new Date(Date.now())
const mainQuery = operationStore(`query($cluster: String!, $filter: [JobFilter!]!, $metrics: [String!], $from: Time!, $to: Time!) {
const client = getContextClient();
$: mainQuery = queryStore({
client: client,
query: gql`query($cluster: String!, $filter: [JobFilter!]!, $metrics: [String!], $from: Time!, $to: Time!) {
nodeMetrics(cluster: $cluster, metrics: $metrics, from: $from, to: $to) { nodeMetrics(cluster: $cluster, metrics: $metrics, from: $from, to: $to) {
host host
subCluster subCluster
@ -36,12 +39,11 @@
allocatedNodes(cluster: $cluster) { name, count } allocatedNodes(cluster: $cluster) { name, count }
topUsers: jobsCount(filter: $filter, groupBy: USER, weight: NODE_COUNT, limit: 10) { name, count } topUsers: jobsCount(filter: $filter, groupBy: USER, weight: NODE_COUNT, limit: 10) { name, count }
topProjects: jobsCount(filter: $filter, groupBy: PROJECT, weight: NODE_COUNT, limit: 10) { name, count } topProjects: jobsCount(filter: $filter, groupBy: PROJECT, weight: NODE_COUNT, limit: 10) { name, count }
}`, { }`,
cluster: cluster, variables: {
metrics: ['flops_any', 'mem_bw'], cluster: cluster, metrics: ['flops_any', 'mem_bw'], from: from.toISOString(), to: to.toISOString(),
from: from.toISOString(),
to: to.toISOString(),
filter: [{ state: ['running'] }, { cluster: { eq: cluster } }] filter: [{ state: ['running'] }, { cluster: { eq: cluster } }]
}
}) })
const sumUp = (data, subcluster, metric) => data.reduce((sum, node) => node.subCluster == subcluster const sumUp = (data, subcluster, metric) => data.reduce((sum, node) => node.subCluster == subcluster
@ -60,7 +62,6 @@
} }
} }
query(mainQuery)
</script> </script>
<!-- Loading indicator & Refresh --> <!-- Loading indicator & Refresh -->
@ -80,13 +81,8 @@
</Col> </Col>
<Col xs="auto" style="margin-left: auto;"> <Col xs="auto" style="margin-left: auto;">
<Refresher initially={120} on:reload={() => { <Refresher initially={120} on:reload={() => {
console.log('reload...')
from = new Date(Date.now() - 5 * 60 * 1000) from = new Date(Date.now() - 5 * 60 * 1000)
to = new Date(Date.now()) to = new Date(Date.now())
$mainQuery.variables = { ...$mainQuery.variables, from: from, to: to }
$mainQuery.reexecute({ requestPolicy: 'network-only' })
}} /> }} />
</Col> </Col>
</Row> </Row>
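
Worth noting what replaced the deleted reexecute() lines: because mainQuery is declared with a reactive statement, reassigning from and to inside the reload handler is all it takes to issue a fresh request. The handler in isolation, with comments:

    <Refresher initially={120} on:reload={() => {
        // Reassigning the reactive inputs re-runs the "$: mainQuery =
        // queryStore(...)" statement above; the old explicit
        // reexecute({ requestPolicy: 'network-only' }) call is obsolete.
        from = new Date(Date.now() - 5 * 60 * 1000)
        to = new Date(Date.now())
    }} />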

View File

@ -1,7 +1,7 @@
<script> <script>
import { init } from './utils.js' import { init } from './utils.js'
import { Row, Col, Input, InputGroup, InputGroupText, Icon, Spinner, Card } from 'sveltestrap' import { Row, Col, Input, InputGroup, InputGroupText, Icon, Spinner, Card } from 'sveltestrap'
import { operationStore, query } from '@urql/svelte' import { queryStore, gql, getContextClient } from '@urql/svelte'
import TimeSelection from './filters/TimeSelection.svelte' import TimeSelection from './filters/TimeSelection.svelte'
import PlotTable from './PlotTable.svelte' import PlotTable from './PlotTable.svelte'
import MetricPlot from './plots/MetricPlot.svelte' import MetricPlot from './plots/MetricPlot.svelte'
@ -27,28 +27,33 @@
let hostnameFilter = '' let hostnameFilter = ''
let selectedMetric = ccconfig.system_view_selectedMetric let selectedMetric = ccconfig.system_view_selectedMetric
const nodesQuery = operationStore(`query($cluster: String!, $metrics: [String!], $from: Time!, $to: Time!) { const client = getContextClient();
nodeMetrics(cluster: $cluster, metrics: $metrics, from: $from, to: $to) { $: nodesQuery = queryStore({
host client: client,
subCluster query: gql`query($cluster: String!, $metrics: [String!], $from: Time!, $to: Time!) {
metrics { nodeMetrics(cluster: $cluster, metrics: $metrics, from: $from, to: $to) {
name host
scope subCluster
metric { metrics {
timestep name
unit { base, prefix } scope
series { metric {
statistics { min, avg, max } timestep
data unit { base, prefix }
series {
statistics { min, avg, max }
data
}
} }
} }
} }
}`,
variables: {
cluster: cluster,
metrics: [selectedMetric],
from: from.toISOString(),
to: to.toISOString()
} }
}`, {
cluster: cluster,
metrics: [],
from: from.toISOString(),
to: to.toISOString()
}) })
let metricUnits = {} let metricUnits = {}
@ -63,9 +68,6 @@
} }
} }
$: $nodesQuery.variables = { cluster, metrics: [selectedMetric], from: from.toISOString(), to: to.toISOString() }
query(nodesQuery)
</script> </script>
<Row> <Row>

View File

@ -1,6 +1,6 @@
<script> <script>
import { getContext } from 'svelte' import { getContext } from 'svelte'
import { mutation } from '@urql/svelte' import { gql, getContextClient , mutationStore } from '@urql/svelte'
import { Icon, Button, ListGroupItem, Spinner, Modal, Input, import { Icon, Button, ListGroupItem, Spinner, Modal, Input,
ModalBody, ModalHeader, ModalFooter, Alert } from 'sveltestrap' ModalBody, ModalHeader, ModalFooter, Alert } from 'sveltestrap'
import { fuzzySearchTags } from './utils.js' import { fuzzySearchTags } from './utils.js'
@ -15,23 +15,37 @@
let pendingChange = false let pendingChange = false
let isOpen = false let isOpen = false
const createTagMutation = mutation({ const client = getContextClient();
query: `mutation($type: String!, $name: String!) {
createTag(type: $type, name: $name) { id, type, name }
}`
})
const addTagsToJobMutation = mutation({ const createTagMutation = ({ type, name }) => {
query: `mutation($job: ID!, $tagIds: [ID!]!) { return mutationStore({
addTagsToJob(job: $job, tagIds: $tagIds) { id, type, name } client: client,
}` query: gql`mutation($type: String!, $name: String!) {
}) createTag(type: $type, name: $name) { id, type, name }
}`,
variables: { type, name}
})
}
const removeTagsFromJobMutation = mutation({ const addTagsToJobMutation = ({ job, tagIds }) => {
query: `mutation($job: ID!, $tagIds: [ID!]!) { return mutationStore({
removeTagsFromJob(job: $job, tagIds: $tagIds) { id, type, name } client: client,
}` query: gql`mutation($job: ID!, $tagIds: [ID!]!) {
}) addTagsToJob(job: $job, tagIds: $tagIds) { id, type, name }
}`,
variables: {job, tagIds}
})
}
const removeTagsFromJobMutation = ({ job, tagIds }) => {
return mutationStore({
client: client,
query: gql`mutation($job: ID!, $tagIds: [ID!]!) {
removeTagsFromJob(job: $job, tagIds: $tagIds) { id, type, name }
}`,
variables: {job, tagIds}
})
}
    let allTagsFiltered // $initialized is in there because when it becomes true, allTags is initialized.
$: allTagsFiltered = ($initialized, fuzzySearchTags(filterTerm, allTags)) $: allTagsFiltered = ($initialized, fuzzySearchTags(filterTerm, allTags))
@ -55,43 +69,47 @@
function createTag(type, name) { function createTag(type, name) {
pendingChange = true pendingChange = true
return createTagMutation({ type: type, name: name }) createTagMutation({ type: type, name: name })
.then(res => { .subscribe(res => {
if (res.error) if (res.fetching === false && !res.error) {
throw res.error
pendingChange = false pendingChange = false
allTags = [...allTags, res.data.createTag] allTags = [...allTags, res.data.createTag]
newTagType = '' newTagType = ''
newTagName = '' newTagName = ''
return res.data.createTag addTagToJob(res.data.createTag)
}, err => console.error(err)) } else if (res.fetching === false && res.error) {
throw res.error
// console.log('Error on subscription: ' + res.error)
}
})
} }
function addTagToJob(tag) { function addTagToJob(tag) {
pendingChange = tag.id pendingChange = tag.id
addTagsToJobMutation({ job: job.id, tagIds: [tag.id] }) addTagsToJobMutation({ job: job.id, tagIds: [tag.id] })
.then(res => { .subscribe(res => {
if (res.error) if (res.fetching === false && !res.error) {
throw res.error
jobTags = job.tags = res.data.addTagsToJob; jobTags = job.tags = res.data.addTagsToJob;
pendingChange = false; pendingChange = false;
}) } else if (res.fetching === false && res.error) {
.catch(err => console.error(err)) throw res.error
// console.log('Error on subscription: ' + res.error)
}
})
} }
function removeTagFromJob(tag) { function removeTagFromJob(tag) {
pendingChange = tag.id pendingChange = tag.id
removeTagsFromJobMutation({ job: job.id, tagIds: [tag.id] }) removeTagsFromJobMutation({ job: job.id, tagIds: [tag.id] })
.then(res => { .subscribe(res => {
if (res.error) if (res.fetching === false && !res.error) {
throw res.error
jobTags = job.tags = res.data.removeTagsFromJob jobTags = job.tags = res.data.removeTagsFromJob
pendingChange = false pendingChange = false
}) } else if (res.fetching === false && res.error) {
.catch(err => console.error(err)) throw res.error
// console.log('Error on subscription: ' + res.error)
}
})
} }
</script> </script>
@ -154,8 +172,7 @@
<br/> <br/>
{#if newTagType && newTagName && isNewTag(newTagType, newTagName)} {#if newTagType && newTagName && isNewTag(newTagType, newTagName)}
<Button outline color="success" <Button outline color="success"
on:click={e => (e.preventDefault(), createTag(newTagType, newTagName)) on:click={e => (e.preventDefault(), createTag(newTagType, newTagName))}>
.then(tag => addTagToJob(tag))}>
Create & Add Tag: Create & Add Tag:
<Tag tag={({ type: newTagType, name: newTagName })} clickable={false}/> <Tag tag={({ type: newTagType, name: newTagName })} clickable={false}/>
</Button> </Button>
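
The create-and-add flow changed shape with this migration: the template no longer chains .then(tag => addTagToJob(tag)) onto createTag(), because mutationStore yields a store rather than a promise. Instead, createTag() fires the follow-up mutation from inside its own subscription once the first result has settled; compressed, using the file's own helpers:

    function createTag(type, name) {
        pendingChange = true
        createTagMutation({ type: type, name: name })
            .subscribe(res => {
                if (res.fetching === false && !res.error) {
                    pendingChange = false
                    allTags = [...allTags, res.data.createTag]
                    addTagToJob(res.data.createTag) // chain the second mutation here
                } else if (res.fetching === false && res.error) {
                    console.error(res.error)
                }
            })
    }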

View File

@ -2,7 +2,7 @@
import { onMount, getContext } from 'svelte' import { onMount, getContext } from 'svelte'
import { init } from './utils.js' import { init } from './utils.js'
import { Table, Row, Col, Button, Icon, Card, Spinner } from 'sveltestrap' import { Table, Row, Col, Button, Icon, Card, Spinner } from 'sveltestrap'
import { operationStore, query } from '@urql/svelte' import { queryStore, gql, getContextClient } from '@urql/svelte'
import Filters from './filters/Filters.svelte' import Filters from './filters/Filters.svelte'
import JobList from './joblist/JobList.svelte' import JobList from './joblist/JobList.svelte'
import Sorting from './joblist/SortSelection.svelte' import Sorting from './joblist/SortSelection.svelte'
@ -25,31 +25,24 @@
let w1, w2, histogramHeight = 250 let w1, w2, histogramHeight = 250
let selectedCluster = filterPresets?.cluster ? filterPresets.cluster : null let selectedCluster = filterPresets?.cluster ? filterPresets.cluster : null
const stats = operationStore(` const client = getContextClient();
query($filter: [JobFilter!]!) { $: stats = queryStore({
jobsStatistics(filter: $filter) { client: client,
totalJobs query: gql`
shortJobs query($filters: [JobFilter!]!) {
totalWalltime jobsStatistics(filter: $filters) {
totalCoreHours totalJobs
histDuration { count, value } shortJobs
histNumNodes { count, value } totalWalltime
} totalCoreHours
} histDuration { count, value }
`, { histNumNodes { count, value }
filter: [] }}`,
}, { variables: { filters }
pause: true
}) })
// filters[filters.findIndex(filter => filter.cluster != null)] ?
// filters[filters.findIndex(filter => filter.cluster != null)].cluster.eq :
// null
    // Cluster filter has to always be at first index; otherwise the above would throw an error
$: selectedCluster = filters[0]?.cluster ? filters[0].cluster.eq : null $: selectedCluster = filters[0]?.cluster ? filters[0].cluster.eq : null
query(stats)
onMount(() => filters.update()) onMount(() => filters.update())
</script> </script>
@ -84,15 +77,12 @@
bind:this={filters} bind:this={filters}
on:update={({ detail }) => { on:update={({ detail }) => {
let jobFilters = [...detail.filters, { user: { eq: user.username } }] let jobFilters = [...detail.filters, { user: { eq: user.username } }]
$stats.variables = { filter: jobFilters }
$stats.context.pause = false
$stats.reexecute()
filters = jobFilters filters = jobFilters
jobList.update(jobFilters) jobList.update(jobFilters)
}} /> }} />
</Col> </Col>
<Col xs="auto" style="margin-left: auto;"> <Col xs="auto" style="margin-left: auto;">
<Refresher on:reload={() => jobList.update()} /> <Refresher on:reload={() => jobList.refresh()} />
</Col> </Col>
</Row> </Row>
<br/> <br/>

View File

@ -9,83 +9,144 @@
- update(filters?: [JobFilter]) - update(filters?: [JobFilter])
--> -->
<script> <script>
import { operationStore, query, mutation } from '@urql/svelte' import {
import { getContext } from 'svelte'; queryStore,
import { Row, Table, Card, Spinner } from 'sveltestrap' gql,
import Pagination from './Pagination.svelte' getContextClient,
import JobListRow from './Row.svelte' mutationStore,
import { stickyHeader } from '../utils.js' } from "@urql/svelte";
import { getContext } from "svelte";
import { Row, Table, Card, Spinner } from "sveltestrap";
import Pagination from "./Pagination.svelte";
import JobListRow from "./Row.svelte";
import { stickyHeader } from "../utils.js";
const ccconfig = getContext('cc-config'), const ccconfig = getContext("cc-config"),
clusters = getContext('clusters'), clusters = getContext("clusters"),
initialized = getContext('initialized') initialized = getContext("initialized");
export let sorting = { field: "startTime", order: "DESC" } export let sorting = { field: "startTime", order: "DESC" };
export let matchedJobs = 0 export let matchedJobs = 0;
export let metrics = ccconfig.plot_list_selectedMetrics export let metrics = ccconfig.plot_list_selectedMetrics;
let itemsPerPage = ccconfig.plot_list_jobsPerPage let itemsPerPage = ccconfig.plot_list_jobsPerPage;
let page = 1 let page = 1;
let paging = { itemsPerPage, page } let paging = { itemsPerPage, page };
let filter = [] let filter = [];
const jobs = operationStore(` const client = getContextClient();
query($filter: [JobFilter!]!, $sorting: OrderByInput!, $paging: PageRequest! ){ const query = gql`
jobs(filter: $filter, order: $sorting, page: $paging) { query (
items { $filter: [JobFilter!]!
id, jobId, user, project, jobName, cluster, subCluster, startTime, $sorting: OrderByInput!
duration, numNodes, numHWThreads, numAcc, walltime, resources { hostname }, $paging: PageRequest!
SMT, exclusive, partition, arrayJobId, ) {
monitoringStatus, state, jobs(filter: $filter, order: $sorting, page: $paging) {
tags { id, type, name } items {
userData { name } id
metaData jobId
user
project
jobName
cluster
subCluster
startTime
duration
numNodes
numHWThreads
numAcc
walltime
resources {
hostname
}
SMT
exclusive
partition
arrayJobId
monitoringStatus
state
tags {
id
type
name
}
userData {
name
}
metaData
}
count
} }
count
} }
}`, { `;
paging,
sorting,
filter,
}, {
pause: true
})
const updateConfiguration = mutation({ $: jobs = queryStore({
query: `mutation($name: String!, $value: String!) { client: client,
updateConfiguration(name: $name, value: $value) query: query,
}` variables: { paging, sorting, filter }
}) });
$: $jobs.variables = { ...$jobs.variables, sorting, paging } $: matchedJobs = $jobs.data != null ? $jobs.data.jobs.count : 0;
$: matchedJobs = $jobs.data != null ? $jobs.data.jobs.count : 0
    // Force-refresh the list with existing, unchanged variables (identical variables alone would not trigger reactivity)
export function refresh() {
queryStore({
client: client,
query: query,
variables: { paging, sorting, filter },
requestPolicy: 'network-only'
});
}
// (Re-)query and optionally set new filters. // (Re-)query and optionally set new filters.
export function update(filters) { export function update(filters) {
if (filters != null) { if (filters != null) {
let minRunningFor = ccconfig.plot_list_hideShortRunningJobs let minRunningFor = ccconfig.plot_list_hideShortRunningJobs;
if (minRunningFor && minRunningFor > 0) { if (minRunningFor && minRunningFor > 0) {
filters.push({ minRunningFor }) filters.push({ minRunningFor });
} }
filter = filters;
$jobs.variables.filter = filters
// console.log('filters:', ...filters.map(f => Object.entries(f)).flat(2))
} }
page = 1;
page = 1 paging = paging = { page, itemsPerPage };
$jobs.variables.paging = paging = { page, itemsPerPage };
$jobs.context.pause = false
$jobs.reexecute({ requestPolicy: 'network-only' })
} }
query(jobs) const updateConfigurationMutation = ({ name, value }) => {
return mutationStore({
client: client,
query: gql`
mutation ($name: String!, $value: String!) {
updateConfiguration(name: $name, value: $value)
}
`,
variables: { name, value }
});
}
let tableWidth = null function updateConfiguration(value, page) {
let jobInfoColumnWidth = 250 updateConfigurationMutation({ name: 'plot_list_jobsPerPage', value: value })
$: plotWidth = Math.floor((tableWidth - jobInfoColumnWidth) / metrics.length - 10) .subscribe(res => {
if (res.fetching === false && !res.error) {
paging = { itemsPerPage: value, page: page }; // Trigger reload of jobList
} else if (res.fetching === false && res.error) {
throw res.error
// console.log('Error on subscription: ' + res.error)
}
})
};
let headerPaddingTop = 0 let tableWidth = null;
stickyHeader('.cc-table-wrapper > table.table >thead > tr > th.position-sticky:nth-child(1)', (x) => (headerPaddingTop = x)) let jobInfoColumnWidth = 250;
$: plotWidth = Math.floor(
(tableWidth - jobInfoColumnWidth) / metrics.length - 10
);
let headerPaddingTop = 0;
stickyHeader(
".cc-table-wrapper > table.table >thead > tr > th.position-sticky:nth-child(1)",
(x) => (headerPaddingTop = x)
);
</script> </script>
<Row> <Row>
@ -93,20 +154,43 @@
<Table cellspacing="0px" cellpadding="0px"> <Table cellspacing="0px" cellpadding="0px">
<thead> <thead>
<tr> <tr>
<th class="position-sticky top-0" scope="col" style="width: {jobInfoColumnWidth}px; padding-top: {headerPaddingTop}px"> <th
class="position-sticky top-0"
scope="col"
style="width: {jobInfoColumnWidth}px; padding-top: {headerPaddingTop}px"
>
Job Info Job Info
</th> </th>
{#each metrics as metric (metric)} {#each metrics as metric (metric)}
<th class="position-sticky top-0 text-center" scope="col" style="width: {plotWidth}px; padding-top: {headerPaddingTop}px"> <th
class="position-sticky top-0 text-center"
scope="col"
style="width: {plotWidth}px; padding-top: {headerPaddingTop}px"
>
{metric} {metric}
{#if $initialized} {#if $initialized}
({clusters ({clusters
.map(cluster => cluster.metricConfig.find(m => m.name == metric)) .map((cluster) =>
.filter(m => m != null) cluster.metricConfig.find(
.map(m => (m.unit?.prefix?m.unit?.prefix:'') + (m.unit?.base?m.unit?.base:'')) // Build unitStr (m) => m.name == metric
.reduce((arr, unitStr) => arr.includes(unitStr) ? arr : [...arr, unitStr], []) // w/o this, output would be [unitStr, unitStr] )
.join(', ') )
}) .filter((m) => m != null)
.map(
(m) =>
(m.unit?.prefix
? m.unit?.prefix
: "") +
(m.unit?.base ? m.unit?.base : "")
) // Build unitStr
.reduce(
(arr, unitStr) =>
arr.includes(unitStr)
? arr
: [...arr, unitStr],
[]
) // w/o this, output would be [unitStr, unitStr]
.join(", ")})
{/if} {/if}
</th> </th>
{/each} {/each}
@ -115,28 +199,27 @@
<tbody> <tbody>
{#if $jobs.error} {#if $jobs.error}
<tr> <tr>
<td colspan="{metrics.length + 1}"> <td colspan={metrics.length + 1}>
<Card body color="danger" class="mb-3"><h2>{$jobs.error.message}</h2></Card> <Card body color="danger" class="mb-3"
><h2>{$jobs.error.message}</h2></Card
>
</td> </td>
</tr> </tr>
{:else if $jobs.fetching || !$jobs.data} {:else if $jobs.fetching || !$jobs.data}
<tr> <tr>
<td colspan="{metrics.length + 1}"> <td colspan={metrics.length + 1}>
<Spinner secondary /> <Spinner secondary />
</td> </td>
</tr> </tr>
{:else if $jobs.data && $initialized} {:else if $jobs.data && $initialized}
{#each $jobs.data.jobs.items as job (job)} {#each $jobs.data.jobs.items as job (job)}
<JobListRow <JobListRow {job} {metrics} {plotWidth} />
job={job}
metrics={metrics}
plotWidth={plotWidth} />
{:else} {:else}
<tr> <tr>
<td colspan="{metrics.length + 1}"> <td colspan={metrics.length + 1}>
No jobs found No jobs found
</td> </td>
</tr> </tr>
{/each} {/each}
{/if} {/if}
</tbody> </tbody>
@ -145,24 +228,21 @@
</Row> </Row>
<Pagination <Pagination
bind:page={page} bind:page
{itemsPerPage} {itemsPerPage}
itemText="Jobs" itemText="Jobs"
totalItems={matchedJobs} totalItems={matchedJobs}
on:update={({ detail }) => { on:update={({ detail }) => {
if (detail.itemsPerPage != itemsPerPage) { if (detail.itemsPerPage != itemsPerPage) {
itemsPerPage = detail.itemsPerPage updateConfiguration(
updateConfiguration({ detail.itemsPerPage.toString(),
name: "plot_list_jobsPerPage", detail.page
value: itemsPerPage.toString() )
}).then(res => { } else {
if (res.error) paging = { itemsPerPage: detail.itemsPerPage, page: detail.page }
console.error(res.error);
})
} }
}}
paging = { itemsPerPage: detail.itemsPerPage, page: detail.page } />
}} />
<style> <style>
.cc-table-wrapper { .cc-table-wrapper {
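
Two details of this file deserve a note. First, refresh() builds a second queryStore with requestPolicy 'network-only' so that unchanged variables still cause a round-trip past the cache exchange; the committed version discards the returned store and relies on the cache exchange to propagate the fresh result into the existing jobs binding. A hedged variant that reassigns the store instead, so the refreshed data reaches the table regardless of cache behaviour (an assumption, not the committed code):

    export function refresh() {
        jobs = queryStore({
            client: client,
            query: query,
            variables: { paging, sorting, filter },
            requestPolicy: 'network-only' // bypass the cache even for identical variables
        })
    }

Second, the double assignment in update() (paging = paging = { page, itemsPerPage }) is redundant as written; a single assignment of the fresh object literal triggers the same Svelte reactivity.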

View File

@ -9,132 +9,180 @@
--> -->
<script> <script>
import { operationStore, query } from '@urql/svelte' import { queryStore, gql, getContextClient } from "@urql/svelte";
import { getContext } from 'svelte' import { getContext } from "svelte";
import { Card, Spinner } from 'sveltestrap' import { Card, Spinner } from "sveltestrap";
import MetricPlot from '../plots/MetricPlot.svelte' import MetricPlot from "../plots/MetricPlot.svelte";
import JobInfo from './JobInfo.svelte' import JobInfo from "./JobInfo.svelte";
import { maxScope } from '../utils.js' import { maxScope } from "../utils.js";
export let job export let job;
export let metrics export let metrics;
export let plotWidth export let plotWidth;
export let plotHeight = 275 export let plotHeight = 275;
let scopes = [job.numNodes == 1 ? 'core' : 'node'] let { id } = job;
let scopes = [job.numNodes == 1 ? "core" : "node"];
const cluster = getContext('clusters').find(c => c.name == job.cluster) const cluster = getContext("clusters").find((c) => c.name == job.cluster);
// Get all MetricConfs which include subCluster-specific settings for this job const metricConfig = getContext("metrics"); // Get all MetricConfs which include subCluster-specific settings for this job
const metricConfig = getContext('metrics') const client = getContextClient();
const metricsQuery = operationStore(`query($id: ID!, $metrics: [String!]!, $scopes: [MetricScope!]!) { const query = gql`
jobMetrics(id: $id, metrics: $metrics, scopes: $scopes) { query ($id: ID!, $metrics: [String!]!, $scopes: [MetricScope!]!) {
name jobMetrics(id: $id, metrics: $metrics, scopes: $scopes) {
scope name
metric { scope
unit { prefix, base }, timestep metric {
statisticsSeries { min, mean, max } unit {
series { prefix
hostname, id, data base
statistics { min, avg, max } }
timestep
statisticsSeries {
min
mean
max
}
series {
hostname
id
data
statistics {
min
avg
max
}
}
} }
} }
} }
}`, { `;
id: job.id,
metrics,
scopes
})
const selectScope = (jobMetrics) => jobMetrics.reduce( $: metricsQuery = queryStore({
(a, b) => maxScope([a.scope, b.scope]) == a.scope client: client,
? (job.numNodes > 1 ? a : b) query: query,
: (job.numNodes > 1 ? b : a), jobMetrics[0]) variables: { id, metrics, scopes }
});
const sortAndSelectScope = (jobMetrics) => metrics function refresh() {
.map(function(name) { queryStore({
// Get MetricConf for this selected/requested metric client: client,
let thisConfig = metricConfig(cluster, name) query: query,
let thisSCIndex = thisConfig.subClusters.findIndex(sc => sc.name == job.subCluster) variables: { id, metrics, scopes }
// Check if Subcluster has MetricConf: If not found (index == -1), no further remove flag check required });
if (thisSCIndex >= 0) { }
// SubCluster Config present: Check if remove flag is set
if (thisConfig.subClusters[thisSCIndex].remove == true) { const selectScope = (jobMetrics) =>
// Return null data and informational flag jobMetrics.reduce(
return {removed: true, data: null} (a, b) =>
} else { maxScope([a.scope, b.scope]) == a.scope
// load and return metric, if data available ? job.numNodes > 1
let thisMetric = jobMetrics.filter(jobMetric => jobMetric.name == name) // Returns Array ? a
if (thisMetric.length > 0) { : b
return {removed: false, data: thisMetric} : job.numNodes > 1
? b
: a,
jobMetrics[0]
);
const sortAndSelectScope = (jobMetrics) =>
metrics
.map(function (name) {
// Get MetricConf for this selected/requested metric
let thisConfig = metricConfig(cluster, name);
let thisSCIndex = -1
if (thisConfig) {
thisSCIndex = thisConfig.subClusters.findIndex(
(sc) => sc.name == job.subCluster
);
};
// Check if Subcluster has MetricConf: If not found (index == -1), no further remove flag check required
if (thisSCIndex >= 0) {
// SubCluster Config present: Check if remove flag is set
if (thisConfig.subClusters[thisSCIndex].remove == true) {
// Return null data and informational flag
return { removed: true, data: null };
} else { } else {
return {removed: false, data: null} // load and return metric, if data available
let thisMetric = jobMetrics.filter(
(jobMetric) => jobMetric.name == name
); // Returns Array
if (thisMetric.length > 0) {
return { removed: false, data: thisMetric };
} else {
return { removed: false, data: null };
}
}
} else {
// No specific subCluster config: 'remove' flag not set, deemed false -> load and return metric, if data available
let thisMetric = jobMetrics.filter(
(jobMetric) => jobMetric.name == name
); // Returns Array
if (thisMetric.length > 0) {
return { removed: false, data: thisMetric };
} else {
return { removed: false, data: null };
} }
} }
} else { })
// No specific subCluster config: 'remove' flag not set, deemed false -> load and return metric, if data available .map(function (jobMetrics) {
let thisMetric = jobMetrics.filter(jobMetric => jobMetric.name == name) // Returns Array if (jobMetrics.data != null && jobMetrics.data.length > 0) {
if (thisMetric.length > 0) { return {
return {removed: false, data: thisMetric} removed: jobMetrics.removed,
data: selectScope(jobMetrics.data),
};
} else { } else {
return {removed: false, data: null} return jobMetrics;
} }
} });
})
.map(function(jobMetrics) {
if (jobMetrics.data != null && jobMetrics.data.length > 0) {
return {removed: jobMetrics.removed, data: selectScope(jobMetrics.data)}
} else {
return jobMetrics
}
})
$: metricsQuery.variables = { id: job.id, metrics, scopes } if (job.monitoringStatus) refresh();
if (job.monitoringStatus)
query(metricsQuery)
</script> </script>
<tr> <tr>
<td> <td>
<JobInfo job={job}/> <JobInfo {job} />
</td> </td>
{#if job.monitoringStatus == 0 || job.monitoringStatus == 2} {#if job.monitoringStatus == 0 || job.monitoringStatus == 2}
<td colspan="{metrics.length}"> <td colspan={metrics.length}>
<Card body color="warning">Not monitored or archiving failed</Card> <Card body color="warning">Not monitored or archiving failed</Card>
</td> </td>
{:else if $metricsQuery.fetching} {:else if $metricsQuery.fetching}
<td colspan="{metrics.length}" style="text-align: center;"> <td colspan={metrics.length} style="text-align: center;">
<Spinner secondary /> <Spinner secondary />
</td> </td>
{:else if $metricsQuery.error} {:else if $metricsQuery.error}
<td colspan="{metrics.length}"> <td colspan={metrics.length}>
<Card body color="danger" class="mb-3"> <Card body color="danger" class="mb-3">
{$metricsQuery.error.message.length > 500 {$metricsQuery.error.message.length > 500
? $metricsQuery.error.message.substring(0, 499)+'...' ? $metricsQuery.error.message.substring(0, 499) + "..."
: $metricsQuery.error.message} : $metricsQuery.error.message}
</Card> </Card>
</td> </td>
{:else} {:else}
{#each sortAndSelectScope($metricsQuery.data.jobMetrics) as metric, i (metric || i)} {#each sortAndSelectScope($metricsQuery.data.jobMetrics) as metric, i (metric || i)}
<td> <td>
                <!-- Subcluster MetricConfig remove keyword for jobtables (joblist main, user joblist, project joblist) to be used here as toplevel case--> <!-- Subcluster MetricConfig remove keyword for jobtables (joblist main, user joblist, project joblist) to be used here as toplevel case-->
{#if metric.removed == false && metric.data != null} {#if metric.removed == false && metric.data != null}
<MetricPlot <MetricPlot
width={plotWidth} width={plotWidth}
height={plotHeight} height={plotHeight}
timestep={metric.data.metric.timestep} timestep={metric.data.metric.timestep}
scope={metric.data.scope} scope={metric.data.scope}
series={metric.data.metric.series} series={metric.data.metric.series}
statisticsSeries={metric.data.metric.statisticsSeries} statisticsSeries={metric.data.metric.statisticsSeries}
metric={metric.data.name} metric={metric.data.name}
cluster={cluster} {cluster}
subCluster={job.subCluster} subCluster={job.subCluster}
isShared={(job.exclusive != 1)}/> isShared={(job.exclusive != 1)}
{:else if metric.removed == true && metric.data == null} />
<Card body color="info">Metric disabled for subcluster '{ job.subCluster }'</Card> {:else if metric.removed == true && metric.data == null}
{:else} <Card body color="info"
<Card body color="warning">Missing Data</Card> >Metric disabled for subcluster '{job.subCluster}'</Card
{/if} >
{:else}
<Card body color="warning">Missing Data</Card>
{/if}
</td> </td>
{/each} {/each}
{/if} {/if}
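
The scope selection above is compact enough to misread: for multi-node jobs it keeps the coarsest available scope, for single-node jobs the finest. A standalone sketch using the granularity table from utils.js (sample data made up):

    const scopeGranularity = { node: 10, socket: 5, accelerator: 5, core: 2, hwthread: 1 }
    const maxScope = (scopes) =>
        scopes.reduce((a, b) => (scopeGranularity[a] >= scopeGranularity[b] ? a : b))

    // Same reduce as selectScope above, with numNodes passed explicitly:
    const selectScope = (jobMetrics, numNodes) =>
        jobMetrics.reduce(
            (a, b) =>
                maxScope([a.scope, b.scope]) == a.scope
                    ? (numNodes > 1 ? a : b)
                    : (numNodes > 1 ? b : a),
            jobMetrics[0]
        )

    const entries = [{ scope: "node" }, { scope: "core" }]
    console.log(selectScope(entries, 4).scope) // "node": multi-node job, coarsest wins
    console.log(selectScope(entries, 1).scope) // "core": single node, finest wins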

View File

@ -323,7 +323,7 @@
let ctx, canvasElement, prevWidth = width, prevHeight = height let ctx, canvasElement, prevWidth = width, prevHeight = height
data = data != null ? data : (flopsAny && memBw data = data != null ? data : (flopsAny && memBw
? transformData(flopsAny, memBw, colorDots) // Use Metric Object from Parent ? transformData(flopsAny.metric, memBw.metric, colorDots) // Use Metric Object from Parent
: { : {
tiles: tiles, tiles: tiles,
xLabel: 'Intensity [FLOPS/byte]', xLabel: 'Intensity [FLOPS/byte]',
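
The one-line fix above exists because entries returned by the jobMetrics query wrap their payload, and transformData() consumes the inner objects. Illustrative shape (field values made up):

    // Each jobMetrics entry looks roughly like this:
    const flopsAny = { name: "flops_any", scope: "node", metric: { timestep: 60, series: [] } }
    const memBw = { name: "mem_bw", scope: "node", metric: { timestep: 60, series: [] } }
    // before: transformData(flopsAny, memBw, colorDots)               // passed the wrappers
    // after:  transformData(flopsAny.metric, memBw.metric, colorDots) // passes the metrics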

View File

@ -1,8 +1,11 @@
import { expiringCacheExchange } from './cache-exchange.js' import { expiringCacheExchange } from "./cache-exchange.js";
import { initClient } from '@urql/svelte' import {
import { setContext, getContext, hasContext, onDestroy, tick } from 'svelte' Client,
import { dedupExchange, fetchExchange } from '@urql/core' setContextClient,
import { readable } from 'svelte/store' fetchExchange,
} from "@urql/svelte";
import { setContext, getContext, hasContext, onDestroy, tick } from "svelte";
import { readable } from "svelte/store";
/* /*
* Call this function only at component initialization time! * Call this function only at component initialization time!
@ -14,26 +17,29 @@ import { readable } from 'svelte/store'
* - Adds 'clusters' to the context (object with cluster names as keys) * - Adds 'clusters' to the context (object with cluster names as keys)
* - Adds 'metrics' to the context, a function that takes a cluster and metric name and returns the MetricConfig (or undefined) * - Adds 'metrics' to the context, a function that takes a cluster and metric name and returns the MetricConfig (or undefined)
*/ */
export function init(extraInitQuery = '') { export function init(extraInitQuery = "") {
const jwt = hasContext('jwt') const jwt = hasContext("jwt")
? getContext('jwt') ? getContext("jwt")
: getContext('cc-config')['jwt'] : getContext("cc-config")["jwt"];
const client = initClient({ const client = new Client({
url: `${window.location.origin}/query`, url: `${window.location.origin}/query`,
fetchOptions: jwt != null fetchOptions:
? { headers: { 'Authorization': `Bearer ${jwt}` } } : {}, jwt != null ? { headers: { Authorization: `Bearer ${jwt}` } } : {},
exchanges: [ exchanges: [
dedupExchange,
expiringCacheExchange({ expiringCacheExchange({
ttl: 5 * 60 * 1000, ttl: 5 * 60 * 1000,
maxSize: 150, maxSize: 150,
}), }),
fetchExchange fetchExchange,
] ],
}) });
const query = client.query(`query { setContextClient(client);
const query = client
.query(
`query {
clusters { clusters {
name, name,
metricConfig { metricConfig {
@ -61,227 +67,247 @@ export function init(extraInitQuery = '') {
} }
tags { id, name, type } tags { id, name, type }
${extraInitQuery} ${extraInitQuery}
}`).toPromise() }`
)
.toPromise();
let state = { fetching: true, error: null, data: null } let state = { fetching: true, error: null, data: null };
let subscribers = [] let subscribers = [];
const subscribe = (callback) => { const subscribe = (callback) => {
callback(state) callback(state);
subscribers.push(callback) subscribers.push(callback);
return () => { return () => {
subscribers = subscribers.filter(cb => cb != callback) subscribers = subscribers.filter((cb) => cb != callback);
} };
}; };
const tags = [], clusters = [] const tags = [],
setContext('tags', tags) clusters = [];
setContext('clusters', clusters) setContext("tags", tags);
setContext('metrics', (cluster, metric) => { setContext("clusters", clusters);
if (typeof cluster !== 'object') setContext("metrics", (cluster, metric) => {
cluster = clusters.find(c => c.name == cluster) if (typeof cluster !== "object")
cluster = clusters.find((c) => c.name == cluster);
return cluster.metricConfig.find(m => m.name == metric) return cluster.metricConfig.find((m) => m.name == metric);
}) });
setContext('on-init', callback => state.fetching setContext("on-init", (callback) =>
? subscribers.push(callback) state.fetching ? subscribers.push(callback) : callback(state)
: callback(state)) );
setContext('initialized', readable(false, (set) => setContext(
subscribers.push(() => set(true)))) "initialized",
readable(false, (set) => subscribers.push(() => set(true)))
);
query.then(({ error, data }) => { query.then(({ error, data }) => {
state.fetching = false state.fetching = false;
if (error != null) { if (error != null) {
console.error(error) console.error(error);
state.error = error state.error = error;
tick().then(() => subscribers.forEach(cb => cb(state))) tick().then(() => subscribers.forEach((cb) => cb(state)));
return return;
} }
for (let tag of data.tags) for (let tag of data.tags) tags.push(tag);
tags.push(tag)
for (let cluster of data.clusters) for (let cluster of data.clusters) clusters.push(cluster);
clusters.push(cluster)
state.data = data state.data = data;
tick().then(() => subscribers.forEach(cb => cb(state))) tick().then(() => subscribers.forEach((cb) => cb(state)));
}) });
return { return {
query: { subscribe }, query: { subscribe },
tags, tags,
clusters, clusters,
} };
} }
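
For reference, the client bootstrap that init() now performs, reduced to its essentials (urql v4 style; makeClient is an illustrative name and the JWT lookup is simplified to a parameter):

    import { Client, setContextClient, fetchExchange } from "@urql/svelte";
    import { expiringCacheExchange } from "./cache-exchange.js";

    export function makeClient(jwt) {
        const client = new Client({
            url: `${window.location.origin}/query`,
            fetchOptions:
                jwt != null ? { headers: { Authorization: `Bearer ${jwt}` } } : {},
            exchanges: [
                // dedupExchange is gone in urql v4; deduplication is built into the client.
                expiringCacheExchange({ ttl: 5 * 60 * 1000, maxSize: 150 }),
                fetchExchange,
            ],
        });
        setContextClient(client); // lets child components call getContextClient()
        return client;
    }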
export function formatNumber(x) { export function formatNumber(x) {
let suffix = '' let suffix = "";
if (x >= 1000000000) { if (x >= 1000000000) {
x /= 1000000 x /= 1000000;
suffix = 'G' suffix = "G";
} else if (x >= 1000000) { } else if (x >= 1000000) {
x /= 1000000 x /= 1000000;
suffix = 'M' suffix = "M";
} else if (x >= 1000) { } else if (x >= 1000) {
x /= 1000 x /= 1000;
suffix = 'k' suffix = "k";
} }
return `${(Math.round(x * 100) / 100)} ${suffix}` return `${Math.round(x * 100) / 100} ${suffix}`;
} }
// Use https://developer.mozilla.org/en-US/docs/Web/API/structuredClone instead? // Use https://developer.mozilla.org/en-US/docs/Web/API/structuredClone instead?
export function deepCopy(x) { export function deepCopy(x) {
return JSON.parse(JSON.stringify(x)) return JSON.parse(JSON.stringify(x));
} }
function fuzzyMatch(term, string) { function fuzzyMatch(term, string) {
return string.toLowerCase().includes(term) return string.toLowerCase().includes(term);
} }
export function fuzzySearchTags(term, tags) { export function fuzzySearchTags(term, tags) {
if (!tags) if (!tags) return [];
return []
let results = [] let results = [];
let termparts = term.split(':').map(s => s.trim()).filter(s => s.length > 0) let termparts = term
.split(":")
.map((s) => s.trim())
.filter((s) => s.length > 0);
if (termparts.length == 0) { if (termparts.length == 0) {
results = tags.slice() results = tags.slice();
} else if (termparts.length == 1) { } else if (termparts.length == 1) {
for (let tag of tags) for (let tag of tags)
if (fuzzyMatch(termparts[0], tag.type) if (
|| fuzzyMatch(termparts[0], tag.name)) fuzzyMatch(termparts[0], tag.type) ||
results.push(tag) fuzzyMatch(termparts[0], tag.name)
)
results.push(tag);
} else if (termparts.length == 2) { } else if (termparts.length == 2) {
for (let tag of tags) for (let tag of tags)
if (fuzzyMatch(termparts[0], tag.type) if (
&& fuzzyMatch(termparts[1], tag.name)) fuzzyMatch(termparts[0], tag.type) &&
results.push(tag) fuzzyMatch(termparts[1], tag.name)
)
results.push(tag);
} }
return results.sort((a, b) => { return results.sort((a, b) => {
if (a.type < b.type) return -1 if (a.type < b.type) return -1;
if (a.type > b.type) return 1 if (a.type > b.type) return 1;
if (a.name < b.name) return -1 if (a.name < b.name) return -1;
if (a.name > b.name) return 1 if (a.name > b.name) return 1;
return 0 return 0;
}) });
} }
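
fuzzySearchTags() accepts either a bare term or a type:name pair; a quick usage sketch (tag data made up, terms already lowercase as the matcher expects):

    const tags = [
        { type: "bottleneck", name: "memory-bound" },
        { type: "issue", name: "memory-leak" },
    ];
    fuzzySearchTags("mem", tags);            // both tags: term matches type OR name
    fuzzySearchTags("bottleneck:mem", tags); // first tag only: type AND name must match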
export function groupByScope(jobMetrics) { export function groupByScope(jobMetrics) {
let metrics = new Map() let metrics = new Map();
for (let metric of jobMetrics) { for (let metric of jobMetrics) {
if (metrics.has(metric.name)) if (metrics.has(metric.name)) metrics.get(metric.name).push(metric);
metrics.get(metric.name).push(metric) else metrics.set(metric.name, [metric]);
else
metrics.set(metric.name, [metric])
} }
return [...metrics.values()].sort((a, b) => a[0].name.localeCompare(b[0].name)) return [...metrics.values()].sort((a, b) =>
a[0].name.localeCompare(b[0].name)
);
} }
const scopeGranularity = { const scopeGranularity = {
"node": 10, node: 10,
"socket": 5, socket: 5,
"accelerator": 5, accelerator: 5,
"core": 2, core: 2,
"hwthread": 1 hwthread: 1,
}; };
export function maxScope(scopes) { export function maxScope(scopes) {
console.assert(scopes.length > 0 && scopes.every(x => scopeGranularity[x] != null)) console.assert(
let sm = scopes[0], gran = scopeGranularity[scopes[0]] scopes.length > 0 && scopes.every((x) => scopeGranularity[x] != null)
);
let sm = scopes[0],
gran = scopeGranularity[scopes[0]];
for (let scope of scopes) { for (let scope of scopes) {
let otherGran = scopeGranularity[scope] let otherGran = scopeGranularity[scope];
if (otherGran > gran) { if (otherGran > gran) {
sm = scope sm = scope;
gran = otherGran gran = otherGran;
} }
} }
return sm return sm;
} }
export function minScope(scopes) { export function minScope(scopes) {
console.assert(scopes.length > 0 && scopes.every(x => scopeGranularity[x] != null)) console.assert(
let sm = scopes[0], gran = scopeGranularity[scopes[0]] scopes.length > 0 && scopes.every((x) => scopeGranularity[x] != null)
);
let sm = scopes[0],
gran = scopeGranularity[scopes[0]];
for (let scope of scopes) { for (let scope of scopes) {
let otherGran = scopeGranularity[scope] let otherGran = scopeGranularity[scope];
if (otherGran < gran) { if (otherGran < gran) {
sm = scope sm = scope;
gran = otherGran gran = otherGran;
} }
} }
return sm return sm;
} }
export async function fetchMetrics(job, metrics, scopes) { export async function fetchMetrics(job, metrics, scopes) {
if (job.monitoringStatus == 0) if (job.monitoringStatus == 0) return null;
return null
let query = [] let query = [];
if (metrics != null) { if (metrics != null) {
for (let metric of metrics) { for (let metric of metrics) {
query.push(`metric=${metric}`) query.push(`metric=${metric}`);
} }
} }
if (scopes != null) { if (scopes != null) {
for (let scope of scopes) { for (let scope of scopes) {
query.push(`scope=${scope}`) query.push(`scope=${scope}`);
} }
} }
try { try {
let res = await fetch(`/api/jobs/metrics/${job.id}${(query.length > 0) ? '?' : ''}${query.join('&')}`) let res = await fetch(
`/api/jobs/metrics/${job.id}${query.length > 0 ? "?" : ""}${query.join(
"&"
)}`
);
if (res.status != 200) { if (res.status != 200) {
return { error: { status: res.status, message: await res.text() } } return { error: { status: res.status, message: await res.text() } };
} }
return await res.json() return await res.json();
} catch (e) { } catch (e) {
return { error: e } return { error: e };
} }
} }
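
fetchMetrics() is the REST-based fallback that exists alongside the GraphQL client; calling it looks like this (inside an async context; the metric and scope names are examples):

    const res = await fetchMetrics(job, ["flops_any", "mem_bw"], ["node"]);
    if (res == null) {
        // job.monitoringStatus == 0: nothing to fetch
    } else if (res.error) {
        console.error(res.error.message ?? res.error);
    } else {
        console.log(res.data.jobMetrics); // same shape fetchMetricsStore() merges below
    }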
export function fetchMetricsStore() { export function fetchMetricsStore() {
let set = null let set = null;
let prev = { fetching: true, error: null, data: null } let prev = { fetching: true, error: null, data: null };
return [ return [
readable(prev, (_set) => { set = _set }), readable(prev, (_set) => {
(job, metrics, scopes) => fetchMetrics(job, metrics, scopes).then(res => { set = _set;
let next = { fetching: false, error: res.error, data: res.data } }),
if (prev.data && next.data) (job, metrics, scopes) =>
next.data.jobMetrics.push(...prev.data.jobMetrics) fetchMetrics(job, metrics, scopes).then((res) => {
let next = { fetching: false, error: res.error, data: res.data };
if (prev.data && next.data)
next.data.jobMetrics.push(...prev.data.jobMetrics);
prev = next prev = next;
set(next) set(next);
}) }),
] ];
} }
export function stickyHeader(datatableHeaderSelector, updatePading) { export function stickyHeader(datatableHeaderSelector, updatePading) {
const header = document.querySelector('header > nav.navbar') const header = document.querySelector("header > nav.navbar");
if (!header) if (!header) return;
return
let ticking = false, datatableHeader = null let ticking = false,
const onscroll = event => { datatableHeader = null;
if (ticking) const onscroll = (event) => {
return if (ticking) return;
ticking = true ticking = true;
window.requestAnimationFrame(() => { window.requestAnimationFrame(() => {
ticking = false ticking = false;
if (!datatableHeader) if (!datatableHeader)
datatableHeader = document.querySelector(datatableHeaderSelector) datatableHeader = document.querySelector(datatableHeaderSelector);
const top = datatableHeader.getBoundingClientRect().top const top = datatableHeader.getBoundingClientRect().top;
updatePading(top < header.clientHeight updatePading(
? (header.clientHeight - top) + 10 top < header.clientHeight ? header.clientHeight - top + 10 : 10
: 10) );
}) });
} };
document.addEventListener('scroll', onscroll) document.addEventListener("scroll", onscroll);
onDestroy(() => document.removeEventListener('scroll', onscroll)) onDestroy(() => document.removeEventListener("scroll", onscroll));
} }

View File

@ -1,534 +0,0 @@
# THIS IS AN AUTOGENERATED FILE. DO NOT EDIT THIS FILE DIRECTLY.
# yarn lockfile v1
"@babel/code-frame@^7.10.4":
version "7.16.0"
resolved "https://registry.yarnpkg.com/@babel/code-frame/-/code-frame-7.16.0.tgz#0dfc80309beec8411e65e706461c408b0bb9b431"
integrity sha512-IF4EOMEV+bfYwOmNxGzSnjR2EmQod7f1UXOpZM3l4i4o4QNwzjtJAu/HxdjHq0aYBvdqMuQEY1eg0nqW9ZPORA==
dependencies:
"@babel/highlight" "^7.16.0"
"@babel/helper-validator-identifier@^7.15.7":
version "7.15.7"
resolved "https://registry.yarnpkg.com/@babel/helper-validator-identifier/-/helper-validator-identifier-7.15.7.tgz#220df993bfe904a4a6b02ab4f3385a5ebf6e2389"
integrity sha512-K4JvCtQqad9OY2+yTU8w+E82ywk/fe+ELNlt1G8z3bVGlZfn/hOcQQsUhGhW/N+tb3fxK800wLtKOE/aM0m72w==
"@babel/highlight@^7.16.0":
version "7.16.0"
resolved "https://registry.yarnpkg.com/@babel/highlight/-/highlight-7.16.0.tgz#6ceb32b2ca4b8f5f361fb7fd821e3fddf4a1725a"
integrity sha512-t8MH41kUQylBtu2+4IQA3atqevA2lRgqA2wyVB/YiWmsDSuylZZuXOUy9ric30hfzauEFfdsuk/eXTRrGrfd0g==
dependencies:
"@babel/helper-validator-identifier" "^7.15.7"
chalk "^2.0.0"
js-tokens "^4.0.0"
"@graphql-typed-document-node/core@^3.1.0":
version "3.1.1"
resolved "https://registry.yarnpkg.com/@graphql-typed-document-node/core/-/core-3.1.1.tgz#076d78ce99822258cf813ecc1e7fa460fa74d052"
integrity sha512-NQ17ii0rK1b34VZonlmT2QMJFI70m0TRwbknO/ihlbatXyaktDhN/98vBiUU6kNBPljqGqyIrl2T4nY2RpFANg==
"@jridgewell/gen-mapping@^0.3.0":
version "0.3.2"
resolved "https://registry.yarnpkg.com/@jridgewell/gen-mapping/-/gen-mapping-0.3.2.tgz#c1aedc61e853f2bb9f5dfe6d4442d3b565b253b9"
integrity sha512-mh65xKQAzI6iBcFzwv28KVWSmCkdRBWoOh+bYQGW3+6OZvbbN3TqMGo5hqYxQniRcH9F2VZIoJCm4pa3BPDK/A==
dependencies:
"@jridgewell/set-array" "^1.0.1"
"@jridgewell/sourcemap-codec" "^1.4.10"
"@jridgewell/trace-mapping" "^0.3.9"
"@jridgewell/resolve-uri@^3.0.3":
version "3.1.0"
resolved "https://registry.yarnpkg.com/@jridgewell/resolve-uri/-/resolve-uri-3.1.0.tgz#2203b118c157721addfe69d47b70465463066d78"
integrity sha512-F2msla3tad+Mfht5cJq7LSXcdudKTWCVYUgw6pLFOOHSTtZlj6SWNYAp+AhuqLmWdBO2X5hPrLcu8cVP8fy28w==
"@jridgewell/set-array@^1.0.1":
version "1.1.2"
resolved "https://registry.yarnpkg.com/@jridgewell/set-array/-/set-array-1.1.2.tgz#7c6cf998d6d20b914c0a55a91ae928ff25965e72"
integrity sha512-xnkseuNADM0gt2bs+BvhO0p78Mk762YnZdsuzFV018NoG1Sj1SCQvpSqa7XUaTam5vAGasABV9qXASMKnFMwMw==
"@jridgewell/source-map@^0.3.2":
version "0.3.2"
resolved "https://registry.yarnpkg.com/@jridgewell/source-map/-/source-map-0.3.2.tgz#f45351aaed4527a298512ec72f81040c998580fb"
integrity sha512-m7O9o2uR8k2ObDysZYzdfhb08VuEml5oWGiosa1VdaPZ/A6QyPkAJuwN0Q1lhULOf6B7MtQmHENS743hWtCrgw==
dependencies:
"@jridgewell/gen-mapping" "^0.3.0"
"@jridgewell/trace-mapping" "^0.3.9"
"@jridgewell/sourcemap-codec@^1.4.10":
version "1.4.14"
resolved "https://registry.yarnpkg.com/@jridgewell/sourcemap-codec/-/sourcemap-codec-1.4.14.tgz#add4c98d341472a289190b424efbdb096991bb24"
integrity sha512-XPSJHWmi394fuUuzDnGz1wiKqWfo1yXecHQMRf2l6hztTO+nPru658AyDngaBe7isIxEkRsPR3FZh+s7iVa4Uw==
"@jridgewell/trace-mapping@^0.3.9":
version "0.3.14"
resolved "https://registry.yarnpkg.com/@jridgewell/trace-mapping/-/trace-mapping-0.3.14.tgz#b231a081d8f66796e475ad588a1ef473112701ed"
integrity sha512-bJWEfQ9lPTvm3SneWwRFVLzrh6nhjwqw7TUFFBEMzwvg7t7PCDenf2lDwqo4NQXzdpgBXyFgDWnQA+2vkruksQ==
dependencies:
"@jridgewell/resolve-uri" "^3.0.3"
"@jridgewell/sourcemap-codec" "^1.4.10"
"@popperjs/core@^2.9.2":
version "2.11.0"
resolved "https://registry.yarnpkg.com/@popperjs/core/-/core-2.11.0.tgz#6734f8ebc106a0860dff7f92bf90df193f0935d7"
integrity sha512-zrsUxjLOKAzdewIDRWy9nsV1GQsKBCWaGwsZQlCgr6/q+vjyZhFgqedLfFBuI9anTPEUT4APq9Mu0SZBTzIcGQ==
"@rollup/plugin-commonjs@^17.0.0":
version "17.1.0"
resolved "https://registry.yarnpkg.com/@rollup/plugin-commonjs/-/plugin-commonjs-17.1.0.tgz#757ec88737dffa8aa913eb392fade2e45aef2a2d"
integrity sha512-PoMdXCw0ZyvjpCMT5aV4nkL0QywxP29sODQsSGeDpr/oI49Qq9tRtAsb/LbYbDzFlOydVEqHmmZWFtXJEAX9ew==
dependencies:
"@rollup/pluginutils" "^3.1.0"
commondir "^1.0.1"
estree-walker "^2.0.1"
glob "^7.1.6"
is-reference "^1.2.1"
magic-string "^0.25.7"
resolve "^1.17.0"
"@rollup/plugin-node-resolve@^11.0.0":
version "11.2.1"
resolved "https://registry.yarnpkg.com/@rollup/plugin-node-resolve/-/plugin-node-resolve-11.2.1.tgz#82aa59397a29cd4e13248b106e6a4a1880362a60"
integrity sha512-yc2n43jcqVyGE2sqV5/YCmocy9ArjVAP/BeXyTtADTBBX6V0e5UMqwO8CdQ0kzjb6zu5P1qMzsScCMRvE9OlVg==
dependencies:
"@rollup/pluginutils" "^3.1.0"
"@types/resolve" "1.17.1"
builtin-modules "^3.1.0"
deepmerge "^4.2.2"
is-module "^1.0.0"
resolve "^1.19.0"
"@rollup/plugin-replace@^2.4.1":
version "2.4.2"
resolved "https://registry.yarnpkg.com/@rollup/plugin-replace/-/plugin-replace-2.4.2.tgz#a2d539314fbc77c244858faa523012825068510a"
integrity sha512-IGcu+cydlUMZ5En85jxHH4qj2hta/11BHq95iHEyb2sbgiN0eCdzvUcHw5gt9pBL5lTi4JDYJ1acCoMGpTvEZg==
dependencies:
"@rollup/pluginutils" "^3.1.0"
magic-string "^0.25.7"
"@rollup/pluginutils@4":
version "4.1.1"
resolved "https://registry.yarnpkg.com/@rollup/pluginutils/-/pluginutils-4.1.1.tgz#1d4da86dd4eded15656a57d933fda2b9a08d47ec"
integrity sha512-clDjivHqWGXi7u+0d2r2sBi4Ie6VLEAzWMIkvJLnDmxoOhBYOTfzGbOQBA32THHm11/LiJbd01tJUpJsbshSWQ==
dependencies:
estree-walker "^2.0.1"
picomatch "^2.2.2"
"@rollup/pluginutils@^3.1.0":
version "3.1.0"
resolved "https://registry.yarnpkg.com/@rollup/pluginutils/-/pluginutils-3.1.0.tgz#706b4524ee6dc8b103b3c995533e5ad680c02b9b"
integrity sha512-GksZ6pr6TpIjHm8h9lSQ8pi8BE9VeubNT0OMJ3B5uZJ8pz73NPiqOtCog/x2/QzM1ENChPKxMDhiQuRHsqc+lg==
dependencies:
"@types/estree" "0.0.39"
estree-walker "^1.0.1"
picomatch "^2.2.2"
"@types/estree@*":
version "0.0.50"
resolved "https://registry.yarnpkg.com/@types/estree/-/estree-0.0.50.tgz#1e0caa9364d3fccd2931c3ed96fdbeaa5d4cca83"
integrity sha512-C6N5s2ZFtuZRj54k2/zyRhNDjJwwcViAM3Nbm8zjBpbqAdZ00mr0CFxvSKeO8Y/e03WVFLpQMdHYVfUd6SB+Hw==
"@types/estree@0.0.39":
version "0.0.39"
resolved "https://registry.yarnpkg.com/@types/estree/-/estree-0.0.39.tgz#e177e699ee1b8c22d23174caaa7422644389509f"
integrity sha512-EYNwp3bU+98cpU4lAWYYL7Zz+2gryWH1qbdDTidVd6hkiR6weksdbMadyXKXNPEkQFhXM+hVO9ZygomHXp+AIw==
"@types/node@*":
version "16.11.12"
resolved "https://registry.yarnpkg.com/@types/node/-/node-16.11.12.tgz#ac7fb693ac587ee182c3780c26eb65546a1a3c10"
integrity sha512-+2Iggwg7PxoO5Kyhvsq9VarmPbIelXP070HMImEpbtGCoyWNINQj4wzjbQCXzdHTRXnqufutJb5KAURZANNBAw==
"@types/resolve@1.17.1":
version "1.17.1"
resolved "https://registry.yarnpkg.com/@types/resolve/-/resolve-1.17.1.tgz#3afd6ad8967c77e4376c598a82ddd58f46ec45d6"
integrity sha512-yy7HuzQhj0dhGpD8RLXSZWEkLsV9ibvxvi6EiJ3bkqLAO1RGo0WbkWQiwpRlSFymTJRz0d3k5LM3kkx8ArDbLw==
dependencies:
"@types/node" "*"
"@urql/core@^2.3.4":
version "2.3.5"
resolved "https://registry.yarnpkg.com/@urql/core/-/core-2.3.5.tgz#eb1cbbfe23236615ecb8e65850bb772d4f61b6b5"
integrity sha512-kM/um4OjXmuN6NUS/FSm7dESEKWT7By1kCRCmjvU4+4uEoF1cd4TzIhQ7J1I3zbDAFhZzmThq9X0AHpbHAn3bA==
dependencies:
"@graphql-typed-document-node/core" "^3.1.0"
wonka "^4.0.14"
"@urql/svelte@^1.3.0":
version "1.3.2"
resolved "https://registry.yarnpkg.com/@urql/svelte/-/svelte-1.3.2.tgz#7fc16253a36669dddec39755fc9c31077a9c279a"
integrity sha512-L/fSKb+jTrxfeKbnA4+7T69sL0XlzMv4d9i0j9J+fCkBCpUOGgPsYzsyBttbVbjrlaw61Wrc6J2NKuokrd570w==
dependencies:
"@urql/core" "^2.3.4"
wonka "^4.0.14"
acorn@^8.5.0:
version "8.8.0"
resolved "https://registry.yarnpkg.com/acorn/-/acorn-8.8.0.tgz#88c0187620435c7f6015803f5539dae05a9dbea8"
integrity sha512-QOxyigPVrpZ2GXT+PFyZTl6TtOFc5egxHIP9IlQ+RbupQuX4RkT/Bee4/kQuC02Xkzg84JcT7oLYtDIQxp+v7w==
ansi-styles@^3.2.1:
version "3.2.1"
resolved "https://registry.yarnpkg.com/ansi-styles/-/ansi-styles-3.2.1.tgz#41fbb20243e50b12be0f04b8dedbf07520ce841d"
integrity sha512-VT0ZI6kZRdTh8YyJw3SMbYm/u+NqfsAxEpWO0Pf9sq8/e94WxxOpPKx9FR1FlyCtOVDNOQ+8ntlqFxiRc+r5qA==
dependencies:
color-convert "^1.9.0"
balanced-match@^1.0.0:
version "1.0.2"
resolved "https://registry.yarnpkg.com/balanced-match/-/balanced-match-1.0.2.tgz#e83e3a7e3f300b34cb9d87f615fa0cbf357690ee"
integrity sha512-3oSeUO0TMV67hN1AmbXsK4yaqU7tjiHlbxRDZOpH0KW9+CeX4bRAaX0Anxt0tx2MrpRpWwQaPwIlISEJhYU5Pw==
brace-expansion@^1.1.7:
version "1.1.11"
resolved "https://registry.yarnpkg.com/brace-expansion/-/brace-expansion-1.1.11.tgz#3c7fcbf529d87226f3d2f52b966ff5271eb441dd"
integrity sha512-iCuPHDFgrHX7H2vEI/5xpz07zSHB00TpugqhmYtVmMO6518mCuRMoOYFldEBl0g187ufozdaHgWKcYFb61qGiA==
dependencies:
balanced-match "^1.0.0"
concat-map "0.0.1"
buffer-from@^1.0.0:
version "1.1.2"
resolved "https://registry.yarnpkg.com/buffer-from/-/buffer-from-1.1.2.tgz#2b146a6fd72e80b4f55d255f35ed59a3a9a41bd5"
integrity sha512-E+XQCRwSbaaiChtv6k6Dwgc+bx+Bs6vuKJHHl5kox/BaKbhiXzqQOwK4cO22yElGp2OCmjwVhT3HmxgyPGnJfQ==
builtin-modules@^3.1.0:
version "3.2.0"
resolved "https://registry.yarnpkg.com/builtin-modules/-/builtin-modules-3.2.0.tgz#45d5db99e7ee5e6bc4f362e008bf917ab5049887"
integrity sha512-lGzLKcioL90C7wMczpkY0n/oART3MbBa8R9OFGE1rJxoVI86u4WAGfEk8Wjv10eKSyTHVGkSo3bvBylCEtk7LA==
chalk@^2.0.0:
version "2.4.2"
resolved "https://registry.yarnpkg.com/chalk/-/chalk-2.4.2.tgz#cd42541677a54333cf541a49108c1432b44c9424"
integrity sha512-Mti+f9lpJNcwF4tWV8/OrTTtF1gZi+f8FqlyAdouralcFWFQWF2+NgCHShjkCb+IFBLq9buZwE1xckQU4peSuQ==
dependencies:
ansi-styles "^3.2.1"
escape-string-regexp "^1.0.5"
supports-color "^5.3.0"
color-convert@^1.9.0:
version "1.9.3"
resolved "https://registry.yarnpkg.com/color-convert/-/color-convert-1.9.3.tgz#bb71850690e1f136567de629d2d5471deda4c1e8"
integrity sha512-QfAUtd+vFdAtFQcC8CCyYt1fYWxSqAiK2cSD6zDB8N3cpsEBAvRxp9zOGg6G/SHHJYAT88/az/IuDGALsNVbGg==
dependencies:
color-name "1.1.3"

color-name@1.1.3:
version "1.1.3"
resolved "https://registry.yarnpkg.com/color-name/-/color-name-1.1.3.tgz#a7d0558bd89c42f795dd42328f740831ca53bc25"
integrity sha1-p9BVi9icQveV3UIyj3QIMcpTvCU=

commander@^2.20.0:
version "2.20.3"
resolved "https://registry.yarnpkg.com/commander/-/commander-2.20.3.tgz#fd485e84c03eb4881c20722ba48035e8531aeb33"
integrity sha512-GpVkmM8vF2vQUkj2LvZmD35JxeJOLCwJ9cUkugyk2nuhbv3+mJvpLYYt+0+USMxE+oj+ey/lJEnhZw75x/OMcQ==

commondir@^1.0.1:
version "1.0.1"
resolved "https://registry.yarnpkg.com/commondir/-/commondir-1.0.1.tgz#ddd800da0c66127393cca5950ea968a3aaf1253b"
integrity sha1-3dgA2gxmEnOTzKWVDqloo6rxJTs=

concat-map@0.0.1:
version "0.0.1"
resolved "https://registry.yarnpkg.com/concat-map/-/concat-map-0.0.1.tgz#d8a96bd77fd68df7793a73036a3ba0d5405d477b"
integrity sha512-/Srv4dswyQNBfohGpz9o6Yb3Gz3SrUDqBH5rTuhGR7ahtlbYKnVxw2bCFMRljaA7EXHaXZ8wsHdodFvbkhKmqg==

deepmerge@^4.2.2:
version "4.2.2"
resolved "https://registry.yarnpkg.com/deepmerge/-/deepmerge-4.2.2.tgz#44d2ea3679b8f4d4ffba33f03d865fc1e7bf4955"
integrity sha512-FJ3UgI4gIl+PHZm53knsuSFpE+nESMr7M4v9QcgB7S63Kj/6WqMiFQJpBBYz1Pt+66bZpP3Q7Lye0Oo9MPKEdg==

escape-string-regexp@^1.0.5:
version "1.0.5"
resolved "https://registry.yarnpkg.com/escape-string-regexp/-/escape-string-regexp-1.0.5.tgz#1b61c0562190a8dff6ae3bb2cf0200ca130b86d4"
integrity sha1-G2HAViGQqN/2rjuyzwIAyhMLhtQ=

estree-walker@^0.6.1:
version "0.6.1"
resolved "https://registry.yarnpkg.com/estree-walker/-/estree-walker-0.6.1.tgz#53049143f40c6eb918b23671d1fe3219f3a1b362"
integrity sha512-SqmZANLWS0mnatqbSfRP5g8OXZC12Fgg1IwNtLsyHDzJizORW4khDfjPqJZsemPWBB2uqykUah5YpQ6epsqC/w==

estree-walker@^1.0.1:
version "1.0.1"
resolved "https://registry.yarnpkg.com/estree-walker/-/estree-walker-1.0.1.tgz#31bc5d612c96b704106b477e6dd5d8aa138cb700"
integrity sha512-1fMXF3YP4pZZVozF8j/ZLfvnR8NSIljt56UhbZ5PeeDmmGHpgpdwQt7ITlGvYaQukCvuBRMLEiKiYC+oeIg4cg==

estree-walker@^2.0.1:
version "2.0.2"
resolved "https://registry.yarnpkg.com/estree-walker/-/estree-walker-2.0.2.tgz#52f010178c2a4c117a7757cfe942adb7d2da4cac"
integrity sha512-Rfkk/Mp/DL7JVje3u18FxFujQlTNR2q6QfMSMB7AvCBx91NGj/ba3kCfza0f6dVDbw7YlRf/nDrn7pQrCCyQ/w==

fs.realpath@^1.0.0:
version "1.0.0"
resolved "https://registry.yarnpkg.com/fs.realpath/-/fs.realpath-1.0.0.tgz#1504ad2523158caa40db4a2787cb01411994ea4f"
integrity sha1-FQStJSMVjKpA20onh8sBQRmU6k8=

fsevents@~2.3.2:
version "2.3.2"
resolved "https://registry.yarnpkg.com/fsevents/-/fsevents-2.3.2.tgz#8a526f78b8fdf4623b709e0b975c52c24c02fd1a"
integrity sha512-xiqMQR4xAeHTuB9uWm+fFRcIOgKBMiOBP+eXiyT7jsgVCq1bkVygt00oASowB7EdtpOHaaPgKt812P9ab+DDKA==

function-bind@^1.1.1:
version "1.1.1"
resolved "https://registry.yarnpkg.com/function-bind/-/function-bind-1.1.1.tgz#a56899d3ea3c9bab874bb9773b7c5ede92f4895d"
integrity sha512-yIovAzMX49sF8Yl58fSCWJ5svSLuaibPxXQJFLmBObTuCr0Mf1KiPopGM9NiFjiYBCbfaa2Fh6breQ6ANVTI0A==

glob@^7.1.6:
version "7.2.0"
resolved "https://registry.yarnpkg.com/glob/-/glob-7.2.0.tgz#d15535af7732e02e948f4c41628bd910293f6023"
integrity sha512-lmLf6gtyrPq8tTjSmrO94wBeQbFR3HbLHbuyD69wuyQkImp2hWqMGB47OX65FBkPffO641IP9jWa1z4ivqG26Q==
dependencies:
fs.realpath "^1.0.0"
inflight "^1.0.4"
inherits "2"
minimatch "^3.0.4"
once "^1.3.0"
path-is-absolute "^1.0.0"

graphql@^15.6.0:
version "15.8.0"
resolved "https://registry.yarnpkg.com/graphql/-/graphql-15.8.0.tgz#33410e96b012fa3bdb1091cc99a94769db212b38"
integrity sha512-5gghUc24tP9HRznNpV2+FIoq3xKkj5dTQqf4v0CpdPbFVwFkWoxOM+o+2OC9ZSvjEMTjfmG9QT+gcvggTwW1zw==

has-flag@^3.0.0:
version "3.0.0"
resolved "https://registry.yarnpkg.com/has-flag/-/has-flag-3.0.0.tgz#b5d454dc2199ae225699f3467e5a07f3b955bafd"
integrity sha1-tdRU3CGZriJWmfNGfloH87lVuv0=

has-flag@^4.0.0:
version "4.0.0"
resolved "https://registry.yarnpkg.com/has-flag/-/has-flag-4.0.0.tgz#944771fd9c81c81265c4d6941860da06bb59479b"
integrity sha512-EykJT/Q1KjTWctppgIAgfSO0tKVuZUjhgMr17kqTumMl6Afv3EISleU7qZUzoXDFTAHTDC4NOoG/ZxU3EvlMPQ==

has@^1.0.3:
version "1.0.3"
resolved "https://registry.yarnpkg.com/has/-/has-1.0.3.tgz#722d7cbfc1f6aa8241f16dd814e011e1f41e8796"
integrity sha512-f2dvO0VU6Oej7RkWJGrehjbzMAjFp5/VKPp5tTpWIV4JHHZK1/BxbFRtf/siA2SWTe09caDmVtYYzWEIbBS4zw==
dependencies:
function-bind "^1.1.1"

inflight@^1.0.4:
version "1.0.6"
resolved "https://registry.yarnpkg.com/inflight/-/inflight-1.0.6.tgz#49bd6331d7d02d0c09bc910a1075ba8165b56df9"
integrity sha1-Sb1jMdfQLQwJvJEKEHW6gWW1bfk=
dependencies:
once "^1.3.0"
wrappy "1"

inherits@2:
version "2.0.4"
resolved "https://registry.yarnpkg.com/inherits/-/inherits-2.0.4.tgz#0fa2c64f932917c3433a0ded55363aae37416b7c"
integrity sha512-k/vGaX4/Yla3WzyMCvTQOXYeIHvqOKtnqBduzTHpzpQZzAskKMhZ2K+EnBiSM9zGSoIFeMpXKxa4dYeZIQqewQ==

is-core-module@^2.2.0:
version "2.8.0"
resolved "https://registry.yarnpkg.com/is-core-module/-/is-core-module-2.8.0.tgz#0321336c3d0925e497fd97f5d95cb114a5ccd548"
integrity sha512-vd15qHsaqrRL7dtH6QNuy0ndJmRDrS9HAM1CAiSifNUFv4x1a0CCVsj18hJ1mShxIG6T2i1sO78MkP56r0nYRw==
dependencies:
has "^1.0.3"

is-module@^1.0.0:
version "1.0.0"
resolved "https://registry.yarnpkg.com/is-module/-/is-module-1.0.0.tgz#3258fb69f78c14d5b815d664336b4cffb6441591"
integrity sha1-Mlj7afeMFNW4FdZkM2tM/7ZEFZE=

is-reference@^1.2.1:
version "1.2.1"
resolved "https://registry.yarnpkg.com/is-reference/-/is-reference-1.2.1.tgz#8b2dac0b371f4bc994fdeaba9eb542d03002d0b7"
integrity sha512-U82MsXXiFIrjCK4otLT+o2NA2Cd2g5MLoOVXUZjIOhLurrRxpEXzI8O0KZHr3IjLvlAH1kTPYSuqer5T9ZVBKQ==
dependencies:
"@types/estree" "*"

jest-worker@^26.2.1:
version "26.6.2"
resolved "https://registry.yarnpkg.com/jest-worker/-/jest-worker-26.6.2.tgz#7f72cbc4d643c365e27b9fd775f9d0eaa9c7a8ed"
integrity sha512-KWYVV1c4i+jbMpaBC+U++4Va0cp8OisU185o73T1vo99hqi7w8tSJfUXYswwqqrjzwxa6KpRK54WhPvwf5w6PQ==
dependencies:
"@types/node" "*"
merge-stream "^2.0.0"
supports-color "^7.0.0"

js-tokens@^4.0.0:
version "4.0.0"
resolved "https://registry.yarnpkg.com/js-tokens/-/js-tokens-4.0.0.tgz#19203fb59991df98e3a287050d4647cdeaf32499"
integrity sha512-RdJUflcE3cUzKiMqQgsCu06FPu9UdIJO0beYbPhHN4k6apgJtifcoCtT9bcxOpYBtpD2kCM6Sbzg4CausW/PKQ==

magic-string@^0.25.7:
version "0.25.7"
resolved "https://registry.yarnpkg.com/magic-string/-/magic-string-0.25.7.tgz#3f497d6fd34c669c6798dcb821f2ef31f5445051"
integrity sha512-4CrMT5DOHTDk4HYDlzmwu4FVCcIYI8gauveasrdCu2IKIFOJ3f0v/8MDGJCDL9oD2ppz/Av1b0Nj345H9M+XIA==
dependencies:
sourcemap-codec "^1.4.4"

merge-stream@^2.0.0:
version "2.0.0"
resolved "https://registry.yarnpkg.com/merge-stream/-/merge-stream-2.0.0.tgz#52823629a14dd00c9770fb6ad47dc6310f2c1f60"
integrity sha512-abv/qOcuPfk3URPfDzmZU1LKmuw8kT+0nIHvKrKgFrwifol/doWcdA4ZqsWQ8ENrFKkd67Mfpo/LovbIUsbt3w==

minimatch@^3.0.4:
version "3.1.2"
resolved "https://registry.yarnpkg.com/minimatch/-/minimatch-3.1.2.tgz#19cd194bfd3e428f049a70817c038d89ab4be35b"
integrity sha512-J7p63hRiAjw1NDEww1W7i37+ByIrOWO5XQQAzZ3VOcL0PNybwpfmV/N05zFAzwQ9USyEcX6t3UO+K5aqBQOIHw==
dependencies:
brace-expansion "^1.1.7"

once@^1.3.0:
version "1.4.0"
resolved "https://registry.yarnpkg.com/once/-/once-1.4.0.tgz#583b1aa775961d4b113ac17d9c50baef9dd76bd1"
integrity sha1-WDsap3WWHUsROsF9nFC6753Xa9E=
dependencies:
wrappy "1"

path-is-absolute@^1.0.0:
version "1.0.1"
resolved "https://registry.yarnpkg.com/path-is-absolute/-/path-is-absolute-1.0.1.tgz#174b9268735534ffbc7ace6bf53a5a9e1b5c5f5f"
integrity sha1-F0uSaHNVNP+8es5r9TpanhtcX18=

path-parse@^1.0.6:
version "1.0.7"
resolved "https://registry.yarnpkg.com/path-parse/-/path-parse-1.0.7.tgz#fbc114b60ca42b30d9daf5858e4bd68bbedb6735"
integrity sha512-LDJzPVEEEPR+y48z93A0Ed0yXb8pAByGWo/k5YYdYgpY2/2EsOsksJrq7lOHxryrVOn1ejG6oAp8ahvOIQD8sw==

picomatch@^2.2.2:
version "2.3.0"
resolved "https://registry.yarnpkg.com/picomatch/-/picomatch-2.3.0.tgz#f1f061de8f6a4bf022892e2d128234fb98302972"
integrity sha512-lY1Q/PiJGC2zOv/z391WOTD+Z02bCgsFfvxoXXf6h7kv9o+WmsmzYqrAwY63sNgOxE4xEdq0WyUnXfKeBrSvYw==

randombytes@^2.1.0:
version "2.1.0"
resolved "https://registry.yarnpkg.com/randombytes/-/randombytes-2.1.0.tgz#df6f84372f0270dc65cdf6291349ab7a473d4f2a"
integrity sha512-vYl3iOX+4CKUWuxGi9Ukhie6fsqXqS9FE2Zaic4tNFD2N2QQaXOMFbuKK4QmDHC0JO6B1Zp41J0LpT0oR68amQ==
dependencies:
safe-buffer "^5.1.0"

require-relative@^0.8.7:
version "0.8.7"
resolved "https://registry.yarnpkg.com/require-relative/-/require-relative-0.8.7.tgz#7999539fc9e047a37928fa196f8e1563dabd36de"
integrity sha1-eZlTn8ngR6N5KPoZb44VY9q9Nt4=

resolve@^1.17.0, resolve@^1.19.0:
version "1.20.0"
resolved "https://registry.yarnpkg.com/resolve/-/resolve-1.20.0.tgz#629a013fb3f70755d6f0b7935cc1c2c5378b1975"
integrity sha512-wENBPt4ySzg4ybFQW2TT1zMQucPK95HSh/nq2CFTZVOGut2+pQvSsgtda4d26YrYcr067wjbmzOG8byDPBX63A==
dependencies:
is-core-module "^2.2.0"
path-parse "^1.0.6"

rollup-plugin-css-only@^3.1.0:
version "3.1.0"
resolved "https://registry.yarnpkg.com/rollup-plugin-css-only/-/rollup-plugin-css-only-3.1.0.tgz#6a701cc5b051c6b3f0961e69b108a9a118e1b1df"
integrity sha512-TYMOE5uoD76vpj+RTkQLzC9cQtbnJNktHPB507FzRWBVaofg7KhIqq1kGbcVOadARSozWF883Ho9KpSPKH8gqA==
dependencies:
"@rollup/pluginutils" "4"

rollup-plugin-svelte@^7.0.0:
version "7.1.0"
resolved "https://registry.yarnpkg.com/rollup-plugin-svelte/-/rollup-plugin-svelte-7.1.0.tgz#d45f2b92b1014be4eb46b55aa033fb9a9c65f04d"
integrity sha512-vopCUq3G+25sKjwF5VilIbiY6KCuMNHP1PFvx2Vr3REBNMDllKHFZN2B9jwwC+MqNc3UPKkjXnceLPEjTjXGXg==
dependencies:
require-relative "^0.8.7"
rollup-pluginutils "^2.8.2"

rollup-plugin-terser@^7.0.0:
version "7.0.2"
resolved "https://registry.yarnpkg.com/rollup-plugin-terser/-/rollup-plugin-terser-7.0.2.tgz#e8fbba4869981b2dc35ae7e8a502d5c6c04d324d"
integrity sha512-w3iIaU4OxcF52UUXiZNsNeuXIMDvFrr+ZXK6bFZ0Q60qyVfq4uLptoS4bbq3paG3x216eQllFZX7zt6TIImguQ==
dependencies:
"@babel/code-frame" "^7.10.4"
jest-worker "^26.2.1"
serialize-javascript "^4.0.0"
terser "^5.0.0"

rollup-pluginutils@^2.8.2:
version "2.8.2"
resolved "https://registry.yarnpkg.com/rollup-pluginutils/-/rollup-pluginutils-2.8.2.tgz#72f2af0748b592364dbd3389e600e5a9444a351e"
integrity sha512-EEp9NhnUkwY8aif6bxgovPHMoMoNr2FulJziTndpt5H9RdwC47GSGuII9XxpSdzVGM0GWrNPHV6ie1LTNJPaLQ==
dependencies:
estree-walker "^0.6.1"

rollup@^2.3.4:
version "2.61.0"
resolved "https://registry.yarnpkg.com/rollup/-/rollup-2.61.0.tgz#ccd927bcd6cc0c78a4689c918627a717977208f4"
integrity sha512-teQ+T1mUYbyvGyUavCodiyA9hD4DxwYZJwr/qehZGhs1Z49vsmzelMVYMxGU4ZhGRKxYPupHuz5yzm/wj7VpWA==
optionalDependencies:
fsevents "~2.3.2"

safe-buffer@^5.1.0:
version "5.2.1"
resolved "https://registry.yarnpkg.com/safe-buffer/-/safe-buffer-5.2.1.tgz#1eaf9fa9bdb1fdd4ec75f58f9cdb4e6b7827eec6"
integrity sha512-rp3So07KcdmmKbGvgaNxQSJr7bGVSVk5S9Eq1F+ppbRo70+YeaDxkw5Dd8NPN+GD6bjnYm2VuPuCXmpuYvmCXQ==

serialize-javascript@^4.0.0:
version "4.0.0"
resolved "https://registry.yarnpkg.com/serialize-javascript/-/serialize-javascript-4.0.0.tgz#b525e1238489a5ecfc42afacc3fe99e666f4b1aa"
integrity sha512-GaNA54380uFefWghODBWEGisLZFj00nS5ACs6yHa9nLqlLpVLO8ChDGeKRjZnV4Nh4n0Qi7nhYZD/9fCPzEqkw==
dependencies:
randombytes "^2.1.0"

source-map-support@~0.5.20:
version "0.5.21"
resolved "https://registry.yarnpkg.com/source-map-support/-/source-map-support-0.5.21.tgz#04fe7c7f9e1ed2d662233c28cb2b35b9f63f6e4f"
integrity sha512-uBHU3L3czsIyYXKX88fdrGovxdSCoTGDRZ6SYXtSRxLZUzHg5P/66Ht6uoUlHu9EZod+inXhKo3qQgwXUT/y1w==
dependencies:
buffer-from "^1.0.0"
source-map "^0.6.0"

source-map@^0.6.0:
version "0.6.1"
resolved "https://registry.yarnpkg.com/source-map/-/source-map-0.6.1.tgz#74722af32e9614e9c287a8d0bbde48b5e2f1a263"
integrity sha512-UjgapumWlbMhkBgzT7Ykc5YXUT46F0iKu8SGXq0bcwP5dz/h0Plj6enJqjz1Zbq2l5WaqYnrVbwWOWMyF3F47g==

sourcemap-codec@^1.4.4:
version "1.4.8"
resolved "https://registry.yarnpkg.com/sourcemap-codec/-/sourcemap-codec-1.4.8.tgz#ea804bd94857402e6992d05a38ef1ae35a9ab4c4"
integrity sha512-9NykojV5Uih4lgo5So5dtw+f0JgJX30KCNI8gwhz2J9A15wD0Ml6tjHKwf6fTSa6fAdVBdZeNOs9eJ71qCk8vA==

supports-color@^5.3.0:
version "5.5.0"
resolved "https://registry.yarnpkg.com/supports-color/-/supports-color-5.5.0.tgz#e2e69a44ac8772f78a1ec0b35b689df6530efc8f"
integrity sha512-QjVjwdXIt408MIiAqCX4oUKsgU2EqAGzs2Ppkm4aQYbjm+ZEWEcW4SfFNTr4uMNZma0ey4f5lgLrkB0aX0QMow==
dependencies:
has-flag "^3.0.0"

supports-color@^7.0.0:
version "7.2.0"
resolved "https://registry.yarnpkg.com/supports-color/-/supports-color-7.2.0.tgz#1b7dcdcb32b8138801b3e478ba6a51caa89648da"
integrity sha512-qpCAvRl9stuOHveKsn7HncJRvv501qIacKzQlO/+Lwxc9+0q2wLyv4Dfvt80/DPn2pqOBsJdDiogXGR9+OvwRw==
dependencies:
has-flag "^4.0.0"

svelte@^3.49.0:
version "3.49.0"
resolved "https://registry.yarnpkg.com/svelte/-/svelte-3.49.0.tgz#5baee3c672306de1070c3b7888fc2204e36a4029"
integrity sha512-+lmjic1pApJWDfPCpUUTc1m8azDqYCG1JN9YEngrx/hUyIcFJo6VZhj0A1Ai0wqoHcEIuQy+e9tk+4uDgdtsFA==

sveltestrap@^5.6.1:
version "5.6.3"
resolved "https://registry.yarnpkg.com/sveltestrap/-/sveltestrap-5.6.3.tgz#afb81b00d0b378719988e5339f92254dce41194f"
integrity sha512-/geTKJbPmJGzwHFKYC3NkUNDk/GKxrppgdSxcg58w/qcxs0S6RiN4PaQ1tgBKsdSrZDfbHfkFF+dybHAyUlV0A==
dependencies:
"@popperjs/core" "^2.9.2"

terser@^5.0.0:
version "5.14.2"
resolved "https://registry.yarnpkg.com/terser/-/terser-5.14.2.tgz#9ac9f22b06994d736174f4091aa368db896f1c10"
integrity sha512-oL0rGeM/WFQCUd0y2QrWxYnq7tfSuKBiqTjRPWrRgB46WD/kiwHwF8T23z78H6Q6kGCuuHcPB+KULHRdxvVGQA==
dependencies:
"@jridgewell/source-map" "^0.3.2"
acorn "^8.5.0"
commander "^2.20.0"
source-map-support "~0.5.20"

uplot@^1.6.7:
version "1.6.17"
resolved "https://registry.yarnpkg.com/uplot/-/uplot-1.6.17.tgz#1f8fc07a0e48008798beca463523621ad66dcc46"
integrity sha512-WHNHvDCXURn+Qwb3QUUzP6rOxx+3kUZUspREyhkqmXCxFIND99l5z9intTh+uPEt+/EEu7lCaMjSd1uTfuTXfg==

wonka@^4.0.14, wonka@^4.0.15:
version "4.0.15"
resolved "https://registry.yarnpkg.com/wonka/-/wonka-4.0.15.tgz#9aa42046efa424565ab8f8f451fcca955bf80b89"
integrity sha512-U0IUQHKXXn6PFo9nqsHphVCE5m3IntqZNB9Jjn7EB1lrR7YTDY3YWgFvEvwniTzXSvOH/XMzAZaIfJF/LvHYXg==

wrappy@1:
version "1.0.2"
resolved "https://registry.yarnpkg.com/wrappy/-/wrappy-1.0.2.tgz#b5243d8f3ec1aa35f1364605bc0d1036e30ab69f"
integrity sha1-tSQ9jz7BqjXxNkYFvA0QNuMKtp8=