From 27894ea0a9e34685f4b1e6f4ee7913c7589a4ba6 Mon Sep 17 00:00:00 2001 From: Jan Eitzinger Date: Thu, 4 May 2023 07:00:30 +0200 Subject: [PATCH 1/2] Restructure tests --- .../api/api_test.go | 410 +----------------- internal/api/rest.go | 2 +- internal/importer/handleImport.go | 16 +- internal/importer/importer_test.go | 5 +- internal/importer/initDB.go | 2 +- internal/metricdata/cc-metric-store.go | 2 +- internal/repository/init_test.go | 64 --- 7 files changed, 28 insertions(+), 473 deletions(-) rename test/integration_test.go => internal/api/api_test.go (53%) delete mode 100644 internal/repository/init_test.go diff --git a/test/integration_test.go b/internal/api/api_test.go similarity index 53% rename from test/integration_test.go rename to internal/api/api_test.go index db1e522..9e11383 100644 --- a/test/integration_test.go +++ b/internal/api/api_test.go @@ -2,7 +2,7 @@ // All rights reserved. // Use of this source code is governed by a MIT-style // license that can be found in the LICENSE file. -package test +package api_test import ( "bytes" @@ -48,16 +48,7 @@ func setup(t *testing.T) *api.RestApi { "duration": { "from": 0, "to": 86400 }, "startTime": { "from": "2022-01-01T00:00:00Z", "to": null } } - }, - { - "name": "taurus", - "metricDataRepository": {"kind": "test", "url": "bla:8081"}, - "filterRanges": { - "numNodes": { "from": 1, "to": 4000 }, - "duration": { "from": 0, "to": 604800 }, - "startTime": { "from": "2010-01-01T00:00:00Z", "to": null } - } - } + } ] }` const testclusterJson = `{ @@ -115,215 +106,6 @@ func setup(t *testing.T) *api.RestApi { } ] }` - const taurusclusterJson = `{ - "name": "taurus", - "subClusters": [ - { - "name": "haswell", - "processorType": "Intel Haswell", - "socketsPerNode": 2, - "coresPerSocket": 12, - "threadsPerCore": 1, - "flopRateScalar": { - "unit": { - "prefix": "G", - "base": "F/s" - }, - "value": 14 - }, - "flopRateSimd": { - "unit": { - "prefix": "G", - "base": "F/s" - }, - "value": 112 - }, - "memoryBandwidth": { - "unit": { - "prefix": "G", - "base": "B/s" - }, - "value": 24 - }, - "numberOfNodes": 70, - "nodes": "w11[27-45,49-63,69-72]", - "topology": { - "node": [ 0, 1 ], - "socket": [ - [ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11 ], - [ 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23 ] - ], - "memoryDomain": [ - [ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11 ], - [ 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23 ] - ], - "core": [ [ 0 ], [ 1 ], [ 2 ], [ 3 ], [ 4 ], [ 5 ], [ 6 ], [ 7 ], [ 8 ], [ 9 ], [ 10 ], [ 11 ], [ 12 ], [ 13 ], [ 14 ], [ 15 ], [ 16 ], [ 17 ], [ 18 ], [ 19 ], [ 20 ], [ 21 ], [ 22 ], [ 23 ] ] - } - } - ], - "metricConfig": [ - { - "name": "cpu_used", - "scope": "core", - "unit": {"base": ""}, - "aggregation": "avg", - "timestep": 30, - "peak": 1, - "normal": 0.5, - "caution": 2e-07, - "alert": 1e-07, - "subClusters": [ - { - "name": "haswell", - "peak": 1, - "normal": 0.5, - "caution": 2e-07, - "alert": 1e-07 - } - ] - }, - { - "name": "ipc", - "scope": "core", - "unit": { "base": "IPC"}, - "aggregation": "avg", - "timestep": 60, - "peak": 2, - "normal": 1, - "caution": 0.1, - "alert": 0.5, - "subClusters": [ - { - "name": "haswell", - "peak": 2, - "normal": 1, - "caution": 0.1, - "alert": 0.5 - } - ] - }, - { - "name": "flops_any", - "scope": "core", - "unit": { "base": "F/s"}, - "aggregation": "sum", - "timestep": 60, - "peak": 40000000000, - "normal": 20000000000, - "caution": 30000000000, - "alert": 35000000000, - "subClusters": [ - { - "name": "haswell", - "peak": 40000000000, - "normal": 20000000000, - "caution": 
30000000000, - "alert": 35000000000 - } - ] - }, - { - "name": "mem_bw", - "scope": "socket", - "unit": { "base": "B/s"}, - "aggregation": "sum", - "timestep": 60, - "peak": 58800000000, - "normal": 28800000000, - "caution": 38800000000, - "alert": 48800000000, - "subClusters": [ - { - "name": "haswell", - "peak": 58800000000, - "normal": 28800000000, - "caution": 38800000000, - "alert": 48800000000 - } - ] - }, - { - "name": "file_bw", - "scope": "node", - "unit": { "base": "B/s"}, - "aggregation": "sum", - "timestep": 30, - "peak": 20000000000, - "normal": 5000000000, - "caution": 9000000000, - "alert": 19000000000, - "subClusters": [ - { - "name": "haswell", - "peak": 20000000000, - "normal": 5000000000, - "caution": 9000000000, - "alert": 19000000000 - } - ] - }, - { - "name": "net_bw", - "scope": "node", - "unit": { "base": "B/s"}, - "timestep": 30, - "aggregation": "sum", - "peak": 7000000000, - "normal": 5000000000, - "caution": 6000000000, - "alert": 6500000000, - "subClusters": [ - { - "name": "haswell", - "peak": 7000000000, - "normal": 5000000000, - "caution": 6000000000, - "alert": 6500000000 - } - ] - }, - { - "name": "mem_used", - "scope": "node", - "unit": {"base": "B"}, - "aggregation": "sum", - "timestep": 30, - "peak": 32000000000, - "normal": 2000000000, - "caution": 31000000000, - "alert": 30000000000, - "subClusters": [ - { - "name": "haswell", - "peak": 32000000000, - "normal": 2000000000, - "caution": 31000000000, - "alert": 30000000000 - } - ] - }, - { - "name": "cpu_power", - "scope": "socket", - "unit": {"base": "W"}, - "aggregation": "sum", - "timestep": 60, - "peak": 100, - "normal": 80, - "caution": 90, - "alert": 90, - "subClusters": [ - { - "name": "haswell", - "peak": 100, - "normal": 80, - "caution": 90, - "alert": 90 - } - ] - } - ] - }` log.Init("info", true) tmpdir := t.TempDir() @@ -344,13 +126,6 @@ func setup(t *testing.T) *api.RestApi { t.Fatal(err) } - if err := os.Mkdir(filepath.Join(jobarchive, "taurus"), 0777); err != nil { - t.Fatal(err) - } - - if err := os.WriteFile(filepath.Join(jobarchive, "taurus", "cluster.json"), []byte(taurusclusterJson), 0666); err != nil { - t.Fatal(err) - } dbfilepath := filepath.Join(tmpdir, "test.db") err := repository.MigrateDB("sqlite3", dbfilepath) if err != nil { @@ -392,7 +167,7 @@ func cleanup() { /* * This function starts a job, stops it, and then reads its data from the job-archive. * Do not run sub-tests in parallel! Tests should not be run in parallel at all, because -* at least `setup` modifies global state. Log-Output is redirected to /dev/null on purpose. +* at least `setup` modifies global state. 
*/ func TestRestApi(t *testing.T) { restapi := setup(t) @@ -477,15 +252,15 @@ func TestRestApi(t *testing.T) { job.Project != "testproj" || job.Cluster != "testcluster" || job.SubCluster != "sc1" || - *job.Partition != "default" || - *job.Walltime != 3600 || - *job.ArrayJobId != 0 || + job.Partition != "default" || + job.Walltime != 3600 || + job.ArrayJobId != 0 || job.NumNodes != 1 || - *job.NumHWThreads != 8 || - *job.NumAcc != 0 || + job.NumHWThreads != 8 || + job.NumAcc != 0 || job.Exclusive != 1 || job.MonitoringStatus != 1 || - *job.SMT != 1 || + job.SMT != 1 || !reflect.DeepEqual(job.Resources, []*schema.Resource{{Hostname: "host123", HWThreads: []int{0, 1, 2, 3, 4, 5, 6, 7}}}) || job.StartTime.Unix() != 123456789 { t.Fatalf("unexpected job properties: %#v", job) @@ -573,8 +348,7 @@ func TestRestApi(t *testing.T) { } }) - t.Run("FailedJob", func(t *testing.T) { - const startJobBody string = `{ + const startJobBodyFailed string = `{ "jobId": 12345, "user": "testuser", "project": "testproj", @@ -593,120 +367,8 @@ func TestRestApi(t *testing.T) { "startTime": 12345678 }` - ok := t.Run("StartJob", func(t *testing.T) { - req := httptest.NewRequest(http.MethodPost, "/api/jobs/start_job/", bytes.NewBuffer([]byte(startJobBody))) - recorder := httptest.NewRecorder() - - r.ServeHTTP(recorder, req) - response := recorder.Result() - if response.StatusCode != http.StatusCreated { - t.Fatal(response.Status, recorder.Body.String()) - } - }) - if !ok { - t.Fatal("subtest failed") - } - - const stopJobBody string = `{ - "jobId": 12345, - "cluster": "testcluster", - - "jobState": "failed", - "stopTime": 12355678 - }` - - ok = t.Run("StopJob", func(t *testing.T) { - req := httptest.NewRequest(http.MethodPost, "/api/jobs/stop_job/", bytes.NewBuffer([]byte(stopJobBody))) - recorder := httptest.NewRecorder() - - r.ServeHTTP(recorder, req) - response := recorder.Result() - if response.StatusCode != http.StatusOK { - t.Fatal(response.Status, recorder.Body.String()) - } - - restapi.JobRepository.WaitForArchiving() - jobid, cluster := int64(12345), "testcluster" - job, err := restapi.JobRepository.Find(&jobid, &cluster, nil) - if err != nil { - t.Fatal(err) - } - - if job.State != schema.JobStateFailed { - t.Fatal("expected job to be failed") - } - }) - if !ok { - t.Fatal("subtest failed") - } - - }) - - t.Run("ImportJob", func(t *testing.T) { - if err := repository.HandleImportFlag("meta.json:data.json"); err != nil { - t.Fatal(err) - } - - repo := repository.GetJobRepository() - jobId := int64(20639587) - cluster := "taurus" - startTime := int64(1635856524) - job, err := repo.Find(&jobId, &cluster, &startTime) - if err != nil { - t.Fatal(err) - } - - if job.NumNodes != 2 { - t.Errorf("NumNode: Received %d, expected 2", job.NumNodes) - } - - ar := archive.GetHandle() - data, err := ar.LoadJobData(job) - if err != nil { - t.Fatal(err) - } - - if len(data) != 8 { - t.Errorf("Job data length: Got %d, want 8", len(data)) - } - - r := map[string]string{"mem_used": "GB", "net_bw": "KB/s", - "cpu_power": "W", "cpu_used": "", - "file_bw": "KB/s", "flops_any": "F/s", - "mem_bw": "GB/s", "ipc": "IPC"} - - for name, scopes := range data { - for _, metric := range scopes { - if metric.Unit.Base != r[name] { - t.Errorf("Metric %s unit: Got %s, want %s", name, metric.Unit.Base, r[name]) - } - } - } - }) -} - -func subtestLetJobFail(t *testing.T, restapi *api.RestApi, r *mux.Router) { - const startJobBody string = `{ - "jobId": 12345, - "user": "testuser", - "project": "testproj", - "cluster": "testcluster", - 
"partition": "default", - "walltime": 3600, - "numNodes": 1, - "exclusive": 1, - "monitoringStatus": 1, - "smt": 1, - "resources": [ - { - "hostname": "host123" - } - ], - "startTime": 12345678 - }` - - ok := t.Run("StartJob", func(t *testing.T) { - req := httptest.NewRequest(http.MethodPost, "/api/jobs/start_job/", bytes.NewBuffer([]byte(startJobBody))) + ok := t.Run("StartJobFailed", func(t *testing.T) { + req := httptest.NewRequest(http.MethodPost, "/api/jobs/start_job/", bytes.NewBuffer([]byte(startJobBodyFailed))) recorder := httptest.NewRecorder() r.ServeHTTP(recorder, req) @@ -719,7 +381,7 @@ func subtestLetJobFail(t *testing.T, restapi *api.RestApi, r *mux.Router) { t.Fatal("subtest failed") } - const stopJobBody string = `{ + const stopJobBodyFailed string = `{ "jobId": 12345, "cluster": "testcluster", @@ -727,8 +389,8 @@ func subtestLetJobFail(t *testing.T, restapi *api.RestApi, r *mux.Router) { "stopTime": 12355678 }` - ok = t.Run("StopJob", func(t *testing.T) { - req := httptest.NewRequest(http.MethodPost, "/api/jobs/stop_job/", bytes.NewBuffer([]byte(stopJobBody))) + ok = t.Run("StopJobFailed", func(t *testing.T) { + req := httptest.NewRequest(http.MethodPost, "/api/jobs/stop_job/", bytes.NewBuffer([]byte(stopJobBodyFailed))) recorder := httptest.NewRecorder() r.ServeHTTP(recorder, req) @@ -752,45 +414,3 @@ func subtestLetJobFail(t *testing.T, restapi *api.RestApi, r *mux.Router) { t.Fatal("subtest failed") } } - -func testImportFlag(t *testing.T) { - if err := repository.HandleImportFlag("meta.json:data.json"); err != nil { - t.Fatal(err) - } - - repo := repository.GetJobRepository() - jobId := int64(20639587) - cluster := "taurus" - startTime := int64(1635856524) - job, err := repo.Find(&jobId, &cluster, &startTime) - if err != nil { - t.Fatal(err) - } - - if job.NumNodes != 2 { - t.Errorf("NumNode: Received %d, expected 2", job.NumNodes) - } - - ar := archive.GetHandle() - data, err := ar.LoadJobData(job) - if err != nil { - t.Fatal(err) - } - - if len(data) != 8 { - t.Errorf("Job data length: Got %d, want 8", len(data)) - } - - r := map[string]string{"mem_used": "GB", "net_bw": "KB/s", - "cpu_power": "W", "cpu_used": "", - "file_bw": "KB/s", "flops_any": "F/s", - "mem_bw": "GB/s", "ipc": "IPC"} - - for name, scopes := range data { - for _, metric := range scopes { - if metric.Unit.Base != r[name] { - t.Errorf("Metric %s unit: Got %s, want %s", name, metric.Unit.Base, r[name]) - } - } - } -} diff --git a/internal/api/rest.go b/internal/api/rest.go index 484f7a1..ae9e8e9 100644 --- a/internal/api/rest.go +++ b/internal/api/rest.go @@ -253,7 +253,7 @@ func (api *RestApi) getJobs(rw http.ResponseWriter, r *http.Request) { results := make([]*schema.JobMeta, 0, len(jobs)) for _, job := range jobs { if withMetadata { - if _, err := api.JobRepository.FetchMetadata(job); err != nil { + if _, err = api.JobRepository.FetchMetadata(job); err != nil { handleError(err, http.StatusInternalServerError, rw) return } diff --git a/internal/importer/handleImport.go b/internal/importer/handleImport.go index 48b224f..c144534 100644 --- a/internal/importer/handleImport.go +++ b/internal/importer/handleImport.go @@ -37,14 +37,14 @@ func HandleImportFlag(flag string) error { } if config.Keys.Validate { - if err := schema.Validate(schema.Meta, bytes.NewReader(raw)); err != nil { + if err = schema.Validate(schema.Meta, bytes.NewReader(raw)); err != nil { return fmt.Errorf("REPOSITORY/INIT > validate job meta: %v", err) } } dec := json.NewDecoder(bytes.NewReader(raw)) dec.DisallowUnknownFields() 
jobMeta := schema.JobMeta{BaseJob: schema.JobDefaults}
-	if err := dec.Decode(&jobMeta); err != nil {
+	if err = dec.Decode(&jobMeta); err != nil {
 		log.Warn("Error while decoding raw json metadata for import")
 		return err
 	}
@@ -56,14 +56,14 @@ func HandleImportFlag(flag string) error {
 	}
 
 	if config.Keys.Validate {
-		if err := schema.Validate(schema.Data, bytes.NewReader(raw)); err != nil {
+		if err = schema.Validate(schema.Data, bytes.NewReader(raw)); err != nil {
 			return fmt.Errorf("REPOSITORY/INIT > validate job data: %v", err)
 		}
 	}
 	dec = json.NewDecoder(bytes.NewReader(raw))
 	dec.DisallowUnknownFields()
 	jobData := schema.JobData{}
-	if err := dec.Decode(&jobData); err != nil {
+	if err = dec.Decode(&jobData); err != nil {
 		log.Warn("Error while decoding raw json jobdata for import")
 		return err
 	}
@@ -71,13 +71,13 @@ func HandleImportFlag(flag string) error {
 	//checkJobData(&jobData)
 	// SanityChecks(&jobMeta.BaseJob)
 	jobMeta.MonitoringStatus = schema.MonitoringStatusArchivingSuccessful
-	if job, err := r.Find(&jobMeta.JobID, &jobMeta.Cluster, &jobMeta.StartTime); err != sql.ErrNoRows {
+	if _, err = r.Find(&jobMeta.JobID, &jobMeta.Cluster, &jobMeta.StartTime); err != sql.ErrNoRows {
 		if err != nil {
 			log.Warn("Error while finding job in jobRepository")
 			return err
 		}
 
-		return fmt.Errorf("REPOSITORY/INIT > a job with that jobId, cluster and startTime does already exist (dbid: %d)", job.ID)
+		return fmt.Errorf("REPOSITORY/INIT > a job with that jobId, cluster and startTime already exists")
 	}
 
 	job := schema.Job{
@@ -102,12 +102,12 @@ func HandleImportFlag(flag string) error {
 		return err
 	}
 
-	if err := SanityChecks(&job.BaseJob); err != nil {
+	if err = SanityChecks(&job.BaseJob); err != nil {
 		log.Warn("BaseJob SanityChecks failed")
 		return err
 	}
 
-	if err := archive.GetHandle().ImportJob(&jobMeta, &jobData); err != nil {
+	if err = archive.GetHandle().ImportJob(&jobMeta, &jobData); err != nil {
 		log.Error("Error while importing job")
 		return err
 	}
diff --git a/internal/importer/importer_test.go b/internal/importer/importer_test.go
index e31bab8..83ba5eb 100644
--- a/internal/importer/importer_test.go
+++ b/internal/importer/importer_test.go
@@ -34,7 +34,7 @@ func copyFile(s string, d string) error {
 	return nil
 }
 
-func setupRepo(t *testing.T) *repository.JobRepository {
+func setup(t *testing.T) *repository.JobRepository {
 	const testconfig = `{
 	"addr": "0.0.0.0:8080",
 	"validate": false,
@@ -138,7 +138,7 @@ func readResult(t *testing.T, testname string) Result {
 }
 
 func TestHandleImportFlag(t *testing.T) {
-	r := setupRepo(t)
+	r := setup(t)
 
 	tests, err := filepath.Glob(filepath.Join("testdata", "*.input"))
 	if err != nil {
@@ -167,7 +167,6 @@ func TestHandleImportFlag(t *testing.T) {
 		if job.Duration != result.Duration {
 			t.Errorf("wrong duration for job\ngot: %d \nwant: %d", job.Duration, result.Duration)
 		}
-
 	})
 	}
 }
diff --git a/internal/importer/initDB.go b/internal/importer/initDB.go
index 0bfd13b..36072e4 100644
--- a/internal/importer/initDB.go
+++ b/internal/importer/initDB.go
@@ -136,7 +136,7 @@ func SanityChecks(job *schema.BaseJob) error {
 	if len(job.Resources) == 0 || len(job.User) == 0 {
 		return fmt.Errorf("'resources' and 'user' should not be empty")
 	}
-	if *job.NumAcc < 0 || *job.NumHWThreads < 0 || job.NumNodes < 1 {
+	if job.NumAcc < 0 || job.NumHWThreads < 0 || job.NumNodes < 1 {
 		return fmt.Errorf("'numNodes', 'numAcc' or 'numHWThreads' invalid")
 	}
 	if len(job.Resources) != int(job.NumNodes) {
diff --git a/internal/metricdata/cc-metric-store.go b/internal/metricdata/cc-metric-store.go
index 
0df3fda..6b3153f 100644 --- a/internal/metricdata/cc-metric-store.go +++ b/internal/metricdata/cc-metric-store.go @@ -293,7 +293,7 @@ func (ccms *CCMetricStore) buildQueries( scopesLoop: for _, requestedScope := range scopes { nativeScope := mc.Scope - if nativeScope == schema.MetricScopeAccelerator && job.NumAcc == nil { + if nativeScope == schema.MetricScopeAccelerator && job.NumAcc == 0 { continue } diff --git a/internal/repository/init_test.go b/internal/repository/init_test.go deleted file mode 100644 index 4f5930c..0000000 --- a/internal/repository/init_test.go +++ /dev/null @@ -1,64 +0,0 @@ -// Copyright (C) 2022 NHR@FAU, University Erlangen-Nuremberg. -// All rights reserved. -// Use of this source code is governed by a MIT-style -// license that can be found in the LICENSE file. -package repository - -import ( - "fmt" - "testing" - - "github.com/ClusterCockpit/cc-backend/pkg/units" -) - -func TestNormalizeFactor(t *testing.T) { - // var us string - s := []float64{2890031237, 23998994567, 389734042344, 390349424345} - // r := []float64{3, 24, 390, 391} - - total := 0.0 - for _, number := range s { - total += number - } - avg := total / float64(len(s)) - - fmt.Printf("AVG: %e\n", avg) - f, e := getNormalizationFactor(avg) - - fmt.Printf("Factor %e Count %d\n", f, e) - - np := units.NewPrefix("") - - fmt.Printf("Prefix %e Short %s\n", float64(np), np.Prefix()) - - p := units.NewPrefixFromFactor(np, e) - - if p.Prefix() != "G" { - t.Errorf("Failed Prefix or unit: Want G, Got %s", p.Prefix()) - } -} - -func TestNormalizeKeep(t *testing.T) { - s := []float64{3.0, 24.0, 390.0, 391.0} - - total := 0.0 - for _, number := range s { - total += number - } - avg := total / float64(len(s)) - - fmt.Printf("AVG: %e\n", avg) - f, e := getNormalizationFactor(avg) - - fmt.Printf("Factor %e Count %d\n", f, e) - - np := units.NewPrefix("G") - - fmt.Printf("Prefix %e Short %s\n", float64(np), np.Prefix()) - - p := units.NewPrefixFromFactor(np, e) - - if p.Prefix() != "G" { - t.Errorf("Failed Prefix or unit: Want G, Got %s", p.Prefix()) - } -} From dce02ab1e0d6a28190c031fb8518ca1aa0ed18c6 Mon Sep 17 00:00:00 2001 From: Jan Eitzinger Date: Thu, 4 May 2023 07:00:47 +0200 Subject: [PATCH 2/2] Revert optional pointer attributes --- pkg/schema/job.go | 12 ++++++------ tools/archive-migration/job.go | 12 ++++++------ 2 files changed, 12 insertions(+), 12 deletions(-) diff --git a/pkg/schema/job.go b/pkg/schema/job.go index aae14f6..b8b0570 100644 --- a/pkg/schema/job.go +++ b/pkg/schema/job.go @@ -22,17 +22,17 @@ type BaseJob struct { Project string `json:"project" db:"project" example:"abcd200"` // The unique identifier of a project Cluster string `json:"cluster" db:"cluster" example:"fritz"` // The unique identifier of a cluster SubCluster string `json:"subCluster" db:"subcluster" example:"main"` // The unique identifier of a sub cluster - Partition *string `json:"partition,omitempty" db:"partition" example:"main"` // The Slurm partition to which the job was submitted - ArrayJobId *int64 `json:"arrayJobId,omitempty" db:"array_job_id" example:"123000"` // The unique identifier of an array job + Partition string `json:"partition,omitempty" db:"partition" example:"main"` // The Slurm partition to which the job was submitted + ArrayJobId int64 `json:"arrayJobId,omitempty" db:"array_job_id" example:"123000"` // The unique identifier of an array job NumNodes int32 `json:"numNodes" db:"num_nodes" example:"2" minimum:"1"` // Number of nodes used (Min > 0) - NumHWThreads *int32 `json:"numHwthreads,omitempty" 
db:"num_hwthreads" example:"20" minimum:"1"` // Number of HWThreads used (Min > 0) - NumAcc *int32 `json:"numAcc,omitempty" db:"num_acc" example:"2" minimum:"1"` // Number of accelerators used (Min > 0) + NumHWThreads int32 `json:"numHwthreads,omitempty" db:"num_hwthreads" example:"20" minimum:"1"` // Number of HWThreads used (Min > 0) + NumAcc int32 `json:"numAcc,omitempty" db:"num_acc" example:"2" minimum:"1"` // Number of accelerators used (Min > 0) Exclusive int32 `json:"exclusive" db:"exclusive" example:"1" minimum:"0" maximum:"2"` // Specifies how nodes are shared: 0 - Shared among multiple jobs of multiple users, 1 - Job exclusive (Default), 2 - Shared among multiple jobs of same user MonitoringStatus int32 `json:"monitoringStatus,omitempty" db:"monitoring_status" example:"1" minimum:"0" maximum:"3"` // State of monitoring system during job run: 0 - Disabled, 1 - Running or Archiving (Default), 2 - Archiving Failed, 3 - Archiving Successfull - SMT *int32 `json:"smt,omitempty" db:"smt" example:"4"` // SMT threads used by job + SMT int32 `json:"smt,omitempty" db:"smt" example:"4"` // SMT threads used by job State JobState `json:"jobState" db:"job_state" example:"completed" enums:"completed,failed,cancelled,stopped,timeout,out_of_memory"` // Final state of job Duration int32 `json:"duration" db:"duration" example:"43200" minimum:"1"` // Duration of job in seconds (Min > 0) - Walltime *int64 `json:"walltime,omitempty" db:"walltime" example:"86400" minimum:"1"` // Requested walltime of job in seconds (Min > 0) + Walltime int64 `json:"walltime,omitempty" db:"walltime" example:"86400" minimum:"1"` // Requested walltime of job in seconds (Min > 0) Tags []*Tag `json:"tags,omitempty"` // List of tags RawResources []byte `json:"-" db:"resources"` // Resources used by job [As Bytes] Resources []*Resource `json:"resources"` // Resources used by job diff --git a/tools/archive-migration/job.go b/tools/archive-migration/job.go index 883ea81..cd54d6c 100644 --- a/tools/archive-migration/job.go +++ b/tools/archive-migration/job.go @@ -23,17 +23,17 @@ type BaseJob struct { Project string `json:"project" db:"project" example:"abcd200"` // The unique identifier of a project Cluster string `json:"cluster" db:"cluster" example:"fritz"` // The unique identifier of a cluster SubCluster string `json:"subCluster" db:"subcluster" example:"main"` // The unique identifier of a sub cluster - Partition *string `json:"partition" db:"partition" example:"main"` // The Slurm partition to which the job was submitted - ArrayJobId *int64 `json:"arrayJobId" db:"array_job_id" example:"123000"` // The unique identifier of an array job + Partition string `json:"partition" db:"partition" example:"main"` // The Slurm partition to which the job was submitted + ArrayJobId int64 `json:"arrayJobId" db:"array_job_id" example:"123000"` // The unique identifier of an array job NumNodes int32 `json:"numNodes" db:"num_nodes" example:"2" minimum:"1"` // Number of nodes used (Min > 0) - NumHWThreads *int32 `json:"numHwthreads" db:"num_hwthreads" example:"20" minimum:"1"` // Number of HWThreads used (Min > 0) - NumAcc *int32 `json:"numAcc" db:"num_acc" example:"2" minimum:"1"` // Number of accelerators used (Min > 0) + NumHWThreads int32 `json:"numHwthreads" db:"num_hwthreads" example:"20" minimum:"1"` // Number of HWThreads used (Min > 0) + NumAcc int32 `json:"numAcc" db:"num_acc" example:"2" minimum:"1"` // Number of accelerators used (Min > 0) Exclusive int32 `json:"exclusive" db:"exclusive" example:"1" minimum:"0" maximum:"2"` // 
Specifies how nodes are shared: 0 - Shared among multiple jobs of multiple users, 1 - Job exclusive (Default), 2 - Shared among multiple jobs of same user MonitoringStatus int32 `json:"monitoringStatus" db:"monitoring_status" example:"1" minimum:"0" maximum:"3"` // State of monitoring system during job run: 0 - Disabled, 1 - Running or Archiving (Default), 2 - Archiving Failed, 3 - Archiving Successfull - SMT *int32 `json:"smt" db:"smt" example:"4"` // SMT threads used by job + SMT int32 `json:"smt" db:"smt" example:"4"` // SMT threads used by job State JobState `json:"jobState" db:"job_state" example:"completed" enums:"completed,failed,cancelled,stopped,timeout,out_of_memory"` // Final state of job Duration int32 `json:"duration" db:"duration" example:"43200" minimum:"1"` // Duration of job in seconds (Min > 0) - Walltime *int64 `json:"walltime" db:"walltime" example:"86400" minimum:"1"` // Requested walltime of job in seconds (Min > 0) + Walltime int64 `json:"walltime" db:"walltime" example:"86400" minimum:"1"` // Requested walltime of job in seconds (Min > 0) Tags []*schema.Tag `json:"tags"` // List of tags RawResources []byte `json:"-" db:"resources"` // Resources used by job [As Bytes] Resources []*Resource `json:"resources"` // Resources used by job
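
A minimal, self-contained Go sketch of the serialization tradeoff behind PATCH 2/2 (the types below are illustrative only, not the cc-backend schema): with plain value fields and omitempty, a genuine zero is dropped from the marshaled JSON, so "unset" and 0 become indistinguishable, whereas pointer fields preserve that distinction. This is what the revert trades away, and why cc-metric-store.go now tests job.NumAcc == 0 instead of job.NumAcc == nil.

package main

import (
	"encoding/json"
	"fmt"
)

// Value field with omitempty: the zero value is omitted entirely,
// so "no accelerators" (0) and "field never set" marshal identically.
type JobValue struct {
	NumAcc int32 `json:"numAcc,omitempty"`
}

// Pointer field with omitempty: nil means "unset", while a pointer
// to 0 is still emitted as "numAcc":0.
type JobPointer struct {
	NumAcc *int32 `json:"numAcc,omitempty"`
}

func main() {
	v, _ := json.Marshal(JobValue{NumAcc: 0})
	fmt.Println(string(v)) // {}

	zero := int32(0)
	p, _ := json.Marshal(JobPointer{NumAcc: &zero})
	fmt.Println(string(p)) // {"numAcc":0}

	q, _ := json.Marshal(JobPointer{NumAcc: nil})
	fmt.Println(string(q)) // {}
}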