// cc-backend/test/integration_test.go

package test

import (
	"bytes"
	"context"
	"encoding/json"
	"fmt"
	"net/http"
	"net/http/httptest"
	"os"
	"path/filepath"
	"reflect"
	"strconv"
	"strings"
	"testing"

	"github.com/ClusterCockpit/cc-backend/internal/api"
	"github.com/ClusterCockpit/cc-backend/internal/config"
	"github.com/ClusterCockpit/cc-backend/internal/graph"
	"github.com/ClusterCockpit/cc-backend/internal/metricdata"
	"github.com/ClusterCockpit/cc-backend/internal/repository"
	"github.com/ClusterCockpit/cc-backend/pkg/archive"
	"github.com/ClusterCockpit/cc-backend/pkg/schema"

	"github.com/gorilla/mux"
	_ "github.com/mattn/go-sqlite3"
)
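
// setup writes a test configuration, a cluster.json for each test cluster,
// and an empty SQLite database into a temporary directory, initializes the
// config, archive, metricdata, and repository packages from those files,
// and returns a RestApi handle for use by the tests below.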
func setup(t *testing.T) *api.RestApi {
	const testconfig = `{
	"addr": "0.0.0.0:8080",
	"validate": false,
	"archive": {
		"kind": "file",
		"path": "./var/job-archive"
	},
	"clusters": [
		{
			"name": "testcluster",
			"metricDataRepository": {"kind": "test", "url": "bla:8081"},
			"filterRanges": {
				"numNodes": { "from": 1, "to": 64 },
				"duration": { "from": 0, "to": 86400 },
				"startTime": { "from": "2022-01-01T00:00:00Z", "to": null }
			}
		},
		{
			"name": "taurus",
			"metricDataRepository": {"kind": "test", "url": "bla:8081"},
			"filterRanges": {
				"numNodes": { "from": 1, "to": 4000 },
				"duration": { "from": 0, "to": 604800 },
				"startTime": { "from": "2010-01-01T00:00:00Z", "to": null }
			}
		}
	]
}`
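
	// The cluster.json payloads for the two clusters named in testconfig;
	// both are written into the temporary job archive created below.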
	const testclusterJson = `{
	"name": "testcluster",
	"subClusters": [
		{
			"name": "sc1",
			"nodes": "host123,host124,host125",
			"processorType": "Intel Core i7-4770",
			"socketsPerNode": 1,
			"coresPerSocket": 4,
			"threadsPerCore": 2,
			"flopRateScalar": {
				"unit": { "prefix": "G", "base": "F/s" },
				"value": 14
			},
			"flopRateSimd": {
				"unit": { "prefix": "G", "base": "F/s" },
				"value": 112
			},
			"memoryBandwidth": {
				"unit": { "prefix": "G", "base": "B/s" },
				"value": 24
			},
			"numberOfNodes": 70,
			"topology": {
				"node": [0, 1, 2, 3, 4, 5, 6, 7],
				"socket": [[0, 1, 2, 3, 4, 5, 6, 7]],
				"memoryDomain": [[0, 1, 2, 3, 4, 5, 6, 7]],
				"die": [[0, 1, 2, 3, 4, 5, 6, 7]],
				"core": [[0], [1], [2], [3], [4], [5], [6], [7]]
			}
		}
	],
	"metricConfig": [
		{
			"name": "load_one",
			"unit": { "base": "" },
			"scope": "node",
			"timestep": 60,
			"aggregation": "avg",
			"peak": 8,
			"normal": 0,
			"caution": 0,
			"alert": 0
		}
	]
}`

	const taurusclusterJson = `{
	"name": "taurus",
	"subClusters": [
		{
			"name": "haswell",
			"processorType": "Intel Haswell",
			"socketsPerNode": 2,
			"coresPerSocket": 12,
			"threadsPerCore": 1,
			"flopRateScalar": {
				"unit": { "prefix": "G", "base": "F/s" },
				"value": 14
			},
			"flopRateSimd": {
				"unit": { "prefix": "G", "base": "F/s" },
				"value": 112
			},
			"memoryBandwidth": {
				"unit": { "prefix": "G", "base": "B/s" },
				"value": 24
			},
			"numberOfNodes": 70,
			"nodes": "w11[27-45,49-63,69-72]",
			"topology": {
				"node": [0, 1],
				"socket": [
					[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11],
					[12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23]
				],
				"memoryDomain": [
					[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11],
					[12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23]
				],
				"core": [[0], [1], [2], [3], [4], [5], [6], [7], [8], [9], [10], [11], [12], [13], [14], [15], [16], [17], [18], [19], [20], [21], [22], [23]]
			}
		}
	],
	"metricConfig": [
		{
			"name": "cpu_used",
			"scope": "core",
			"unit": { "base": "" },
			"aggregation": "avg",
			"timestep": 30,
			"peak": 1,
			"normal": 0.5,
			"caution": 2e-07,
			"alert": 1e-07,
			"subClusters": [
				{ "name": "haswell", "peak": 1, "normal": 0.5, "caution": 2e-07, "alert": 1e-07 }
			]
		},
		{
			"name": "ipc",
			"scope": "core",
			"unit": { "base": "IPC" },
			"aggregation": "avg",
			"timestep": 60,
			"peak": 2,
			"normal": 1,
			"caution": 0.1,
			"alert": 0.5,
			"subClusters": [
				{ "name": "haswell", "peak": 2, "normal": 1, "caution": 0.1, "alert": 0.5 }
			]
		},
		{
			"name": "flops_any",
			"scope": "core",
			"unit": { "base": "F/s" },
			"aggregation": "sum",
			"timestep": 60,
			"peak": 40000000000,
			"normal": 20000000000,
			"caution": 30000000000,
			"alert": 35000000000,
			"subClusters": [
				{ "name": "haswell", "peak": 40000000000, "normal": 20000000000, "caution": 30000000000, "alert": 35000000000 }
			]
		},
		{
			"name": "mem_bw",
			"scope": "socket",
			"unit": { "base": "B/s" },
			"aggregation": "sum",
			"timestep": 60,
			"peak": 58800000000,
			"normal": 28800000000,
			"caution": 38800000000,
			"alert": 48800000000,
			"subClusters": [
				{ "name": "haswell", "peak": 58800000000, "normal": 28800000000, "caution": 38800000000, "alert": 48800000000 }
			]
		},
		{
			"name": "file_bw",
			"scope": "node",
			"unit": { "base": "B/s" },
			"aggregation": "sum",
			"timestep": 30,
			"peak": 20000000000,
			"normal": 5000000000,
			"caution": 9000000000,
			"alert": 19000000000,
			"subClusters": [
				{ "name": "haswell", "peak": 20000000000, "normal": 5000000000, "caution": 9000000000, "alert": 19000000000 }
			]
		},
		{
			"name": "net_bw",
			"scope": "node",
			"unit": { "base": "B/s" },
			"aggregation": "sum",
			"timestep": 30,
			"peak": 7000000000,
			"normal": 5000000000,
			"caution": 6000000000,
			"alert": 6500000000,
			"subClusters": [
				{ "name": "haswell", "peak": 7000000000, "normal": 5000000000, "caution": 6000000000, "alert": 6500000000 }
			]
		},
		{
			"name": "mem_used",
			"scope": "node",
			"unit": { "base": "B" },
			"aggregation": "sum",
			"timestep": 30,
			"peak": 32000000000,
			"normal": 2000000000,
			"caution": 31000000000,
			"alert": 30000000000,
			"subClusters": [
				{ "name": "haswell", "peak": 32000000000, "normal": 2000000000, "caution": 31000000000, "alert": 30000000000 }
			]
		},
		{
			"name": "cpu_power",
			"scope": "socket",
			"unit": { "base": "W" },
			"aggregation": "sum",
			"timestep": 60,
			"peak": 100,
			"normal": 80,
			"caution": 90,
			"alert": 90,
			"subClusters": [
				{ "name": "haswell", "peak": 100, "normal": 80, "caution": 90, "alert": 90 }
			]
		}
	]
}`

	tmpdir := t.TempDir()
	jobarchive := filepath.Join(tmpdir, "job-archive")
	if err := os.Mkdir(jobarchive, 0777); err != nil {
		t.Fatal(err)
	}

	if err := os.WriteFile(filepath.Join(jobarchive, "version.txt"), []byte(fmt.Sprintf("%d", 1)), 0666); err != nil {
		t.Fatal(err)
	}

	if err := os.Mkdir(filepath.Join(jobarchive, "testcluster"), 0777); err != nil {
		t.Fatal(err)
	}
	if err := os.WriteFile(filepath.Join(jobarchive, "testcluster", "cluster.json"), []byte(testclusterJson), 0666); err != nil {
		t.Fatal(err)
	}
	if err := os.Mkdir(filepath.Join(jobarchive, "taurus"), 0777); err != nil {
		t.Fatal(err)
	}
	if err := os.WriteFile(filepath.Join(jobarchive, "taurus", "cluster.json"), []byte(taurusclusterJson), 0666); err != nil {
		t.Fatal(err)
	}
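
	// An empty SQLite database file is enough at this point; the jobs
	// schema is applied once the connection is up (see db.DB.Exec below).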
	dbfilepath := filepath.Join(tmpdir, "test.db")
	f, err := os.Create(dbfilepath)
	if err != nil {
		t.Fatal(err)
	}
	f.Close()

	cfgFilePath := filepath.Join(tmpdir, "config.json")
	if err := os.WriteFile(cfgFilePath, []byte(testconfig), 0666); err != nil {
		t.Fatal(err)
	}
	config.Init(cfgFilePath)
	archiveCfg := fmt.Sprintf(`{"kind": "file", "path": "%s"}`, jobarchive)

	repository.Connect("sqlite3", dbfilepath)
	db := repository.GetConnection()

	if err := archive.Init(json.RawMessage(archiveCfg)); err != nil {
		t.Fatal(err)
	}
	if err := metricdata.Init(config.Keys.DisableArchive); err != nil {
		t.Fatal(err)
	}

	if _, err := db.DB.Exec(repository.JobsDBSchema); err != nil {
		t.Fatal(err)
	}

	jobRepo := repository.GetJobRepository()
	resolver := &graph.Resolver{DB: db.DB, Repo: jobRepo}

	return &api.RestApi{
		JobRepository: resolver.Repo,
		Resolver:      resolver,
	}
}

func cleanup() {
	// TODO: Clear all caches, reset all modules, etc...
}

/*
 * This function starts a job, stops it, and then reads its data from the job archive.
 * Do not run the sub-tests in parallel! Tests should not be run in parallel at all,
 * because at least `setup` modifies global state. Log output is redirected to
 * /dev/null on purpose.
 */
func TestRestApi(t *testing.T) {
	restapi := setup(t)
	t.Cleanup(cleanup)

	testData := schema.JobData{
		"load_one": map[schema.MetricScope]*schema.JobMetric{
			schema.MetricScopeNode: {
				Unit:     schema.Unit{Base: "load"},
				Timestep: 60,
				Series: []schema.Series{
					{
						Hostname:   "host123",
						Statistics: schema.MetricStatistics{Min: 0.1, Avg: 0.2, Max: 0.3},
						Data:       []schema.Float{0.1, 0.1, 0.1, 0.2, 0.2, 0.2, 0.3, 0.3, 0.3},
					},
				},
			},
		},
	}

	metricdata.TestLoadDataCallback = func(job *schema.Job, metrics []string, scopes []schema.MetricScope, ctx context.Context) (schema.JobData, error) {
		return testData, nil
	}

	r := mux.NewRouter()
	restapi.MountRoutes(r)
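
	// The payload below mirrors what a workload-manager integration would
	// POST to the REST API when a job enters the system.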
	const startJobBody string = `{
		"jobId": 123,
		"user": "testuser",
		"project": "testproj",
		"cluster": "testcluster",
		"partition": "default",
		"walltime": 3600,
		"arrayJobId": 0,
		"numNodes": 1,
		"numHwthreads": 8,
		"numAcc": 0,
		"exclusive": 1,
		"monitoringStatus": 1,
		"smt": 1,
		"tags": [{ "type": "testTagType", "name": "testTagName" }],
		"resources": [
			{
				"hostname": "host123",
				"hwthreads": [0, 1, 2, 3, 4, 5, 6, 7]
			}
		],
		"metaData": { "jobScript": "blablabla..." },
		"startTime": 123456789
	}`
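
	// For reference, the equivalent manual request would look roughly like
	// the following (a server on localhost:8080 and a valid JWT are
	// assumptions, not part of this test):
	//
	//   curl -X POST http://localhost:8080/api/jobs/start_job/ \
	//        -H "Authorization: Bearer $JWT" -d @job.json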
	var dbid int64
	if ok := t.Run("StartJob", func(t *testing.T) {
		req := httptest.NewRequest(http.MethodPost, "/api/jobs/start_job/", bytes.NewBuffer([]byte(startJobBody)))
		recorder := httptest.NewRecorder()

		r.ServeHTTP(recorder, req)
		response := recorder.Result()
		if response.StatusCode != http.StatusCreated {
			t.Fatal(response.Status, recorder.Body.String())
		}

		var res api.StartJobApiResponse
		if err := json.Unmarshal(recorder.Body.Bytes(), &res); err != nil {
			t.Fatal(err)
		}

		job, err := restapi.Resolver.Query().Job(context.Background(), strconv.Itoa(int(res.DBID)))
		if err != nil {
			t.Fatal(err)
		}

		job.Tags, err = restapi.Resolver.Job().Tags(context.Background(), job)
		if err != nil {
			t.Fatal(err)
		}

		if job.JobID != 123 ||
			job.User != "testuser" ||
			job.Project != "testproj" ||
			job.Cluster != "testcluster" ||
			job.SubCluster != "sc1" ||
			*job.Partition != "default" ||
			*job.Walltime != 3600 ||
			*job.ArrayJobId != 0 ||
			job.NumNodes != 1 ||
			*job.NumHWThreads != 8 ||
			*job.NumAcc != 0 ||
			job.Exclusive != 1 ||
			job.MonitoringStatus != 1 ||
			*job.SMT != 1 ||
			!reflect.DeepEqual(job.Resources, []*schema.Resource{{Hostname: "host123", HWThreads: []int{0, 1, 2, 3, 4, 5, 6, 7}}}) ||
			job.StartTime.Unix() != 123456789 {
			t.Fatalf("unexpected job properties: %#v", job)
		}

		if len(job.Tags) != 1 || job.Tags[0].Type != "testTagType" || job.Tags[0].Name != "testTagName" {
			t.Fatalf("unexpected tags: %#v", job.Tags)
		}

		dbid = res.DBID
	}); !ok {
		return
	}

	const stopJobBody string = `{
		"jobId": 123,
		"startTime": 123456789,
		"cluster": "testcluster",
		"jobState": "completed",
		"stopTime": 123457789
	}`
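
	// StopJob: complete the job, wait for the archiver to finish, then check
	// state, duration, and the metadata stored alongside the job.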
	var stoppedJob *schema.Job
	if ok := t.Run("StopJob", func(t *testing.T) {
		req := httptest.NewRequest(http.MethodPost, "/api/jobs/stop_job/", bytes.NewBuffer([]byte(stopJobBody)))
		recorder := httptest.NewRecorder()

		r.ServeHTTP(recorder, req)
		response := recorder.Result()
		if response.StatusCode != http.StatusOK {
			t.Fatal(response.Status, recorder.Body.String())
		}

		restapi.OngoingArchivings.Wait()
		job, err := restapi.Resolver.Query().Job(context.Background(), strconv.Itoa(int(dbid)))
		if err != nil {
			t.Fatal(err)
		}

		if job.State != schema.JobStateCompleted {
			t.Fatal("expected job to be completed")
		}

		if job.Duration != (123457789 - 123456789) {
			t.Fatalf("unexpected job properties: %#v", job)
		}

		job.MetaData, err = restapi.JobRepository.FetchMetadata(job)
		if err != nil {
			t.Fatal(err)
		}

		if !reflect.DeepEqual(job.MetaData, map[string]string{"jobScript": "blablabla..."}) {
			t.Fatalf("unexpected job.metaData: %#v", job.MetaData)
		}

		stoppedJob = job
	}); !ok {
		return
	}
t.Run("CheckArchive", func(t *testing.T) {
data, err := metricdata.LoadData(stoppedJob, []string{"load_one"}, []schema.MetricScope{schema.MetricScopeNode}, context.Background())
if err != nil {
t.Fatal(err)
}
if !reflect.DeepEqual(data, testData) {
t.Fatal("unexpected data fetched from archive")
}
})
t.Run("CheckDoubleStart", func(t *testing.T) {
// Starting a job with the same jobId and cluster should only be allowed if the startTime is far appart!
body := strings.Replace(startJobBody, `"startTime": 123456789`, `"startTime": 123456790`, -1)
req := httptest.NewRequest(http.MethodPost, "/api/jobs/start_job/", bytes.NewBuffer([]byte(body)))
recorder := httptest.NewRecorder()
r.ServeHTTP(recorder, req)
response := recorder.Result()
if response.StatusCode != http.StatusUnprocessableEntity {
t.Fatal(response.Status, recorder.Body.String())
}
})
2022-07-12 10:22:22 +02:00
2023-03-29 14:37:42 +02:00
// t.Run("FailedJob", func(t *testing.T) {
// subtestLetJobFail(t, restapi, r)
// })
2023-03-29 14:37:42 +02:00
// t.Run("ImportJob", func(t *testing.T) {
// testImportFlag(t)
// })
2022-07-12 10:22:22 +02:00
}
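
// subtestLetJobFail starts a second job and stops it with jobState "failed",
// then verifies that the failed state was recorded. It is currently disabled
// in TestRestApi (see the commented-out "FailedJob" sub-test above).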
func subtestLetJobFail(t *testing.T, restapi *api.RestApi, r *mux.Router) {
	const startJobBody string = `{
		"jobId": 12345,
		"user": "testuser",
		"project": "testproj",
		"cluster": "testcluster",
		"partition": "default",
		"walltime": 3600,
		"numNodes": 1,
		"exclusive": 1,
		"monitoringStatus": 1,
		"smt": 1,
		"resources": [
			{
				"hostname": "host123"
			}
		],
		"startTime": 12345678
	}`

	ok := t.Run("StartJob", func(t *testing.T) {
		req := httptest.NewRequest(http.MethodPost, "/api/jobs/start_job/", bytes.NewBuffer([]byte(startJobBody)))
		recorder := httptest.NewRecorder()

		r.ServeHTTP(recorder, req)
		response := recorder.Result()
		if response.StatusCode != http.StatusCreated {
			t.Fatal(response.Status, recorder.Body.String())
		}
	})
	if !ok {
		t.Fatal("subtest failed")
	}

	const stopJobBody string = `{
		"jobId": 12345,
		"cluster": "testcluster",
		"jobState": "failed",
		"stopTime": 12355678
	}`

	ok = t.Run("StopJob", func(t *testing.T) {
		req := httptest.NewRequest(http.MethodPost, "/api/jobs/stop_job/", bytes.NewBuffer([]byte(stopJobBody)))
		recorder := httptest.NewRecorder()

		r.ServeHTTP(recorder, req)
		response := recorder.Result()
		if response.StatusCode != http.StatusOK {
			t.Fatal(response.Status, recorder.Body.String())
		}

		restapi.OngoingArchivings.Wait()
		jobid, cluster := int64(12345), "testcluster"
		job, err := restapi.JobRepository.Find(&jobid, &cluster, nil)
		if err != nil {
			t.Fatal(err)
		}

		// The job was stopped with jobState "failed", so that is the state we expect.
		if job.State != schema.JobStateFailed {
			t.Fatal("expected job to be failed")
		}
	})
	if !ok {
		t.Fatal("subtest failed")
	}
}
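
// testImportFlag exercises repository.HandleImportFlag: it imports a job from
// a meta.json/data.json pair, verifies the imported job record, and checks the
// unit of every metric in the archived job data. It is currently disabled in
// TestRestApi (see the commented-out "ImportJob" sub-test above).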
func testImportFlag(t *testing.T) {
	if err := repository.HandleImportFlag("meta.json:data.json"); err != nil {
		t.Fatal(err)
	}

	repo := repository.GetJobRepository()
	jobId := int64(20639587)
	cluster := "taurus"
	startTime := int64(1635856524)
	job, err := repo.Find(&jobId, &cluster, &startTime)
	if err != nil {
		t.Fatal(err)
	}

	if job.NumNodes != 2 {
		t.Errorf("NumNodes: Got %d, want 2", job.NumNodes)
	}

	ar := archive.GetHandle()
	data, err := ar.LoadJobData(job)
	if err != nil {
		t.Fatal(err)
	}

	if len(data) != 8 {
		t.Errorf("Job data length: Got %d, want 8", len(data))
	}

	r := map[string]string{
		"mem_used":  "GB",
		"net_bw":    "KB/s",
		"cpu_power": "W",
		"cpu_used":  "",
		"file_bw":   "KB/s",
		"flops_any": "F/s",
		"mem_bw":    "GB/s",
		"ipc":       "IPC",
	}
	for name, scopes := range data {
		for _, metric := range scopes {
			if metric.Unit.Base != r[name] {
				t.Errorf("Metric %s unit: Got %s, want %s", name, metric.Unit.Base, r[name])
			}
		}
	}
}