diff --git a/internal/api/rest.go b/internal/api/rest.go
index 3525fba..c4af330 100644
--- a/internal/api/rest.go
+++ b/internal/api/rest.go
@@ -99,12 +99,12 @@ type StartJobApiResponse struct {
 // @Description Request to stop running job using stoptime and final state.
 // @Description They are only required if no database id was provided with endpoint.
 type StopJobApiRequest struct {
-    // Stop Time of job as epoch
+    // Stop Time of job as epoch
     StopTime int64 `json:"stopTime" validate:"required" example:"1649763839"`
-    State schema.JobState `json:"jobState" validate:"required" example:"completed" enums:"completed,failed,cancelled,stopped,timeout"` // Final job state
-    JobId *int64 `json:"jobId" example:"123000"` // Cluster Job ID of job
-    Cluster *string `json:"cluster" example:"fritz"` // Cluster of job
-    StartTime *int64 `json:"startTime" example:"1649723812"` // Start Time of job as epoch
+    State schema.JobState `json:"jobState" validate:"required" example:"completed" enums:"completed,failed,cancelled,stopped,timeout"` // Final job state
+    JobId *int64 `json:"jobId" example:"123000"` // Cluster Job ID of job
+    Cluster *string `json:"cluster" example:"fritz"` // Cluster of job
+    StartTime *int64 `json:"startTime" example:"1649723812"` // Start Time of job as epoch
 }
 
 // ErrorResponse model
@@ -723,13 +723,13 @@ func (api *RestApi) updateUser(rw http.ResponseWriter, r *http.Request) {
     delrole := r.FormValue("remove-role")
 
     // TODO: Handle anything but roles...
-    if (newrole != "") {
+    if newrole != "" {
         if err := api.Authentication.AddRole(r.Context(), mux.Vars(r)["id"], newrole); err != nil {
             http.Error(rw, err.Error(), http.StatusUnprocessableEntity)
             return
         }
         rw.Write([]byte("Add Role Success"))
-    } else if (delrole != "") {
+    } else if delrole != "" {
         if err := api.Authentication.RemoveRole(r.Context(), mux.Vars(r)["id"], delrole); err != nil {
             http.Error(rw, err.Error(), http.StatusUnprocessableEntity)
             return
diff --git a/internal/metricdata/cc-metric-store.go b/internal/metricdata/cc-metric-store.go
index 62617b0..4e3bfcd 100644
--- a/internal/metricdata/cc-metric-store.go
+++ b/internal/metricdata/cc-metric-store.go
@@ -278,7 +278,7 @@ func (ccms *CCMetricStore) buildQueries(
 scopesLoop:
     for _, requestedScope := range scopes {
         nativeScope := mc.Scope
-        if nativeScope == schema.MetricScopeAccelerator && job.NumAcc == 0 {
+        if nativeScope == schema.MetricScopeAccelerator && job.NumAcc == nil {
             continue
         }
 
diff --git a/internal/repository/init.go b/internal/repository/init.go
index e813b44..5e9cb6e 100644
--- a/internal/repository/init.go
+++ b/internal/repository/init.go
@@ -351,7 +351,7 @@ func SanityChecks(job *schema.BaseJob) error {
     if len(job.Resources) == 0 || len(job.User) == 0 {
         return fmt.Errorf("'resources' and 'user' should not be empty")
     }
-    if job.NumAcc < 0 || job.NumHWThreads < 0 || job.NumNodes < 1 {
+    if *job.NumAcc < 0 || *job.NumHWThreads < 0 || job.NumNodes < 1 {
         return fmt.Errorf("'numNodes', 'numAcc' or 'numHWThreads' invalid")
     }
     if len(job.Resources) != int(job.NumNodes) {
diff --git a/pkg/schema/job.go b/pkg/schema/job.go
index df2831c..300abe1 100644
--- a/pkg/schema/job.go
+++ b/pkg/schema/job.go
@@ -21,18 +21,18 @@ type BaseJob struct {
     Project string `json:"project" db:"project" example:"abcd200"` // The unique identifier of a project
     Cluster string `json:"cluster" db:"cluster" example:"fritz"` // The unique identifier of a cluster
     SubCluster string `json:"subCluster" db:"subcluster" example:"main"` // The unique identifier of a sub cluster
-    Partition string `json:"partition" db:"partition" example:"main"` // The Slurm partition to which the job was submitted
-    ArrayJobId int64 `json:"arrayJobId" db:"array_job_id" example:"123000"` // The unique identifier of an array job
+    Partition *string `json:"partition,omitempty" db:"partition" example:"main"` // The Slurm partition to which the job was submitted
+    ArrayJobId *int64 `json:"arrayJobId,omitempty" db:"array_job_id" example:"123000"` // The unique identifier of an array job
     NumNodes int32 `json:"numNodes" db:"num_nodes" example:"2" minimum:"1"` // Number of nodes used (Min > 0)
-    NumHWThreads int32 `json:"numHwthreads" db:"num_hwthreads" example:"20" minimum:"1"` // Number of HWThreads used (Min > 0)
-    NumAcc int32 `json:"numAcc" db:"num_acc" example:"2" minimum:"1"` // Number of accelerators used (Min > 0)
+    NumHWThreads *int32 `json:"numHwthreads,omitempty" db:"num_hwthreads" example:"20" minimum:"1"` // Number of HWThreads used (Min > 0)
+    NumAcc *int32 `json:"numAcc,omitempty" db:"num_acc" example:"2" minimum:"1"` // Number of accelerators used (Min > 0)
     Exclusive int32 `json:"exclusive" db:"exclusive" example:"1" minimum:"0" maximum:"2"` // Specifies how nodes are shared: 0 - Shared among multiple jobs of multiple users, 1 - Job exclusive (Default), 2 - Shared among multiple jobs of same user
-    MonitoringStatus int32 `json:"monitoringStatus" db:"monitoring_status" example:"1" minimum:"0" maximum:"3"` // State of monitoring system during job run: 0 - Disabled, 1 - Running or Archiving (Default), 2 - Archiving Failed, 3 - Archiving Successfull
-    SMT int32 `json:"smt" db:"smt" example:"4"` // SMT threads used by job
+    MonitoringStatus int32 `json:"monitoringStatus,omitempty" db:"monitoring_status" example:"1" minimum:"0" maximum:"3"` // State of monitoring system during job run: 0 - Disabled, 1 - Running or Archiving (Default), 2 - Archiving Failed, 3 - Archiving Successfull
+    SMT *int32 `json:"smt,omitempty" db:"smt" example:"4"` // SMT threads used by job
     State JobState `json:"jobState" db:"job_state" example:"completed" enums:"completed,failed,cancelled,stopped,timeout,out_of_memory"` // Final state of job
     Duration int32 `json:"duration" db:"duration" example:"43200" minimum:"1"` // Duration of job in seconds (Min > 0)
-    Walltime int64 `json:"walltime" db:"walltime" example:"86400" minimum:"1"` // Requested walltime of job in seconds (Min > 0)
-    Tags []*Tag `json:"tags"` // List of tags
+    Walltime *int64 `json:"walltime,omitempty" db:"walltime" example:"86400" minimum:"1"` // Requested walltime of job in seconds (Min > 0)
+    Tags []*Tag `json:"tags,omitempty"` // List of tags
     RawResources []byte `json:"-" db:"resources"` // Resources used by job [As Bytes]
     Resources []*Resource `json:"resources"` // Resources used by job
     RawMetaData []byte `json:"-" db:"meta_data"` // Additional information about the job [As Bytes]
diff --git a/pkg/schema/schemas/job-meta.schema.json b/pkg/schema/schemas/job-meta.schema.json
index b47e5d1..cec193f 100644
--- a/pkg/schema/schemas/job-meta.schema.json
+++ b/pkg/schema/schemas/job-meta.schema.json
@@ -338,6 +338,7 @@
         "user",
         "project",
         "cluster",
+        "subcluster",
         "numNodes",
         "exclusive",
         "startTime",
diff --git a/test/integration_test.go b/test/integration_test.go
index 607907b..0189f7f 100644
--- a/test/integration_test.go
+++ b/test/integration_test.go
@@ -448,15 +448,15 @@ func TestRestApi(t *testing.T) {
         job.Project != "testproj" ||
         job.Cluster != "testcluster" ||
         job.SubCluster != "sc1" ||
-        job.Partition != "default" ||
-        job.Walltime != 3600 ||
-        job.ArrayJobId != 0 ||
+        *job.Partition != "default" ||
+        *job.Walltime != 3600 ||
+        job.ArrayJobId != nil ||
         job.NumNodes != 1 ||
-        job.NumHWThreads != 8 ||
-        job.NumAcc != 0 ||
+        *job.NumHWThreads != 8 ||
+        job.NumAcc != nil ||
         job.Exclusive != 1 ||
         job.MonitoringStatus != 1 ||
-        job.SMT != 1 ||
+        *job.SMT != 1 ||
         !reflect.DeepEqual(job.Resources, []*schema.Resource{{Hostname: "host123", HWThreads: []int{0, 1, 2, 3, 4, 5, 6, 7}}}) ||
         job.StartTime.Unix() != 123456789 {
         t.Fatalf("unexpected job properties: %#v", job)
diff --git a/tools/archive-migration/job.go b/tools/archive-migration/job.go
index b0e9558..c98f44c 100644
--- a/tools/archive-migration/job.go
+++ b/tools/archive-migration/job.go
@@ -21,18 +21,18 @@ type BaseJob struct {
     Project string `json:"project" db:"project" example:"abcd200"` // The unique identifier of a project
     Cluster string `json:"cluster" db:"cluster" example:"fritz"` // The unique identifier of a cluster
     SubCluster string `json:"subCluster" db:"subcluster" example:"main"` // The unique identifier of a sub cluster
-    Partition string `json:"partition" db:"partition" example:"main"` // The Slurm partition to which the job was submitted
-    ArrayJobId int64 `json:"arrayJobId" db:"array_job_id" example:"123000"` // The unique identifier of an array job
+    Partition *string `json:"partition" db:"partition" example:"main"` // The Slurm partition to which the job was submitted
+    ArrayJobId *int64 `json:"arrayJobId" db:"array_job_id" example:"123000"` // The unique identifier of an array job
     NumNodes int32 `json:"numNodes" db:"num_nodes" example:"2" minimum:"1"` // Number of nodes used (Min > 0)
-    NumHWThreads int32 `json:"numHwthreads" db:"num_hwthreads" example:"20" minimum:"1"` // Number of HWThreads used (Min > 0)
-    NumAcc int32 `json:"numAcc" db:"num_acc" example:"2" minimum:"1"` // Number of accelerators used (Min > 0)
+    NumHWThreads *int32 `json:"numHwthreads" db:"num_hwthreads" example:"20" minimum:"1"` // Number of HWThreads used (Min > 0)
+    NumAcc *int32 `json:"numAcc" db:"num_acc" example:"2" minimum:"1"` // Number of accelerators used (Min > 0)
     Exclusive int32 `json:"exclusive" db:"exclusive" example:"1" minimum:"0" maximum:"2"` // Specifies how nodes are shared: 0 - Shared among multiple jobs of multiple users, 1 - Job exclusive (Default), 2 - Shared among multiple jobs of same user
     MonitoringStatus int32 `json:"monitoringStatus" db:"monitoring_status" example:"1" minimum:"0" maximum:"3"` // State of monitoring system during job run: 0 - Disabled, 1 - Running or Archiving (Default), 2 - Archiving Failed, 3 - Archiving Successfull
-    SMT int32 `json:"smt" db:"smt" example:"4"` // SMT threads used by job
+    SMT *int32 `json:"smt" db:"smt" example:"4"` // SMT threads used by job
     State JobState `json:"jobState" db:"job_state" example:"completed" enums:"completed,failed,cancelled,stopped,timeout,out_of_memory"` // Final state of job
     Duration int32 `json:"duration" db:"duration" example:"43200" minimum:"1"` // Duration of job in seconds (Min > 0)
-    Walltime int64 `json:"walltime" db:"walltime" example:"86400" minimum:"1"` // Requested walltime of job in seconds (Min > 0)
-    Tags []*Tag `json:"tags"` // List of tags
+    Walltime *int64 `json:"walltime" db:"walltime" example:"86400" minimum:"1"` // Requested walltime of job in seconds (Min > 0)
+    Tags []*schema.Tag `json:"tags"` // List of tags
     RawResources []byte `json:"-" db:"resources"` // Resources used by job [As Bytes]
     Resources []*Resource `json:"resources"` // Resources used by job
     RawMetaData []byte `json:"-" db:"meta_data"` // Additional information about the job [As Bytes]
diff --git a/tools/archive-migration/main.go b/tools/archive-migration/main.go
index 80233bd..e644407 100644
--- a/tools/archive-migration/main.go
+++ b/tools/archive-migration/main.go
@@ -36,34 +36,30 @@ func loadJobData(filename string) (*JobData, error) {
 
 func deepCopyJobMeta(j *JobMeta) schema.JobMeta {
     var jn schema.JobMeta
 
-    jn.StartTime = j.StartTime
+    //required properties
+    jn.JobID = j.JobID
     jn.User = j.User
     jn.Project = j.Project
     jn.Cluster = j.Cluster
     jn.SubCluster = j.SubCluster
-    jn.Partition = j.Partition
-    jn.ArrayJobId = j.ArrayJobId
     jn.NumNodes = j.NumNodes
-    jn.NumHWThreads = j.NumHWThreads
-    jn.NumAcc = j.NumAcc
     jn.Exclusive = j.Exclusive
-    jn.MonitoringStatus = j.MonitoringStatus
-    jn.SMT = j.SMT
-    jn.Duration = j.Duration
-    jn.Walltime = j.Walltime
+    jn.StartTime = j.StartTime
     jn.State = schema.JobState(j.State)
-    jn.Exclusive = j.Exclusive
-    jn.Exclusive = j.Exclusive
-    jn.Exclusive = j.Exclusive
+    jn.Duration = j.Duration
 
     for _, ro := range j.Resources {
         var rn schema.Resource
         rn.Hostname = ro.Hostname
         rn.Configuration = ro.Configuration
-        hwt := make([]int, len(ro.HWThreads))
-        copy(hwt, ro.HWThreads)
-        acc := make([]string, len(ro.Accelerators))
-        copy(acc, ro.Accelerators)
+        if ro.HWThreads != nil {
+            hwt := make([]int, len(ro.HWThreads))
+            copy(hwt, ro.HWThreads)
+        }
+        if ro.Accelerators != nil {
+            acc := make([]string, len(ro.Accelerators))
+            copy(acc, ro.Accelerators)
+        }
         jn.Resources = append(jn.Resources, &rn)
     }
@@ -71,6 +67,19 @@ func deepCopyJobMeta(j *JobMeta) schema.JobMeta {
         jn.MetaData[k] = v
     }
 
+    //optional properties
+    jn.Partition = j.Partition
+    jn.ArrayJobId = j.ArrayJobId
+    jn.NumHWThreads = j.NumHWThreads
+    jn.NumAcc = j.NumAcc
+    jn.MonitoringStatus = j.MonitoringStatus
+    jn.SMT = j.SMT
+    jn.Walltime = j.Walltime
+
+    for _, t := range j.Tags {
+        jn.Tags = append(jn.Tags, t)
+    }
+
     return jn
 }