Mirror of https://github.com/ClusterCockpit/cc-backend (synced 2024-12-25 12:59:06 +01:00)

commit a642e9cc7c (parent c9ee8b552d)
Add working, but manually entered jobName url query

The commit threads a new jobName attribute through the whole stack: the GraphQL schema gains a jobName field on Job and a jobName filter on JobFilter, the generated gqlgen code and the resolvers expose it, the job repository reads it out of the job.meta_data JSON column (with caching), and the web frontend accepts a manually entered jobName URL query parameter and forwards it as a filter.
@@ -10,6 +10,7 @@ type Job {
   jobId: Int!
   user: String!
   project: String!
+  jobName: String
   cluster: String!
   subCluster: String!
   startTime: Time!
@@ -201,6 +202,7 @@ input JobFilter {
   arrayJobId: Int
   user: StringInput
   project: StringInput
+  jobName: StringInput
   cluster: StringInput
   partition: StringInput
   duration: IntRange
@@ -88,6 +88,7 @@ type ComplexityRoot struct {
         Exclusive        func(childComplexity int) int
         ID               func(childComplexity int) int
         JobID            func(childComplexity int) int
+        JobName          func(childComplexity int) int
         MetaData         func(childComplexity int) int
         MonitoringStatus func(childComplexity int) int
         NumAcc           func(childComplexity int) int
@@ -262,6 +263,8 @@ type ClusterResolver interface {
     Partitions(ctx context.Context, obj *schema.Cluster) ([]string, error)
 }
 type JobResolver interface {
+    JobName(ctx context.Context, obj *schema.Job) (*string, error)
+
     Tags(ctx context.Context, obj *schema.Job) ([]*schema.Tag, error)
 
     MetaData(ctx context.Context, obj *schema.Job) (interface{}, error)
@@ -451,6 +454,13 @@ func (e *executableSchema) Complexity(typeName, field string, childComplexity in
 
         return e.complexity.Job.JobID(childComplexity), true
 
+    case "Job.jobName":
+        if e.complexity.Job.JobName == nil {
+            break
+        }
+
+        return e.complexity.Job.JobName(childComplexity), true
+
     case "Job.metaData":
         if e.complexity.Job.MetaData == nil {
             break
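Both `ComplexityRoot` hunks are gqlgen bookkeeping: every schema field, including the new `Job.jobName`, gets a complexity hook so incoming queries can be priced and capped before execution. A minimal sketch of wiring a generated schema behind such a budget (assuming gqlgen's stock `handler` and `extension` packages; this server setup is illustrative, not code from this commit):

```go
package server

import (
	"net/http"

	"github.com/99designs/gqlgen/graphql"
	"github.com/99designs/gqlgen/graphql/handler"
	"github.com/99designs/gqlgen/graphql/handler/extension"
)

// NewServer puts a generated executable schema behind a fixed complexity
// budget. gqlgen sums per-field costs via the generated Complexity() switch
// (the one extended above) and rejects too-expensive queries before any
// resolver runs.
func NewServer(es graphql.ExecutableSchema) http.Handler {
	srv := handler.NewDefaultServer(es)
	srv.Use(extension.FixedComplexityLimit(200)) // the budget is an arbitrary example
	return srv
}
```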
@@ -1383,6 +1393,7 @@ type Job {
   jobId: Int!
   user: String!
   project: String!
+  jobName: String
   cluster: String!
   subCluster: String!
   startTime: Time!
@@ -1574,6 +1585,7 @@ input JobFilter {
   arrayJobId: Int
   user: StringInput
   project: StringInput
+  jobName: StringInput
   cluster: StringInput
   partition: StringInput
   duration: IntRange
@@ -3029,6 +3041,47 @@ func (ec *executionContext) fieldContext_Job_project(ctx context.Context, field
     return fc, nil
 }
 
+func (ec *executionContext) _Job_jobName(ctx context.Context, field graphql.CollectedField, obj *schema.Job) (ret graphql.Marshaler) {
+    fc, err := ec.fieldContext_Job_jobName(ctx, field)
+    if err != nil {
+        return graphql.Null
+    }
+    ctx = graphql.WithFieldContext(ctx, fc)
+    defer func() {
+        if r := recover(); r != nil {
+            ec.Error(ctx, ec.Recover(ctx, r))
+            ret = graphql.Null
+        }
+    }()
+    resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
+        ctx = rctx // use context from middleware stack in children
+        return ec.resolvers.Job().JobName(rctx, obj)
+    })
+    if err != nil {
+        ec.Error(ctx, err)
+        return graphql.Null
+    }
+    if resTmp == nil {
+        return graphql.Null
+    }
+    res := resTmp.(*string)
+    fc.Result = res
+    return ec.marshalOString2ᚖstring(ctx, field.Selections, res)
+}
+
+func (ec *executionContext) fieldContext_Job_jobName(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) {
+    fc = &graphql.FieldContext{
+        Object:     "Job",
+        Field:      field,
+        IsMethod:   true,
+        IsResolver: true,
+        Child: func(ctx context.Context, field graphql.CollectedField) (*graphql.FieldContext, error) {
+            return nil, errors.New("field of type String does not have child fields")
+        },
+    }
+    return fc, nil
+}
+
 func (ec *executionContext) _Job_cluster(ctx context.Context, field graphql.CollectedField, obj *schema.Job) (ret graphql.Marshaler) {
     fc, err := ec.fieldContext_Job_cluster(ctx, field)
     if err != nil {
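The generated `_Job_jobName` executor follows gqlgen's standard field pattern: build a field context, install a `recover()` guard so a panicking resolver degrades to a `null` field instead of tearing down the whole request, run the actual resolver through the middleware chain, then marshal the nullable `*string` result. The recover-to-null idea in isolation, as a standalone sketch (illustrative names, standard library only):

```go
package main

import "fmt"

// safeResolve runs a field resolver and converts a panic into an error plus
// a nil result, mirroring what the generated wrapper does with ec.Recover
// and graphql.Null.
func safeResolve(resolve func() (*string, error)) (res *string, err error) {
	defer func() {
		if r := recover(); r != nil {
			res, err = nil, fmt.Errorf("resolver panicked: %v", r)
		}
	}()
	return resolve()
}

func main() {
	v, err := safeResolve(func() (*string, error) { panic("boom") })
	fmt.Println(v, err) // <nil> resolver panicked: boom

	name := "simulation-42"
	v, err = safeResolve(func() (*string, error) { return &name, nil })
	fmt.Println(*v, err) // simulation-42 <nil>
}
```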
@@ -4220,6 +4273,8 @@ func (ec *executionContext) fieldContext_JobResultList_items(ctx context.Context
                 return ec.fieldContext_Job_user(ctx, field)
             case "project":
                 return ec.fieldContext_Job_project(ctx, field)
+            case "jobName":
+                return ec.fieldContext_Job_jobName(ctx, field)
             case "cluster":
                 return ec.fieldContext_Job_cluster(ctx, field)
             case "subCluster":
@@ -6065,6 +6120,8 @@ func (ec *executionContext) fieldContext_Query_job(ctx context.Context, field gr
                 return ec.fieldContext_Job_user(ctx, field)
             case "project":
                 return ec.fieldContext_Job_project(ctx, field)
+            case "jobName":
+                return ec.fieldContext_Job_jobName(ctx, field)
             case "cluster":
                 return ec.fieldContext_Job_cluster(ctx, field)
             case "subCluster":
@@ -10335,7 +10392,7 @@ func (ec *executionContext) unmarshalInputJobFilter(ctx context.Context, obj int
         asMap[k] = v
     }
 
-    fieldsInOrder := [...]string{"tags", "jobId", "arrayJobId", "user", "project", "cluster", "partition", "duration", "minRunningFor", "numNodes", "numAccelerators", "numHWThreads", "startTime", "state", "flopsAnyAvg", "memBwAvg", "loadAvg", "memUsedMax"}
+    fieldsInOrder := [...]string{"tags", "jobId", "arrayJobId", "user", "project", "jobName", "cluster", "partition", "duration", "minRunningFor", "numNodes", "numAccelerators", "numHWThreads", "startTime", "state", "flopsAnyAvg", "memBwAvg", "loadAvg", "memUsedMax"}
     for _, k := range fieldsInOrder {
         v, ok := asMap[k]
         if !ok {
@@ -10382,6 +10439,14 @@ func (ec *executionContext) unmarshalInputJobFilter(ctx context.Context, obj int
             if err != nil {
                 return it, err
             }
+        case "jobName":
+            var err error
+
+            ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("jobName"))
+            it.JobName, err = ec.unmarshalOStringInput2ᚖgithubᚗcomᚋClusterCockpitᚋccᚑbackendᚋinternalᚋgraphᚋmodelᚐStringInput(ctx, v)
+            if err != nil {
+                return it, err
+            }
         case "cluster":
             var err error
 
@@ -10946,6 +11011,23 @@ func (ec *executionContext) _Job(ctx context.Context, sel ast.SelectionSet, obj
             if out.Values[i] == graphql.Null {
                 atomic.AddUint32(&invalids, 1)
             }
+        case "jobName":
+            field := field
+
+            innerFunc := func(ctx context.Context) (res graphql.Marshaler) {
+                defer func() {
+                    if r := recover(); r != nil {
+                        ec.Error(ctx, ec.Recover(ctx, r))
+                    }
+                }()
+                res = ec._Job_jobName(ctx, field, obj)
+                return res
+            }
+
+            out.Concurrently(i, func() graphql.Marshaler {
+                return innerFunc(ctx)
+
+            })
         case "cluster":
 
             out.Values[i] = ec._Job_cluster(ctx, field, obj)
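Since `jobName` is resolver-backed (a cache/database lookup) rather than a plain struct field, the generated `_Job` marshaler schedules it through `out.Concurrently`, letting it run in parallel with other resolver-backed fields; `field := field` pins the loop variable before the closure captures it. The same fan-out pattern with plain goroutines (a sketch with illustrative names):

```go
package main

import (
	"fmt"
	"sync"
)

// resolveConcurrently evaluates independent field resolvers in parallel and
// collects results by index, like out.Concurrently does for resolver-backed
// fields such as Job.jobName.
func resolveConcurrently(resolvers []func() string) []string {
	out := make([]string, len(resolvers))
	var wg sync.WaitGroup
	for i, r := range resolvers {
		i, r := i, r // pin loop variables, same trick as `field := field`
		wg.Add(1)
		go func() {
			defer wg.Done()
			out[i] = r()
		}()
	}
	wg.Wait()
	return out
}

func main() {
	fields := []func() string{
		func() string { return `jobName: "simulation-42"` },
		func() string { return `tags: ["gpu"]` },
	}
	fmt.Println(resolveConcurrently(fields))
}
```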
@@ -42,6 +42,7 @@ type JobFilter struct {
     ArrayJobID *int             `json:"arrayJobId"`
     User       *StringInput     `json:"user"`
     Project    *StringInput     `json:"project"`
+    JobName    *StringInput     `json:"jobName"`
     Cluster    *StringInput     `json:"cluster"`
     Partition  *StringInput     `json:"partition"`
     Duration   *schema.IntRange `json:"duration"`
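`StringInput` is the shared match-mode wrapper used by every string filter in `JobFilter`. Its definition is not part of this diff; judging by how `buildStringCondition` consumes it further down, it presumably looks roughly like this (a hedged reconstruction):

```go
package model

// StringInput (hedged reconstruction): at most one field is set, and the
// set field selects the match mode later applied by buildStringCondition.
type StringInput struct {
	Eq         *string `json:"eq"`
	Contains   *string `json:"contains"`
	StartsWith *string `json:"startsWith"`
	EndsWith   *string `json:"endsWith"`
}
```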
@@ -24,6 +24,11 @@ func (r *clusterResolver) Partitions(ctx context.Context, obj *schema.Cluster) (
     return r.Repo.Partitions(obj.Name)
 }
 
+// JobName is the resolver for the jobName field.
+func (r *jobResolver) JobName(ctx context.Context, obj *schema.Job) (*string, error) {
+    return r.Repo.FetchJobName(obj)
+}
+
 // Tags is the resolver for the tags field.
 func (r *jobResolver) Tags(ctx context.Context, obj *schema.Job) ([]*schema.Tag, error) {
     return r.Repo.GetTags(&obj.ID)
@@ -52,7 +52,7 @@ func GetJobRepository() *JobRepository {
 var jobColumns []string = []string{
     "job.id", "job.job_id", "job.user", "job.project", "job.cluster", "job.subcluster", "job.start_time", "job.partition", "job.array_job_id",
     "job.num_nodes", "job.num_hwthreads", "job.num_acc", "job.exclusive", "job.monitoring_status", "job.smt", "job.job_state",
-    "job.duration", "job.walltime", "job.resources", // "job.meta_data",
+    "job.duration", "job.walltime", "job.resources", "job.meta_data",
 }
 
 func scanJob(row interface{ Scan(...interface{}) error }) (*schema.Job, error) {
@@ -60,7 +60,7 @@ func scanJob(row interface{ Scan(...interface{}) error }) (*schema.Job, error) {
     if err := row.Scan(
         &job.ID, &job.JobID, &job.User, &job.Project, &job.Cluster, &job.SubCluster, &job.StartTimeUnix, &job.Partition, &job.ArrayJobId,
         &job.NumNodes, &job.NumHWThreads, &job.NumAcc, &job.Exclusive, &job.MonitoringStatus, &job.SMT, &job.State,
-        &job.Duration, &job.Walltime, &job.RawResources /*&job.MetaData*/); err != nil {
+        &job.Duration, &job.Walltime, &job.RawResources, &job.RawMetaData); err != nil {
         return nil, err
     }
 
@@ -68,6 +68,10 @@ func scanJob(row interface{ Scan(...interface{}) error }) (*schema.Job, error) {
         return nil, err
     }
 
+    if err := json.Unmarshal(job.RawMetaData, &job.MetaData); err != nil {
+        return nil, err
+    }
+
     job.StartTime = time.Unix(job.StartTimeUnix, 0)
     if job.Duration == 0 && job.State == schema.JobStateRunning {
         job.Duration = int32(time.Since(job.StartTime).Seconds())
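With `job.meta_data` now part of `jobColumns`, every scanned row carries the raw JSON blob, which `scanJob` immediately decodes into `job.MetaData`. The scan-then-unmarshal pattern in isolation (hypothetical table and struct, standard library plus any `database/sql` driver):

```go
package repository

import (
	"database/sql"
	"encoding/json"
)

// jobRow mirrors the relevant slice of schema.Job: the raw JSON column is
// scanned into RawMetaData, then decoded into MetaData.
type jobRow struct {
	ID          int64
	RawMetaData []byte
	MetaData    map[string]string
}

// scanJobRow reads one row whose meta_data column holds a JSON object such
// as {"jobName": "simulation-42"}. Table and columns are illustrative.
func scanJobRow(db *sql.DB, id int64) (*jobRow, error) {
	job := &jobRow{}
	row := db.QueryRow("SELECT id, meta_data FROM job WHERE id = ?", id)
	if err := row.Scan(&job.ID, &job.RawMetaData); err != nil {
		return nil, err
	}
	if len(job.RawMetaData) > 0 { // meta_data may be NULL
		if err := json.Unmarshal(job.RawMetaData, &job.MetaData); err != nil {
			return nil, err
		}
	}
	return job, nil
}
```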
@@ -77,6 +81,37 @@ func scanJob(row interface{ Scan(...interface{}) error }) (*schema.Job, error) {
     return job, nil
 }
 
+func (r *JobRepository) FetchJobName(job *schema.Job) (*string, error) {
+    cachekey := fmt.Sprintf("metadata:%d", job.ID)
+    if cached := r.cache.Get(cachekey, nil); cached != nil {
+        job.MetaData = cached.(map[string]string)
+        if jobName := job.MetaData["jobName"]; jobName != "" {
+            return &jobName, nil
+        }
+    }
+
+    if err := sq.Select("job.meta_data").From("job").Where("job.id = ?", job.ID).
+        RunWith(r.stmtCache).QueryRow().Scan(&job.RawMetaData); err != nil {
+        return nil, err
+    }
+
+    if len(job.RawMetaData) == 0 {
+        return nil, nil
+    }
+
+    if err := json.Unmarshal(job.RawMetaData, &job.MetaData); err != nil {
+        return nil, err
+    }
+
+    r.cache.Put(cachekey, job.MetaData, len(job.RawMetaData), 24*time.Hour)
+
+    if jobName := job.MetaData["jobName"]; jobName != "" {
+        return &jobName, nil
+    } else {
+        return new(string), nil
+    }
+}
+
 func (r *JobRepository) FetchMetadata(job *schema.Job) (map[string]string, error) {
     cachekey := fmt.Sprintf("metadata:%d", job.ID)
     if cached := r.cache.Get(cachekey, nil); cached != nil {
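`FetchJobName` is a read-through cache: it first consults the in-process metadata cache under `metadata:<id>` and only on a miss loads and decodes `job.meta_data` from the database, caching the decoded map for 24 hours. Two subtleties are visible above: a cache hit whose metadata lacks a `jobName` still falls through to a fresh database query, and a lookup on a job without a name returns a pointer to the empty string, while `nil` is reserved for jobs with no metadata at all. The generic shape as a standalone sketch (a stripped-down stand-in for the repository's LRU cache):

```go
package main

import (
	"fmt"
	"sync"
	"time"
)

// readThrough caches loaded values under a key with a TTL. It is a
// simplified stand-in for the cache used by JobRepository.
type readThrough struct {
	mu      sync.Mutex
	entries map[string]entry
}

type entry struct {
	value   map[string]string
	expires time.Time
}

func (c *readThrough) Get(key string, load func() (map[string]string, error)) (map[string]string, error) {
	c.mu.Lock()
	defer c.mu.Unlock()
	if e, ok := c.entries[key]; ok && time.Now().Before(e.expires) {
		return e.value, nil // hit: no database round trip
	}
	v, err := load() // miss: hit the database once
	if err != nil {
		return nil, err
	}
	c.entries[key] = entry{value: v, expires: time.Now().Add(24 * time.Hour)}
	return v, nil
}

func main() {
	c := &readThrough{entries: map[string]entry{}}
	meta, _ := c.Get("metadata:42", func() (map[string]string, error) {
		return map[string]string{"jobName": "simulation-42"}, nil
	})
	fmt.Println(meta["jobName"])
}
```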
@@ -118,6 +118,9 @@ func BuildWhereClause(filter *model.JobFilter, query sq.SelectBuilder) sq.Select
     if filter.Project != nil {
         query = buildStringCondition("job.project", filter.Project, query)
     }
+    if filter.JobName != nil {
+        query = buildStringCondition("job.meta_data", filter.JobName, query)
+    }
     if filter.Cluster != nil {
         query = buildStringCondition("job.cluster", filter.Cluster, query)
     }
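Note that reusing `buildStringCondition` on `job.meta_data` matches against the raw JSON text of the whole metadata blob, not against the extracted `jobName` value: a `contains` filter becomes a `LIKE '%...%'` over the serialized JSON and could in principle also match other keys or values. A hedged sketch of what the existing helper presumably does, based on how its callers use it:

```go
package repository

import sq "github.com/Masterminds/squirrel"

// StringInput is reproduced here for self-containment.
type StringInput struct {
	Eq, Contains, StartsWith, EndsWith *string
}

// buildStringCondition (hedged sketch of the existing helper): maps one
// match mode onto a SQL predicate for the given column. With the column
// "job.meta_data", a contains-filter compiles to
//   WHERE job.meta_data LIKE '%<name>%'
// i.e. a substring match over the serialized JSON blob.
func buildStringCondition(field string, cond *StringInput, query sq.SelectBuilder) sq.SelectBuilder {
	switch {
	case cond.Eq != nil:
		return query.Where(field+" = ?", *cond.Eq)
	case cond.StartsWith != nil:
		return query.Where(field+" LIKE ?", *cond.StartsWith+"%")
	case cond.EndsWith != nil:
		return query.Where(field+" LIKE ?", "%"+*cond.EndsWith)
	case cond.Contains != nil:
		return query.Where(field+" LIKE ?", "%"+*cond.Contains+"%")
	}
	return query
}
```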
@@ -180,6 +180,9 @@ func buildFilterPresets(query url.Values) map[string]interface{} {
         filterPresets["project"] = query.Get("project")
         filterPresets["projectMatch"] = "eq"
     }
+    if query.Get("jobName") != "" {
+        filterPresets["jobName"] = query.Get("jobName")
+    }
     if query.Get("user") != "" {
         filterPresets["user"] = query.Get("user")
         filterPresets["userMatch"] = "eq"
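This is the "manually entered" part of the commit title: there is no filter widget yet, so the only way to set the filter is to type `?jobName=...` into the jobs-list URL, which `buildFilterPresets` lifts into a preset object handed to the Svelte frontend shown in the following hunks. That step in isolation (hypothetical wrapper, standard library only):

```go
package main

import (
	"fmt"
	"net/url"
)

// buildFilterPresets (simplified excerpt): lift recognized query parameters
// into a preset map that is injected into the jobs-list page.
func buildFilterPresets(query url.Values) map[string]interface{} {
	filterPresets := map[string]interface{}{}
	if query.Get("jobName") != "" {
		filterPresets["jobName"] = query.Get("jobName")
	}
	return filterPresets
}

func main() {
	u, _ := url.Parse("https://example.com/monitoring/jobs/?jobName=simulation-42")
	fmt.Println(buildFilterPresets(u.Query())) // map[jobName:simulation-42]
}
```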
@@ -45,6 +45,7 @@
         arrayJobId: filterPresets.arrayJobId || null,
         user: filterPresets.user || '',
         project: filterPresets.project || '',
+        jobName: filterPresets.jobName || '',
 
         numNodes: filterPresets.numNodes || { from: null, to: null },
         numHWThreads: filterPresets.numHWThreads || { from: null, to: null },
@@ -94,6 +95,8 @@
         items.push({ user: { [filters.userMatch]: filters.user } })
     if (filters.project)
         items.push({ project: { [filters.projectMatch]: filters.project } })
+    if (filters.jobName)
+        items.push({ jobName: { contains: filters.jobName } })
     for (let stat of filters.stats)
         items.push({ [stat.field]: { from: stat.from, to: stat.to } })
 
@@ -115,7 +118,7 @@
         opts.push(`state=${state}`)
     if (filters.startTime.from && filters.startTime.to)
         opts.push(`startTime=${dateToUnixEpoch(filters.startTime.from)}-${dateToUnixEpoch(filters.startTime.to)}`)
     for (let tag of filters.tags)
         opts.push(`tag=${tag}`)
     if (filters.duration.from && filters.duration.to)
         opts.push(`duration=${filters.duration.from}-${filters.duration.to}`)
@@ -129,6 +132,8 @@
         opts.push(`userMatch=${filters.userMatch}`)
     if (filters.project)
         opts.push(`project=${filters.project}`)
+    if (filters.jobName)
+        opts.push(`jobName=${filters.jobName}`)
     if (filters.projectMatch != 'contains')
         opts.push(`projectMatch=${filters.projectMatch}`)
 
@@ -32,7 +32,7 @@
     query($filter: [JobFilter!]!, $sorting: OrderByInput!, $paging: PageRequest! ){
         jobs(filter: $filter, order: $sorting, page: $paging) {
             items {
-                id, jobId, user, project, cluster, subCluster, startTime,
+                id, jobId, user, project, jobName, cluster, subCluster, startTime,
                 duration, numNodes, numHWThreads, numAcc, walltime,
                 SMT, exclusive, partition, arrayJobId,
                 monitoringStatus, state,
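End to end, a URL such as `/monitoring/jobs/?jobName=simulation` therefore becomes a GraphQL `jobs` query carrying a `{ jobName: { contains: ... } }` filter, and each returned item now includes the `jobName` field. The same query can be issued against the API directly; a hedged sketch using only the standard library (endpoint path and authentication are assumptions, not taken from this commit):

```go
package main

import (
	"bytes"
	"encoding/json"
	"fmt"
	"net/http"
)

func main() {
	// Ask for jobs whose metadata jobName contains "simulation".
	body, _ := json.Marshal(map[string]interface{}{
		"query": `query($filter: [JobFilter!]!) {
			jobs(filter: $filter) {
				items { jobId jobName cluster startTime }
			}
		}`,
		"variables": map[string]interface{}{
			"filter": []map[string]interface{}{
				{"jobName": map[string]string{"contains": "simulation"}},
			},
		},
	})
	// "/query" is an assumed endpoint path, not taken from this commit.
	resp, err := http.Post("http://localhost:8080/query", "application/json", bytes.NewReader(body))
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()
	fmt.Println(resp.Status)
}
```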