Merge pull request #252 from ClusterCockpit/20_infinite_scroll

20 infinite scroll
Jan Eitzinger, 2024-03-28 17:43:02 +01:00, committed by GitHub
commit baa51db26c
6 changed files with 149 additions and 111 deletions


@@ -278,6 +278,7 @@ type JobResultList {
  offset: Int
  limit: Int
  count: Int
+ hasNextPage: Boolean
}

type JobLinkResultList {


@@ -32,6 +32,7 @@ var Keys schema.ProgramConfig = schema.ProgramConfig{
    "job_view_polarPlotMetrics": []string{"flops_any", "mem_bw", "mem_used"},
    "job_view_selectedMetrics": []string{"flops_any", "mem_bw", "mem_used"},
    "job_view_showFootprint": true,
+   "job_list_usePaging": true,
    "plot_general_colorBackground": true,
    "plot_general_colorscheme": []string{"#00bfff", "#0000ff", "#ff00ff", "#ff0000", "#ff8000", "#ffff00", "#80ff00"},
    "plot_general_lineWidth": 3,


@@ -140,6 +140,7 @@ type ComplexityRoot struct {
    JobResultList struct {
        Count func(childComplexity int) int
+       HasNextPage func(childComplexity int) int
        Items func(childComplexity int) int
        Limit func(childComplexity int) int
        Offset func(childComplexity int) int
@@ -755,6 +756,13 @@ func (e *executableSchema) Complexity(typeName, field string, childComplexity in
        return e.complexity.JobResultList.Count(childComplexity), true

+   case "JobResultList.hasNextPage":
+       if e.complexity.JobResultList.HasNextPage == nil {
+           break
+       }
+
+       return e.complexity.JobResultList.HasNextPage(childComplexity), true
+
    case "JobResultList.items":
        if e.complexity.JobResultList.Items == nil {
            break
@@ -1987,6 +1995,7 @@ type JobResultList {
  offset: Int
  limit: Int
  count: Int
+ hasNextPage: Boolean
}

type JobLinkResultList {
@@ -5221,6 +5230,47 @@ func (ec *executionContext) fieldContext_JobResultList_count(ctx context.Context
    return fc, nil
}

+func (ec *executionContext) _JobResultList_hasNextPage(ctx context.Context, field graphql.CollectedField, obj *model.JobResultList) (ret graphql.Marshaler) {
+   fc, err := ec.fieldContext_JobResultList_hasNextPage(ctx, field)
+   if err != nil {
+       return graphql.Null
+   }
+   ctx = graphql.WithFieldContext(ctx, fc)
+   defer func() {
+       if r := recover(); r != nil {
+           ec.Error(ctx, ec.Recover(ctx, r))
+           ret = graphql.Null
+       }
+   }()
+   resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
+       ctx = rctx // use context from middleware stack in children
+       return obj.HasNextPage, nil
+   })
+   if err != nil {
+       ec.Error(ctx, err)
+       return graphql.Null
+   }
+   if resTmp == nil {
+       return graphql.Null
+   }
+   res := resTmp.(*bool)
+   fc.Result = res
+   return ec.marshalOBoolean2ᚖbool(ctx, field.Selections, res)
+}
+
+func (ec *executionContext) fieldContext_JobResultList_hasNextPage(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) {
+   fc = &graphql.FieldContext{
+       Object: "JobResultList",
+       Field: field,
+       IsMethod: false,
+       IsResolver: false,
+       Child: func(ctx context.Context, field graphql.CollectedField) (*graphql.FieldContext, error) {
+           return nil, errors.New("field of type Boolean does not have child fields")
+       },
+   }
+   return fc, nil
+}
+
func (ec *executionContext) _JobsStatistics_id(ctx context.Context, field graphql.CollectedField, obj *model.JobsStatistics) (ret graphql.Marshaler) {
    fc, err := ec.fieldContext_JobsStatistics_id(ctx, field)
    if err != nil {
@@ -8017,6 +8067,8 @@ func (ec *executionContext) fieldContext_Query_jobs(ctx context.Context, field g
            return ec.fieldContext_JobResultList_limit(ctx, field)
        case "count":
            return ec.fieldContext_JobResultList_count(ctx, field)
+       case "hasNextPage":
+           return ec.fieldContext_JobResultList_hasNextPage(ctx, field)
        }
        return nil, fmt.Errorf("no field named %q was found under type JobResultList", field.Name)
    },
@@ -12226,8 +12278,6 @@ func (ec *executionContext) unmarshalInputFloatRange(ctx context.Context, obj in
    }
    switch k {
    case "from":
-       var err error
-
        ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("from"))
        data, err := ec.unmarshalNFloat2float64(ctx, v)
        if err != nil {
@@ -12235,8 +12285,6 @@ func (ec *executionContext) unmarshalInputFloatRange(ctx context.Context, obj in
        }
        it.From = data
    case "to":
-       var err error
-
        ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("to"))
        data, err := ec.unmarshalNFloat2float64(ctx, v)
        if err != nil {
@@ -12264,8 +12312,6 @@ func (ec *executionContext) unmarshalInputIntRange(ctx context.Context, obj inte
    }
    switch k {
    case "from":
-       var err error
-
        ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("from"))
        data, err := ec.unmarshalNInt2int(ctx, v)
        if err != nil {
@@ -12273,8 +12319,6 @@ func (ec *executionContext) unmarshalInputIntRange(ctx context.Context, obj inte
        }
        it.From = data
    case "to":
-       var err error
-
        ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("to"))
        data, err := ec.unmarshalNInt2int(ctx, v)
        if err != nil {
@@ -12302,8 +12346,6 @@ func (ec *executionContext) unmarshalInputJobFilter(ctx context.Context, obj int
    }
    switch k {
    case "tags":
-       var err error
-
        ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("tags"))
        data, err := ec.unmarshalOID2ᚕstringᚄ(ctx, v)
        if err != nil {
@@ -12311,8 +12353,6 @@ func (ec *executionContext) unmarshalInputJobFilter(ctx context.Context, obj int
        }
        it.Tags = data
    case "jobId":
-       var err error
-
        ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("jobId"))
        data, err := ec.unmarshalOStringInput2ᚖgithubᚗcomᚋClusterCockpitᚋccᚑbackendᚋinternalᚋgraphᚋmodelᚐStringInput(ctx, v)
        if err != nil {
@@ -12320,8 +12360,6 @@ func (ec *executionContext) unmarshalInputJobFilter(ctx context.Context, obj int
        }
        it.JobID = data
    case "arrayJobId":
-       var err error
-
        ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("arrayJobId"))
        data, err := ec.unmarshalOInt2ᚖint(ctx, v)
        if err != nil {
@@ -12329,8 +12367,6 @@ func (ec *executionContext) unmarshalInputJobFilter(ctx context.Context, obj int
        }
        it.ArrayJobID = data
    case "user":
-       var err error
-
        ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("user"))
        data, err := ec.unmarshalOStringInput2ᚖgithubᚗcomᚋClusterCockpitᚋccᚑbackendᚋinternalᚋgraphᚋmodelᚐStringInput(ctx, v)
        if err != nil {
@@ -12338,8 +12374,6 @@ func (ec *executionContext) unmarshalInputJobFilter(ctx context.Context, obj int
        }
        it.User = data
    case "project":
-       var err error
-
        ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("project"))
        data, err := ec.unmarshalOStringInput2ᚖgithubᚗcomᚋClusterCockpitᚋccᚑbackendᚋinternalᚋgraphᚋmodelᚐStringInput(ctx, v)
        if err != nil {
@@ -12347,8 +12381,6 @@ func (ec *executionContext) unmarshalInputJobFilter(ctx context.Context, obj int
        }
        it.Project = data
    case "jobName":
-       var err error
-
        ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("jobName"))
        data, err := ec.unmarshalOStringInput2ᚖgithubᚗcomᚋClusterCockpitᚋccᚑbackendᚋinternalᚋgraphᚋmodelᚐStringInput(ctx, v)
        if err != nil {
@@ -12356,8 +12388,6 @@ func (ec *executionContext) unmarshalInputJobFilter(ctx context.Context, obj int
        }
        it.JobName = data
    case "cluster":
-       var err error
-
        ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("cluster"))
        data, err := ec.unmarshalOStringInput2ᚖgithubᚗcomᚋClusterCockpitᚋccᚑbackendᚋinternalᚋgraphᚋmodelᚐStringInput(ctx, v)
        if err != nil {
@@ -12365,8 +12395,6 @@ func (ec *executionContext) unmarshalInputJobFilter(ctx context.Context, obj int
        }
        it.Cluster = data
    case "partition":
-       var err error
-
        ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("partition"))
        data, err := ec.unmarshalOStringInput2ᚖgithubᚗcomᚋClusterCockpitᚋccᚑbackendᚋinternalᚋgraphᚋmodelᚐStringInput(ctx, v)
        if err != nil {
@@ -12374,8 +12402,6 @@ func (ec *executionContext) unmarshalInputJobFilter(ctx context.Context, obj int
        }
        it.Partition = data
    case "duration":
-       var err error
-
        ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("duration"))
        data, err := ec.unmarshalOIntRange2ᚖgithubᚗcomᚋClusterCockpitᚋccᚑbackendᚋpkgᚋschemaᚐIntRange(ctx, v)
        if err != nil {
@@ -12383,8 +12409,6 @@ func (ec *executionContext) unmarshalInputJobFilter(ctx context.Context, obj int
        }
        it.Duration = data
    case "minRunningFor":
-       var err error
-
        ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("minRunningFor"))
        data, err := ec.unmarshalOInt2ᚖint(ctx, v)
        if err != nil {
@@ -12392,8 +12416,6 @@ func (ec *executionContext) unmarshalInputJobFilter(ctx context.Context, obj int
        }
        it.MinRunningFor = data
    case "numNodes":
-       var err error
-
        ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("numNodes"))
        data, err := ec.unmarshalOIntRange2ᚖgithubᚗcomᚋClusterCockpitᚋccᚑbackendᚋpkgᚋschemaᚐIntRange(ctx, v)
        if err != nil {
@@ -12401,8 +12423,6 @@ func (ec *executionContext) unmarshalInputJobFilter(ctx context.Context, obj int
        }
        it.NumNodes = data
    case "numAccelerators":
-       var err error
-
        ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("numAccelerators"))
        data, err := ec.unmarshalOIntRange2ᚖgithubᚗcomᚋClusterCockpitᚋccᚑbackendᚋpkgᚋschemaᚐIntRange(ctx, v)
        if err != nil {
@@ -12410,8 +12430,6 @@ func (ec *executionContext) unmarshalInputJobFilter(ctx context.Context, obj int
        }
        it.NumAccelerators = data
    case "numHWThreads":
-       var err error
-
        ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("numHWThreads"))
        data, err := ec.unmarshalOIntRange2ᚖgithubᚗcomᚋClusterCockpitᚋccᚑbackendᚋpkgᚋschemaᚐIntRange(ctx, v)
        if err != nil {
@@ -12419,8 +12437,6 @@ func (ec *executionContext) unmarshalInputJobFilter(ctx context.Context, obj int
        }
        it.NumHWThreads = data
    case "startTime":
-       var err error
-
        ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("startTime"))
        data, err := ec.unmarshalOTimeRange2ᚖgithubᚗcomᚋClusterCockpitᚋccᚑbackendᚋpkgᚋschemaᚐTimeRange(ctx, v)
        if err != nil {
@@ -12428,8 +12444,6 @@ func (ec *executionContext) unmarshalInputJobFilter(ctx context.Context, obj int
        }
        it.StartTime = data
    case "state":
-       var err error
-
        ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("state"))
        data, err := ec.unmarshalOJobState2ᚕgithubᚗcomᚋClusterCockpitᚋccᚑbackendᚋpkgᚋschemaᚐJobStateᚄ(ctx, v)
        if err != nil {
@@ -12437,8 +12451,6 @@ func (ec *executionContext) unmarshalInputJobFilter(ctx context.Context, obj int
        }
        it.State = data
    case "flopsAnyAvg":
-       var err error
-
        ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("flopsAnyAvg"))
        data, err := ec.unmarshalOFloatRange2ᚖgithubᚗcomᚋClusterCockpitᚋccᚑbackendᚋinternalᚋgraphᚋmodelᚐFloatRange(ctx, v)
        if err != nil {
@@ -12446,8 +12458,6 @@ func (ec *executionContext) unmarshalInputJobFilter(ctx context.Context, obj int
        }
        it.FlopsAnyAvg = data
    case "memBwAvg":
-       var err error
-
        ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("memBwAvg"))
        data, err := ec.unmarshalOFloatRange2ᚖgithubᚗcomᚋClusterCockpitᚋccᚑbackendᚋinternalᚋgraphᚋmodelᚐFloatRange(ctx, v)
        if err != nil {
@@ -12455,8 +12465,6 @@ func (ec *executionContext) unmarshalInputJobFilter(ctx context.Context, obj int
        }
        it.MemBwAvg = data
    case "loadAvg":
-       var err error
-
        ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("loadAvg"))
        data, err := ec.unmarshalOFloatRange2ᚖgithubᚗcomᚋClusterCockpitᚋccᚑbackendᚋinternalᚋgraphᚋmodelᚐFloatRange(ctx, v)
        if err != nil {
@@ -12464,8 +12472,6 @@ func (ec *executionContext) unmarshalInputJobFilter(ctx context.Context, obj int
        }
        it.LoadAvg = data
    case "memUsedMax":
-       var err error
-
        ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("memUsedMax"))
        data, err := ec.unmarshalOFloatRange2ᚖgithubᚗcomᚋClusterCockpitᚋccᚑbackendᚋinternalᚋgraphᚋmodelᚐFloatRange(ctx, v)
        if err != nil {
@@ -12473,8 +12479,6 @@ func (ec *executionContext) unmarshalInputJobFilter(ctx context.Context, obj int
        }
        it.MemUsedMax = data
    case "exclusive":
-       var err error
-
        ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("exclusive"))
        data, err := ec.unmarshalOInt2ᚖint(ctx, v)
        if err != nil {
@@ -12482,8 +12486,6 @@ func (ec *executionContext) unmarshalInputJobFilter(ctx context.Context, obj int
        }
        it.Exclusive = data
    case "node":
-       var err error
-
        ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("node"))
        data, err := ec.unmarshalOStringInput2ᚖgithubᚗcomᚋClusterCockpitᚋccᚑbackendᚋinternalᚋgraphᚋmodelᚐStringInput(ctx, v)
        if err != nil {
@@ -12515,8 +12517,6 @@ func (ec *executionContext) unmarshalInputOrderByInput(ctx context.Context, obj
    }
    switch k {
    case "field":
-       var err error
-
        ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("field"))
        data, err := ec.unmarshalNString2string(ctx, v)
        if err != nil {
@@ -12524,8 +12524,6 @@ func (ec *executionContext) unmarshalInputOrderByInput(ctx context.Context, obj
        }
        it.Field = data
    case "order":
-       var err error
-
        ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("order"))
        data, err := ec.unmarshalNSortDirectionEnum2githubᚗcomᚋClusterCockpitᚋccᚑbackendᚋinternalᚋgraphᚋmodelᚐSortDirectionEnum(ctx, v)
        if err != nil {
@@ -12553,8 +12551,6 @@ func (ec *executionContext) unmarshalInputPageRequest(ctx context.Context, obj i
    }
    switch k {
    case "itemsPerPage":
-       var err error
-
        ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("itemsPerPage"))
        data, err := ec.unmarshalNInt2int(ctx, v)
        if err != nil {
@@ -12562,8 +12558,6 @@ func (ec *executionContext) unmarshalInputPageRequest(ctx context.Context, obj i
        }
        it.ItemsPerPage = data
    case "page":
-       var err error
-
        ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("page"))
        data, err := ec.unmarshalNInt2int(ctx, v)
        if err != nil {
@@ -12591,8 +12585,6 @@ func (ec *executionContext) unmarshalInputStringInput(ctx context.Context, obj i
    }
    switch k {
    case "eq":
-       var err error
-
        ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("eq"))
        data, err := ec.unmarshalOString2ᚖstring(ctx, v)
        if err != nil {
@@ -12600,8 +12592,6 @@ func (ec *executionContext) unmarshalInputStringInput(ctx context.Context, obj i
        }
        it.Eq = data
    case "neq":
-       var err error
-
        ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("neq"))
        data, err := ec.unmarshalOString2ᚖstring(ctx, v)
        if err != nil {
@@ -12609,8 +12599,6 @@ func (ec *executionContext) unmarshalInputStringInput(ctx context.Context, obj i
        }
        it.Neq = data
    case "contains":
-       var err error
-
        ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("contains"))
        data, err := ec.unmarshalOString2ᚖstring(ctx, v)
        if err != nil {
@@ -12618,8 +12606,6 @@ func (ec *executionContext) unmarshalInputStringInput(ctx context.Context, obj i
        }
        it.Contains = data
    case "startsWith":
-       var err error
-
        ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("startsWith"))
        data, err := ec.unmarshalOString2ᚖstring(ctx, v)
        if err != nil {
@@ -12627,8 +12613,6 @@ func (ec *executionContext) unmarshalInputStringInput(ctx context.Context, obj i
        }
        it.StartsWith = data
    case "endsWith":
-       var err error
-
        ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("endsWith"))
        data, err := ec.unmarshalOString2ᚖstring(ctx, v)
        if err != nil {
@@ -12636,8 +12620,6 @@ func (ec *executionContext) unmarshalInputStringInput(ctx context.Context, obj i
        }
        it.EndsWith = data
    case "in":
-       var err error
-
        ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("in"))
        data, err := ec.unmarshalOString2ᚕstringᚄ(ctx, v)
        if err != nil {
@@ -12665,8 +12647,6 @@ func (ec *executionContext) unmarshalInputTimeRange(ctx context.Context, obj int
    }
    switch k {
    case "from":
-       var err error
-
        ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("from"))
        data, err := ec.unmarshalOTime2ᚖtimeᚐTime(ctx, v)
        if err != nil {
@@ -12674,8 +12654,6 @@ func (ec *executionContext) unmarshalInputTimeRange(ctx context.Context, obj int
        }
        it.From = data
    case "to":
-       var err error
-
        ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("to"))
        data, err := ec.unmarshalOTime2ᚖtimeᚐTime(ctx, v)
        if err != nil {
@@ -13481,6 +13459,8 @@ func (ec *executionContext) _JobResultList(ctx context.Context, sel ast.Selectio
            out.Values[i] = ec._JobResultList_limit(ctx, field, obj)
        case "count":
            out.Values[i] = ec._JobResultList_count(ctx, field, obj)
+       case "hasNextPage":
+           out.Values[i] = ec._JobResultList_hasNextPage(ctx, field, obj)
        default:
            panic("unknown field " + strconv.Quote(field.Name))
        }


@@ -82,6 +82,7 @@ type JobResultList struct {
    Offset *int `json:"offset,omitempty"`
    Limit *int `json:"limit,omitempty"`
    Count *int `json:"count,omitempty"`
+   HasNextPage *bool `json:"hasNextPage,omitempty"`
}

type JobsStatistics struct {
@@ -122,6 +123,9 @@ type MetricHistoPoints struct {
    Data []*MetricHistoPoint `json:"data,omitempty"`
}

+type Mutation struct {
+}
+
type NodeMetrics struct {
    Host string `json:"host"`
    SubCluster string `json:"subCluster"`
@@ -138,6 +142,9 @@ type PageRequest struct {
    Page int `json:"page"`
}

+type Query struct {
+}
+
type StringInput struct {
    Eq *string `json:"eq,omitempty"`
    Neq *string `json:"neq,omitempty"`


@@ -2,7 +2,7 @@ package graph

// This file will be automatically regenerated based on the schema, any resolver implementations
// will be copied through when generating and any unknown code will be moved to the end.
-// Code generated by github.com/99designs/gqlgen version v0.17.40
+// Code generated by github.com/99designs/gqlgen version v0.17.45

import (

@@ -11,6 +11,7 @@ import (
    "strconv"
    "time"

+   "github.com/ClusterCockpit/cc-backend/internal/config"
    "github.com/ClusterCockpit/cc-backend/internal/graph/generated"
    "github.com/ClusterCockpit/cc-backend/internal/graph/model"
    "github.com/ClusterCockpit/cc-backend/internal/metricdata"
@@ -240,8 +241,24 @@ func (r *queryResolver) Jobs(ctx context.Context, filter []*model.JobFilter, pag
        return nil, err
    }

+   if !config.Keys.UiDefaults["job_list_usePaging"].(bool) {
+       hasNextPage := false
+       page.Page += 1
+       nextJobs, err := r.Repo.QueryJobs(ctx, filter, page, order)
+       if err != nil {
+           log.Warn("Error while querying next jobs")
+           return nil, err
+       }
+       if len(nextJobs) > 0 {
+           hasNextPage = true
+       }
+       return &model.JobResultList{Items: jobs, Count: &count, HasNextPage: &hasNextPage}, nil
+   } else {
        return &model.JobResultList{Items: jobs, Count: &count}, nil
+   }
}

// JobsStatistics is the resolver for the jobsStatistics field.
func (r *queryResolver) JobsStatistics(ctx context.Context, filter []*model.JobFilter, metrics []string, page *model.PageRequest, sortBy *model.SortByAggregate, groupBy *model.Aggregate) ([]*model.JobsStatistics, error) {
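
The resolver above switches between classic paging and infinite scroll on the new job_list_usePaging UI default: when paging is disabled it issues a look-ahead query for the following page and reports hasNextPage, so the frontend knows whether another scroll-triggered load will return anything. Fetching page+1 and checking len(nextJobs) > 0 keeps the change small, at the cost of one extra query per request on the infinite-scroll path. Note that config.Keys.UiDefaults["job_list_usePaging"].(bool) panics if the key is missing or holds a non-bool value; the helper below is a minimal defensive sketch, not part of this PR, and assumes UiDefaults is the map[string]interface{} seeded from the defaults added above.

func jobListUsesPaging() bool {
    // Hypothetical helper: read the UI default without risking a panic
    // on a missing key or an unexpected value type.
    if v, ok := config.Keys.UiDefaults["job_list_usePaging"]; ok {
        if b, ok := v.(bool); ok {
            return b
        }
    }
    return true // fall back to classic paging when unset
}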


@@ -30,7 +30,8 @@
  export let metrics = ccconfig.plot_list_selectedMetrics;
  export let showFootprint;

- let itemsPerPage = ccconfig.plot_list_jobsPerPage;
+ let usePaging = ccconfig.job_list_usePaging
+ let itemsPerPage = usePaging ? ccconfig.plot_list_jobsPerPage : 10;
  let page = 1;
  let paging = { itemsPerPage, page };
  let filter = [];
@@ -79,21 +80,27 @@
            loadAvg
          }
        }
        count
+       hasNextPage
      }
    }
  `;

- $: jobs = queryStore({
+ $: jobsStore = queryStore({
    client: client,
    query: query,
    variables: { paging, sorting, filter },
  });

- $: matchedJobs = $jobs.data != null ? $jobs.data.jobs.count : 0;
+ let jobs = []
+ $: if ($initialized && $jobsStore.data) {
+   jobs = [...$jobsStore.data.jobs.items]
+ }
+
+ $: matchedJobs = $jobsStore.data != null ? $jobsStore.data.jobs.count : 0;

  // Force refresh list with existing unchanged variables (== usually would not trigger reactivity)
  export function refresh() {
-   jobs = queryStore({
+   jobsStore = queryStore({
      client: client,
      query: query,
      variables: { paging, sorting, filter },
@@ -132,6 +139,7 @@
        value: value,
      }).subscribe((res) => {
        if (res.fetching === false && !res.error) {
+         jobs = [] // Empty List
          paging = { itemsPerPage: value, page: page }; // Trigger reload of jobList
        } else if (res.fetching === false && res.error) {
          throw res.error;
@@ -140,6 +148,24 @@
      });
  }

+ if (!usePaging) {
+   let scrollMultiplier = 1
+   window.addEventListener('scroll', () => {
+     let {
+       scrollTop,
+       scrollHeight,
+       clientHeight
+     } = document.documentElement;
+
+     if (scrollTop + clientHeight >= scrollHeight && $jobsStore.data != null && $jobsStore.data.jobs.hasNextPage) {
+       let pendingPaging = { ...paging }
+       scrollMultiplier += 1
+       pendingPaging.itemsPerPage = itemsPerPage * scrollMultiplier
+       paging = pendingPaging
+     };
+   });
+ };
+
  let plotWidth = null;
  let tableWidth = null;
  let jobInfoColumnWidth = 250;
@@ -212,22 +238,16 @@
        </tr>
      </thead>
      <tbody>
-       {#if $jobs.error}
+       {#if $jobsStore.error}
          <tr>
            <td colspan={metrics.length + 1}>
              <Card body color="danger" class="mb-3"
-               ><h2>{$jobs.error.message}</h2></Card
+               ><h2>{$jobsStore.error.message}</h2></Card
              >
            </td>
          </tr>
-       {:else if $jobs.fetching || !$jobs.data}
-         <tr>
-           <td colspan={metrics.length + 1}>
-             <Spinner secondary />
-           </td>
-         </tr>
-       {:else if $jobs.data && $initialized}
-         {#each $jobs.data.jobs.items as job (job)}
+       {:else}
+         {#each jobs as job (job)}
            <JobListRow {job} {metrics} {plotWidth} {showFootprint} />
          {:else}
            <tr>
@@ -235,11 +255,21 @@
            </tr>
        {/each}
      {/if}
+     {#if $jobsStore.fetching || !$jobsStore.data}
+       <tr>
+         <td colspan={metrics.length + 1}>
+           <div style="text-align:center;">
+             <Spinner secondary />
+           </div>
+         </td>
+       </tr>
+     {/if}
    </tbody>
  </Table>
</div>
</Row>

+{#if usePaging}
  <Pagination
    bind:page
    {itemsPerPage}
@@ -249,10 +279,12 @@
      if (detail.itemsPerPage != itemsPerPage) {
        updateConfiguration(detail.itemsPerPage.toString(), detail.page);
      } else {
+       jobs = []
        paging = { itemsPerPage: detail.itemsPerPage, page: detail.page };
      }
    }}
  />
+{/if}

<style>
  .cc-table-wrapper {