Mirror of https://github.com/ClusterCockpit/cc-backend (synced 2024-12-25 12:59:06 +01:00)

Merge pull request #252 from ClusterCockpit/20_infinite_scroll
20 infinite scroll

This commit is contained in:
commit baa51db26c
@@ -278,6 +278,7 @@ type JobResultList {
  offset: Int
  limit: Int
  count: Int
  hasNextPage: Boolean
}

type JobLinkResultList {
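The jobs query can now tell the client whether another window of results exists; because hasNextPage is nullable, clients that do not select the field keep working unchanged. Below is a minimal sketch of a client requesting the new field. The endpoint path (/query), port, missing authentication, the page argument name, and the jobId sub-selection are illustrative assumptions and are not taken from this diff.

package main

import (
	"bytes"
	"encoding/json"
	"fmt"
	"net/http"
)

func main() {
	// Request the first window of jobs plus the new hasNextPage flag.
	// The PageRequest fields (itemsPerPage, page) match the schema in this commit;
	// the surrounding query shape is an assumption.
	query := `{
	  jobs(page: { itemsPerPage: 50, page: 1 }) {
	    count
	    hasNextPage
	    items { jobId }
	  }
	}`

	body, _ := json.Marshal(map[string]string{"query": query})

	// Endpoint and port are assumptions; adjust to the actual deployment and add auth as required.
	resp, err := http.Post("http://localhost:8080/query", "application/json", bytes.NewReader(body))
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()

	var out map[string]interface{}
	if err := json.NewDecoder(resp.Body).Decode(&out); err != nil {
		panic(err)
	}
	fmt.Println(out) // e.g. map[data:map[jobs:map[count:... hasNextPage:true items:[...]]]]
}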
@@ -32,6 +32,7 @@ var Keys schema.ProgramConfig = schema.ProgramConfig{
	"job_view_polarPlotMetrics": []string{"flops_any", "mem_bw", "mem_used"},
	"job_view_selectedMetrics": []string{"flops_any", "mem_bw", "mem_used"},
	"job_view_showFootprint": true,
	"job_list_usePaging": true,
	"plot_general_colorBackground": true,
	"plot_general_colorscheme": []string{"#00bfff", "#0000ff", "#ff00ff", "#ff0000", "#ff8000", "#ffff00", "#80ff00"},
	"plot_general_lineWidth": 3,
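This new UI default is what the Jobs resolver later in this commit checks, via config.Keys.UiDefaults["job_list_usePaging"].(bool), to decide between classic paging and the infinite-scroll lookahead. A minimal sketch of reading the flag from inside the cc-backend module, with a comma-ok guard added for a missing or mistyped key (the plain type assertion used by the resolver would panic in that case):

package main

import (
	"fmt"

	// Internal packages are only importable from within the cc-backend module itself.
	"github.com/ClusterCockpit/cc-backend/internal/config"
)

func main() {
	usePaging := true // fall back to classic paging if the key is absent or not a bool
	if v, ok := config.Keys.UiDefaults["job_list_usePaging"].(bool); ok {
		usePaging = v
	}
	fmt.Println("job_list_usePaging:", usePaging)
}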
@@ -139,10 +139,11 @@ type ComplexityRoot struct {
	}

	JobResultList struct {
		Count func(childComplexity int) int
		Items func(childComplexity int) int
		Limit func(childComplexity int) int
		Offset func(childComplexity int) int
		Count func(childComplexity int) int
		HasNextPage func(childComplexity int) int
		Items func(childComplexity int) int
		Limit func(childComplexity int) int
		Offset func(childComplexity int) int
	}

	JobsStatistics struct {
@@ -755,6 +756,13 @@ func (e *executableSchema) Complexity(typeName, field string, childComplexity in

		return e.complexity.JobResultList.Count(childComplexity), true

	case "JobResultList.hasNextPage":
		if e.complexity.JobResultList.HasNextPage == nil {
			break
		}

		return e.complexity.JobResultList.HasNextPage(childComplexity), true

	case "JobResultList.items":
		if e.complexity.JobResultList.Items == nil {
			break
		}
@@ -1987,6 +1995,7 @@ type JobResultList {
  offset: Int
  limit: Int
  count: Int
  hasNextPage: Boolean
}

type JobLinkResultList {
@@ -5221,6 +5230,47 @@ func (ec *executionContext) fieldContext_JobResultList_count(ctx context.Context
	return fc, nil
}

func (ec *executionContext) _JobResultList_hasNextPage(ctx context.Context, field graphql.CollectedField, obj *model.JobResultList) (ret graphql.Marshaler) {
	fc, err := ec.fieldContext_JobResultList_hasNextPage(ctx, field)
	if err != nil {
		return graphql.Null
	}
	ctx = graphql.WithFieldContext(ctx, fc)
	defer func() {
		if r := recover(); r != nil {
			ec.Error(ctx, ec.Recover(ctx, r))
			ret = graphql.Null
		}
	}()
	resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
		ctx = rctx // use context from middleware stack in children
		return obj.HasNextPage, nil
	})
	if err != nil {
		ec.Error(ctx, err)
		return graphql.Null
	}
	if resTmp == nil {
		return graphql.Null
	}
	res := resTmp.(*bool)
	fc.Result = res
	return ec.marshalOBoolean2ᚖbool(ctx, field.Selections, res)
}

func (ec *executionContext) fieldContext_JobResultList_hasNextPage(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) {
	fc = &graphql.FieldContext{
		Object: "JobResultList",
		Field: field,
		IsMethod: false,
		IsResolver: false,
		Child: func(ctx context.Context, field graphql.CollectedField) (*graphql.FieldContext, error) {
			return nil, errors.New("field of type Boolean does not have child fields")
		},
	}
	return fc, nil
}

func (ec *executionContext) _JobsStatistics_id(ctx context.Context, field graphql.CollectedField, obj *model.JobsStatistics) (ret graphql.Marshaler) {
	fc, err := ec.fieldContext_JobsStatistics_id(ctx, field)
	if err != nil {
@@ -8017,6 +8067,8 @@ func (ec *executionContext) fieldContext_Query_jobs(ctx context.Context, field g
				return ec.fieldContext_JobResultList_limit(ctx, field)
			case "count":
				return ec.fieldContext_JobResultList_count(ctx, field)
			case "hasNextPage":
				return ec.fieldContext_JobResultList_hasNextPage(ctx, field)
			}
			return nil, fmt.Errorf("no field named %q was found under type JobResultList", field.Name)
		},
@@ -12226,8 +12278,6 @@ func (ec *executionContext) unmarshalInputFloatRange(ctx context.Context, obj in
		}
		switch k {
		case "from":
			var err error

			ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("from"))
			data, err := ec.unmarshalNFloat2float64(ctx, v)
			if err != nil {
@@ -12235,8 +12285,6 @@ func (ec *executionContext) unmarshalInputFloatRange(ctx context.Context, obj in
			}
			it.From = data
		case "to":
			var err error

			ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("to"))
			data, err := ec.unmarshalNFloat2float64(ctx, v)
			if err != nil {
@@ -12264,8 +12312,6 @@ func (ec *executionContext) unmarshalInputIntRange(ctx context.Context, obj inte
		}
		switch k {
		case "from":
			var err error

			ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("from"))
			data, err := ec.unmarshalNInt2int(ctx, v)
			if err != nil {
@@ -12273,8 +12319,6 @@ func (ec *executionContext) unmarshalInputIntRange(ctx context.Context, obj inte
			}
			it.From = data
		case "to":
			var err error

			ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("to"))
			data, err := ec.unmarshalNInt2int(ctx, v)
			if err != nil {
@@ -12302,8 +12346,6 @@ func (ec *executionContext) unmarshalInputJobFilter(ctx context.Context, obj int
		}
		switch k {
		case "tags":
			var err error

			ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("tags"))
			data, err := ec.unmarshalOID2ᚕstringᚄ(ctx, v)
			if err != nil {
@@ -12311,8 +12353,6 @@ func (ec *executionContext) unmarshalInputJobFilter(ctx context.Context, obj int
			}
			it.Tags = data
		case "jobId":
			var err error

			ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("jobId"))
			data, err := ec.unmarshalOStringInput2ᚖgithubᚗcomᚋClusterCockpitᚋccᚑbackendᚋinternalᚋgraphᚋmodelᚐStringInput(ctx, v)
			if err != nil {
@@ -12320,8 +12360,6 @@ func (ec *executionContext) unmarshalInputJobFilter(ctx context.Context, obj int
			}
			it.JobID = data
		case "arrayJobId":
			var err error

			ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("arrayJobId"))
			data, err := ec.unmarshalOInt2ᚖint(ctx, v)
			if err != nil {
@@ -12329,8 +12367,6 @@ func (ec *executionContext) unmarshalInputJobFilter(ctx context.Context, obj int
			}
			it.ArrayJobID = data
		case "user":
			var err error

			ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("user"))
			data, err := ec.unmarshalOStringInput2ᚖgithubᚗcomᚋClusterCockpitᚋccᚑbackendᚋinternalᚋgraphᚋmodelᚐStringInput(ctx, v)
			if err != nil {
@@ -12338,8 +12374,6 @@ func (ec *executionContext) unmarshalInputJobFilter(ctx context.Context, obj int
			}
			it.User = data
		case "project":
			var err error

			ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("project"))
			data, err := ec.unmarshalOStringInput2ᚖgithubᚗcomᚋClusterCockpitᚋccᚑbackendᚋinternalᚋgraphᚋmodelᚐStringInput(ctx, v)
			if err != nil {
@@ -12347,8 +12381,6 @@ func (ec *executionContext) unmarshalInputJobFilter(ctx context.Context, obj int
			}
			it.Project = data
		case "jobName":
			var err error

			ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("jobName"))
			data, err := ec.unmarshalOStringInput2ᚖgithubᚗcomᚋClusterCockpitᚋccᚑbackendᚋinternalᚋgraphᚋmodelᚐStringInput(ctx, v)
			if err != nil {
@@ -12356,8 +12388,6 @@ func (ec *executionContext) unmarshalInputJobFilter(ctx context.Context, obj int
			}
			it.JobName = data
		case "cluster":
			var err error

			ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("cluster"))
			data, err := ec.unmarshalOStringInput2ᚖgithubᚗcomᚋClusterCockpitᚋccᚑbackendᚋinternalᚋgraphᚋmodelᚐStringInput(ctx, v)
			if err != nil {
@@ -12365,8 +12395,6 @@ func (ec *executionContext) unmarshalInputJobFilter(ctx context.Context, obj int
			}
			it.Cluster = data
		case "partition":
			var err error

			ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("partition"))
			data, err := ec.unmarshalOStringInput2ᚖgithubᚗcomᚋClusterCockpitᚋccᚑbackendᚋinternalᚋgraphᚋmodelᚐStringInput(ctx, v)
			if err != nil {
@@ -12374,8 +12402,6 @@ func (ec *executionContext) unmarshalInputJobFilter(ctx context.Context, obj int
			}
			it.Partition = data
		case "duration":
			var err error

			ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("duration"))
			data, err := ec.unmarshalOIntRange2ᚖgithubᚗcomᚋClusterCockpitᚋccᚑbackendᚋpkgᚋschemaᚐIntRange(ctx, v)
			if err != nil {
@@ -12383,8 +12409,6 @@ func (ec *executionContext) unmarshalInputJobFilter(ctx context.Context, obj int
			}
			it.Duration = data
		case "minRunningFor":
			var err error

			ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("minRunningFor"))
			data, err := ec.unmarshalOInt2ᚖint(ctx, v)
			if err != nil {
@@ -12392,8 +12416,6 @@ func (ec *executionContext) unmarshalInputJobFilter(ctx context.Context, obj int
			}
			it.MinRunningFor = data
		case "numNodes":
			var err error

			ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("numNodes"))
			data, err := ec.unmarshalOIntRange2ᚖgithubᚗcomᚋClusterCockpitᚋccᚑbackendᚋpkgᚋschemaᚐIntRange(ctx, v)
			if err != nil {
@@ -12401,8 +12423,6 @@ func (ec *executionContext) unmarshalInputJobFilter(ctx context.Context, obj int
			}
			it.NumNodes = data
		case "numAccelerators":
			var err error

			ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("numAccelerators"))
			data, err := ec.unmarshalOIntRange2ᚖgithubᚗcomᚋClusterCockpitᚋccᚑbackendᚋpkgᚋschemaᚐIntRange(ctx, v)
			if err != nil {
@@ -12410,8 +12430,6 @@ func (ec *executionContext) unmarshalInputJobFilter(ctx context.Context, obj int
			}
			it.NumAccelerators = data
		case "numHWThreads":
			var err error

			ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("numHWThreads"))
			data, err := ec.unmarshalOIntRange2ᚖgithubᚗcomᚋClusterCockpitᚋccᚑbackendᚋpkgᚋschemaᚐIntRange(ctx, v)
			if err != nil {
@@ -12419,8 +12437,6 @@ func (ec *executionContext) unmarshalInputJobFilter(ctx context.Context, obj int
			}
			it.NumHWThreads = data
		case "startTime":
			var err error

			ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("startTime"))
			data, err := ec.unmarshalOTimeRange2ᚖgithubᚗcomᚋClusterCockpitᚋccᚑbackendᚋpkgᚋschemaᚐTimeRange(ctx, v)
			if err != nil {
@@ -12428,8 +12444,6 @@ func (ec *executionContext) unmarshalInputJobFilter(ctx context.Context, obj int
			}
			it.StartTime = data
		case "state":
			var err error

			ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("state"))
			data, err := ec.unmarshalOJobState2ᚕgithubᚗcomᚋClusterCockpitᚋccᚑbackendᚋpkgᚋschemaᚐJobStateᚄ(ctx, v)
			if err != nil {
@@ -12437,8 +12451,6 @@ func (ec *executionContext) unmarshalInputJobFilter(ctx context.Context, obj int
			}
			it.State = data
		case "flopsAnyAvg":
			var err error

			ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("flopsAnyAvg"))
			data, err := ec.unmarshalOFloatRange2ᚖgithubᚗcomᚋClusterCockpitᚋccᚑbackendᚋinternalᚋgraphᚋmodelᚐFloatRange(ctx, v)
			if err != nil {
@@ -12446,8 +12458,6 @@ func (ec *executionContext) unmarshalInputJobFilter(ctx context.Context, obj int
			}
			it.FlopsAnyAvg = data
		case "memBwAvg":
			var err error

			ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("memBwAvg"))
			data, err := ec.unmarshalOFloatRange2ᚖgithubᚗcomᚋClusterCockpitᚋccᚑbackendᚋinternalᚋgraphᚋmodelᚐFloatRange(ctx, v)
			if err != nil {
@@ -12455,8 +12465,6 @@ func (ec *executionContext) unmarshalInputJobFilter(ctx context.Context, obj int
			}
			it.MemBwAvg = data
		case "loadAvg":
			var err error

			ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("loadAvg"))
			data, err := ec.unmarshalOFloatRange2ᚖgithubᚗcomᚋClusterCockpitᚋccᚑbackendᚋinternalᚋgraphᚋmodelᚐFloatRange(ctx, v)
			if err != nil {
@@ -12464,8 +12472,6 @@ func (ec *executionContext) unmarshalInputJobFilter(ctx context.Context, obj int
			}
			it.LoadAvg = data
		case "memUsedMax":
			var err error

			ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("memUsedMax"))
			data, err := ec.unmarshalOFloatRange2ᚖgithubᚗcomᚋClusterCockpitᚋccᚑbackendᚋinternalᚋgraphᚋmodelᚐFloatRange(ctx, v)
			if err != nil {
@@ -12473,8 +12479,6 @@ func (ec *executionContext) unmarshalInputJobFilter(ctx context.Context, obj int
			}
			it.MemUsedMax = data
		case "exclusive":
			var err error

			ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("exclusive"))
			data, err := ec.unmarshalOInt2ᚖint(ctx, v)
			if err != nil {
@@ -12482,8 +12486,6 @@ func (ec *executionContext) unmarshalInputJobFilter(ctx context.Context, obj int
			}
			it.Exclusive = data
		case "node":
			var err error

			ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("node"))
			data, err := ec.unmarshalOStringInput2ᚖgithubᚗcomᚋClusterCockpitᚋccᚑbackendᚋinternalᚋgraphᚋmodelᚐStringInput(ctx, v)
			if err != nil {
@@ -12515,8 +12517,6 @@ func (ec *executionContext) unmarshalInputOrderByInput(ctx context.Context, obj
		}
		switch k {
		case "field":
			var err error

			ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("field"))
			data, err := ec.unmarshalNString2string(ctx, v)
			if err != nil {
@@ -12524,8 +12524,6 @@ func (ec *executionContext) unmarshalInputOrderByInput(ctx context.Context, obj
			}
			it.Field = data
		case "order":
			var err error

			ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("order"))
			data, err := ec.unmarshalNSortDirectionEnum2githubᚗcomᚋClusterCockpitᚋccᚑbackendᚋinternalᚋgraphᚋmodelᚐSortDirectionEnum(ctx, v)
			if err != nil {
@@ -12553,8 +12551,6 @@ func (ec *executionContext) unmarshalInputPageRequest(ctx context.Context, obj i
		}
		switch k {
		case "itemsPerPage":
			var err error

			ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("itemsPerPage"))
			data, err := ec.unmarshalNInt2int(ctx, v)
			if err != nil {
@@ -12562,8 +12558,6 @@ func (ec *executionContext) unmarshalInputPageRequest(ctx context.Context, obj i
			}
			it.ItemsPerPage = data
		case "page":
			var err error

			ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("page"))
			data, err := ec.unmarshalNInt2int(ctx, v)
			if err != nil {
@@ -12591,8 +12585,6 @@ func (ec *executionContext) unmarshalInputStringInput(ctx context.Context, obj i
		}
		switch k {
		case "eq":
			var err error

			ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("eq"))
			data, err := ec.unmarshalOString2ᚖstring(ctx, v)
			if err != nil {
@@ -12600,8 +12592,6 @@ func (ec *executionContext) unmarshalInputStringInput(ctx context.Context, obj i
			}
			it.Eq = data
		case "neq":
			var err error

			ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("neq"))
			data, err := ec.unmarshalOString2ᚖstring(ctx, v)
			if err != nil {
@@ -12609,8 +12599,6 @@ func (ec *executionContext) unmarshalInputStringInput(ctx context.Context, obj i
			}
			it.Neq = data
		case "contains":
			var err error

			ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("contains"))
			data, err := ec.unmarshalOString2ᚖstring(ctx, v)
			if err != nil {
@@ -12618,8 +12606,6 @@ func (ec *executionContext) unmarshalInputStringInput(ctx context.Context, obj i
			}
			it.Contains = data
		case "startsWith":
			var err error

			ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("startsWith"))
			data, err := ec.unmarshalOString2ᚖstring(ctx, v)
			if err != nil {
@@ -12627,8 +12613,6 @@ func (ec *executionContext) unmarshalInputStringInput(ctx context.Context, obj i
			}
			it.StartsWith = data
		case "endsWith":
			var err error

			ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("endsWith"))
			data, err := ec.unmarshalOString2ᚖstring(ctx, v)
			if err != nil {
@@ -12636,8 +12620,6 @@ func (ec *executionContext) unmarshalInputStringInput(ctx context.Context, obj i
			}
			it.EndsWith = data
		case "in":
			var err error

			ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("in"))
			data, err := ec.unmarshalOString2ᚕstringᚄ(ctx, v)
			if err != nil {
@@ -12665,8 +12647,6 @@ func (ec *executionContext) unmarshalInputTimeRange(ctx context.Context, obj int
		}
		switch k {
		case "from":
			var err error

			ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("from"))
			data, err := ec.unmarshalOTime2ᚖtimeᚐTime(ctx, v)
			if err != nil {
@@ -12674,8 +12654,6 @@ func (ec *executionContext) unmarshalInputTimeRange(ctx context.Context, obj int
			}
			it.From = data
		case "to":
			var err error

			ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("to"))
			data, err := ec.unmarshalOTime2ᚖtimeᚐTime(ctx, v)
			if err != nil {
@@ -13481,6 +13459,8 @@ func (ec *executionContext) _JobResultList(ctx context.Context, sel ast.Selectio
			out.Values[i] = ec._JobResultList_limit(ctx, field, obj)
		case "count":
			out.Values[i] = ec._JobResultList_count(ctx, field, obj)
		case "hasNextPage":
			out.Values[i] = ec._JobResultList_hasNextPage(ctx, field, obj)
		default:
			panic("unknown field " + strconv.Quote(field.Name))
		}
@@ -78,10 +78,11 @@ type JobMetricWithName struct {
}

type JobResultList struct {
	Items []*schema.Job `json:"items"`
	Offset *int `json:"offset,omitempty"`
	Limit *int `json:"limit,omitempty"`
	Count *int `json:"count,omitempty"`
	Items []*schema.Job `json:"items"`
	Offset *int `json:"offset,omitempty"`
	Limit *int `json:"limit,omitempty"`
	Count *int `json:"count,omitempty"`
	HasNextPage *bool `json:"hasNextPage,omitempty"`
}

type JobsStatistics struct {
@@ -122,6 +123,9 @@ type MetricHistoPoints struct {
	Data []*MetricHistoPoint `json:"data,omitempty"`
}

type Mutation struct {
}

type NodeMetrics struct {
	Host string `json:"host"`
	SubCluster string `json:"subCluster"`
@@ -138,6 +142,9 @@ type PageRequest struct {
	Page int `json:"page"`
}

type Query struct {
}

type StringInput struct {
	Eq *string `json:"eq,omitempty"`
	Neq *string `json:"neq,omitempty"`
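Because hasNextPage is optional in the GraphQL schema, gqlgen generates it as a *bool with an omitempty JSON tag: a resolver that never sets the pointer produces no flag in the serialized model, and the generated field marshaler above returns null to GraphQL clients that request it. A small, self-contained sketch of that pointer semantics, using a stand-in struct with the same field shapes instead of the generated model package:

package main

import (
	"encoding/json"
	"fmt"
)

// Stand-in for model.JobResultList; only the two optional fields relevant
// to the new flag are reproduced here.
type jobResultList struct {
	Count       *int  `json:"count,omitempty"`
	HasNextPage *bool `json:"hasNextPage,omitempty"`
}

func main() {
	count := 250
	more := true

	withFlag, _ := json.Marshal(jobResultList{Count: &count, HasNextPage: &more})
	withoutFlag, _ := json.Marshal(jobResultList{Count: &count}) // nil pointer: the field is simply omitted

	fmt.Println(string(withFlag))    // {"count":250,"hasNextPage":true}
	fmt.Println(string(withoutFlag)) // {"count":250}
}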
@@ -2,7 +2,7 @@ package graph

// This file will be automatically regenerated based on the schema, any resolver implementations
// will be copied through when generating and any unknown code will be moved to the end.
// Code generated by github.com/99designs/gqlgen version v0.17.40
// Code generated by github.com/99designs/gqlgen version v0.17.45

import (
	"context"
@@ -11,6 +11,7 @@ import (
	"strconv"
	"time"

	"github.com/ClusterCockpit/cc-backend/internal/config"
	"github.com/ClusterCockpit/cc-backend/internal/graph/generated"
	"github.com/ClusterCockpit/cc-backend/internal/graph/model"
	"github.com/ClusterCockpit/cc-backend/internal/metricdata"
@@ -240,7 +241,23 @@ func (r *queryResolver) Jobs(ctx context.Context, filter []*model.JobFilter, pag
		return nil, err
	}

	return &model.JobResultList{Items: jobs, Count: &count}, nil
	if !config.Keys.UiDefaults["job_list_usePaging"].(bool) {
		hasNextPage := false
		page.Page += 1

		nextJobs, err := r.Repo.QueryJobs(ctx, filter, page, order)
		if err != nil {
			log.Warn("Error while querying next jobs")
			return nil, err
		}
		if len(nextJobs) > 0 {
			hasNextPage = true
		}

		return &model.JobResultList{Items: jobs, Count: &count, HasNextPage: &hasNextPage}, nil
	} else {
		return &model.JobResultList{Items: jobs, Count: &count}, nil
	}
}

// JobsStatistics is the resolver for the jobsStatistics field.
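With paging disabled, the resolver above computes hasNextPage by running QueryJobs a second time for the following page and checking whether anything comes back. A common alternative is to over-fetch a single extra row in the first query and trim it off, which saves the second round trip. The sketch below only illustrates that pattern against names visible in this diff (queryResolver, Repo.QueryJobs, model.PageRequest, model.OrderByInput); it is not what this commit implements, and the count handling is simplified (the real resolver obtains count separately, which is not shown in this hunk).

// Sketch of a single-query lookahead: ask for itemsPerPage+1 rows and use the
// extra row only as a signal that another page exists.
func jobsWithLookahead(ctx context.Context, r *queryResolver, filter []*model.JobFilter,
	page *model.PageRequest, order *model.OrderByInput) (*model.JobResultList, error) {

	probe := *page
	probe.ItemsPerPage = page.ItemsPerPage + 1 // over-fetch by one row

	jobs, err := r.Repo.QueryJobs(ctx, filter, &probe, order)
	if err != nil {
		return nil, err
	}

	hasNextPage := len(jobs) > page.ItemsPerPage
	if hasNextPage {
		jobs = jobs[:page.ItemsPerPage] // drop the lookahead row before returning it to the client
	}

	count := len(jobs) // simplified; see note above
	return &model.JobResultList{Items: jobs, Count: &count, HasNextPage: &hasNextPage}, nil
}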
@@ -30,7 +30,8 @@
  export let metrics = ccconfig.plot_list_selectedMetrics;
  export let showFootprint;

  let itemsPerPage = ccconfig.plot_list_jobsPerPage;
  let usePaging = ccconfig.job_list_usePaging
  let itemsPerPage = usePaging ? ccconfig.plot_list_jobsPerPage : 10;
  let page = 1;
  let paging = { itemsPerPage, page };
  let filter = [];
@@ -79,21 +80,27 @@
          loadAvg
        }
        count
        hasNextPage
      }
    }
  `;

  $: jobs = queryStore({
  $: jobsStore = queryStore({
    client: client,
    query: query,
    variables: { paging, sorting, filter },
  });

  $: matchedJobs = $jobs.data != null ? $jobs.data.jobs.count : 0;
  let jobs = []
  $: if ($initialized && $jobsStore.data) {
    jobs = [...$jobsStore.data.jobs.items]
  }

  $: matchedJobs = $jobsStore.data != null ? $jobsStore.data.jobs.count : 0;

  // Force refresh list with existing unchanged variables (== usually would not trigger reactivity)
  export function refresh() {
    jobs = queryStore({
    jobsStore = queryStore({
      client: client,
      query: query,
      variables: { paging, sorting, filter },
@@ -132,6 +139,7 @@
      value: value,
    }).subscribe((res) => {
      if (res.fetching === false && !res.error) {
        jobs = [] // Empty List
        paging = { itemsPerPage: value, page: page }; // Trigger reload of jobList
      } else if (res.fetching === false && res.error) {
        throw res.error;
@@ -140,6 +148,24 @@
    });
  }

  if (!usePaging) {
    let scrollMultiplier = 1
    window.addEventListener('scroll', () => {
      let {
        scrollTop,
        scrollHeight,
        clientHeight
      } = document.documentElement;

      if (scrollTop + clientHeight >= scrollHeight && $jobsStore.data != null && $jobsStore.data.jobs.hasNextPage) {
        let pendingPaging = { ...paging }
        scrollMultiplier += 1
        pendingPaging.itemsPerPage = itemsPerPage * scrollMultiplier
        paging = pendingPaging
      };
    });
  };

  let plotWidth = null;
  let tableWidth = null;
  let jobInfoColumnWidth = 250;
@@ -212,22 +238,16 @@
      </tr>
    </thead>
    <tbody>
      {#if $jobs.error}
      {#if $jobsStore.error}
        <tr>
          <td colspan={metrics.length + 1}>
            <Card body color="danger" class="mb-3"
              ><h2>{$jobs.error.message}</h2></Card
              ><h2>{$jobsStore.error.message}</h2></Card
            >
          </td>
        </tr>
      {:else if $jobs.fetching || !$jobs.data}
        <tr>
          <td colspan={metrics.length + 1}>
            <Spinner secondary />
          </td>
        </tr>
      {:else if $jobs.data && $initialized}
        {#each $jobs.data.jobs.items as job (job)}
      {:else}
        {#each jobs as job (job)}
          <JobListRow {job} {metrics} {plotWidth} {showFootprint} />
        {:else}
          <tr>
@@ -235,24 +255,36 @@
          </tr>
        {/each}
      {/if}
      {#if $jobsStore.fetching || !$jobsStore.data}
        <tr>
          <td colspan={metrics.length + 1}>
            <div style="text-align:center;">
              <Spinner secondary />
            </div>
          </td>
        </tr>
      {/if}
    </tbody>
  </Table>
</div>
</Row>

<Pagination
  bind:page
  {itemsPerPage}
  itemText="Jobs"
  totalItems={matchedJobs}
  on:update={({ detail }) => {
    if (detail.itemsPerPage != itemsPerPage) {
      updateConfiguration(detail.itemsPerPage.toString(), detail.page);
    } else {
      paging = { itemsPerPage: detail.itemsPerPage, page: detail.page };
    }
  }}
/>
{#if usePaging}
  <Pagination
    bind:page
    {itemsPerPage}
    itemText="Jobs"
    totalItems={matchedJobs}
    on:update={({ detail }) => {
      if (detail.itemsPerPage != itemsPerPage) {
        updateConfiguration(detail.itemsPerPage.toString(), detail.page);
      } else {
        jobs = []
        paging = { itemsPerPage: detail.itemsPerPage, page: detail.page };
      }
    }}
  />
{/if}

<style>
  .cc-table-wrapper {