Add filters for jobs by statistics like load

Lou Knauer 2021-05-06 13:50:38 +02:00
parent b6df8e88b9
commit 030f1a3fba
4 changed files with 122 additions and 9 deletions

View File

@@ -82,6 +82,7 @@ type ComplexityRoot struct {
HasProfile func(childComplexity int) int
ID func(childComplexity int) int
JobID func(childComplexity int) int
LoadAvg func(childComplexity int) int
MemBwAvg func(childComplexity int) int
MemUsedMax func(childComplexity int) int
NetBwAvg func(childComplexity int) int
@@ -363,6 +364,13 @@ func (e *executableSchema) Complexity(typeName, field string, childComplexity in
return e.complexity.Job.JobID(childComplexity), true
case "Job.loadAvg":
if e.complexity.Job.LoadAvg == nil {
break
}
return e.complexity.Job.LoadAvg(childComplexity), true
case "Job.memBwAvg": case "Job.memBwAvg":
if e.complexity.Job.MemBwAvg == nil { if e.complexity.Job.MemBwAvg == nil {
break break
@@ -788,6 +796,7 @@ var sources = []*ast.Source{
numNodes: Int!
hasProfile: Boolean!
loadAvg: Float
memUsedMax: Float
flopsAnyAvg: Float
memBwAvg: Float
@@ -878,6 +887,10 @@ input JobFilter {
numNodes: IntRange
startTime: TimeRange
hasProfile: Boolean
flopsAnyAvg: FloatRange
memBwAvg: FloatRange
loadAvg: FloatRange
memUsedMax: FloatRange
}
type IntRangeOutput {
@@ -2036,6 +2049,38 @@ func (ec *executionContext) _Job_hasProfile(ctx context.Context, field graphql.C
return ec.marshalNBoolean2bool(ctx, field.Selections, res)
}
func (ec *executionContext) _Job_loadAvg(ctx context.Context, field graphql.CollectedField, obj *model.Job) (ret graphql.Marshaler) {
defer func() {
if r := recover(); r != nil {
ec.Error(ctx, ec.Recover(ctx, r))
ret = graphql.Null
}
}()
fc := &graphql.FieldContext{
Object: "Job",
Field: field,
Args: nil,
IsMethod: false,
IsResolver: false,
}
ctx = graphql.WithFieldContext(ctx, fc)
resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
ctx = rctx // use context from middleware stack in children
return obj.LoadAvg, nil
})
if err != nil {
ec.Error(ctx, err)
return graphql.Null
}
if resTmp == nil {
return graphql.Null
}
res := resTmp.(*float64)
fc.Result = res
return ec.marshalOFloat2ᚖfloat64(ctx, field.Selections, res)
}
func (ec *executionContext) _Job_memUsedMax(ctx context.Context, field graphql.CollectedField, obj *model.Job) (ret graphql.Marshaler) {
defer func() {
if r := recover(); r != nil {
@@ -4968,6 +5013,38 @@ func (ec *executionContext) unmarshalInputJobFilter(ctx context.Context, obj int
if err != nil {
return it, err
}
case "flopsAnyAvg":
var err error
ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("flopsAnyAvg"))
it.FlopsAnyAvg, err = ec.unmarshalOFloatRange2ᚖgithubᚗcomᚋClusterCockpitᚋccᚑjobarchiveᚋgraphᚋmodelᚐFloatRange(ctx, v)
if err != nil {
return it, err
}
case "memBwAvg":
var err error
ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("memBwAvg"))
it.MemBwAvg, err = ec.unmarshalOFloatRange2ᚖgithubᚗcomᚋClusterCockpitᚋccᚑjobarchiveᚋgraphᚋmodelᚐFloatRange(ctx, v)
if err != nil {
return it, err
}
case "loadAvg":
var err error
ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("loadAvg"))
it.LoadAvg, err = ec.unmarshalOFloatRange2ᚖgithubᚗcomᚋClusterCockpitᚋccᚑjobarchiveᚋgraphᚋmodelᚐFloatRange(ctx, v)
if err != nil {
return it, err
}
case "memUsedMax":
var err error
ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("memUsedMax"))
it.MemUsedMax, err = ec.unmarshalOFloatRange2ᚖgithubᚗcomᚋClusterCockpitᚋccᚑjobarchiveᚋgraphᚋmodelᚐFloatRange(ctx, v)
if err != nil {
return it, err
}
}
}
@@ -5372,6 +5449,8 @@ func (ec *executionContext) _Job(ctx context.Context, sel ast.SelectionSet, obj
if out.Values[i] == graphql.Null {
atomic.AddUint32(&invalids, 1)
}
case "loadAvg":
out.Values[i] = ec._Job_loadAvg(ctx, field, obj)
case "memUsedMax": case "memUsedMax":
out.Values[i] = ec._Job_memUsedMax(ctx, field, obj) out.Values[i] = ec._Job_memUsedMax(ctx, field, obj)
case "flopsAnyAvg": case "flopsAnyAvg":
@@ -6883,6 +6962,14 @@ func (ec *executionContext) marshalOFloat2ᚖfloat64(ctx context.Context, sel as
return graphql.MarshalFloat(*v)
}
func (ec *executionContext) unmarshalOFloatRange2ᚖgithubᚗcomᚋClusterCockpitᚋccᚑjobarchiveᚋgraphᚋmodelᚐFloatRange(ctx context.Context, v interface{}) (*model.FloatRange, error) {
if v == nil {
return nil, nil
}
res, err := ec.unmarshalInputFloatRange(ctx, v)
return &res, graphql.ErrorOnPath(ctx, err)
}
func (ec *executionContext) marshalOHistoPoint2ᚖgithubᚗcomᚋClusterCockpitᚋccᚑjobarchiveᚋgraphᚋmodelᚐHistoPoint(ctx context.Context, sel ast.SelectionSet, v *model.HistoPoint) graphql.Marshaler {
if v == nil {
return graphql.Null

View File

@@ -36,15 +36,19 @@ type IntRangeOutput struct {
}
type JobFilter struct {
Tags []string `json:"tags"`
JobID *StringInput `json:"jobId"`
UserID *StringInput `json:"userId"`
ProjectID *StringInput `json:"projectId"`
ClusterID *StringInput `json:"clusterId"`
Duration *IntRange `json:"duration"`
NumNodes *IntRange `json:"numNodes"`
StartTime *TimeRange `json:"startTime"`
HasProfile *bool `json:"hasProfile"`
FlopsAnyAvg *FloatRange `json:"flopsAnyAvg"`
MemBwAvg *FloatRange `json:"memBwAvg"`
LoadAvg *FloatRange `json:"loadAvg"`
MemUsedMax *FloatRange `json:"memUsedMax"`
}
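
The FloatRange type referenced by the new filter fields is not part of this diff. For reference, a minimal sketch of how it presumably looks, assuming it mirrors the existing IntRange type with plain (non-pointer) float64 bounds and analogous json tags:

// Assumed shape of model.FloatRange (not shown in this commit). Plain
// float64 bounds are presumed so that addFloatCondition can pass them
// straight to fmt.Sprintf with %f.
type FloatRange struct {
	From float64 `json:"from"`
	To   float64 `json:"to"`
}
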
type JobFilterList struct {

View File

@@ -61,6 +61,11 @@ func addTimeCondition(conditions []string, field string, input *model.TimeRange)
return conditions
}
func addFloatCondition(conditions []string, field string, input *model.FloatRange) []string {
conditions = append(conditions, fmt.Sprintf("%s BETWEEN %f AND %f", field, input.From, input.To))
return conditions
}
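
To illustrate (this example is not part of the commit), a single range filter becomes one SQL BETWEEN clause. The snippet assumes it sits in the same package as addFloatCondition (which already imports fmt) and that model.FloatRange exposes plain float64 From/To fields as sketched above:

// Filter jobs whose average load lies between 0.0 and 2.5.
// %f prints six decimal places, so the resulting fragment reads
// "load_avg BETWEEN 0.000000 AND 2.500000".
loadFilter := &model.FloatRange{From: 0.0, To: 2.5}
conditions := addFloatCondition(nil, "load_avg", loadFilter)
fmt.Println(conditions[0])
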
func buildQueryConditions(filterList *model.JobFilterList) (string, string) {
var conditions []string
var join string
@@ -91,6 +96,18 @@ func buildQueryConditions(filterList *model.JobFilterList) (string, string) {
if condition.NumNodes != nil {
conditions = addIntCondition(conditions, `num_nodes`, condition.NumNodes)
}
if condition.FlopsAnyAvg != nil {
conditions = addFloatCondition(conditions, `flops_any_avg`, condition.FlopsAnyAvg)
}
if condition.MemBwAvg != nil {
conditions = addFloatCondition(conditions, `mem_bw_avg`, condition.MemBwAvg)
}
if condition.LoadAvg != nil {
conditions = addFloatCondition(conditions, `load_avg`, condition.LoadAvg)
}
if condition.MemUsedMax != nil {
conditions = addFloatCondition(conditions, `mem_used_max`, condition.MemUsedMax)
}
}
return strings.Join(conditions, " AND "), join
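
For a rough picture of how these branches compose (again a sketch, not part of the diff): each non-nil statistic range contributes one BETWEEN clause, and the clauses are joined with AND, exactly as buildQueryConditions does above:

// Hypothetical composition of two of the new statistic filters,
// assuming the FloatRange shape sketched earlier.
conds := []string{}
conds = addFloatCondition(conds, `flops_any_avg`, &model.FloatRange{From: 100, To: 500})
conds = addFloatCondition(conds, `load_avg`, &model.FloatRange{From: 0, To: 2.5})
where := strings.Join(conds, " AND ")
// where == "flops_any_avg BETWEEN 100.000000 AND 500.000000 AND load_avg BETWEEN 0.000000 AND 2.500000"
_ = where
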

View File

@@ -10,6 +10,7 @@ type Job {
numNodes: Int!
hasProfile: Boolean!
loadAvg: Float
memUsedMax: Float
flopsAnyAvg: Float
memBwAvg: Float
@@ -100,6 +101,10 @@ input JobFilter {
numNodes: IntRange
startTime: TimeRange
hasProfile: Boolean
flopsAnyAvg: FloatRange
memBwAvg: FloatRange
loadAvg: FloatRange
memUsedMax: FloatRange
}
type IntRangeOutput {
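
Finally, a hypothetical JobFilter input literal that a client could send for the new fields. Only the filter field names are taken from the schema above; the from/to bounds of FloatRange are assumed by analogy with IntRange and TimeRange:

// Example JobFilter input in GraphQL argument syntax, embedded here as
// a Go string constant. The FloatRange bound names are an assumption.
const exampleJobFilter = `{
  loadAvg:    { from: 0.0, to: 2.5 },
  memBwAvg:   { from: 50.0, to: 200.0 },
  memUsedMax: { from: 0.0, to: 64000.0 }
}`
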