Fix build errors

Code not yet functional
Jan Eitzinger 2024-06-28 17:08:28 +02:00
parent b3c1f39a0e
commit 130613b717
Signed by: moebiusband
GPG Key ID: 2574BA29B90D6DD5
4 changed files with 231 additions and 207 deletions

View File

@@ -42,6 +42,7 @@ type Config struct {
 type ResolverRoot interface {
 	Cluster() ClusterResolver
 	Job() JobResolver
+	MetricValue() MetricValueResolver
 	Mutation() MutationResolver
 	Query() QueryResolver
 	SubCluster() SubClusterResolver
@@ -90,12 +91,9 @@ type ComplexityRoot struct {
 		ConcurrentJobs   func(childComplexity int) int
 		Duration         func(childComplexity int) int
 		Exclusive        func(childComplexity int) int
-		FlopsAnyAvg      func(childComplexity int) int
+		Footprint        func(childComplexity int) int
 		ID               func(childComplexity int) int
 		JobID            func(childComplexity int) int
-		LoadAvg          func(childComplexity int) int
-		MemBwAvg         func(childComplexity int) int
-		MemUsedMax       func(childComplexity int) int
 		MetaData         func(childComplexity int) int
 		MonitoringStatus func(childComplexity int) int
 		NumAcc           func(childComplexity int) int
@@ -204,6 +202,7 @@ type ComplexityRoot struct {
 	}

 	MetricValue struct {
+		Name  func(childComplexity int) int
 		Unit  func(childComplexity int) int
 		Value func(childComplexity int) int
 	}
@@ -324,10 +323,13 @@ type JobResolver interface {
 	Tags(ctx context.Context, obj *schema.Job) ([]*schema.Tag, error)
 	ConcurrentJobs(ctx context.Context, obj *schema.Job) (*model.JobLinkResultList, error)
+	Footprint(ctx context.Context, obj *schema.Job) ([]*schema.MetricValue, error)
 	MetaData(ctx context.Context, obj *schema.Job) (interface{}, error)
 	UserData(ctx context.Context, obj *schema.Job) (*model.User, error)
 }
+type MetricValueResolver interface {
+	Name(ctx context.Context, obj *schema.MetricValue) (*string, error)
+}
 type MutationResolver interface {
 	CreateTag(ctx context.Context, typeArg string, name string) (*schema.Tag, error)
 	DeleteTag(ctx context.Context, id string) (string, error)
@@ -511,12 +513,12 @@ func (e *executableSchema) Complexity(typeName, field string, childComplexity in
 		return e.complexity.Job.Exclusive(childComplexity), true

-	case "Job.flopsAnyAvg":
-		if e.complexity.Job.FlopsAnyAvg == nil {
+	case "Job.footprint":
+		if e.complexity.Job.Footprint == nil {
 			break
 		}

-		return e.complexity.Job.FlopsAnyAvg(childComplexity), true
+		return e.complexity.Job.Footprint(childComplexity), true

 	case "Job.id":
 		if e.complexity.Job.ID == nil {
@@ -532,27 +534,6 @@ func (e *executableSchema) Complexity(typeName, field string, childComplexity in
 		return e.complexity.Job.JobID(childComplexity), true

-	case "Job.loadAvg":
-		if e.complexity.Job.LoadAvg == nil {
-			break
-		}
-
-		return e.complexity.Job.LoadAvg(childComplexity), true
-
-	case "Job.memBwAvg":
-		if e.complexity.Job.MemBwAvg == nil {
-			break
-		}
-
-		return e.complexity.Job.MemBwAvg(childComplexity), true
-
-	case "Job.memUsedMax":
-		if e.complexity.Job.MemUsedMax == nil {
-			break
-		}
-
-		return e.complexity.Job.MemUsedMax(childComplexity), true
-
 	case "Job.metaData":
 		if e.complexity.Job.MetaData == nil {
 			break
@@ -1057,6 +1038,13 @@ func (e *executableSchema) Complexity(typeName, field string, childComplexity in
 		return e.complexity.MetricStatistics.Min(childComplexity), true

+	case "MetricValue.name":
+		if e.complexity.MetricValue.Name == nil {
+			break
+		}
+
+		return e.complexity.MetricValue.Name(childComplexity), true
+
 	case "MetricValue.unit":
 		if e.complexity.MetricValue.Unit == nil {
 			break
@@ -1744,12 +1732,7 @@ type Job {
   tags: [Tag!]!
   resources: [Resource!]!
   concurrentJobs: JobLinkResultList
-  memUsedMax: Float
-  flopsAnyAvg: Float
-  memBwAvg: Float
-  loadAvg: Float
+  footprint: [MetricValue]
   metaData: Any
   userData: User
 }
@@ -1781,6 +1764,7 @@ type SubCluster {
 }

 type MetricValue {
+  name: String
   unit: Unit!
   value: Float!
 }
@@ -4200,8 +4184,8 @@ func (ec *executionContext) fieldContext_Job_concurrentJobs(ctx context.Context,
 	return fc, nil
 }

-func (ec *executionContext) _Job_memUsedMax(ctx context.Context, field graphql.CollectedField, obj *schema.Job) (ret graphql.Marshaler) {
-	fc, err := ec.fieldContext_Job_memUsedMax(ctx, field)
+func (ec *executionContext) _Job_footprint(ctx context.Context, field graphql.CollectedField, obj *schema.Job) (ret graphql.Marshaler) {
+	fc, err := ec.fieldContext_Job_footprint(ctx, field)
 	if err != nil {
 		return graphql.Null
 	}
@@ -4214,7 +4198,7 @@ func (ec *executionContext) _Job_memUsedMax(ctx context.Context, field graphql.C
 	}()
 	resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
 		ctx = rctx // use context from middleware stack in children
-		return obj.MemUsedMax, nil
+		return ec.resolvers.Job().Footprint(rctx, obj)
 	})
 	if err != nil {
 		ec.Error(ctx, err)
@@ -4223,142 +4207,27 @@ func (ec *executionContext) _Job_memUsedMax(ctx context.Context, field graphql.C
 	if resTmp == nil {
 		return graphql.Null
 	}
-	res := resTmp.(float64)
+	res := resTmp.([]*schema.MetricValue)
 	fc.Result = res
-	return ec.marshalOFloat2float64(ctx, field.Selections, res)
+	return ec.marshalOMetricValue2ᚕᚖgithubᚗcomᚋClusterCockpitᚋccᚑbackendᚋpkgᚋschemaᚐMetricValue(ctx, field.Selections, res)
 }

-func (ec *executionContext) fieldContext_Job_memUsedMax(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) {
+func (ec *executionContext) fieldContext_Job_footprint(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) {
 	fc = &graphql.FieldContext{
 		Object:     "Job",
 		Field:      field,
-		IsMethod:   false,
-		IsResolver: false,
+		IsMethod:   true,
+		IsResolver: true,
 		Child: func(ctx context.Context, field graphql.CollectedField) (*graphql.FieldContext, error) {
-			return nil, errors.New("field of type Float does not have child fields")
-		},
-	}
-	return fc, nil
-}
-
-func (ec *executionContext) _Job_flopsAnyAvg(ctx context.Context, field graphql.CollectedField, obj *schema.Job) (ret graphql.Marshaler) {
-	fc, err := ec.fieldContext_Job_flopsAnyAvg(ctx, field)
-	if err != nil {
-		return graphql.Null
-	}
-	ctx = graphql.WithFieldContext(ctx, fc)
-	defer func() {
-		if r := recover(); r != nil {
-			ec.Error(ctx, ec.Recover(ctx, r))
-			ret = graphql.Null
-		}
-	}()
-	resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
-		ctx = rctx // use context from middleware stack in children
-		return obj.FlopsAnyAvg, nil
-	})
-	if err != nil {
-		ec.Error(ctx, err)
-		return graphql.Null
-	}
-	if resTmp == nil {
-		return graphql.Null
-	}
-	res := resTmp.(float64)
-	fc.Result = res
-	return ec.marshalOFloat2float64(ctx, field.Selections, res)
-}
-
-func (ec *executionContext) fieldContext_Job_flopsAnyAvg(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) {
-	fc = &graphql.FieldContext{
-		Object:     "Job",
-		Field:      field,
-		IsMethod:   false,
-		IsResolver: false,
-		Child: func(ctx context.Context, field graphql.CollectedField) (*graphql.FieldContext, error) {
-			return nil, errors.New("field of type Float does not have child fields")
-		},
-	}
-	return fc, nil
-}
-
-func (ec *executionContext) _Job_memBwAvg(ctx context.Context, field graphql.CollectedField, obj *schema.Job) (ret graphql.Marshaler) {
-	fc, err := ec.fieldContext_Job_memBwAvg(ctx, field)
-	if err != nil {
-		return graphql.Null
-	}
-	ctx = graphql.WithFieldContext(ctx, fc)
-	defer func() {
-		if r := recover(); r != nil {
-			ec.Error(ctx, ec.Recover(ctx, r))
-			ret = graphql.Null
-		}
-	}()
-	resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
-		ctx = rctx // use context from middleware stack in children
-		return obj.MemBwAvg, nil
-	})
-	if err != nil {
-		ec.Error(ctx, err)
-		return graphql.Null
-	}
-	if resTmp == nil {
-		return graphql.Null
-	}
-	res := resTmp.(float64)
-	fc.Result = res
-	return ec.marshalOFloat2float64(ctx, field.Selections, res)
-}
-
-func (ec *executionContext) fieldContext_Job_memBwAvg(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) {
-	fc = &graphql.FieldContext{
-		Object:     "Job",
-		Field:      field,
-		IsMethod:   false,
-		IsResolver: false,
-		Child: func(ctx context.Context, field graphql.CollectedField) (*graphql.FieldContext, error) {
-			return nil, errors.New("field of type Float does not have child fields")
-		},
-	}
-	return fc, nil
-}
-
-func (ec *executionContext) _Job_loadAvg(ctx context.Context, field graphql.CollectedField, obj *schema.Job) (ret graphql.Marshaler) {
-	fc, err := ec.fieldContext_Job_loadAvg(ctx, field)
-	if err != nil {
-		return graphql.Null
-	}
-	ctx = graphql.WithFieldContext(ctx, fc)
-	defer func() {
-		if r := recover(); r != nil {
-			ec.Error(ctx, ec.Recover(ctx, r))
-			ret = graphql.Null
-		}
-	}()
-	resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
-		ctx = rctx // use context from middleware stack in children
-		return obj.LoadAvg, nil
-	})
-	if err != nil {
-		ec.Error(ctx, err)
-		return graphql.Null
-	}
-	if resTmp == nil {
-		return graphql.Null
-	}
-	res := resTmp.(float64)
-	fc.Result = res
-	return ec.marshalOFloat2float64(ctx, field.Selections, res)
-}
-
-func (ec *executionContext) fieldContext_Job_loadAvg(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) {
-	fc = &graphql.FieldContext{
-		Object:     "Job",
-		Field:      field,
-		IsMethod:   false,
-		IsResolver: false,
-		Child: func(ctx context.Context, field graphql.CollectedField) (*graphql.FieldContext, error) {
-			return nil, errors.New("field of type Float does not have child fields")
+			switch field.Name {
+			case "name":
+				return ec.fieldContext_MetricValue_name(ctx, field)
+			case "unit":
+				return ec.fieldContext_MetricValue_unit(ctx, field)
+			case "value":
+				return ec.fieldContext_MetricValue_value(ctx, field)
+			}
+			return nil, fmt.Errorf("no field named %q was found under type MetricValue", field.Name)
 		},
 	}
 	return fc, nil
@@ -5088,14 +4957,8 @@ func (ec *executionContext) fieldContext_JobResultList_items(ctx context.Context
 				return ec.fieldContext_Job_resources(ctx, field)
 			case "concurrentJobs":
 				return ec.fieldContext_Job_concurrentJobs(ctx, field)
-			case "memUsedMax":
-				return ec.fieldContext_Job_memUsedMax(ctx, field)
-			case "flopsAnyAvg":
-				return ec.fieldContext_Job_flopsAnyAvg(ctx, field)
-			case "memBwAvg":
-				return ec.fieldContext_Job_memBwAvg(ctx, field)
-			case "loadAvg":
-				return ec.fieldContext_Job_loadAvg(ctx, field)
+			case "footprint":
+				return ec.fieldContext_Job_footprint(ctx, field)
 			case "metaData":
 				return ec.fieldContext_Job_metaData(ctx, field)
 			case "userData":
@@ -7034,6 +6897,47 @@ func (ec *executionContext) fieldContext_MetricStatistics_max(ctx context.Contex
 	return fc, nil
 }

+func (ec *executionContext) _MetricValue_name(ctx context.Context, field graphql.CollectedField, obj *schema.MetricValue) (ret graphql.Marshaler) {
+	fc, err := ec.fieldContext_MetricValue_name(ctx, field)
+	if err != nil {
+		return graphql.Null
+	}
+	ctx = graphql.WithFieldContext(ctx, fc)
+	defer func() {
+		if r := recover(); r != nil {
+			ec.Error(ctx, ec.Recover(ctx, r))
+			ret = graphql.Null
+		}
+	}()
+	resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
+		ctx = rctx // use context from middleware stack in children
+		return ec.resolvers.MetricValue().Name(rctx, obj)
+	})
+	if err != nil {
+		ec.Error(ctx, err)
+		return graphql.Null
+	}
+	if resTmp == nil {
+		return graphql.Null
+	}
+	res := resTmp.(*string)
+	fc.Result = res
+	return ec.marshalOString2ᚖstring(ctx, field.Selections, res)
+}
+
+func (ec *executionContext) fieldContext_MetricValue_name(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) {
+	fc = &graphql.FieldContext{
+		Object:     "MetricValue",
+		Field:      field,
+		IsMethod:   true,
+		IsResolver: true,
+		Child: func(ctx context.Context, field graphql.CollectedField) (*graphql.FieldContext, error) {
+			return nil, errors.New("field of type String does not have child fields")
+		},
+	}
+	return fc, nil
+}
+
 func (ec *executionContext) _MetricValue_unit(ctx context.Context, field graphql.CollectedField, obj *schema.MetricValue) (ret graphql.Marshaler) {
 	fc, err := ec.fieldContext_MetricValue_unit(ctx, field)
 	if err != nil {
@@ -7869,14 +7773,8 @@ func (ec *executionContext) fieldContext_Query_job(ctx context.Context, field gr
 				return ec.fieldContext_Job_resources(ctx, field)
 			case "concurrentJobs":
 				return ec.fieldContext_Job_concurrentJobs(ctx, field)
-			case "memUsedMax":
-				return ec.fieldContext_Job_memUsedMax(ctx, field)
-			case "flopsAnyAvg":
-				return ec.fieldContext_Job_flopsAnyAvg(ctx, field)
-			case "memBwAvg":
-				return ec.fieldContext_Job_memBwAvg(ctx, field)
-			case "loadAvg":
-				return ec.fieldContext_Job_loadAvg(ctx, field)
+			case "footprint":
+				return ec.fieldContext_Job_footprint(ctx, field)
 			case "metaData":
 				return ec.fieldContext_Job_metaData(ctx, field)
 			case "userData":
@@ -9249,6 +9147,8 @@ func (ec *executionContext) fieldContext_SubCluster_flopRateScalar(ctx context.C
 		IsResolver: false,
 		Child: func(ctx context.Context, field graphql.CollectedField) (*graphql.FieldContext, error) {
 			switch field.Name {
+			case "name":
+				return ec.fieldContext_MetricValue_name(ctx, field)
 			case "unit":
 				return ec.fieldContext_MetricValue_unit(ctx, field)
 			case "value":
@@ -9299,6 +9199,8 @@ func (ec *executionContext) fieldContext_SubCluster_flopRateSimd(ctx context.Con
 		IsResolver: false,
 		Child: func(ctx context.Context, field graphql.CollectedField) (*graphql.FieldContext, error) {
 			switch field.Name {
+			case "name":
+				return ec.fieldContext_MetricValue_name(ctx, field)
 			case "unit":
 				return ec.fieldContext_MetricValue_unit(ctx, field)
 			case "value":
@@ -9349,6 +9251,8 @@ func (ec *executionContext) fieldContext_SubCluster_memoryBandwidth(ctx context.
 		IsResolver: false,
 		Child: func(ctx context.Context, field graphql.CollectedField) (*graphql.FieldContext, error) {
 			switch field.Name {
+			case "name":
+				return ec.fieldContext_MetricValue_name(ctx, field)
 			case "unit":
 				return ec.fieldContext_MetricValue_unit(ctx, field)
 			case "value":
@@ -13159,14 +13063,39 @@ func (ec *executionContext) _Job(ctx context.Context, sel ast.SelectionSet, obj
 			}
 			out.Concurrently(i, func(ctx context.Context) graphql.Marshaler { return innerFunc(ctx, out) })
-		case "memUsedMax":
-			out.Values[i] = ec._Job_memUsedMax(ctx, field, obj)
-		case "flopsAnyAvg":
-			out.Values[i] = ec._Job_flopsAnyAvg(ctx, field, obj)
-		case "memBwAvg":
-			out.Values[i] = ec._Job_memBwAvg(ctx, field, obj)
-		case "loadAvg":
-			out.Values[i] = ec._Job_loadAvg(ctx, field, obj)
+		case "footprint":
+			field := field
+
+			innerFunc := func(ctx context.Context, fs *graphql.FieldSet) (res graphql.Marshaler) {
+				defer func() {
+					if r := recover(); r != nil {
+						ec.Error(ctx, ec.Recover(ctx, r))
+					}
+				}()
+				res = ec._Job_footprint(ctx, field, obj)
+				return res
+			}
+
+			if field.Deferrable != nil {
+				dfs, ok := deferred[field.Deferrable.Label]
+				di := 0
+				if ok {
+					dfs.AddField(field)
+					di = len(dfs.Values) - 1
+				} else {
+					dfs = graphql.NewFieldSet([]graphql.CollectedField{field})
+					deferred[field.Deferrable.Label] = dfs
+				}
+				dfs.Concurrently(di, func(ctx context.Context) graphql.Marshaler {
+					return innerFunc(ctx, dfs)
+				})
+
+				// don't run the out.Concurrently() call below
+				out.Values[i] = graphql.Null
+				continue
+			}
+
+			out.Concurrently(i, func(ctx context.Context) graphql.Marshaler { return innerFunc(ctx, out) })
 		case "metaData":
 			field := field
@@ -13879,15 +13808,48 @@ func (ec *executionContext) _MetricValue(ctx context.Context, sel ast.SelectionS
 		switch field.Name {
 		case "__typename":
 			out.Values[i] = graphql.MarshalString("MetricValue")
+		case "name":
+			field := field
+
+			innerFunc := func(ctx context.Context, fs *graphql.FieldSet) (res graphql.Marshaler) {
+				defer func() {
+					if r := recover(); r != nil {
+						ec.Error(ctx, ec.Recover(ctx, r))
+					}
+				}()
+				res = ec._MetricValue_name(ctx, field, obj)
+				return res
+			}
+
+			if field.Deferrable != nil {
+				dfs, ok := deferred[field.Deferrable.Label]
+				di := 0
+				if ok {
+					dfs.AddField(field)
+					di = len(dfs.Values) - 1
+				} else {
+					dfs = graphql.NewFieldSet([]graphql.CollectedField{field})
+					deferred[field.Deferrable.Label] = dfs
+				}
+				dfs.Concurrently(di, func(ctx context.Context) graphql.Marshaler {
+					return innerFunc(ctx, dfs)
+				})
+
+				// don't run the out.Concurrently() call below
+				out.Values[i] = graphql.Null
+				continue
+			}
+
+			out.Concurrently(i, func(ctx context.Context) graphql.Marshaler { return innerFunc(ctx, out) })
 		case "unit":
 			out.Values[i] = ec._MetricValue_unit(ctx, field, obj)
 			if out.Values[i] == graphql.Null {
-				out.Invalids++
+				atomic.AddUint32(&out.Invalids, 1)
 			}
 		case "value":
 			out.Values[i] = ec._MetricValue_value(ctx, field, obj)
 			if out.Values[i] == graphql.Null {
-				out.Invalids++
+				atomic.AddUint32(&out.Invalids, 1)
 			}
 		default:
 			panic("unknown field " + strconv.Quote(field.Name))
@@ -17279,6 +17241,54 @@ func (ec *executionContext) marshalOMetricStatistics2githubᚗcomᚋClusterCockp
 	return ec._MetricStatistics(ctx, sel, &v)
 }

+func (ec *executionContext) marshalOMetricValue2ᚕᚖgithubᚗcomᚋClusterCockpitᚋccᚑbackendᚋpkgᚋschemaᚐMetricValue(ctx context.Context, sel ast.SelectionSet, v []*schema.MetricValue) graphql.Marshaler {
+	if v == nil {
+		return graphql.Null
+	}
+	ret := make(graphql.Array, len(v))
+	var wg sync.WaitGroup
+	isLen1 := len(v) == 1
+	if !isLen1 {
+		wg.Add(len(v))
+	}
+	for i := range v {
+		i := i
+		fc := &graphql.FieldContext{
+			Index:  &i,
+			Result: &v[i],
+		}
+		ctx := graphql.WithFieldContext(ctx, fc)
+		f := func(i int) {
+			defer func() {
+				if r := recover(); r != nil {
+					ec.Error(ctx, ec.Recover(ctx, r))
+					ret = nil
+				}
+			}()
+			if !isLen1 {
+				defer wg.Done()
+			}
+			ret[i] = ec.marshalOMetricValue2ᚖgithubᚗcomᚋClusterCockpitᚋccᚑbackendᚋpkgᚋschemaᚐMetricValue(ctx, sel, v[i])
+		}
+		if isLen1 {
+			f(i)
+		} else {
+			go f(i)
+		}
+	}
+	wg.Wait()
+
+	return ret
+}
+
+func (ec *executionContext) marshalOMetricValue2ᚖgithubᚗcomᚋClusterCockpitᚋccᚑbackendᚋpkgᚋschemaᚐMetricValue(ctx context.Context, sel ast.SelectionSet, v *schema.MetricValue) graphql.Marshaler {
+	if v == nil {
+		return graphql.Null
+	}
+	return ec._MetricValue(ctx, sel, v)
+}
+
 func (ec *executionContext) unmarshalOOrderByInput2ᚖgithubᚗcomᚋClusterCockpitᚋccᚑbackendᚋinternalᚋgraphᚋmodelᚐOrderByInput(ctx context.Context, v interface{}) (*model.OrderByInput, error) {
 	if v == nil {
 		return nil, nil

View File

@@ -44,6 +44,11 @@ func (r *jobResolver) ConcurrentJobs(ctx context.Context, obj *schema.Job) (*mod
 	return nil, nil
 }

+// Footprint is the resolver for the footprint field.
+func (r *jobResolver) Footprint(ctx context.Context, obj *schema.Job) ([]*schema.MetricValue, error) {
+	panic(fmt.Errorf("not implemented: Footprint - footprint"))
+}
+
 // MetaData is the resolver for the metaData field.
 func (r *jobResolver) MetaData(ctx context.Context, obj *schema.Job) (interface{}, error) {
 	return r.Repo.FetchMetadata(obj)
@@ -54,6 +59,11 @@ func (r *jobResolver) UserData(ctx context.Context, obj *schema.Job) (*model.Use
 	return repository.GetUserRepository().FetchUserInCtx(ctx, obj.User)
 }

+// Name is the resolver for the name field.
+func (r *metricValueResolver) Name(ctx context.Context, obj *schema.MetricValue) (*string, error) {
+	panic(fmt.Errorf("not implemented: Name - name"))
+}
+
 // CreateTag is the resolver for the createTag field.
 func (r *mutationResolver) CreateTag(ctx context.Context, typeArg string, name string) (*schema.Tag, error) {
 	id, err := r.Repo.CreateTag(typeArg, name)
@@ -392,6 +402,9 @@ func (r *Resolver) Cluster() generated.ClusterResolver { return &clusterResolver
 // Job returns generated.JobResolver implementation.
 func (r *Resolver) Job() generated.JobResolver { return &jobResolver{r} }

+// MetricValue returns generated.MetricValueResolver implementation.
+func (r *Resolver) MetricValue() generated.MetricValueResolver { return &metricValueResolver{r} }
+
 // Mutation returns generated.MutationResolver implementation.
 func (r *Resolver) Mutation() generated.MutationResolver { return &mutationResolver{r} }
@@ -403,6 +416,7 @@ func (r *Resolver) SubCluster() generated.SubClusterResolver { return &subCluste
 type clusterResolver struct{ *Resolver }
 type jobResolver struct{ *Resolver }
+type metricValueResolver struct{ *Resolver }
 type mutationResolver struct{ *Resolver }
 type queryResolver struct{ *Resolver }
 type subClusterResolver struct{ *Resolver }

View File

@@ -86,13 +86,13 @@ func HandleImportFlag(flag string) error {
 			StartTimeUnix: jobMeta.StartTime,
 		}

-		// TODO: Other metrics...
-		job.LoadAvg = loadJobStat(&jobMeta, "cpu_load")
-		job.FlopsAnyAvg = loadJobStat(&jobMeta, "flops_any")
-		job.MemUsedMax = loadJobStat(&jobMeta, "mem_used")
-		job.MemBwAvg = loadJobStat(&jobMeta, "mem_bw")
-		job.NetBwAvg = loadJobStat(&jobMeta, "net_bw")
-		job.FileBwAvg = loadJobStat(&jobMeta, "file_bw")
+		// TODO: Do loop for new sub structure for stats
+		// job.LoadAvg = loadJobStat(&jobMeta, "cpu_load")
+		// job.FlopsAnyAvg = loadJobStat(&jobMeta, "flops_any")
+		// job.MemUsedMax = loadJobStat(&jobMeta, "mem_used")
+		// job.MemBwAvg = loadJobStat(&jobMeta, "mem_bw")
+		// job.NetBwAvg = loadJobStat(&jobMeta, "net_bw")
+		// job.FileBwAvg = loadJobStat(&jobMeta, "file_bw")

 		job.RawResources, err = json.Marshal(job.Resources)
 		if err != nil {

View File

@@ -60,13 +60,13 @@ func InitDB() error {
 			StartTimeUnix: jobMeta.StartTime,
 		}

-		// TODO: Other metrics...
-		job.LoadAvg = loadJobStat(jobMeta, "cpu_load")
-		job.FlopsAnyAvg = loadJobStat(jobMeta, "flops_any")
-		job.MemUsedMax = loadJobStat(jobMeta, "mem_used")
-		job.MemBwAvg = loadJobStat(jobMeta, "mem_bw")
-		job.NetBwAvg = loadJobStat(jobMeta, "net_bw")
-		job.FileBwAvg = loadJobStat(jobMeta, "file_bw")
+		// TODO: Convert to loop for new footprint layout
+		// job.LoadAvg = loadJobStat(jobMeta, "cpu_load")
+		// job.FlopsAnyAvg = loadJobStat(jobMeta, "flops_any")
+		// job.MemUsedMax = loadJobStat(jobMeta, "mem_used")
+		// job.MemBwAvg = loadJobStat(jobMeta, "mem_bw")
+		// job.NetBwAvg = loadJobStat(jobMeta, "net_bw")
+		// job.FileBwAvg = loadJobStat(jobMeta, "file_bw")

 		job.RawResources, err = json.Marshal(job.Resources)
 		if err != nil {
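
Both import paths carry the same TODO: replace the per-field loadJobStat assignments with a loop over the new footprint layout. A minimal sketch of what that loop could look like, assuming a hypothetical job.Footprint map on schema.Job (no such field exists in this commit, and the final layout may differ; HandleImportFlag would pass &jobMeta instead of jobMeta):

	// Hypothetical sketch only: schema.Job has no Footprint field yet in this commit.
	footprintMetrics := []string{"cpu_load", "flops_any", "mem_used", "mem_bw", "net_bw", "file_bw"}
	job.Footprint = make(map[string]float64, len(footprintMetrics))
	for _, metric := range footprintMetrics {
		// loadJobStat already extracts the per-metric statistic from the job metadata.
		job.Footprint[metric] = loadJobStat(jobMeta, metric)
	}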