Add GraphQL endpoint for counting jobs

commit dcb3fd6a6a (parent 2ca27a83b9)
@@ -110,6 +110,10 @@ func setup(t *testing.T) *api.RestApi {
     }

     resolver := &graph.Resolver{DB: db, Repo: &repository.JobRepository{DB: db}}
+    if err := resolver.Repo.Init(); err != nil {
+        t.Fatal(err)
+    }
+
     return &api.RestApi{
         JobRepository: resolver.Repo,
         Resolver: resolver,
@@ -59,6 +59,11 @@ type ComplexityRoot struct {
         Partitions func(childComplexity int) int
     }

+    Count struct {
+        Count func(childComplexity int) int
+        Name func(childComplexity int) int
+    }
+
     FilterRanges struct {
         Duration func(childComplexity int) int
         NumNodes func(childComplexity int) int
@@ -178,6 +183,7 @@ type ComplexityRoot struct {
         Job func(childComplexity int, id string) int
         JobMetrics func(childComplexity int, id string, metrics []string, scopes []schema.MetricScope) int
         Jobs func(childComplexity int, filter []*model.JobFilter, page *model.PageRequest, order *model.OrderByInput) int
+        JobsCount func(childComplexity int, filter []*model.JobFilter, groupBy model.Aggregate, limit *int) int
         JobsFootprints func(childComplexity int, filter []*model.JobFilter, metrics []string) int
         JobsStatistics func(childComplexity int, filter []*model.JobFilter, groupBy *model.Aggregate) int
         NodeMetrics func(childComplexity int, cluster string, partition *string, nodes []string, scopes []schema.MetricScope, metrics []string, from time.Time, to time.Time) int
@@ -244,6 +250,7 @@ type QueryResolver interface {
     JobsFootprints(ctx context.Context, filter []*model.JobFilter, metrics []string) ([]*model.MetricFootprints, error)
     Jobs(ctx context.Context, filter []*model.JobFilter, page *model.PageRequest, order *model.OrderByInput) (*model.JobResultList, error)
     JobsStatistics(ctx context.Context, filter []*model.JobFilter, groupBy *model.Aggregate) ([]*model.JobsStatistics, error)
+    JobsCount(ctx context.Context, filter []*model.JobFilter, groupBy model.Aggregate, limit *int) ([]*model.Count, error)
     RooflineHeatmap(ctx context.Context, filter []*model.JobFilter, rows int, cols int, minX float64, minY float64, maxX float64, maxY float64) ([][]float64, error)
     NodeMetrics(ctx context.Context, cluster string, partition *string, nodes []string, scopes []schema.MetricScope, metrics []string, from time.Time, to time.Time) ([]*model.NodeMetrics, error)
 }
@@ -312,6 +319,20 @@ func (e *executableSchema) Complexity(typeName, field string, childComplexity in

         return e.complexity.Cluster.Partitions(childComplexity), true

+    case "Count.count":
+        if e.complexity.Count.Count == nil {
+            break
+        }
+
+        return e.complexity.Count.Count(childComplexity), true
+
+    case "Count.name":
+        if e.complexity.Count.Name == nil {
+            break
+        }
+
+        return e.complexity.Count.Name(childComplexity), true
+
     case "FilterRanges.duration":
         if e.complexity.FilterRanges.Duration == nil {
             break
@@ -884,6 +905,18 @@ func (e *executableSchema) Complexity(typeName, field string, childComplexity in

         return e.complexity.Query.Jobs(childComplexity, args["filter"].([]*model.JobFilter), args["page"].(*model.PageRequest), args["order"].(*model.OrderByInput)), true

+    case "Query.jobsCount":
+        if e.complexity.Query.JobsCount == nil {
+            break
+        }
+
+        args, err := ec.field_Query_jobsCount_args(context.TODO(), rawArgs)
+        if err != nil {
+            return 0, false
+        }
+
+        return e.complexity.Query.JobsCount(childComplexity, args["filter"].([]*model.JobFilter), args["groupBy"].(model.Aggregate), args["limit"].(*int)), true
+
     case "Query.jobsFootprints":
         if e.complexity.Query.JobsFootprints == nil {
             break
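These generated hooks give the new Count.name, Count.count and Query.jobsCount fields a per-field complexity cost, which the HTTP handler can use to price a query before executing it. A minimal standalone sketch of wiring such a cap onto the schema built in server.go further below; the gqlgen extension package, the limit of 500 and the address are illustrative assumptions, not something this commit configures:

package main

import (
    "log"
    "net/http"

    "github.com/99designs/gqlgen/graphql/handler"
    "github.com/99designs/gqlgen/graphql/handler/extension"

    "github.com/ClusterCockpit/cc-backend/graph"
    "github.com/ClusterCockpit/cc-backend/graph/generated"
)

func main() {
    // DB and Repo would be wired up as in server.go below; left zero here.
    resolver := &graph.Resolver{}
    srv := handler.NewDefaultServer(generated.NewExecutableSchema(generated.Config{Resolvers: resolver}))

    // Reject queries whose summed per-field complexity exceeds the cap;
    // the per-field costs come from the generated ComplexityRoot hooks above.
    srv.Use(extension.FixedComplexityLimit(500))

    http.Handle("/query", srv)
    log.Fatal(http.ListenAndServe(":8080", nil)) // address is an assumption
}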
@@ -1285,6 +1318,11 @@ type NodeMetrics {
   metrics: [JobMetricWithName!]!
 }

+type Count {
+  name: String!
+  count: Int!
+}
+
 type Query {
   clusters: [Cluster!]! # List of all clusters
   tags: [Tag!]! # List of all tags
@@ -1295,6 +1333,7 @@ type Query {

   jobs(filter: [JobFilter!], page: PageRequest, order: OrderByInput): JobResultList!
   jobsStatistics(filter: [JobFilter!], groupBy: Aggregate): [JobsStatistics!]!
+  jobsCount(filter: [JobFilter]!, groupBy: Aggregate!, limit: Int): [Count!]!

   rooflineHeatmap(filter: [JobFilter!]!, rows: Int!, cols: Int!, minX: Float!, minY: Float!, maxX: Float!, maxY: Float!): [[Float!]!]!

@@ -1570,6 +1609,39 @@ func (ec *executionContext) field_Query_job_args(ctx context.Context, rawArgs ma
     return args, nil
 }

+func (ec *executionContext) field_Query_jobsCount_args(ctx context.Context, rawArgs map[string]interface{}) (map[string]interface{}, error) {
+    var err error
+    args := map[string]interface{}{}
+    var arg0 []*model.JobFilter
+    if tmp, ok := rawArgs["filter"]; ok {
+        ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("filter"))
+        arg0, err = ec.unmarshalNJobFilter2ᚕᚖgithubᚗcomᚋClusterCockpitᚋccᚑbackendᚋgraphᚋmodelᚐJobFilter(ctx, tmp)
+        if err != nil {
+            return nil, err
+        }
+    }
+    args["filter"] = arg0
+    var arg1 model.Aggregate
+    if tmp, ok := rawArgs["groupBy"]; ok {
+        ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("groupBy"))
+        arg1, err = ec.unmarshalNAggregate2githubᚗcomᚋClusterCockpitᚋccᚑbackendᚋgraphᚋmodelᚐAggregate(ctx, tmp)
+        if err != nil {
+            return nil, err
+        }
+    }
+    args["groupBy"] = arg1
+    var arg2 *int
+    if tmp, ok := rawArgs["limit"]; ok {
+        ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("limit"))
+        arg2, err = ec.unmarshalOInt2ᚖint(ctx, tmp)
+        if err != nil {
+            return nil, err
+        }
+    }
+    args["limit"] = arg2
+    return args, nil
+}
+
 func (ec *executionContext) field_Query_jobsFootprints_args(ctx context.Context, rawArgs map[string]interface{}) (map[string]interface{}, error) {
     var err error
     args := map[string]interface{}{}
@@ -2072,6 +2144,76 @@ func (ec *executionContext) _Cluster_partitions(ctx context.Context, field graph
     return ec.marshalNPartition2ᚕᚖgithubᚗcomᚋClusterCockpitᚋccᚑbackendᚋgraphᚋmodelᚐPartitionᚄ(ctx, field.Selections, res)
 }

+func (ec *executionContext) _Count_name(ctx context.Context, field graphql.CollectedField, obj *model.Count) (ret graphql.Marshaler) {
+    defer func() {
+        if r := recover(); r != nil {
+            ec.Error(ctx, ec.Recover(ctx, r))
+            ret = graphql.Null
+        }
+    }()
+    fc := &graphql.FieldContext{
+        Object: "Count",
+        Field: field,
+        Args: nil,
+        IsMethod: false,
+        IsResolver: false,
+    }
+
+    ctx = graphql.WithFieldContext(ctx, fc)
+    resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
+        ctx = rctx // use context from middleware stack in children
+        return obj.Name, nil
+    })
+    if err != nil {
+        ec.Error(ctx, err)
+        return graphql.Null
+    }
+    if resTmp == nil {
+        if !graphql.HasFieldError(ctx, fc) {
+            ec.Errorf(ctx, "must not be null")
+        }
+        return graphql.Null
+    }
+    res := resTmp.(string)
+    fc.Result = res
+    return ec.marshalNString2string(ctx, field.Selections, res)
+}
+
+func (ec *executionContext) _Count_count(ctx context.Context, field graphql.CollectedField, obj *model.Count) (ret graphql.Marshaler) {
+    defer func() {
+        if r := recover(); r != nil {
+            ec.Error(ctx, ec.Recover(ctx, r))
+            ret = graphql.Null
+        }
+    }()
+    fc := &graphql.FieldContext{
+        Object: "Count",
+        Field: field,
+        Args: nil,
+        IsMethod: false,
+        IsResolver: false,
+    }
+
+    ctx = graphql.WithFieldContext(ctx, fc)
+    resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
+        ctx = rctx // use context from middleware stack in children
+        return obj.Count, nil
+    })
+    if err != nil {
+        ec.Error(ctx, err)
+        return graphql.Null
+    }
+    if resTmp == nil {
+        if !graphql.HasFieldError(ctx, fc) {
+            ec.Errorf(ctx, "must not be null")
+        }
+        return graphql.Null
+    }
+    res := resTmp.(int)
+    fc.Result = res
+    return ec.marshalNInt2int(ctx, field.Selections, res)
+}
+
 func (ec *executionContext) _FilterRanges_duration(ctx context.Context, field graphql.CollectedField, obj *model.FilterRanges) (ret graphql.Marshaler) {
     defer func() {
         if r := recover(); r != nil {
@@ -4886,6 +5028,48 @@ func (ec *executionContext) _Query_jobsStatistics(ctx context.Context, field gra
     return ec.marshalNJobsStatistics2ᚕᚖgithubᚗcomᚋClusterCockpitᚋccᚑbackendᚋgraphᚋmodelᚐJobsStatisticsᚄ(ctx, field.Selections, res)
 }

+func (ec *executionContext) _Query_jobsCount(ctx context.Context, field graphql.CollectedField) (ret graphql.Marshaler) {
+    defer func() {
+        if r := recover(); r != nil {
+            ec.Error(ctx, ec.Recover(ctx, r))
+            ret = graphql.Null
+        }
+    }()
+    fc := &graphql.FieldContext{
+        Object: "Query",
+        Field: field,
+        Args: nil,
+        IsMethod: true,
+        IsResolver: true,
+    }
+
+    ctx = graphql.WithFieldContext(ctx, fc)
+    rawArgs := field.ArgumentMap(ec.Variables)
+    args, err := ec.field_Query_jobsCount_args(ctx, rawArgs)
+    if err != nil {
+        ec.Error(ctx, err)
+        return graphql.Null
+    }
+    fc.Args = args
+    resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
+        ctx = rctx // use context from middleware stack in children
+        return ec.resolvers.Query().JobsCount(rctx, args["filter"].([]*model.JobFilter), args["groupBy"].(model.Aggregate), args["limit"].(*int))
+    })
+    if err != nil {
+        ec.Error(ctx, err)
+        return graphql.Null
+    }
+    if resTmp == nil {
+        if !graphql.HasFieldError(ctx, fc) {
+            ec.Errorf(ctx, "must not be null")
+        }
+        return graphql.Null
+    }
+    res := resTmp.([]*model.Count)
+    fc.Result = res
+    return ec.marshalNCount2ᚕᚖgithubᚗcomᚋClusterCockpitᚋccᚑbackendᚋgraphᚋmodelᚐCountᚄ(ctx, field.Selections, res)
+}
+
 func (ec *executionContext) _Query_rooflineHeatmap(ctx context.Context, field graphql.CollectedField) (ret graphql.Marshaler) {
     defer func() {
         if r := recover(); r != nil {
@@ -7288,6 +7472,38 @@ func (ec *executionContext) _Cluster(ctx context.Context, sel ast.SelectionSet,
     return out
 }

+var countImplementors = []string{"Count"}
+
+func (ec *executionContext) _Count(ctx context.Context, sel ast.SelectionSet, obj *model.Count) graphql.Marshaler {
+    fields := graphql.CollectFields(ec.OperationContext, sel, countImplementors)
+
+    out := graphql.NewFieldSet(fields)
+    var invalids uint32
+    for i, field := range fields {
+        switch field.Name {
+        case "__typename":
+            out.Values[i] = graphql.MarshalString("Count")
+        case "name":
+            out.Values[i] = ec._Count_name(ctx, field, obj)
+            if out.Values[i] == graphql.Null {
+                invalids++
+            }
+        case "count":
+            out.Values[i] = ec._Count_count(ctx, field, obj)
+            if out.Values[i] == graphql.Null {
+                invalids++
+            }
+        default:
+            panic("unknown field " + strconv.Quote(field.Name))
+        }
+    }
+    out.Dispatch()
+    if invalids > 0 {
+        return graphql.Null
+    }
+    return out
+}
+
 var filterRangesImplementors = []string{"FilterRanges"}

 func (ec *executionContext) _FilterRanges(ctx context.Context, sel ast.SelectionSet, obj *model.FilterRanges) graphql.Marshaler {
@@ -8061,6 +8277,20 @@ func (ec *executionContext) _Query(ctx context.Context, sel ast.SelectionSet) gr
             }
             return res
         })
+    case "jobsCount":
+        field := field
+        out.Concurrently(i, func() (res graphql.Marshaler) {
+            defer func() {
+                if r := recover(); r != nil {
+                    ec.Error(ctx, ec.Recover(ctx, r))
+                }
+            }()
+            res = ec._Query_jobsCount(ctx, field)
+            if res == graphql.Null {
+                atomic.AddUint32(&invalids, 1)
+            }
+            return res
+        })
     case "rooflineHeatmap":
         field := field
         out.Concurrently(i, func() (res graphql.Marshaler) {
@@ -8568,6 +8798,16 @@ func (ec *executionContext) marshalNAccelerator2ᚖgithubᚗcomᚋClusterCockpit
     return ec._Accelerator(ctx, sel, v)
 }

+func (ec *executionContext) unmarshalNAggregate2githubᚗcomᚋClusterCockpitᚋccᚑbackendᚋgraphᚋmodelᚐAggregate(ctx context.Context, v interface{}) (model.Aggregate, error) {
+    var res model.Aggregate
+    err := res.UnmarshalGQL(v)
+    return res, graphql.ErrorOnPath(ctx, err)
+}
+
+func (ec *executionContext) marshalNAggregate2githubᚗcomᚋClusterCockpitᚋccᚑbackendᚋgraphᚋmodelᚐAggregate(ctx context.Context, sel ast.SelectionSet, v model.Aggregate) graphql.Marshaler {
+    return v
+}
+
 func (ec *executionContext) unmarshalNBoolean2bool(ctx context.Context, v interface{}) (bool, error) {
     res, err := graphql.UnmarshalBoolean(v)
     return res, graphql.ErrorOnPath(ctx, err)
@@ -8630,6 +8870,53 @@ func (ec *executionContext) marshalNCluster2ᚖgithubᚗcomᚋClusterCockpitᚋc
     return ec._Cluster(ctx, sel, v)
 }

+func (ec *executionContext) marshalNCount2ᚕᚖgithubᚗcomᚋClusterCockpitᚋccᚑbackendᚋgraphᚋmodelᚐCountᚄ(ctx context.Context, sel ast.SelectionSet, v []*model.Count) graphql.Marshaler {
+    ret := make(graphql.Array, len(v))
+    var wg sync.WaitGroup
+    isLen1 := len(v) == 1
+    if !isLen1 {
+        wg.Add(len(v))
+    }
+    for i := range v {
+        i := i
+        fc := &graphql.FieldContext{
+            Index: &i,
+            Result: &v[i],
+        }
+        ctx := graphql.WithFieldContext(ctx, fc)
+        f := func(i int) {
+            defer func() {
+                if r := recover(); r != nil {
+                    ec.Error(ctx, ec.Recover(ctx, r))
+                    ret = nil
+                }
+            }()
+            if !isLen1 {
+                defer wg.Done()
+            }
+            ret[i] = ec.marshalNCount2ᚖgithubᚗcomᚋClusterCockpitᚋccᚑbackendᚋgraphᚋmodelᚐCount(ctx, sel, v[i])
+        }
+        if isLen1 {
+            f(i)
+        } else {
+            go f(i)
+        }
+
+    }
+    wg.Wait()
+    return ret
+}
+
+func (ec *executionContext) marshalNCount2ᚖgithubᚗcomᚋClusterCockpitᚋccᚑbackendᚋgraphᚋmodelᚐCount(ctx context.Context, sel ast.SelectionSet, v *model.Count) graphql.Marshaler {
+    if v == nil {
+        if !graphql.HasFieldError(ctx, graphql.GetFieldContext(ctx)) {
+            ec.Errorf(ctx, "must not be null")
+        }
+        return graphql.Null
+    }
+    return ec._Count(ctx, sel, v)
+}
+
 func (ec *executionContext) marshalNFilterRanges2ᚖgithubᚗcomᚋClusterCockpitᚋccᚑbackendᚋgraphᚋmodelᚐFilterRanges(ctx context.Context, sel ast.SelectionSet, v *model.FilterRanges) graphql.Marshaler {
     if v == nil {
         if !graphql.HasFieldError(ctx, graphql.GetFieldContext(ctx)) {
@@ -8954,6 +9241,27 @@ func (ec *executionContext) marshalNJob2ᚖgithubᚗcomᚋClusterCockpitᚋccᚑ
     return ec._Job(ctx, sel, v)
 }

+func (ec *executionContext) unmarshalNJobFilter2ᚕᚖgithubᚗcomᚋClusterCockpitᚋccᚑbackendᚋgraphᚋmodelᚐJobFilter(ctx context.Context, v interface{}) ([]*model.JobFilter, error) {
+    var vSlice []interface{}
+    if v != nil {
+        if tmp1, ok := v.([]interface{}); ok {
+            vSlice = tmp1
+        } else {
+            vSlice = []interface{}{v}
+        }
+    }
+    var err error
+    res := make([]*model.JobFilter, len(vSlice))
+    for i := range vSlice {
+        ctx := graphql.WithPathContext(ctx, graphql.NewPathWithIndex(i))
+        res[i], err = ec.unmarshalOJobFilter2ᚖgithubᚗcomᚋClusterCockpitᚋccᚑbackendᚋgraphᚋmodelᚐJobFilter(ctx, vSlice[i])
+        if err != nil {
+            return nil, err
+        }
+    }
+    return res, nil
+}
+
 func (ec *executionContext) unmarshalNJobFilter2ᚕᚖgithubᚗcomᚋClusterCockpitᚋccᚑbackendᚋgraphᚋmodelᚐJobFilterᚄ(ctx context.Context, v interface{}) ([]*model.JobFilter, error) {
     var vSlice []interface{}
     if v != nil {
@@ -10007,6 +10315,14 @@ func (ec *executionContext) unmarshalOJobFilter2ᚕᚖgithubᚗcomᚋClusterCock
     return res, nil
 }

+func (ec *executionContext) unmarshalOJobFilter2ᚖgithubᚗcomᚋClusterCockpitᚋccᚑbackendᚋgraphᚋmodelᚐJobFilter(ctx context.Context, v interface{}) (*model.JobFilter, error) {
+    if v == nil {
+        return nil, nil
+    }
+    res, err := ec.unmarshalInputJobFilter(ctx, v)
+    return &res, graphql.ErrorOnPath(ctx, err)
+}
+
 func (ec *executionContext) unmarshalOJobState2ᚕgithubᚗcomᚋClusterCockpitᚋccᚑbackendᚋschemaᚐJobStateᚄ(ctx context.Context, v interface{}) ([]schema.JobState, error) {
     if v == nil {
         return nil, nil
@@ -17,6 +17,11 @@ type Accelerator struct {
     Model string `json:"model"`
 }

+type Count struct {
+    Name string `json:"name"`
+    Count int `json:"count"`
+}
+
 type FilterRanges struct {
     Duration *IntRangeOutput `json:"duration"`
     NumNodes *IntRangeOutput `json:"numNodes"`
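With these json tags, a slice of Count values serializes to a compact list of {name, count} objects. A standalone sketch of that (the struct is copied here so the snippet compiles on its own; the cluster name "emmy" is made up):

package main

import (
    "encoding/json"
    "fmt"
)

// Copy of graph/model.Count from the hunk above.
type Count struct {
    Name  string `json:"name"`
    Count int    `json:"count"`
}

func main() {
    counts := []Count{{Name: "emmy", Count: 42}} // hypothetical cluster name and count
    b, _ := json.Marshal(counts)
    fmt.Println(string(b)) // [{"name":"emmy","count":42}]
}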
@@ -126,6 +126,11 @@ type NodeMetrics {
   metrics: [JobMetricWithName!]!
 }

+type Count {
+  name: String!
+  count: Int!
+}
+
 type Query {
   clusters: [Cluster!]! # List of all clusters
   tags: [Tag!]! # List of all tags
@@ -136,6 +141,7 @@ type Query {

   jobs(filter: [JobFilter!], page: PageRequest, order: OrderByInput): JobResultList!
   jobsStatistics(filter: [JobFilter!], groupBy: Aggregate): [JobsStatistics!]!
+  jobsCount(filter: [JobFilter]!, groupBy: Aggregate!, limit: Int): [Count!]!

   rooflineHeatmap(filter: [JobFilter!]!, rows: Int!, cols: Int!, minX: Float!, minY: Float!, maxX: Float!, maxY: Float!): [[Float!]!]!

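With the schema extended as above, the new field is reachable through the regular /query route. A minimal sketch of issuing such a query from Go; it assumes a cc-backend listening on localhost:8080, an already-authorized request, and that CLUSTER is the Aggregate enum value behind model.AggregateCluster, none of which is spelled out in this diff:

package main

import (
    "bytes"
    "encoding/json"
    "fmt"
    "io"
    "net/http"
)

func main() {
    // filter is a required list, so an empty list is passed to count everything
    // the caller is allowed to see; CLUSTER is an assumed enum spelling.
    query := `{ jobsCount(filter: [], groupBy: CLUSTER) { name count } }`

    body, _ := json.Marshal(map[string]string{"query": query})
    resp, err := http.Post("http://localhost:8080/query", "application/json", bytes.NewReader(body))
    if err != nil {
        panic(err)
    }
    defer resp.Body.Close()

    data, _ := io.ReadAll(resp.Body)
    fmt.Println(string(data))
    // Expected shape: {"data":{"jobsCount":[{"name":"<cluster>","count":<n>}, ...]}}
}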
@@ -182,6 +182,22 @@ func (r *queryResolver) JobsStatistics(ctx context.Context, filter []*model.JobF
     return r.jobsStatistics(ctx, filter, groupBy)
 }

+func (r *queryResolver) JobsCount(ctx context.Context, filter []*model.JobFilter, groupBy model.Aggregate, limit *int) ([]*model.Count, error) {
+    counts, err := r.Repo.CountGroupedJobs(ctx, groupBy, filter, limit)
+    if err != nil {
+        return nil, err
+    }
+
+    res := make([]*model.Count, 0, len(counts))
+    for name, count := range counts {
+        res = append(res, &model.Count{
+            Name: name,
+            Count: count,
+        })
+    }
+    return res, nil
+}
+
 func (r *queryResolver) RooflineHeatmap(ctx context.Context, filter []*model.JobFilter, rows int, cols int, minX float64, minY float64, maxX float64, maxY float64) ([][]float64, error) {
     return r.rooflineHeatmap(ctx, filter, rows, cols, minX, minY, maxX, maxY)
 }
@@ -7,6 +7,7 @@ import (
     "strconv"

     "github.com/ClusterCockpit/cc-backend/auth"
+    "github.com/ClusterCockpit/cc-backend/graph/model"
     "github.com/ClusterCockpit/cc-backend/schema"
     sq "github.com/Masterminds/squirrel"
     "github.com/jmoiron/sqlx"
@@ -16,6 +17,10 @@ type JobRepository struct {
     DB *sqlx.DB
 }

+func (r *JobRepository) Init() error {
+    return nil
+}
+
 // Find executes a SQL query to find a specific batch job.
 // The job is queried using the batch job id, the cluster name,
 // and the start time of the job in UNIX epoch time seconds.
@@ -93,15 +98,18 @@ func (r *JobRepository) Stop(
     return
 }

-// CountJobsPerCluster returns the number of jobs for the specified user (if a non-admin user is found in that context) and state.
-// The counts are grouped by cluster.
-func (r *JobRepository) CountJobsPerCluster(ctx context.Context, state *schema.JobState) (map[string]int, error) {
-    q := sq.Select("job.cluster, count(*)").From("job").GroupBy("job.cluster")
-    if state != nil {
-        q = q.Where("job.job_state = ?", string(*state))
+func (r *JobRepository) CountGroupedJobs(ctx context.Context, aggreg model.Aggregate, filters []*model.JobFilter, limit *int) (map[string]int, error) {
+    if !aggreg.IsValid() {
+        return nil, errors.New("invalid aggregate")
     }
-    if user := auth.GetUser(ctx); user != nil && !user.HasRole(auth.RoleAdmin) {
-        q = q.Where("job.user = ?", user.Username)
+
+    q := sq.Select("job."+string(aggreg), "count(*) as count").From("job").GroupBy("job." + string(aggreg)).OrderBy("count DESC")
+    q = SecurityCheck(ctx, q)
+    for _, f := range filters {
+        q = BuildWhereClause(f, q)
     }
+    if limit != nil {
+        q = q.Limit(uint64(*limit))
+    }

     counts := map[string]int{}
@@ -111,13 +119,13 @@ func (r *JobRepository) CountJobsPerCluster(ctx context.Context, state *schema.J
     }

     for rows.Next() {
-        var cluster string
+        var group string
         var count int
-        if err := rows.Scan(&cluster, &count); err != nil {
+        if err := rows.Scan(&group, &count); err != nil {
             return nil, err
         }

-        counts[cluster] = count
+        counts[group] = count
     }

     return counts, nil
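CountGroupedJobs builds its statement with squirrel, interpolating the grouping column from the aggregate name while filters and the security check are appended as WHERE clauses. A standalone sketch of what the builder renders to for the cluster aggregate (BuildWhereClause and SecurityCheck are left out here, so the real statement carries additional WHERE conditions):

package main

import (
    "fmt"

    sq "github.com/Masterminds/squirrel"
)

func main() {
    limit := 10 // corresponds to a non-nil *int limit argument
    q := sq.Select("job.cluster", "count(*) as count").
        From("job").
        GroupBy("job.cluster").
        OrderBy("count DESC").
        Limit(uint64(limit))

    sql, args, err := q.ToSql()
    if err != nil {
        panic(err)
    }
    fmt.Println(sql, args)
    // SELECT job.cluster, count(*) as count FROM job GROUP BY job.cluster ORDER BY count DESC LIMIT 10 []
}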
@@ -16,9 +16,14 @@ func init() {
 }

 func setup(t *testing.T) *JobRepository {
-    return &JobRepository{
+    r := &JobRepository{
         DB: db,
     }
+    if err := r.Init(); err != nil {
+        t.Fatal(err)
+    }
+
+    return r
 }

 func TestFind(t *testing.T) {
server.go
@@ -25,6 +25,7 @@ import (
     "github.com/ClusterCockpit/cc-backend/config"
     "github.com/ClusterCockpit/cc-backend/graph"
     "github.com/ClusterCockpit/cc-backend/graph/generated"
+    "github.com/ClusterCockpit/cc-backend/graph/model"
     "github.com/ClusterCockpit/cc-backend/log"
     "github.com/ClusterCockpit/cc-backend/metricdata"
     "github.com/ClusterCockpit/cc-backend/repository"
@@ -129,13 +130,14 @@ func setupHomeRoute(i InfoType, r *http.Request) InfoType {
         TotalJobs int
     }

-    state := schema.JobStateRunning
-    runningJobs, err := jobRepo.CountJobsPerCluster(r.Context(), &state)
+    runningJobs, err := jobRepo.CountGroupedJobs(r.Context(), model.AggregateCluster, []*model.JobFilter{{
+        State: []schema.JobState{schema.JobStateRunning},
+    }}, nil)
     if err != nil {
         log.Errorf("failed to count jobs: %s", err.Error())
         runningJobs = map[string]int{}
     }
-    totalJobs, err := jobRepo.CountJobsPerCluster(r.Context(), nil)
+    totalJobs, err := jobRepo.CountGroupedJobs(r.Context(), model.AggregateCluster, nil, nil)
     if err != nil {
         log.Errorf("failed to count jobs: %s", err.Error())
         totalJobs = map[string]int{}
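The home route keeps these results as per-cluster maps; the TotalJobs field in the template data above suggests they are also reduced to a single number, though that reduction is not shown in this hunk. A small standalone sketch of such a reduction, purely for illustration:

package main

import "fmt"

// sumCounts reduces a per-group map, such as the map[string]int returned by
// CountGroupedJobs, to one total (e.g. for a field like TotalJobs).
func sumCounts(counts map[string]int) int {
    total := 0
    for _, n := range counts {
        total += n
    }
    return total
}

func main() {
    // Hypothetical per-cluster counts; real values come from the repository.
    fmt.Println(sumCounts(map[string]int{"clusterA": 10, "clusterB": 3})) // 13
}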
@@ -360,6 +362,11 @@ func main() {

     // Build routes...

+    jobRepo = &repository.JobRepository{DB: db}
+    if err := jobRepo.Init(); err != nil {
+        log.Fatal(err)
+    }
+
     resolver := &graph.Resolver{DB: db, Repo: jobRepo}
     graphQLEndpoint := handler.NewDefaultServer(generated.NewExecutableSchema(generated.Config{Resolvers: resolver}))
     if os.Getenv("DEBUG") != "1" {
@@ -375,8 +382,6 @@ func main() {
         })
     }

-    jobRepo = &repository.JobRepository{DB: db}
-
     graphQLPlayground := playground.Handler("GraphQL playground", "/query")
     api := &api.RestApi{
         JobRepository: jobRepo,