Slight GraphQL API changes

Lou Knauer 2021-12-20 10:48:58 +01:00
parent 5403177edc
commit a6e8d5b484
6 changed files with 294 additions and 248 deletions

api/openapi.yaml (new file, 171 additions)

@@ -0,0 +1,171 @@
#
# ClusterCockpit's API spec can be exported via:
# docker exec -it cc-php php bin/console api:openapi:export --yaml
#
# This spec is written by hand and hopefully up to date with the API.
#
openapi: 3.0.3
info:
title: 'ClusterCockpit REST API'
description: 'API for batch job control'
version: 0.0.2
servers:
- url: /
description: ''
paths:
'/api/jobs/{id}':
get:
operationId: 'getJob'
summary: 'Get job resource'
parameters:
- name: id
in: path
required: true
schema: { type: integer }
description: 'Database ID (Resource Identifier)'
responses:
200:
description: 'Job resource'
content:
'application/json':
schema:
$ref: '#/components/schemas/Job'
404:
description: 'Resource not found'
'/api/jobs/tag_job/{id}':
post:
operationId: 'tagJob'
summary: 'Add a tag to a job'
parameters:
- name: id
in: path
required: true
schema: { type: integer }
description: 'Job ID'
requestBody:
description: 'Array of tags to add'
required: true
content:
'application/json':
schema:
type: array
items:
$ref: '#/components/schemas/Tag'
responses:
200:
description: 'Job resource'
content:
'application/json':
schema:
$ref: '#/components/schemas/Job'
404:
description: 'Job or tag does not exist'
400:
description: 'Bad request'
'/api/jobs/start_job/':
post:
operationId: 'startJob'
summary: 'Add a newly started job'
requestBody:
required: true
content:
'application/json':
schema:
$ref: '#/components/schemas/Job'
responses:
201:
description: 'Job successfully created'
content:
'application/json':
schema:
type: object
properties:
id:
type: integer
description: 'The database ID assigned to this job'
400:
description: 'Bad request'
422:
description: 'The combination of jobId, clusterId and startTime already exists'
'/api/jobs/stop_job/':
post:
operationId: stopJobViaJobID
summary: 'Mark a job as stopped. Which job to stop is specified by the request body.'
requestBody:
required: true
content:
'application/json':
schema:
type: object
required: [jobId, cluster, startTime, stopTime]
properties:
jobId: { type: integer }
cluster: { type: string }
startTime: { type: integer }
stopTime: { type: integer }
responses:
200:
description: 'Job resource'
content:
'application/json':
schema:
$ref: '#/components/schemas/Job'
400:
description: 'Bad request'
404:
description: 'Resource not found'
'/api/jobs/stop_job/{id}':
post:
operationId: 'stopJobViaDBID'
summary: 'Mark a job as stopped.'
parameters:
- name: id
in: path
required: true
schema: { type: integer }
description: 'Database ID (Resource Identifier)'
requestBody:
required: true
content:
'application/json':
schema:
type: object
required: [stopTime]
properties:
stopTime: { type: integer }
responses:
200:
description: 'Job resource'
content:
'application/json':
schema:
$ref: '#/components/schemas/Job'
400:
description: 'Bad request'
404:
description: 'Resource not found'
components:
schemas:
Tag:
description: 'A job tag'
type: object
properties:
id:
type: string
description: 'Database ID'
type:
type: string
description: 'Tag type'
name:
type: string
description: 'Tag name'
Job:
$ref: https://raw.githubusercontent.com/ClusterCockpit/cc-specifications/master/schema/json/job-meta.schema.json
securitySchemes:
bearerAuth:
type: http
scheme: bearer
bearerFormat: JWT
security:
- bearerAuth: [] # Applies `bearerAuth` globally
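For illustration only (not part of this commit): a minimal Go sketch of calling one of the endpoints documented above. The base URL, database ID, stopTime value and JWT are made-up placeholders; the path, request body and bearer auth follow the spec.

// Sketch: mark job 123 (database ID) as stopped via POST /api/jobs/stop_job/{id}.
package main

import (
	"fmt"
	"net/http"
	"strings"
)

func main() {
	body := strings.NewReader(`{"stopTime": 1639992000}`)
	req, err := http.NewRequest("POST", "https://cc.example.com/api/jobs/stop_job/123", body)
	if err != nil {
		panic(err)
	}
	req.Header.Set("Content-Type", "application/json")
	req.Header.Set("Authorization", "Bearer <JWT>") // bearerAuth scheme; placeholder token
	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()
	fmt.Println(resp.Status) // 200 returns the Job resource; 400/404 on errors
}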


@@ -39,7 +39,7 @@ type StopJobApiRequest struct {
// JobId, ClusterId and StartTime are optional.
// They are only used if no database id was provided.
JobId *string `json:"jobId"`
- Cluster *string `json:"clusterId"`
+ Cluster *string `json:"cluster"`
StartTime *int64 `json:"startTime"`
// Payload
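The hunk above renames the JSON key from clusterId to cluster. A minimal sketch (not part of this commit; the client-side struct and all values are illustrative) of the payload now sent to the body-based stop_job endpoint:

package main

import (
	"encoding/json"
	"fmt"
)

// stopJobRequest is a hypothetical client-side mirror of StopJobApiRequest;
// note the renamed "cluster" key (formerly "clusterId").
type stopJobRequest struct {
	JobId     *string `json:"jobId"`
	Cluster   *string `json:"cluster"`
	StartTime *int64  `json:"startTime"`
	StopTime  int64   `json:"stopTime"` // required by the stop_job spec above
}

func main() {
	jobId, cluster, startTime := "1337", "testcluster", int64(1639990000) // invented values
	b, _ := json.Marshal(stopJobRequest{
		JobId:     &jobId,
		Cluster:   &cluster,
		StartTime: &startTime,
		StopTime:  1639992000,
	})
	fmt.Println(string(b))
	// {"jobId":"1337","cluster":"testcluster","startTime":1639990000,"stopTime":1639992000}
}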


@@ -106,12 +106,8 @@ type ComplexityRoot struct {
}

JobMetricWithName struct {
- Core func(childComplexity int) int
+ Metric func(childComplexity int) int
- Hwthread func(childComplexity int) int
- MemoryDomain func(childComplexity int) int
Name func(childComplexity int) int
- Node func(childComplexity int) int
- Socket func(childComplexity int) int
}

JobResource struct {
@@ -193,7 +189,7 @@ type ComplexityRoot struct {
Query struct {
Clusters func(childComplexity int) int
Job func(childComplexity int, id string) int
- JobMetrics func(childComplexity int, id string, metrics []string) int
+ JobMetrics func(childComplexity int, id string, metrics []string, scopes []schema.MetricScope) int
Jobs func(childComplexity int, filter []*model.JobFilter, page *model.PageRequest, order *model.OrderByInput) int
JobsFootprints func(childComplexity int, filter []*model.JobFilter, metrics []string) int
JobsStatistics func(childComplexity int, filter []*model.JobFilter, groupBy *model.Aggregate) int
@@ -254,7 +250,7 @@ type QueryResolver interface {
Clusters(ctx context.Context) ([]*model.Cluster, error)
Tags(ctx context.Context) ([]*schema.Tag, error)
Job(ctx context.Context, id string) (*schema.Job, error)
- JobMetrics(ctx context.Context, id string, metrics []string) ([]*model.JobMetricWithName, error)
+ JobMetrics(ctx context.Context, id string, metrics []string, scopes []schema.MetricScope) ([]*model.JobMetricWithName, error)
JobsFootprints(ctx context.Context, filter []*model.JobFilter, metrics []string) ([]*model.MetricFootprints, error)
Jobs(ctx context.Context, filter []*model.JobFilter, page *model.PageRequest, order *model.OrderByInput) (*model.JobResultList, error)
JobsStatistics(ctx context.Context, filter []*model.JobFilter, groupBy *model.Aggregate) ([]*model.JobsStatistics, error)
@@ -536,26 +532,12 @@ func (e *executableSchema) Complexity(typeName, field string, childComplexity in
return e.complexity.JobMetric.Unit(childComplexity), true

- case "JobMetricWithName.core":
- if e.complexity.JobMetricWithName.Core == nil {
+ case "JobMetricWithName.metric":
+ if e.complexity.JobMetricWithName.Metric == nil {
break
}

- return e.complexity.JobMetricWithName.Core(childComplexity), true
+ return e.complexity.JobMetricWithName.Metric(childComplexity), true

- case "JobMetricWithName.hwthread":
- if e.complexity.JobMetricWithName.Hwthread == nil {
- break
- }
- return e.complexity.JobMetricWithName.Hwthread(childComplexity), true

- case "JobMetricWithName.memoryDomain":
- if e.complexity.JobMetricWithName.MemoryDomain == nil {
- break
- }
- return e.complexity.JobMetricWithName.MemoryDomain(childComplexity), true

case "JobMetricWithName.name":
if e.complexity.JobMetricWithName.Name == nil {
@@ -564,20 +546,6 @@ func (e *executableSchema) Complexity(typeName, field string, childComplexity in
return e.complexity.JobMetricWithName.Name(childComplexity), true

- case "JobMetricWithName.node":
- if e.complexity.JobMetricWithName.Node == nil {
- break
- }
- return e.complexity.JobMetricWithName.Node(childComplexity), true

- case "JobMetricWithName.socket":
- if e.complexity.JobMetricWithName.Socket == nil {
- break
- }
- return e.complexity.JobMetricWithName.Socket(childComplexity), true

case "JobResource.accelerators":
if e.complexity.JobResource.Accelerators == nil {
break
@@ -683,14 +651,14 @@ func (e *executableSchema) Complexity(typeName, field string, childComplexity in
return e.complexity.JobsStatistics.TotalWalltime(childComplexity), true

- case "MetricConfig.Alert":
+ case "MetricConfig.alert":
if e.complexity.MetricConfig.Alert == nil {
break
}

return e.complexity.MetricConfig.Alert(childComplexity), true

- case "MetricConfig.Caution":
+ case "MetricConfig.caution":
if e.complexity.MetricConfig.Caution == nil {
break
}
@@ -704,14 +672,14 @@ func (e *executableSchema) Complexity(typeName, field string, childComplexity in
return e.complexity.MetricConfig.Name(childComplexity), true

- case "MetricConfig.Normal":
+ case "MetricConfig.normal":
if e.complexity.MetricConfig.Normal == nil {
break
}

return e.complexity.MetricConfig.Normal(childComplexity), true

- case "MetricConfig.Peak":
+ case "MetricConfig.peak":
if e.complexity.MetricConfig.Peak == nil {
break
}
@@ -954,7 +922,7 @@ func (e *executableSchema) Complexity(typeName, field string, childComplexity in
return 0, false
}

- return e.complexity.Query.JobMetrics(childComplexity, args["id"].(string), args["metrics"].([]string)), true
+ return e.complexity.Query.JobMetrics(childComplexity, args["id"].(string), args["metrics"].([]string), args["scopes"].([]schema.MetricScope)), true

case "Query.jobs":
if e.complexity.Query.Jobs == nil {
@@ -1278,10 +1246,10 @@ type MetricConfig {
unit: String!
scope: String!
timestep: Int!
- Peak: Float!
- Normal: Float!
- Caution: Float!
- Alert: Float!
+ peak: Float!
+ normal: Float!
+ caution: Float!
+ alert: Float!
}

type Tag {
@@ -1299,12 +1267,7 @@ type JobResource {
type JobMetricWithName {
name: String!
- node: JobMetric
- socket: JobMetric
- memoryDomain: JobMetric
- core: JobMetric
- hwthread: JobMetric
+ metric: JobMetric!
}

type JobMetric {
@@ -1356,7 +1319,7 @@ type Query {
tags: [Tag!]! # List of all tags
job(id: ID!): Job
- jobMetrics(id: ID!, metrics: [String!]): [JobMetricWithName!]!
+ jobMetrics(id: ID!, metrics: [String!], scopes: [MetricScope!]): [JobMetricWithName!]!
jobsFootprints(filter: [JobFilter!], metrics: [String!]!): [MetricFootprints]!
jobs(filter: [JobFilter!], page: PageRequest, order: OrderByInput): JobResultList!
@@ -1603,6 +1566,15 @@ func (ec *executionContext) field_Query_jobMetrics_args(ctx context.Context, raw
}
}
args["metrics"] = arg1
+ var arg2 []schema.MetricScope
+ if tmp, ok := rawArgs["scopes"]; ok {
+ ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("scopes"))
+ arg2, err = ec.unmarshalOMetricScope2ᚕgithubᚗcomᚋClusterCockpitᚋccᚑjobarchiveᚋschemaᚐMetricScopeᚄ(ctx, tmp)
+ if err != nil {
+ return nil, err
+ }
+ }
+ args["scopes"] = arg2
return args, nil
}
@@ -3187,7 +3159,7 @@ func (ec *executionContext) _JobMetricWithName_name(ctx context.Context, field g
return ec.marshalNString2string(ctx, field.Selections, res)
}

- func (ec *executionContext) _JobMetricWithName_node(ctx context.Context, field graphql.CollectedField, obj *model.JobMetricWithName) (ret graphql.Marshaler) {
+ func (ec *executionContext) _JobMetricWithName_metric(ctx context.Context, field graphql.CollectedField, obj *model.JobMetricWithName) (ret graphql.Marshaler) {
defer func() {
if r := recover(); r != nil {
ec.Error(ctx, ec.Recover(ctx, r))
@@ -3205,146 +3177,21 @@ func (ec *executionContext) _JobMetricWithName_node(ctx context.Context, field g
ctx = graphql.WithFieldContext(ctx, fc)
resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
ctx = rctx // use context from middleware stack in children
- return obj.Node, nil
+ return obj.Metric, nil
})
if err != nil {
ec.Error(ctx, err)
return graphql.Null
}
if resTmp == nil {
+ if !graphql.HasFieldError(ctx, fc) {
+ ec.Errorf(ctx, "must not be null")
+ }
return graphql.Null
}
res := resTmp.(*schema.JobMetric)
fc.Result = res
- return ec.marshalOJobMetric2ᚖgithubᚗcomᚋClusterCockpitᚋccᚑjobarchiveᚋschemaᚐJobMetric(ctx, field.Selections, res)
+ return ec.marshalNJobMetric2ᚖgithubᚗcomᚋClusterCockpitᚋccᚑjobarchiveᚋschemaᚐJobMetric(ctx, field.Selections, res)
}
func (ec *executionContext) _JobMetricWithName_socket(ctx context.Context, field graphql.CollectedField, obj *model.JobMetricWithName) (ret graphql.Marshaler) {
defer func() {
if r := recover(); r != nil {
ec.Error(ctx, ec.Recover(ctx, r))
ret = graphql.Null
}
}()
fc := &graphql.FieldContext{
Object: "JobMetricWithName",
Field: field,
Args: nil,
IsMethod: false,
IsResolver: false,
}
ctx = graphql.WithFieldContext(ctx, fc)
resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
ctx = rctx // use context from middleware stack in children
return obj.Socket, nil
})
if err != nil {
ec.Error(ctx, err)
return graphql.Null
}
if resTmp == nil {
return graphql.Null
}
res := resTmp.(*schema.JobMetric)
fc.Result = res
return ec.marshalOJobMetric2ᚖgithubᚗcomᚋClusterCockpitᚋccᚑjobarchiveᚋschemaᚐJobMetric(ctx, field.Selections, res)
}
func (ec *executionContext) _JobMetricWithName_memoryDomain(ctx context.Context, field graphql.CollectedField, obj *model.JobMetricWithName) (ret graphql.Marshaler) {
defer func() {
if r := recover(); r != nil {
ec.Error(ctx, ec.Recover(ctx, r))
ret = graphql.Null
}
}()
fc := &graphql.FieldContext{
Object: "JobMetricWithName",
Field: field,
Args: nil,
IsMethod: false,
IsResolver: false,
}
ctx = graphql.WithFieldContext(ctx, fc)
resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
ctx = rctx // use context from middleware stack in children
return obj.MemoryDomain, nil
})
if err != nil {
ec.Error(ctx, err)
return graphql.Null
}
if resTmp == nil {
return graphql.Null
}
res := resTmp.(*schema.JobMetric)
fc.Result = res
return ec.marshalOJobMetric2ᚖgithubᚗcomᚋClusterCockpitᚋccᚑjobarchiveᚋschemaᚐJobMetric(ctx, field.Selections, res)
}
func (ec *executionContext) _JobMetricWithName_core(ctx context.Context, field graphql.CollectedField, obj *model.JobMetricWithName) (ret graphql.Marshaler) {
defer func() {
if r := recover(); r != nil {
ec.Error(ctx, ec.Recover(ctx, r))
ret = graphql.Null
}
}()
fc := &graphql.FieldContext{
Object: "JobMetricWithName",
Field: field,
Args: nil,
IsMethod: false,
IsResolver: false,
}
ctx = graphql.WithFieldContext(ctx, fc)
resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
ctx = rctx // use context from middleware stack in children
return obj.Core, nil
})
if err != nil {
ec.Error(ctx, err)
return graphql.Null
}
if resTmp == nil {
return graphql.Null
}
res := resTmp.(*schema.JobMetric)
fc.Result = res
return ec.marshalOJobMetric2ᚖgithubᚗcomᚋClusterCockpitᚋccᚑjobarchiveᚋschemaᚐJobMetric(ctx, field.Selections, res)
}
func (ec *executionContext) _JobMetricWithName_hwthread(ctx context.Context, field graphql.CollectedField, obj *model.JobMetricWithName) (ret graphql.Marshaler) {
defer func() {
if r := recover(); r != nil {
ec.Error(ctx, ec.Recover(ctx, r))
ret = graphql.Null
}
}()
fc := &graphql.FieldContext{
Object: "JobMetricWithName",
Field: field,
Args: nil,
IsMethod: false,
IsResolver: false,
}
ctx = graphql.WithFieldContext(ctx, fc)
resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
ctx = rctx // use context from middleware stack in children
return obj.Hwthread, nil
})
if err != nil {
ec.Error(ctx, err)
return graphql.Null
}
if resTmp == nil {
return graphql.Null
}
res := resTmp.(*schema.JobMetric)
fc.Result = res
return ec.marshalOJobMetric2ᚖgithubᚗcomᚋClusterCockpitᚋccᚑjobarchiveᚋschemaᚐJobMetric(ctx, field.Selections, res)
}

func (ec *executionContext) _JobResource_hostname(ctx context.Context, field graphql.CollectedField, obj *model.JobResource) (ret graphql.Marshaler) {
@@ -3994,7 +3841,7 @@ func (ec *executionContext) _MetricConfig_timestep(ctx context.Context, field gr
return ec.marshalNInt2int(ctx, field.Selections, res)
}

- func (ec *executionContext) _MetricConfig_Peak(ctx context.Context, field graphql.CollectedField, obj *model.MetricConfig) (ret graphql.Marshaler) {
+ func (ec *executionContext) _MetricConfig_peak(ctx context.Context, field graphql.CollectedField, obj *model.MetricConfig) (ret graphql.Marshaler) {
defer func() {
if r := recover(); r != nil {
ec.Error(ctx, ec.Recover(ctx, r))
@@ -4029,7 +3876,7 @@ func (ec *executionContext) _MetricConfig_Peak(ctx context.Context, field graphq
return ec.marshalNFloat2float64(ctx, field.Selections, res)
}

- func (ec *executionContext) _MetricConfig_Normal(ctx context.Context, field graphql.CollectedField, obj *model.MetricConfig) (ret graphql.Marshaler) {
+ func (ec *executionContext) _MetricConfig_normal(ctx context.Context, field graphql.CollectedField, obj *model.MetricConfig) (ret graphql.Marshaler) {
defer func() {
if r := recover(); r != nil {
ec.Error(ctx, ec.Recover(ctx, r))
@@ -4064,7 +3911,7 @@ func (ec *executionContext) _MetricConfig_Normal(ctx context.Context, field grap
return ec.marshalNFloat2float64(ctx, field.Selections, res)
}

- func (ec *executionContext) _MetricConfig_Caution(ctx context.Context, field graphql.CollectedField, obj *model.MetricConfig) (ret graphql.Marshaler) {
+ func (ec *executionContext) _MetricConfig_caution(ctx context.Context, field graphql.CollectedField, obj *model.MetricConfig) (ret graphql.Marshaler) {
defer func() {
if r := recover(); r != nil {
ec.Error(ctx, ec.Recover(ctx, r))
@@ -4099,7 +3946,7 @@ func (ec *executionContext) _MetricConfig_Caution(ctx context.Context, field gra
return ec.marshalNFloat2float64(ctx, field.Selections, res)
}

- func (ec *executionContext) _MetricConfig_Alert(ctx context.Context, field graphql.CollectedField, obj *model.MetricConfig) (ret graphql.Marshaler) {
+ func (ec *executionContext) _MetricConfig_alert(ctx context.Context, field graphql.CollectedField, obj *model.MetricConfig) (ret graphql.Marshaler) {
defer func() {
if r := recover(); r != nil {
ec.Error(ctx, ec.Recover(ctx, r))
@@ -5105,7 +4952,7 @@ func (ec *executionContext) _Query_jobMetrics(ctx context.Context, field graphql
fc.Args = args
resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
ctx = rctx // use context from middleware stack in children
- return ec.resolvers.Query().JobMetrics(rctx, args["id"].(string), args["metrics"].([]string))
+ return ec.resolvers.Query().JobMetrics(rctx, args["id"].(string), args["metrics"].([]string), args["scopes"].([]schema.MetricScope))
})
if err != nil {
ec.Error(ctx, err)
@@ -7778,16 +7625,11 @@ func (ec *executionContext) _JobMetricWithName(ctx context.Context, sel ast.Sele
if out.Values[i] == graphql.Null {
invalids++
}
- case "node":
- out.Values[i] = ec._JobMetricWithName_node(ctx, field, obj)
- case "socket":
- out.Values[i] = ec._JobMetricWithName_socket(ctx, field, obj)
- case "memoryDomain":
- out.Values[i] = ec._JobMetricWithName_memoryDomain(ctx, field, obj)
- case "core":
- out.Values[i] = ec._JobMetricWithName_core(ctx, field, obj)
- case "hwthread":
- out.Values[i] = ec._JobMetricWithName_hwthread(ctx, field, obj)
+ case "metric":
+ out.Values[i] = ec._JobMetricWithName_metric(ctx, field, obj)
+ if out.Values[i] == graphql.Null {
+ invalids++
+ }
default:
panic("unknown field " + strconv.Quote(field.Name))
}
@@ -7953,23 +7795,23 @@ func (ec *executionContext) _MetricConfig(ctx context.Context, sel ast.Selection
if out.Values[i] == graphql.Null {
invalids++
}
- case "Peak":
- out.Values[i] = ec._MetricConfig_Peak(ctx, field, obj)
+ case "peak":
+ out.Values[i] = ec._MetricConfig_peak(ctx, field, obj)
if out.Values[i] == graphql.Null {
invalids++
}
- case "Normal":
- out.Values[i] = ec._MetricConfig_Normal(ctx, field, obj)
+ case "normal":
+ out.Values[i] = ec._MetricConfig_normal(ctx, field, obj)
if out.Values[i] == graphql.Null {
invalids++
}
- case "Caution":
- out.Values[i] = ec._MetricConfig_Caution(ctx, field, obj)
+ case "caution":
+ out.Values[i] = ec._MetricConfig_caution(ctx, field, obj)
if out.Values[i] == graphql.Null {
invalids++
}
- case "Alert":
- out.Values[i] = ec._MetricConfig_Alert(ctx, field, obj)
+ case "alert":
+ out.Values[i] = ec._MetricConfig_alert(ctx, field, obj)
if out.Values[i] == graphql.Null {
invalids++
}
@@ -9219,6 +9061,16 @@ func (ec *executionContext) unmarshalNJobFilter2ᚖgithubᚗcomᚋClusterCockpit
return &res, graphql.ErrorOnPath(ctx, err)
}

+ func (ec *executionContext) marshalNJobMetric2ᚖgithubᚗcomᚋClusterCockpitᚋccᚑjobarchiveᚋschemaᚐJobMetric(ctx context.Context, sel ast.SelectionSet, v *schema.JobMetric) graphql.Marshaler {
+ if v == nil {
+ if !graphql.HasFieldError(ctx, graphql.GetFieldContext(ctx)) {
+ ec.Errorf(ctx, "must not be null")
+ }
+ return graphql.Null
+ }
+ return ec._JobMetric(ctx, sel, v)
+ }

func (ec *executionContext) marshalNJobMetricWithName2ᚕᚖgithubᚗcomᚋClusterCockpitᚋccᚑjobarchiveᚋgraphᚋmodelᚐJobMetricWithNameᚄ(ctx context.Context, sel ast.SelectionSet, v []*model.JobMetricWithName) graphql.Marshaler {
ret := make(graphql.Array, len(v))
var wg sync.WaitGroup
@@ -10330,13 +10182,6 @@ func (ec *executionContext) unmarshalOJobFilter2ᚕᚖgithubᚗcomᚋClusterCock
return res, nil
}

- func (ec *executionContext) marshalOJobMetric2ᚖgithubᚗcomᚋClusterCockpitᚋccᚑjobarchiveᚋschemaᚐJobMetric(ctx context.Context, sel ast.SelectionSet, v *schema.JobMetric) graphql.Marshaler {
- if v == nil {
- return graphql.Null
- }
- return ec._JobMetric(ctx, sel, v)
- }

func (ec *executionContext) unmarshalOJobState2ᚕgithubᚗcomᚋClusterCockpitᚋccᚑjobarchiveᚋschemaᚐJobStateᚄ(ctx context.Context, v interface{}) ([]schema.JobState, error) {
if v == nil {
return nil, nil
@@ -10380,6 +10225,42 @@ func (ec *executionContext) marshalOMetricFootprints2ᚖgithubᚗcomᚋClusterCo
return ec._MetricFootprints(ctx, sel, v)
}

func (ec *executionContext) unmarshalOMetricScope2ᚕgithubᚗcomᚋClusterCockpitᚋccᚑjobarchiveᚋschemaᚐMetricScopeᚄ(ctx context.Context, v interface{}) ([]schema.MetricScope, error) {
if v == nil {
return nil, nil
}
var vSlice []interface{}
if v != nil {
if tmp1, ok := v.([]interface{}); ok {
vSlice = tmp1
} else {
vSlice = []interface{}{v}
}
}
var err error
res := make([]schema.MetricScope, len(vSlice))
for i := range vSlice {
ctx := graphql.WithPathContext(ctx, graphql.NewPathWithIndex(i))
res[i], err = ec.unmarshalNMetricScope2githubᚗcomᚋClusterCockpitᚋccᚑjobarchiveᚋschemaᚐMetricScope(ctx, vSlice[i])
if err != nil {
return nil, err
}
}
return res, nil
}
func (ec *executionContext) marshalOMetricScope2ᚕgithubᚗcomᚋClusterCockpitᚋccᚑjobarchiveᚋschemaᚐMetricScopeᚄ(ctx context.Context, sel ast.SelectionSet, v []schema.MetricScope) graphql.Marshaler {
if v == nil {
return graphql.Null
}
ret := make(graphql.Array, len(v))
for i := range v {
ret[i] = ec.marshalNMetricScope2githubᚗcomᚋClusterCockpitᚋccᚑjobarchiveᚋschemaᚐMetricScope(ctx, sel, v[i])
}
return ret
}

func (ec *executionContext) marshalOMetricStatistics2ᚖgithubᚗcomᚋClusterCockpitᚋccᚑjobarchiveᚋschemaᚐMetricStatistics(ctx context.Context, sel ast.SelectionSet, v *schema.MetricStatistics) graphql.Marshaler {
if v == nil {
return graphql.Null


@@ -61,11 +61,7 @@ type JobFilter struct {
type JobMetricWithName struct {
Name string `json:"name"`
- Node *schema.JobMetric `json:"node"`
- Socket *schema.JobMetric `json:"socket"`
- MemoryDomain *schema.JobMetric `json:"memoryDomain"`
- Core *schema.JobMetric `json:"core"`
- Hwthread *schema.JobMetric `json:"hwthread"`
+ Metric *schema.JobMetric `json:"metric"`
}

type JobResource struct {
@@ -97,10 +93,10 @@ type MetricConfig struct {
Unit string `json:"unit"`
Scope string `json:"scope"`
Timestep int `json:"timestep"`
- Peak float64 `json:"Peak"`
- Normal float64 `json:"Normal"`
- Caution float64 `json:"Caution"`
- Alert float64 `json:"Alert"`
+ Peak float64 `json:"peak"`
+ Normal float64 `json:"normal"`
+ Caution float64 `json:"caution"`
+ Alert float64 `json:"alert"`
}

type MetricFootprints struct {


@@ -63,10 +63,10 @@ type MetricConfig {
unit: String!
scope: String!
timestep: Int!
- Peak: Float!
- Normal: Float!
- Caution: Float!
- Alert: Float!
+ peak: Float!
+ normal: Float!
+ caution: Float!
+ alert: Float!
}

type Tag {
@@ -84,12 +84,7 @@ type JobResource {
type JobMetricWithName {
name: String!
- node: JobMetric
- socket: JobMetric
- memoryDomain: JobMetric
- core: JobMetric
- hwthread: JobMetric
+ metric: JobMetric!
}

type JobMetric {
@@ -141,7 +136,7 @@ type Query {
tags: [Tag!]! # List of all tags
job(id: ID!): Job
- jobMetrics(id: ID!, metrics: [String!]): [JobMetricWithName!]!
+ jobMetrics(id: ID!, metrics: [String!], scopes: [MetricScope!]): [JobMetricWithName!]!
jobsFootprints(filter: [JobFilter!], metrics: [String!]!): [MetricFootprints]!
jobs(filter: [JobFilter!], page: PageRequest, order: OrderByInput): JobResultList!
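A sketch (not part of this commit) of how a client query changes with the new signature: the result is one JobMetricWithName per name and scope, selected through the new metric field instead of the removed per-scope fields. The /query endpoint path, the metric names and the "node" scope value are assumptions.

package main

import (
	"bytes"
	"encoding/json"
	"fmt"
	"net/http"
)

// Assumed: the GraphQL endpoint is served at /query, "flops_any"/"mem_bw" exist as
// metric names, and "node" is a valid MetricScope value.
const jobMetricsQuery = `
query ($id: ID!, $scopes: [MetricScope!]) {
  jobMetrics(id: $id, metrics: ["flops_any", "mem_bw"], scopes: $scopes) {
    name
    metric { unit }  # previously: node { unit }, socket { unit }, ...
  }
}`

func main() {
	payload, _ := json.Marshal(map[string]interface{}{
		"query":     jobMetricsQuery,
		"variables": map[string]interface{}{"id": "123", "scopes": []string{"node"}},
	})
	resp, err := http.Post("http://localhost:8080/query", "application/json", bytes.NewReader(payload))
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()
	fmt.Println(resp.Status)
}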


@@ -166,12 +166,13 @@ func (r *queryResolver) Job(ctx context.Context, id string) (*schema.Job, error)
return schema.ScanJob(r.DB.QueryRowx(sql, args...))
}

- func (r *queryResolver) JobMetrics(ctx context.Context, id string, metrics []string) ([]*model.JobMetricWithName, error) {
+ func (r *queryResolver) JobMetrics(ctx context.Context, id string, metrics []string, scopes []schema.MetricScope) ([]*model.JobMetricWithName, error) {
job, err := r.Query().Job(ctx, id)
if err != nil {
return nil, err
}

+ // TODO: FIXME: Do something with `scopes`
data, err := metricdata.LoadData(job, metrics, ctx)
if err != nil {
return nil, err
@@ -179,15 +180,17 @@ func (r *queryResolver) JobMetrics(ctx context.Context, id string, metrics []str
res := []*model.JobMetricWithName{}
for name, md := range data {
+ for scope, metric := range md {
+ if metric.Scope != schema.MetricScope(scope) {
+ panic("WTF?")
+ }

res = append(res, &model.JobMetricWithName{
Name: name,
- Node: md["node"],
- Socket: md["socket"],
- MemoryDomain: md["memoryDomain"],
- Core: md["core"],
- Hwthread: md["hwthread"],
+ Metric: metric,
})
}
+ }

return res, err
}
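The resolver still ignores scopes (see the TODO above). A possible follow-up, sketched here only and not part of this commit, would filter the loaded metrics by the requested scopes; the package name and placement are assumptions.

// Sketch only: one way the resolver's TODO could honor the scopes argument.
package graph // assumption: the resolver package

import "github.com/ClusterCockpit/cc-jobarchive/schema"

// scopeRequested reports whether scope s was asked for; an empty list means "all scopes".
func scopeRequested(scopes []schema.MetricScope, s schema.MetricScope) bool {
	if len(scopes) == 0 {
		return true
	}
	for _, requested := range scopes {
		if requested == s {
			return true
		}
	}
	return false
}

Inside the inner loop above, a guard like "if !scopeRequested(scopes, metric.Scope) { continue }" would then skip scopes the client did not request.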