Mirror of https://github.com/ClusterCockpit/cc-backend, synced 2024-12-26 05:19:05 +01:00
New GraphQL schema
This commit is contained in: parent 236f51ba9a, commit b8d23f8ea1
gqlgen.yml (20 changed lines)
```diff
@@ -55,15 +55,17 @@ models:
       - github.com/99designs/gqlgen/graphql.Int64
       - github.com/99designs/gqlgen/graphql.Int32
   Job:
     model: "github.com/ClusterCockpit/cc-jobarchive/graph/model.Job"
     fields:
       tags:
         resolver: true
-  Cluster:
-    fields:
-      filterRanges:
-        resolver: true
-  JobTag:
-    model: "github.com/ClusterCockpit/cc-jobarchive/graph/model.JobTag"
-  Timestamp:
-    model: "github.com/ClusterCockpit/cc-jobarchive/graph/model.Timestamp"
+  JobMetric:
+    model: "github.com/ClusterCockpit/cc-jobarchive/schema.JobMetric"
+  JobMetricSeries:
+    model: "github.com/ClusterCockpit/cc-jobarchive/schema.MetricSeries"
+  JobMetricStatistics:
+    model: "github.com/ClusterCockpit/cc-jobarchive/schema.MetricStatistics"
+  NullableFloat:
+    model: "github.com/ClusterCockpit/cc-jobarchive/schema.Float"
+  JobMetricScope:
+    model: "github.com/ClusterCockpit/cc-jobarchive/schema.MetricScope"
```
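Binding `NullableFloat` and `JobMetricScope` to types in the new `schema` package works because gqlgen only asks that a mapped scalar implement its `graphql.Marshaler` and `graphql.Unmarshaler` interfaces. A minimal sketch of that contract with a made-up `Celsius` scalar (the interface shapes come from gqlgen; the type itself is hypothetical):

```go
package example

import (
	"fmt"
	"io"
)

// gqlgen's graphql package (github.com/99designs/gqlgen/graphql) defines:
//
//	type Marshaler interface{ MarshalGQL(w io.Writer) }
//	type Unmarshaler interface{ UnmarshalGQL(v interface{}) error }

// Celsius is a hypothetical custom scalar satisfying that contract.
type Celsius float64

// MarshalGQL writes the value as a GraphQL number literal.
func (c Celsius) MarshalGQL(w io.Writer) {
	fmt.Fprintf(w, "%g", float64(c))
}

// UnmarshalGQL receives the decoded JSON value from the request.
func (c *Celsius) UnmarshalGQL(v interface{}) error {
	f, ok := v.(float64)
	if !ok {
		return fmt.Errorf("Celsius must be a number")
	}
	*c = Celsius(f)
	return nil
}
```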
(File diff suppressed because it is too large.)
```diff
@@ -1,45 +1,9 @@
 package model
 
-import (
-	"time"
-)
-
-type Job struct {
-	ID          string    `json:"id"`
-	JobID       string    `json:"jobId" db:"job_id"`
-	UserID      string    `json:"userId" db:"user_id"`
-	ProjectID   string    `json:"projectId" db:"project_id"`
-	ClusterID   string    `json:"clusterId" db:"cluster_id"`
-	StartTime   time.Time `json:"startTime" db:"start_time"`
-	Duration    int       `json:"duration" db:"duration"`
-	Walltime    *int      `json:"walltime" db:"walltime"`
-	Jobstate    *string   `json:"jobstate" db:"job_state"`
-	NumNodes    int       `json:"numNodes" db:"num_nodes"`
-	NodeList    string    `json:"nodelist" db:"node_list"`
-	HasProfile  bool      `json:"hasProfile" db:"has_profile"`
-	MemUsedMax  *float64  `json:"memUsedMax" db:"mem_used_max"`
-	FlopsAnyAvg *float64  `json:"flopsAnyAvg" db:"flops_any_avg"`
-	MemBwAvg    *float64  `json:"memBwAvg" db:"mem_bw_avg"`
-	NetBwAvg    *float64  `json:"netBwAvg" db:"net_bw_avg"`
-	FileBwAvg   *float64  `json:"fileBwAvg" db:"file_bw_avg"`
-	LoadAvg     *float64  `json:"loadAvg" db:"load_avg"`
-	Tags        []JobTag  `json:"tags"`
-}
+// Go look at `gqlgen.yml` and the schema package for other non-generated models.
 
 type JobTag struct {
-	ID      string `db:"id"`
-	TagType string `db:"tag_type"`
-	TagName string `db:"tag_name"`
+	ID      string `json:"id" db:"id"`
+	TagType string `json:"tagType" db:"tag_type"`
+	TagName string `json:"tagName" db:"tag_name"`
 }
-
-type Cluster struct {
-	ClusterID       string         `json:"clusterID"`
-	ProcessorType   string         `json:"processorType"`
-	SocketsPerNode  int            `json:"socketsPerNode"`
-	CoresPerSocket  int            `json:"coresPerSocket"`
-	ThreadsPerCore  int            `json:"threadsPerCore"`
-	FlopRateScalar  int            `json:"flopRateScalar"`
-	FlopRateSimd    int            `json:"flopRateSimd"`
-	MemoryBandwidth int            `json:"memoryBandwidth"`
-	MetricConfig    []MetricConfig `json:"metricConfig"`
-}
```
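The `json` tags added to `JobTag` let the same struct serve GraphQL responses, while the existing `db` tags keep mapping fields to database columns. A usage sketch of the `db` side, assuming a scanner like sqlx and a `tag` table with these column names (neither assumption is shown in this diff):

```go
package example

import (
	"github.com/jmoiron/sqlx"

	"github.com/ClusterCockpit/cc-jobarchive/graph/model"
)

// fetchTags is a hypothetical helper: sqlx matches the selected
// columns against the `db:"..."` tags on model.JobTag.
func fetchTags(db *sqlx.DB) ([]model.JobTag, error) {
	var tags []model.JobTag
	if err := db.Select(&tags, "SELECT id, tag_type, tag_name FROM tag"); err != nil {
		return nil, err
	}
	return tags, nil
}
```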
```diff
@@ -7,8 +7,23 @@ import (
 	"io"
 	"strconv"
 	"time"
+
+	"github.com/ClusterCockpit/cc-jobarchive/schema"
 )
 
+type Cluster struct {
+	ClusterID       string          `json:"clusterID"`
+	ProcessorType   string          `json:"processorType"`
+	SocketsPerNode  int             `json:"socketsPerNode"`
+	CoresPerSocket  int             `json:"coresPerSocket"`
+	ThreadsPerCore  int             `json:"threadsPerCore"`
+	FlopRateScalar  int             `json:"flopRateScalar"`
+	FlopRateSimd    int             `json:"flopRateSimd"`
+	MemoryBandwidth int             `json:"memoryBandwidth"`
+	MetricConfig    []*MetricConfig `json:"metricConfig"`
+	FilterRanges    *FilterRanges   `json:"filterRanges"`
+}
+
 type FilterRanges struct {
 	Duration *IntRangeOutput `json:"duration"`
 	NumNodes *IntRangeOutput `json:"numNodes"`
@@ -35,6 +50,27 @@ type IntRangeOutput struct {
 	To int `json:"to"`
 }
 
+type Job struct {
+	ID          string    `json:"id"`
+	JobID       string    `json:"jobId"`
+	UserID      string    `json:"userId"`
+	ProjectID   string    `json:"projectId"`
+	ClusterID   string    `json:"clusterId"`
+	StartTime   time.Time `json:"startTime"`
+	Duration    int       `json:"duration"`
+	NumNodes    int       `json:"numNodes"`
+	Nodes       []string  `json:"nodes"`
+	HasProfile  bool      `json:"hasProfile"`
+	State       JobState  `json:"state"`
+	Tags        []*JobTag `json:"tags"`
+	LoadAvg     *float64  `json:"loadAvg"`
+	MemUsedMax  *float64  `json:"memUsedMax"`
+	FlopsAnyAvg *float64  `json:"flopsAnyAvg"`
+	MemBwAvg    *float64  `json:"memBwAvg"`
+	NetBwAvg    *float64  `json:"netBwAvg"`
+	FileBwAvg   *float64  `json:"fileBwAvg"`
+}
+
 type JobFilter struct {
 	Tags  []string     `json:"tags"`
 	JobID *StringInput `json:"jobId"`
@@ -51,32 +87,9 @@ type JobFilter struct {
 	MemUsedMax *FloatRange `json:"memUsedMax"`
 }
 
-type JobFilterList struct {
-	List []*JobFilter `json:"list"`
-}
-
-type JobMetric struct {
-	Unit     string             `json:"unit"`
-	Scope    JobMetricScope     `json:"scope"`
-	Timestep int                `json:"timestep"`
-	Series   []*JobMetricSeries `json:"series"`
-}
-
-type JobMetricSeries struct {
-	NodeID     string               `json:"node_id"`
-	Statistics *JobMetricStatistics `json:"statistics"`
-	Data       []*float64           `json:"data"`
-}
-
-type JobMetricStatistics struct {
-	Avg float64 `json:"avg"`
-	Min float64 `json:"min"`
-	Max float64 `json:"max"`
-}
-
 type JobMetricWithName struct {
 	Name   string            `json:"name"`
-	Metric *JobMetric        `json:"metric"`
+	Metric *schema.JobMetric `json:"metric"`
 }
 
 type JobResultList struct {
@@ -87,6 +100,7 @@ type JobResultList struct {
 }
 
 type JobsStatistics struct {
+	ID            string `json:"id"`
 	TotalJobs     int    `json:"totalJobs"`
 	ShortJobs     int    `json:"shortJobs"`
 	TotalWalltime int    `json:"totalWalltime"`
@@ -105,9 +119,14 @@ type MetricConfig struct {
 	Alert int `json:"alert"`
 }
 
+type MetricFootprints struct {
+	Name       string         `json:"name"`
+	Footprints []schema.Float `json:"footprints"`
+}
+
 type OrderByInput struct {
-	Field string             `json:"field"`
-	Order *SortDirectionEnum `json:"order"`
+	Field string            `json:"field"`
+	Order SortDirectionEnum `json:"order"`
 }
 
 type PageRequest struct {
@@ -123,8 +142,8 @@ type StringInput struct {
 }
 
 type TimeRange struct {
-	From time.Time `json:"from"`
-	To   time.Time `json:"to"`
+	From *time.Time `json:"from"`
+	To   *time.Time `json:"to"`
 }
 
 type TimeRangeOutput struct {
@@ -132,53 +151,87 @@ type TimeRangeOutput struct {
 	To time.Time `json:"to"`
 }
 
-type UserStats struct {
-	UserID         string  `json:"userId"`
-	TotalJobs      int     `json:"totalJobs"`
-	TotalWalltime  float64 `json:"totalWalltime"`
-	TotalCoreHours float64 `json:"totalCoreHours"`
-}
-
-type JobMetricScope string
+type Aggregate string
 
 const (
-	JobMetricScopeNode   JobMetricScope = "node"
-	JobMetricScopeCPU    JobMetricScope = "cpu"
-	JobMetricScopeSocket JobMetricScope = "socket"
+	AggregateUser    Aggregate = "USER"
+	AggregateProject Aggregate = "PROJECT"
+	AggregateCluster Aggregate = "CLUSTER"
 )
 
-var AllJobMetricScope = []JobMetricScope{
-	JobMetricScopeNode,
-	JobMetricScopeCPU,
-	JobMetricScopeSocket,
+var AllAggregate = []Aggregate{
+	AggregateUser,
+	AggregateProject,
+	AggregateCluster,
 }
 
-func (e JobMetricScope) IsValid() bool {
+func (e Aggregate) IsValid() bool {
 	switch e {
-	case JobMetricScopeNode, JobMetricScopeCPU, JobMetricScopeSocket:
+	case AggregateUser, AggregateProject, AggregateCluster:
 		return true
 	}
 	return false
 }
 
-func (e JobMetricScope) String() string {
+func (e Aggregate) String() string {
 	return string(e)
 }
 
-func (e *JobMetricScope) UnmarshalGQL(v interface{}) error {
+func (e *Aggregate) UnmarshalGQL(v interface{}) error {
 	str, ok := v.(string)
 	if !ok {
 		return fmt.Errorf("enums must be strings")
 	}
 
-	*e = JobMetricScope(str)
+	*e = Aggregate(str)
 	if !e.IsValid() {
-		return fmt.Errorf("%s is not a valid JobMetricScope", str)
+		return fmt.Errorf("%s is not a valid Aggregate", str)
 	}
 	return nil
 }
 
-func (e JobMetricScope) MarshalGQL(w io.Writer) {
+func (e Aggregate) MarshalGQL(w io.Writer) {
+	fmt.Fprint(w, strconv.Quote(e.String()))
+}
+
+type JobState string
+
+const (
+	JobStateRunning   JobState = "running"
+	JobStateCompleted JobState = "completed"
+)
+
+var AllJobState = []JobState{
+	JobStateRunning,
+	JobStateCompleted,
+}
+
+func (e JobState) IsValid() bool {
+	switch e {
+	case JobStateRunning, JobStateCompleted:
+		return true
+	}
+	return false
+}
+
+func (e JobState) String() string {
+	return string(e)
+}
+
+func (e *JobState) UnmarshalGQL(v interface{}) error {
+	str, ok := v.(string)
+	if !ok {
+		return fmt.Errorf("enums must be strings")
+	}
+
+	*e = JobState(str)
+	if !e.IsValid() {
+		return fmt.Errorf("%s is not a valid JobState", str)
+	}
+	return nil
+}
+
+func (e JobState) MarshalGQL(w io.Writer) {
 	fmt.Fprint(w, strconv.Quote(e.String()))
 }
```
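`Aggregate` and `JobState` follow gqlgen's usual hand-rolled enum pattern: `UnmarshalGQL` validates incoming strings against the declared constants, and `MarshalGQL` writes the quoted value back out. A small standalone round-trip, not code from the repository:

```go
package main

import (
	"bytes"
	"fmt"

	"github.com/ClusterCockpit/cc-jobarchive/graph/model"
)

func main() {
	// Unmarshal accepts values that match a declared constant...
	var s model.JobState
	if err := s.UnmarshalGQL("running"); err != nil {
		panic(err)
	}

	// ...and rejects anything else.
	var bad model.JobState
	fmt.Println(bad.UnmarshalGQL("pending")) // pending is not a valid JobState

	// Marshal writes the quoted string back out.
	buf := &bytes.Buffer{}
	s.MarshalGQL(buf)
	fmt.Println(buf.String()) // "running"
}
```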
```diff
@@ -1,69 +1,72 @@
 type Job {
-  id: ID!
-  jobId: String!
-  userId: String!
-  projectId: String!
-  clusterId: String!
-  startTime: Time!
-  duration: Int!
-  numNodes: Int!
-  hasProfile: Boolean!
-  tags: [JobTag!]!
+  id: ID!              # Database ID, unique
+  jobId: String!       # ID given to the job by the cluster scheduler
+  userId: String!      # Username
+  projectId: String!   # Project
+  clusterId: String!   # Name of the cluster this job was running on
+  startTime: Time!     # RFC3339 formated string
+  duration: Int!       # For running jobs, the time it has already run
+  numNodes: Int!       # Number of nodes this job was running on
+  nodes: [String!]!    # List of hostnames
+  hasProfile: Boolean! # TODO: Could be removed?
+  state: JobState!     # State of the job
+  tags: [JobTag!]!     # List of tags this job has
 
+  # Will be null for running jobs.
   loadAvg: Float
   memUsedMax: Float
   flopsAnyAvg: Float
   memBwAvg: Float
   netBwAvg: Float
   fileBwAvg: Float
 }
 
+# TODO: Extend by more possible states?
+enum JobState {
+  running
+  completed
+}
+
 type JobTag {
-  id: ID!
-  tagType: String!
-  tagName: String!
+  id: ID!          # Database ID, unique
+  tagType: String! # Type
+  tagName: String! # Name
 }
 
 type Cluster {
   clusterID: String!
   processorType: String!
   socketsPerNode: Int!
   coresPerSocket: Int!
   threadsPerCore: Int!
   flopRateScalar: Int!
   flopRateSimd: Int!
   memoryBandwidth: Int!
   metricConfig: [MetricConfig!]!
   filterRanges: FilterRanges!
 }
 
 type MetricConfig {
   name: String!
   unit: String!
   sampletime: Int!
   peak: Int!
   normal: Int!
   caution: Int!
   alert: Int!
 }
 
 type JobMetric {
   unit: String!
   scope: JobMetricScope!
   timestep: Int!
-  series: [JobMetricSeries]!
-}
-
-enum JobMetricScope {
-  node
-  cpu
-  socket
+  series: [JobMetricSeries!]!
 }
 
 type JobMetricSeries {
   node_id: String!
   statistics: JobMetricStatistics
-  data: [Float]!
+  data: [NullableFloat!]!
 }
 
 type JobMetricStatistics {
@@ -73,21 +76,29 @@ type JobMetricStatistics {
 }
 
 type JobMetricWithName {
   name: String!
   metric: JobMetric!
 }
 
+type MetricFootprints {
+  name: String!
+  footprints: [NullableFloat!]!
+}
+
+enum Aggregate { USER, PROJECT, CLUSTER }
+
 type Query {
-  clusters: [Cluster!]!
-  jobById(id: ID!): Job
-  jobs(filter: JobFilterList, page: PageRequest, order: OrderByInput): JobResultList!
-  jobsStatistics(filter: JobFilterList): JobsStatistics!
-  jobMetrics(jobId: String!, clusterId: String, metrics: [String]): [JobMetricWithName]!
-  jobMetricAverages(filter: JobFilterList!, metrics: [String]!): [[Float]]!
-  rooflineHeatmap(filter: JobFilterList!, rows: Int!, cols: Int!, minX: Float!, minY: Float!, maxX: Float!, maxY: Float!): [[Float!]]!
-  tags: [JobTag!]!
-  filterRanges: FilterRanges!
-  userStats(startTime: Time, stopTime: Time, clusterId: String): [UserStats!]!
+  clusters: [Cluster!]! # List of all clusters
+  tags: [JobTag!]!      # List of all tags
+
+  job(id: ID!): Job
+  jobMetrics(id: ID!, metrics: [String!]): [JobMetricWithName!]!
+  jobsFootprints(filter: [JobFilter!], metrics: [String!]!): [MetricFootprints]!
+
+  jobs(filter: [JobFilter!], page: PageRequest, order: OrderByInput): JobResultList!
+  jobsStatistics(filter: [JobFilter!], groupBy: Aggregate): [JobsStatistics!]!
+
+  rooflineHeatmap(filter: [JobFilter!]!, rows: Int!, cols: Int!, minX: Float!, minY: Float!, maxX: Float!, maxY: Float!): [[Float!]!]!
 }
 
 type Mutation {
@@ -101,43 +112,39 @@ type Mutation {
 
 type IntRangeOutput {
   from: Int!
   to: Int!
 }
 
 type TimeRangeOutput {
   from: Time!
   to: Time!
 }
 
 type FilterRanges {
   duration: IntRangeOutput!
   numNodes: IntRangeOutput!
   startTime: TimeRangeOutput!
 }
 
-input JobFilterList {
-  list: [JobFilter]
-}
-
 input JobFilter {
   tags: [ID!]
   jobId: StringInput
   userId: StringInput
   projectId: StringInput
   clusterId: StringInput
   duration: IntRange
   numNodes: IntRange
   startTime: TimeRange
   isRunning: Boolean
   flopsAnyAvg: FloatRange
   memBwAvg: FloatRange
   loadAvg: FloatRange
   memUsedMax: FloatRange
 }
 
 input OrderByInput {
   field: String!
-  order: SortDirectionEnum = ASC
+  order: SortDirectionEnum! = ASC
 }
 
 enum SortDirectionEnum {
@@ -146,32 +153,32 @@ enum SortDirectionEnum {
 }
 
 input StringInput {
   eq: String
   contains: String
   startsWith: String
   endsWith: String
 }
 
 input IntRange {
   from: Int!
   to: Int!
 }
 
 input FloatRange {
   from: Float!
   to: Float!
 }
 
 input TimeRange {
-  from: Time!
-  to: Time!
+  from: Time
+  to: Time
 }
 
 type JobResultList {
-  items: [Job]!
+  items: [Job!]!
   offset: Int
   limit: Int
   count: Int
 }
 
 type HistoPoint {
@@ -180,24 +187,20 @@ type HistoPoint {
 }
 
 type JobsStatistics {
-  totalJobs: Int!
-  shortJobs: Int!
-  totalWalltime: Int!
-  totalCoreHours: Int!
-  histWalltime: [HistoPoint]!
-  histNumNodes: [HistoPoint]!
-}
-
-type UserStats {
-  userId: ID!
-  totalJobs: Int!
-  totalWalltime: Float!
-  totalCoreHours: Float!
+  id: ID!                      # If `groupBy` was used, ID of the user/project/cluster
+  totalJobs: Int!              # Number of jobs that matched
+  shortJobs: Int!              # Number of jobs with a duration of less than 2 minutes
+  totalWalltime: Int!          # Sum of the duration of all matched jobs in hours
+  totalCoreHours: Int!         # Sum of the core hours of all matched jobs
+  histWalltime: [HistoPoint!]! # value: hour, count: number of jobs with a rounded duration of value
+  histNumNodes: [HistoPoint!]! # value: number of nodes, count: number of jobs with that number of nodes
 }
 
 input PageRequest {
   itemsPerPage: Int!
   page: Int!
 }
 
 scalar Time
+scalar NullableFloat
+scalar JobMetricScope
```
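gqlgen will turn each field of the reworked `Query` type into a resolver method. Going by its usual conventions (nullable arguments and results become pointers, lists become slices, `ID!` becomes `string`), the generated `QueryResolver` interface should come out roughly like the sketch below; this is an approximation, not the actual generated code:

```go
package example

import (
	"context"

	"github.com/ClusterCockpit/cc-jobarchive/graph/model"
)

// Approximately the resolver interface gqlgen derives from the new
// Query type (names and shapes follow gqlgen conventions, assumed here).
type QueryResolver interface {
	Clusters(ctx context.Context) ([]*model.Cluster, error)
	Tags(ctx context.Context) ([]*model.JobTag, error)
	Job(ctx context.Context, id string) (*model.Job, error)
	JobMetrics(ctx context.Context, id string, metrics []string) ([]*model.JobMetricWithName, error)
	JobsFootprints(ctx context.Context, filter []*model.JobFilter, metrics []string) ([]*model.MetricFootprints, error)
	Jobs(ctx context.Context, filter []*model.JobFilter, page *model.PageRequest, order *model.OrderByInput) (*model.JobResultList, error)
	JobsStatistics(ctx context.Context, filter []*model.JobFilter, groupBy *model.Aggregate) ([]*model.JobsStatistics, error)
	RooflineHeatmap(ctx context.Context, filter []*model.JobFilter, rows int, cols int, minX float64, minY float64, maxX float64, maxY float64) ([][]float64, error)
}
```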
schema/float.go (new file, 66 lines)
```go
package schema

import (
	"errors"
	"io"
	"math"
	"strconv"
)

// A custom float type is used so that (Un)MarshalJSON and
// (Un)MarshalGQL can be overloaded and NaN/null can be used.
// The default behaviour of putting every nullable value behind
// a pointer has a bigger overhead.
type Float float64

var NaN Float = Float(math.NaN())

func (f Float) IsNaN() bool {
	return math.IsNaN(float64(f))
}

// NaN will be serialized to `null`.
func (f Float) MarshalJSON() ([]byte, error) {
	if f.IsNaN() {
		return []byte("null"), nil
	}

	return []byte(strconv.FormatFloat(float64(f), 'f', 2, 64)), nil
}

// `null` will be unserialized to NaN.
func (f *Float) UnmarshalJSON(input []byte) error {
	s := string(input)
	if s == "null" {
		*f = NaN
		return nil
	}

	val, err := strconv.ParseFloat(s, 64)
	if err != nil {
		return err
	}
	*f = Float(val)
	return nil
}

// UnmarshalGQL implements the graphql.Unmarshaler interface.
func (f *Float) UnmarshalGQL(v interface{}) error {
	f64, ok := v.(float64)
	if !ok {
		return errors.New("invalid Float scalar")
	}

	*f = Float(f64)
	return nil
}

// MarshalGQL implements the graphql.Marshaler interface.
// NaN will be serialized to `null`.
func (f Float) MarshalGQL(w io.Writer) {
	if f.IsNaN() {
		w.Write([]byte(`null`))
	} else {
		w.Write([]byte(strconv.FormatFloat(float64(f), 'f', 2, 64)))
	}
}
```
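The point of the custom scalar shows up in a JSON round-trip: a NaN sample serializes to `null` instead of making `json.Marshal` fail, and `null` comes back as NaN without every value hiding behind a `*float64`. A small standalone demonstration, not part of the commit:

```go
package main

import (
	"encoding/json"
	"fmt"

	"github.com/ClusterCockpit/cc-jobarchive/schema"
)

func main() {
	// NaN marshals to null (a plain float64 NaN would be an error).
	out, _ := json.Marshal([]schema.Float{1.5, schema.NaN, 3.0})
	fmt.Println(string(out)) // [1.50,null,3.00]

	// null unmarshals back to NaN, no pointers involved.
	var in []schema.Float
	json.Unmarshal([]byte(`[1.5,null,3.0]`), &in)
	fmt.Println(in[1].IsNaN()) // true
}
```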
schema/metrics.go (new file, 76 lines)
```go
package schema

import (
	"fmt"
	"io"
)

// Format of `data.json` files.
type JobData map[string]*JobMetric

type JobMetric struct {
	Unit     string          `json:"unit"`
	Scope    MetricScope     `json:"scope"`
	Timestep int             `json:"timestep"`
	Series   []*MetricSeries `json:"series"`
}

type MetricScope string

const (
	MetricScopeNode   MetricScope = "node"
	MetricScopeSocket MetricScope = "socket"
	MetricScopeCpu    MetricScope = "cpu"
)

func (e *MetricScope) UnmarshalGQL(v interface{}) error {
	str, ok := v.(string)
	if !ok {
		return fmt.Errorf("enums must be strings")
	}

	*e = MetricScope(str)
	if *e != "node" && *e != "socket" && *e != "cpu" {
		return fmt.Errorf("%s is not a valid MetricScope", str)
	}
	return nil
}

func (e MetricScope) MarshalGQL(w io.Writer) {
	fmt.Fprintf(w, "\"%s\"", e)
}

type MetricStatistics struct {
	Avg float64 `json:"avg"`
	Min float64 `json:"min"`
	Max float64 `json:"max"`
}

type MetricSeries struct {
	NodeID     string            `json:"node_id"`
	Statistics *MetricStatistics `json:"statistics"`
	Data       []Float           `json:"data"`
}

// Format of `meta.json` files.
type JobMeta struct {
	JobId     string   `json:"job_id"`
	UserId    string   `json:"user_id"`
	ProjectId string   `json:"project_id"`
	ClusterId string   `json:"cluster_id"`
	NumNodes  int      `json:"num_nodes"`
	JobState  string   `json:"job_state"`
	StartTime int64    `json:"start_time"`
	Duration  int64    `json:"duration"`
	Nodes     []string `json:"nodes"`
	Tags      []struct {
		Name string `json:"name"`
		Type string `json:"type"`
	} `json:"tags"`
	Statistics map[string]struct {
		Unit string  `json:"unit"`
		Avg  float64 `json:"avg"`
		Min  float64 `json:"min"`
		Max  float64 `json:"max"`
	} `json:"statistics"`
}
```
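Taken together, a `data.json` file from the job archive decodes straight into `JobData`, with `Float` absorbing `null` samples. A minimal sketch; the inline JSON is a made-up miniature of the format described above:

```go
package main

import (
	"encoding/json"
	"fmt"
	"strings"

	"github.com/ClusterCockpit/cc-jobarchive/schema"
)

func main() {
	// A made-up miniature data.json: metric name -> JobMetric.
	raw := `{
		"flops_any": {
			"unit": "GF/s", "scope": "node", "timestep": 60,
			"series": [{
				"node_id": "host001",
				"statistics": {"avg": 1.0, "min": 0.5, "max": 2.0},
				"data": [0.5, null, 2.0]
			}]
		}
	}`

	var data schema.JobData
	if err := json.NewDecoder(strings.NewReader(raw)).Decode(&data); err != nil {
		panic(err)
	}

	series := data["flops_any"].Series[0]
	fmt.Println(series.NodeID, series.Data[1].IsNaN()) // host001 true
}
```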