New GraphQL schema

Lou Knauer 2021-10-26 10:22:02 +02:00
parent 236f51ba9a
commit b8d23f8ea1
7 changed files with 1328 additions and 1292 deletions


@@ -55,15 +55,17 @@ models:
      - github.com/99designs/gqlgen/graphql.Int64
      - github.com/99designs/gqlgen/graphql.Int32
  Job:
    model: "github.com/ClusterCockpit/cc-jobarchive/graph/model.Job"
    fields:
      tags:
        resolver: true
  Cluster:
    fields:
      filterRanges:
        resolver: true
  JobTag:
    model: "github.com/ClusterCockpit/cc-jobarchive/graph/model.JobTag"
  Timestamp:
    model: "github.com/ClusterCockpit/cc-jobarchive/graph/model.Timestamp"
  JobMetric:
    model: "github.com/ClusterCockpit/cc-jobarchive/schema.JobMetric"
  JobMetricSeries:
    model: "github.com/ClusterCockpit/cc-jobarchive/schema.MetricSeries"
  JobMetricStatistics:
    model: "github.com/ClusterCockpit/cc-jobarchive/schema.MetricStatistics"
  NullableFloat:
    model: "github.com/ClusterCockpit/cc-jobarchive/schema.Float"
  JobMetricScope:
    model: "github.com/ClusterCockpit/cc-jobarchive/schema.MetricScope"

File diff suppressed because it is too large.


@@ -1,45 +1,9 @@
package model
import (
"time"
)
type Job struct {
ID string `json:"id"`
JobID string `json:"jobId" db:"job_id"`
UserID string `json:"userId" db:"user_id"`
ProjectID string `json:"projectId" db:"project_id"`
ClusterID string `json:"clusterId" db:"cluster_id"`
StartTime time.Time `json:"startTime" db:"start_time"`
Duration int `json:"duration" db:"duration"`
Walltime *int `json:"walltime" db:"walltime"`
Jobstate *string `json:"jobstate" db:"job_state"`
NumNodes int `json:"numNodes" db:"num_nodes"`
NodeList string `json:"nodelist" db:"node_list"`
HasProfile bool `json:"hasProfile" db:"has_profile"`
MemUsedMax *float64 `json:"memUsedMax" db:"mem_used_max"`
FlopsAnyAvg *float64 `json:"flopsAnyAvg" db:"flops_any_avg"`
MemBwAvg *float64 `json:"memBwAvg" db:"mem_bw_avg"`
NetBwAvg *float64 `json:"netBwAvg" db:"net_bw_avg"`
FileBwAvg *float64 `json:"fileBwAvg" db:"file_bw_avg"`
LoadAvg *float64 `json:"loadAvg" db:"load_avg"`
Tags []JobTag `json:"tags"`
}
// Go look at `gqlgen.yml` and the schema package for other non-generated models.
// Old definitions (removed; Cluster is now generated in models_gen.go):
// type JobTag struct {
// 	ID      string `db:"id"`
// 	TagType string `db:"tag_type"`
// 	TagName string `db:"tag_name"`
// }
// type Cluster struct {
// 	ClusterID       string         `json:"clusterID"`
// 	ProcessorType   string         `json:"processorType"`
// 	SocketsPerNode  int            `json:"socketsPerNode"`
// 	CoresPerSocket  int            `json:"coresPerSocket"`
// 	ThreadsPerCore  int            `json:"threadsPerCore"`
// 	FlopRateScalar  int            `json:"flopRateScalar"`
// 	FlopRateSimd    int            `json:"flopRateSimd"`
// 	MemoryBandwidth int            `json:"memoryBandwidth"`
// 	MetricConfig    []MetricConfig `json:"metricConfig"`
// }

type JobTag struct {
	ID      string `json:"id" db:"id"`
	TagType string `json:"tagType" db:"tag_type"`
	TagName string `json:"tagName" db:"tag_name"`
}


@@ -7,8 +7,23 @@ import (
"io"
"strconv"
"time"
"github.com/ClusterCockpit/cc-jobarchive/schema"
)
type Cluster struct {
ClusterID string `json:"clusterID"`
ProcessorType string `json:"processorType"`
SocketsPerNode int `json:"socketsPerNode"`
CoresPerSocket int `json:"coresPerSocket"`
ThreadsPerCore int `json:"threadsPerCore"`
FlopRateScalar int `json:"flopRateScalar"`
FlopRateSimd int `json:"flopRateSimd"`
MemoryBandwidth int `json:"memoryBandwidth"`
MetricConfig []*MetricConfig `json:"metricConfig"`
FilterRanges *FilterRanges `json:"filterRanges"`
}
type FilterRanges struct {
Duration *IntRangeOutput `json:"duration"`
NumNodes *IntRangeOutput `json:"numNodes"`
@@ -35,6 +50,27 @@ type IntRangeOutput {
To int `json:"to"`
}
type Job struct {
ID string `json:"id"`
JobID string `json:"jobId"`
UserID string `json:"userId"`
ProjectID string `json:"projectId"`
ClusterID string `json:"clusterId"`
StartTime time.Time `json:"startTime"`
Duration int `json:"duration"`
NumNodes int `json:"numNodes"`
Nodes []string `json:"nodes"`
HasProfile bool `json:"hasProfile"`
State JobState `json:"state"`
Tags []*JobTag `json:"tags"`
LoadAvg *float64 `json:"loadAvg"`
MemUsedMax *float64 `json:"memUsedMax"`
FlopsAnyAvg *float64 `json:"flopsAnyAvg"`
MemBwAvg *float64 `json:"memBwAvg"`
NetBwAvg *float64 `json:"netBwAvg"`
FileBwAvg *float64 `json:"fileBwAvg"`
}
type JobFilter struct {
Tags []string `json:"tags"`
JobID *StringInput `json:"jobId"`
@@ -51,32 +87,9 @@ type JobFilter {
MemUsedMax *FloatRange `json:"memUsedMax"`
}
type JobFilterList struct {
List []*JobFilter `json:"list"`
}
type JobMetric struct {
Unit string `json:"unit"`
Scope JobMetricScope `json:"scope"`
Timestep int `json:"timestep"`
Series []*JobMetricSeries `json:"series"`
}
type JobMetricSeries struct {
NodeID string `json:"node_id"`
Statistics *JobMetricStatistics `json:"statistics"`
Data []*float64 `json:"data"`
}
type JobMetricStatistics struct {
Avg float64 `json:"avg"`
Min float64 `json:"min"`
Max float64 `json:"max"`
}
type JobMetricWithName struct {
	Name   string            `json:"name"`
	Metric *schema.JobMetric `json:"metric"` // was: *JobMetric
}
type JobResultList struct {
@@ -87,6 +100,7 @@ type JobResultList {
}
type JobsStatistics struct {
ID string `json:"id"`
TotalJobs int `json:"totalJobs"`
ShortJobs int `json:"shortJobs"`
TotalWalltime int `json:"totalWalltime"`
@@ -105,9 +119,14 @@ type MetricConfig {
Alert int `json:"alert"`
}
type MetricFootprints struct {
Name string `json:"name"`
Footprints []schema.Float `json:"footprints"`
}
type OrderByInput struct {
	Field string            `json:"field"`
	Order SortDirectionEnum `json:"order"` // was: *SortDirectionEnum
}
type PageRequest struct {
@@ -123,8 +142,8 @@ type StringInput {
}
type TimeRange struct {
	From *time.Time `json:"from"` // was: time.Time
	To   *time.Time `json:"to"`   // was: time.Time
}
type TimeRangeOutput struct {
@@ -132,53 +151,87 @@ type TimeRangeOutput {
To time.Time `json:"to"`
}
type UserStats struct {
UserID string `json:"userId"`
TotalJobs int `json:"totalJobs"`
TotalWalltime float64 `json:"totalWalltime"`
TotalCoreHours float64 `json:"totalCoreHours"`
}
// Old (removed): the JobMetricScope enum type with the values
// "node", "cpu" and "socket"; it now lives in the schema package
// as MetricScope. This file gains the new Aggregate enum instead.
type Aggregate string

const (
	AggregateUser    Aggregate = "USER"
	AggregateProject Aggregate = "PROJECT"
	AggregateCluster Aggregate = "CLUSTER"
)

var AllAggregate = []Aggregate{
	AggregateUser,
	AggregateProject,
	AggregateCluster,
}

func (e Aggregate) IsValid() bool {
	switch e {
	case AggregateUser, AggregateProject, AggregateCluster:
		return true
	}
	return false
}

func (e Aggregate) String() string {
	return string(e)
}

func (e *Aggregate) UnmarshalGQL(v interface{}) error {
	str, ok := v.(string)
	if !ok {
		return fmt.Errorf("enums must be strings")
	}
	*e = Aggregate(str)
	if !e.IsValid() {
		return fmt.Errorf("%s is not a valid Aggregate", str)
	}
	return nil
}

func (e Aggregate) MarshalGQL(w io.Writer) {
	fmt.Fprint(w, strconv.Quote(e.String()))
}
type JobState string
const (
JobStateRunning JobState = "running"
JobStateCompleted JobState = "completed"
)
var AllJobState = []JobState{
JobStateRunning,
JobStateCompleted,
}
func (e JobState) IsValid() bool {
switch e {
case JobStateRunning, JobStateCompleted:
return true
}
return false
}
func (e JobState) String() string {
return string(e)
}
func (e *JobState) UnmarshalGQL(v interface{}) error {
str, ok := v.(string)
if !ok {
return fmt.Errorf("enums must be strings")
}
*e = JobState(str)
if !e.IsValid() {
return fmt.Errorf("%s is not a valid JobState", str)
}
return nil
}
func (e JobState) MarshalGQL(w io.Writer) {
fmt.Fprint(w, strconv.Quote(e.String()))
}
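Taken together, the generated methods give the new enums strict validation on input and quoted strings on output. A small usage sketch; the invalid value is made up:

package main

import (
	"fmt"

	"github.com/ClusterCockpit/cc-jobarchive/graph/model"
)

func main() {
	var state model.JobState

	// Valid values are accepted and compare against the constants.
	if err := state.UnmarshalGQL("running"); err == nil {
		fmt.Println(state == model.JobStateRunning) // true
	}

	// Values outside the declared set are rejected by IsValid.
	err := state.UnmarshalGQL("pending")
	fmt.Println(err) // pending is not a valid JobState
}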


@@ -1,69 +1,72 @@
type Job {
	id: ID!              # Database ID, unique
	jobId: String!       # ID given to the job by the cluster scheduler
	userId: String!      # Username
	projectId: String!   # Project
	clusterId: String!   # Name of the cluster this job was running on
	startTime: Time!     # RFC3339-formatted string
	duration: Int!       # For running jobs, the time it has already run
	numNodes: Int!       # Number of nodes this job was running on
	nodes: [String!]!    # List of hostnames
	hasProfile: Boolean! # TODO: Could be removed?
	state: JobState!     # State of the job
	tags: [JobTag!]!     # List of tags this job has

	# Will be null for running jobs.
	loadAvg:     Float
	memUsedMax:  Float
	flopsAnyAvg: Float
	memBwAvg:    Float
	netBwAvg:    Float
	fileBwAvg:   Float
}
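With the added `nodes` and `state` fields, a client can watch a running job and treat the footprint columns as pending until they turn non-null. A hedged sketch of such a request against the `job(id:)` root field defined further down; the ID is made up, and the query is written as a Go string so it can be handed to any GraphQL client:

package main

import "fmt"

// jobQuery selects a job's core fields; the field names come from the
// schema above, everything else is hypothetical.
const jobQuery = `query {
  job(id: "1337") {
    jobId
    state
    nodes
    tags { tagType tagName }
    flopsAnyAvg  # null while state is "running"
    memBwAvg
  }
}`

func main() {
	fmt.Println(jobQuery) // hand this string to any GraphQL client
}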
# TODO: Extend by more possible states?
enum JobState {
running
completed
}
type JobTag {
	id: ID!          # Database ID, unique
	tagType: String! # Type
	tagName: String! # Name
}
type Cluster {
	clusterID:       String!
	processorType:   String!
	socketsPerNode:  Int!
	coresPerSocket:  Int!
	threadsPerCore:  Int!
	flopRateScalar:  Int!
	flopRateSimd:    Int!
	memoryBandwidth: Int!
	metricConfig:    [MetricConfig!]!
	filterRanges:    FilterRanges!
}
type MetricConfig {
	name:       String!
	unit:       String!
	sampletime: Int!
	peak:       Int!
	normal:     Int!
	caution:    Int!
	alert:      Int!
}
type JobMetric {
	unit:     String!
	scope:    JobMetricScope!
	timestep: Int!
	series:   [JobMetricSeries!]!   # was: [JobMetricSeries]!
}

# Old (removed): JobMetricScope used to be an enum here; it is now
# declared as a scalar at the end of this file and mapped to
# schema.MetricScope in gqlgen.yml.
enum JobMetricScope {
	node
	cpu
	socket
}
type JobMetricSeries {
	node_id:    String!
	statistics: JobMetricStatistics
	data:       [NullableFloat!]!   # was: [Float]!
}
type JobMetricStatistics {
@@ -73,21 +76,29 @@ type JobMetricStatistics {
}
type JobMetricWithName {
	name:   String!
	metric: JobMetric!
}
type MetricFootprints {
name: String!
footprints: [NullableFloat!]!
}
enum Aggregate { USER, PROJECT, CLUSTER }
type Query {
	# Old root fields (removed):
	# clusters: [Cluster!]!
	# jobById(id: ID!): Job
	# jobs(filter: JobFilterList, page: PageRequest, order: OrderByInput): JobResultList!
	# jobsStatistics(filter: JobFilterList): JobsStatistics!
	# jobMetrics(jobId: String!, clusterId: String, metrics: [String]): [JobMetricWithName]!
	# jobMetricAverages(filter: JobFilterList!, metrics: [String]!): [[Float]]!
	# rooflineHeatmap(filter: JobFilterList!, rows: Int!, cols: Int!, minX: Float!, minY: Float!, maxX: Float!, maxY: Float!): [[Float!]]!
	# tags: [JobTag!]!
	# filterRanges: FilterRanges!
	# userStats(startTime: Time, stopTime: Time, clusterId: String): [UserStats!]!

	# New root fields:
	clusters: [Cluster!]!  # List of all clusters
	tags: [JobTag!]!       # List of all tags
	job(id: ID!): Job
	jobMetrics(id: ID!, metrics: [String!]): [JobMetricWithName!]!
	jobsFootprints(filter: [JobFilter!], metrics: [String!]!): [MetricFootprints]!
	jobs(filter: [JobFilter!], page: PageRequest, order: OrderByInput): JobResultList!
	jobsStatistics(filter: [JobFilter!], groupBy: Aggregate): [JobsStatistics!]!
	rooflineHeatmap(filter: [JobFilter!]!, rows: Int!, cols: Int!, minX: Float!, minY: Float!, maxX: Float!, maxY: Float!): [[Float!]!]!
}
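The `groupBy: Aggregate` argument folds the old `userStats` query into `jobsStatistics`: grouping by USER yields one entry per user, with `id` carrying the user ID. A sketch of a client-side query string; the cluster name is invented, and HistoPoint is assumed to expose the count/value pair described in the schema comments below:

package main

import "fmt"

// statsQuery replaces what userStats used to answer.
const statsQuery = `query {
  jobsStatistics(filter: [{clusterId: {eq: "emmy"}}], groupBy: USER) {
    id              # user ID, because of groupBy: USER
    totalJobs
    shortJobs
    totalCoreHours
    histNumNodes { count value }
  }
}`

func main() {
	fmt.Println(statsQuery)
}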
type Mutation {
@@ -101,43 +112,39 @@ type Mutation {
type IntRangeOutput {
	from: Int!
	to:   Int!
}
type TimeRangeOutput {
	from: Time!
	to:   Time!
}
type FilterRanges {
	duration:  IntRangeOutput!
	numNodes:  IntRangeOutput!
	startTime: TimeRangeOutput!
}
input JobFilterList {
list: [JobFilter]
}
input JobFilter {
	tags:        [ID!]
	jobId:       StringInput
	userId:      StringInput
	projectId:   StringInput
	clusterId:   StringInput
	duration:    IntRange
	numNodes:    IntRange
	startTime:   TimeRange
	isRunning:   Boolean
	flopsAnyAvg: FloatRange
	memBwAvg:    FloatRange
	loadAvg:     FloatRange
	memUsedMax:  FloatRange
}
input OrderByInput {
	field: String!
	order: SortDirectionEnum! = ASC   # was nullable
}
enum SortDirectionEnum {
@@ -146,32 +153,32 @@ enum SortDirectionEnum {
}
input StringInput {
	eq:         String
	contains:   String
	startsWith: String
	endsWith:   String
}
input IntRange {
	from: Int!
	to:   Int!
}
input FloatRange {
	from: Float!
	to:   Float!
}
input TimeRange {
	from: Time   # was: Time!
	to:   Time   # was: Time!
}
type JobResultList {
	items:  [Job!]!   # was: [Job]!
	offset: Int
	limit:  Int
	count:  Int
}
type HistoPoint {
@@ -180,24 +187,20 @@ type HistoPoint {
}
type JobsStatistics {
	# Old fields (removed):
	# totalJobs: Int!
	# shortJobs: Int!
	# totalWalltime: Int!
	# totalCoreHours: Int!
	# histWalltime: [HistoPoint]!
	# histNumNodes: [HistoPoint]!

	id: ID!                      # If `groupBy` was used, ID of the user/project/cluster
	totalJobs: Int!              # Number of jobs that matched
	shortJobs: Int!              # Number of jobs with a duration of less than 2 minutes
	totalWalltime: Int!          # Sum of the duration of all matched jobs in hours
	totalCoreHours: Int!         # Sum of the core hours of all matched jobs
	histWalltime: [HistoPoint!]! # value: hour, count: number of jobs with a rounded duration of value
	histNumNodes: [HistoPoint!]! # value: number of nodes, count: number of jobs with that number of nodes
}

# Removed; superseded by jobsStatistics(groupBy: USER):
type UserStats {
	userId: ID!
	totalJobs: Int!
	totalWalltime: Float!
	totalCoreHours: Float!
}
input PageRequest {
	itemsPerPage: Int!
	page:         Int!
}
scalar Time
scalar NullableFloat
scalar JobMetricScope

schema/float.go (new file, 66 lines)

@@ -0,0 +1,66 @@
package schema
import (
"errors"
"io"
"math"
"strconv"
)
// A custom float type is used so that (Un)MarshalJSON and
// (Un)MarshalGQL can be overloaded and NaN/null can be used.
// The default approach of putting every nullable value behind
// a pointer has more overhead.
type Float float64
var NaN Float = Float(math.NaN())
func (f Float) IsNaN() bool {
return math.IsNaN(float64(f))
}
// NaN will be serialized to `null`.
func (f Float) MarshalJSON() ([]byte, error) {
if f.IsNaN() {
return []byte("null"), nil
}
return []byte(strconv.FormatFloat(float64(f), 'f', 2, 64)), nil
}
// `null` will be deserialized to NaN.
func (f *Float) UnmarshalJSON(input []byte) error {
s := string(input)
if s == "null" {
*f = NaN
return nil
}
val, err := strconv.ParseFloat(s, 64)
if err != nil {
return err
}
*f = Float(val)
return nil
}
// UnmarshalGQL implements the graphql.Unmarshaler interface.
func (f *Float) UnmarshalGQL(v interface{}) error {
f64, ok := v.(float64)
if !ok {
return errors.New("invalid Float scalar")
}
*f = Float(f64)
return nil
}
// MarshalGQL implements the graphql.Marshaler interface.
// NaN will be serialized to `null`.
func (f Float) MarshalGQL(w io.Writer) {
if f.IsNaN() {
w.Write([]byte(`null`))
} else {
w.Write([]byte(strconv.FormatFloat(float64(f), 'f', 2, 64)))
}
}
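A quick round-trip shows what the overloads buy: NaN serializes as `null` and comes back as NaN, with no pointers involved. A minimal sketch, with the expected output in comments:

package main

import (
	"encoding/json"
	"fmt"

	"github.com/ClusterCockpit/cc-jobarchive/schema"
)

func main() {
	vals := []schema.Float{1.5, schema.NaN, 3.0}

	// NaN is serialized to `null`, everything else to two decimals.
	out, _ := json.Marshal(vals)
	fmt.Println(string(out)) // [1.50,null,3.00]

	// `null` is deserialized back to NaN.
	var back []schema.Float
	_ = json.Unmarshal(out, &back)
	fmt.Println(back[1].IsNaN()) // true
}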

schema/metrics.go (new file, 76 lines)

@@ -0,0 +1,76 @@
package schema
import (
"fmt"
"io"
)
// Format of `data.json` files.
type JobData map[string]*JobMetric
type JobMetric struct {
Unit string `json:"unit"`
Scope MetricScope `json:"scope"`
Timestep int `json:"timestep"`
Series []*MetricSeries `json:"series"`
}
type MetricScope string
const (
MetricScopeNode MetricScope = "node"
MetricScopeSocket MetricScope = "socket"
MetricScopeCpu MetricScope = "cpu"
)
func (e *MetricScope) UnmarshalGQL(v interface{}) error {
str, ok := v.(string)
if !ok {
return fmt.Errorf("enums must be strings")
}
*e = MetricScope(str)
if *e != "node" && *e != "socket" && *e != "cpu" {
return fmt.Errorf("%s is not a valid MetricScope", str)
}
return nil
}
func (e MetricScope) MarshalGQL(w io.Writer) {
fmt.Fprintf(w, "\"%s\"", e)
}
type MetricStatistics struct {
Avg float64 `json:"avg"`
Min float64 `json:"min"`
Max float64 `json:"max"`
}
type MetricSeries struct {
NodeID string `json:"node_id"`
Statistics *MetricStatistics `json:"statistics"`
Data []Float `json:"data"`
}
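Marshalling a JobData value therefore yields the on-disk shape of a `data.json` file directly. A sketch with one invented metric and node; all names and numbers are made up:

package main

import (
	"encoding/json"
	"os"

	"github.com/ClusterCockpit/cc-jobarchive/schema"
)

func main() {
	data := schema.JobData{
		"flops_any": &schema.JobMetric{
			Unit:     "GF/s",
			Scope:    schema.MetricScopeNode,
			Timestep: 60,
			Series: []*schema.MetricSeries{{
				NodeID:     "node001",
				Statistics: &schema.MetricStatistics{Avg: 2.0, Min: 1.0, Max: 3.0},
				Data:       []schema.Float{1.0, schema.NaN, 3.0}, // the NaN becomes null
			}},
		},
	}

	enc := json.NewEncoder(os.Stdout)
	enc.SetIndent("", "  ")
	_ = enc.Encode(data) // prints the data.json shape
}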
// Format of `meta.json` files.
type JobMeta struct {
JobId string `json:"job_id"`
UserId string `json:"user_id"`
ProjectId string `json:"project_id"`
ClusterId string `json:"cluster_id"`
NumNodes int `json:"num_nodes"`
JobState string `json:"job_state"`
StartTime int64 `json:"start_time"`
Duration int64 `json:"duration"`
Nodes []string `json:"nodes"`
Tags []struct {
Name string `json:"name"`
Type string `json:"type"`
} `json:"tags"`
Statistics map[string]struct {
Unit string `json:"unit"`
Avg float64 `json:"avg"`
Min float64 `json:"min"`
Max float64 `json:"max"`
} `json:"statistics"`
}
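Conversely, a `meta.json` file unmarshals straight into JobMeta. A sketch with an invented payload; every identifier and number is hypothetical:

package main

import (
	"encoding/json"
	"fmt"

	"github.com/ClusterCockpit/cc-jobarchive/schema"
)

const metaJSON = `{
	"job_id": "123456", "user_id": "hpc0001", "project_id": "proj42",
	"cluster_id": "emmy", "num_nodes": 2, "job_state": "completed",
	"start_time": 1635235322, "duration": 3600,
	"nodes": ["node001", "node002"],
	"tags": [{"name": "quicktest", "type": "usage"}],
	"statistics": {"flops_any": {"unit": "GF/s", "avg": 2.0, "min": 1.0, "max": 3.0}}
}`

func main() {
	var meta schema.JobMeta
	if err := json.Unmarshal([]byte(metaJSON), &meta); err != nil {
		panic(err)
	}
	fmt.Println(meta.ClusterId, meta.Duration, meta.Statistics["flops_any"].Avg)
	// Output: emmy 3600 2
}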