Mirror of https://github.com/ClusterCockpit/cc-backend (synced 2024-11-10 08:57:25 +01:00)
Update GraphQL schema
commit e4c0b3e955 (parent 341091a796)
graph/analysis.go (new file, 16 lines)
@@ -0,0 +1,16 @@
+package graph
+
+import (
+    "context"
+    "errors"
+
+    "github.com/ClusterCockpit/cc-jobarchive/graph/model"
+)
+
+func (r *queryResolver) JobMetricAverages(ctx context.Context, filter model.JobFilterList, metrics []*string) ([][]*float64, error) {
+    return nil, errors.New("unimplemented")
+}
+
+func (r *queryResolver) RooflineHeatmap(ctx context.Context, filter model.JobFilterList, rows, cols int, minX, minY, maxX, maxY float64) ([][]float64, error) {
+    return nil, errors.New("unimplemented")
+}
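Both new resolvers are placeholders that keep the generated code compiling while returning an error at runtime. A minimal, self-contained sketch of that stub pattern, with a simplified signature and illustrative names rather than the repository's own types:

// Standalone illustration of the stub-resolver pattern used above: the method
// satisfies the resolver interface but reports a sentinel error until the
// feature is actually implemented.
package main

import (
    "context"
    "errors"
    "fmt"
)

type queryResolver struct{}

func (r *queryResolver) JobMetricAverages(ctx context.Context, metrics []string) ([][]float64, error) {
    return nil, errors.New("unimplemented")
}

func main() {
    _, err := (&queryResolver{}).JobMetricAverages(context.Background(), []string{"flops_any"})
    fmt.Println(err) // prints: unimplemented
}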
graph/config.go (new file, 10 lines)
@@ -0,0 +1,10 @@
+package graph
+
+import (
+    "context"
+    "errors"
+)
+
+func (r *mutationResolver) UpdateConfiguration(ctx context.Context, key, value string) (*string, error) {
+    return nil, errors.New("unimplemented")
+}
(File diff suppressed because it is too large.)
@@ -44,7 +44,7 @@ type JobFilter struct {
     Duration    *IntRange   `json:"duration"`
     NumNodes    *IntRange   `json:"numNodes"`
     StartTime   *TimeRange  `json:"startTime"`
-    HasProfile  *bool       `json:"hasProfile"`
+    IsRunning   *bool       `json:"isRunning"`
     FlopsAnyAvg *FloatRange `json:"flopsAnyAvg"`
     MemBwAvg    *FloatRange `json:"memBwAvg"`
     LoadAvg     *FloatRange `json:"loadAvg"`
@@ -111,8 +111,8 @@ type OrderByInput struct {
 }
 
 type PageRequest struct {
-    ItemsPerPage *int `json:"itemsPerPage"`
-    Page         *int `json:"page"`
+    ItemsPerPage int `json:"itemsPerPage"`
+    Page         int `json:"page"`
 }
 
 type StringInput struct {
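A small self-contained demonstration (not repository code) of the practical difference between the two representations when decoding GraphQL variables: with *int a missing field stays nil, with int it silently becomes 0.

package main

import (
    "encoding/json"
    "fmt"
)

type pageWithPointers struct {
    ItemsPerPage *int `json:"itemsPerPage"`
    Page         *int `json:"page"`
}

type pageWithValues struct {
    ItemsPerPage int `json:"itemsPerPage"`
    Page         int `json:"page"`
}

func main() {
    data := []byte(`{"itemsPerPage": 20}`)

    var p1 pageWithPointers
    var p2 pageWithValues
    _ = json.Unmarshal(data, &p1)
    _ = json.Unmarshal(data, &p2)

    fmt.Println(p1.Page == nil) // true: absence is visible
    fmt.Println(p2.Page)        // 0: absence is indistinguishable from zero
}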
@@ -132,6 +132,13 @@ type TimeRangeOutput struct {
     To time.Time `json:"to"`
 }
 
+type UserStats struct {
+    UserID         string  `json:"userId"`
+    TotalJobs      int     `json:"totalJobs"`
+    TotalWalltime  float64 `json:"totalWalltime"`
+    TotalCoreHours float64 `json:"totalCoreHours"`
+}
+
 type JobMetricScope string
 
 const (
@@ -114,7 +114,7 @@ func buildQueryConditions(filterList *model.JobFilterList) (string, string) {
     return strings.Join(conditions, " AND "), join
 }
 
-func readJobDataFile(jobId string, clusterId *string, startTime *time.Time) ([]byte, error) {
+func readJobDataFile(jobId string, clusterId *string) ([]byte, error) {
     jobId = strings.Split(jobId, ".")[0]
     id, err := strconv.Atoi(jobId)
     if err != nil {
@@ -205,13 +205,8 @@ func (r *queryResolver) Jobs(
     var limit, offset int
     var qc, ob, jo string
 
-    if page != nil {
-        limit = *page.ItemsPerPage
-        offset = (*page.Page - 1) * limit
-    } else {
-        limit = 20
-        offset = 0
-    }
+    limit = page.ItemsPerPage
+    offset = (page.Page - 1) * limit
 
     if filterList != nil {
         qc, jo = buildQueryConditions(filterList)
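This simplification relies on PageRequest now carrying plain int fields (see the model change above), so the nil checks and the implicit default of 20 items per page are gone. A minimal sketch of the resulting LIMIT/OFFSET arithmetic, with a simplified PageRequest type to keep the example self-contained:

package main

import "fmt"

type PageRequest struct {
    ItemsPerPage int
    Page         int
}

func limitOffset(page PageRequest) (limit, offset int) {
    limit = page.ItemsPerPage
    offset = (page.Page - 1) * limit // pages are 1-based
    return limit, offset
}

func main() {
    limit, offset := limitOffset(PageRequest{ItemsPerPage: 20, Page: 3})
    fmt.Println(limit, offset) // 20 40
}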
@@ -371,11 +366,10 @@ func (r *queryResolver) Clusters(ctx context.Context) ([]*model.Cluster, error)
 }
 
 func (r *queryResolver) JobMetrics(
-    ctx context.Context, jobId string,
-    clusterId *string, startTime *time.Time,
+    ctx context.Context, jobId string, clusterId *string,
     metrics []*string) ([]*model.JobMetricWithName, error) {
 
-    f, err := readJobDataFile(jobId, clusterId, startTime)
+    f, err := readJobDataFile(jobId, clusterId)
     if err != nil {
         return nil, err
     }
@@ -593,4 +587,4 @@ func (r *Resolver) Mutation() generated.MutationResolver { return &mutationResol
 type jobResolver struct{ *Resolver }
 type clusterResolver struct{ *Resolver }
 type queryResolver struct{ *Resolver }
-type mutationResolver struct { *Resolver }
+type mutationResolver struct{ *Resolver }
@@ -1,6 +1,5 @@
 type Job {
   id: ID!
-
   jobId: String!
   userId: String!
   projectId: String!
@@ -9,6 +8,7 @@ type Job {
   duration: Int!
   numNodes: Int!
   hasProfile: Boolean!
+  tags: [JobTag!]!
 
   loadAvg: Float
   memUsedMax: Float
@@ -16,8 +16,12 @@ type Job {
   memBwAvg: Float
   netBwAvg: Float
   fileBwAvg: Float
+}
 
-  tags: [JobTag!]
+type JobTag {
+  id: ID!
+  tagType: String!
+  tagName: String!
 }
 
 type Cluster {
@@ -68,23 +72,22 @@ type JobMetricStatistics {
   max: Float!
 }
 
-type JobTag {
-  id: ID!
-  tagType: String!
-  tagName: String!
+type JobMetricWithName {
+  name: String!
+  metric: JobMetric!
 }
 
 type Query {
   clusters: [Cluster!]!
-
   jobById(id: ID!): Job
   jobs(filter: JobFilterList, page: PageRequest, order: OrderByInput): JobResultList!
   jobsStatistics(filter: JobFilterList): JobsStatistics!
-  jobMetrics(jobId: String!, clusterId: String, startTime: Time, metrics: [String]): [JobMetricWithName]!
-
+  jobMetrics(jobId: String!, clusterId: String, metrics: [String]): [JobMetricWithName]!
+  jobMetricAverages(filter: JobFilterList!, metrics: [String]!): [[Float]]!
+  rooflineHeatmap(filter: JobFilterList!, rows: Int!, cols: Int!, minX: Float!, minY: Float!, maxX: Float!, maxY: Float!): [[Float!]]!
   tags: [JobTag!]!
-
   filterRanges: FilterRanges!
+  userStats(startTime: Time, stopTime: Time, clusterId: String): [UserStats!]!
 }
 
 type Mutation {
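For reference, a sketch of a client call against the changed jobMetrics query, which no longer takes a startTime argument. The endpoint path "/query", the job id, and the metric names below are placeholder assumptions, not taken from this commit.

package main

import (
    "bytes"
    "encoding/json"
    "fmt"
    "net/http"
)

func main() {
    // Select only the "name" field of JobMetricWithName, as defined above.
    query := `query {
        jobMetrics(jobId: "1234", metrics: ["flops_any", "mem_bw"]) {
            name
        }
    }`

    body, err := json.Marshal(map[string]string{"query": query})
    if err != nil {
        panic(err)
    }

    resp, err := http.Post("http://localhost:8080/query", "application/json", bytes.NewReader(body))
    if err != nil {
        panic(err)
    }
    defer resp.Body.Close()

    var result map[string]interface{}
    if err := json.NewDecoder(resp.Body).Decode(&result); err != nil {
        panic(err)
    }
    fmt.Println(result)
}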
@@ -92,26 +95,8 @@ type Mutation {
   deleteTag(id: ID!): ID!
   addTagsToJob(job: ID!, tagIds: [ID!]!): [JobTag!]!
   removeTagsFromJob(job: ID!, tagIds: [ID!]!): [JobTag!]!
-}
 
-input JobFilterList {
-  list: [JobFilter]
-}
-
-input JobFilter {
-  tags: [ID!]
-  jobId: StringInput
-  userId: StringInput
-  projectId: StringInput
-  clusterId: StringInput
-  duration: IntRange
-  numNodes: IntRange
-  startTime: TimeRange
-  hasProfile: Boolean
-  flopsAnyAvg: FloatRange
-  memBwAvg: FloatRange
-  loadAvg: FloatRange
-  memUsedMax: FloatRange
+  updateConfiguration(name: String!, value: String!): String
 }
 
 type IntRangeOutput {
@@ -130,6 +115,26 @@ type FilterRanges {
   startTime: TimeRangeOutput!
 }
 
+input JobFilterList {
+  list: [JobFilter]
+}
+
+input JobFilter {
+  tags: [ID!]
+  jobId: StringInput
+  userId: StringInput
+  projectId: StringInput
+  clusterId: StringInput
+  duration: IntRange
+  numNodes: IntRange
+  startTime: TimeRange
+  isRunning: Boolean
+  flopsAnyAvg: FloatRange
+  memBwAvg: FloatRange
+  loadAvg: FloatRange
+  memUsedMax: FloatRange
+}
+
 input OrderByInput {
   field: String!
   order: SortDirectionEnum = ASC
@@ -169,11 +174,6 @@ type JobResultList {
   count: Int
 }
 
-type JobMetricWithName {
-  name: String!
-  metric: JobMetric!
-}
-
 type HistoPoint {
   count: Int!
   value: Int!
@@ -188,9 +188,16 @@ type JobsStatistics {
   histNumNodes: [HistoPoint]!
 }
 
+type UserStats {
+  userId: ID!
+  totalJobs: Int!
+  totalWalltime: Float!
+  totalCoreHours: Float!
+}
+
 input PageRequest {
-  itemsPerPage: Int
-  page: Int
+  itemsPerPage: Int!
+  page: Int!
 }
 
 scalar Time
graph/users.go (new file, 13 lines)
@@ -0,0 +1,13 @@
+package graph
+
+import (
+    "context"
+    "errors"
+    "time"
+
+    "github.com/ClusterCockpit/cc-jobarchive/graph/model"
+)
+
+func (r *queryResolver) UserStats(ctx context.Context, from *time.Time, to *time.Time, clusterId *string) ([]*model.UserStats, error) {
+    return nil, errors.New("unimplemented")
+}
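The UserStats resolver is still a stub. A purely hypothetical, self-contained sketch of the per-user aggregation the UserStats model suggests; the job fields and the cores-per-node factor are invented for illustration and not taken from this commit:

package main

import "fmt"

type job struct {
    UserID   string
    Duration int // seconds
    NumNodes int
}

type UserStats struct {
    UserID         string
    TotalJobs      int
    TotalWalltime  float64 // hours
    TotalCoreHours float64
}

// aggregate groups jobs by user and accumulates totals matching the
// UserStats fields added in this commit.
func aggregate(jobs []job, coresPerNode int) map[string]*UserStats {
    out := map[string]*UserStats{}
    for _, j := range jobs {
        s, ok := out[j.UserID]
        if !ok {
            s = &UserStats{UserID: j.UserID}
            out[j.UserID] = s
        }
        s.TotalJobs++
        hours := float64(j.Duration) / 3600.0
        s.TotalWalltime += hours
        s.TotalCoreHours += hours * float64(j.NumNodes*coresPerNode)
    }
    return out
}

func main() {
    jobs := []job{
        {UserID: "alice", Duration: 7200, NumNodes: 2},
        {UserID: "alice", Duration: 3600, NumNodes: 1},
    }
    for _, s := range aggregate(jobs, 64) {
        fmt.Printf("%+v\n", *s)
    }
}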