new filterRanges query and Cluster.filterRanges field

Lou Knauer 2021-05-06 08:36:24 +02:00
parent 5c0ada7ec9
commit b6df8e88b9
5 changed files with 734 additions and 293 deletions

View File

@@ -59,6 +59,10 @@ models:
fields:
tags:
resolver: true
Cluster:
fields:
filterRanges:
resolver: true
JobTag:
model: "github.com/ClusterCockpit/cc-jobarchive/graph/model.JobTag"
Timestamp:

File diff suppressed because it is too large
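Because Cluster.filterRanges is marked with resolver: true in gqlgen.yml, gqlgen generates a dedicated resolver interface for that field instead of reading the value from the model struct; the generated file is the large diff suppressed above. For orientation, the interface should look roughly like this sketch (not the verbatim generated code):

type ClusterResolver interface {
    FilterRanges(ctx context.Context, obj *model.Cluster) (*model.FilterRanges, error)
}

The clusterResolver added further down in this commit implements exactly this signature.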

View File

@@ -9,14 +9,10 @@ import (
"time"
)
type AddJobInput struct {
JobID string `json:"jobId"`
UserID string `json:"userId"`
ProjectID string `json:"projectId"`
ClusterID string `json:"clusterId"`
StartTime time.Time `json:"startTime"`
Duration int `json:"duration"`
NumNodes int `json:"numNodes"`
type FilterRanges struct {
Duration *IntRangeOutput `json:"duration"`
NumNodes *IntRangeOutput `json:"numNodes"`
StartTime *TimeRangeOutput `json:"startTime"`
}
type FloatRange struct {
@@ -34,6 +30,11 @@ type IntRange struct {
To int `json:"to"`
}
type IntRangeOutput struct {
From int `json:"from"`
To int `json:"to"`
}
type JobFilter struct {
Tags []string `json:"tags"`
JobID *StringInput `json:"jobId"`
@@ -110,19 +111,6 @@ type PageRequest struct {
Page *int `json:"page"`
}
type StartJobInput struct {
JobID string `json:"jobId"`
UserID string `json:"userId"`
ProjectID string `json:"projectId"`
ClusterID string `json:"clusterId"`
StartTime time.Time `json:"startTime"`
NumNodes int `json:"numNodes"`
}
type StopJobInput struct {
StopTime time.Time `json:"stopTime"`
}
type StringInput struct {
Eq *string `json:"eq"`
Contains *string `json:"contains"`
@@ -135,6 +123,11 @@ type TimeRange struct {
To time.Time `json:"to"`
}
type TimeRangeOutput struct {
From time.Time `json:"from"`
To time.Time `json:"to"`
}
type JobMetricScope string
const (
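The new IntRangeOutput, TimeRangeOutput and FilterRanges structs mirror the existing IntRange/TimeRange input types, but carry data flowing back to the client. A minimal sketch of the field names a client ends up seeing (the json tags above match the GraphQL schema; the numbers are made-up example values):

package main

import (
    "encoding/json"
    "fmt"
    "time"

    "github.com/ClusterCockpit/cc-jobarchive/graph/model"
)

func main() {
    fr := model.FilterRanges{
        Duration:  &model.IntRangeOutput{From: 60, To: 86400},
        NumNodes:  &model.IntRangeOutput{From: 1, To: 64},
        StartTime: &model.TimeRangeOutput{From: time.Unix(0, 0), To: time.Now()},
    }
    out, _ := json.Marshal(&fr)
    fmt.Println(string(out))
    // prints {"duration":{"from":60,"to":86400},"numNodes":{"from":1,"to":64},"startTime":{...}}
}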

View File

@@ -357,9 +357,8 @@ func (r *queryResolver) JobMetrics(
}
func (r *queryResolver) Tags(
ctx context.Context, jobId *string) ([]*model.JobTag, error) {
ctx context.Context) ([]*model.JobTag, error) {
if jobId == nil {
rows, err := r.DB.Queryx("SELECT * FROM tag")
if err != nil {
return nil, err
@@ -377,29 +376,37 @@ func (r *queryResolver) Tags(
return tags, nil
}
/* TODO: Use cluster id? */
query := `
SELECT tag.id, tag.tag_name, tag.tag_type FROM tag
JOIN jobtag ON tag.id = jobtag.tag_id
JOIN job ON job.id = jobtag.job_id
WHERE job.job_id = $1
`
rows, err := r.DB.Queryx(query, jobId)
func (r *queryResolver) FilterRanges(
ctx context.Context) (*model.FilterRanges, error) {
rows, err := r.DB.Query(`
SELECT MIN(duration), MAX(duration),
MIN(num_nodes), MAX(num_nodes),
MIN(start_time), MAX(start_time) FROM job
`)
if err != nil {
return nil, err
}
defer rows.Close()
tags := []*model.JobTag{}
for rows.Next() {
var tag model.JobTag
err = rows.StructScan(&tag)
if !rows.Next() {
panic("expected exactly one row")
}
duration := &model.IntRangeOutput{}
numNodes := &model.IntRangeOutput{}
var startTimeMin, startTimeMax int64
err = rows.Scan(&duration.From, &duration.To,
&numNodes.From, &numNodes.To,
&startTimeMin, &startTimeMax)
if err != nil {
return nil, err
}
tags = append(tags, &tag)
}
return tags, nil
startTime := &model.TimeRangeOutput{
From: time.Unix(startTimeMin, 0), To: time.Unix(startTimeMax, 0)}
return &model.FilterRanges{Duration: duration, NumNodes: numNodes, StartTime: startTime}, nil
}
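The new top-level filterRanges resolver above computes global min/max bounds over the whole job table. A query like the following (matching the schema additions further down in this commit) is what it serves; the HTTP endpoint it is posted to is not part of this diff:

// GraphQL document kept as a Go constant for reference.
const filterRangesQuery = `
query {
    filterRanges {
        duration  { from to }
        numNodes  { from to }
        startTime { from to }
    }
}`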
func (r *jobResolver) Tags(ctx context.Context, job *model.Job) ([]*model.JobTag, error) {
@@ -426,8 +433,44 @@ func (r *jobResolver) Tags(ctx context.Context, job *model.Job) ([]*model.JobTag
return tags, nil
}
func (r *clusterResolver) FilterRanges(
ctx context.Context, cluster *model.Cluster) (*model.FilterRanges, error) {
rows, err := r.DB.Query(`
SELECT MIN(duration), MAX(duration),
MIN(num_nodes), MAX(num_nodes),
MIN(start_time), MAX(start_time)
FROM job WHERE job.cluster_id = $1
`, cluster.ClusterID)
if err != nil {
return nil, err
}
defer rows.Close()
if !rows.Next() {
panic("expected exactly one row")
}
duration := &model.IntRangeOutput{}
numNodes := &model.IntRangeOutput{}
var startTimeMin, startTimeMax int64
err = rows.Scan(&duration.From, &duration.To,
&numNodes.From, &numNodes.To,
&startTimeMin, &startTimeMax)
if err != nil {
return nil, err
}
startTime := &model.TimeRangeOutput{
From: time.Unix(startTimeMin, 0), To: time.Unix(startTimeMax, 0)}
return &model.FilterRanges{Duration: duration, NumNodes: numNodes, StartTime: startTime}, nil
}
func (r *Resolver) Job() generated.JobResolver { return &jobResolver{r} }
func (r *Resolver) Cluster() generated.ClusterResolver { return &clusterResolver{r} }
func (r *Resolver) Query() generated.QueryResolver { return &queryResolver{r} }
type jobResolver struct{ *Resolver }
type clusterResolver struct{ *Resolver }
type queryResolver struct{ *Resolver }
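The new Cluster() method wires the clusterResolver into gqlgen's generated ClusterResolver interface. The server setup itself is not touched by this commit; a minimal sketch of how such a resolver root is typically mounted with gqlgen (package layout and the /query path are assumptions, and the Resolver's DB handle still has to be set):

package main

import (
    "log"
    "net/http"

    "github.com/99designs/gqlgen/graphql/handler"
    "github.com/ClusterCockpit/cc-jobarchive/graph"
    "github.com/ClusterCockpit/cc-jobarchive/graph/generated"
)

func main() {
    // The Resolver needs its DB field populated before serving queries (omitted here).
    srv := handler.NewDefaultServer(
        generated.NewExecutableSchema(generated.Config{Resolvers: &graph.Resolver{}}))
    http.Handle("/query", srv)
    log.Fatal(http.ListenAndServe(":8080", nil))
}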

View File

@@ -10,11 +10,11 @@ type Job {
numNodes: Int!
hasProfile: Boolean!
memUsed_max: Float
flopsAny_avg: Float
memBw_avg: Float
netBw_avg: Float
fileBw_avg: Float
memUsedMax: Float
flopsAnyAvg: Float
memBwAvg: Float
netBwAvg: Float
fileBwAvg: Float
tags: [JobTag!]
}
@@ -29,6 +29,7 @@ type Cluster {
flopRateSimd: Int!
memoryBandwidth: Int!
metricConfig: [MetricConfig!]!
filterRanges: FilterRanges!
}
type MetricConfig {
@@ -80,8 +81,9 @@ type Query {
jobsStatistics(filter: JobFilterList): JobsStatistics!
jobMetrics(jobId: String!, clusterId: String, startTime: Time, metrics: [String]): [JobMetricWithName]!
# Return all known tags
tags(jobId: String): [JobTag!]!
tags: [JobTag!]!
filterRanges: FilterRanges!
}
input JobFilterList {
@@ -100,6 +102,22 @@ input JobFilter {
hasProfile: Boolean
}
type IntRangeOutput {
from: Int!
to: Int!
}
type TimeRangeOutput {
from: Time!
to: Time!
}
type FilterRanges {
duration: IntRangeOutput!
numNodes: IntRangeOutput!
startTime: TimeRangeOutput!
}
input OrderByInput {
field: String!
order: SortDirectionEnum = ASC
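With these schema additions, the same ranges are available globally via the top-level filterRanges query and per cluster via Cluster.filterRanges. A sketch of a per-cluster query; note that the clusters root field and a clusterID field on Cluster are assumed to already exist in the schema, since neither appears in this diff:

const clusterFilterRangesQuery = `
query {
    clusters {
        clusterID
        filterRanges {
            numNodes  { from to }
            startTime { from to }
        }
    }
}`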