Mirror of https://github.com/ClusterCockpit/cc-backend (synced 2024-11-10)
Regenerate GraphQL Api. Remove FilterRanges
commit c0daad256f (parent c64a935750)
@@ -35,7 +35,6 @@ type Cluster {
   name: String!
   partitions: [String!]! # Slurm partitions
   metricConfig: [MetricConfig!]!
-  filterRanges: FilterRanges!
   subClusters: [SubCluster!]! # Hardware partitions/subclusters
 }
 
@@ -196,12 +195,6 @@ type Mutation {
 type IntRangeOutput { from: Int!, to: Int! }
 type TimeRangeOutput { from: Time!, to: Time! }
 
-type FilterRanges {
-  duration:  IntRangeOutput!
-  numNodes:  IntRangeOutput!
-  startTime: TimeRangeOutput!
-}
-
 input JobFilter {
   tags: [ID!]
   jobId: StringInput
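
On the Go side, the removed GraphQL type corresponded to a plain struct in pkg/schema; the resolver signatures deleted further down reference it as *schema.FilterRanges. A rough, hedged sketch of those types, with field names and JSON tags inferred from the schema above rather than copied from the repository:

// Hedged sketch of the Go counterparts of the removed FilterRanges GraphQL type.
// Field names and tags are assumptions inferred from the schema and resolver signatures.
package schema

import "time"

// IntRange/TimeRange mirror the IntRangeOutput/TimeRangeOutput GraphQL types above.
type IntRange struct {
	From int `json:"from"`
	To   int `json:"to"`
}

type TimeRange struct {
	From *time.Time `json:"from"`
	To   *time.Time `json:"to"`
}

// FilterRanges is the struct behind the GraphQL type this commit deletes.
type FilterRanges struct {
	Duration  *IntRange  `json:"duration"`
	NumNodes  *IntRange  `json:"numNodes"`
	StartTime *TimeRange `json:"startTime"`
}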
gqlgen.yml (11 changed lines)
@@ -30,7 +30,7 @@ resolver:
 # gqlgen will search for any type names in the schema in these go packages
 # if they match it will use them, otherwise it will generate them.
 autobind:
-  - "github.com/ClusterCockpit/cc-backend/pkg/schema"
+  - "github.com/ClusterCockpit/cc-backend/internal/graph/model"
 
 # This section declares type mapping between the GraphQL and go type systems
 #
@@ -57,6 +57,7 @@ models:
       metaData:
         resolver: true
   Cluster:
+    model: "github.com/ClusterCockpit/cc-backend/pkg/schema.Cluster"
     fields:
       partitions:
         resolver: true
@@ -66,7 +67,15 @@ models:
   Tag: { model: "github.com/ClusterCockpit/cc-backend/pkg/schema.Tag" }
   Resource: { model: "github.com/ClusterCockpit/cc-backend/pkg/schema.Resource" }
   JobState: { model: "github.com/ClusterCockpit/cc-backend/pkg/schema.JobState" }
+  TimeRange: { model: "github.com/ClusterCockpit/cc-backend/pkg/schema.TimeRange" }
+  IntRange: { model: "github.com/ClusterCockpit/cc-backend/pkg/schema.IntRange" }
   JobMetric: { model: "github.com/ClusterCockpit/cc-backend/pkg/schema.JobMetric" }
   Series: { model: "github.com/ClusterCockpit/cc-backend/pkg/schema.Series" }
   MetricStatistics: { model: "github.com/ClusterCockpit/cc-backend/pkg/schema.MetricStatistics" }
+  MetricConfig: { model: "github.com/ClusterCockpit/cc-backend/pkg/schema.MetricConfig" }
+  SubClusterConfig: { model: "github.com/ClusterCockpit/cc-backend/pkg/schema.SubClusterConfig" }
+  Accelerator: { model: "github.com/ClusterCockpit/cc-backend/pkg/schema.Accelerator" }
+  Topology: { model: "github.com/ClusterCockpit/cc-backend/pkg/schema.Topology" }
+  FilterRanges: { model: "github.com/ClusterCockpit/cc-backend/pkg/schema.FilterRanges" }
+  SubCluster: { model: "github.com/ClusterCockpit/cc-backend/pkg/schema.SubCluster" }
   StatsSeries: { model: "github.com/ClusterCockpit/cc-backend/pkg/schema.StatsSeries" }
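
Binding the GraphQL types to the structs in pkg/schema (now via explicit model mappings rather than autobind) lets gqlgen resolve a field directly from a struct field with a matching name and type; a hand-written resolver is only required where that is impossible, such as Cluster.partitions, which is marked resolver: true above. A minimal, hedged sketch of a struct such a mapping can bind to; the real pkg/schema.Cluster has more fields and different element types:

// Hedged sketch of a struct that the Cluster model mapping above can bind to.
// Name/MetricConfig/SubClusters line up with the GraphQL fields, so gqlgen needs no
// resolvers for them; Partitions is absent and therefore keeps its explicit resolver.
package schema

// Placeholder element types so the sketch compiles on its own.
type MetricConfig struct{}
type SubCluster struct{}

type Cluster struct {
	Name         string          `json:"name"`
	MetricConfig []*MetricConfig `json:"metricConfig"`
	SubClusters  []*SubCluster   `json:"subClusters"`
}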
File diff suppressed because it is too large.
@@ -17,8 +17,8 @@ type Count struct {
 }
 
 type FloatRange struct {
-	From schema.Float `json:"from"`
-	To   schema.Float `json:"to"`
+	From float64 `json:"from"`
+	To   float64 `json:"to"`
 }
 
 type Footprints struct {
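
Dropping schema.Float from the generated model is more than cosmetic: schema.Float is (roughly) a float64 wrapper whose JSON marshaller turns NaN into null, while plain float64 uses the standard encoding. A hedged sketch of such a wrapper for comparison; the actual definition in pkg/schema may differ in precision and detail:

// Rough sketch of a NaN-aware float wrapper in the style of schema.Float; details are assumptions.
package schema

import (
	"math"
	"strconv"
)

type Float float64

// MarshalJSON emits null for NaN so missing measurements survive JSON encoding.
func (f Float) MarshalJSON() ([]byte, error) {
	if math.IsNaN(float64(f)) {
		return []byte("null"), nil
	}
	return strconv.AppendFloat(nil, float64(f), 'f', 3, 64), nil
}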
@@ -24,26 +24,6 @@ func (r *clusterResolver) Partitions(ctx context.Context, obj *schema.Cluster) (
 	return r.Repo.Partitions(obj.Name)
 }
 
-// FilterRanges is the resolver for the filterRanges field.
-func (r *clusterResolver) FilterRanges(ctx context.Context, obj *schema.Cluster) (*schema.FilterRanges, error) {
-	panic(fmt.Errorf("not implemented: FilterRanges - filterRanges"))
-}
-
-// Duration is the resolver for the duration field.
-func (r *filterRangesResolver) Duration(ctx context.Context, obj *schema.FilterRanges) (*model.IntRangeOutput, error) {
-	panic(fmt.Errorf("not implemented: Duration - duration"))
-}
-
-// NumNodes is the resolver for the numNodes field.
-func (r *filterRangesResolver) NumNodes(ctx context.Context, obj *schema.FilterRanges) (*model.IntRangeOutput, error) {
-	panic(fmt.Errorf("not implemented: NumNodes - numNodes"))
-}
-
-// StartTime is the resolver for the startTime field.
-func (r *filterRangesResolver) StartTime(ctx context.Context, obj *schema.FilterRanges) (*model.TimeRangeOutput, error) {
-	panic(fmt.Errorf("not implemented: StartTime - startTime"))
-}
-
 // Tags is the resolver for the tags field.
 func (r *jobResolver) Tags(ctx context.Context, obj *schema.Job) ([]*schema.Tag, error) {
 	return r.Repo.GetTags(&obj.ID)
@@ -59,41 +39,6 @@ func (r *jobResolver) UserData(ctx context.Context, obj *schema.Job) (*model.Use
 	return auth.FetchUser(ctx, r.DB, obj.User)
 }
 
-// Peak is the resolver for the peak field.
-func (r *metricConfigResolver) Peak(ctx context.Context, obj *schema.MetricConfig) (*schema.Float, error) {
-	panic(fmt.Errorf("not implemented: Peak - peak"))
-}
-
-// Normal is the resolver for the normal field.
-func (r *metricConfigResolver) Normal(ctx context.Context, obj *schema.MetricConfig) (*schema.Float, error) {
-	panic(fmt.Errorf("not implemented: Normal - normal"))
-}
-
-// Caution is the resolver for the caution field.
-func (r *metricConfigResolver) Caution(ctx context.Context, obj *schema.MetricConfig) (*schema.Float, error) {
-	panic(fmt.Errorf("not implemented: Caution - caution"))
-}
-
-// Alert is the resolver for the alert field.
-func (r *metricConfigResolver) Alert(ctx context.Context, obj *schema.MetricConfig) (*schema.Float, error) {
-	panic(fmt.Errorf("not implemented: Alert - alert"))
-}
-
-// Avg is the resolver for the avg field.
-func (r *metricStatisticsResolver) Avg(ctx context.Context, obj *schema.MetricStatistics) (schema.Float, error) {
-	panic(fmt.Errorf("not implemented: Avg - avg"))
-}
-
-// Min is the resolver for the min field.
-func (r *metricStatisticsResolver) Min(ctx context.Context, obj *schema.MetricStatistics) (schema.Float, error) {
-	panic(fmt.Errorf("not implemented: Min - min"))
-}
-
-// Max is the resolver for the max field.
-func (r *metricStatisticsResolver) Max(ctx context.Context, obj *schema.MetricStatistics) (schema.Float, error) {
-	panic(fmt.Errorf("not implemented: Max - max"))
-}
-
 // CreateTag is the resolver for the createTag field.
 func (r *mutationResolver) CreateTag(ctx context.Context, typeArg string, name string) (*schema.Tag, error) {
 	id, err := r.Repo.CreateTag(typeArg, name)
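
These stubs only existed because the old generated model and the bound structs did not line up; with MetricStatistics (and MetricConfig) mapped straight to pkg/schema and plain float64 fields, gqlgen reads the values from the struct and generates no resolver interface for them. A hedged sketch of the shape that makes this work; the real pkg/schema type may differ slightly:

// Hedged sketch: a struct like this, bound via gqlgen.yml, removes the need for the
// Avg/Min/Max resolver stubs deleted above, since field names and types match the schema.
package schema

type MetricStatistics struct {
	Avg float64 `json:"avg"`
	Min float64 `json:"min"`
	Max float64 `json:"max"`
}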
@@ -155,8 +100,7 @@ func (r *mutationResolver) RemoveTagsFromJob(ctx context.Context, job string, ta
 
 // UpdateConfiguration is the resolver for the updateConfiguration field.
 func (r *mutationResolver) UpdateConfiguration(ctx context.Context, name string, value string) (*string, error) {
-	if err := repository.GetUserCfgRepo().UpdateConfig(name, value, ctx); err != nil {
+	if err := repository.GetUserCfgRepo().UpdateConfig(name, value, auth.GetUser(ctx)); err != nil {
 		return nil, err
 	}
 
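
The one-line change above tracks a signature change in the user-config repository: the resolver now resolves the user itself via auth.GetUser(ctx) and passes it in, instead of handing the raw context to the repository. A hedged, self-contained sketch of that call shape; the real UserCfgRepo persists settings to the database and its exact types differ:

// Hedged sketch of the repository side after this commit; User stands in for *auth.User
// so the sketch compiles on its own, and storage is a map instead of the real database.
package repository

type User struct{ Username string }

type UserCfgRepo struct {
	settings map[string]map[string]string // username -> key -> value
}

// UpdateConfig stores one key/value pair for an already-authenticated user; previously
// the method received the request context and had to look the user up itself.
func (r *UserCfgRepo) UpdateConfig(key, value string, user *User) error {
	if r.settings == nil {
		r.settings = map[string]map[string]string{}
	}
	if r.settings[user.Username] == nil {
		r.settings[user.Username] = map[string]string{}
	}
	r.settings[user.Username][key] = value
	return nil
}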
@@ -296,7 +240,7 @@ func (r *queryResolver) JobsCount(ctx context.Context, filter []*model.JobFilter
 }
 
 // RooflineHeatmap is the resolver for the rooflineHeatmap field.
-func (r *queryResolver) RooflineHeatmap(ctx context.Context, filter []*model.JobFilter, rows int, cols int, minX schema.Float, minY schema.Float, maxX schema.Float, maxY schema.Float) ([][]schema.Float, error) {
+func (r *queryResolver) RooflineHeatmap(ctx context.Context, filter []*model.JobFilter, rows int, cols int, minX float64, minY float64, maxX float64, maxY float64) ([][]float64, error) {
 	return r.rooflineHeatmap(ctx, filter, rows, cols, minX, minY, maxX, maxY)
 }
 
@@ -341,59 +285,19 @@ func (r *queryResolver) NodeMetrics(ctx context.Context, cluster string, nodes [
 	return nodeMetrics, nil
 }
 
-// Peak is the resolver for the peak field.
-func (r *subClusterConfigResolver) Peak(ctx context.Context, obj *schema.SubClusterConfig) (schema.Float, error) {
-	panic(fmt.Errorf("not implemented: Peak - peak"))
-}
-
-// Normal is the resolver for the normal field.
-func (r *subClusterConfigResolver) Normal(ctx context.Context, obj *schema.SubClusterConfig) (schema.Float, error) {
-	panic(fmt.Errorf("not implemented: Normal - normal"))
-}
-
-// Caution is the resolver for the caution field.
-func (r *subClusterConfigResolver) Caution(ctx context.Context, obj *schema.SubClusterConfig) (schema.Float, error) {
-	panic(fmt.Errorf("not implemented: Caution - caution"))
-}
-
-// Alert is the resolver for the alert field.
-func (r *subClusterConfigResolver) Alert(ctx context.Context, obj *schema.SubClusterConfig) (schema.Float, error) {
-	panic(fmt.Errorf("not implemented: Alert - alert"))
-}
-
 // Cluster returns generated.ClusterResolver implementation.
 func (r *Resolver) Cluster() generated.ClusterResolver { return &clusterResolver{r} }
 
-// FilterRanges returns generated.FilterRangesResolver implementation.
-func (r *Resolver) FilterRanges() generated.FilterRangesResolver { return &filterRangesResolver{r} }
-
 // Job returns generated.JobResolver implementation.
 func (r *Resolver) Job() generated.JobResolver { return &jobResolver{r} }
 
-// MetricConfig returns generated.MetricConfigResolver implementation.
-func (r *Resolver) MetricConfig() generated.MetricConfigResolver { return &metricConfigResolver{r} }
-
-// MetricStatistics returns generated.MetricStatisticsResolver implementation.
-func (r *Resolver) MetricStatistics() generated.MetricStatisticsResolver {
-	return &metricStatisticsResolver{r}
-}
-
 // Mutation returns generated.MutationResolver implementation.
 func (r *Resolver) Mutation() generated.MutationResolver { return &mutationResolver{r} }
 
 // Query returns generated.QueryResolver implementation.
 func (r *Resolver) Query() generated.QueryResolver { return &queryResolver{r} }
 
-// SubClusterConfig returns generated.SubClusterConfigResolver implementation.
-func (r *Resolver) SubClusterConfig() generated.SubClusterConfigResolver {
-	return &subClusterConfigResolver{r}
-}
-
 type clusterResolver struct{ *Resolver }
-type filterRangesResolver struct{ *Resolver }
 type jobResolver struct{ *Resolver }
-type metricConfigResolver struct{ *Resolver }
-type metricStatisticsResolver struct{ *Resolver }
 type mutationResolver struct{ *Resolver }
 type queryResolver struct{ *Resolver }
-type subClusterConfigResolver struct{ *Resolver }
@@ -204,13 +204,8 @@ func (r *queryResolver) rooflineHeatmap(
 	ctx context.Context,
 	filter []*model.JobFilter,
 	rows int, cols int,
-	minXF schema.Float, minYF schema.Float, maxXF schema.Float, maxYF schema.Float) ([][]schema.Float, error) {
+	minX float64, minY float64, maxX float64, maxY float64) ([][]float64, error) {
 
-	var minX, minY, maxX, maxY float64
-	minX = float64(minXF)
-	minY = float64(minYF)
-	maxX = float64(maxXF)
-	maxY = float64(maxYF)
 	jobs, err := r.Repo.QueryJobs(ctx, filter, &model.PageRequest{Page: 1, ItemsPerPage: MAX_JOBS_FOR_ANALYSIS + 1}, nil)
 	if err != nil {
 		return nil, err
@@ -221,9 +216,9 @@ func (r *queryResolver) rooflineHeatmap(
 
 	fcols, frows := float64(cols), float64(rows)
 	minX, minY, maxX, maxY = math.Log10(minX), math.Log10(minY), math.Log10(maxX), math.Log10(maxY)
-	tiles := make([][]schema.Float, rows)
+	tiles := make([][]float64, rows)
 	for i := range tiles {
-		tiles[i] = make([]schema.Float, cols)
+		tiles[i] = make([]float64, cols)
 	}
 
 	for _, job := range jobs {
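
With the bounds already converted to log10, the grid set up above is typically filled by binning each job's (intensity, performance) samples into tiles on a log scale. The loop body is not shown in this diff, so the following is only an illustrative, standalone sketch of that binning technique, not cc-backend's implementation:

// Illustrative sketch of log-scale binning onto a rows x cols grid of float64 tiles,
// assuming the bounds are already in log10 space as prepared by the resolver above.
package main

import (
	"fmt"
	"math"
)

// binInto maps one (x, y) sample onto the grid and increments the matching tile.
func binInto(tiles [][]float64, x, y, minX, minY, maxX, maxY float64) {
	rows, cols := len(tiles), len(tiles[0])
	lx, ly := math.Log10(x), math.Log10(y)
	if lx < minX || lx >= maxX || ly < minY || ly >= maxY {
		return // sample outside the plotted range
	}
	col := int((lx - minX) / (maxX - minX) * float64(cols))
	row := int((ly - minY) / (maxY - minY) * float64(rows))
	tiles[row][col]++
}

func main() {
	// Bounds in log10 space, mirroring how minX/minY/maxX/maxY are prepared above.
	minX, minY, maxX, maxY := math.Log10(0.01), math.Log10(1.0), math.Log10(1000.0), math.Log10(10000.0)
	tiles := make([][]float64, 10)
	for i := range tiles {
		tiles[i] = make([]float64, 10)
	}
	binInto(tiles, 4.2, 250.0, minX, minY, maxX, maxY) // one intensity/performance sample
	fmt.Println(tiles)
}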