// cc-backend/internal/graph/schema.resolvers.go

package graph

// This file will be automatically regenerated based on the schema, any resolver implementations
// will be copied through when generating and any unknown code will be moved to the end.

import (
	"context"
	"errors"
	"fmt"
	"strconv"
	"time"

	"github.com/ClusterCockpit/cc-backend/internal/auth"
	"github.com/ClusterCockpit/cc-backend/internal/graph/generated"
	"github.com/ClusterCockpit/cc-backend/internal/graph/model"
	"github.com/ClusterCockpit/cc-backend/internal/metricdata"
	"github.com/ClusterCockpit/cc-backend/internal/repository"
	"github.com/ClusterCockpit/cc-backend/pkg/archive"
	"github.com/ClusterCockpit/cc-backend/pkg/schema"
)

// Partitions is the resolver for the partitions field.
func (r *clusterResolver) Partitions(ctx context.Context, obj *schema.Cluster) ([]string, error) {
	return r.Repo.Partitions(obj.Name)
}

// FilterRanges is the resolver for the filterRanges field.
func (r *clusterResolver) FilterRanges(ctx context.Context, obj *schema.Cluster) (*schema.FilterRanges, error) {
	panic(fmt.Errorf("not implemented: FilterRanges - filterRanges"))
}

// Duration is the resolver for the duration field.
func (r *filterRangesResolver) Duration(ctx context.Context, obj *schema.FilterRanges) (*model.IntRangeOutput, error) {
	panic(fmt.Errorf("not implemented: Duration - duration"))
}

// NumNodes is the resolver for the numNodes field.
func (r *filterRangesResolver) NumNodes(ctx context.Context, obj *schema.FilterRanges) (*model.IntRangeOutput, error) {
	panic(fmt.Errorf("not implemented: NumNodes - numNodes"))
}

// StartTime is the resolver for the startTime field.
func (r *filterRangesResolver) StartTime(ctx context.Context, obj *schema.FilterRanges) (*model.TimeRangeOutput, error) {
	panic(fmt.Errorf("not implemented: StartTime - startTime"))
}

// Tags is the resolver for the tags field.
func (r *jobResolver) Tags(ctx context.Context, obj *schema.Job) ([]*schema.Tag, error) {
	return r.Repo.GetTags(&obj.ID)
}

// MetaData is the resolver for the metaData field.
func (r *jobResolver) MetaData(ctx context.Context, obj *schema.Job) (interface{}, error) {
	return r.Repo.FetchMetadata(obj)
}

// UserData is the resolver for the userData field.
func (r *jobResolver) UserData(ctx context.Context, obj *schema.Job) (*model.User, error) {
	return auth.FetchUser(ctx, r.DB, obj.User)
}

// Peak is the resolver for the peak field.
func (r *metricConfigResolver) Peak(ctx context.Context, obj *schema.MetricConfig) (*schema.Float, error) {
	panic(fmt.Errorf("not implemented: Peak - peak"))
}

// Normal is the resolver for the normal field.
func (r *metricConfigResolver) Normal(ctx context.Context, obj *schema.MetricConfig) (*schema.Float, error) {
	panic(fmt.Errorf("not implemented: Normal - normal"))
}

// Caution is the resolver for the caution field.
func (r *metricConfigResolver) Caution(ctx context.Context, obj *schema.MetricConfig) (*schema.Float, error) {
	panic(fmt.Errorf("not implemented: Caution - caution"))
}

// Alert is the resolver for the alert field.
func (r *metricConfigResolver) Alert(ctx context.Context, obj *schema.MetricConfig) (*schema.Float, error) {
	panic(fmt.Errorf("not implemented: Alert - alert"))
}

// Avg is the resolver for the avg field.
func (r *metricStatisticsResolver) Avg(ctx context.Context, obj *schema.MetricStatistics) (schema.Float, error) {
	panic(fmt.Errorf("not implemented: Avg - avg"))
}

// Min is the resolver for the min field.
func (r *metricStatisticsResolver) Min(ctx context.Context, obj *schema.MetricStatistics) (schema.Float, error) {
	panic(fmt.Errorf("not implemented: Min - min"))
}

// Max is the resolver for the max field.
func (r *metricStatisticsResolver) Max(ctx context.Context, obj *schema.MetricStatistics) (schema.Float, error) {
	panic(fmt.Errorf("not implemented: Max - max"))
}

// CreateTag is the resolver for the createTag field.
func (r *mutationResolver) CreateTag(ctx context.Context, typeArg string, name string) (*schema.Tag, error) {
	id, err := r.Repo.CreateTag(typeArg, name)
	if err != nil {
		return nil, err
	}

	return &schema.Tag{ID: id, Type: typeArg, Name: name}, nil
}

// DeleteTag is the resolver for the deleteTag field.
func (r *mutationResolver) DeleteTag(ctx context.Context, id string) (string, error) {
	panic(fmt.Errorf("not implemented: DeleteTag - deleteTag"))
}

// AddTagsToJob is the resolver for the addTagsToJob field.
func (r *mutationResolver) AddTagsToJob(ctx context.Context, job string, tagIds []string) ([]*schema.Tag, error) {
	jid, err := strconv.ParseInt(job, 10, 64)
	if err != nil {
		return nil, err
	}
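
	// r.Repo.AddTag returns the job's full tag list, so after the loop below
	// tags holds the job's tags as of the last insertion.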
	tags := []*schema.Tag{}
	for _, tagId := range tagIds {
		tid, err := strconv.ParseInt(tagId, 10, 64)
		if err != nil {
			return nil, err
		}

		if tags, err = r.Repo.AddTag(jid, tid); err != nil {
			return nil, err
		}
	}

	return tags, nil
}

// RemoveTagsFromJob is the resolver for the removeTagsFromJob field.
func (r *mutationResolver) RemoveTagsFromJob(ctx context.Context, job string, tagIds []string) ([]*schema.Tag, error) {
	jid, err := strconv.ParseInt(job, 10, 64)
	if err != nil {
		return nil, err
	}
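
	// As in AddTagsToJob, RemoveTag returns the job's remaining tag list after each removal.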
	tags := []*schema.Tag{}
	for _, tagId := range tagIds {
		tid, err := strconv.ParseInt(tagId, 10, 64)
		if err != nil {
			return nil, err
		}

		if tags, err = r.Repo.RemoveTag(jid, tid); err != nil {
			return nil, err
		}
	}

	return tags, nil
}

// UpdateConfiguration is the resolver for the updateConfiguration field.
func (r *mutationResolver) UpdateConfiguration(ctx context.Context, name string, value string) (*string, error) {
	if err := repository.GetUserCfgRepo().UpdateConfig(name, value, ctx); err != nil {
		return nil, err
	}
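
	// The updateConfiguration field returns a nullable string; a nil value signals success without a payload.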
	return nil, nil
}

// Clusters is the resolver for the clusters field.
func (r *queryResolver) Clusters(ctx context.Context) ([]*schema.Cluster, error) {
	return archive.Clusters, nil
}

// Tags is the resolver for the tags field.
func (r *queryResolver) Tags(ctx context.Context) ([]*schema.Tag, error) {
	return r.Repo.GetTags(nil)
}

// User is the resolver for the user field.
func (r *queryResolver) User(ctx context.Context, username string) (*model.User, error) {
	return auth.FetchUser(ctx, r.DB, username)
}

// AllocatedNodes is the resolver for the allocatedNodes field.
func (r *queryResolver) AllocatedNodes(ctx context.Context, cluster string) ([]*model.Count, error) {
	data, err := r.Repo.AllocatedNodes(cluster)
	if err != nil {
		return nil, err
	}
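
	// Collapse the per-subcluster host lists into one node count per subcluster.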
	counts := make([]*model.Count, 0, len(data))
	for subcluster, hosts := range data {
		counts = append(counts, &model.Count{
			Name:  subcluster,
			Count: len(hosts),
		})
	}

	return counts, nil
}

// Job is the resolver for the job field.
func (r *queryResolver) Job(ctx context.Context, id string) (*schema.Job, error) {
	numericId, err := strconv.ParseInt(id, 10, 64)
	if err != nil {
		return nil, err
	}

	job, err := r.Repo.FindById(numericId)
	if err != nil {
		return nil, err
	}
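
	// Access control: non-admin users may only view their own jobs.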
	if user := auth.GetUser(ctx); user != nil &&
		!user.HasRole(auth.RoleAdmin) &&
		job.User != user.Username {
		return nil, errors.New("you are not allowed to see this job")
	}

	return job, nil
}

// JobMetrics is the resolver for the jobMetrics field.
func (r *queryResolver) JobMetrics(ctx context.Context, id string, metrics []string, scopes []schema.MetricScope) ([]*model.JobMetricWithName, error) {
	job, err := r.Query().Job(ctx, id)
	if err != nil {
		return nil, err
	}

	data, err := metricdata.LoadData(job, metrics, scopes, ctx)
	if err != nil {
		return nil, err
	}
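
	// LoadData returns series grouped by metric name and scope; flatten that map into the list GraphQL expects.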
	res := []*model.JobMetricWithName{}
	for name, md := range data {
		for scope, metric := range md {
			// Sanity check: the scope stored in the metric must match the map key it was returned under.
			if metric.Scope != schema.MetricScope(scope) {
				panic("metric scope does not match the scope it was returned under")
			}

			res = append(res, &model.JobMetricWithName{
				Name:   name,
				Metric: metric,
			})
		}
	}

	return res, err
}

// JobsFootprints is the resolver for the jobsFootprints field.
func (r *queryResolver) JobsFootprints(ctx context.Context, filter []*model.JobFilter, metrics []string) (*model.Footprints, error) {
	return r.jobsFootprints(ctx, filter, metrics)
}

// Jobs is the resolver for the jobs field.
func (r *queryResolver) Jobs(ctx context.Context, filter []*model.JobFilter, page *model.PageRequest, order *model.OrderByInput) (*model.JobResultList, error) {
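	// Default to the first page with 50 items when the client requests no explicit pagination.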
	if page == nil {
		page = &model.PageRequest{
			ItemsPerPage: 50,
			Page:         1,
		}
	}

	jobs, err := r.Repo.QueryJobs(ctx, filter, page, order)
	if err != nil {
		return nil, err
	}

	count, err := r.Repo.CountJobs(ctx, filter)
	if err != nil {
		return nil, err
	}

	return &model.JobResultList{Items: jobs, Count: &count}, nil
}

// JobsStatistics is the resolver for the jobsStatistics field.
func (r *queryResolver) JobsStatistics(ctx context.Context, filter []*model.JobFilter, groupBy *model.Aggregate) ([]*model.JobsStatistics, error) {
	return r.jobsStatistics(ctx, filter, groupBy)
}

// JobsCount is the resolver for the jobsCount field.
func (r *queryResolver) JobsCount(ctx context.Context, filter []*model.JobFilter, groupBy model.Aggregate, weight *model.Weights, limit *int) ([]*model.Count, error) {
	counts, err := r.Repo.CountGroupedJobs(ctx, groupBy, filter, weight, limit)
	if err != nil {
		return nil, err
	}
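
	// Convert the grouped-count map into the list of Count models expected by the schema.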
	res := make([]*model.Count, 0, len(counts))
	for name, count := range counts {
		res = append(res, &model.Count{
			Name:  name,
			Count: count,
		})
	}

	return res, nil
}

// RooflineHeatmap is the resolver for the rooflineHeatmap field.
func (r *queryResolver) RooflineHeatmap(ctx context.Context, filter []*model.JobFilter, rows int, cols int, minX schema.Float, minY schema.Float, maxX schema.Float, maxY schema.Float) ([][]schema.Float, error) {
	return r.rooflineHeatmap(ctx, filter, rows, cols, minX, minY, maxX, maxY)
}

// NodeMetrics is the resolver for the nodeMetrics field.
func (r *queryResolver) NodeMetrics(ctx context.Context, cluster string, nodes []string, scopes []schema.MetricScope, metrics []string, from time.Time, to time.Time) ([]*model.NodeMetrics, error) {
	user := auth.GetUser(ctx)
	if user != nil && !user.HasRole(auth.RoleAdmin) {
		return nil, errors.New("you need to be an administrator for this query")
	}
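
	// Without an explicit metrics list, default to every metric configured for the cluster.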
	if metrics == nil {
		for _, mc := range archive.GetCluster(cluster).MetricConfig {
			metrics = append(metrics, mc.Name)
		}
	}

	data, err := metricdata.LoadNodeData(cluster, metrics, nodes, scopes, from, to, ctx)
	if err != nil {
		return nil, err
	}
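
	// Build one NodeMetrics entry per host, flattening all scoped series of each metric.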
	nodeMetrics := make([]*model.NodeMetrics, 0, len(data))
	for hostname, metrics := range data {
		host := &model.NodeMetrics{
			Host:    hostname,
			Metrics: make([]*model.JobMetricWithName, 0, len(metrics)*len(scopes)),
		}
		host.SubCluster, _ = archive.GetSubClusterByNode(cluster, hostname)

		for metric, scopedMetrics := range metrics {
			for _, scopedMetric := range scopedMetrics {
				host.Metrics = append(host.Metrics, &model.JobMetricWithName{
					Name:   metric,
					Metric: scopedMetric,
				})
			}
		}

		nodeMetrics = append(nodeMetrics, host)
	}

	return nodeMetrics, nil
}

// Peak is the resolver for the peak field.
func (r *subClusterConfigResolver) Peak(ctx context.Context, obj *schema.SubClusterConfig) (schema.Float, error) {
	panic(fmt.Errorf("not implemented: Peak - peak"))
}

// Normal is the resolver for the normal field.
func (r *subClusterConfigResolver) Normal(ctx context.Context, obj *schema.SubClusterConfig) (schema.Float, error) {
	panic(fmt.Errorf("not implemented: Normal - normal"))
}

// Caution is the resolver for the caution field.
func (r *subClusterConfigResolver) Caution(ctx context.Context, obj *schema.SubClusterConfig) (schema.Float, error) {
	panic(fmt.Errorf("not implemented: Caution - caution"))
}

// Alert is the resolver for the alert field.
func (r *subClusterConfigResolver) Alert(ctx context.Context, obj *schema.SubClusterConfig) (schema.Float, error) {
	panic(fmt.Errorf("not implemented: Alert - alert"))
}

// Cluster returns generated.ClusterResolver implementation.
func (r *Resolver) Cluster() generated.ClusterResolver { return &clusterResolver{r} }

// FilterRanges returns generated.FilterRangesResolver implementation.
func (r *Resolver) FilterRanges() generated.FilterRangesResolver { return &filterRangesResolver{r} }

// Job returns generated.JobResolver implementation.
func (r *Resolver) Job() generated.JobResolver { return &jobResolver{r} }

// MetricConfig returns generated.MetricConfigResolver implementation.
func (r *Resolver) MetricConfig() generated.MetricConfigResolver { return &metricConfigResolver{r} }

// MetricStatistics returns generated.MetricStatisticsResolver implementation.
func (r *Resolver) MetricStatistics() generated.MetricStatisticsResolver {
	return &metricStatisticsResolver{r}
}

// Mutation returns generated.MutationResolver implementation.
func (r *Resolver) Mutation() generated.MutationResolver { return &mutationResolver{r} }

// Query returns generated.QueryResolver implementation.
func (r *Resolver) Query() generated.QueryResolver { return &queryResolver{r} }

// SubClusterConfig returns generated.SubClusterConfigResolver implementation.
func (r *Resolver) SubClusterConfig() generated.SubClusterConfigResolver {
	return &subClusterConfigResolver{r}
}

type clusterResolver struct{ *Resolver }
type filterRangesResolver struct{ *Resolver }
type jobResolver struct{ *Resolver }
type metricConfigResolver struct{ *Resolver }
type metricStatisticsResolver struct{ *Resolver }
type mutationResolver struct{ *Resolver }
type queryResolver struct{ *Resolver }
type subClusterConfigResolver struct{ *Resolver }