package graph

// This file will be automatically regenerated based on the schema, any resolver implementations
// will be copied through when generating and any unknown code will be moved to the end.
// Code generated by github.com/99designs/gqlgen version v0.17.66

import (
	"context"
	"errors"
	"fmt"
	"regexp"
	"slices"
	"strconv"
	"strings"
	"time"

	"github.com/ClusterCockpit/cc-backend/internal/config"
	"github.com/ClusterCockpit/cc-backend/internal/graph/generated"
	"github.com/ClusterCockpit/cc-backend/internal/graph/model"
	"github.com/ClusterCockpit/cc-backend/internal/metricDataDispatcher"
	"github.com/ClusterCockpit/cc-backend/internal/repository"
	"github.com/ClusterCockpit/cc-backend/pkg/archive"
	"github.com/ClusterCockpit/cc-backend/pkg/log"
	"github.com/ClusterCockpit/cc-backend/pkg/schema"
)

// Partitions is the resolver for the partitions field.
func (r *clusterResolver) Partitions(ctx context.Context, obj *schema.Cluster) ([]string, error) {
	return r.Repo.Partitions(obj.Name)
}

// Tags is the resolver for the tags field.
func (r *jobResolver) Tags(ctx context.Context, obj *schema.Job) ([]*schema.Tag, error) {
	return r.Repo.GetTags(repository.GetUserFromContext(ctx), &obj.ID)
}

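// Note: Concurrent jobs are only looked up for shared (non-exclusive) jobs that ran
// longer than 600 seconds; all other jobs resolve to nil here.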
// ConcurrentJobs is the resolver for the concurrentJobs field.
func (r *jobResolver) ConcurrentJobs(ctx context.Context, obj *schema.Job) (*model.JobLinkResultList, error) {
	// FIXME: Make the hardcoded duration configurable
	if obj.Exclusive != 1 && obj.Duration > 600 {
		return r.Repo.FindConcurrentJobs(ctx, obj)
	}

	return nil, nil
}

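// Note: Footprint keys combine a metric name and a statistic separated by "_"; the
// resolver below treats the last "_"-separated segment as the statistic, so an
// illustrative key "flops_any_avg" would yield Name "flops_any" and Stat "avg".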
// Footprint is the resolver for the footprint field.
func (r *jobResolver) Footprint(ctx context.Context, obj *schema.Job) ([]*model.FootprintValue, error) {
	rawFootprint, err := r.Repo.FetchFootprint(obj)
	if err != nil {
		log.Warn("Error while fetching job footprint data")
		return nil, err
	}

	res := []*model.FootprintValue{}
	for name, value := range rawFootprint {
		parts := strings.Split(name, "_")
		statPart := parts[len(parts)-1]
		nameParts := parts[:len(parts)-1]

		res = append(res, &model.FootprintValue{
			Name:  strings.Join(nameParts, "_"),
			Stat:  statPart,
			Value: value,
		})
	}

	return res, err
}

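// Note: EnergyFootprint assigns each energy metric to a hardware class by matching
// common substrings (cpu, acc, mem, core) in the metric name; names matching none of
// them are reported as "Other".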
// EnergyFootprint is the resolver for the energyFootprint field.
func (r *jobResolver) EnergyFootprint(ctx context.Context, obj *schema.Job) ([]*model.EnergyFootprintValue, error) {
	rawEnergyFootprint, err := r.Repo.FetchEnergyFootprint(obj)
	if err != nil {
		log.Warn("Error while fetching job energy footprint data")
		return nil, err
	}

	// Suboptimal: Nearly hardcoded metric name expectations
	matchCpu := regexp.MustCompile(`cpu|Cpu|CPU`)
	matchAcc := regexp.MustCompile(`acc|Acc|ACC`)
	matchMem := regexp.MustCompile(`mem|Mem|MEM`)
	matchCore := regexp.MustCompile(`core|Core|CORE`)

	res := []*model.EnergyFootprintValue{}
	for name, value := range rawEnergyFootprint {
		hwType := ""
		switch test := name; { // Note: ';' declares a switch-local variable
		case matchCpu.MatchString(test):
			hwType = "CPU"
		case matchAcc.MatchString(test):
			hwType = "Accelerator"
		case matchMem.MatchString(test):
			hwType = "Memory"
		case matchCore.MatchString(test):
			hwType = "Core"
		default:
			hwType = "Other"
		}

		res = append(res, &model.EnergyFootprintValue{
			Hardware: hwType,
			Metric:   name,
			Value:    value,
		})
	}
	return res, err
}

// MetaData is the resolver for the metaData field.
func (r *jobResolver) MetaData(ctx context.Context, obj *schema.Job) (any, error) {
	return r.Repo.FetchMetadata(obj)
}

// UserData is the resolver for the userData field.
func (r *jobResolver) UserData(ctx context.Context, obj *schema.Job) (*model.User, error) {
	return repository.GetUserRepository().FetchUserInCtx(ctx, obj.User)
}

// Name is the resolver for the name field.
func (r *metricValueResolver) Name(ctx context.Context, obj *schema.MetricValue) (*string, error) {
	panic(fmt.Errorf("not implemented: Name - name"))
}

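// Note: Tag scopes are "admin", "global", or a username (a private tag). The access
// checks in the tag mutations below allow admins to manage admin tags, admins and
// support staff to manage global tags, and every user to manage tags scoped to their
// own username.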
// CreateTag is the resolver for the createTag field.
func (r *mutationResolver) CreateTag(ctx context.Context, typeArg string, name string, scope string) (*schema.Tag, error) {
	user := repository.GetUserFromContext(ctx)
	if user == nil {
		return nil, fmt.Errorf("no user in context")
	}

	// Test Access: Admins && Admin Tag OR Support/Admin and Global Tag OR Everyone && Private Tag
	if user.HasRole(schema.RoleAdmin) && scope == "admin" ||
		user.HasAnyRole([]schema.Role{schema.RoleAdmin, schema.RoleSupport}) && scope == "global" ||
		user.Username == scope {
		// Create in DB
		id, err := r.Repo.CreateTag(typeArg, name, scope)
		if err != nil {
			log.Warn("Error while creating tag")
			return nil, err
		}
		return &schema.Tag{ID: id, Type: typeArg, Name: name, Scope: scope}, nil
	} else {
		log.Warnf("Not authorized to create tag with scope: %s", scope)
		return nil, fmt.Errorf("not authorized to create tag with scope: %s", scope)
	}
}

// DeleteTag is the resolver for the deleteTag field.
func (r *mutationResolver) DeleteTag(ctx context.Context, id string) (string, error) {
	// This resolver maps a single ID string to an ID string; RemoveTagFromList maps []string to []int.
	panic(fmt.Errorf("not implemented: DeleteTag - deleteTag"))
}

// AddTagsToJob is the resolver for the addTagsToJob field.
func (r *mutationResolver) AddTagsToJob(ctx context.Context, job string, tagIds []string) ([]*schema.Tag, error) {
	user := repository.GetUserFromContext(ctx)
	if user == nil {
		return nil, fmt.Errorf("no user in context")
	}

	jid, err := strconv.ParseInt(job, 10, 64)
	if err != nil {
		log.Warn("Error while parsing job id")
		return nil, err
	}

	tags := []*schema.Tag{}
	for _, tagId := range tagIds {
		// Get ID
		tid, err := strconv.ParseInt(tagId, 10, 64)
		if err != nil {
			log.Warn("Error while parsing tag id")
			return nil, err
		}

		// Test Exists
		_, _, tscope, exists := r.Repo.TagInfo(tid)
		if !exists {
			log.Warnf("Tag does not exist (ID): %d", tid)
			return nil, fmt.Errorf("tag does not exist (ID): %d", tid)
		}

		// Test Access: Admins && Admin Tag OR Support/Admin and Global Tag OR Everyone && Private Tag
		if user.HasRole(schema.RoleAdmin) && tscope == "admin" ||
			user.HasAnyRole([]schema.Role{schema.RoleAdmin, schema.RoleSupport}) && tscope == "global" ||
			user.Username == tscope {
			// Add to Job
			if tags, err = r.Repo.AddTag(user, jid, tid); err != nil {
				log.Warn("Error while adding tag")
				return nil, err
			}
		} else {
			log.Warnf("Not authorized to add tag: %d", tid)
			return nil, fmt.Errorf("not authorized to add tag: %d", tid)
		}
	}

	return tags, nil
}

// RemoveTagsFromJob is the resolver for the removeTagsFromJob field.
func (r *mutationResolver) RemoveTagsFromJob(ctx context.Context, job string, tagIds []string) ([]*schema.Tag, error) {
	user := repository.GetUserFromContext(ctx)
	if user == nil {
		return nil, fmt.Errorf("no user in context")
	}

	jid, err := strconv.ParseInt(job, 10, 64)
	if err != nil {
		log.Warn("Error while parsing job id")
		return nil, err
	}

	tags := []*schema.Tag{}
	for _, tagId := range tagIds {
		// Get ID
		tid, err := strconv.ParseInt(tagId, 10, 64)
		if err != nil {
			log.Warn("Error while parsing tag id")
			return nil, err
		}

		// Test Exists
		_, _, tscope, exists := r.Repo.TagInfo(tid)
		if !exists {
			log.Warnf("Tag does not exist (ID): %d", tid)
			return nil, fmt.Errorf("tag does not exist (ID): %d", tid)
		}

		// Test Access: Admins && Admin Tag OR Support/Admin and Global Tag OR Everyone && Private Tag
		if user.HasRole(schema.RoleAdmin) && tscope == "admin" ||
			user.HasAnyRole([]schema.Role{schema.RoleAdmin, schema.RoleSupport}) && tscope == "global" ||
			user.Username == tscope {
			// Remove from Job
			if tags, err = r.Repo.RemoveTag(user, jid, tid); err != nil {
				log.Warn("Error while removing tag")
				return nil, err
			}
		} else {
			log.Warnf("Not authorized to remove tag: %d", tid)
			return nil, fmt.Errorf("not authorized to remove tag: %d", tid)
		}
	}

	return tags, nil
}

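// Note: Unlike RemoveTagsFromJob, RemoveTagFromList deletes the tags themselves from
// the database rather than detaching them from a single job, and returns the removed
// tag IDs as integers.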
// RemoveTagFromList is the resolver for the removeTagFromList field.
func (r *mutationResolver) RemoveTagFromList(ctx context.Context, tagIds []string) ([]int, error) {
	// Requires the authenticated user from the request context
	user := repository.GetUserFromContext(ctx)
	if user == nil {
		return nil, fmt.Errorf("no user in context")
	}

	tags := []int{}
	for _, tagId := range tagIds {
		// Get ID
		tid, err := strconv.ParseInt(tagId, 10, 64)
		if err != nil {
			log.Warn("Error while parsing tag id for removal")
			return nil, err
		}

		// Test Exists
		_, _, tscope, exists := r.Repo.TagInfo(tid)
		if !exists {
			log.Warnf("Tag does not exist (ID): %d", tid)
			return nil, fmt.Errorf("tag does not exist (ID): %d", tid)
		}

		// Test Access: Admins && Admin/Global Tag OR Everyone && Private Tag
		if user.HasRole(schema.RoleAdmin) && (tscope == "global" || tscope == "admin") || user.Username == tscope {
			// Remove from DB
			if err = r.Repo.RemoveTagById(tid); err != nil {
				log.Warn("Error while removing tag")
				return nil, err
			} else {
				tags = append(tags, int(tid))
			}
		} else {
			log.Warnf("Not authorized to remove tag: %d", tid)
			return nil, fmt.Errorf("not authorized to remove tag: %d", tid)
		}
	}
	return tags, nil
}

// UpdateConfiguration is the resolver for the updateConfiguration field.
func (r *mutationResolver) UpdateConfiguration(ctx context.Context, name string, value string) (*string, error) {
	if err := repository.GetUserCfgRepo().UpdateConfig(name, value, repository.GetUserFromContext(ctx)); err != nil {
		log.Warn("Error while updating user config")
		return nil, err
	}

	return nil, nil
}

// Clusters is the resolver for the clusters field.
func (r *queryResolver) Clusters(ctx context.Context) ([]*schema.Cluster, error) {
	return archive.Clusters, nil
}

// Tags is the resolver for the tags field.
func (r *queryResolver) Tags(ctx context.Context) ([]*schema.Tag, error) {
	return r.Repo.GetTags(repository.GetUserFromContext(ctx), nil)
}

// GlobalMetrics is the resolver for the globalMetrics field.
func (r *queryResolver) GlobalMetrics(ctx context.Context) ([]*schema.GlobalMetricListItem, error) {
	return archive.GlobalMetricList, nil
}

// User is the resolver for the user field.
func (r *queryResolver) User(ctx context.Context, username string) (*model.User, error) {
	return repository.GetUserRepository().FetchUserInCtx(ctx, username)
}

// AllocatedNodes is the resolver for the allocatedNodes field.
func (r *queryResolver) AllocatedNodes(ctx context.Context, cluster string) ([]*model.Count, error) {
	data, err := r.Repo.AllocatedNodes(cluster)
	if err != nil {
		log.Warn("Error while fetching allocated nodes")
		return nil, err
	}

	counts := make([]*model.Count, 0, len(data))
	for subcluster, hosts := range data {
		counts = append(counts, &model.Count{
			Name:  subcluster,
			Count: len(hosts),
		})
	}

	return counts, nil
}

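// Note: Job enforces access control: a requesting user who is neither the job owner
// nor holds the admin, support, or manager role is rejected.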
// Job is the resolver for the job field.
func (r *queryResolver) Job(ctx context.Context, id string) (*schema.Job, error) {
	numericId, err := strconv.ParseInt(id, 10, 64)
	if err != nil {
		log.Warn("Error while parsing job id")
		return nil, err
	}

	job, err := r.Repo.FindById(ctx, numericId)
	if err != nil {
		log.Warn("Error while finding job by id")
		return nil, err
	}

	if user := repository.GetUserFromContext(ctx); user != nil &&
		job.User != user.Username &&
		user.HasNotRoles([]schema.Role{schema.RoleAdmin, schema.RoleSupport, schema.RoleManager}) {
		return nil, errors.New("you are not allowed to see this job")
	}

	return job, nil
}

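// Note: If no resolution is requested, JobMetrics falls back to the largest configured
// resampling resolution; with resampling disabled it uses 0, which loads each metric at
// its configured native timestep.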
// JobMetrics is the resolver for the jobMetrics field.
func (r *queryResolver) JobMetrics(ctx context.Context, id string, metrics []string, scopes []schema.MetricScope, resolution *int) ([]*model.JobMetricWithName, error) {
	if resolution == nil { // Load from Config
		if config.Keys.EnableResampling != nil {
			defaultRes := slices.Max(config.Keys.EnableResampling.Resolutions)
			resolution = &defaultRes
		} else { // Set 0 (Loads configured metric timestep)
			defaultRes := 0
			resolution = &defaultRes
		}
	}

	job, err := r.Query().Job(ctx, id)
	if err != nil {
		log.Warn("Error while querying job for metrics")
		return nil, err
	}

	data, err := metricDataDispatcher.LoadData(job, metrics, scopes, ctx, *resolution)
	if err != nil {
		log.Warn("Error while loading job data")
		return nil, err
	}

	res := []*model.JobMetricWithName{}
	for name, md := range data {
		for scope, metric := range md {
			res = append(res, &model.JobMetricWithName{
				Name:   name,
				Scope:  scope,
				Metric: metric,
			})
		}
	}

	return res, err
}

// JobStats is the resolver for the jobStats field.
func (r *queryResolver) JobStats(ctx context.Context, id string, metrics []string) ([]*model.NamedStats, error) {
	job, err := r.Query().Job(ctx, id)
	if err != nil {
		log.Warnf("Error while querying job %s for metadata", id)
		return nil, err
	}

	data, err := metricDataDispatcher.LoadJobStats(job, metrics, ctx)
	if err != nil {
		log.Warnf("Error while loading jobStats data for job id %s", id)
		return nil, err
	}

	res := []*model.NamedStats{}
	for name, md := range data {
		res = append(res, &model.NamedStats{
			Name: name,
			Data: &md,
		})
	}

	return res, err
}

// ScopedJobStats is the resolver for the scopedJobStats field.
func (r *queryResolver) ScopedJobStats(ctx context.Context, id string, metrics []string, scopes []schema.MetricScope) ([]*model.NamedStatsWithScope, error) {
	job, err := r.Query().Job(ctx, id)
	if err != nil {
		log.Warnf("Error while querying job %s for metadata", id)
		return nil, err
	}

	data, err := metricDataDispatcher.LoadScopedJobStats(job, metrics, scopes, ctx)
	if err != nil {
		log.Warnf("Error while loading scopedJobStats data for job id %s", id)
		return nil, err
	}

	res := make([]*model.NamedStatsWithScope, 0)
	for name, scoped := range data {
		for scope, stats := range scoped {
			mdlStats := make([]*model.ScopedStats, 0)
			for _, stat := range stats {
				mdlStats = append(mdlStats, &model.ScopedStats{
					Hostname: stat.Hostname,
					ID:       stat.Id,
					Data:     stat.Data,
				})
			}

			res = append(res, &model.NamedStatsWithScope{
				Name:  name,
				Scope: scope,
				Stats: mdlStats,
			})
		}
	}

	return res, nil
}

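// Note: Jobs defaults to page 1 with 50 items per page when no paging is requested.
// hasNextPage is computed by a one-item probe query for the first item of the following
// page instead of being derived from the total count.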
// Jobs is the resolver for the jobs field.
func (r *queryResolver) Jobs(ctx context.Context, filter []*model.JobFilter, page *model.PageRequest, order *model.OrderByInput) (*model.JobResultList, error) {
	if page == nil {
		page = &model.PageRequest{
			ItemsPerPage: 50,
			Page:         1,
		}
	}

	jobs, err := r.Repo.QueryJobs(ctx, filter, page, order)
	if err != nil {
		log.Warn("Error while querying jobs")
		return nil, err
	}

	count, err := r.Repo.CountJobs(ctx, filter)
	if err != nil {
		log.Warn("Error while counting jobs")
		return nil, err
	}

	// Note: hasNextPage is always returned, even if the app default
	// 'config.Keys.UiDefaults["job_list_usePaging"]' is set, because users can switch
	// the frontend to continuous scrolling regardless of the paging default.
	/*
	   Example: Page 4 @ 10 items per page: does item 41 exist?
	   Probe with page 41 @ 1 item per page: if len(result) is 1, page 5 @ 10 items per page exists.
	*/
	nextPage := &model.PageRequest{
		ItemsPerPage: 1,
		Page:         ((page.Page * page.ItemsPerPage) + 1),
	}
	nextJobs, err := r.Repo.QueryJobs(ctx, filter, nextPage, order)
	if err != nil {
		log.Warn("Error while querying next jobs")
		return nil, err
	}

	hasNextPage := len(nextJobs) == 1

	return &model.JobResultList{Items: jobs, Count: &count, HasNextPage: &hasNextPage}, nil
}

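// Note: requireField (defined elsewhere in this package) presumably inspects the GraphQL
// selection set, so JobsStatistics only computes the aggregates, counts, and histograms
// that the client actually requested.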
// JobsStatistics is the resolver for the jobsStatistics field.
func (r *queryResolver) JobsStatistics(ctx context.Context, filter []*model.JobFilter, metrics []string, page *model.PageRequest, sortBy *model.SortByAggregate, groupBy *model.Aggregate, numDurationBins *string, numMetricBins *int) ([]*model.JobsStatistics, error) {
	var err error
	var stats []*model.JobsStatistics

	// Top Level Defaults
	defaultDurationBins := "1h"
	defaultMetricBins := 10

	if requireField(ctx, "totalJobs") || requireField(ctx, "totalWalltime") || requireField(ctx, "totalNodes") || requireField(ctx, "totalCores") ||
		requireField(ctx, "totalAccs") || requireField(ctx, "totalNodeHours") || requireField(ctx, "totalCoreHours") || requireField(ctx, "totalAccHours") {
		if groupBy == nil {
			stats, err = r.Repo.JobsStats(ctx, filter)
		} else {
			stats, err = r.Repo.JobsStatsGrouped(ctx, filter, page, sortBy, groupBy)
		}
	} else {
		stats = make([]*model.JobsStatistics, 0, 1)
		stats = append(stats, &model.JobsStatistics{})
	}

	if groupBy != nil {
		if requireField(ctx, "shortJobs") {
			stats, err = r.Repo.AddJobCountGrouped(ctx, filter, groupBy, stats, "short")
		}
		if requireField(ctx, "runningJobs") {
			stats, err = r.Repo.AddJobCountGrouped(ctx, filter, groupBy, stats, "running")
		}
	} else {
		if requireField(ctx, "shortJobs") {
			stats, err = r.Repo.AddJobCount(ctx, filter, stats, "short")
		}
		if requireField(ctx, "runningJobs") {
			stats, err = r.Repo.AddJobCount(ctx, filter, stats, "running")
		}
	}

	if err != nil {
		return nil, err
	}

	if requireField(ctx, "histDuration") || requireField(ctx, "histNumNodes") || requireField(ctx, "histNumCores") || requireField(ctx, "histNumAccs") {
		if numDurationBins == nil {
			numDurationBins = &defaultDurationBins
		}

		if groupBy == nil {
			stats[0], err = r.Repo.AddHistograms(ctx, filter, stats[0], numDurationBins)
			if err != nil {
				return nil, err
			}
		} else {
			return nil, errors.New("histograms only implemented without groupBy argument")
		}
	}

	if requireField(ctx, "histMetrics") {
		if numMetricBins == nil {
			numMetricBins = &defaultMetricBins
		}

		if groupBy == nil {
			stats[0], err = r.Repo.AddMetricHistograms(ctx, filter, metrics, stats[0], numMetricBins)
			if err != nil {
				return nil, err
			}
		} else {
			return nil, errors.New("metric histograms only implemented without groupBy argument")
		}
	}

	return stats, nil
}

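// Note: JobsMetricStats skips jobs whose statistics cannot be loaded and logs a warning
// instead of failing the whole comparison request.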
// JobsMetricStats is the resolver for the jobsMetricStats field.
func (r *queryResolver) JobsMetricStats(ctx context.Context, filter []*model.JobFilter, metrics []string) ([]*model.JobStats, error) {
	// No Paging, Fixed Order by StartTime ASC
	order := &model.OrderByInput{
		Field: "startTime",
		Type:  "col",
		Order: "ASC",
	}

	jobs, err := r.Repo.QueryJobs(ctx, filter, nil, order)
	if err != nil {
		log.Warn("Error while querying jobs for comparison")
		return nil, err
	}

	res := []*model.JobStats{}
	for _, job := range jobs {
		data, err := metricDataDispatcher.LoadJobStats(job, metrics, ctx)
		if err != nil {
			log.Warnf("Error while loading comparison jobStats data for job id %d", job.JobID)
			continue
			// return nil, err
		}

		sres := []*model.NamedStats{}
		for name, md := range data {
			sres = append(sres, &model.NamedStats{
				Name: name,
				Data: &md,
			})
		}

		numThreadsInt := int(job.NumHWThreads)
		numAccsInt := int(job.NumAcc)
		res = append(res, &model.JobStats{
			ID:              int(job.ID),
			JobID:           strconv.Itoa(int(job.JobID)),
			StartTime:       int(job.StartTime.Unix()),
			Duration:        int(job.Duration),
			Cluster:         job.Cluster,
			SubCluster:      job.SubCluster,
			NumNodes:        int(job.NumNodes),
			NumHWThreads:    &numThreadsInt,
			NumAccelerators: &numAccsInt,
			Stats:           sres,
		})
	}
	return res, err
}

// JobsFootprints is the resolver for the jobsFootprints field.
func (r *queryResolver) JobsFootprints(ctx context.Context, filter []*model.JobFilter, metrics []string) (*model.Footprints, error) {
	// NOTE: Legacy Naming! This resolver is for normalized histograms in analysis view only - *Not* related to DB "footprint" column!
	return r.jobsFootprints(ctx, filter, metrics)
}

// RooflineHeatmap is the resolver for the rooflineHeatmap field.
func (r *queryResolver) RooflineHeatmap(ctx context.Context, filter []*model.JobFilter, rows int, cols int, minX float64, minY float64, maxX float64, maxY float64) ([][]float64, error) {
	return r.rooflineHeatmap(ctx, filter, rows, cols, minX, minY, maxX, maxY)
}

// NodeMetrics is the resolver for the nodeMetrics field.
func (r *queryResolver) NodeMetrics(ctx context.Context, cluster string, nodes []string, scopes []schema.MetricScope, metrics []string, from time.Time, to time.Time) ([]*model.NodeMetrics, error) {
	user := repository.GetUserFromContext(ctx)
	if user != nil && !user.HasAnyRole([]schema.Role{schema.RoleAdmin, schema.RoleSupport}) {
		return nil, errors.New("you need to be administrator or support staff for this query")
	}

	if metrics == nil {
		for _, mc := range archive.GetCluster(cluster).MetricConfig {
			metrics = append(metrics, mc.Name)
		}
	}

	data, err := metricDataDispatcher.LoadNodeData(cluster, metrics, nodes, scopes, from, to, ctx)
	if err != nil {
		log.Warn("error while loading node data")
		return nil, err
	}

	nodeMetrics := make([]*model.NodeMetrics, 0, len(data))
	for hostname, metrics := range data {
		host := &model.NodeMetrics{
			Host:    hostname,
			Metrics: make([]*model.JobMetricWithName, 0, len(metrics)*len(scopes)),
		}
		host.SubCluster, err = archive.GetSubClusterByNode(cluster, hostname)
		if err != nil {
			log.Warnf("error in nodeMetrics resolver: %s", err)
		}

		for metric, scopedMetrics := range metrics {
			for _, scopedMetric := range scopedMetrics {
				host.Metrics = append(host.Metrics, &model.JobMetricWithName{
					Name:   metric,
					Scope:  schema.MetricScopeNode,
					Metric: scopedMetric,
				})
			}
		}

		nodeMetrics = append(nodeMetrics, host)
	}

	return nodeMetrics, nil
}

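// Note: NodeMetricsList is the paginated variant of NodeMetrics: it filters nodes by
// subcluster and name, applies the same resolution fallback as JobMetrics, and reports
// the total node count together with a hasNextPage flag.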
// NodeMetricsList is the resolver for the nodeMetricsList field.
func (r *queryResolver) NodeMetricsList(ctx context.Context, cluster string, subCluster string, nodeFilter string, scopes []schema.MetricScope, metrics []string, from time.Time, to time.Time, page *model.PageRequest, resolution *int) (*model.NodesResultList, error) {
	if resolution == nil { // Load from Config
		if config.Keys.EnableResampling != nil {
			defaultRes := slices.Max(config.Keys.EnableResampling.Resolutions)
			resolution = &defaultRes
		} else { // Set 0 (Loads configured metric timestep)
			defaultRes := 0
			resolution = &defaultRes
		}
	}

	user := repository.GetUserFromContext(ctx)
	if user != nil && !user.HasAnyRole([]schema.Role{schema.RoleAdmin, schema.RoleSupport}) {
		return nil, errors.New("you need to be administrator or support staff for this query")
	}

	if metrics == nil {
		for _, mc := range archive.GetCluster(cluster).MetricConfig {
			metrics = append(metrics, mc.Name)
		}
	}

	data, totalNodes, hasNextPage, err := metricDataDispatcher.LoadNodeListData(cluster, subCluster, nodeFilter, metrics, scopes, *resolution, from, to, page, ctx)
	if err != nil {
		log.Warn("error while loading node data")
		return nil, err
	}

	nodeMetricsList := make([]*model.NodeMetrics, 0, len(data))
	for hostname, metrics := range data {
		host := &model.NodeMetrics{
			Host:    hostname,
			Metrics: make([]*model.JobMetricWithName, 0, len(metrics)*len(scopes)),
		}
		host.SubCluster, err = archive.GetSubClusterByNode(cluster, hostname)
		if err != nil {
			log.Warnf("error in nodeMetrics resolver: %s", err)
		}

		for metric, scopedMetrics := range metrics {
			for scope, scopedMetric := range scopedMetrics {
				host.Metrics = append(host.Metrics, &model.JobMetricWithName{
					Name:   metric,
					Scope:  scope,
					Metric: scopedMetric,
				})
			}
		}

		nodeMetricsList = append(nodeMetricsList, host)
	}

	nodeMetricsListResult := &model.NodesResultList{
		Items:       nodeMetricsList,
		TotalNodes:  &totalNodes,
		HasNextPage: &hasNextPage,
	}

	return nodeMetricsListResult, nil
}

// NumberOfNodes is the resolver for the numberOfNodes field.
func (r *subClusterResolver) NumberOfNodes(ctx context.Context, obj *schema.SubCluster) (int, error) {
	nodeList, err := archive.ParseNodeList(obj.Nodes)
	if err != nil {
		return 0, err
	}
	return nodeList.NodeCount(), nil
}

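// Note: The wrapper types below are how gqlgen wires field resolvers to this package's
// central Resolver; each one embeds *Resolver, so all resolver groups share the same
// repository handle and configuration.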
// Cluster returns generated.ClusterResolver implementation.
func (r *Resolver) Cluster() generated.ClusterResolver { return &clusterResolver{r} }

// Job returns generated.JobResolver implementation.
func (r *Resolver) Job() generated.JobResolver { return &jobResolver{r} }

// MetricValue returns generated.MetricValueResolver implementation.
func (r *Resolver) MetricValue() generated.MetricValueResolver { return &metricValueResolver{r} }

// Mutation returns generated.MutationResolver implementation.
func (r *Resolver) Mutation() generated.MutationResolver { return &mutationResolver{r} }

// Query returns generated.QueryResolver implementation.
func (r *Resolver) Query() generated.QueryResolver { return &queryResolver{r} }

// SubCluster returns generated.SubClusterResolver implementation.
func (r *Resolver) SubCluster() generated.SubClusterResolver { return &subClusterResolver{r} }

type (
	clusterResolver     struct{ *Resolver }
	jobResolver         struct{ *Resolver }
	metricValueResolver struct{ *Resolver }
	mutationResolver    struct{ *Resolver }
	queryResolver       struct{ *Resolver }
	subClusterResolver  struct{ *Resolver }
)