2021-03-31 07:23:48 +02:00
|
|
|
package graph
|
|
|
|
|
|
|
|
// This file will be automatically regenerated based on the schema, any resolver implementations
|
|
|
|
// will be copied through when generating and any unknown code will be moved to the end.
|
2024-07-11 16:58:12 +02:00
|
|
|
// Code generated by github.com/99designs/gqlgen version v0.17.49
|
2021-10-26 10:24:43 +02:00
|
|
|
|
|
|
|
import (
|
|
|
|
"context"
|
2021-12-09 16:25:48 +01:00
|
|
|
"errors"
|
2021-10-26 10:24:43 +02:00
|
|
|
"fmt"
|
2024-09-27 13:45:44 +02:00
|
|
|
"regexp"
|
2024-09-24 11:13:39 +02:00
|
|
|
"slices"
|
2021-10-26 10:24:43 +02:00
|
|
|
"strconv"
|
2024-07-11 17:23:59 +02:00
|
|
|
"strings"
|
2021-12-09 16:25:48 +01:00
|
|
|
"time"
|
2021-10-26 10:24:43 +02:00
|
|
|
|
2024-03-26 16:27:04 +01:00
|
|
|
"github.com/ClusterCockpit/cc-backend/internal/config"
|
2022-06-21 17:52:36 +02:00
|
|
|
"github.com/ClusterCockpit/cc-backend/internal/graph/generated"
|
|
|
|
"github.com/ClusterCockpit/cc-backend/internal/graph/model"
|
2024-08-28 10:03:04 +02:00
|
|
|
"github.com/ClusterCockpit/cc-backend/internal/metricDataDispatcher"
|
2022-09-05 17:46:38 +02:00
|
|
|
"github.com/ClusterCockpit/cc-backend/internal/repository"
|
|
|
|
"github.com/ClusterCockpit/cc-backend/pkg/archive"
|
2023-01-23 18:48:06 +01:00
|
|
|
"github.com/ClusterCockpit/cc-backend/pkg/log"
|
2022-06-21 17:52:36 +02:00
|
|
|
"github.com/ClusterCockpit/cc-backend/pkg/schema"
|
2021-10-26 10:24:43 +02:00
|
|
|
)
|
|
|
|
|
2022-09-07 12:24:45 +02:00
|
|
|
// Partitions is the resolver for the partitions field.
// Returns the batch-partition names the repository knows for the given cluster.
func (r *clusterResolver) Partitions(ctx context.Context, obj *schema.Cluster) ([]string, error) {
	return r.Repo.Partitions(obj.Name)
}
|
|
|
|
|
2022-09-07 12:24:45 +02:00
|
|
|
// Tags is the resolver for the tags field.
// Fetches only the tags attached to this specific job (filtered by obj.ID).
func (r *jobResolver) Tags(ctx context.Context, obj *schema.Job) ([]*schema.Tag, error) {
	return r.Repo.GetTags(ctx, &obj.ID)
}
|
|
|
|
|
2023-05-16 12:42:06 +02:00
|
|
|
// ConcurrentJobs is the resolver for the concurrentJobs field.
|
2023-06-28 13:35:41 +02:00
|
|
|
func (r *jobResolver) ConcurrentJobs(ctx context.Context, obj *schema.Job) (*model.JobLinkResultList, error) {
|
2023-06-20 10:38:53 +02:00
|
|
|
if obj.State == schema.JobStateRunning {
|
|
|
|
obj.Duration = int32(time.Now().Unix() - obj.StartTimeUnix)
|
|
|
|
}
|
2023-05-16 12:42:06 +02:00
|
|
|
|
2023-06-20 10:38:53 +02:00
|
|
|
if obj.Exclusive != 1 && obj.Duration > 600 {
|
2023-06-22 06:26:19 +02:00
|
|
|
return r.Repo.FindConcurrentJobs(ctx, obj)
|
2023-05-16 12:42:06 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
return nil, nil
|
|
|
|
}
|
|
|
|
|
2024-06-28 17:08:28 +02:00
|
|
|
// Footprint is the resolver for the footprint field.
|
2024-07-11 17:23:59 +02:00
|
|
|
func (r *jobResolver) Footprint(ctx context.Context, obj *schema.Job) ([]*model.FootprintValue, error) {
|
|
|
|
rawFootprint, err := r.Repo.FetchFootprint(obj)
|
|
|
|
if err != nil {
|
|
|
|
log.Warn("Error while fetching job footprint data")
|
|
|
|
return nil, err
|
|
|
|
}
|
|
|
|
|
|
|
|
res := []*model.FootprintValue{}
|
|
|
|
for name, value := range rawFootprint {
|
|
|
|
|
|
|
|
parts := strings.Split(name, "_")
|
|
|
|
statPart := parts[len(parts)-1]
|
|
|
|
nameParts := parts[:len(parts)-1]
|
|
|
|
|
|
|
|
res = append(res, &model.FootprintValue{
|
|
|
|
Name: strings.Join(nameParts, "_"),
|
|
|
|
Stat: statPart,
|
|
|
|
Value: value,
|
|
|
|
})
|
|
|
|
}
|
|
|
|
|
|
|
|
return res, err
|
2024-06-28 17:08:28 +02:00
|
|
|
}
|
|
|
|
|
2024-09-27 13:45:44 +02:00
|
|
|
// EnergyFootprint is the resolver for the energyFootprint field.
|
|
|
|
func (r *jobResolver) EnergyFootprint(ctx context.Context, obj *schema.Job) ([]*model.EnergyFootprintValue, error) {
|
|
|
|
rawEnergyFootprint, err := r.Repo.FetchEnergyFootprint(obj)
|
|
|
|
if err != nil {
|
|
|
|
log.Warn("Error while fetching job energy footprint data")
|
|
|
|
return nil, err
|
|
|
|
}
|
|
|
|
|
|
|
|
res := []*model.EnergyFootprintValue{}
|
|
|
|
for name, value := range rawEnergyFootprint {
|
|
|
|
// Suboptimal: Nearly hardcoded metric name expectations
|
|
|
|
matchCpu := regexp.MustCompile(`cpu|Cpu|CPU`)
|
|
|
|
matchAcc := regexp.MustCompile(`acc|Acc|ACC`)
|
|
|
|
matchMem := regexp.MustCompile(`mem|Mem|MEM`)
|
|
|
|
matchCore := regexp.MustCompile(`core|Core|CORE`)
|
|
|
|
|
|
|
|
hwType := ""
|
|
|
|
switch test := name; { // NOtice ';' for var declaration
|
|
|
|
case matchCpu.MatchString(test):
|
|
|
|
hwType = "CPU"
|
|
|
|
case matchAcc.MatchString(test):
|
|
|
|
hwType = "Accelerator"
|
|
|
|
case matchMem.MatchString(test):
|
|
|
|
hwType = "Memory"
|
|
|
|
case matchCore.MatchString(test):
|
|
|
|
hwType = "Core"
|
|
|
|
default:
|
2024-09-30 12:27:32 +02:00
|
|
|
hwType = "Other"
|
2024-09-27 13:45:44 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
res = append(res, &model.EnergyFootprintValue{
|
|
|
|
Hardware: hwType,
|
|
|
|
Metric: name,
|
|
|
|
Value: value,
|
|
|
|
})
|
|
|
|
}
|
|
|
|
return res, err
|
|
|
|
}
|
|
|
|
|
2022-09-07 12:24:45 +02:00
|
|
|
// MetaData is the resolver for the metaData field.
// Returns the job's raw metadata exactly as the repository stores it.
func (r *jobResolver) MetaData(ctx context.Context, obj *schema.Job) (any, error) {
	return r.Repo.FetchMetadata(obj)
}
|
|
|
|
|
2022-09-07 12:24:45 +02:00
|
|
|
// UserData is the resolver for the userData field.
// Looks up the record for the job's submitting user via the user repository.
func (r *jobResolver) UserData(ctx context.Context, obj *schema.Job) (*model.User, error) {
	return repository.GetUserRepository().FetchUserInCtx(ctx, obj.User)
}
|
|
|
|
|
2024-06-28 17:08:28 +02:00
|
|
|
// Name is the resolver for the name field.
// Generated gqlgen stub: intentionally unimplemented; panics if ever invoked.
func (r *metricValueResolver) Name(ctx context.Context, obj *schema.MetricValue) (*string, error) {
	panic(fmt.Errorf("not implemented: Name - name"))
}
|
|
|
|
|
2022-09-07 12:24:45 +02:00
|
|
|
// CreateTag is the resolver for the createTag field.
|
2024-08-01 18:59:24 +02:00
|
|
|
func (r *mutationResolver) CreateTag(ctx context.Context, typeArg string, name string, scope string) (*schema.Tag, error) {
|
|
|
|
id, err := r.Repo.CreateTag(typeArg, name, scope)
|
2021-10-26 10:24:43 +02:00
|
|
|
if err != nil {
|
2023-01-31 18:28:44 +01:00
|
|
|
log.Warn("Error while creating tag")
|
2021-10-26 10:24:43 +02:00
|
|
|
return nil, err
|
|
|
|
}
|
|
|
|
|
2024-08-01 18:59:24 +02:00
|
|
|
return &schema.Tag{ID: id, Type: typeArg, Name: name, Scope: scope}, nil
|
2021-10-26 10:24:43 +02:00
|
|
|
}
|
|
|
|
|
2022-09-07 12:24:45 +02:00
|
|
|
// DeleteTag is the resolver for the deleteTag field.
// Generated gqlgen stub: intentionally unimplemented; panics if ever invoked.
func (r *mutationResolver) DeleteTag(ctx context.Context, id string) (string, error) {
	panic(fmt.Errorf("not implemented: DeleteTag - deleteTag"))
}
|
|
|
|
|
2022-09-07 12:24:45 +02:00
|
|
|
// AddTagsToJob is the resolver for the addTagsToJob field.
|
2021-12-17 15:49:22 +01:00
|
|
|
func (r *mutationResolver) AddTagsToJob(ctx context.Context, job string, tagIds []string) ([]*schema.Tag, error) {
|
2024-08-01 18:59:24 +02:00
|
|
|
// Selectable Tags Pre-Filtered by Scope in Frontend: No backend check required
|
2022-02-17 09:04:57 +01:00
|
|
|
jid, err := strconv.ParseInt(job, 10, 64)
|
2021-10-26 10:24:43 +02:00
|
|
|
if err != nil {
|
2023-01-31 18:28:44 +01:00
|
|
|
log.Warn("Error while adding tag to job")
|
2021-10-26 10:24:43 +02:00
|
|
|
return nil, err
|
|
|
|
}
|
|
|
|
|
2022-02-23 14:18:55 +01:00
|
|
|
tags := []*schema.Tag{}
|
2021-10-26 10:24:43 +02:00
|
|
|
for _, tagId := range tagIds {
|
2022-02-17 09:04:57 +01:00
|
|
|
tid, err := strconv.ParseInt(tagId, 10, 64)
|
2021-10-26 10:24:43 +02:00
|
|
|
if err != nil {
|
2023-01-31 18:28:44 +01:00
|
|
|
log.Warn("Error while parsing tag id")
|
2021-10-26 10:24:43 +02:00
|
|
|
return nil, err
|
|
|
|
}
|
|
|
|
|
2024-08-01 18:59:24 +02:00
|
|
|
if tags, err = r.Repo.AddTag(ctx, jid, tid); err != nil {
|
2023-01-31 18:28:44 +01:00
|
|
|
log.Warn("Error while adding tag")
|
2021-10-26 10:24:43 +02:00
|
|
|
return nil, err
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2022-02-23 14:18:55 +01:00
|
|
|
return tags, nil
|
2021-10-26 10:24:43 +02:00
|
|
|
}
|
|
|
|
|
2022-09-07 12:24:45 +02:00
|
|
|
// RemoveTagsFromJob is the resolver for the removeTagsFromJob field.
|
2021-12-17 15:49:22 +01:00
|
|
|
func (r *mutationResolver) RemoveTagsFromJob(ctx context.Context, job string, tagIds []string) ([]*schema.Tag, error) {
|
2024-08-01 18:59:24 +02:00
|
|
|
// Removable Tags Pre-Filtered by Scope in Frontend: No backend check required
|
2022-02-17 09:20:57 +01:00
|
|
|
jid, err := strconv.ParseInt(job, 10, 64)
|
2021-10-26 10:24:43 +02:00
|
|
|
if err != nil {
|
2023-01-31 18:28:44 +01:00
|
|
|
log.Warn("Error while parsing job id")
|
2021-10-26 10:24:43 +02:00
|
|
|
return nil, err
|
|
|
|
}
|
|
|
|
|
2022-02-23 14:18:55 +01:00
|
|
|
tags := []*schema.Tag{}
|
2021-10-26 10:24:43 +02:00
|
|
|
for _, tagId := range tagIds {
|
2022-02-17 09:20:57 +01:00
|
|
|
tid, err := strconv.ParseInt(tagId, 10, 64)
|
2021-10-26 10:24:43 +02:00
|
|
|
if err != nil {
|
2023-01-31 18:28:44 +01:00
|
|
|
log.Warn("Error while parsing tag id")
|
2021-10-26 10:24:43 +02:00
|
|
|
return nil, err
|
|
|
|
}
|
|
|
|
|
2024-08-01 18:59:24 +02:00
|
|
|
if tags, err = r.Repo.RemoveTag(ctx, jid, tid); err != nil {
|
2023-01-31 18:28:44 +01:00
|
|
|
log.Warn("Error while removing tag")
|
2021-10-26 10:24:43 +02:00
|
|
|
return nil, err
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2022-02-23 14:18:55 +01:00
|
|
|
return tags, nil
|
2021-10-26 10:24:43 +02:00
|
|
|
}
|
|
|
|
|
2022-09-07 12:24:45 +02:00
|
|
|
// UpdateConfiguration is the resolver for the updateConfiguration field.
|
2021-10-26 10:24:43 +02:00
|
|
|
func (r *mutationResolver) UpdateConfiguration(ctx context.Context, name string, value string) (*string, error) {
|
2023-08-17 10:29:00 +02:00
|
|
|
if err := repository.GetUserCfgRepo().UpdateConfig(name, value, repository.GetUserFromContext(ctx)); err != nil {
|
2023-01-31 18:28:44 +01:00
|
|
|
log.Warn("Error while updating user config")
|
2021-10-26 10:24:43 +02:00
|
|
|
return nil, err
|
|
|
|
}
|
|
|
|
|
|
|
|
return nil, nil
|
|
|
|
}
|
|
|
|
|
2022-09-07 12:24:45 +02:00
|
|
|
// Clusters is the resolver for the clusters field.
// Serves the in-memory cluster list loaded from the job archive.
func (r *queryResolver) Clusters(ctx context.Context) ([]*schema.Cluster, error) {
	return archive.Clusters, nil
}
|
|
|
|
|
2022-09-07 12:24:45 +02:00
|
|
|
// Tags is the resolver for the tags field.
// A nil job filter returns all tags known to the repository.
func (r *queryResolver) Tags(ctx context.Context) ([]*schema.Tag, error) {
	return r.Repo.GetTags(ctx, nil)
}
|
|
|
|
|
2024-07-11 11:09:14 +02:00
|
|
|
// GlobalMetrics is the resolver for the globalMetrics field.
// Serves the in-memory global metric list loaded from the job archive.
func (r *queryResolver) GlobalMetrics(ctx context.Context) ([]*schema.GlobalMetricListItem, error) {
	return archive.GlobalMetricList, nil
}
|
|
|
|
|
2022-09-07 12:24:45 +02:00
|
|
|
// User is the resolver for the user field.
// Looks up the record for the given username via the user repository.
func (r *queryResolver) User(ctx context.Context, username string) (*model.User, error) {
	return repository.GetUserRepository().FetchUserInCtx(ctx, username)
}
|
|
|
|
|
2022-09-07 12:24:45 +02:00
|
|
|
// AllocatedNodes is the resolver for the allocatedNodes field.
|
2022-03-24 16:08:47 +01:00
|
|
|
func (r *queryResolver) AllocatedNodes(ctx context.Context, cluster string) ([]*model.Count, error) {
|
|
|
|
data, err := r.Repo.AllocatedNodes(cluster)
|
|
|
|
if err != nil {
|
2023-01-31 18:28:44 +01:00
|
|
|
log.Warn("Error while fetching allocated nodes")
|
2022-03-24 16:08:47 +01:00
|
|
|
return nil, err
|
|
|
|
}
|
|
|
|
|
|
|
|
counts := make([]*model.Count, 0, len(data))
|
|
|
|
for subcluster, hosts := range data {
|
|
|
|
counts = append(counts, &model.Count{
|
|
|
|
Name: subcluster,
|
|
|
|
Count: len(hosts),
|
|
|
|
})
|
|
|
|
}
|
|
|
|
|
|
|
|
return counts, nil
|
2022-03-24 10:32:08 +01:00
|
|
|
}
|
|
|
|
|
2022-09-07 12:24:45 +02:00
|
|
|
// Job is the resolver for the job field.
|
2021-12-17 15:49:22 +01:00
|
|
|
func (r *queryResolver) Job(ctx context.Context, id string) (*schema.Job, error) {
|
2022-02-17 09:04:57 +01:00
|
|
|
numericId, err := strconv.ParseInt(id, 10, 64)
|
|
|
|
if err != nil {
|
2023-01-31 18:28:44 +01:00
|
|
|
log.Warn("Error while parsing job id")
|
2022-02-17 09:04:57 +01:00
|
|
|
return nil, err
|
|
|
|
}
|
|
|
|
|
2024-07-03 17:24:26 +02:00
|
|
|
job, err := r.Repo.FindById(ctx, numericId)
|
2022-02-17 09:04:57 +01:00
|
|
|
if err != nil {
|
2023-01-31 18:28:44 +01:00
|
|
|
log.Warn("Error while finding job by id")
|
2022-02-17 09:04:57 +01:00
|
|
|
return nil, err
|
|
|
|
}
|
|
|
|
|
2023-08-17 10:29:00 +02:00
|
|
|
if user := repository.GetUserFromContext(ctx); user != nil &&
|
|
|
|
job.User != user.Username &&
|
|
|
|
user.HasNotRoles([]schema.Role{schema.RoleAdmin, schema.RoleSupport, schema.RoleManager}) {
|
2022-02-17 09:04:57 +01:00
|
|
|
return nil, errors.New("you are not allowed to see this job")
|
2021-12-17 15:49:22 +01:00
|
|
|
}
|
|
|
|
|
2022-02-17 09:04:57 +01:00
|
|
|
return job, nil
|
2021-10-26 10:24:43 +02:00
|
|
|
}
|
|
|
|
|
2022-09-07 12:24:45 +02:00
|
|
|
// JobMetrics is the resolver for the jobMetrics field.
|
2024-08-16 14:50:31 +02:00
|
|
|
func (r *queryResolver) JobMetrics(ctx context.Context, id string, metrics []string, scopes []schema.MetricScope, resolution *int) ([]*model.JobMetricWithName, error) {
|
2024-10-08 10:42:13 +02:00
|
|
|
|
|
|
|
if resolution == nil { // Load from Config
|
|
|
|
if config.Keys.EnableResampling != nil {
|
|
|
|
defaultRes := slices.Max(config.Keys.EnableResampling.Resolutions)
|
|
|
|
resolution = &defaultRes
|
|
|
|
} else { // Set 0 (Loads configured metric timestep)
|
|
|
|
defaultRes := 0
|
|
|
|
resolution = &defaultRes
|
|
|
|
}
|
2024-08-16 14:50:31 +02:00
|
|
|
}
|
|
|
|
|
2021-10-26 10:24:43 +02:00
|
|
|
job, err := r.Query().Job(ctx, id)
|
|
|
|
if err != nil {
|
2023-01-31 18:28:44 +01:00
|
|
|
log.Warn("Error while querying job for metrics")
|
2021-10-26 10:24:43 +02:00
|
|
|
return nil, err
|
|
|
|
}
|
|
|
|
|
2024-09-05 17:26:43 +02:00
|
|
|
data, err := metricDataDispatcher.LoadData(job, metrics, scopes, ctx, *resolution)
|
2021-10-26 10:24:43 +02:00
|
|
|
if err != nil {
|
2023-01-31 18:28:44 +01:00
|
|
|
log.Warn("Error while loading job data")
|
2021-10-26 10:24:43 +02:00
|
|
|
return nil, err
|
|
|
|
}
|
|
|
|
|
|
|
|
res := []*model.JobMetricWithName{}
|
|
|
|
for name, md := range data {
|
2023-03-24 14:43:37 +01:00
|
|
|
for scope, metric := range md {
|
2021-12-20 10:48:58 +01:00
|
|
|
res = append(res, &model.JobMetricWithName{
|
|
|
|
Name: name,
|
2023-03-24 14:43:37 +01:00
|
|
|
Scope: scope,
|
2021-12-20 10:48:58 +01:00
|
|
|
Metric: metric,
|
|
|
|
})
|
|
|
|
}
|
2021-10-26 10:24:43 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
return res, err
|
|
|
|
}
|
|
|
|
|
2022-09-07 12:24:45 +02:00
|
|
|
// JobsFootprints is the resolver for the jobsFootprints field.
// Delegates to the unexported jobsFootprints helper (defined elsewhere in
// this package).
func (r *queryResolver) JobsFootprints(ctx context.Context, filter []*model.JobFilter, metrics []string) (*model.Footprints, error) {
	return r.jobsFootprints(ctx, filter, metrics)
}
|
|
|
|
|
2022-09-07 12:24:45 +02:00
|
|
|
// Jobs is the resolver for the jobs field.
|
2023-06-09 09:09:41 +02:00
|
|
|
func (r *queryResolver) Jobs(ctx context.Context, filter []*model.JobFilter, page *model.PageRequest, order *model.OrderByInput) (*model.JobResultList, error) {
|
2022-02-22 09:19:59 +01:00
|
|
|
if page == nil {
|
|
|
|
page = &model.PageRequest{
|
|
|
|
ItemsPerPage: 50,
|
|
|
|
Page: 1,
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2022-02-17 09:04:57 +01:00
|
|
|
jobs, err := r.Repo.QueryJobs(ctx, filter, page, order)
|
|
|
|
if err != nil {
|
2023-01-31 18:28:44 +01:00
|
|
|
log.Warn("Error while querying jobs")
|
2022-02-17 09:04:57 +01:00
|
|
|
return nil, err
|
|
|
|
}
|
|
|
|
|
|
|
|
count, err := r.Repo.CountJobs(ctx, filter)
|
2021-10-26 10:24:43 +02:00
|
|
|
if err != nil {
|
2023-01-31 18:28:44 +01:00
|
|
|
log.Warn("Error while counting jobs")
|
2021-10-26 10:24:43 +02:00
|
|
|
return nil, err
|
|
|
|
}
|
|
|
|
|
2024-03-26 16:27:04 +01:00
|
|
|
if !config.Keys.UiDefaults["job_list_usePaging"].(bool) {
|
|
|
|
hasNextPage := false
|
2024-04-22 11:29:31 +02:00
|
|
|
// page.Page += 1 : Simple, but expensive
|
|
|
|
// Example Page 4 @ 10 IpP : Does item 41 exist?
|
|
|
|
// Minimal Page 41 @ 1 IpP : If len(result) is 1, Page 5 @ 10 IpP exists.
|
|
|
|
nextPage := &model.PageRequest{
|
|
|
|
ItemsPerPage: 1,
|
|
|
|
Page: ((page.Page * page.ItemsPerPage) + 1),
|
|
|
|
}
|
2024-03-26 16:27:04 +01:00
|
|
|
|
2024-04-22 11:29:31 +02:00
|
|
|
nextJobs, err := r.Repo.QueryJobs(ctx, filter, nextPage, order)
|
2024-03-26 16:27:04 +01:00
|
|
|
if err != nil {
|
|
|
|
log.Warn("Error while querying next jobs")
|
|
|
|
return nil, err
|
|
|
|
}
|
2024-04-22 11:29:31 +02:00
|
|
|
|
|
|
|
if len(nextJobs) == 1 {
|
2024-03-26 16:27:04 +01:00
|
|
|
hasNextPage = true
|
|
|
|
}
|
|
|
|
|
|
|
|
return &model.JobResultList{Items: jobs, Count: &count, HasNextPage: &hasNextPage}, nil
|
|
|
|
} else {
|
|
|
|
return &model.JobResultList{Items: jobs, Count: &count}, nil
|
|
|
|
}
|
2021-10-26 10:24:43 +02:00
|
|
|
}
|
|
|
|
|
2022-09-07 12:24:45 +02:00
|
|
|
// JobsStatistics is the resolver for the jobsStatistics field.
|
2023-12-01 13:22:01 +01:00
|
|
|
func (r *queryResolver) JobsStatistics(ctx context.Context, filter []*model.JobFilter, metrics []string, page *model.PageRequest, sortBy *model.SortByAggregate, groupBy *model.Aggregate) ([]*model.JobsStatistics, error) {
|
2023-06-07 11:58:58 +02:00
|
|
|
var err error
|
|
|
|
var stats []*model.JobsStatistics
|
|
|
|
|
2023-08-29 17:38:17 +02:00
|
|
|
if requireField(ctx, "totalJobs") || requireField(ctx, "totalWalltime") || requireField(ctx, "totalNodes") || requireField(ctx, "totalCores") ||
|
|
|
|
requireField(ctx, "totalAccs") || requireField(ctx, "totalNodeHours") || requireField(ctx, "totalCoreHours") || requireField(ctx, "totalAccHours") {
|
2023-06-09 09:09:41 +02:00
|
|
|
if groupBy == nil {
|
|
|
|
stats, err = r.Repo.JobsStats(ctx, filter)
|
2023-06-07 11:58:58 +02:00
|
|
|
} else {
|
2023-08-25 13:14:34 +02:00
|
|
|
stats, err = r.Repo.JobsStatsGrouped(ctx, filter, page, sortBy, groupBy)
|
2023-06-07 11:58:58 +02:00
|
|
|
}
|
|
|
|
} else {
|
|
|
|
stats = make([]*model.JobsStatistics, 0, 1)
|
2023-08-24 14:55:49 +02:00
|
|
|
stats = append(stats, &model.JobsStatistics{})
|
2023-06-07 11:58:58 +02:00
|
|
|
}
|
|
|
|
|
2023-06-09 09:09:41 +02:00
|
|
|
if groupBy != nil {
|
|
|
|
if requireField(ctx, "shortJobs") {
|
|
|
|
stats, err = r.Repo.AddJobCountGrouped(ctx, filter, groupBy, stats, "short")
|
|
|
|
}
|
2023-08-24 14:55:49 +02:00
|
|
|
if requireField(ctx, "runningJobs") {
|
2023-06-09 09:09:41 +02:00
|
|
|
stats, err = r.Repo.AddJobCountGrouped(ctx, filter, groupBy, stats, "running")
|
|
|
|
}
|
|
|
|
} else {
|
2023-06-09 13:15:25 +02:00
|
|
|
if requireField(ctx, "shortJobs") {
|
|
|
|
stats, err = r.Repo.AddJobCount(ctx, filter, stats, "short")
|
|
|
|
}
|
2023-08-24 14:55:49 +02:00
|
|
|
if requireField(ctx, "runningJobs") {
|
2023-06-09 13:15:25 +02:00
|
|
|
stats, err = r.Repo.AddJobCount(ctx, filter, stats, "running")
|
|
|
|
}
|
2023-06-09 09:09:41 +02:00
|
|
|
}
|
|
|
|
|
2023-06-07 11:58:58 +02:00
|
|
|
if err != nil {
|
|
|
|
return nil, err
|
|
|
|
}
|
|
|
|
|
2023-08-29 14:02:23 +02:00
|
|
|
if requireField(ctx, "histDuration") || requireField(ctx, "histNumNodes") || requireField(ctx, "histNumCores") || requireField(ctx, "histNumAccs") {
|
2023-06-07 11:58:58 +02:00
|
|
|
if groupBy == nil {
|
|
|
|
stats[0], err = r.Repo.AddHistograms(ctx, filter, stats[0])
|
|
|
|
if err != nil {
|
|
|
|
return nil, err
|
|
|
|
}
|
|
|
|
} else {
|
|
|
|
return nil, errors.New("histograms only implemented without groupBy argument")
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2023-12-01 13:22:01 +01:00
|
|
|
if requireField(ctx, "histMetrics") {
|
|
|
|
if groupBy == nil {
|
|
|
|
stats[0], err = r.Repo.AddMetricHistograms(ctx, filter, metrics, stats[0])
|
|
|
|
if err != nil {
|
|
|
|
return nil, err
|
|
|
|
}
|
|
|
|
} else {
|
|
|
|
return nil, errors.New("metric histograms only implemented without groupBy argument")
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2023-06-07 11:58:58 +02:00
|
|
|
return stats, nil
|
2021-10-26 10:24:43 +02:00
|
|
|
}
|
|
|
|
|
2022-09-07 12:24:45 +02:00
|
|
|
// RooflineHeatmap is the resolver for the rooflineHeatmap field.
// Delegates to the unexported rooflineHeatmap helper (defined elsewhere in
// this package) with the given grid dimensions and axis bounds.
func (r *queryResolver) RooflineHeatmap(ctx context.Context, filter []*model.JobFilter, rows int, cols int, minX float64, minY float64, maxX float64, maxY float64) ([][]float64, error) {
	return r.rooflineHeatmap(ctx, filter, rows, cols, minX, minY, maxX, maxY)
}
|
|
|
|
|
2022-09-07 12:24:45 +02:00
|
|
|
// NodeMetrics is the resolver for the nodeMetrics field.
|
2022-03-24 14:34:42 +01:00
|
|
|
func (r *queryResolver) NodeMetrics(ctx context.Context, cluster string, nodes []string, scopes []schema.MetricScope, metrics []string, from time.Time, to time.Time) ([]*model.NodeMetrics, error) {
|
2023-08-17 10:29:00 +02:00
|
|
|
user := repository.GetUserFromContext(ctx)
|
|
|
|
if user != nil && !user.HasRole(schema.RoleAdmin) {
|
2021-12-09 16:25:48 +01:00
|
|
|
return nil, errors.New("you need to be an administrator for this query")
|
|
|
|
}
|
|
|
|
|
2022-02-02 13:59:08 +01:00
|
|
|
if metrics == nil {
|
2022-09-05 17:46:38 +02:00
|
|
|
for _, mc := range archive.GetCluster(cluster).MetricConfig {
|
2022-02-02 13:59:08 +01:00
|
|
|
metrics = append(metrics, mc.Name)
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2024-08-28 10:03:04 +02:00
|
|
|
data, err := metricDataDispatcher.LoadNodeData(cluster, metrics, nodes, scopes, from, to, ctx)
|
2022-02-02 13:05:21 +01:00
|
|
|
if err != nil {
|
2023-01-31 18:28:44 +01:00
|
|
|
log.Warn("Error while loading node data")
|
2022-02-02 13:05:21 +01:00
|
|
|
return nil, err
|
|
|
|
}
|
|
|
|
|
|
|
|
nodeMetrics := make([]*model.NodeMetrics, 0, len(data))
|
|
|
|
for hostname, metrics := range data {
|
|
|
|
host := &model.NodeMetrics{
|
|
|
|
Host: hostname,
|
|
|
|
Metrics: make([]*model.JobMetricWithName, 0, len(metrics)*len(scopes)),
|
|
|
|
}
|
2022-09-05 17:46:38 +02:00
|
|
|
host.SubCluster, _ = archive.GetSubClusterByNode(cluster, hostname)
|
2022-02-02 13:05:21 +01:00
|
|
|
|
|
|
|
for metric, scopedMetrics := range metrics {
|
|
|
|
for _, scopedMetric := range scopedMetrics {
|
|
|
|
host.Metrics = append(host.Metrics, &model.JobMetricWithName{
|
|
|
|
Name: metric,
|
2023-03-31 17:18:16 +02:00
|
|
|
Scope: schema.MetricScopeNode,
|
2022-02-02 13:05:21 +01:00
|
|
|
Metric: scopedMetric,
|
|
|
|
})
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
nodeMetrics = append(nodeMetrics, host)
|
|
|
|
}
|
|
|
|
|
|
|
|
return nodeMetrics, nil
|
2021-12-09 16:25:48 +01:00
|
|
|
}
|
|
|
|
|
2023-03-24 15:10:23 +01:00
|
|
|
// NumberOfNodes is the resolver for the numberOfNodes field.
|
|
|
|
func (r *subClusterResolver) NumberOfNodes(ctx context.Context, obj *schema.SubCluster) (int, error) {
|
2023-03-30 15:21:35 +02:00
|
|
|
nodeList, err := archive.ParseNodeList(obj.Nodes)
|
|
|
|
if err != nil {
|
|
|
|
return 0, err
|
|
|
|
}
|
2023-04-06 11:38:10 +02:00
|
|
|
return nodeList.NodeCount(), nil
|
2023-03-24 15:10:23 +01:00
|
|
|
}
|
|
|
|
|
2022-03-14 10:18:56 +01:00
|
|
|
// ---- gqlgen wiring ----
// The generated server asks the root Resolver for one implementation per
// GraphQL type; each getter hands back a thin per-type resolver that embeds
// *Resolver to share its repositories and configuration.

// Cluster returns generated.ClusterResolver implementation.
func (r *Resolver) Cluster() generated.ClusterResolver { return &clusterResolver{r} }

// Job returns generated.JobResolver implementation.
func (r *Resolver) Job() generated.JobResolver { return &jobResolver{r} }

// MetricValue returns generated.MetricValueResolver implementation.
func (r *Resolver) MetricValue() generated.MetricValueResolver { return &metricValueResolver{r} }

// Mutation returns generated.MutationResolver implementation.
func (r *Resolver) Mutation() generated.MutationResolver { return &mutationResolver{r} }

// Query returns generated.QueryResolver implementation.
func (r *Resolver) Query() generated.QueryResolver { return &queryResolver{r} }

// SubCluster returns generated.SubClusterResolver implementation.
func (r *Resolver) SubCluster() generated.SubClusterResolver { return &subClusterResolver{r} }

// Per-type resolver structs; each embeds *Resolver for shared state.
type clusterResolver struct{ *Resolver }

type jobResolver struct{ *Resolver }

type metricValueResolver struct{ *Resolver }

type mutationResolver struct{ *Resolver }

type queryResolver struct{ *Resolver }

type subClusterResolver struct{ *Resolver }
|