package graph

// This file will be automatically regenerated based on the schema, any resolver implementations
// will be copied through when generating and any unknown code will be moved to the end.
// Code generated by github.com/99designs/gqlgen version v0.17.49

import (
	"context"
	"errors"
	"fmt"
	"regexp"
	"slices"
	"strconv"
	"strings"
	"time"

	"github.com/ClusterCockpit/cc-backend/internal/config"
	"github.com/ClusterCockpit/cc-backend/internal/graph/generated"
	"github.com/ClusterCockpit/cc-backend/internal/graph/model"
	"github.com/ClusterCockpit/cc-backend/internal/metricDataDispatcher"
	"github.com/ClusterCockpit/cc-backend/internal/repository"
	"github.com/ClusterCockpit/cc-backend/pkg/archive"
	"github.com/ClusterCockpit/cc-backend/pkg/log"
	"github.com/ClusterCockpit/cc-backend/pkg/schema"
)

// Partitions is the resolver for the partitions field.
// Thin wrapper: delegates to the job repository to list the partition
// names recorded for this cluster.
func (r *clusterResolver) Partitions(ctx context.Context, obj *schema.Cluster) ([]string, error) {
	return r.Repo.Partitions(obj.Name)
}

// Tags is the resolver for the tags field.
// Fetches the tags attached to this job; the requesting user is passed so
// the repository can apply per-user tag visibility (scope) rules.
func (r *jobResolver) Tags(ctx context.Context, obj *schema.Job) ([]*schema.Tag, error) {
	return r.Repo.GetTags(repository.GetUserFromContext(ctx), &obj.ID)
}

// ConcurrentJobs is the resolver for the concurrentJobs field.
// Returns jobs that overlapped in time with this one, but only for shared
// (non-exclusive) jobs that ran longer than ten minutes; otherwise nil.
func (r *jobResolver) ConcurrentJobs(ctx context.Context, obj *schema.Job) (*model.JobLinkResultList, error) {
	// For running jobs the stored duration is stale; derive it from now.
	if obj.State == schema.JobStateRunning {
		obj.Duration = int32(time.Now().Unix() - obj.StartTimeUnix)
	}

	// Exclusive jobs cannot share nodes, and very short jobs are skipped.
	if obj.Exclusive == 1 || obj.Duration <= 600 {
		return nil, nil
	}

	return r.Repo.FindConcurrentJobs(ctx, obj)
}

// Footprint is the resolver for the footprint field.
// The raw footprint keys encode "<metricName>_<stat>"; this splits each key
// at its LAST underscore so metric names that themselves contain
// underscores (e.g. "mem_bw") stay intact.
func (r *jobResolver) Footprint(ctx context.Context, obj *schema.Job) ([]*model.FootprintValue, error) {
	rawFootprint, err := r.Repo.FetchFootprint(obj)
	if err != nil {
		log.Warn("Error while fetching job footprint data")
		return nil, err
	}

	res := make([]*model.FootprintValue, 0, len(rawFootprint))
	for name, value := range rawFootprint {
		// Single cut at the last underscore instead of Split+Join.
		// A key without any underscore yields an empty metric name and the
		// whole key as the stat, matching the previous behavior.
		metricName, stat := "", name
		if idx := strings.LastIndex(name, "_"); idx >= 0 {
			metricName, stat = name[:idx], name[idx+1:]
		}

		res = append(res, &model.FootprintValue{
			Name:  metricName,
			Stat:  stat,
			Value: value,
		})
	}

	// err is known to be nil here; return it explicitly.
	return res, nil
}

// hwTypePatterns maps metric-name patterns to hardware categories for the
// energy footprint. Compiled once at package init instead of on every loop
// iteration of every resolver call (regexp compilation is expensive).
// Order matters: the first matching pattern wins, mirroring the original
// switch-case evaluation order. Patterns are intentionally the exact
// three-casing alternations the original used (not `(?i)`), so matching
// semantics are unchanged.
var hwTypePatterns = []struct {
	re *regexp.Regexp
	hw string
}{
	{regexp.MustCompile(`cpu|Cpu|CPU`), "CPU"},
	{regexp.MustCompile(`acc|Acc|ACC`), "Accelerator"},
	{regexp.MustCompile(`mem|Mem|MEM`), "Memory"},
	{regexp.MustCompile(`core|Core|CORE`), "Core"},
}

// EnergyFootprint is the resolver for the energyFootprint field.
// Classifies each energy metric into a hardware category by name.
// Suboptimal: nearly hardcoded metric name expectations (see hwTypePatterns).
func (r *jobResolver) EnergyFootprint(ctx context.Context, obj *schema.Job) ([]*model.EnergyFootprintValue, error) {
	rawEnergyFootprint, err := r.Repo.FetchEnergyFootprint(obj)
	if err != nil {
		log.Warn("Error while fetching job energy footprint data")
		return nil, err
	}

	res := make([]*model.EnergyFootprintValue, 0, len(rawEnergyFootprint))
	for name, value := range rawEnergyFootprint {
		hwType := "Other"
		for _, p := range hwTypePatterns {
			if p.re.MatchString(name) {
				hwType = p.hw
				break
			}
		}

		res = append(res, &model.EnergyFootprintValue{
			Hardware: hwType,
			Metric:   name,
			Value:    value,
		})
	}

	// err is known to be nil here; return it explicitly.
	return res, nil
}

// MetaData is the resolver for the metaData field.
// Returns the job's free-form metadata as fetched from the repository.
func (r *jobResolver) MetaData(ctx context.Context, obj *schema.Job) (any, error) {
	return r.Repo.FetchMetadata(obj)
}

// UserData is the resolver for the userData field.
// Resolves the job owner's username to a full user record.
func (r *jobResolver) UserData(ctx context.Context, obj *schema.Job) (*model.User, error) {
	return repository.GetUserRepository().FetchUserInCtx(ctx, obj.User)
}

// Name is the resolver for the name field.
// gqlgen-generated stub: intentionally panics until implemented.
func (r *metricValueResolver) Name(ctx context.Context, obj *schema.MetricValue) (*string, error) {
	panic(fmt.Errorf("not implemented: Name - name"))
}

// CreateTag is the resolver for the createTag field.
// Persists a new tag and returns it with the database-assigned ID.
func (r *mutationResolver) CreateTag(ctx context.Context, typeArg string, name string, scope string) (*schema.Tag, error) {
	id, err := r.Repo.CreateTag(typeArg, name, scope)
	if err != nil {
		log.Warn("Error while creating tag")
		return nil, err
	}

	newTag := &schema.Tag{
		ID:    id,
		Type:  typeArg,
		Name:  name,
		Scope: scope,
	}
	return newTag, nil
}

// DeleteTag is the resolver for the deleteTag field.
// gqlgen-generated stub: intentionally panics until implemented.
func (r *mutationResolver) DeleteTag(ctx context.Context, id string) (string, error) {
	panic(fmt.Errorf("not implemented: DeleteTag - deleteTag"))
}

// AddTagsToJob is the resolver for the addTagsToJob field.
// Attaches each tag in tagIds to the given job and returns the job's
// resulting tag list (as reported by the last AddTag call).
func (r *mutationResolver) AddTagsToJob(ctx context.Context, job string, tagIds []string) ([]*schema.Tag, error) {
	// Selectable Tags Pre-Filtered by Scope in Frontend: No backend check required
	jobID, err := strconv.ParseInt(job, 10, 64)
	if err != nil {
		log.Warn("Error while adding tag to job")
		return nil, err
	}

	user := repository.GetUserFromContext(ctx)
	tags := []*schema.Tag{}
	for _, rawTagID := range tagIds {
		tagID, parseErr := strconv.ParseInt(rawTagID, 10, 64)
		if parseErr != nil {
			log.Warn("Error while parsing tag id")
			return nil, parseErr
		}

		tags, err = r.Repo.AddTag(user, jobID, tagID)
		if err != nil {
			log.Warn("Error while adding tag")
			return nil, err
		}
	}

	return tags, nil
}

// RemoveTagsFromJob is the resolver for the removeTagsFromJob field.
// Detaches each tag in tagIds from the given job and returns the job's
// resulting tag list (as reported by the last RemoveTag call).
func (r *mutationResolver) RemoveTagsFromJob(ctx context.Context, job string, tagIds []string) ([]*schema.Tag, error) {
	// Removable Tags Pre-Filtered by Scope in Frontend: No backend check required
	jobID, err := strconv.ParseInt(job, 10, 64)
	if err != nil {
		log.Warn("Error while parsing job id")
		return nil, err
	}

	user := repository.GetUserFromContext(ctx)
	tags := []*schema.Tag{}
	for _, rawTagID := range tagIds {
		tagID, parseErr := strconv.ParseInt(rawTagID, 10, 64)
		if parseErr != nil {
			log.Warn("Error while parsing tag id")
			return nil, parseErr
		}

		tags, err = r.Repo.RemoveTag(user, jobID, tagID)
		if err != nil {
			log.Warn("Error while removing tag")
			return nil, err
		}
	}

	return tags, nil
}

// UpdateConfiguration is the resolver for the updateConfiguration field.
// Stores a single key/value pair in the requesting user's configuration.
func (r *mutationResolver) UpdateConfiguration(ctx context.Context, name string, value string) (*string, error) {
	user := repository.GetUserFromContext(ctx)
	err := repository.GetUserCfgRepo().UpdateConfig(name, value, user)
	if err != nil {
		log.Warn("Error while updating user config")
		return nil, err
	}

	// Nothing useful to return on success.
	return nil, nil
}

// Clusters is the resolver for the clusters field.
// Serves the cluster list directly from the in-memory archive; no DB access.
func (r *queryResolver) Clusters(ctx context.Context) ([]*schema.Cluster, error) {
	return archive.Clusters, nil
}

// Tags is the resolver for the tags field.
// Lists all tags visible to the requesting user (nil job ID = no job filter).
func (r *queryResolver) Tags(ctx context.Context) ([]*schema.Tag, error) {
	return r.Repo.GetTags(repository.GetUserFromContext(ctx), nil)
}

// GlobalMetrics is the resolver for the globalMetrics field.
// Serves the global metric list from the in-memory archive; no DB access.
func (r *queryResolver) GlobalMetrics(ctx context.Context) ([]*schema.GlobalMetricListItem, error) {
	return archive.GlobalMetricList, nil
}

// User is the resolver for the user field.
// Looks up a user record by username via the user repository.
func (r *queryResolver) User(ctx context.Context, username string) (*model.User, error) {
	return repository.GetUserRepository().FetchUserInCtx(ctx, username)
}

// AllocatedNodes is the resolver for the allocatedNodes field.
// Reports, per subcluster, how many hosts currently have allocated jobs.
func (r *queryResolver) AllocatedNodes(ctx context.Context, cluster string) ([]*model.Count, error) {
	hostsBySubCluster, err := r.Repo.AllocatedNodes(cluster)
	if err != nil {
		log.Warn("Error while fetching allocated nodes")
		return nil, err
	}

	// One count entry per subcluster.
	counts := make([]*model.Count, 0, len(hostsBySubCluster))
	for subCluster, hosts := range hostsBySubCluster {
		counts = append(counts, &model.Count{Name: subCluster, Count: len(hosts)})
	}

	return counts, nil
}

// Job is the resolver for the job field.
// Looks up a single job by its string-encoded numeric ID. Regular users may
// only inspect their own jobs; admin/support/manager roles see all jobs.
func (r *queryResolver) Job(ctx context.Context, id string) (*schema.Job, error) {
	jobID, err := strconv.ParseInt(id, 10, 64)
	if err != nil {
		log.Warn("Error while parsing job id")
		return nil, err
	}

	job, err := r.Repo.FindById(ctx, jobID)
	if err != nil {
		log.Warn("Error while finding job by id")
		return nil, err
	}

	// Access control: deny if an authenticated, non-privileged user asks
	// for somebody else's job.
	user := repository.GetUserFromContext(ctx)
	if user != nil && job.User != user.Username &&
		user.HasNotRoles([]schema.Role{schema.RoleAdmin, schema.RoleSupport, schema.RoleManager}) {
		return nil, errors.New("you are not allowed to see this job")
	}

	return job, nil
}

// JobMetrics is the resolver for the jobMetrics field.
// Loads metric data for one job at the requested resolution; goes through
// the Job resolver first so its access-control check applies here too.
func (r *queryResolver) JobMetrics(ctx context.Context, id string, metrics []string, scopes []schema.MetricScope, resolution *int) ([]*model.JobMetricWithName, error) {
	if resolution == nil { // Load default from config
		// 0 loads the configured metric timestep; with resampling enabled,
		// default to the coarsest configured resolution instead.
		defaultRes := 0
		if config.Keys.EnableResampling != nil {
			defaultRes = slices.Max(config.Keys.EnableResampling.Resolutions)
		}
		resolution = &defaultRes
	}

	job, err := r.Query().Job(ctx, id)
	if err != nil {
		log.Warn("Error while querying job for metrics")
		return nil, err
	}

	data, err := metricDataDispatcher.LoadData(job, metrics, scopes, ctx, *resolution)
	if err != nil {
		log.Warn("Error while loading job data")
		return nil, err
	}

	// Flatten the nested metric -> scope map into the GraphQL result shape.
	res := []*model.JobMetricWithName{}
	for name, md := range data {
		for scope, metric := range md {
			res = append(res, &model.JobMetricWithName{
				Name:   name,
				Scope:  scope,
				Metric: metric,
			})
		}
	}

	// Fix: return an explicit nil instead of the stale err variable.
	return res, nil
}

// JobsFootprints is the resolver for the jobsFootprints field.
// Delegates to the unexported helper implemented elsewhere in this package.
func (r *queryResolver) JobsFootprints(ctx context.Context, filter []*model.JobFilter, metrics []string) (*model.Footprints, error) {
	return r.jobsFootprints(ctx, filter, metrics)
}

// Jobs is the resolver for the jobs field.
// Returns one page of jobs matching the filters plus the total match count.
// When the UI runs in infinite-scroll mode (job_list_usePaging == false),
// it additionally probes whether a further page exists.
func (r *queryResolver) Jobs(ctx context.Context, filter []*model.JobFilter, page *model.PageRequest, order *model.OrderByInput) (*model.JobResultList, error) {
	// Default pagination if the client did not request a page.
	if page == nil {
		page = &model.PageRequest{
			ItemsPerPage: 50,
			Page:         1,
		}
	}

	jobs, err := r.Repo.QueryJobs(ctx, filter, page, order)
	if err != nil {
		log.Warn("Error while querying jobs")
		return nil, err
	}

	count, err := r.Repo.CountJobs(ctx, filter)
	if err != nil {
		log.Warn("Error while counting jobs")
		return nil, err
	}

	// Classic paging: count suffices, no existence probe needed.
	// (Early return instead of if/else after a terminating branch.)
	if config.Keys.UiDefaults["job_list_usePaging"].(bool) {
		return &model.JobResultList{Items: jobs, Count: &count}, nil
	}

	// Infinite scroll: cheaply check whether at least one more item exists.
	// page.Page += 1 		   : Simple, but expensive
	// Example Page 4 @ 10 IpP : Does item 41 exist?
	// Minimal Page 41 @ 1 IpP : If len(result) is 1, Page 5 @ 10 IpP exists.
	nextPage := &model.PageRequest{
		ItemsPerPage: 1,
		Page:         ((page.Page * page.ItemsPerPage) + 1),
	}

	nextJobs, err := r.Repo.QueryJobs(ctx, filter, nextPage, order)
	if err != nil {
		log.Warn("Error while querying next jobs")
		return nil, err
	}

	hasNextPage := len(nextJobs) == 1
	return &model.JobResultList{Items: jobs, Count: &count, HasNextPage: &hasNextPage}, nil
}

// JobsStatistics is the resolver for the jobsStatistics field.
// Which sub-queries run is driven by the fields actually requested in the
// GraphQL query (requireField), so unrequested aggregations are skipped.
// Histograms are only available without a groupBy argument.
func (r *queryResolver) JobsStatistics(ctx context.Context, filter []*model.JobFilter, metrics []string, page *model.PageRequest, sortBy *model.SortByAggregate, groupBy *model.Aggregate) ([]*model.JobsStatistics, error) {
	var err error
	var stats []*model.JobsStatistics

	if requireField(ctx, "totalJobs") || requireField(ctx, "totalWalltime") || requireField(ctx, "totalNodes") || requireField(ctx, "totalCores") ||
		requireField(ctx, "totalAccs") || requireField(ctx, "totalNodeHours") || requireField(ctx, "totalCoreHours") || requireField(ctx, "totalAccHours") {
		if groupBy == nil {
			stats, err = r.Repo.JobsStats(ctx, filter)
		} else {
			stats, err = r.Repo.JobsStatsGrouped(ctx, filter, page, sortBy, groupBy)
		}
		// Fix: fail fast. Previously this error was only checked after the
		// job-count calls below, which could overwrite err (losing the
		// failure) and operate on nil/partial stats.
		if err != nil {
			return nil, err
		}
	} else {
		// No totals requested: start from a single empty stats entry.
		stats = []*model.JobsStatistics{{}}
	}

	if groupBy != nil {
		if requireField(ctx, "shortJobs") {
			if stats, err = r.Repo.AddJobCountGrouped(ctx, filter, groupBy, stats, "short"); err != nil {
				return nil, err
			}
		}
		if requireField(ctx, "runningJobs") {
			if stats, err = r.Repo.AddJobCountGrouped(ctx, filter, groupBy, stats, "running"); err != nil {
				return nil, err
			}
		}
	} else {
		if requireField(ctx, "shortJobs") {
			if stats, err = r.Repo.AddJobCount(ctx, filter, stats, "short"); err != nil {
				return nil, err
			}
		}
		if requireField(ctx, "runningJobs") {
			if stats, err = r.Repo.AddJobCount(ctx, filter, stats, "running"); err != nil {
				return nil, err
			}
		}
	}

	if requireField(ctx, "histDuration") || requireField(ctx, "histNumNodes") || requireField(ctx, "histNumCores") || requireField(ctx, "histNumAccs") {
		if groupBy != nil {
			return nil, errors.New("histograms only implemented without groupBy argument")
		}
		if stats[0], err = r.Repo.AddHistograms(ctx, filter, stats[0]); err != nil {
			return nil, err
		}
	}

	if requireField(ctx, "histMetrics") {
		if groupBy != nil {
			return nil, errors.New("metric histograms only implemented without groupBy argument")
		}
		if stats[0], err = r.Repo.AddMetricHistograms(ctx, filter, metrics, stats[0]); err != nil {
			return nil, err
		}
	}

	return stats, nil
}

// RooflineHeatmap is the resolver for the rooflineHeatmap field.
// Delegates to the unexported helper implemented elsewhere in this package.
func (r *queryResolver) RooflineHeatmap(ctx context.Context, filter []*model.JobFilter, rows int, cols int, minX float64, minY float64, maxX float64, maxY float64) ([][]float64, error) {
	return r.rooflineHeatmap(ctx, filter, rows, cols, minX, minY, maxX, maxY)
}

// NodeMetrics is the resolver for the nodeMetrics field.
// Loads node-scoped metric data for the given hosts and time range.
// Admin-only: authenticated non-admin users are rejected.
func (r *queryResolver) NodeMetrics(ctx context.Context, cluster string, nodes []string, scopes []schema.MetricScope, metrics []string, from time.Time, to time.Time) ([]*model.NodeMetrics, error) {
	user := repository.GetUserFromContext(ctx)
	if user != nil && !user.HasRole(schema.RoleAdmin) {
		return nil, errors.New("you need to be an administrator for this query")
	}

	// Default to all metrics configured for the cluster.
	if metrics == nil {
		clusterConfig := archive.GetCluster(cluster)
		if clusterConfig == nil {
			// Fix: previously an unknown cluster name caused a nil-pointer
			// panic here instead of a clean error.
			return nil, fmt.Errorf("unknown cluster: %s", cluster)
		}
		for _, mc := range clusterConfig.MetricConfig {
			metrics = append(metrics, mc.Name)
		}
	}

	data, err := metricDataDispatcher.LoadNodeData(cluster, metrics, nodes, scopes, from, to, ctx)
	if err != nil {
		log.Warn("Error while loading node data")
		return nil, err
	}

	// Re-shape the per-host data into the GraphQL result type.
	nodeMetrics := make([]*model.NodeMetrics, 0, len(data))
	for hostname, metrics := range data {
		host := &model.NodeMetrics{
			Host:    hostname,
			Metrics: make([]*model.JobMetricWithName, 0, len(metrics)*len(scopes)),
		}
		// Best effort: ignore lookup failure, SubCluster stays empty.
		host.SubCluster, _ = archive.GetSubClusterByNode(cluster, hostname)

		for metric, scopedMetrics := range metrics {
			for _, scopedMetric := range scopedMetrics {
				host.Metrics = append(host.Metrics, &model.JobMetricWithName{
					Name:   metric,
					Scope:  schema.MetricScopeNode,
					Metric: scopedMetric,
				})
			}
		}

		nodeMetrics = append(nodeMetrics, host)
	}

	return nodeMetrics, nil
}

// NumberOfNodes is the resolver for the numberOfNodes field.
// Expands the subcluster's node-list expression and counts its nodes.
func (r *subClusterResolver) NumberOfNodes(ctx context.Context, obj *schema.SubCluster) (int, error) {
	parsed, err := archive.ParseNodeList(obj.Nodes)
	if err != nil {
		return 0, err
	}

	return parsed.NodeCount(), nil
}

// The accessors and per-type resolver structs below are gqlgen-generated
// boilerplate wiring the shared *Resolver into each generated interface.
// Do not edit by hand; regenerated from the GraphQL schema.

// Cluster returns generated.ClusterResolver implementation.
func (r *Resolver) Cluster() generated.ClusterResolver { return &clusterResolver{r} }

// Job returns generated.JobResolver implementation.
func (r *Resolver) Job() generated.JobResolver { return &jobResolver{r} }

// MetricValue returns generated.MetricValueResolver implementation.
func (r *Resolver) MetricValue() generated.MetricValueResolver { return &metricValueResolver{r} }

// Mutation returns generated.MutationResolver implementation.
func (r *Resolver) Mutation() generated.MutationResolver { return &mutationResolver{r} }

// Query returns generated.QueryResolver implementation.
func (r *Resolver) Query() generated.QueryResolver { return &queryResolver{r} }

// SubCluster returns generated.SubClusterResolver implementation.
func (r *Resolver) SubCluster() generated.SubClusterResolver { return &subClusterResolver{r} }

// Per-GraphQL-type resolver receivers; each embeds the shared *Resolver.
type (
	clusterResolver     struct{ *Resolver }
	jobResolver         struct{ *Resolver }
	metricValueResolver struct{ *Resolver }
	mutationResolver    struct{ *Resolver }
	queryResolver       struct{ *Resolver }
	subClusterResolver  struct{ *Resolver }
)