subclusters instead of slurm partitions

Lou Knauer
2022-03-14 10:18:56 +01:00
parent 2651b96499
commit 85ad6d9543
15 changed files with 868 additions and 561 deletions

File diff suppressed because it is too large


@@ -6,7 +6,7 @@ type Cluster struct {
     Name string `json:"name"`
     MetricConfig []*MetricConfig `json:"metricConfig"`
     FilterRanges *FilterRanges `json:"filterRanges"`
-    Partitions []*Partition `json:"partitions"`
+    SubClusters []*SubCluster `json:"subClusters"`

     // NOT part of the API:
     MetricDataRepository *MetricDataRepository `json:"metricDataRepository"`


@@ -122,8 +122,16 @@ type PageRequest struct {
     Page int `json:"page"`
 }

-type Partition struct {
+type StringInput struct {
+    Eq *string `json:"eq"`
+    Contains *string `json:"contains"`
+    StartsWith *string `json:"startsWith"`
+    EndsWith *string `json:"endsWith"`
+}
+
+type SubCluster struct {
     Name string `json:"name"`
+    Nodes string `json:"nodes"`
     ProcessorType string `json:"processorType"`
     SocketsPerNode int `json:"socketsPerNode"`
     CoresPerSocket int `json:"coresPerSocket"`
@@ -134,13 +142,6 @@ type Partition struct {
     Topology *Topology `json:"topology"`
 }

-type StringInput struct {
-    Eq *string `json:"eq"`
-    Contains *string `json:"contains"`
-    StartsWith *string `json:"startsWith"`
-    EndsWith *string `json:"endsWith"`
-}
-
 type TimeRange struct {
     From *time.Time `json:"from"`
     To *time.Time `json:"to"`
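
The renamed struct now describes a hardware partition of the cluster rather than a Slurm partition. A hypothetical SubCluster value, only to illustrate the fields and their JSON tags (the name, node expression, and numbers below are invented, and the remaining fields such as ThreadsPerCore, FlopRate*, MemoryBandwidth, and Topology are omitted):

    // Illustrative sketch only; not part of this commit.
    sub := model.SubCluster{
        Name:           "main",
        Nodes:          "node[001-064]",
        ProcessorType:  "Intel Xeon",
        SocketsPerNode: 2,
        CoresPerSocket: 26,
    }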


@@ -33,11 +33,12 @@ type Cluster {
   name: String!
   metricConfig: [MetricConfig!]!
   filterRanges: FilterRanges!
-  partitions: [Partition!]!
+  subClusters: [SubCluster!]!
 }

-type Partition {
+type SubCluster {
   name: String!
+  nodes: String!
   processorType: String!
   socketsPerNode: Int!
   coresPerSocket: Int!


@@ -18,6 +18,10 @@ import (
     "github.com/ClusterCockpit/cc-backend/schema"
 )

+func (r *clusterResolver) SubClusters(ctx context.Context, obj *model.Cluster) ([]*model.SubCluster, error) {
+    panic(fmt.Errorf("not implemented"))
+}
+
 func (r *jobResolver) MetaData(ctx context.Context, obj *schema.Job) (interface{}, error) {
     return r.Repo.FetchMetadata(obj)
 }
@@ -204,7 +208,7 @@ func (r *queryResolver) NodeMetrics(ctx context.Context, cluster string, partiti
     }

     if metrics == nil {
-        for _, mc := range config.GetClusterConfig(cluster).MetricConfig {
+        for _, mc := range config.GetCluster(cluster).MetricConfig {
             metrics = append(metrics, mc.Name)
         }
     }
@@ -236,6 +240,9 @@ func (r *queryResolver) NodeMetrics(ctx context.Context, cluster string, partiti
     return nodeMetrics, nil
 }

+// Cluster returns generated.ClusterResolver implementation.
+func (r *Resolver) Cluster() generated.ClusterResolver { return &clusterResolver{r} }
+
 // Job returns generated.JobResolver implementation.
 func (r *Resolver) Job() generated.JobResolver { return &jobResolver{r} }
@@ -245,6 +252,7 @@ func (r *Resolver) Mutation() generated.MutationResolver { return &mutationResol
 // Query returns generated.QueryResolver implementation.
 func (r *Resolver) Query() generated.QueryResolver { return &queryResolver{r} }

+type clusterResolver struct{ *Resolver }
 type jobResolver struct{ *Resolver }
 type mutationResolver struct{ *Resolver }
 type queryResolver struct{ *Resolver }
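
The new SubClusters resolver above is generated as a panic("not implemented") stub. Since model.Cluster already carries a SubClusters slice (see the first hunk of this commit), one plausible way to complete it later would be to return that slice directly; this is only a sketch under that assumption, not part of the commit:

    // Sketch only (not part of this commit): return the subclusters already
    // attached to the model, assuming they are populated when the cluster
    // configuration is loaded.
    func (r *clusterResolver) SubClusters(ctx context.Context, obj *model.Cluster) ([]*model.SubCluster, error) {
        return obj.SubClusters, nil
    }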


@@ -32,8 +32,8 @@ func (r *queryResolver) jobsStatistics(ctx context.Context, filter []*model.JobF
     // `socketsPerNode` and `coresPerSocket` can differ from cluster to cluster, so we need to explicitly loop over those.
     for _, cluster := range config.Clusters {
-        for _, partition := range cluster.Partitions {
-            corehoursCol := fmt.Sprintf("CAST(ROUND(SUM(job.duration * job.num_nodes * %d * %d) / 3600) as int)", partition.SocketsPerNode, partition.CoresPerSocket)
+        for _, subcluster := range cluster.SubClusters {
+            corehoursCol := fmt.Sprintf("CAST(ROUND(SUM(job.duration * job.num_nodes * %d * %d) / 3600) as int)", subcluster.SocketsPerNode, subcluster.CoresPerSocket)

             var query sq.SelectBuilder
             if groupBy == nil {
                 query = sq.Select(
@@ -54,7 +54,7 @@
             query = query.
                 Where("job.cluster = ?", cluster.Name).
-                Where("job.partition = ?", partition.Name)
+                Where("job.subcluster = ?", subcluster.Name)

             query = repository.SecurityCheck(ctx, query)
             for _, f := range filter {
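
For reference, the core-hours column built above multiplies job duration (seconds) by node count and by the per-subcluster core count, then divides by 3600. A small sketch of the same arithmetic in Go (the helper name and types are illustrative, not part of the commit):

    // Sketch only: the same core-hours arithmetic the SQL column above encodes,
    // using the per-subcluster core counts.
    func coreHours(durationSec, numNodes, socketsPerNode, coresPerSocket int) int {
        // cores per node = socketsPerNode * coresPerSocket
        return durationSec * numNodes * socketsPerNode * coresPerSocket / 3600
    }

Example: a 2-node job running 7200 s on a subcluster with 2 sockets of 26 cores each yields 7200 * 2 * 2 * 26 / 3600 = 208 core-hours.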