Mirror of https://github.com/ClusterCockpit/cc-backend (synced 2026-02-11 13:31:45 +01:00)
add subCluster jobFilter for statusDetail queries
@@ -429,7 +429,7 @@ type TimeRangeOutput {
 input NodeFilter {
   hostname: StringInput
   cluster: StringInput
-  subcluster: StringInput
+  subCluster: StringInput
   schedulerState: SchedulerState
   healthState: MonitoringState
   timeStart: Int
@@ -444,6 +444,7 @@ input JobFilter {
   project: StringInput
   jobName: StringInput
   cluster: StringInput
+  subCluster: StringInput
   partition: StringInput
   duration: IntRange
   energy: FloatRange
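For context, the new field lets status-view clients filter jobs by subcluster directly instead of misusing the partition filter. A minimal sketch of such a query, embedded as a Go string; the jobs(filter: ...) { count } shape and all names and values are illustrative assumptions, not taken from this commit:

package main

// jobsBySubCluster is a hypothetical GraphQL document exercising the new
// JobFilter.subCluster field; selected fields and values are illustrative.
const jobsBySubCluster = `
query {
  jobs(filter: [
    { state: [running] }
    { cluster: { eq: "cluster1" } }
    { subCluster: { eq: "sub0" } }
  ]) {
    count
  }
}`

func main() { _ = jobsBySubCluster }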
go.sum
@@ -4,8 +4,6 @@ github.com/99designs/gqlgen v0.17.85 h1:EkGx3U2FDcxQm8YDLQSpXIAVmpDyZ3IcBMOJi2nH
 github.com/99designs/gqlgen v0.17.85/go.mod h1:yvs8s0bkQlRfqg03YXr3eR4OQUowVhODT/tHzCXnbOU=
 github.com/Azure/go-ntlmssp v0.0.0-20221128193559-754e69321358 h1:mFRzDkZVAjdal+s7s0MwaRv9igoPqLRdzOLzw/8Xvq8=
 github.com/Azure/go-ntlmssp v0.0.0-20221128193559-754e69321358/go.mod h1:chxPXzSsl7ZWRAuOIE23GDNzjWuZquvFlgA8xmpunjU=
-github.com/ClusterCockpit/cc-lib/v2 v2.2.0 h1:gqMsh7zsJMUhaXviXzaZ3gqXcLVgerjRJHzIcwX4FmQ=
-github.com/ClusterCockpit/cc-lib/v2 v2.2.0/go.mod h1:JuxMAuEOaLLNEnnL9U3ejha8kMvsSatLdKPZEgJw6iw=
 github.com/ClusterCockpit/cc-lib/v2 v2.2.1 h1:iCVas+Jc61zFH5S2VG3H1sc7tsn+U4lOJwUYjYZEims=
 github.com/ClusterCockpit/cc-lib/v2 v2.2.1/go.mod h1:JuxMAuEOaLLNEnnL9U3ejha8kMvsSatLdKPZEgJw6iw=
 github.com/KyleBanks/depth v1.2.1 h1:5h8fQADFrWtarTdtDudMmGsC7GPbOAu6RVB3ffsVFHc=

@@ -2712,7 +2712,7 @@ type TimeRangeOutput {
 input NodeFilter {
   hostname: StringInput
   cluster: StringInput
-  subcluster: StringInput
+  subCluster: StringInput
   schedulerState: SchedulerState
   healthState: MonitoringState
   timeStart: Int
@@ -2727,6 +2727,7 @@ input JobFilter {
   project: StringInput
   jobName: StringInput
   cluster: StringInput
+  subCluster: StringInput
   partition: StringInput
   duration: IntRange
   energy: FloatRange
@@ -13199,7 +13200,7 @@ func (ec *executionContext) unmarshalInputJobFilter(ctx context.Context, obj any
 		asMap[k] = v
 	}

-	fieldsInOrder := [...]string{"tags", "dbId", "jobId", "arrayJobId", "user", "project", "jobName", "cluster", "partition", "duration", "energy", "minRunningFor", "numNodes", "numAccelerators", "numHWThreads", "startTime", "state", "metricStats", "shared", "schedule", "node"}
+	fieldsInOrder := [...]string{"tags", "dbId", "jobId", "arrayJobId", "user", "project", "jobName", "cluster", "subCluster", "partition", "duration", "energy", "minRunningFor", "numNodes", "numAccelerators", "numHWThreads", "startTime", "state", "metricStats", "shared", "schedule", "node"}
 	for _, k := range fieldsInOrder {
 		v, ok := asMap[k]
 		if !ok {
@@ -13262,6 +13263,13 @@ func (ec *executionContext) unmarshalInputJobFilter(ctx context.Context, obj any
 				return it, err
 			}
 			it.Cluster = data
+		case "subCluster":
+			ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("subCluster"))
+			data, err := ec.unmarshalOStringInput2ᚖgithubᚗcomᚋClusterCockpitᚋccᚑbackendᚋinternalᚋgraphᚋmodelᚐStringInput(ctx, v)
+			if err != nil {
+				return it, err
+			}
+			it.SubCluster = data
 		case "partition":
 			ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("partition"))
 			data, err := ec.unmarshalOStringInput2ᚖgithubᚗcomᚋClusterCockpitᚋccᚑbackendᚋinternalᚋgraphᚋmodelᚐStringInput(ctx, v)
@@ -13400,7 +13408,7 @@ func (ec *executionContext) unmarshalInputNodeFilter(ctx context.Context, obj an
 		asMap[k] = v
 	}

-	fieldsInOrder := [...]string{"hostname", "cluster", "subcluster", "schedulerState", "healthState", "timeStart"}
+	fieldsInOrder := [...]string{"hostname", "cluster", "subCluster", "schedulerState", "healthState", "timeStart"}
 	for _, k := range fieldsInOrder {
 		v, ok := asMap[k]
 		if !ok {
@@ -13421,13 +13429,13 @@ func (ec *executionContext) unmarshalInputNodeFilter(ctx context.Context, obj an
 				return it, err
 			}
 			it.Cluster = data
-		case "subcluster":
-			ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("subcluster"))
+		case "subCluster":
+			ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("subCluster"))
 			data, err := ec.unmarshalOStringInput2ᚖgithubᚗcomᚋClusterCockpitᚋccᚑbackendᚋinternalᚋgraphᚋmodelᚐStringInput(ctx, v)
 			if err != nil {
 				return it, err
 			}
-			it.Subcluster = data
+			it.SubCluster = data
 		case "schedulerState":
 			ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("schedulerState"))
 			data, err := ec.unmarshalOSchedulerState2ᚖgithubᚗcomᚋClusterCockpitᚋccᚑlibᚋv2ᚋschemaᚐSchedulerState(ctx, v)

@@ -71,6 +71,7 @@ type JobFilter struct {
 	Project *StringInput `json:"project,omitempty"`
 	JobName *StringInput `json:"jobName,omitempty"`
 	Cluster *StringInput `json:"cluster,omitempty"`
+	SubCluster *StringInput `json:"subCluster,omitempty"`
 	Partition *StringInput `json:"partition,omitempty"`
 	Duration *config.IntRange `json:"duration,omitempty"`
 	Energy *FloatRange `json:"energy,omitempty"`
@@ -186,7 +187,7 @@ type NamedStatsWithScope struct {
 type NodeFilter struct {
 	Hostname *StringInput `json:"hostname,omitempty"`
 	Cluster *StringInput `json:"cluster,omitempty"`
-	Subcluster *StringInput `json:"subcluster,omitempty"`
+	SubCluster *StringInput `json:"subCluster,omitempty"`
 	SchedulerState *schema.SchedulerState `json:"schedulerState,omitempty"`
 	HealthState *string `json:"healthState,omitempty"`
 	TimeStart *int `json:"timeStart,omitempty"`

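For orientation, a minimal sketch of building the renamed filters from Go. The model import path and the StringInput{Eq: ...} shape both appear elsewhere in this diff; the subcluster value is illustrative:

package main

import "github.com/ClusterCockpit/cc-backend/internal/graph/model"

func main() {
	sc := "sub0" // illustrative subcluster name
	// JobFilter gains a SubCluster field in this commit:
	jf := &model.JobFilter{SubCluster: &model.StringInput{Eq: &sc}}
	// NodeFilter's field is renamed Subcluster -> SubCluster
	// (JSON tag "subcluster" -> "subCluster"):
	nf := &model.NodeFilter{SubCluster: &model.StringInput{Eq: &sc}}
	_, _ = jf, nf
}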
@@ -149,7 +149,7 @@ func (ccms *CCMetricStore) buildQueries(
 // Similar to buildQueries but uses full node topology instead of job-allocated resources.
 //
 // The function handles:
-// - Subcluster topology resolution (either pre-loaded or per-node lookup)
+// - SubCluster topology resolution (either pre-loaded or per-node lookup)
 // - Full node hardware thread lists (not job-specific subsets)
 // - All accelerators on each node
 // - Metric configuration validation with subcluster filtering

@@ -190,6 +190,9 @@ func BuildWhereClause(filter *model.JobFilter, query sq.SelectBuilder) sq.Select
 	if filter.Cluster != nil {
 		query = buildStringCondition("job.cluster", filter.Cluster, query)
 	}
+	if filter.SubCluster != nil {
+		query = buildStringCondition("job.subcluster", filter.SubCluster, query)
+	}
 	if filter.Partition != nil {
 		query = buildStringCondition("job.cluster_partition", filter.Partition, query)
 	}
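To illustrate what the new branch contributes to the WHERE clause, a sketch using squirrel directly. buildStringCondition is internal to this repository layer, so a plain equality predicate is assumed for an Eq filter, and the value is illustrative:

package main

import (
	"fmt"

	sq "github.com/Masterminds/squirrel"
)

func main() {
	sc := "sub0" // illustrative subcluster name
	// Approximates BuildWhereClause for a JobFilter with only SubCluster set:
	query := sq.Select("job.id").From("job").Where("job.subcluster = ?", sc)
	sql, args, _ := query.ToSql()
	fmt.Println(sql, args)
	// Output: SELECT job.id FROM job WHERE job.subcluster = ? [sub0]
}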
@@ -254,8 +254,8 @@ func (r *NodeRepository) QueryNodes(
 		if f.Cluster != nil {
 			query = buildStringCondition("cluster", f.Cluster, query)
 		}
-		if f.Subcluster != nil {
-			query = buildStringCondition("subcluster", f.Subcluster, query)
+		if f.SubCluster != nil {
+			query = buildStringCondition("subcluster", f.SubCluster, query)
 		}
 		if f.Hostname != nil {
 			query = buildStringCondition("hostname", f.Hostname, query)
@@ -322,8 +322,8 @@ func (r *NodeRepository) CountNodes(
 		if f.Cluster != nil {
 			query = buildStringCondition("cluster", f.Cluster, query)
 		}
-		if f.Subcluster != nil {
-			query = buildStringCondition("subcluster", f.Subcluster, query)
+		if f.SubCluster != nil {
+			query = buildStringCondition("subcluster", f.SubCluster, query)
 		}
 		if f.Hostname != nil {
 			query = buildStringCondition("hostname", f.Hostname, query)
@@ -440,8 +440,8 @@ func (r *NodeRepository) CountStates(ctx context.Context, filters []*model.NodeF
 		if f.Cluster != nil {
 			query = buildStringCondition("cluster", f.Cluster, query)
 		}
-		if f.Subcluster != nil {
-			query = buildStringCondition("subcluster", f.Subcluster, query)
+		if f.SubCluster != nil {
+			query = buildStringCondition("subcluster", f.SubCluster, query)
 		}
 		if f.SchedulerState != nil {
 			query = query.Where("node_state = ?", f.SchedulerState)
@@ -504,8 +504,8 @@ func (r *NodeRepository) CountStatesTimed(ctx context.Context, filters []*model.
 		if f.Cluster != nil {
 			query = buildStringCondition("cluster", f.Cluster, query)
 		}
-		if f.Subcluster != nil {
-			query = buildStringCondition("subcluster", f.Subcluster, query)
+		if f.SubCluster != nil {
+			query = buildStringCondition("subcluster", f.SubCluster, query)
 		}
 		if f.SchedulerState != nil {
 			query = query.Where("node_state = ?", f.SchedulerState)
@@ -573,7 +573,7 @@ func (r *NodeRepository) GetNodesForList(
 		queryFilters = append(queryFilters, &model.NodeFilter{Cluster: &model.StringInput{Eq: &cluster}})
 	}
 	if subCluster != "" {
-		queryFilters = append(queryFilters, &model.NodeFilter{Subcluster: &model.StringInput{Eq: &subCluster}})
+		queryFilters = append(queryFilters, &model.NodeFilter{SubCluster: &model.StringInput{Eq: &subCluster}})
 	}
 	if nodeFilter != "" && stateFilter != "notindb" {
 		queryFilters = append(queryFilters, &model.NodeFilter{Hostname: &model.StringInput{Contains: &nodeFilter}})

@@ -196,7 +196,7 @@ func (r *JobRepository) buildStatsQuery(
 // - filter: Filters to apply (time range, cluster, job state, etc.)
 // - page: Optional pagination (ItemsPerPage: -1 disables pagination)
 // - sortBy: Optional sort column (totalJobs, totalWalltime, totalCoreHours, etc.)
-// - groupBy: Required grouping dimension (User, Project, Cluster, or Subcluster)
+// - groupBy: Required grouping dimension (User, Project, Cluster, or SubCluster)
 //
 // Returns a slice of JobsStatistics, one per group, with:
 // - ID: The group identifier (username, project name, cluster name, etc.)
@@ -420,7 +420,7 @@ func LoadJobStat(job *schema.Job, metric string, statType string) float64 {
 // Parameters:
 // - ctx: Context for security checks
 // - filter: Filters to apply
-// - groupBy: Grouping dimension (User, Project, Cluster, or Subcluster)
+// - groupBy: Grouping dimension (User, Project, Cluster, or SubCluster)
 //
 // Returns JobsStatistics with only ID and TotalJobs populated for each group.
 func (r *JobRepository) JobCountGrouped(

@@ -49,7 +49,7 @@ func RegisterFootprintWorker() {
 			if err != nil {
 				continue
 			}
-			// NOTE: Additional Subcluster Loop Could Allow For Limited List Of Footprint-Metrics Only.
+			// NOTE: Additional SubCluster Loop Could Allow For Limited List Of Footprint-Metrics Only.
 			// - Chunk-Size Would Then Be 'SubCluster' (Running Jobs, Transactions) as Lists Can Change Within SCs
 			// - Would Require Review of 'updateFootprint' Usage (Logic Could Possibly Be Included Here Completely)
 			allMetrics := make([]string, 0)

@@ -59,7 +59,7 @@
   const canvasPrefix = $derived(`${presetCluster}-${presetSubCluster ? presetSubCluster : ''}`)

   const statusFilter = $derived(presetSubCluster
-    ? [{ state: ["running"] }, { cluster: { eq: presetCluster} }, { partition: { eq: presetSubCluster } }]
+    ? [{ state: ["running"] }, { cluster: { eq: presetCluster} }, { subCluster: { eq: presetSubCluster } }]
     : [{ state: ["running"] }, { cluster: { eq: presetCluster} }]
   );
   const topJobsQuery = $derived(queryStore({
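The frontend change swaps the misused partition key for the new subCluster key. As a sketch of the resulting filter array on the wire, written in Go since the shape is plain JSON; all values are illustrative:

package main

import (
	"encoding/json"
	"fmt"
)

func main() {
	// Filter list the status view derives when a subcluster is preset:
	filter := []map[string]any{
		{"state": []string{"running"}},
		{"cluster": map[string]string{"eq": "cluster1"}},
		{"subCluster": map[string]string{"eq": "sub0"}}, // previously sent as "partition"
	}
	b, _ := json.Marshal(filter)
	fmt.Println(string(b))
}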