Mirror of https://github.com/ClusterCockpit/cc-backend (synced 2026-02-11 13:31:45 +01:00)

Commit: add subCluster jobFilter for statusDetail queries

@@ -429,7 +429,7 @@ type TimeRangeOutput {
 input NodeFilter {
   hostname: StringInput
   cluster: StringInput
-  subcluster: StringInput
+  subCluster: StringInput
   schedulerState: SchedulerState
   healthState: MonitoringState
   timeStart: Int

@@ -444,6 +444,7 @@ input JobFilter {
   project: StringInput
   jobName: StringInput
   cluster: StringInput
+  subCluster: StringInput
   partition: StringInput
   duration: IntRange
   energy: FloatRange

go.sum (2 lines changed)

@@ -4,8 +4,6 @@ github.com/99designs/gqlgen v0.17.85 h1:EkGx3U2FDcxQm8YDLQSpXIAVmpDyZ3IcBMOJi2nH
 github.com/99designs/gqlgen v0.17.85/go.mod h1:yvs8s0bkQlRfqg03YXr3eR4OQUowVhODT/tHzCXnbOU=
 github.com/Azure/go-ntlmssp v0.0.0-20221128193559-754e69321358 h1:mFRzDkZVAjdal+s7s0MwaRv9igoPqLRdzOLzw/8Xvq8=
 github.com/Azure/go-ntlmssp v0.0.0-20221128193559-754e69321358/go.mod h1:chxPXzSsl7ZWRAuOIE23GDNzjWuZquvFlgA8xmpunjU=
-github.com/ClusterCockpit/cc-lib/v2 v2.2.0 h1:gqMsh7zsJMUhaXviXzaZ3gqXcLVgerjRJHzIcwX4FmQ=
-github.com/ClusterCockpit/cc-lib/v2 v2.2.0/go.mod h1:JuxMAuEOaLLNEnnL9U3ejha8kMvsSatLdKPZEgJw6iw=
 github.com/ClusterCockpit/cc-lib/v2 v2.2.1 h1:iCVas+Jc61zFH5S2VG3H1sc7tsn+U4lOJwUYjYZEims=
 github.com/ClusterCockpit/cc-lib/v2 v2.2.1/go.mod h1:JuxMAuEOaLLNEnnL9U3ejha8kMvsSatLdKPZEgJw6iw=
 github.com/KyleBanks/depth v1.2.1 h1:5h8fQADFrWtarTdtDudMmGsC7GPbOAu6RVB3ffsVFHc=

@@ -2712,7 +2712,7 @@ type TimeRangeOutput {
 input NodeFilter {
   hostname: StringInput
   cluster: StringInput
-  subcluster: StringInput
+  subCluster: StringInput
   schedulerState: SchedulerState
   healthState: MonitoringState
   timeStart: Int

@@ -2727,6 +2727,7 @@ input JobFilter {
   project: StringInput
   jobName: StringInput
   cluster: StringInput
+  subCluster: StringInput
   partition: StringInput
   duration: IntRange
   energy: FloatRange

@@ -13199,7 +13200,7 @@ func (ec *executionContext) unmarshalInputJobFilter(ctx context.Context, obj any
         asMap[k] = v
     }

-    fieldsInOrder := [...]string{"tags", "dbId", "jobId", "arrayJobId", "user", "project", "jobName", "cluster", "partition", "duration", "energy", "minRunningFor", "numNodes", "numAccelerators", "numHWThreads", "startTime", "state", "metricStats", "shared", "schedule", "node"}
+    fieldsInOrder := [...]string{"tags", "dbId", "jobId", "arrayJobId", "user", "project", "jobName", "cluster", "subCluster", "partition", "duration", "energy", "minRunningFor", "numNodes", "numAccelerators", "numHWThreads", "startTime", "state", "metricStats", "shared", "schedule", "node"}
     for _, k := range fieldsInOrder {
         v, ok := asMap[k]
         if !ok {

@@ -13262,6 +13263,13 @@ func (ec *executionContext) unmarshalInputJobFilter(ctx context.Context, obj any
                 return it, err
             }
             it.Cluster = data
+        case "subCluster":
+            ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("subCluster"))
+            data, err := ec.unmarshalOStringInput2ᚖgithubᚗcomᚋClusterCockpitᚋccᚑbackendᚋinternalᚋgraphᚋmodelᚐStringInput(ctx, v)
+            if err != nil {
+                return it, err
+            }
+            it.SubCluster = data
         case "partition":
             ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("partition"))
             data, err := ec.unmarshalOStringInput2ᚖgithubᚗcomᚋClusterCockpitᚋccᚑbackendᚋinternalᚋgraphᚋmodelᚐStringInput(ctx, v)

@@ -13400,7 +13408,7 @@ func (ec *executionContext) unmarshalInputNodeFilter(ctx context.Context, obj an
         asMap[k] = v
     }

-    fieldsInOrder := [...]string{"hostname", "cluster", "subcluster", "schedulerState", "healthState", "timeStart"}
+    fieldsInOrder := [...]string{"hostname", "cluster", "subCluster", "schedulerState", "healthState", "timeStart"}
     for _, k := range fieldsInOrder {
         v, ok := asMap[k]
         if !ok {

@@ -13421,13 +13429,13 @@ func (ec *executionContext) unmarshalInputNodeFilter(ctx context.Context, obj an
                 return it, err
             }
             it.Cluster = data
-        case "subcluster":
-            ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("subcluster"))
+        case "subCluster":
+            ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("subCluster"))
             data, err := ec.unmarshalOStringInput2ᚖgithubᚗcomᚋClusterCockpitᚋccᚑbackendᚋinternalᚋgraphᚋmodelᚐStringInput(ctx, v)
             if err != nil {
                 return it, err
             }
-            it.Subcluster = data
+            it.SubCluster = data
         case "schedulerState":
             ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("schedulerState"))
             data, err := ec.unmarshalOSchedulerState2ᚖgithubᚗcomᚋClusterCockpitᚋccᚑlibᚋv2ᚋschemaᚐSchedulerState(ctx, v)

@@ -71,6 +71,7 @@ type JobFilter struct {
     Project    *StringInput     `json:"project,omitempty"`
     JobName    *StringInput     `json:"jobName,omitempty"`
     Cluster    *StringInput     `json:"cluster,omitempty"`
+    SubCluster *StringInput     `json:"subCluster,omitempty"`
     Partition  *StringInput     `json:"partition,omitempty"`
     Duration   *config.IntRange `json:"duration,omitempty"`
     Energy     *FloatRange      `json:"energy,omitempty"`
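
For orientation, a minimal sketch of the wire format this model change implies. The structs below trim model.JobFilter and model.StringInput to the fields used here; the JobFilter json tags come from the hunk above, while the "eq" tag on StringInput is an assumption about its generated tags (they do not appear in this diff), and the cluster/subcluster names are hypothetical.

package main

import (
	"encoding/json"
	"fmt"
)

// Trimmed stand-ins for the generated model types.
type StringInput struct {
	Eq *string `json:"eq,omitempty"` // assumed tag
}

type JobFilter struct {
	Cluster    *StringInput `json:"cluster,omitempty"`
	SubCluster *StringInput `json:"subCluster,omitempty"` // new in this commit
}

func main() {
	cluster, sub := "fritz", "spr1tb" // hypothetical names
	b, _ := json.Marshal(JobFilter{
		Cluster:    &StringInput{Eq: &cluster},
		SubCluster: &StringInput{Eq: &sub},
	})
	fmt.Println(string(b))
	// {"cluster":{"eq":"fritz"},"subCluster":{"eq":"spr1tb"}}
}

Note that the GraphQL/JSON key is camelCase ("subCluster"), matching the schema change at the top of this commit.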

@@ -186,7 +187,7 @@ type NamedStatsWithScope struct {
 type NodeFilter struct {
     Hostname       *StringInput           `json:"hostname,omitempty"`
     Cluster        *StringInput           `json:"cluster,omitempty"`
-    Subcluster     *StringInput           `json:"subcluster,omitempty"`
+    SubCluster     *StringInput           `json:"subCluster,omitempty"`
     SchedulerState *schema.SchedulerState `json:"schedulerState,omitempty"`
     HealthState    *string                `json:"healthState,omitempty"`
     TimeStart      *int                   `json:"timeStart,omitempty"`

@@ -149,7 +149,7 @@ func (ccms *CCMetricStore) buildQueries(
 // Similar to buildQueries but uses full node topology instead of job-allocated resources.
 //
 // The function handles:
-//   - Subcluster topology resolution (either pre-loaded or per-node lookup)
+//   - SubCluster topology resolution (either pre-loaded or per-node lookup)
 //   - Full node hardware thread lists (not job-specific subsets)
 //   - All accelerators on each node
 //   - Metric configuration validation with subcluster filtering

@@ -190,6 +190,9 @@ func BuildWhereClause(filter *model.JobFilter, query sq.SelectBuilder) sq.Select
     if filter.Cluster != nil {
         query = buildStringCondition("job.cluster", filter.Cluster, query)
     }
+    if filter.SubCluster != nil {
+        query = buildStringCondition("job.subcluster", filter.SubCluster, query)
+    }
     if filter.Partition != nil {
         query = buildStringCondition("job.cluster_partition", filter.Partition, query)
     }
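
To make the new branch concrete, here is a self-contained sketch of how such a condition composes into SQL with the squirrel builder used above. buildStringCondition is simplified to two match modes (the real helper in cc-backend supports more); the Eq and Contains fields of StringInput are taken from their uses elsewhere in this diff, and the subcluster name is hypothetical.

package main

import (
	"fmt"

	sq "github.com/Masterminds/squirrel"
)

type StringInput struct {
	Eq       *string
	Contains *string
}

type JobFilter struct {
	Cluster    *StringInput
	SubCluster *StringInput
}

// Simplified stand-in for the helper called in the hunk above: maps a
// StringInput onto a WHERE condition for the given column.
func buildStringCondition(field string, cond *StringInput, query sq.SelectBuilder) sq.SelectBuilder {
	if cond.Eq != nil {
		return query.Where(field+" = ?", *cond.Eq)
	}
	if cond.Contains != nil {
		return query.Where(field+" LIKE ?", "%"+*cond.Contains+"%")
	}
	return query
}

func main() {
	sub := "a100" // hypothetical subcluster name
	filter := &JobFilter{SubCluster: &StringInput{Eq: &sub}}

	query := sq.Select("job.id").From("job")
	if filter.SubCluster != nil {
		query = buildStringCondition("job.subcluster", filter.SubCluster, query)
	}
	sql, args, _ := query.ToSql()
	fmt.Println(sql, args)
	// SELECT job.id FROM job WHERE job.subcluster = ? [a100]
}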

@@ -254,8 +254,8 @@ func (r *NodeRepository) QueryNodes(
     if f.Cluster != nil {
         query = buildStringCondition("cluster", f.Cluster, query)
     }
-    if f.Subcluster != nil {
-        query = buildStringCondition("subcluster", f.Subcluster, query)
+    if f.SubCluster != nil {
+        query = buildStringCondition("subcluster", f.SubCluster, query)
     }
     if f.Hostname != nil {
         query = buildStringCondition("hostname", f.Hostname, query)

@@ -322,8 +322,8 @@ func (r *NodeRepository) CountNodes(
     if f.Cluster != nil {
         query = buildStringCondition("cluster", f.Cluster, query)
     }
-    if f.Subcluster != nil {
-        query = buildStringCondition("subcluster", f.Subcluster, query)
+    if f.SubCluster != nil {
+        query = buildStringCondition("subcluster", f.SubCluster, query)
     }
     if f.Hostname != nil {
         query = buildStringCondition("hostname", f.Hostname, query)

@@ -440,8 +440,8 @@ func (r *NodeRepository) CountStates(ctx context.Context, filters []*model.NodeF
     if f.Cluster != nil {
         query = buildStringCondition("cluster", f.Cluster, query)
     }
-    if f.Subcluster != nil {
-        query = buildStringCondition("subcluster", f.Subcluster, query)
+    if f.SubCluster != nil {
+        query = buildStringCondition("subcluster", f.SubCluster, query)
     }
     if f.SchedulerState != nil {
         query = query.Where("node_state = ?", f.SchedulerState)

@@ -504,8 +504,8 @@ func (r *NodeRepository) CountStatesTimed(ctx context.Context, filters []*model.
     if f.Cluster != nil {
         query = buildStringCondition("cluster", f.Cluster, query)
     }
-    if f.Subcluster != nil {
-        query = buildStringCondition("subcluster", f.Subcluster, query)
+    if f.SubCluster != nil {
+        query = buildStringCondition("subcluster", f.SubCluster, query)
     }
     if f.SchedulerState != nil {
         query = query.Where("node_state = ?", f.SchedulerState)

@@ -573,7 +573,7 @@ func (r *NodeRepository) GetNodesForList(
         queryFilters = append(queryFilters, &model.NodeFilter{Cluster: &model.StringInput{Eq: &cluster}})
     }
     if subCluster != "" {
-        queryFilters = append(queryFilters, &model.NodeFilter{Subcluster: &model.StringInput{Eq: &subCluster}})
+        queryFilters = append(queryFilters, &model.NodeFilter{SubCluster: &model.StringInput{Eq: &subCluster}})
     }
     if nodeFilter != "" && stateFilter != "notindb" {
         queryFilters = append(queryFilters, &model.NodeFilter{Hostname: &model.StringInput{Contains: &nodeFilter}})
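
The four node-repository hunks share one pattern: each *model.NodeFilter in the slice contributes WHERE conditions, so filters in the list are ANDed. A compact sketch of that pattern, reusing the trimmed StringInput and buildStringCondition stand-ins from the BuildWhereClause sketch above (NodeFilter here is cut down to three fields):

// Sketch only. Note the asymmetry the commit settles on: the Go field and
// GraphQL argument are camelCase (SubCluster/subCluster) while the database
// column remains lowercase "subcluster".
type NodeFilter struct {
	Hostname   *StringInput
	Cluster    *StringInput
	SubCluster *StringInput
}

func applyNodeFilters(filters []*NodeFilter, query sq.SelectBuilder) sq.SelectBuilder {
	for _, f := range filters {
		if f.Cluster != nil {
			query = buildStringCondition("cluster", f.Cluster, query)
		}
		if f.SubCluster != nil {
			query = buildStringCondition("subcluster", f.SubCluster, query)
		}
		if f.Hostname != nil {
			query = buildStringCondition("hostname", f.Hostname, query)
		}
	}
	return query
}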

@@ -196,7 +196,7 @@ func (r *JobRepository) buildStatsQuery(
 //   - filter: Filters to apply (time range, cluster, job state, etc.)
 //   - page: Optional pagination (ItemsPerPage: -1 disables pagination)
 //   - sortBy: Optional sort column (totalJobs, totalWalltime, totalCoreHours, etc.)
-//   - groupBy: Required grouping dimension (User, Project, Cluster, or Subcluster)
+//   - groupBy: Required grouping dimension (User, Project, Cluster, or SubCluster)
 //
 // Returns a slice of JobsStatistics, one per group, with:
 //   - ID: The group identifier (username, project name, cluster name, etc.)

@@ -420,7 +420,7 @@ func LoadJobStat(job *schema.Job, metric string, statType string) float64 {
 // Parameters:
 //   - ctx: Context for security checks
 //   - filter: Filters to apply
-//   - groupBy: Grouping dimension (User, Project, Cluster, or Subcluster)
+//   - groupBy: Grouping dimension (User, Project, Cluster, or SubCluster)
 //
 // Returns JobsStatistics with only ID and TotalJobs populated for each group.
 func (r *JobRepository) JobCountGrouped(
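
For the SubCluster grouping dimension these comments describe, the query plausibly compiles to a GROUP BY over the subcluster column. A sketch of just that query shape, under assumptions: the column name follows the "job.subcluster" usage elsewhere in this diff, and the real buildStatsQuery/JobCountGrouped bodies are not part of this commit.

package main

import (
	"fmt"

	sq "github.com/Masterminds/squirrel"
)

func main() {
	// Hypothetical group-by-SubCluster variant of the statistics query.
	query := sq.Select("job.subcluster", "COUNT(job.id) AS totalJobs").
		From("job").
		GroupBy("job.subcluster")

	sql, _, _ := query.ToSql()
	fmt.Println(sql)
	// SELECT job.subcluster, COUNT(job.id) AS totalJobs FROM job GROUP BY job.subcluster
}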

@@ -49,7 +49,7 @@ func RegisterFootprintWorker() {
     if err != nil {
         continue
     }
-    // NOTE: Additional Subcluster Loop Could Allow For Limited List Of Footprint-Metrics Only.
+    // NOTE: Additional SubCluster Loop Could Allow For Limited List Of Footprint-Metrics Only.
     // - Chunk-Size Would Then Be 'SubCluster' (Running Jobs, Transactions) as Lists Can Change Within SCs
     // - Would Require Review of 'updateFootprint' Usage (Logic Could Possibly Be Included Here Completely)
     allMetrics := make([]string, 0)

@@ -59,7 +59,7 @@
 const canvasPrefix = $derived(`${presetCluster}-${presetSubCluster ? presetSubCluster : ''}`)

 const statusFilter = $derived(presetSubCluster
-  ? [{ state: ["running"] }, { cluster: { eq: presetCluster} }, { partition: { eq: presetSubCluster } }]
+  ? [{ state: ["running"] }, { cluster: { eq: presetCluster} }, { subCluster: { eq: presetSubCluster } }]
   : [{ state: ["running"] }, { cluster: { eq: presetCluster} }]
 );
 const topJobsQuery = $derived(queryStore({