diff --git a/api/schema.graphqls b/api/schema.graphqls
index c19fc64..d05c658 100644
--- a/api/schema.graphqls
+++ b/api/schema.graphqls
@@ -12,12 +12,13 @@ type Node {
hostname: String!
cluster: String!
subCluster: String!
+ runningJobs: Int!
nodeState: NodeState!
- HealthState: MonitoringState!
+ healthState: MonitoringState!
metaData: Any
}
-type NodeStats {
+type NodeStates {
state: String!
count: Int!
}
@@ -236,10 +237,12 @@ enum Aggregate {
USER
PROJECT
CLUSTER
+ SUBCLUSTER
}
enum SortByAggregate {
TOTALWALLTIME
TOTALJOBS
+ TOTALUSERS
TOTALNODES
TOTALNODEHOURS
TOTALCORES
@@ -300,9 +303,10 @@ type Query {
user(username: String!): User
allocatedNodes(cluster: String!): [Count!]!
+  ## Node queries (new)
node(id: ID!): Node
nodes(filter: [NodeFilter!], order: OrderByInput): NodeStateResultList!
- nodeStats(filter: [NodeFilter!]): [NodeStats!]!
+ nodeStates(filter: [NodeFilter!]): [NodeStates!]!
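+  # Example: { nodeStates(filter: {cluster: {eq: "clusterA"}}) { state count } }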
job(id: ID!): Job
jobMetrics(
@@ -357,6 +361,7 @@ type Query {
from: Time!
to: Time!
): [NodeMetrics!]!
+
nodeMetricsList(
cluster: String!
subCluster: String!
@@ -393,6 +398,7 @@ type TimeRangeOutput {
input NodeFilter {
hostname: StringInput
cluster: StringInput
+ subcluster: StringInput
nodeState: NodeState
healthState: MonitoringState
}
@@ -497,11 +503,12 @@ type MetricHistoPoint {
}
type JobsStatistics {
- id: ID! # If `groupBy` was used, ID of the user/project/cluster
+ id: ID! # If `groupBy` was used, ID of the user/project/cluster/subcluster
name: String! # if User-Statistics: Given Name of Account (ID) Owner
+  totalUsers: Int! # if *not* User-Statistics: Number of distinct users within the matched jobs
totalJobs: Int! # Number of jobs
runningJobs: Int! # Number of running jobs
- shortJobs: Int! # Number of jobs with a duration of less than duration
+  shortJobs: Int! # Number of jobs shorter than the configured ShortRunningJobsDuration
totalWalltime: Int! # Sum of the duration of all matched jobs in hours
totalNodes: Int! # Sum of the nodes of all matched jobs
totalNodeHours: Int! # Sum of the node hours of all matched jobs
diff --git a/internal/api/job.go b/internal/api/job.go
index 9367bcc..2ce2a3a 100644
--- a/internal/api/job.go
+++ b/internal/api/job.go
@@ -113,6 +113,7 @@ func (api *RestApi) getJobs(rw http.ResponseWriter, r *http.Request) {
for key, vals := range r.URL.Query() {
switch key {
+ // TODO: add project filter
case "state":
for _, s := range vals {
state := schema.JobState(s)
@@ -125,7 +126,7 @@ func (api *RestApi) getJobs(rw http.ResponseWriter, r *http.Request) {
}
case "cluster":
filter.Cluster = &model.StringInput{Eq: &vals[0]}
- case "start-time":
+ case "start-time": // ?startTime=1753707480-1754053139
st := strings.Split(vals[0], "-")
if len(st) != 2 {
handleError(fmt.Errorf("invalid query parameter value: startTime"),
diff --git a/internal/auth/auth.go b/internal/auth/auth.go
index 5a80f7c..78c66ae 100644
--- a/internal/auth/auth.go
+++ b/internal/auth/auth.go
@@ -402,7 +402,7 @@ func (auth *Authentication) AuthUserApi(
return
}
case len(user.Roles) >= 2:
- if user.HasRole(schema.RoleApi) && user.HasAnyRole([]schema.Role{schema.RoleUser, schema.RoleManager, schema.RoleAdmin}) {
+ if user.HasRole(schema.RoleApi) && user.HasAnyRole([]schema.Role{schema.RoleUser, schema.RoleManager, schema.RoleSupport, schema.RoleAdmin}) {
ctx := context.WithValue(r.Context(), repository.ContextUserKey, user)
onsuccess.ServeHTTP(rw, r.WithContext(ctx))
return
@@ -530,6 +530,7 @@ func securedCheck(user *schema.User, r *http.Request) error {
IPAddress = r.RemoteAddr
}
+	// FIXME: This strips everything after the first colon and breaks IPv6 addresses
if strings.Contains(IPAddress, ":") {
IPAddress = strings.Split(IPAddress, ":")[0]
}
diff --git a/internal/graph/generated/generated.go b/internal/graph/generated/generated.go
index 778f1d6..766b748 100644
--- a/internal/graph/generated/generated.go
+++ b/internal/graph/generated/generated.go
@@ -202,6 +202,7 @@ type ComplexityRoot struct {
TotalJobs func(childComplexity int) int
TotalNodeHours func(childComplexity int) int
TotalNodes func(childComplexity int) int
+ TotalUsers func(childComplexity int) int
TotalWalltime func(childComplexity int) int
}
@@ -277,6 +278,7 @@ type ComplexityRoot struct {
ID func(childComplexity int) int
MetaData func(childComplexity int) int
NodeState func(childComplexity int) int
+ RunningJobs func(childComplexity int) int
SubCluster func(childComplexity int) int
}
@@ -291,7 +293,7 @@ type ComplexityRoot struct {
Items func(childComplexity int) int
}
- NodeStats struct {
+ NodeStates struct {
Count func(childComplexity int) int
State func(childComplexity int) int
}
@@ -319,7 +321,7 @@ type ComplexityRoot struct {
Node func(childComplexity int, id string) int
NodeMetrics func(childComplexity int, cluster string, nodes []string, scopes []schema.MetricScope, metrics []string, from time.Time, to time.Time) int
NodeMetricsList func(childComplexity int, cluster string, subCluster string, nodeFilter string, scopes []schema.MetricScope, metrics []string, from time.Time, to time.Time, page *model.PageRequest, resolution *int) int
- NodeStats func(childComplexity int, filter []*model.NodeFilter) int
+ NodeStates func(childComplexity int, filter []*model.NodeFilter) int
Nodes func(childComplexity int, filter []*model.NodeFilter, order *model.OrderByInput) int
RooflineHeatmap func(childComplexity int, filter []*model.JobFilter, rows int, cols int, minX float64, minY float64, maxX float64, maxY float64) int
ScopedJobStats func(childComplexity int, id string, metrics []string, scopes []schema.MetricScope) int
@@ -445,6 +447,7 @@ type MutationResolver interface {
UpdateConfiguration(ctx context.Context, name string, value string) (*string, error)
}
type NodeResolver interface {
+ RunningJobs(ctx context.Context, obj *schema.Node) (int, error)
NodeState(ctx context.Context, obj *schema.Node) (string, error)
HealthState(ctx context.Context, obj *schema.Node) (schema.NodeState, error)
MetaData(ctx context.Context, obj *schema.Node) (any, error)
@@ -457,7 +460,7 @@ type QueryResolver interface {
AllocatedNodes(ctx context.Context, cluster string) ([]*model.Count, error)
Node(ctx context.Context, id string) (*schema.Node, error)
Nodes(ctx context.Context, filter []*model.NodeFilter, order *model.OrderByInput) (*model.NodeStateResultList, error)
- NodeStats(ctx context.Context, filter []*model.NodeFilter) ([]*model.NodeStats, error)
+ NodeStates(ctx context.Context, filter []*model.NodeFilter) ([]*model.NodeStates, error)
Job(ctx context.Context, id string) (*schema.Job, error)
JobMetrics(ctx context.Context, id string, metrics []string, scopes []schema.MetricScope, resolution *int) ([]*model.JobMetricWithName, error)
JobStats(ctx context.Context, id string, metrics []string) ([]*model.NamedStats, error)
@@ -1165,6 +1168,13 @@ func (e *executableSchema) Complexity(ctx context.Context, typeName, field strin
return e.complexity.JobsStatistics.TotalNodes(childComplexity), true
+ case "JobsStatistics.totalUsers":
+ if e.complexity.JobsStatistics.TotalUsers == nil {
+ break
+ }
+
+ return e.complexity.JobsStatistics.TotalUsers(childComplexity), true
+
case "JobsStatistics.totalWalltime":
if e.complexity.JobsStatistics.TotalWalltime == nil {
break
@@ -1475,7 +1485,7 @@ func (e *executableSchema) Complexity(ctx context.Context, typeName, field strin
return e.complexity.Node.Cluster(childComplexity), true
- case "Node.HealthState":
+ case "Node.healthState":
if e.complexity.Node.HealthState == nil {
break
}
@@ -1510,6 +1520,13 @@ func (e *executableSchema) Complexity(ctx context.Context, typeName, field strin
return e.complexity.Node.NodeState(childComplexity), true
+ case "Node.runningJobs":
+ if e.complexity.Node.RunningJobs == nil {
+ break
+ }
+
+ return e.complexity.Node.RunningJobs(childComplexity), true
+
case "Node.subCluster":
if e.complexity.Node.SubCluster == nil {
break
@@ -1552,19 +1569,19 @@ func (e *executableSchema) Complexity(ctx context.Context, typeName, field strin
return e.complexity.NodeStateResultList.Items(childComplexity), true
- case "NodeStats.count":
- if e.complexity.NodeStats.Count == nil {
+ case "NodeStates.count":
+ if e.complexity.NodeStates.Count == nil {
break
}
- return e.complexity.NodeStats.Count(childComplexity), true
+ return e.complexity.NodeStates.Count(childComplexity), true
- case "NodeStats.state":
- if e.complexity.NodeStats.State == nil {
+ case "NodeStates.state":
+ if e.complexity.NodeStates.State == nil {
break
}
- return e.complexity.NodeStats.State(childComplexity), true
+ return e.complexity.NodeStates.State(childComplexity), true
case "NodesResultList.count":
if e.complexity.NodesResultList.Count == nil {
@@ -1754,17 +1771,17 @@ func (e *executableSchema) Complexity(ctx context.Context, typeName, field strin
return e.complexity.Query.NodeMetricsList(childComplexity, args["cluster"].(string), args["subCluster"].(string), args["nodeFilter"].(string), args["scopes"].([]schema.MetricScope), args["metrics"].([]string), args["from"].(time.Time), args["to"].(time.Time), args["page"].(*model.PageRequest), args["resolution"].(*int)), true
- case "Query.nodeStats":
- if e.complexity.Query.NodeStats == nil {
+ case "Query.nodeStates":
+ if e.complexity.Query.NodeStates == nil {
break
}
- args, err := ec.field_Query_nodeStats_args(ctx, rawArgs)
+ args, err := ec.field_Query_nodeStates_args(ctx, rawArgs)
if err != nil {
return 0, false
}
- return e.complexity.Query.NodeStats(childComplexity, args["filter"].([]*model.NodeFilter)), true
+ return e.complexity.Query.NodeStates(childComplexity, args["filter"].([]*model.NodeFilter)), true
case "Query.nodes":
if e.complexity.Query.Nodes == nil {
@@ -2334,12 +2351,13 @@ type Node {
hostname: String!
cluster: String!
subCluster: String!
+ runningJobs: Int!
nodeState: NodeState!
- HealthState: MonitoringState!
+ healthState: MonitoringState!
metaData: Any
}
-type NodeStats {
+type NodeStates {
state: String!
count: Int!
}
@@ -2558,10 +2576,12 @@ enum Aggregate {
USER
PROJECT
CLUSTER
+ SUBCLUSTER
}
enum SortByAggregate {
TOTALWALLTIME
TOTALJOBS
+ TOTALUSERS
TOTALNODES
TOTALNODEHOURS
TOTALCORES
@@ -2622,9 +2642,10 @@ type Query {
user(username: String!): User
allocatedNodes(cluster: String!): [Count!]!
+  ## Node queries (new)
node(id: ID!): Node
nodes(filter: [NodeFilter!], order: OrderByInput): NodeStateResultList!
- nodeStats(filter: [NodeFilter!]): [NodeStats!]!
+ nodeStates(filter: [NodeFilter!]): [NodeStates!]!
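+  # Example: { nodeStates(filter: {cluster: {eq: "clusterA"}}) { state count } }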
job(id: ID!): Job
jobMetrics(
@@ -2679,6 +2700,7 @@ type Query {
from: Time!
to: Time!
): [NodeMetrics!]!
+
nodeMetricsList(
cluster: String!
subCluster: String!
@@ -2715,6 +2737,7 @@ type TimeRangeOutput {
input NodeFilter {
hostname: StringInput
cluster: StringInput
+ subcluster: StringInput
nodeState: NodeState
healthState: MonitoringState
}
@@ -2819,11 +2842,12 @@ type MetricHistoPoint {
}
type JobsStatistics {
- id: ID! # If ` + "`" + `groupBy` + "`" + ` was used, ID of the user/project/cluster
+ id: ID! # If ` + "`" + `groupBy` + "`" + ` was used, ID of the user/project/cluster/subcluster
name: String! # if User-Statistics: Given Name of Account (ID) Owner
+  totalUsers: Int! # if *not* User-Statistics: Number of distinct users within the matched jobs
totalJobs: Int! # Number of jobs
runningJobs: Int! # Number of running jobs
- shortJobs: Int! # Number of jobs with a duration of less than duration
+  shortJobs: Int! # Number of jobs shorter than the configured ShortRunningJobsDuration
totalWalltime: Int! # Sum of the duration of all matched jobs in hours
totalNodes: Int! # Sum of the nodes of all matched jobs
totalNodeHours: Int! # Sum of the node hours of all matched jobs
@@ -3197,7 +3221,7 @@ func (ec *executionContext) field_Query_nodeMetrics_args(ctx context.Context, ra
return args, nil
}
-func (ec *executionContext) field_Query_nodeStats_args(ctx context.Context, rawArgs map[string]any) (map[string]any, error) {
+func (ec *executionContext) field_Query_nodeStates_args(ctx context.Context, rawArgs map[string]any) (map[string]any, error) {
var err error
args := map[string]any{}
arg0, err := graphql.ProcessArgField(ctx, rawArgs, "filter", ec.unmarshalONodeFilter2ᚕᚖgithubᚗcomᚋClusterCockpitᚋccᚑbackendᚋinternalᚋgraphᚋmodelᚐNodeFilterᚄ)
@@ -7125,6 +7149,50 @@ func (ec *executionContext) fieldContext_JobsStatistics_name(_ context.Context,
return fc, nil
}
+func (ec *executionContext) _JobsStatistics_totalUsers(ctx context.Context, field graphql.CollectedField, obj *model.JobsStatistics) (ret graphql.Marshaler) {
+ fc, err := ec.fieldContext_JobsStatistics_totalUsers(ctx, field)
+ if err != nil {
+ return graphql.Null
+ }
+ ctx = graphql.WithFieldContext(ctx, fc)
+ defer func() {
+ if r := recover(); r != nil {
+ ec.Error(ctx, ec.Recover(ctx, r))
+ ret = graphql.Null
+ }
+ }()
+ resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (any, error) {
+ ctx = rctx // use context from middleware stack in children
+ return obj.TotalUsers, nil
+ })
+ if err != nil {
+ ec.Error(ctx, err)
+ return graphql.Null
+ }
+ if resTmp == nil {
+ if !graphql.HasFieldError(ctx, fc) {
+ ec.Errorf(ctx, "must not be null")
+ }
+ return graphql.Null
+ }
+ res := resTmp.(int)
+ fc.Result = res
+ return ec.marshalNInt2int(ctx, field.Selections, res)
+}
+
+func (ec *executionContext) fieldContext_JobsStatistics_totalUsers(_ context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) {
+ fc = &graphql.FieldContext{
+ Object: "JobsStatistics",
+ Field: field,
+ IsMethod: false,
+ IsResolver: false,
+ Child: func(ctx context.Context, field graphql.CollectedField) (*graphql.FieldContext, error) {
+ return nil, errors.New("field of type Int does not have child fields")
+ },
+ }
+ return fc, nil
+}
+
func (ec *executionContext) _JobsStatistics_totalJobs(ctx context.Context, field graphql.CollectedField, obj *model.JobsStatistics) (ret graphql.Marshaler) {
fc, err := ec.fieldContext_JobsStatistics_totalJobs(ctx, field)
if err != nil {
@@ -9788,6 +9856,50 @@ func (ec *executionContext) fieldContext_Node_subCluster(_ context.Context, fiel
return fc, nil
}
+func (ec *executionContext) _Node_runningJobs(ctx context.Context, field graphql.CollectedField, obj *schema.Node) (ret graphql.Marshaler) {
+ fc, err := ec.fieldContext_Node_runningJobs(ctx, field)
+ if err != nil {
+ return graphql.Null
+ }
+ ctx = graphql.WithFieldContext(ctx, fc)
+ defer func() {
+ if r := recover(); r != nil {
+ ec.Error(ctx, ec.Recover(ctx, r))
+ ret = graphql.Null
+ }
+ }()
+ resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (any, error) {
+ ctx = rctx // use context from middleware stack in children
+ return ec.resolvers.Node().RunningJobs(rctx, obj)
+ })
+ if err != nil {
+ ec.Error(ctx, err)
+ return graphql.Null
+ }
+ if resTmp == nil {
+ if !graphql.HasFieldError(ctx, fc) {
+ ec.Errorf(ctx, "must not be null")
+ }
+ return graphql.Null
+ }
+ res := resTmp.(int)
+ fc.Result = res
+ return ec.marshalNInt2int(ctx, field.Selections, res)
+}
+
+func (ec *executionContext) fieldContext_Node_runningJobs(_ context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) {
+ fc = &graphql.FieldContext{
+ Object: "Node",
+ Field: field,
+ IsMethod: true,
+ IsResolver: true,
+ Child: func(ctx context.Context, field graphql.CollectedField) (*graphql.FieldContext, error) {
+ return nil, errors.New("field of type Int does not have child fields")
+ },
+ }
+ return fc, nil
+}
+
func (ec *executionContext) _Node_nodeState(ctx context.Context, field graphql.CollectedField, obj *schema.Node) (ret graphql.Marshaler) {
fc, err := ec.fieldContext_Node_nodeState(ctx, field)
if err != nil {
@@ -9832,8 +9944,8 @@ func (ec *executionContext) fieldContext_Node_nodeState(_ context.Context, field
return fc, nil
}
-func (ec *executionContext) _Node_HealthState(ctx context.Context, field graphql.CollectedField, obj *schema.Node) (ret graphql.Marshaler) {
- fc, err := ec.fieldContext_Node_HealthState(ctx, field)
+func (ec *executionContext) _Node_healthState(ctx context.Context, field graphql.CollectedField, obj *schema.Node) (ret graphql.Marshaler) {
+ fc, err := ec.fieldContext_Node_healthState(ctx, field)
if err != nil {
return graphql.Null
}
@@ -9863,7 +9975,7 @@ func (ec *executionContext) _Node_HealthState(ctx context.Context, field graphql
return ec.marshalNMonitoringState2githubᚗcomᚋClusterCockpitᚋccᚑlibᚋschemaᚐNodeState(ctx, field.Selections, res)
}
-func (ec *executionContext) fieldContext_Node_HealthState(_ context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) {
+func (ec *executionContext) fieldContext_Node_healthState(_ context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) {
fc = &graphql.FieldContext{
Object: "Node",
Field: field,
@@ -10104,10 +10216,12 @@ func (ec *executionContext) fieldContext_NodeStateResultList_items(_ context.Con
return ec.fieldContext_Node_cluster(ctx, field)
case "subCluster":
return ec.fieldContext_Node_subCluster(ctx, field)
+ case "runningJobs":
+ return ec.fieldContext_Node_runningJobs(ctx, field)
case "nodeState":
return ec.fieldContext_Node_nodeState(ctx, field)
- case "HealthState":
- return ec.fieldContext_Node_HealthState(ctx, field)
+ case "healthState":
+ return ec.fieldContext_Node_healthState(ctx, field)
case "metaData":
return ec.fieldContext_Node_metaData(ctx, field)
}
@@ -10158,8 +10272,8 @@ func (ec *executionContext) fieldContext_NodeStateResultList_count(_ context.Con
return fc, nil
}
-func (ec *executionContext) _NodeStats_state(ctx context.Context, field graphql.CollectedField, obj *model.NodeStats) (ret graphql.Marshaler) {
- fc, err := ec.fieldContext_NodeStats_state(ctx, field)
+func (ec *executionContext) _NodeStates_state(ctx context.Context, field graphql.CollectedField, obj *model.NodeStates) (ret graphql.Marshaler) {
+ fc, err := ec.fieldContext_NodeStates_state(ctx, field)
if err != nil {
return graphql.Null
}
@@ -10189,9 +10303,9 @@ func (ec *executionContext) _NodeStats_state(ctx context.Context, field graphql.
return ec.marshalNString2string(ctx, field.Selections, res)
}
-func (ec *executionContext) fieldContext_NodeStats_state(_ context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) {
+func (ec *executionContext) fieldContext_NodeStates_state(_ context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) {
fc = &graphql.FieldContext{
- Object: "NodeStats",
+ Object: "NodeStates",
Field: field,
IsMethod: false,
IsResolver: false,
@@ -10202,8 +10316,8 @@ func (ec *executionContext) fieldContext_NodeStats_state(_ context.Context, fiel
return fc, nil
}
-func (ec *executionContext) _NodeStats_count(ctx context.Context, field graphql.CollectedField, obj *model.NodeStats) (ret graphql.Marshaler) {
- fc, err := ec.fieldContext_NodeStats_count(ctx, field)
+func (ec *executionContext) _NodeStates_count(ctx context.Context, field graphql.CollectedField, obj *model.NodeStates) (ret graphql.Marshaler) {
+ fc, err := ec.fieldContext_NodeStates_count(ctx, field)
if err != nil {
return graphql.Null
}
@@ -10233,9 +10347,9 @@ func (ec *executionContext) _NodeStats_count(ctx context.Context, field graphql.
return ec.marshalNInt2int(ctx, field.Selections, res)
}
-func (ec *executionContext) fieldContext_NodeStats_count(_ context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) {
+func (ec *executionContext) fieldContext_NodeStates_count(_ context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) {
fc = &graphql.FieldContext{
- Object: "NodeStats",
+ Object: "NodeStates",
Field: field,
IsMethod: false,
IsResolver: false,
@@ -10830,10 +10944,12 @@ func (ec *executionContext) fieldContext_Query_node(ctx context.Context, field g
return ec.fieldContext_Node_cluster(ctx, field)
case "subCluster":
return ec.fieldContext_Node_subCluster(ctx, field)
+ case "runningJobs":
+ return ec.fieldContext_Node_runningJobs(ctx, field)
case "nodeState":
return ec.fieldContext_Node_nodeState(ctx, field)
- case "HealthState":
- return ec.fieldContext_Node_HealthState(ctx, field)
+ case "healthState":
+ return ec.fieldContext_Node_healthState(ctx, field)
case "metaData":
return ec.fieldContext_Node_metaData(ctx, field)
}
@@ -10915,8 +11031,8 @@ func (ec *executionContext) fieldContext_Query_nodes(ctx context.Context, field
return fc, nil
}
-func (ec *executionContext) _Query_nodeStats(ctx context.Context, field graphql.CollectedField) (ret graphql.Marshaler) {
- fc, err := ec.fieldContext_Query_nodeStats(ctx, field)
+func (ec *executionContext) _Query_nodeStates(ctx context.Context, field graphql.CollectedField) (ret graphql.Marshaler) {
+ fc, err := ec.fieldContext_Query_nodeStates(ctx, field)
if err != nil {
return graphql.Null
}
@@ -10929,7 +11045,7 @@ func (ec *executionContext) _Query_nodeStats(ctx context.Context, field graphql.
}()
resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (any, error) {
ctx = rctx // use context from middleware stack in children
- return ec.resolvers.Query().NodeStats(rctx, fc.Args["filter"].([]*model.NodeFilter))
+ return ec.resolvers.Query().NodeStates(rctx, fc.Args["filter"].([]*model.NodeFilter))
})
if err != nil {
ec.Error(ctx, err)
@@ -10941,12 +11057,12 @@ func (ec *executionContext) _Query_nodeStats(ctx context.Context, field graphql.
}
return graphql.Null
}
- res := resTmp.([]*model.NodeStats)
+ res := resTmp.([]*model.NodeStates)
fc.Result = res
- return ec.marshalNNodeStats2ᚕᚖgithubᚗcomᚋClusterCockpitᚋccᚑbackendᚋinternalᚋgraphᚋmodelᚐNodeStatsᚄ(ctx, field.Selections, res)
+ return ec.marshalNNodeStates2ᚕᚖgithubᚗcomᚋClusterCockpitᚋccᚑbackendᚋinternalᚋgraphᚋmodelᚐNodeStatesᚄ(ctx, field.Selections, res)
}
-func (ec *executionContext) fieldContext_Query_nodeStats(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) {
+func (ec *executionContext) fieldContext_Query_nodeStates(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) {
fc = &graphql.FieldContext{
Object: "Query",
Field: field,
@@ -10955,11 +11071,11 @@ func (ec *executionContext) fieldContext_Query_nodeStats(ctx context.Context, fi
Child: func(ctx context.Context, field graphql.CollectedField) (*graphql.FieldContext, error) {
switch field.Name {
case "state":
- return ec.fieldContext_NodeStats_state(ctx, field)
+ return ec.fieldContext_NodeStates_state(ctx, field)
case "count":
- return ec.fieldContext_NodeStats_count(ctx, field)
+ return ec.fieldContext_NodeStates_count(ctx, field)
}
- return nil, fmt.Errorf("no field named %q was found under type NodeStats", field.Name)
+ return nil, fmt.Errorf("no field named %q was found under type NodeStates", field.Name)
},
}
defer func() {
@@ -10969,7 +11085,7 @@ func (ec *executionContext) fieldContext_Query_nodeStats(ctx context.Context, fi
}
}()
ctx = graphql.WithFieldContext(ctx, fc)
- if fc.Args, err = ec.field_Query_nodeStats_args(ctx, field.ArgumentMap(ec.Variables)); err != nil {
+ if fc.Args, err = ec.field_Query_nodeStates_args(ctx, field.ArgumentMap(ec.Variables)); err != nil {
ec.Error(ctx, err)
return fc, err
}
@@ -11379,6 +11495,8 @@ func (ec *executionContext) fieldContext_Query_jobsStatistics(ctx context.Contex
return ec.fieldContext_JobsStatistics_id(ctx, field)
case "name":
return ec.fieldContext_JobsStatistics_name(ctx, field)
+ case "totalUsers":
+ return ec.fieldContext_JobsStatistics_totalUsers(ctx, field)
case "totalJobs":
return ec.fieldContext_JobsStatistics_totalJobs(ctx, field)
case "runningJobs":
@@ -16549,7 +16667,7 @@ func (ec *executionContext) unmarshalInputNodeFilter(ctx context.Context, obj an
asMap[k] = v
}
- fieldsInOrder := [...]string{"hostname", "cluster", "nodeState", "healthState"}
+ fieldsInOrder := [...]string{"hostname", "cluster", "subcluster", "nodeState", "healthState"}
for _, k := range fieldsInOrder {
v, ok := asMap[k]
if !ok {
@@ -16570,6 +16688,13 @@ func (ec *executionContext) unmarshalInputNodeFilter(ctx context.Context, obj an
return it, err
}
it.Cluster = data
+ case "subcluster":
+ ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("subcluster"))
+ data, err := ec.unmarshalOStringInput2ᚖgithubᚗcomᚋClusterCockpitᚋccᚑbackendᚋinternalᚋgraphᚋmodelᚐStringInput(ctx, v)
+ if err != nil {
+ return it, err
+ }
+ it.Subcluster = data
case "nodeState":
ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("nodeState"))
data, err := ec.unmarshalONodeState2ᚖstring(ctx, v)
@@ -17976,6 +18101,11 @@ func (ec *executionContext) _JobsStatistics(ctx context.Context, sel ast.Selecti
if out.Values[i] == graphql.Null {
out.Invalids++
}
+ case "totalUsers":
+ out.Values[i] = ec._JobsStatistics_totalUsers(ctx, field, obj)
+ if out.Values[i] == graphql.Null {
+ out.Invalids++
+ }
case "totalJobs":
out.Values[i] = ec._JobsStatistics_totalJobs(ctx, field, obj)
if out.Values[i] == graphql.Null {
@@ -18625,6 +18755,42 @@ func (ec *executionContext) _Node(ctx context.Context, sel ast.SelectionSet, obj
if out.Values[i] == graphql.Null {
atomic.AddUint32(&out.Invalids, 1)
}
+ case "runningJobs":
+ field := field
+
+ innerFunc := func(ctx context.Context, fs *graphql.FieldSet) (res graphql.Marshaler) {
+ defer func() {
+ if r := recover(); r != nil {
+ ec.Error(ctx, ec.Recover(ctx, r))
+ }
+ }()
+ res = ec._Node_runningJobs(ctx, field, obj)
+ if res == graphql.Null {
+ atomic.AddUint32(&fs.Invalids, 1)
+ }
+ return res
+ }
+
+ if field.Deferrable != nil {
+ dfs, ok := deferred[field.Deferrable.Label]
+ di := 0
+ if ok {
+ dfs.AddField(field)
+ di = len(dfs.Values) - 1
+ } else {
+ dfs = graphql.NewFieldSet([]graphql.CollectedField{field})
+ deferred[field.Deferrable.Label] = dfs
+ }
+ dfs.Concurrently(di, func(ctx context.Context) graphql.Marshaler {
+ return innerFunc(ctx, dfs)
+ })
+
+ // don't run the out.Concurrently() call below
+ out.Values[i] = graphql.Null
+ continue
+ }
+
+ out.Concurrently(i, func(ctx context.Context) graphql.Marshaler { return innerFunc(ctx, out) })
case "nodeState":
field := field
@@ -18661,7 +18827,7 @@ func (ec *executionContext) _Node(ctx context.Context, sel ast.SelectionSet, obj
}
out.Concurrently(i, func(ctx context.Context) graphql.Marshaler { return innerFunc(ctx, out) })
- case "HealthState":
+ case "healthState":
field := field
innerFunc := func(ctx context.Context, fs *graphql.FieldSet) (res graphql.Marshaler) {
@@ -18670,7 +18836,7 @@ func (ec *executionContext) _Node(ctx context.Context, sel ast.SelectionSet, obj
ec.Error(ctx, ec.Recover(ctx, r))
}
}()
- res = ec._Node_HealthState(ctx, field, obj)
+ res = ec._Node_healthState(ctx, field, obj)
if res == graphql.Null {
atomic.AddUint32(&fs.Invalids, 1)
}
@@ -18843,24 +19009,24 @@ func (ec *executionContext) _NodeStateResultList(ctx context.Context, sel ast.Se
return out
}
-var nodeStatsImplementors = []string{"NodeStats"}
+var nodeStatesImplementors = []string{"NodeStates"}
-func (ec *executionContext) _NodeStats(ctx context.Context, sel ast.SelectionSet, obj *model.NodeStats) graphql.Marshaler {
- fields := graphql.CollectFields(ec.OperationContext, sel, nodeStatsImplementors)
+func (ec *executionContext) _NodeStates(ctx context.Context, sel ast.SelectionSet, obj *model.NodeStates) graphql.Marshaler {
+ fields := graphql.CollectFields(ec.OperationContext, sel, nodeStatesImplementors)
out := graphql.NewFieldSet(fields)
deferred := make(map[string]*graphql.FieldSet)
for i, field := range fields {
switch field.Name {
case "__typename":
- out.Values[i] = graphql.MarshalString("NodeStats")
+ out.Values[i] = graphql.MarshalString("NodeStates")
case "state":
- out.Values[i] = ec._NodeStats_state(ctx, field, obj)
+ out.Values[i] = ec._NodeStates_state(ctx, field, obj)
if out.Values[i] == graphql.Null {
out.Invalids++
}
case "count":
- out.Values[i] = ec._NodeStats_count(ctx, field, obj)
+ out.Values[i] = ec._NodeStates_count(ctx, field, obj)
if out.Values[i] == graphql.Null {
out.Invalids++
}
@@ -19103,7 +19269,7 @@ func (ec *executionContext) _Query(ctx context.Context, sel ast.SelectionSet) gr
}
out.Concurrently(i, func(ctx context.Context) graphql.Marshaler { return rrm(innerCtx) })
- case "nodeStats":
+ case "nodeStates":
field := field
innerFunc := func(ctx context.Context, fs *graphql.FieldSet) (res graphql.Marshaler) {
@@ -19112,7 +19278,7 @@ func (ec *executionContext) _Query(ctx context.Context, sel ast.SelectionSet) gr
ec.Error(ctx, ec.Recover(ctx, r))
}
}()
- res = ec._Query_nodeStats(ctx, field)
+ res = ec._Query_nodeStates(ctx, field)
if res == graphql.Null {
atomic.AddUint32(&fs.Invalids, 1)
}
@@ -21756,7 +21922,7 @@ func (ec *executionContext) marshalNNodeStateResultList2ᚖgithubᚗcomᚋCluste
return ec._NodeStateResultList(ctx, sel, v)
}
-func (ec *executionContext) marshalNNodeStats2ᚕᚖgithubᚗcomᚋClusterCockpitᚋccᚑbackendᚋinternalᚋgraphᚋmodelᚐNodeStatsᚄ(ctx context.Context, sel ast.SelectionSet, v []*model.NodeStats) graphql.Marshaler {
+func (ec *executionContext) marshalNNodeStates2ᚕᚖgithubᚗcomᚋClusterCockpitᚋccᚑbackendᚋinternalᚋgraphᚋmodelᚐNodeStatesᚄ(ctx context.Context, sel ast.SelectionSet, v []*model.NodeStates) graphql.Marshaler {
ret := make(graphql.Array, len(v))
var wg sync.WaitGroup
isLen1 := len(v) == 1
@@ -21780,7 +21946,7 @@ func (ec *executionContext) marshalNNodeStats2ᚕᚖgithubᚗcomᚋClusterCockpi
if !isLen1 {
defer wg.Done()
}
- ret[i] = ec.marshalNNodeStats2ᚖgithubᚗcomᚋClusterCockpitᚋccᚑbackendᚋinternalᚋgraphᚋmodelᚐNodeStats(ctx, sel, v[i])
+ ret[i] = ec.marshalNNodeStates2ᚖgithubᚗcomᚋClusterCockpitᚋccᚑbackendᚋinternalᚋgraphᚋmodelᚐNodeStates(ctx, sel, v[i])
}
if isLen1 {
f(i)
@@ -21800,14 +21966,14 @@ func (ec *executionContext) marshalNNodeStats2ᚕᚖgithubᚗcomᚋClusterCockpi
return ret
}
-func (ec *executionContext) marshalNNodeStats2ᚖgithubᚗcomᚋClusterCockpitᚋccᚑbackendᚋinternalᚋgraphᚋmodelᚐNodeStats(ctx context.Context, sel ast.SelectionSet, v *model.NodeStats) graphql.Marshaler {
+func (ec *executionContext) marshalNNodeStates2ᚖgithubᚗcomᚋClusterCockpitᚋccᚑbackendᚋinternalᚋgraphᚋmodelᚐNodeStates(ctx context.Context, sel ast.SelectionSet, v *model.NodeStates) graphql.Marshaler {
if v == nil {
if !graphql.HasFieldError(ctx, graphql.GetFieldContext(ctx)) {
ec.Errorf(ctx, "the requested element is null which the schema does not allow")
}
return graphql.Null
}
- return ec._NodeStats(ctx, sel, v)
+ return ec._NodeStates(ctx, sel, v)
}
func (ec *executionContext) marshalNNodesResultList2githubᚗcomᚋClusterCockpitᚋccᚑbackendᚋinternalᚋgraphᚋmodelᚐNodesResultList(ctx context.Context, sel ast.SelectionSet, v model.NodesResultList) graphql.Marshaler {
diff --git a/internal/graph/model/models_gen.go b/internal/graph/model/models_gen.go
index c4948d0..9b87864 100644
--- a/internal/graph/model/models_gen.go
+++ b/internal/graph/model/models_gen.go
@@ -114,6 +114,7 @@ type JobStats struct {
type JobsStatistics struct {
ID string `json:"id"`
Name string `json:"name"`
+ TotalUsers int `json:"totalUsers"`
TotalJobs int `json:"totalJobs"`
RunningJobs int `json:"runningJobs"`
ShortJobs int `json:"shortJobs"`
@@ -172,6 +173,7 @@ type NamedStatsWithScope struct {
type NodeFilter struct {
Hostname *StringInput `json:"hostname,omitempty"`
Cluster *StringInput `json:"cluster,omitempty"`
+ Subcluster *StringInput `json:"subcluster,omitempty"`
NodeState *string `json:"nodeState,omitempty"`
HealthState *schema.NodeState `json:"healthState,omitempty"`
}
@@ -187,7 +189,7 @@ type NodeStateResultList struct {
Count *int `json:"count,omitempty"`
}
-type NodeStats struct {
+type NodeStates struct {
State string `json:"state"`
Count int `json:"count"`
}
@@ -248,20 +250,22 @@ type User struct {
type Aggregate string
const (
- AggregateUser Aggregate = "USER"
- AggregateProject Aggregate = "PROJECT"
- AggregateCluster Aggregate = "CLUSTER"
+ AggregateUser Aggregate = "USER"
+ AggregateProject Aggregate = "PROJECT"
+ AggregateCluster Aggregate = "CLUSTER"
+ AggregateSubcluster Aggregate = "SUBCLUSTER"
)
var AllAggregate = []Aggregate{
AggregateUser,
AggregateProject,
AggregateCluster,
+ AggregateSubcluster,
}
func (e Aggregate) IsValid() bool {
switch e {
- case AggregateUser, AggregateProject, AggregateCluster:
+ case AggregateUser, AggregateProject, AggregateCluster, AggregateSubcluster:
return true
}
return false
@@ -307,6 +311,7 @@ type SortByAggregate string
const (
SortByAggregateTotalwalltime SortByAggregate = "TOTALWALLTIME"
SortByAggregateTotaljobs SortByAggregate = "TOTALJOBS"
+ SortByAggregateTotalusers SortByAggregate = "TOTALUSERS"
SortByAggregateTotalnodes SortByAggregate = "TOTALNODES"
SortByAggregateTotalnodehours SortByAggregate = "TOTALNODEHOURS"
SortByAggregateTotalcores SortByAggregate = "TOTALCORES"
@@ -318,6 +323,7 @@ const (
var AllSortByAggregate = []SortByAggregate{
SortByAggregateTotalwalltime,
SortByAggregateTotaljobs,
+ SortByAggregateTotalusers,
SortByAggregateTotalnodes,
SortByAggregateTotalnodehours,
SortByAggregateTotalcores,
@@ -328,7 +334,7 @@ var AllSortByAggregate = []SortByAggregate{
func (e SortByAggregate) IsValid() bool {
switch e {
- case SortByAggregateTotalwalltime, SortByAggregateTotaljobs, SortByAggregateTotalnodes, SortByAggregateTotalnodehours, SortByAggregateTotalcores, SortByAggregateTotalcorehours, SortByAggregateTotalaccs, SortByAggregateTotalacchours:
+ case SortByAggregateTotalwalltime, SortByAggregateTotaljobs, SortByAggregateTotalusers, SortByAggregateTotalnodes, SortByAggregateTotalnodehours, SortByAggregateTotalcores, SortByAggregateTotalcorehours, SortByAggregateTotalaccs, SortByAggregateTotalacchours:
return true
}
return false
diff --git a/internal/graph/schema.resolvers.go b/internal/graph/schema.resolvers.go
index b886c34..cbe3650 100644
--- a/internal/graph/schema.resolvers.go
+++ b/internal/graph/schema.resolvers.go
@@ -305,14 +305,20 @@ func (r *mutationResolver) UpdateConfiguration(ctx context.Context, name string,
return nil, nil
}
-// NodeState is the resolver for the nodeState field.
-func (r *nodeResolver) NodeState(ctx context.Context, obj *schema.Node) (string, error) {
- panic(fmt.Errorf("not implemented: NodeState - nodeState"))
+// RunningJobs is the resolver for the runningJobs field.
+func (r *nodeResolver) RunningJobs(ctx context.Context, obj *schema.Node) (int, error) {
+ panic(fmt.Errorf("not implemented: RunningJobs - runningJobs"))
}
-// HealthState is the resolver for the HealthState field.
+// NodeState is the resolver for the nodeState field.
+func (r *nodeResolver) NodeState(ctx context.Context, obj *schema.Node) (string, error) {
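+	// The state is persisted with the node row, so no extra lookup is needed.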
+ return string(obj.NodeState), nil
+}
+
+// HealthState is the resolver for the healthState field.
func (r *nodeResolver) HealthState(ctx context.Context, obj *schema.Node) (schema.NodeState, error) {
- panic(fmt.Errorf("not implemented: HealthState - HealthState"))
+	// FIXME: Why is the output of type schema.NodeState?
+ panic(fmt.Errorf("not implemented: HealthState - healthState"))
}
// MetaData is the resolver for the metaData field.
@@ -378,9 +384,26 @@ func (r *queryResolver) Nodes(ctx context.Context, filter []*model.NodeFilter, o
return &model.NodeStateResultList{Items: nodes, Count: &count}, err
}
-// NodeStats is the resolver for the nodeStats field.
-func (r *queryResolver) NodeStats(ctx context.Context, filter []*model.NodeFilter) ([]*model.NodeStats, error) {
- panic(fmt.Errorf("not implemented: NodeStats - nodeStats"))
+// NodeStates is the resolver for the nodeStates field.
+func (r *queryResolver) NodeStates(ctx context.Context, filter []*model.NodeFilter) ([]*model.NodeStates, error) {
+ repo := repository.GetNodeRepository()
+
+ stateCounts, serr := repo.CountNodeStates(ctx, filter)
+ if serr != nil {
+ cclog.Warnf("Error while counting nodeStates: %s", serr.Error())
+ return nil, serr
+ }
+
+ healthCounts, herr := repo.CountHealthStates(ctx, filter)
+ if herr != nil {
+ cclog.Warnf("Error while counting healthStates: %s", herr.Error())
+ return nil, herr
+ }
+
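+	// Concatenate both groupings into one result list; node states and health
+	// states are assumed to use disjoint labels, so entries stay unambiguous.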
+	allCounts := append(stateCounts, healthCounts...)
+
+ return allCounts, nil
}
// Job is the resolver for the job field.
@@ -558,7 +581,7 @@ func (r *queryResolver) JobsStatistics(ctx context.Context, filter []*model.JobF
defaultDurationBins := "1h"
defaultMetricBins := 10
- if requireField(ctx, "totalJobs") || requireField(ctx, "totalWalltime") || requireField(ctx, "totalNodes") || requireField(ctx, "totalCores") ||
+ if requireField(ctx, "totalJobs") || requireField(ctx, "totalUsers") || requireField(ctx, "totalWalltime") || requireField(ctx, "totalNodes") || requireField(ctx, "totalCores") ||
requireField(ctx, "totalAccs") || requireField(ctx, "totalNodeHours") || requireField(ctx, "totalCoreHours") || requireField(ctx, "totalAccHours") {
if groupBy == nil {
stats, err = r.Repo.JobsStats(ctx, filter)
diff --git a/internal/importer/initDB.go b/internal/importer/initDB.go
index 179c21c..98dca03 100644
--- a/internal/importer/initDB.go
+++ b/internal/importer/initDB.go
@@ -40,7 +40,7 @@ func InitDB() error {
}
tags := make(map[string]int64)
- // Not using log.Print because we want the line to end with `\r` and
+ // Not using cclog.Print because we want the line to end with `\r` and
// this function is only ever called when a special command line flag
// is passed anyways.
fmt.Printf("%d jobs inserted...\r", 0)
diff --git a/internal/repository/job.go b/internal/repository/job.go
index 68778e1..8a8197c 100644
--- a/internal/repository/job.go
+++ b/internal/repository/job.go
@@ -337,10 +337,10 @@ func (r *JobRepository) FindColumnValue(user *schema.User, searchterm string, ta
// theSql, args, theErr := theQuery.ToSql()
// if theErr != nil {
- // log.Warn("Error while converting query to sql")
+ // cclog.Warn("Error while converting query to sql")
// return "", err
// }
- // log.Debugf("SQL query (FindColumnValue): `%s`, args: %#v", theSql, args)
+ // cclog.Debugf("SQL query (FindColumnValue): `%s`, args: %#v", theSql, args)
err := theQuery.RunWith(r.stmtCache).QueryRow().Scan(&result)
diff --git a/internal/repository/migrations/sqlite3/10_node-table.up.sql b/internal/repository/migrations/sqlite3/10_node-table.up.sql
index 6b9afbe..03a9bf6 100644
--- a/internal/repository/migrations/sqlite3/10_node-table.up.sql
+++ b/internal/repository/migrations/sqlite3/10_node-table.up.sql
@@ -4,12 +4,13 @@ CREATE TABLE "node" (
hostname VARCHAR(255) NOT NULL,
cluster VARCHAR(255) NOT NULL,
subcluster VARCHAR(255) NOT NULL,
- cpus_allocated INTEGER NOT NULL,
- cpus_total INTEGER NOT NULL,
- memory_allocated INTEGER NOT NULL,
- memory_total INTEGER NOT NULL,
- gpus_allocated INTEGER NOT NULL,
- gpus_total INTEGER NOT NULL,
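+    -- jobs_running is new; resource counters default to 0 so a node row can
+    -- be inserted before the first scheduler update supplies values.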
+ jobs_running INTEGER DEFAULT 0 NOT NULL,
+ cpus_allocated INTEGER DEFAULT 0 NOT NULL,
+ cpus_total INTEGER DEFAULT 0 NOT NULL,
+ memory_allocated INTEGER DEFAULT 0 NOT NULL,
+ memory_total INTEGER DEFAULT 0 NOT NULL,
+ gpus_allocated INTEGER DEFAULT 0 NOT NULL,
+ gpus_total INTEGER DEFAULT 0 NOT NULL,
node_state VARCHAR(255) NOT NULL
CHECK (node_state IN (
'allocated', 'reserved', 'idle', 'mixed',
diff --git a/internal/repository/node.go b/internal/repository/node.go
index 83bf062..d7db2f4 100644
--- a/internal/repository/node.go
+++ b/internal/repository/node.go
@@ -49,6 +49,12 @@ func GetNodeRepository() *NodeRepository {
return nodeRepoInstance
}
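+// nodeColumns is the column set selected by node queries; node.id and
+// node.meta_data are omitted here and resolved separately when needed.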
+var nodeColumns = []string{
+ // "node.id,"
+ "node.hostname", "node.cluster", "node.subcluster",
+ "node.node_state", "node.health_state", // "node.meta_data",
+}
+
func (r *NodeRepository) FetchMetadata(node *schema.Node) (map[string]string, error) {
start := time.Now()
cachekey := fmt.Sprintf("metadata:%d", node.ID)
@@ -218,9 +224,9 @@ func (r *NodeRepository) DeleteNode(id int64) error {
func (r *NodeRepository) QueryNodes(
ctx context.Context,
filters []*model.NodeFilter,
- order *model.OrderByInput,
+ order *model.OrderByInput, // Currently unused!
) ([]*schema.Node, error) {
- query, qerr := SecurityCheck(ctx, sq.Select(jobColumns...).From("node"))
+ query, qerr := AccessCheck(ctx, sq.Select(nodeColumns...).From("node"))
if qerr != nil {
return nil, qerr
}
@@ -232,6 +238,9 @@ func (r *NodeRepository) QueryNodes(
if f.Cluster != nil {
query = buildStringCondition("node.cluster", f.Cluster, query)
}
+ if f.Subcluster != nil {
+ query = buildStringCondition("node.subcluster", f.Subcluster, query)
+ }
if f.NodeState != nil {
query = query.Where("node.node_state = ?", f.NodeState)
}
@@ -287,3 +296,123 @@ func (r *NodeRepository) ListNodes(cluster string) ([]*schema.Node, error) {
return nodeList, nil
}
+
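+// CountNodeStates returns the number of nodes per node_state, restricted by
+// the given filters. Access requires an admin or support role (AccessCheck).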
+func (r *NodeRepository) CountNodeStates(ctx context.Context, filters []*model.NodeFilter) ([]*model.NodeStates, error) {
+ query, qerr := AccessCheck(ctx, sq.Select("node_state AS state", "count(*) AS count").From("node"))
+ if qerr != nil {
+ return nil, qerr
+ }
+
+ for _, f := range filters {
+ if f.Hostname != nil {
+ query = buildStringCondition("node.hostname", f.Hostname, query)
+ }
+ if f.Cluster != nil {
+ query = buildStringCondition("node.cluster", f.Cluster, query)
+ }
+ if f.Subcluster != nil {
+ query = buildStringCondition("node.subcluster", f.Subcluster, query)
+ }
+ if f.NodeState != nil {
+ query = query.Where("node.node_state = ?", f.NodeState)
+ }
+ if f.HealthState != nil {
+ query = query.Where("node.health_state = ?", f.HealthState)
+ }
+ }
+
+ // Add Group and Order
+ query = query.GroupBy("state").OrderBy("count DESC")
+
+ rows, err := query.RunWith(r.stmtCache).Query()
+ if err != nil {
+ queryString, queryVars, _ := query.ToSql()
+ cclog.Errorf("Error while running query '%s' %v: %v", queryString, queryVars, err)
+ return nil, err
+ }
+
+ nodes := make([]*model.NodeStates, 0)
+ for rows.Next() {
+ node := model.NodeStates{}
+
+ if err := rows.Scan(&node.State, &node.Count); err != nil {
+ rows.Close()
+ cclog.Warn("Error while scanning rows (NodeStates)")
+ return nil, err
+ }
+ nodes = append(nodes, &node)
+ }
+
+ return nodes, nil
+}
+
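+// CountHealthStates returns the number of nodes per health_state; apart from
+// the grouped column it mirrors CountNodeStates.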
+func (r *NodeRepository) CountHealthStates(ctx context.Context, filters []*model.NodeFilter) ([]*model.NodeStates, error) {
+ query, qerr := AccessCheck(ctx, sq.Select("health_state AS state", "count(*) AS count").From("node"))
+ if qerr != nil {
+ return nil, qerr
+ }
+
+ for _, f := range filters {
+ if f.Hostname != nil {
+ query = buildStringCondition("node.hostname", f.Hostname, query)
+ }
+ if f.Cluster != nil {
+ query = buildStringCondition("node.cluster", f.Cluster, query)
+ }
+ if f.Subcluster != nil {
+ query = buildStringCondition("node.subcluster", f.Subcluster, query)
+ }
+ if f.NodeState != nil {
+ query = query.Where("node.node_state = ?", f.NodeState)
+ }
+ if f.HealthState != nil {
+ query = query.Where("node.health_state = ?", f.HealthState)
+ }
+ }
+
+ // Add Group and Order
+ query = query.GroupBy("state").OrderBy("count DESC")
+
+ rows, err := query.RunWith(r.stmtCache).Query()
+ if err != nil {
+ queryString, queryVars, _ := query.ToSql()
+ cclog.Errorf("Error while running query '%s' %v: %v", queryString, queryVars, err)
+ return nil, err
+ }
+
+ nodes := make([]*model.NodeStates, 0)
+ for rows.Next() {
+ node := model.NodeStates{}
+
+ if err := rows.Scan(&node.State, &node.Count); err != nil {
+ rows.Close()
+ cclog.Warn("Error while scanning rows (NodeStates)")
+ return nil, err
+ }
+ nodes = append(nodes, &node)
+ }
+
+ return nodes, nil
+}
+
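+// AccessCheck resolves the user from the request context and limits node
+// queries to admin and support roles via AccessCheckWithUser.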
+func AccessCheck(ctx context.Context, query sq.SelectBuilder) (sq.SelectBuilder, error) {
+ user := GetUserFromContext(ctx)
+ return AccessCheckWithUser(user, query)
+}
+
+func AccessCheckWithUser(user *schema.User, query sq.SelectBuilder) (sq.SelectBuilder, error) {
+ if user == nil {
+ var qnil sq.SelectBuilder
+ return qnil, fmt.Errorf("user context is nil")
+ }
+
+ switch {
+ // case len(user.Roles) == 1 && user.HasRole(schema.RoleApi): // API-User : Access NodeInfos
+ // return query, nil
+ case user.HasAnyRole([]schema.Role{schema.RoleAdmin, schema.RoleSupport}): // Admin & Support : Access NodeInfos
+ return query, nil
+ default: // No known Role: No Access, return error
+ var qnil sq.SelectBuilder
+ return qnil, fmt.Errorf("user has no or unknown roles")
+ }
+}
diff --git a/internal/repository/stats.go b/internal/repository/stats.go
index 25c862f..19d17bd 100644
--- a/internal/repository/stats.go
+++ b/internal/repository/stats.go
@@ -28,6 +28,7 @@ var groupBy2column = map[model.Aggregate]string{
var sortBy2column = map[model.SortByAggregate]string{
model.SortByAggregateTotaljobs: "totalJobs",
+ model.SortByAggregateTotalusers: "totalUsers",
model.SortByAggregateTotalwalltime: "totalWalltime",
model.SortByAggregateTotalnodes: "totalNodes",
model.SortByAggregateTotalnodehours: "totalNodeHours",
@@ -76,8 +77,12 @@ func (r *JobRepository) buildStatsQuery(
// fmt.Sprintf(`CAST(ROUND((CASE WHEN job.job_state = "running" THEN %d - job.start_time ELSE job.duration END) / 3600) as %s) as value`, time.Now().Unix(), castType)
if col != "" {
- // Scan columns: id, totalJobs, name, totalWalltime, totalNodes, totalNodeHours, totalCores, totalCoreHours, totalAccs, totalAccHours
- query = sq.Select(col, "COUNT(job.id) as totalJobs", "name",
+ // Scan columns: id, name, totalJobs, totalUsers, totalWalltime, totalNodes, totalNodeHours, totalCores, totalCoreHours, totalAccs, totalAccHours
+ query = sq.Select(
+ col,
+ "name",
+ "COUNT(job.id) as totalJobs",
+ "COUNT(DISTINCT job.hpc_user) AS totalUsers",
fmt.Sprintf(`CAST(ROUND(SUM((CASE WHEN job.job_state = "running" THEN %d - job.start_time ELSE job.duration END)) / 3600) as %s) as totalWalltime`, time.Now().Unix(), castType),
fmt.Sprintf(`CAST(SUM(job.num_nodes) as %s) as totalNodes`, castType),
fmt.Sprintf(`CAST(ROUND(SUM((CASE WHEN job.job_state = "running" THEN %d - job.start_time ELSE job.duration END) * job.num_nodes) / 3600) as %s) as totalNodeHours`, time.Now().Unix(), castType),
@@ -87,8 +92,10 @@ func (r *JobRepository) buildStatsQuery(
fmt.Sprintf(`CAST(ROUND(SUM((CASE WHEN job.job_state = "running" THEN %d - job.start_time ELSE job.duration END) * job.num_acc) / 3600) as %s) as totalAccHours`, time.Now().Unix(), castType),
).From("job").LeftJoin("hpc_user ON hpc_user.username = job.hpc_user").GroupBy(col)
} else {
- // Scan columns: totalJobs, name, totalWalltime, totalNodes, totalNodeHours, totalCores, totalCoreHours, totalAccs, totalAccHours
- query = sq.Select("COUNT(job.id)",
+ // Scan columns: totalJobs, totalUsers, totalWalltime, totalNodes, totalNodeHours, totalCores, totalCoreHours, totalAccs, totalAccHours
+ query = sq.Select(
+ "COUNT(job.id) as totalJobs",
+ "COUNT(DISTINCT job.hpc_user) AS totalUsers",
fmt.Sprintf(`CAST(ROUND(SUM((CASE WHEN job.job_state = "running" THEN %d - job.start_time ELSE job.duration END)) / 3600) as %s)`, time.Now().Unix(), castType),
fmt.Sprintf(`CAST(SUM(job.num_nodes) as %s)`, castType),
fmt.Sprintf(`CAST(ROUND(SUM((CASE WHEN job.job_state = "running" THEN %d - job.start_time ELSE job.duration END) * job.num_nodes) / 3600) as %s)`, time.Now().Unix(), castType),
@@ -167,14 +174,14 @@ func (r *JobRepository) JobsStatsGrouped(
for rows.Next() {
var id sql.NullString
var name sql.NullString
- var jobs, walltime, nodes, nodeHours, cores, coreHours, accs, accHours sql.NullInt64
- if err := rows.Scan(&id, &jobs, &name, &walltime, &nodes, &nodeHours, &cores, &coreHours, &accs, &accHours); err != nil {
+ var jobs, users, walltime, nodes, nodeHours, cores, coreHours, accs, accHours sql.NullInt64
+ if err := rows.Scan(&id, &name, &jobs, &users, &walltime, &nodes, &nodeHours, &cores, &coreHours, &accs, &accHours); err != nil {
cclog.Warn("Error while scanning rows")
return nil, err
}
if id.Valid {
- var totalJobs, totalWalltime, totalNodes, totalNodeHours, totalCores, totalCoreHours, totalAccs, totalAccHours int
+ var totalJobs, totalUsers, totalWalltime, totalNodes, totalNodeHours, totalCores, totalCoreHours, totalAccs, totalAccHours int
var personName string
if name.Valid {
@@ -185,6 +192,10 @@ func (r *JobRepository) JobsStatsGrouped(
totalJobs = int(jobs.Int64)
}
+ if users.Valid {
+ totalUsers = int(users.Int64)
+ }
+
if walltime.Valid {
totalWalltime = int(walltime.Int64)
}
@@ -228,8 +239,9 @@ func (r *JobRepository) JobsStatsGrouped(
stats = append(stats,
&model.JobsStatistics{
ID: id.String,
- TotalJobs: int(jobs.Int64),
- TotalWalltime: int(walltime.Int64),
+ TotalJobs: totalJobs,
+ TotalUsers: totalUsers,
+ TotalWalltime: totalWalltime,
TotalNodes: totalNodes,
TotalNodeHours: totalNodeHours,
TotalCores: totalCores,
@@ -259,8 +271,8 @@ func (r *JobRepository) JobsStats(
row := query.RunWith(r.DB).QueryRow()
stats := make([]*model.JobsStatistics, 0, 1)
- var jobs, walltime, nodes, nodeHours, cores, coreHours, accs, accHours sql.NullInt64
- if err := row.Scan(&jobs, &walltime, &nodes, &nodeHours, &cores, &coreHours, &accs, &accHours); err != nil {
+ var jobs, users, walltime, nodes, nodeHours, cores, coreHours, accs, accHours sql.NullInt64
+ if err := row.Scan(&jobs, &users, &walltime, &nodes, &nodeHours, &cores, &coreHours, &accs, &accHours); err != nil {
cclog.Warn("Error while scanning rows")
return nil, err
}
@@ -280,6 +292,7 @@ func (r *JobRepository) JobsStats(
stats = append(stats,
&model.JobsStatistics{
TotalJobs: int(jobs.Int64),
+ TotalUsers: int(users.Int64),
TotalWalltime: int(walltime.Int64),
TotalNodeHours: totalNodeHours,
TotalCoreHours: totalCoreHours,
diff --git a/web/frontend/src/Analysis.root.svelte b/web/frontend/src/Analysis.root.svelte
index 689b7a2..122a67b 100644
--- a/web/frontend/src/Analysis.root.svelte
+++ b/web/frontend/src/Analysis.root.svelte
@@ -459,7 +459,7 @@
{#each $topQuery.data.topList as te, i}
- |
+ |
{#if groupSelection.key == "user"}
-
-
- Current utilization of cluster "{cluster}"
-
-
-
-
-
- {
- from = new Date(Date.now() - 5 * 60 * 1000);
- to = new Date(Date.now());
- }}
- />
-
-
-
+
- {#if $initq.fetching || $mainQuery.fetching}
-
- {:else if $initq.error}
- {$initq.error.message}
- {:else}
-
- {/if}
+ Current Status of Cluster "{presetCluster.charAt(0).toUpperCase() + presetCluster.slice(1)}"
-{#if $mainQuery.error}
-
-
- {$mainQuery.error.message}
-
-
-{/if}
-
+
+
+
+
+
+
+
-
-
-{#if $initq.data && $mainQuery.data}
- {#each $initq.data.clusters.find((c) => c.name == cluster).subClusters as subCluster, i}
-
-
-
-
- SubCluster "{subCluster.name}"
-
-
-
-
- Allocated Nodes |
- |
- {allocatedNodes[subCluster.name]} / {subCluster.numberOfNodes}
- Nodes |
-
-
- Flop Rate (Any) |
- |
-
- {scaleNumbers(
- flopRate[subCluster.name],
- subCluster.flopRateSimd.value * subCluster.numberOfNodes,
- flopRateUnitPrefix[subCluster.name],
- )}{flopRateUnitBase[subCluster.name]} [Max]
- |
-
-
- MemBw Rate |
- |
-
- {scaleNumbers(
- memBwRate[subCluster.name],
- subCluster.memoryBandwidth.value * subCluster.numberOfNodes,
- memBwRateUnitPrefix[subCluster.name],
- )}{memBwRateUnitBase[subCluster.name]} [Max]
- |
-
-
-
-
-
-
-
- {#key $mainQuery.data.nodeMetrics}
- data.subCluster == subCluster.name,
- ),
- )}
- />
- {/key}
-
-
-
- {/each}
-
-
-
-
-
-
-
-
-
- Top Users on {cluster.charAt(0).toUpperCase() + cluster.slice(1)}
-
- {#key $topUserQuery.data}
- {#if $topUserQuery.fetching}
-
- {:else if $topUserQuery.error}
- {$topUserQuery.error.message}
- {:else}
- tu[topUserSelection.key],
- )}
- entities={$topUserQuery.data.topUser.map((tu) => scrambleNames ? scramble(tu.id) : tu.id)}
- />
- {/if}
- {/key}
-
-
-
- {#key $topUserQuery.data}
- {#if $topUserQuery.fetching}
-
- {:else if $topUserQuery.error}
- {$topUserQuery.error.message}
- {:else}
-
-
- Legend |
- User Name |
- Number of
-
- |
-
- {#each $topUserQuery.data.topUser as tu, i}
-
- |
- {scrambleNames ? scramble(tu.id) : tu.id} |
- {#if tu?.name}
- {scrambleNames ? scramble(tu.name) : tu.name}
- {/if}
- {tu[topUserSelection.key]} |
-
- {/each}
-
- {/if}
- {/key}
-
-
-
- Top Projects on {cluster.charAt(0).toUpperCase() + cluster.slice(1)}
-
- {#key $topProjectQuery.data}
- {#if $topProjectQuery.fetching}
-
- {:else if $topProjectQuery.error}
- {$topProjectQuery.error.message}
- {:else}
- tp[topProjectSelection.key],
- )}
- entities={$topProjectQuery.data.topProjects.map((tp) => scrambleNames ? scramble(tp.id) : tp.id)}
- />
- {/if}
- {/key}
-
-
- {#key $topProjectQuery.data}
- {#if $topProjectQuery.fetching}
-
- {:else if $topProjectQuery.error}
- {$topProjectQuery.error.message}
- {:else}
-
-
- Legend |
- Project Code |
- Number of
-
- |
-
- {#each $topProjectQuery.data.topProjects as tp, i}
-
- |
- {scrambleNames ? scramble(tp.id) : tp.id} |
- {tp[topProjectSelection.key]} |
-
- {/each}
-
- {/if}
- {/key}
-
-
-
-
-
-
-
-
-
- {#key $mainQuery.data.stats}
-
- {/key}
-
-
- {#key $mainQuery.data.stats}
-
- {/key}
-
-
-
-
- {#key $mainQuery.data.stats}
-
- {/key}
-
-
- {#key $mainQuery.data.stats}
-
- {/key}
-
-
-
-
-
-
-
- {#if selectedHistograms}
-
- {#snippet gridContent(item)}
-
- {/snippet}
+
+
+
+
+
- {#key $mainQuery.data.stats[0].histMetrics}
-
- {/key}
- {/if}
-{/if}
-
- {
- selectedHistograms = [...newSelection];
- }}
-/>
+
+
+
+
+
+
+
\ No newline at end of file
diff --git a/web/frontend/src/User.root.svelte b/web/frontend/src/User.root.svelte
index c1f0fb8..f675a0d 100644
--- a/web/frontend/src/User.root.svelte
+++ b/web/frontend/src/User.root.svelte
@@ -404,6 +404,7 @@
cluster={selectedCluster}
bind:isOpen={isHistogramSelectionOpen}
presetSelectedHistograms={selectedHistograms}
+ configName="user_view_histogramMetrics"
applyChange={(newSelection) => {
selectedHistogramsBuffer[selectedCluster || 'all'] = [...newSelection];
}}
diff --git a/web/frontend/src/generic/JobCompare.svelte b/web/frontend/src/generic/JobCompare.svelte
index a1e7bfa..55966ac 100644
--- a/web/frontend/src/generic/JobCompare.svelte
+++ b/web/frontend/src/generic/JobCompare.svelte
@@ -26,7 +26,7 @@
/* Svelte 5 Props */
let {
matchedCompareJobs = $bindable(0),
- metrics = ccconfig?.plot_list_selectedMetrics,
+ metrics = getContext("cc-config")?.plot_list_selectedMetrics,
filterBuffer = [],
} = $props();
diff --git a/web/frontend/src/generic/plots/Comparogram.svelte b/web/frontend/src/generic/plots/Comparogram.svelte
index b6f5fd1..2051088 100644
--- a/web/frontend/src/generic/plots/Comparogram.svelte
+++ b/web/frontend/src/generic/plots/Comparogram.svelte
@@ -44,7 +44,7 @@
/* Const Init */
const clusterCockpitConfig = getContext("cc-config");
- const lineWidth = clusterCockpitConfig.plot_general_lineWidth / window.devicePixelRatio;
+  const lineWidth = (clusterCockpitConfig?.plot_general_lineWidth ?? 2) / window.devicePixelRatio;
const cbmode = clusterCockpitConfig?.plot_general_colorblindMode || false;
// UPLOT SERIES INIT //
diff --git a/web/frontend/src/generic/plots/Pie.svelte b/web/frontend/src/generic/plots/Pie.svelte
index aed6026..48031e4 100644
--- a/web/frontend/src/generic/plots/Pie.svelte
+++ b/web/frontend/src/generic/plots/Pie.svelte
@@ -14,28 +14,59 @@
-->
-
+
diff --git a/web/frontend/src/generic/plots/Roofline.svelte b/web/frontend/src/generic/plots/Roofline.svelte
index 8c6e9de..3a0e332 100644
--- a/web/frontend/src/generic/plots/Roofline.svelte
+++ b/web/frontend/src/generic/plots/Roofline.svelte
@@ -3,7 +3,6 @@
Properties:
- `data [null, [], []]`: Roofline Data Structure, see below for details [Default: null]
- - `renderTime Bool?`: If time information should be rendered as colored dots [Default: false]
- `allowSizeChange Bool?`: If dimensions of rendered plot can change [Default: false]
- `subCluster GraphQL.SubCluster?`: SubCluster Object; contains required topology information [Default: null]
- `width Number?`: Plot width (reactively adaptive) [Default: 600]
@@ -21,19 +20,22 @@
- `data[2] = [0.1, 0.15, 0.2, ...]`
- Color Code: Time Information (Floats from 0 to 1) (Optional)
-->
-
-{#if data != null}
+{#if roofData != null}
{:else}
- Cannot render roofline: No data!
+ Cannot render roofline: No data!
{/if}
-
diff --git a/web/frontend/src/generic/plots/RooflineLegacy.svelte b/web/frontend/src/generic/plots/RooflineLegacy.svelte
new file mode 100644
index 0000000..8c6e9de
--- /dev/null
+++ b/web/frontend/src/generic/plots/RooflineLegacy.svelte
@@ -0,0 +1,384 @@
+
+
+
+
+{#if data != null}
+
+{:else}
+ Cannot render roofline: No data!
+{/if}
+
diff --git a/web/frontend/src/generic/select/HistogramSelection.svelte b/web/frontend/src/generic/select/HistogramSelection.svelte
index 0468efd..a424ef4 100644
--- a/web/frontend/src/generic/select/HistogramSelection.svelte
+++ b/web/frontend/src/generic/select/HistogramSelection.svelte
@@ -3,8 +3,9 @@
Properties:
- `cluster String`: Currently selected cluster
- - `selectedHistograms [String]`: The currently selected metrics to display as histogram
- `isOpen Bool`: Is selection opened [Bindable]
+ - `configName String`: The config id string to be updated in database on selection change
+ - `presetSelectedHistograms [String]`: The currently selected metrics to display as histogram
- `applyChange Func`: The callback function to apply current selection
-->
@@ -25,6 +26,7 @@
let {
cluster,
isOpen = $bindable(),
+ configName,
presetSelectedHistograms,
applyChange
} = $props();
@@ -67,8 +69,8 @@
applyChange(selectedHistograms)
updateConfiguration({
name: cluster
- ? `user_view_histogramMetrics:${cluster}`
- : "user_view_histogramMetrics",
+ ? `${configName}:${cluster}`
+ : configName,
value: selectedHistograms,
});
}
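
> Note: the database key is now derived from the `configName` prop, so the same selector component can back the user view and the new status dashboards. A sketch of the resulting keys (`user_view_histogramMetrics` is the previously hard-coded value; the second caller is an assumption for illustration):

```js
const name = cluster ? `${configName}:${cluster}` : configName;
// configName = "user_view_histogramMetrics", cluster = "fritz"
//   -> "user_view_histogramMetrics:fritz"
// configName = "status_view_selectedHistograms", cluster = null (assumed caller)
//   -> "status_view_selectedHistograms"
```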
diff --git a/web/frontend/src/generic/select/MetricSelection.svelte b/web/frontend/src/generic/select/MetricSelection.svelte
index 469cc32..d6da4df 100644
--- a/web/frontend/src/generic/select/MetricSelection.svelte
+++ b/web/frontend/src/generic/select/MetricSelection.svelte
@@ -96,9 +96,9 @@
function printAvailability(metric, cluster) {
const avail = globalMetrics.find((gm) => gm.name === metric)?.availability
if (!cluster) {
- return avail.map((av) => av.cluster).join(',')
+ return avail.map((av) => av.cluster).join(', ')
} else {
- return avail.find((av) => av.cluster === cluster).subClusters.join(',')
+ return avail.find((av) => av.cluster === cluster).subClusters.join(', ')
}
}
@@ -208,7 +208,7 @@
/>
{/if}
{metric}
-
+
{printAvailability(metric, cluster)}
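
> Note: both hunks in this file are cosmetic; the separator change only adds a space for readability in the availability list:

```js
["main", "gpu"].join(",");  // "main,gpu"   (before)
["main", "gpu"].join(", "); // "main, gpu"  (after)
```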
diff --git a/web/frontend/src/job/JobRoofline.svelte b/web/frontend/src/job/JobRoofline.svelte
index ae33017..ae962f1 100644
--- a/web/frontend/src/job/JobRoofline.svelte
+++ b/web/frontend/src/job/JobRoofline.svelte
@@ -19,7 +19,7 @@
import {
transformDataForRoofline,
} from "../generic/utils.js";
- import Roofline from "../generic/plots/Roofline.svelte";
+ import Roofline from "../generic/plots/RooflineLegacy.svelte";
/* Svelte 5 Props */
let {
diff --git a/web/frontend/src/status.entrypoint.js b/web/frontend/src/status.entrypoint.js
index 3e45cb7..c3407c1 100644
--- a/web/frontend/src/status.entrypoint.js
+++ b/web/frontend/src/status.entrypoint.js
@@ -5,7 +5,7 @@ import Status from './Status.root.svelte'
mount(Status, {
target: document.getElementById('svelte-app'),
props: {
- cluster: infos.cluster,
+ presetCluster: infos.cluster,
},
context: new Map([
['cc-config', clusterCockpitConfig]
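
> Note: the rename follows the `preset*` convention used across this Svelte 5 migration: values coming from the server-rendered page are passed as presets, and the component keeps its own mutable state derived from them. A hypothetical sketch of the receiving side (names beyond `presetCluster` are assumptions, not from this patch):

```js
// Inside Status.root.svelte (sketch):
let { presetCluster } = $props();
let cluster = $state(presetCluster); // locally mutable copy of the preset
```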
diff --git a/web/frontend/src/status/StatisticsDash.svelte b/web/frontend/src/status/StatisticsDash.svelte
new file mode 100644
index 0000000..8523c80
--- /dev/null
+++ b/web/frontend/src/status/StatisticsDash.svelte
@@ -0,0 +1,159 @@
+
+
+
+
+
+
+
+
+
+
+ {
+ from = new Date(Date.now() - (30 * 24 * 60 * 60 * 1000)); // Triggers GQL
+ to = new Date(Date.now());
+ }}
+ />
+
+
+
+
+
+ {#if $initq.fetching || $metricStatusQuery.fetching}
+
+ {:else if $initq.error}
+ {$initq.error.message}
+ {:else}
+
+ {/if}
+
+
+{#if $metricStatusQuery.error}
+
+
+ {$metricStatusQuery.error.message}
+
+
+{/if}
+
+{#if $initq.data && $metricStatusQuery.data}
+
+ {#if selectedHistograms}
+
+ {#snippet gridContent(item)}
+
+ {/snippet}
+
+ {#key $metricStatusQuery.data.jobsStatistics[0].histMetrics}
+
+ {/key}
+ {/if}
+{/if}
+
+ {
+ selectedHistograms = [...newSelection];
+ }}
+/>
diff --git a/web/frontend/src/status/StatusDash.svelte b/web/frontend/src/status/StatusDash.svelte
new file mode 100644
index 0000000..280b04b
--- /dev/null
+++ b/web/frontend/src/status/StatusDash.svelte
@@ -0,0 +1,580 @@
+
+
+
+
+
+
+
+ {
+ from = new Date(Date.now() - 5 * 60 * 1000);
+ to = new Date(Date.now());
+ }}
+ />
+
+
+
+
+
+
+{#if $initq.data && $nodesStateCounts.data}
+
+
+
+ {#key refinedStateData}
+
+ {cluster.charAt(0).toUpperCase() + cluster.slice(1)} Node States
+
+ sd.count,
+ )}
+ entities={refinedStateData.map(
+ (sd) => sd.state,
+ )}
+ />
+ {/key}
+
+
+
+ {#key refinedStateData}
+
+
+ |
+ Current State |
+ Nodes |
+
+ {#each refinedStateData as sd, i}
+
+ |
+ {sd.state} |
+ {sd.count} |
+
+ {/each}
+
+ {/key}
+
+
+
+
+ {#key refinedHealthData}
+
+ {cluster.charAt(0).toUpperCase() + cluster.slice(1)} Node Health
+
+ sd.count,
+ )}
+ entities={refinedHealthData.map(
+ (sd) => sd.state,
+ )}
+ />
+ {/key}
+
+
+
+ {#key refinedHealthData}
+
+
+ |
+ Current Health |
+ Nodes |
+
+ {#each refinedHealthData as hd, i}
+
+ |
+ {hd.state} |
+ {hd.count} |
+
+ {/each}
+
+ {/key}
+
+
+{/if}
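
> Note: each pie and its neighboring table deliberately read the same refined rows (`refinedStateData` / `refinedHealthData`), so legend, counts, and chart cannot drift apart. The pie receives the rows as two parallel arrays, as passed above:

```js
quantities = refinedStateData.map((sd) => sd.count);
entities   = refinedStateData.map((sd) => sd.state);
```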
+
+
+
+{#if $initq.data && $statusQuery.data}
+ {#each $initq.data.clusters.find((c) => c.name == cluster).subClusters as subCluster, i}
+
+
+
+
+ SubCluster "{subCluster.name}"
+ {subCluster.processorType}
+
+
+
+
+ {runningJobs[subCluster.name]} Running Jobs |
+ {activeUsers[subCluster.name]} Active Users |
+
+
+
+
+ Flop Rate (Any)
+ |
+
+ Memory BW Rate
+ |
+
+
+
+ {flopRate[subCluster.name]}
+ {flopRateUnitPrefix[subCluster.name]}{flopRateUnitBase[subCluster.name]}
+ |
+
+ {memBwRate[subCluster.name]}
+ {memBwRateUnitPrefix[subCluster.name]}{memBwRateUnitBase[subCluster.name]}
+ |
+
+
+
+ Allocated Nodes |
+ |
+ {allocatedNodes[subCluster.name]} / {subCluster.numberOfNodes}
+ Nodes |
+
+ {#if totalAccs[subCluster.name] !== null}
+
+ Allocated Accelerators |
+ |
+ {allocatedAccs[subCluster.name]} / {totalAccs[subCluster.name]}
+ Accelerators |
+
+ {/if}
+
+
+
+
+
+
+ {#key $statusQuery?.data?.nodeMetrics}
+ data.subCluster == subCluster.name,
+ )
+ )}
+ nodesData={transformNodesStatsToInfo($statusQuery?.data?.nodeMetrics.filter(
+ (data) => data.subCluster == subCluster.name,
+ )
+ )}
+ />
+ {/key}
+
+
+
+
+ {#key $statusQuery?.data?.jobsMetricStats}
+ data.subCluster == subCluster.name,
+ )
+ )}
+ jobsData={transformJobsStatsToInfo($statusQuery?.data?.jobsMetricStats.filter(
+ (data) => data.subCluster == subCluster.name,
+ )
+ )}
+ />
+ {/key}
+
+
+
+ {/each}
+{:else}
+ Cannot render status rooflines: No data!
+{/if}
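
> Note: the card titles capitalize the cluster name inline; if this dashboard grows, the expression is a candidate for a small shared helper with the same semantics:

```js
// equivalent to the inline expression used in both card headers
function capitalize(s) {
  return s.charAt(0).toUpperCase() + s.slice(1);
}
capitalize("fritz"); // "Fritz"
```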
diff --git a/web/frontend/src/status/UsageDash.svelte b/web/frontend/src/status/UsageDash.svelte
new file mode 100644
index 0000000..16575e4
--- /dev/null
+++ b/web/frontend/src/status/UsageDash.svelte
@@ -0,0 +1,547 @@
+
+
+
+
+
+
+
+
+
+
+
+
+ Duration Bin Size
+
+
+ {#each durationBinOptions as dbin}
+
+ {/each}
+
+
+
+
+ {
+ from = new Date(Date.now() - (30 * 24 * 60 * 60 * 1000)); // Triggers GQL
+ to = new Date(Date.now());
+ }}
+ />
+
+
+
+
+
+
+{#if $topJobsQuery.fetching || $nodeStatusQuery.fetching}
+
+{:else if $topJobsQuery.data && $nodeStatusQuery.data}
+
+
+ {#key $nodeStatusQuery.data.jobsStatistics[0].histDuration}
+
+ {/key}
+
+
+
+
+ Top Users: Jobs
+
+ tu['totalJobs'],
+ )}
+ entities={$topJobsQuery.data.topUser.map((tu) => scrambleNames ? scramble(tu.id) : tu.id)}
+ />
+
+
+
+
+
+ |
+ User |
+ Jobs |
+
+ {#each $topJobsQuery.data.topUser as tu, i}
+
+ |
+
+ {scrambleNames ? scramble(tu.id) : tu.id}
+
+ |
+ {#if tu?.name}
+ {scrambleNames ? scramble(tu.name) : tu.name}
+ {/if}
+ {tu['totalJobs']} |
+
+ {/each}
+
+
+
+
+
+ Top Projects: Jobs
+
+ tp['totalJobs'],
+ )}
+ entities={$topJobsQuery.data.topProjects.map((tp) => scrambleNames ? scramble(tp.id) : tp.id)}
+ />
+
+
+
+
+
+{:else}
+ Cannot render job status charts: No data!
+{/if}
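
> Note: the three "top" sections in this file repeat one pie-plus-table pattern and differ only in the source query and weight key (`totalJobs`, `totalNodes`, `totalAccs`). A hedged sketch of the shared shape (the helper name is illustrative, not in the source; `scramble`/`scrambleNames` come from `generic/utils.js`):

```js
const topToPie = (rows, key) => ({
  quantities: rows.map((r) => r[key]),
  entities: rows.map((r) => (scrambleNames ? scramble(r.id) : r.id)),
});
// topToPie($topJobsQuery.data.topUser, "totalJobs")
// topToPie($topNodesQuery.data.topProjects, "totalNodes")
```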
+
+
+
+
+{#if $topNodesQuery.fetching || $nodeStatusQuery.fetching}
+
+{:else if $topNodesQuery.data && $nodeStatusQuery.data}
+
+
+
+
+
+
+
+ Top Users: Nodes
+
+ tu['totalNodes'],
+ )}
+ entities={$topNodesQuery.data.topUser.map((tu) => scrambleNames ? scramble(tu.id) : tu.id)}
+ />
+
+
+
+
+
+ |
+ User |
+ Nodes |
+
+ {#each $topNodesQuery.data.topUser as tu, i}
+
+ |
+
+ {scrambleNames ? scramble(tu.id) : tu.id}
+
+ |
+ {#if tu?.name}
+ {scrambleNames ? scramble(tu.name) : tu.name}
+ {/if}
+ {tu['totalNodes']} |
+
+ {/each}
+
+
+
+
+
+ Top Projects: Nodes
+
+ tp['totalNodes'],
+ )}
+ entities={$topNodesQuery.data.topProjects.map((tp) => scrambleNames ? scramble(tp.id) : tp.id)}
+ />
+
+
+
+
+
+{:else}
+ Cannot render node status charts: No data!
+{/if}
+
+
+
+
+{#if $topAccsQuery.fetching || $nodeStatusQuery.fetching}
+
+{:else if $topAccsQuery.data && $nodeStatusQuery.data}
+
+
+
+
+
+
+
+ Top Users: GPUs
+
+ tu['totalAccs'],
+ )}
+ entities={$topAccsQuery.data.topUser.map((tu) => scrambleNames ? scramble(tu.id) : tu.id)}
+ />
+
+
+
+
+
+ |
+ User |
+ GPUs |
+
+ {#each $topAccsQuery.data.topUser as tu, i}
+
+ |
+
+ {scrambleNames ? scramble(tu.id) : tu.id}
+
+ |
+ {#if tu?.name}
+ {scrambleNames ? scramble(tu.name) : tu.name}
+ {/if}
+ {tu['totalAccs']} |
+
+ {/each}
+
+
+
+
+
+ Top Projects: GPUs
+
+ tp['totalAccs'],
+ )}
+ entities={$topAccsQuery.data.topProjects.map((tp) => scrambleNames ? scramble(tp.id) : tp.id)}
+ />
+
+
+
+
+
+{:else}
+ Cannot render accelerator status charts: No data!
+{/if}
\ No newline at end of file
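
> Note: both new history dashboards (`StatisticsDash`, `UsageDash`) use the same 30-day preset; reassigning `from`/`to` with fresh `Date` objects is what re-fires the queries, as the `// Triggers GQL` comments indicate:

```js
const DAY = 24 * 60 * 60 * 1000;
from = new Date(Date.now() - 30 * DAY); // new object identity -> query refetch
to = new Date(Date.now());
```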