Merge branch 'dev' into metricstore

Jan Eitzinger (committed by GitHub), 2025-09-10 09:14:50 +02:00
26 changed files with 2836 additions and 945 deletions

View File

@@ -113,6 +113,7 @@ func (api *RestApi) getJobs(rw http.ResponseWriter, r *http.Request) {
for key, vals := range r.URL.Query() {
switch key {
// TODO: add project filter
case "state":
for _, s := range vals {
state := schema.JobState(s)
@@ -125,7 +126,7 @@ func (api *RestApi) getJobs(rw http.ResponseWriter, r *http.Request) {
}
case "cluster":
filter.Cluster = &model.StringInput{Eq: &vals[0]}
case "start-time":
case "start-time": // ?startTime=1753707480-1754053139
st := strings.Split(vals[0], "-")
if len(st) != 2 {
handleError(fmt.Errorf("invalid query parameter value: startTime"),
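For orientation, a client-side sketch (not from the commit) of how the filter keys handled in this switch might be passed. Only the query keys (state, cluster, start-time) and the unix-seconds "from-to" format come from the hunk above; the endpoint path, host, and token are placeholders.

package main

import (
	"fmt"
	"net/http"
	"net/url"
)

func main() {
	// Query keys mirror the switch cases above; the values are invented.
	q := url.Values{}
	q.Set("state", "running")
	q.Set("cluster", "testcluster")              // hypothetical cluster name
	q.Set("start-time", "1753707480-1754053139") // unix-seconds range "from-to"

	// Base URL and auth header are placeholders, not taken from the commit.
	req, err := http.NewRequest(http.MethodGet, "https://ccbackend.example.org/api/jobs/?"+q.Encode(), nil)
	if err != nil {
		panic(err)
	}
	req.Header.Set("Authorization", "Bearer <JWT>")
	fmt.Println(req.URL.String())
}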

View File

@@ -402,7 +402,7 @@ func (auth *Authentication) AuthUserApi(
return
}
case len(user.Roles) >= 2:
if user.HasRole(schema.RoleApi) && user.HasAnyRole([]schema.Role{schema.RoleUser, schema.RoleManager, schema.RoleAdmin}) {
if user.HasRole(schema.RoleApi) && user.HasAnyRole([]schema.Role{schema.RoleUser, schema.RoleManager, schema.RoleSupport, schema.RoleAdmin}) {
ctx := context.WithValue(r.Context(), repository.ContextUserKey, user)
onsuccess.ServeHTTP(rw, r.WithContext(ctx))
return
@@ -530,6 +530,7 @@ func securedCheck(user *schema.User, r *http.Request) error {
IPAddress = r.RemoteAddr
}
// FIXME: IPV6 not handled
if strings.Contains(IPAddress, ":") {
IPAddress = strings.Split(IPAddress, ":")[0]
}
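One possible direction for the FIXME above, shown as a sketch rather than what the commit implements: net.SplitHostPort from the Go standard library also copes with bracketed IPv6 remote addresses, unlike splitting on the first ":".

package main

import (
	"fmt"
	"net"
)

// hostOnly strips the port from an http.Request RemoteAddr value.
// net.SplitHostPort handles both "192.168.0.1:8080" and "[::1]:54321".
func hostOnly(remoteAddr string) string {
	host, _, err := net.SplitHostPort(remoteAddr)
	if err != nil {
		// No port present (or unparsable): keep the original value.
		return remoteAddr
	}
	return host
}

func main() {
	fmt.Println(hostOnly("192.168.0.1:8080")) // 192.168.0.1
	fmt.Println(hostOnly("[::1]:54321"))      // ::1
}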

View File

@@ -202,6 +202,7 @@ type ComplexityRoot struct {
TotalJobs func(childComplexity int) int
TotalNodeHours func(childComplexity int) int
TotalNodes func(childComplexity int) int
TotalUsers func(childComplexity int) int
TotalWalltime func(childComplexity int) int
}
@@ -277,6 +278,7 @@ type ComplexityRoot struct {
ID func(childComplexity int) int
MetaData func(childComplexity int) int
NodeState func(childComplexity int) int
RunningJobs func(childComplexity int) int
SubCluster func(childComplexity int) int
}
@@ -291,7 +293,7 @@ type ComplexityRoot struct {
Items func(childComplexity int) int
}
NodeStats struct {
NodeStates struct {
Count func(childComplexity int) int
State func(childComplexity int) int
}
@@ -319,7 +321,7 @@ type ComplexityRoot struct {
Node func(childComplexity int, id string) int
NodeMetrics func(childComplexity int, cluster string, nodes []string, scopes []schema.MetricScope, metrics []string, from time.Time, to time.Time) int
NodeMetricsList func(childComplexity int, cluster string, subCluster string, nodeFilter string, scopes []schema.MetricScope, metrics []string, from time.Time, to time.Time, page *model.PageRequest, resolution *int) int
NodeStats func(childComplexity int, filter []*model.NodeFilter) int
NodeStates func(childComplexity int, filter []*model.NodeFilter) int
Nodes func(childComplexity int, filter []*model.NodeFilter, order *model.OrderByInput) int
RooflineHeatmap func(childComplexity int, filter []*model.JobFilter, rows int, cols int, minX float64, minY float64, maxX float64, maxY float64) int
ScopedJobStats func(childComplexity int, id string, metrics []string, scopes []schema.MetricScope) int
@@ -445,6 +447,7 @@ type MutationResolver interface {
UpdateConfiguration(ctx context.Context, name string, value string) (*string, error)
}
type NodeResolver interface {
RunningJobs(ctx context.Context, obj *schema.Node) (int, error)
NodeState(ctx context.Context, obj *schema.Node) (string, error)
HealthState(ctx context.Context, obj *schema.Node) (schema.NodeState, error)
MetaData(ctx context.Context, obj *schema.Node) (any, error)
@@ -457,7 +460,7 @@ type QueryResolver interface {
AllocatedNodes(ctx context.Context, cluster string) ([]*model.Count, error)
Node(ctx context.Context, id string) (*schema.Node, error)
Nodes(ctx context.Context, filter []*model.NodeFilter, order *model.OrderByInput) (*model.NodeStateResultList, error)
NodeStats(ctx context.Context, filter []*model.NodeFilter) ([]*model.NodeStats, error)
NodeStates(ctx context.Context, filter []*model.NodeFilter) ([]*model.NodeStates, error)
Job(ctx context.Context, id string) (*schema.Job, error)
JobMetrics(ctx context.Context, id string, metrics []string, scopes []schema.MetricScope, resolution *int) ([]*model.JobMetricWithName, error)
JobStats(ctx context.Context, id string, metrics []string) ([]*model.NamedStats, error)
@@ -1165,6 +1168,13 @@ func (e *executableSchema) Complexity(ctx context.Context, typeName, field strin
return e.complexity.JobsStatistics.TotalNodes(childComplexity), true
case "JobsStatistics.totalUsers":
if e.complexity.JobsStatistics.TotalUsers == nil {
break
}
return e.complexity.JobsStatistics.TotalUsers(childComplexity), true
case "JobsStatistics.totalWalltime":
if e.complexity.JobsStatistics.TotalWalltime == nil {
break
@@ -1475,7 +1485,7 @@ func (e *executableSchema) Complexity(ctx context.Context, typeName, field strin
return e.complexity.Node.Cluster(childComplexity), true
case "Node.HealthState":
case "Node.healthState":
if e.complexity.Node.HealthState == nil {
break
}
@@ -1510,6 +1520,13 @@ func (e *executableSchema) Complexity(ctx context.Context, typeName, field strin
return e.complexity.Node.NodeState(childComplexity), true
case "Node.runningJobs":
if e.complexity.Node.RunningJobs == nil {
break
}
return e.complexity.Node.RunningJobs(childComplexity), true
case "Node.subCluster":
if e.complexity.Node.SubCluster == nil {
break
@@ -1552,19 +1569,19 @@ func (e *executableSchema) Complexity(ctx context.Context, typeName, field strin
return e.complexity.NodeStateResultList.Items(childComplexity), true
case "NodeStats.count":
if e.complexity.NodeStats.Count == nil {
case "NodeStates.count":
if e.complexity.NodeStates.Count == nil {
break
}
return e.complexity.NodeStats.Count(childComplexity), true
return e.complexity.NodeStates.Count(childComplexity), true
case "NodeStats.state":
if e.complexity.NodeStats.State == nil {
case "NodeStates.state":
if e.complexity.NodeStates.State == nil {
break
}
return e.complexity.NodeStats.State(childComplexity), true
return e.complexity.NodeStates.State(childComplexity), true
case "NodesResultList.count":
if e.complexity.NodesResultList.Count == nil {
@@ -1754,17 +1771,17 @@ func (e *executableSchema) Complexity(ctx context.Context, typeName, field strin
return e.complexity.Query.NodeMetricsList(childComplexity, args["cluster"].(string), args["subCluster"].(string), args["nodeFilter"].(string), args["scopes"].([]schema.MetricScope), args["metrics"].([]string), args["from"].(time.Time), args["to"].(time.Time), args["page"].(*model.PageRequest), args["resolution"].(*int)), true
case "Query.nodeStats":
if e.complexity.Query.NodeStats == nil {
case "Query.nodeStates":
if e.complexity.Query.NodeStates == nil {
break
}
args, err := ec.field_Query_nodeStats_args(ctx, rawArgs)
args, err := ec.field_Query_nodeStates_args(ctx, rawArgs)
if err != nil {
return 0, false
}
return e.complexity.Query.NodeStats(childComplexity, args["filter"].([]*model.NodeFilter)), true
return e.complexity.Query.NodeStates(childComplexity, args["filter"].([]*model.NodeFilter)), true
case "Query.nodes":
if e.complexity.Query.Nodes == nil {
@@ -2334,12 +2351,13 @@ type Node {
hostname: String!
cluster: String!
subCluster: String!
runningJobs: Int!
nodeState: NodeState!
HealthState: MonitoringState!
healthState: MonitoringState!
metaData: Any
}
type NodeStats {
type NodeStates {
state: String!
count: Int!
}
@@ -2558,10 +2576,12 @@ enum Aggregate {
USER
PROJECT
CLUSTER
SUBCLUSTER
}
enum SortByAggregate {
TOTALWALLTIME
TOTALJOBS
TOTALUSERS
TOTALNODES
TOTALNODEHOURS
TOTALCORES
@@ -2622,9 +2642,10 @@ type Query {
user(username: String!): User
allocatedNodes(cluster: String!): [Count!]!
## Node Queries New
node(id: ID!): Node
nodes(filter: [NodeFilter!], order: OrderByInput): NodeStateResultList!
nodeStats(filter: [NodeFilter!]): [NodeStats!]!
nodeStates(filter: [NodeFilter!]): [NodeStates!]!
job(id: ID!): Job
jobMetrics(
@@ -2679,6 +2700,7 @@ type Query {
from: Time!
to: Time!
): [NodeMetrics!]!
nodeMetricsList(
cluster: String!
subCluster: String!
@@ -2715,6 +2737,7 @@ type TimeRangeOutput {
input NodeFilter {
hostname: StringInput
cluster: StringInput
subcluster: StringInput
nodeState: NodeState
healthState: MonitoringState
}
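For orientation, a sketch (not from the commit) of a GraphQL document exercising the renamed nodeStates query, the new subcluster filter, and the new Node.runningJobs/healthState fields. The field and argument names come from the schema hunks above; the cluster and subcluster values are invented.

package main

import "fmt"

// Field names follow the schema above; the concrete filter values
// in the variables document are hypothetical.
const nodeStatesQuery = `
query ExampleNodeStates($filter: [NodeFilter!]) {
  nodeStates(filter: $filter) {
    state
    count
  }
  nodes(filter: $filter) {
    items {
      hostname
      subCluster
      runningJobs
      nodeState
      healthState
    }
    count
  }
}`

const nodeStatesVariables = `{
  "filter": [
    { "cluster": { "eq": "testcluster" }, "subcluster": { "eq": "main" } }
  ]
}`

func main() {
	fmt.Println(nodeStatesQuery)
	fmt.Println(nodeStatesVariables)
}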
@@ -2819,11 +2842,12 @@ type MetricHistoPoint {
}
type JobsStatistics {
id: ID! # If ` + "`" + `groupBy` + "`" + ` was used, ID of the user/project/cluster
id: ID! # If ` + "`" + `groupBy` + "`" + ` was used, ID of the user/project/cluster/subcluster
name: String! # if User-Statistics: Given Name of Account (ID) Owner
totalUsers: Int! # if *not* User-Statistics: Number of active users (based on running jobs)
totalJobs: Int! # Number of jobs
runningJobs: Int! # Number of running jobs
shortJobs: Int! # Number of jobs with a duration of less than duration
shortJobs: Int! # Number of jobs with a duration of less than config'd ShortRunningJobsDuration
totalWalltime: Int! # Sum of the duration of all matched jobs in hours
totalNodes: Int! # Sum of the nodes of all matched jobs
totalNodeHours: Int! # Sum of the node hours of all matched jobs
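Similarly, a sketch (not from the commit) of a jobsStatistics request using the new SUBCLUSTER aggregate, the TOTALUSERS sort key, and the totalUsers field. The enum values and field names come from the schema hunks above; the sortBy/groupBy argument names are an assumption based on the resolver signature shown later in this diff.

package main

import "fmt"

// groupBy/sortBy argument names are assumed; SUBCLUSTER, TOTALUSERS and
// totalUsers are the values added in this commit.
const subclusterStatsQuery = `
query ExampleSubclusterStats {
  jobsStatistics(groupBy: SUBCLUSTER, sortBy: TOTALUSERS) {
    id
    totalUsers
    totalJobs
    totalWalltime
  }
}`

func main() {
	fmt.Println(subclusterStatsQuery)
}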
@@ -3197,7 +3221,7 @@ func (ec *executionContext) field_Query_nodeMetrics_args(ctx context.Context, ra
return args, nil
}
func (ec *executionContext) field_Query_nodeStats_args(ctx context.Context, rawArgs map[string]any) (map[string]any, error) {
func (ec *executionContext) field_Query_nodeStates_args(ctx context.Context, rawArgs map[string]any) (map[string]any, error) {
var err error
args := map[string]any{}
arg0, err := graphql.ProcessArgField(ctx, rawArgs, "filter", ec.unmarshalONodeFilter2ᚕᚖgithubᚗcomᚋClusterCockpitᚋccᚑbackendᚋinternalᚋgraphᚋmodelᚐNodeFilterᚄ)
@@ -7125,6 +7149,50 @@ func (ec *executionContext) fieldContext_JobsStatistics_name(_ context.Context,
return fc, nil
}
func (ec *executionContext) _JobsStatistics_totalUsers(ctx context.Context, field graphql.CollectedField, obj *model.JobsStatistics) (ret graphql.Marshaler) {
fc, err := ec.fieldContext_JobsStatistics_totalUsers(ctx, field)
if err != nil {
return graphql.Null
}
ctx = graphql.WithFieldContext(ctx, fc)
defer func() {
if r := recover(); r != nil {
ec.Error(ctx, ec.Recover(ctx, r))
ret = graphql.Null
}
}()
resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (any, error) {
ctx = rctx // use context from middleware stack in children
return obj.TotalUsers, nil
})
if err != nil {
ec.Error(ctx, err)
return graphql.Null
}
if resTmp == nil {
if !graphql.HasFieldError(ctx, fc) {
ec.Errorf(ctx, "must not be null")
}
return graphql.Null
}
res := resTmp.(int)
fc.Result = res
return ec.marshalNInt2int(ctx, field.Selections, res)
}
func (ec *executionContext) fieldContext_JobsStatistics_totalUsers(_ context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) {
fc = &graphql.FieldContext{
Object: "JobsStatistics",
Field: field,
IsMethod: false,
IsResolver: false,
Child: func(ctx context.Context, field graphql.CollectedField) (*graphql.FieldContext, error) {
return nil, errors.New("field of type Int does not have child fields")
},
}
return fc, nil
}
func (ec *executionContext) _JobsStatistics_totalJobs(ctx context.Context, field graphql.CollectedField, obj *model.JobsStatistics) (ret graphql.Marshaler) {
fc, err := ec.fieldContext_JobsStatistics_totalJobs(ctx, field)
if err != nil {
@@ -9788,6 +9856,50 @@ func (ec *executionContext) fieldContext_Node_subCluster(_ context.Context, fiel
return fc, nil
}
func (ec *executionContext) _Node_runningJobs(ctx context.Context, field graphql.CollectedField, obj *schema.Node) (ret graphql.Marshaler) {
fc, err := ec.fieldContext_Node_runningJobs(ctx, field)
if err != nil {
return graphql.Null
}
ctx = graphql.WithFieldContext(ctx, fc)
defer func() {
if r := recover(); r != nil {
ec.Error(ctx, ec.Recover(ctx, r))
ret = graphql.Null
}
}()
resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (any, error) {
ctx = rctx // use context from middleware stack in children
return ec.resolvers.Node().RunningJobs(rctx, obj)
})
if err != nil {
ec.Error(ctx, err)
return graphql.Null
}
if resTmp == nil {
if !graphql.HasFieldError(ctx, fc) {
ec.Errorf(ctx, "must not be null")
}
return graphql.Null
}
res := resTmp.(int)
fc.Result = res
return ec.marshalNInt2int(ctx, field.Selections, res)
}
func (ec *executionContext) fieldContext_Node_runningJobs(_ context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) {
fc = &graphql.FieldContext{
Object: "Node",
Field: field,
IsMethod: true,
IsResolver: true,
Child: func(ctx context.Context, field graphql.CollectedField) (*graphql.FieldContext, error) {
return nil, errors.New("field of type Int does not have child fields")
},
}
return fc, nil
}
func (ec *executionContext) _Node_nodeState(ctx context.Context, field graphql.CollectedField, obj *schema.Node) (ret graphql.Marshaler) {
fc, err := ec.fieldContext_Node_nodeState(ctx, field)
if err != nil {
@@ -9832,8 +9944,8 @@ func (ec *executionContext) fieldContext_Node_nodeState(_ context.Context, field
return fc, nil
}
func (ec *executionContext) _Node_HealthState(ctx context.Context, field graphql.CollectedField, obj *schema.Node) (ret graphql.Marshaler) {
fc, err := ec.fieldContext_Node_HealthState(ctx, field)
func (ec *executionContext) _Node_healthState(ctx context.Context, field graphql.CollectedField, obj *schema.Node) (ret graphql.Marshaler) {
fc, err := ec.fieldContext_Node_healthState(ctx, field)
if err != nil {
return graphql.Null
}
@@ -9863,7 +9975,7 @@ func (ec *executionContext) _Node_HealthState(ctx context.Context, field graphql
return ec.marshalNMonitoringState2githubᚗcomᚋClusterCockpitᚋccᚑlibᚋschemaᚐNodeState(ctx, field.Selections, res)
}
func (ec *executionContext) fieldContext_Node_HealthState(_ context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) {
func (ec *executionContext) fieldContext_Node_healthState(_ context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) {
fc = &graphql.FieldContext{
Object: "Node",
Field: field,
@@ -10104,10 +10216,12 @@ func (ec *executionContext) fieldContext_NodeStateResultList_items(_ context.Con
return ec.fieldContext_Node_cluster(ctx, field)
case "subCluster":
return ec.fieldContext_Node_subCluster(ctx, field)
case "runningJobs":
return ec.fieldContext_Node_runningJobs(ctx, field)
case "nodeState":
return ec.fieldContext_Node_nodeState(ctx, field)
case "HealthState":
return ec.fieldContext_Node_HealthState(ctx, field)
case "healthState":
return ec.fieldContext_Node_healthState(ctx, field)
case "metaData":
return ec.fieldContext_Node_metaData(ctx, field)
}
@@ -10158,8 +10272,8 @@ func (ec *executionContext) fieldContext_NodeStateResultList_count(_ context.Con
return fc, nil
}
func (ec *executionContext) _NodeStats_state(ctx context.Context, field graphql.CollectedField, obj *model.NodeStats) (ret graphql.Marshaler) {
fc, err := ec.fieldContext_NodeStats_state(ctx, field)
func (ec *executionContext) _NodeStates_state(ctx context.Context, field graphql.CollectedField, obj *model.NodeStates) (ret graphql.Marshaler) {
fc, err := ec.fieldContext_NodeStates_state(ctx, field)
if err != nil {
return graphql.Null
}
@@ -10189,9 +10303,9 @@ func (ec *executionContext) _NodeStats_state(ctx context.Context, field graphql.
return ec.marshalNString2string(ctx, field.Selections, res)
}
func (ec *executionContext) fieldContext_NodeStats_state(_ context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) {
func (ec *executionContext) fieldContext_NodeStates_state(_ context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) {
fc = &graphql.FieldContext{
Object: "NodeStats",
Object: "NodeStates",
Field: field,
IsMethod: false,
IsResolver: false,
@@ -10202,8 +10316,8 @@ func (ec *executionContext) fieldContext_NodeStats_state(_ context.Context, fiel
return fc, nil
}
func (ec *executionContext) _NodeStats_count(ctx context.Context, field graphql.CollectedField, obj *model.NodeStats) (ret graphql.Marshaler) {
fc, err := ec.fieldContext_NodeStats_count(ctx, field)
func (ec *executionContext) _NodeStates_count(ctx context.Context, field graphql.CollectedField, obj *model.NodeStates) (ret graphql.Marshaler) {
fc, err := ec.fieldContext_NodeStates_count(ctx, field)
if err != nil {
return graphql.Null
}
@@ -10233,9 +10347,9 @@ func (ec *executionContext) _NodeStats_count(ctx context.Context, field graphql.
return ec.marshalNInt2int(ctx, field.Selections, res)
}
func (ec *executionContext) fieldContext_NodeStats_count(_ context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) {
func (ec *executionContext) fieldContext_NodeStates_count(_ context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) {
fc = &graphql.FieldContext{
Object: "NodeStats",
Object: "NodeStates",
Field: field,
IsMethod: false,
IsResolver: false,
@@ -10830,10 +10944,12 @@ func (ec *executionContext) fieldContext_Query_node(ctx context.Context, field g
return ec.fieldContext_Node_cluster(ctx, field)
case "subCluster":
return ec.fieldContext_Node_subCluster(ctx, field)
case "runningJobs":
return ec.fieldContext_Node_runningJobs(ctx, field)
case "nodeState":
return ec.fieldContext_Node_nodeState(ctx, field)
case "HealthState":
return ec.fieldContext_Node_HealthState(ctx, field)
case "healthState":
return ec.fieldContext_Node_healthState(ctx, field)
case "metaData":
return ec.fieldContext_Node_metaData(ctx, field)
}
@@ -10915,8 +11031,8 @@ func (ec *executionContext) fieldContext_Query_nodes(ctx context.Context, field
return fc, nil
}
func (ec *executionContext) _Query_nodeStats(ctx context.Context, field graphql.CollectedField) (ret graphql.Marshaler) {
fc, err := ec.fieldContext_Query_nodeStats(ctx, field)
func (ec *executionContext) _Query_nodeStates(ctx context.Context, field graphql.CollectedField) (ret graphql.Marshaler) {
fc, err := ec.fieldContext_Query_nodeStates(ctx, field)
if err != nil {
return graphql.Null
}
@@ -10929,7 +11045,7 @@ func (ec *executionContext) _Query_nodeStats(ctx context.Context, field graphql.
}()
resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (any, error) {
ctx = rctx // use context from middleware stack in children
return ec.resolvers.Query().NodeStats(rctx, fc.Args["filter"].([]*model.NodeFilter))
return ec.resolvers.Query().NodeStates(rctx, fc.Args["filter"].([]*model.NodeFilter))
})
if err != nil {
ec.Error(ctx, err)
@@ -10941,12 +11057,12 @@ func (ec *executionContext) _Query_nodeStats(ctx context.Context, field graphql.
}
return graphql.Null
}
res := resTmp.([]*model.NodeStats)
res := resTmp.([]*model.NodeStates)
fc.Result = res
return ec.marshalNNodeStats2ᚕᚖgithubᚗcomᚋClusterCockpitᚋccᚑbackendᚋinternalᚋgraphᚋmodelᚐNodeStatsᚄ(ctx, field.Selections, res)
return ec.marshalNNodeStates2ᚕᚖgithubᚗcomᚋClusterCockpitᚋccᚑbackendᚋinternalᚋgraphᚋmodelᚐNodeStatesᚄ(ctx, field.Selections, res)
}
func (ec *executionContext) fieldContext_Query_nodeStats(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) {
func (ec *executionContext) fieldContext_Query_nodeStates(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) {
fc = &graphql.FieldContext{
Object: "Query",
Field: field,
@@ -10955,11 +11071,11 @@ func (ec *executionContext) fieldContext_Query_nodeStats(ctx context.Context, fi
Child: func(ctx context.Context, field graphql.CollectedField) (*graphql.FieldContext, error) {
switch field.Name {
case "state":
return ec.fieldContext_NodeStats_state(ctx, field)
return ec.fieldContext_NodeStates_state(ctx, field)
case "count":
return ec.fieldContext_NodeStats_count(ctx, field)
return ec.fieldContext_NodeStates_count(ctx, field)
}
return nil, fmt.Errorf("no field named %q was found under type NodeStats", field.Name)
return nil, fmt.Errorf("no field named %q was found under type NodeStates", field.Name)
},
}
defer func() {
@@ -10969,7 +11085,7 @@ func (ec *executionContext) fieldContext_Query_nodeStats(ctx context.Context, fi
}
}()
ctx = graphql.WithFieldContext(ctx, fc)
if fc.Args, err = ec.field_Query_nodeStats_args(ctx, field.ArgumentMap(ec.Variables)); err != nil {
if fc.Args, err = ec.field_Query_nodeStates_args(ctx, field.ArgumentMap(ec.Variables)); err != nil {
ec.Error(ctx, err)
return fc, err
}
@@ -11379,6 +11495,8 @@ func (ec *executionContext) fieldContext_Query_jobsStatistics(ctx context.Contex
return ec.fieldContext_JobsStatistics_id(ctx, field)
case "name":
return ec.fieldContext_JobsStatistics_name(ctx, field)
case "totalUsers":
return ec.fieldContext_JobsStatistics_totalUsers(ctx, field)
case "totalJobs":
return ec.fieldContext_JobsStatistics_totalJobs(ctx, field)
case "runningJobs":
@@ -16549,7 +16667,7 @@ func (ec *executionContext) unmarshalInputNodeFilter(ctx context.Context, obj an
asMap[k] = v
}
fieldsInOrder := [...]string{"hostname", "cluster", "nodeState", "healthState"}
fieldsInOrder := [...]string{"hostname", "cluster", "subcluster", "nodeState", "healthState"}
for _, k := range fieldsInOrder {
v, ok := asMap[k]
if !ok {
@@ -16570,6 +16688,13 @@ func (ec *executionContext) unmarshalInputNodeFilter(ctx context.Context, obj an
return it, err
}
it.Cluster = data
case "subcluster":
ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("subcluster"))
data, err := ec.unmarshalOStringInput2ᚖgithubᚗcomᚋClusterCockpitᚋccᚑbackendᚋinternalᚋgraphᚋmodelᚐStringInput(ctx, v)
if err != nil {
return it, err
}
it.Subcluster = data
case "nodeState":
ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("nodeState"))
data, err := ec.unmarshalONodeState2ᚖstring(ctx, v)
@@ -17976,6 +18101,11 @@ func (ec *executionContext) _JobsStatistics(ctx context.Context, sel ast.Selecti
if out.Values[i] == graphql.Null {
out.Invalids++
}
case "totalUsers":
out.Values[i] = ec._JobsStatistics_totalUsers(ctx, field, obj)
if out.Values[i] == graphql.Null {
out.Invalids++
}
case "totalJobs":
out.Values[i] = ec._JobsStatistics_totalJobs(ctx, field, obj)
if out.Values[i] == graphql.Null {
@@ -18625,6 +18755,42 @@ func (ec *executionContext) _Node(ctx context.Context, sel ast.SelectionSet, obj
if out.Values[i] == graphql.Null {
atomic.AddUint32(&out.Invalids, 1)
}
case "runningJobs":
field := field
innerFunc := func(ctx context.Context, fs *graphql.FieldSet) (res graphql.Marshaler) {
defer func() {
if r := recover(); r != nil {
ec.Error(ctx, ec.Recover(ctx, r))
}
}()
res = ec._Node_runningJobs(ctx, field, obj)
if res == graphql.Null {
atomic.AddUint32(&fs.Invalids, 1)
}
return res
}
if field.Deferrable != nil {
dfs, ok := deferred[field.Deferrable.Label]
di := 0
if ok {
dfs.AddField(field)
di = len(dfs.Values) - 1
} else {
dfs = graphql.NewFieldSet([]graphql.CollectedField{field})
deferred[field.Deferrable.Label] = dfs
}
dfs.Concurrently(di, func(ctx context.Context) graphql.Marshaler {
return innerFunc(ctx, dfs)
})
// don't run the out.Concurrently() call below
out.Values[i] = graphql.Null
continue
}
out.Concurrently(i, func(ctx context.Context) graphql.Marshaler { return innerFunc(ctx, out) })
case "nodeState":
field := field
@@ -18661,7 +18827,7 @@ func (ec *executionContext) _Node(ctx context.Context, sel ast.SelectionSet, obj
}
out.Concurrently(i, func(ctx context.Context) graphql.Marshaler { return innerFunc(ctx, out) })
case "HealthState":
case "healthState":
field := field
innerFunc := func(ctx context.Context, fs *graphql.FieldSet) (res graphql.Marshaler) {
@@ -18670,7 +18836,7 @@ func (ec *executionContext) _Node(ctx context.Context, sel ast.SelectionSet, obj
ec.Error(ctx, ec.Recover(ctx, r))
}
}()
res = ec._Node_HealthState(ctx, field, obj)
res = ec._Node_healthState(ctx, field, obj)
if res == graphql.Null {
atomic.AddUint32(&fs.Invalids, 1)
}
@@ -18843,24 +19009,24 @@ func (ec *executionContext) _NodeStateResultList(ctx context.Context, sel ast.Se
return out
}
var nodeStatsImplementors = []string{"NodeStats"}
var nodeStatesImplementors = []string{"NodeStates"}
func (ec *executionContext) _NodeStats(ctx context.Context, sel ast.SelectionSet, obj *model.NodeStats) graphql.Marshaler {
fields := graphql.CollectFields(ec.OperationContext, sel, nodeStatsImplementors)
func (ec *executionContext) _NodeStates(ctx context.Context, sel ast.SelectionSet, obj *model.NodeStates) graphql.Marshaler {
fields := graphql.CollectFields(ec.OperationContext, sel, nodeStatesImplementors)
out := graphql.NewFieldSet(fields)
deferred := make(map[string]*graphql.FieldSet)
for i, field := range fields {
switch field.Name {
case "__typename":
out.Values[i] = graphql.MarshalString("NodeStats")
out.Values[i] = graphql.MarshalString("NodeStates")
case "state":
out.Values[i] = ec._NodeStats_state(ctx, field, obj)
out.Values[i] = ec._NodeStates_state(ctx, field, obj)
if out.Values[i] == graphql.Null {
out.Invalids++
}
case "count":
out.Values[i] = ec._NodeStats_count(ctx, field, obj)
out.Values[i] = ec._NodeStates_count(ctx, field, obj)
if out.Values[i] == graphql.Null {
out.Invalids++
}
@@ -19103,7 +19269,7 @@ func (ec *executionContext) _Query(ctx context.Context, sel ast.SelectionSet) gr
}
out.Concurrently(i, func(ctx context.Context) graphql.Marshaler { return rrm(innerCtx) })
case "nodeStats":
case "nodeStates":
field := field
innerFunc := func(ctx context.Context, fs *graphql.FieldSet) (res graphql.Marshaler) {
@@ -19112,7 +19278,7 @@ func (ec *executionContext) _Query(ctx context.Context, sel ast.SelectionSet) gr
ec.Error(ctx, ec.Recover(ctx, r))
}
}()
res = ec._Query_nodeStats(ctx, field)
res = ec._Query_nodeStates(ctx, field)
if res == graphql.Null {
atomic.AddUint32(&fs.Invalids, 1)
}
@@ -21756,7 +21922,7 @@ func (ec *executionContext) marshalNNodeStateResultList2ᚖgithubᚗcomᚋCluste
return ec._NodeStateResultList(ctx, sel, v)
}
func (ec *executionContext) marshalNNodeStats2ᚕᚖgithubᚗcomᚋClusterCockpitᚋccᚑbackendᚋinternalᚋgraphᚋmodelᚐNodeStatsᚄ(ctx context.Context, sel ast.SelectionSet, v []*model.NodeStats) graphql.Marshaler {
func (ec *executionContext) marshalNNodeStates2ᚕᚖgithubᚗcomᚋClusterCockpitᚋccᚑbackendᚋinternalᚋgraphᚋmodelᚐNodeStatesᚄ(ctx context.Context, sel ast.SelectionSet, v []*model.NodeStates) graphql.Marshaler {
ret := make(graphql.Array, len(v))
var wg sync.WaitGroup
isLen1 := len(v) == 1
@@ -21780,7 +21946,7 @@ func (ec *executionContext) marshalNNodeStats2ᚕᚖgithubᚗcomᚋClusterCockpi
if !isLen1 {
defer wg.Done()
}
ret[i] = ec.marshalNNodeStats2ᚖgithubᚗcomᚋClusterCockpitᚋccᚑbackendᚋinternalᚋgraphᚋmodelᚐNodeStats(ctx, sel, v[i])
ret[i] = ec.marshalNNodeStates2ᚖgithubᚗcomᚋClusterCockpitᚋccᚑbackendᚋinternalᚋgraphᚋmodelᚐNodeStates(ctx, sel, v[i])
}
if isLen1 {
f(i)
@@ -21800,14 +21966,14 @@ func (ec *executionContext) marshalNNodeStats2ᚕᚖgithubᚗcomᚋClusterCockpi
return ret
}
func (ec *executionContext) marshalNNodeStats2ᚖgithubᚗcomᚋClusterCockpitᚋccᚑbackendᚋinternalᚋgraphᚋmodelᚐNodeStats(ctx context.Context, sel ast.SelectionSet, v *model.NodeStats) graphql.Marshaler {
func (ec *executionContext) marshalNNodeStates2ᚖgithubᚗcomᚋClusterCockpitᚋccᚑbackendᚋinternalᚋgraphᚋmodelᚐNodeStates(ctx context.Context, sel ast.SelectionSet, v *model.NodeStates) graphql.Marshaler {
if v == nil {
if !graphql.HasFieldError(ctx, graphql.GetFieldContext(ctx)) {
ec.Errorf(ctx, "the requested element is null which the schema does not allow")
}
return graphql.Null
}
return ec._NodeStats(ctx, sel, v)
return ec._NodeStates(ctx, sel, v)
}
func (ec *executionContext) marshalNNodesResultList2githubᚗcomᚋClusterCockpitᚋccᚑbackendᚋinternalᚋgraphᚋmodelᚐNodesResultList(ctx context.Context, sel ast.SelectionSet, v model.NodesResultList) graphql.Marshaler {

View File

@@ -114,6 +114,7 @@ type JobStats struct {
type JobsStatistics struct {
ID string `json:"id"`
Name string `json:"name"`
TotalUsers int `json:"totalUsers"`
TotalJobs int `json:"totalJobs"`
RunningJobs int `json:"runningJobs"`
ShortJobs int `json:"shortJobs"`
@@ -172,6 +173,7 @@ type NamedStatsWithScope struct {
type NodeFilter struct {
Hostname *StringInput `json:"hostname,omitempty"`
Cluster *StringInput `json:"cluster,omitempty"`
Subcluster *StringInput `json:"subcluster,omitempty"`
NodeState *string `json:"nodeState,omitempty"`
HealthState *schema.NodeState `json:"healthState,omitempty"`
}
@@ -187,7 +189,7 @@ type NodeStateResultList struct {
Count *int `json:"count,omitempty"`
}
type NodeStats struct {
type NodeStates struct {
State string `json:"state"`
Count int `json:"count"`
}
@@ -248,20 +250,22 @@ type User struct {
type Aggregate string
const (
AggregateUser Aggregate = "USER"
AggregateProject Aggregate = "PROJECT"
AggregateCluster Aggregate = "CLUSTER"
AggregateUser Aggregate = "USER"
AggregateProject Aggregate = "PROJECT"
AggregateCluster Aggregate = "CLUSTER"
AggregateSubcluster Aggregate = "SUBCLUSTER"
)
var AllAggregate = []Aggregate{
AggregateUser,
AggregateProject,
AggregateCluster,
AggregateSubcluster,
}
func (e Aggregate) IsValid() bool {
switch e {
case AggregateUser, AggregateProject, AggregateCluster:
case AggregateUser, AggregateProject, AggregateCluster, AggregateSubcluster:
return true
}
return false
@@ -307,6 +311,7 @@ type SortByAggregate string
const (
SortByAggregateTotalwalltime SortByAggregate = "TOTALWALLTIME"
SortByAggregateTotaljobs SortByAggregate = "TOTALJOBS"
SortByAggregateTotalusers SortByAggregate = "TOTALUSERS"
SortByAggregateTotalnodes SortByAggregate = "TOTALNODES"
SortByAggregateTotalnodehours SortByAggregate = "TOTALNODEHOURS"
SortByAggregateTotalcores SortByAggregate = "TOTALCORES"
@@ -318,6 +323,7 @@ const (
var AllSortByAggregate = []SortByAggregate{
SortByAggregateTotalwalltime,
SortByAggregateTotaljobs,
SortByAggregateTotalusers,
SortByAggregateTotalnodes,
SortByAggregateTotalnodehours,
SortByAggregateTotalcores,
@@ -328,7 +334,7 @@ var AllSortByAggregate = []SortByAggregate{
func (e SortByAggregate) IsValid() bool {
switch e {
case SortByAggregateTotalwalltime, SortByAggregateTotaljobs, SortByAggregateTotalnodes, SortByAggregateTotalnodehours, SortByAggregateTotalcores, SortByAggregateTotalcorehours, SortByAggregateTotalaccs, SortByAggregateTotalacchours:
case SortByAggregateTotalwalltime, SortByAggregateTotaljobs, SortByAggregateTotalusers, SortByAggregateTotalnodes, SortByAggregateTotalnodehours, SortByAggregateTotalcores, SortByAggregateTotalcorehours, SortByAggregateTotalaccs, SortByAggregateTotalacchours:
return true
}
return false

View File

@@ -305,14 +305,20 @@ func (r *mutationResolver) UpdateConfiguration(ctx context.Context, name string,
return nil, nil
}
// NodeState is the resolver for the nodeState field.
func (r *nodeResolver) NodeState(ctx context.Context, obj *schema.Node) (string, error) {
panic(fmt.Errorf("not implemented: NodeState - nodeState"))
// RunningJobs is the resolver for the runningJobs field.
func (r *nodeResolver) RunningJobs(ctx context.Context, obj *schema.Node) (int, error) {
panic(fmt.Errorf("not implemented: RunningJobs - runningJobs"))
}
// HealthState is the resolver for the HealthState field.
// NodeState is the resolver for the nodeState field.
func (r *nodeResolver) NodeState(ctx context.Context, obj *schema.Node) (string, error) {
return string(obj.NodeState), nil
}
// HealthState is the resolver for the healthState field.
func (r *nodeResolver) HealthState(ctx context.Context, obj *schema.Node) (schema.NodeState, error) {
panic(fmt.Errorf("not implemented: HealthState - HealthState"))
// FIXME: Why is Output of schema.NodeState Type?
panic(fmt.Errorf("not implemented: HealthState - healthState"))
}
// MetaData is the resolver for the metaData field.
@@ -378,9 +384,26 @@ func (r *queryResolver) Nodes(ctx context.Context, filter []*model.NodeFilter, o
return &model.NodeStateResultList{Items: nodes, Count: &count}, err
}
// NodeStats is the resolver for the nodeStats field.
func (r *queryResolver) NodeStats(ctx context.Context, filter []*model.NodeFilter) ([]*model.NodeStats, error) {
panic(fmt.Errorf("not implemented: NodeStats - nodeStats"))
// NodeStates is the resolver for the nodeStates field.
func (r *queryResolver) NodeStates(ctx context.Context, filter []*model.NodeFilter) ([]*model.NodeStates, error) {
repo := repository.GetNodeRepository()
stateCounts, serr := repo.CountNodeStates(ctx, filter)
if serr != nil {
cclog.Warnf("Error while counting nodeStates: %s", serr.Error())
return nil, serr
}
healthCounts, herr := repo.CountHealthStates(ctx, filter)
if herr != nil {
cclog.Warnf("Error while counting healthStates: %s", herr.Error())
return nil, herr
}
allCounts := make([]*model.NodeStates, 0)
allCounts = append(stateCounts, healthCounts...)
return allCounts, nil
}
// Job is the resolver for the job field.
@@ -558,7 +581,7 @@ func (r *queryResolver) JobsStatistics(ctx context.Context, filter []*model.JobF
defaultDurationBins := "1h"
defaultMetricBins := 10
if requireField(ctx, "totalJobs") || requireField(ctx, "totalWalltime") || requireField(ctx, "totalNodes") || requireField(ctx, "totalCores") ||
if requireField(ctx, "totalJobs") || requireField(ctx, "totalUsers") || requireField(ctx, "totalWalltime") || requireField(ctx, "totalNodes") || requireField(ctx, "totalCores") ||
requireField(ctx, "totalAccs") || requireField(ctx, "totalNodeHours") || requireField(ctx, "totalCoreHours") || requireField(ctx, "totalAccHours") {
if groupBy == nil {
stats, err = r.Repo.JobsStats(ctx, filter)

View File

@@ -40,7 +40,7 @@ func InitDB() error {
}
tags := make(map[string]int64)
// Not using log.Print because we want the line to end with `\r` and
// Not using cclog.Print because we want the line to end with `\r` and
// this function is only ever called when a special command line flag
// is passed anyways.
fmt.Printf("%d jobs inserted...\r", 0)

View File

@@ -337,10 +337,10 @@ func (r *JobRepository) FindColumnValue(user *schema.User, searchterm string, ta
// theSql, args, theErr := theQuery.ToSql()
// if theErr != nil {
// log.Warn("Error while converting query to sql")
// cclog.Warn("Error while converting query to sql")
// return "", err
// }
// log.Debugf("SQL query (FindColumnValue): `%s`, args: %#v", theSql, args)
// cclog.Debugf("SQL query (FindColumnValue): `%s`, args: %#v", theSql, args)
err := theQuery.RunWith(r.stmtCache).QueryRow().Scan(&result)

View File

@@ -4,12 +4,13 @@ CREATE TABLE "node" (
hostname VARCHAR(255) NOT NULL,
cluster VARCHAR(255) NOT NULL,
subcluster VARCHAR(255) NOT NULL,
cpus_allocated INTEGER NOT NULL,
cpus_total INTEGER NOT NULL,
memory_allocated INTEGER NOT NULL,
memory_total INTEGER NOT NULL,
gpus_allocated INTEGER NOT NULL,
gpus_total INTEGER NOT NULL,
jobs_running INTEGER DEFAULT 0 NOT NULL,
cpus_allocated INTEGER DEFAULT 0 NOT NULL,
cpus_total INTEGER DEFAULT 0 NOT NULL,
memory_allocated INTEGER DEFAULT 0 NOT NULL,
memory_total INTEGER DEFAULT 0 NOT NULL,
gpus_allocated INTEGER DEFAULT 0 NOT NULL,
gpus_total INTEGER DEFAULT 0 NOT NULL,
node_state VARCHAR(255) NOT NULL
CHECK (node_state IN (
'allocated', 'reserved', 'idle', 'mixed',

View File

@@ -49,6 +49,12 @@ func GetNodeRepository() *NodeRepository {
return nodeRepoInstance
}
var nodeColumns []string = []string{
// "node.id,"
"node.hostname", "node.cluster", "node.subcluster",
"node.node_state", "node.health_state", // "node.meta_data",
}
func (r *NodeRepository) FetchMetadata(node *schema.Node) (map[string]string, error) {
start := time.Now()
cachekey := fmt.Sprintf("metadata:%d", node.ID)
@@ -218,9 +224,9 @@ func (r *NodeRepository) DeleteNode(id int64) error {
func (r *NodeRepository) QueryNodes(
ctx context.Context,
filters []*model.NodeFilter,
order *model.OrderByInput,
order *model.OrderByInput, // Currently unused!
) ([]*schema.Node, error) {
query, qerr := SecurityCheck(ctx, sq.Select(jobColumns...).From("node"))
query, qerr := AccessCheck(ctx, sq.Select(nodeColumns...).From("node"))
if qerr != nil {
return nil, qerr
}
@@ -232,6 +238,9 @@ func (r *NodeRepository) QueryNodes(
if f.Cluster != nil {
query = buildStringCondition("node.cluster", f.Cluster, query)
}
if f.Subcluster != nil {
query = buildStringCondition("node.subcluster", f.Subcluster, query)
}
if f.NodeState != nil {
query = query.Where("node.node_state = ?", f.NodeState)
}
@@ -287,3 +296,123 @@ func (r *NodeRepository) ListNodes(cluster string) ([]*schema.Node, error) {
return nodeList, nil
}
func (r *NodeRepository) CountNodeStates(ctx context.Context, filters []*model.NodeFilter) ([]*model.NodeStates, error) {
query, qerr := AccessCheck(ctx, sq.Select("node_state AS state", "count(*) AS count").From("node"))
if qerr != nil {
return nil, qerr
}
for _, f := range filters {
if f.Hostname != nil {
query = buildStringCondition("node.hostname", f.Hostname, query)
}
if f.Cluster != nil {
query = buildStringCondition("node.cluster", f.Cluster, query)
}
if f.Subcluster != nil {
query = buildStringCondition("node.subcluster", f.Subcluster, query)
}
if f.NodeState != nil {
query = query.Where("node.node_state = ?", f.NodeState)
}
if f.HealthState != nil {
query = query.Where("node.health_state = ?", f.HealthState)
}
}
// Add Group and Order
query = query.GroupBy("state").OrderBy("count DESC")
rows, err := query.RunWith(r.stmtCache).Query()
if err != nil {
queryString, queryVars, _ := query.ToSql()
cclog.Errorf("Error while running query '%s' %v: %v", queryString, queryVars, err)
return nil, err
}
nodes := make([]*model.NodeStates, 0)
for rows.Next() {
node := model.NodeStates{}
if err := rows.Scan(&node.State, &node.Count); err != nil {
rows.Close()
cclog.Warn("Error while scanning rows (NodeStates)")
return nil, err
}
nodes = append(nodes, &node)
}
return nodes, nil
}
func (r *NodeRepository) CountHealthStates(ctx context.Context, filters []*model.NodeFilter) ([]*model.NodeStates, error) {
query, qerr := AccessCheck(ctx, sq.Select("health_state AS state", "count(*) AS count").From("node"))
if qerr != nil {
return nil, qerr
}
for _, f := range filters {
if f.Hostname != nil {
query = buildStringCondition("node.hostname", f.Hostname, query)
}
if f.Cluster != nil {
query = buildStringCondition("node.cluster", f.Cluster, query)
}
if f.Subcluster != nil {
query = buildStringCondition("node.subcluster", f.Subcluster, query)
}
if f.NodeState != nil {
query = query.Where("node.node_state = ?", f.NodeState)
}
if f.HealthState != nil {
query = query.Where("node.health_state = ?", f.HealthState)
}
}
// Add Group and Order
query = query.GroupBy("state").OrderBy("count DESC")
rows, err := query.RunWith(r.stmtCache).Query()
if err != nil {
queryString, queryVars, _ := query.ToSql()
cclog.Errorf("Error while running query '%s' %v: %v", queryString, queryVars, err)
return nil, err
}
nodes := make([]*model.NodeStates, 0)
for rows.Next() {
node := model.NodeStates{}
if err := rows.Scan(&node.State, &node.Count); err != nil {
rows.Close()
cclog.Warn("Error while scanning rows (NodeStates)")
return nil, err
}
nodes = append(nodes, &node)
}
return nodes, nil
}
func AccessCheck(ctx context.Context, query sq.SelectBuilder) (sq.SelectBuilder, error) {
user := GetUserFromContext(ctx)
return AccessCheckWithUser(user, query)
}
func AccessCheckWithUser(user *schema.User, query sq.SelectBuilder) (sq.SelectBuilder, error) {
if user == nil {
var qnil sq.SelectBuilder
return qnil, fmt.Errorf("user context is nil")
}
switch {
// case len(user.Roles) == 1 && user.HasRole(schema.RoleApi): // API-User : Access NodeInfos
// return query, nil
case user.HasAnyRole([]schema.Role{schema.RoleAdmin, schema.RoleSupport}): // Admin & Support : Access NodeInfos
return query, nil
default: // No known Role: No Access, return error
var qnil sq.SelectBuilder
return qnil, fmt.Errorf("user has no or unknown roles")
}
}
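A sketch (not from the commit) of how the new counting helpers might be driven outside the GraphQL resolver. GetNodeRepository, CountNodeStates, CountHealthStates and the NodeFilter/StringInput shapes are taken from the hunks above; the subcluster value is invented, and the context is assumed to carry an admin or support user on an initialized cc-backend database connection, otherwise AccessCheck rejects the query.

package main

import (
	"context"
	"fmt"

	"github.com/ClusterCockpit/cc-backend/internal/graph/model"
	"github.com/ClusterCockpit/cc-backend/internal/repository"
)

// printNodeStateSummary counts node and health states for one subcluster.
func printNodeStateSummary(ctx context.Context) error {
	subcluster := "main" // hypothetical subcluster name
	filter := []*model.NodeFilter{
		{Subcluster: &model.StringInput{Eq: &subcluster}},
	}

	repo := repository.GetNodeRepository()
	states, err := repo.CountNodeStates(ctx, filter)
	if err != nil {
		return err
	}
	health, err := repo.CountHealthStates(ctx, filter)
	if err != nil {
		return err
	}
	for _, s := range append(states, health...) {
		fmt.Printf("%s: %d\n", s.State, s.Count)
	}
	return nil
}

func main() {
	// In real use the context must carry a user with the admin or support
	// role (repository.ContextUserKey); with a bare context AccessCheck
	// returns an error, which this example simply prints.
	if err := printNodeStateSummary(context.Background()); err != nil {
		fmt.Println("access check / query failed:", err)
	}
}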

View File

@@ -28,6 +28,7 @@ var groupBy2column = map[model.Aggregate]string{
var sortBy2column = map[model.SortByAggregate]string{
model.SortByAggregateTotaljobs: "totalJobs",
model.SortByAggregateTotalusers: "totalUsers",
model.SortByAggregateTotalwalltime: "totalWalltime",
model.SortByAggregateTotalnodes: "totalNodes",
model.SortByAggregateTotalnodehours: "totalNodeHours",
@@ -76,8 +77,12 @@ func (r *JobRepository) buildStatsQuery(
// fmt.Sprintf(`CAST(ROUND((CASE WHEN job.job_state = "running" THEN %d - job.start_time ELSE job.duration END) / 3600) as %s) as value`, time.Now().Unix(), castType)
if col != "" {
// Scan columns: id, totalJobs, name, totalWalltime, totalNodes, totalNodeHours, totalCores, totalCoreHours, totalAccs, totalAccHours
query = sq.Select(col, "COUNT(job.id) as totalJobs", "name",
// Scan columns: id, name, totalJobs, totalUsers, totalWalltime, totalNodes, totalNodeHours, totalCores, totalCoreHours, totalAccs, totalAccHours
query = sq.Select(
col,
"name",
"COUNT(job.id) as totalJobs",
"COUNT(DISTINCT job.hpc_user) AS totalUsers",
fmt.Sprintf(`CAST(ROUND(SUM((CASE WHEN job.job_state = "running" THEN %d - job.start_time ELSE job.duration END)) / 3600) as %s) as totalWalltime`, time.Now().Unix(), castType),
fmt.Sprintf(`CAST(SUM(job.num_nodes) as %s) as totalNodes`, castType),
fmt.Sprintf(`CAST(ROUND(SUM((CASE WHEN job.job_state = "running" THEN %d - job.start_time ELSE job.duration END) * job.num_nodes) / 3600) as %s) as totalNodeHours`, time.Now().Unix(), castType),
@@ -87,8 +92,10 @@ func (r *JobRepository) buildStatsQuery(
fmt.Sprintf(`CAST(ROUND(SUM((CASE WHEN job.job_state = "running" THEN %d - job.start_time ELSE job.duration END) * job.num_acc) / 3600) as %s) as totalAccHours`, time.Now().Unix(), castType),
).From("job").LeftJoin("hpc_user ON hpc_user.username = job.hpc_user").GroupBy(col)
} else {
// Scan columns: totalJobs, name, totalWalltime, totalNodes, totalNodeHours, totalCores, totalCoreHours, totalAccs, totalAccHours
query = sq.Select("COUNT(job.id)",
// Scan columns: totalJobs, totalUsers, totalWalltime, totalNodes, totalNodeHours, totalCores, totalCoreHours, totalAccs, totalAccHours
query = sq.Select(
"COUNT(job.id) as totalJobs",
"COUNT(DISTINCT job.hpc_user) AS totalUsers",
fmt.Sprintf(`CAST(ROUND(SUM((CASE WHEN job.job_state = "running" THEN %d - job.start_time ELSE job.duration END)) / 3600) as %s)`, time.Now().Unix(), castType),
fmt.Sprintf(`CAST(SUM(job.num_nodes) as %s)`, castType),
fmt.Sprintf(`CAST(ROUND(SUM((CASE WHEN job.job_state = "running" THEN %d - job.start_time ELSE job.duration END) * job.num_nodes) / 3600) as %s)`, time.Now().Unix(), castType),
@@ -167,14 +174,14 @@ func (r *JobRepository) JobsStatsGrouped(
for rows.Next() {
var id sql.NullString
var name sql.NullString
var jobs, walltime, nodes, nodeHours, cores, coreHours, accs, accHours sql.NullInt64
if err := rows.Scan(&id, &jobs, &name, &walltime, &nodes, &nodeHours, &cores, &coreHours, &accs, &accHours); err != nil {
var jobs, users, walltime, nodes, nodeHours, cores, coreHours, accs, accHours sql.NullInt64
if err := rows.Scan(&id, &name, &jobs, &users, &walltime, &nodes, &nodeHours, &cores, &coreHours, &accs, &accHours); err != nil {
cclog.Warn("Error while scanning rows")
return nil, err
}
if id.Valid {
var totalJobs, totalWalltime, totalNodes, totalNodeHours, totalCores, totalCoreHours, totalAccs, totalAccHours int
var totalJobs, totalUsers, totalWalltime, totalNodes, totalNodeHours, totalCores, totalCoreHours, totalAccs, totalAccHours int
var personName string
if name.Valid {
@@ -185,6 +192,10 @@ func (r *JobRepository) JobsStatsGrouped(
totalJobs = int(jobs.Int64)
}
if users.Valid {
totalUsers = int(users.Int64)
}
if walltime.Valid {
totalWalltime = int(walltime.Int64)
}
@@ -228,8 +239,9 @@ func (r *JobRepository) JobsStatsGrouped(
stats = append(stats,
&model.JobsStatistics{
ID: id.String,
TotalJobs: int(jobs.Int64),
TotalWalltime: int(walltime.Int64),
TotalJobs: totalJobs,
TotalUsers: totalUsers,
TotalWalltime: totalWalltime,
TotalNodes: totalNodes,
TotalNodeHours: totalNodeHours,
TotalCores: totalCores,
@@ -259,8 +271,8 @@ func (r *JobRepository) JobsStats(
row := query.RunWith(r.DB).QueryRow()
stats := make([]*model.JobsStatistics, 0, 1)
var jobs, walltime, nodes, nodeHours, cores, coreHours, accs, accHours sql.NullInt64
if err := row.Scan(&jobs, &walltime, &nodes, &nodeHours, &cores, &coreHours, &accs, &accHours); err != nil {
var jobs, users, walltime, nodes, nodeHours, cores, coreHours, accs, accHours sql.NullInt64
if err := row.Scan(&jobs, &users, &walltime, &nodes, &nodeHours, &cores, &coreHours, &accs, &accHours); err != nil {
cclog.Warn("Error while scanning rows")
return nil, err
}
@@ -280,6 +292,7 @@ func (r *JobRepository) JobsStats(
stats = append(stats,
&model.JobsStatistics{
TotalJobs: int(jobs.Int64),
TotalUsers: int(users.Int64),
TotalWalltime: int(walltime.Int64),
TotalNodeHours: totalNodeHours,
TotalCoreHours: totalCoreHours,
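The invariant behind the Scan changes above is that the SELECT list and the rows.Scan arguments stay in the same order (id, name, totalJobs, totalUsers, ...). A standalone sketch (not from the commit) that only prints the generated SQL; the column expressions are taken from buildStatsQuery, the grouping column is hypothetical.

package main

import (
	"fmt"

	sq "github.com/Masterminds/squirrel"
)

func main() {
	// Column order here must match the later rows.Scan(&id, &name, &jobs, &users, ...).
	query := sq.Select(
		"job.cluster", // hypothetical groupBy column ("id" in the scan)
		"name",
		"COUNT(job.id) as totalJobs",
		"COUNT(DISTINCT job.hpc_user) AS totalUsers",
	).From("job").
		LeftJoin("hpc_user ON hpc_user.username = job.hpc_user").
		GroupBy("job.cluster")

	sql, args, err := query.ToSql()
	if err != nil {
		panic(err)
	}
	fmt.Println(sql, args)
}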