Mirror of https://github.com/ClusterCockpit/cc-backend, synced 2024-11-10 08:57:25 +01:00

commit 616095fe66 (parent 5ba6f0ed3a)
Add additional job stats, fix test
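Most of the hunks below sit in the generated GraphQL layer (the ComplexityRoot entry, the Complexity switch case, the field resolver and field context for totalAccHours, and the selection-set executor); this code follows the gqlgen pattern and is typically regenerated from the schema rather than edited by hand. The remaining hunks change the GraphQL schema, the Go model, the statistics queries in the job repository, and the repository test.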
@@ -145,6 +145,7 @@ type ComplexityRoot struct {
 		Name           func(childComplexity int) int
 		RunningJobs    func(childComplexity int) int
 		ShortJobs      func(childComplexity int) int
+		TotalAccHours  func(childComplexity int) int
 		TotalCoreHours func(childComplexity int) int
 		TotalJobs      func(childComplexity int) int
 		TotalNodeHours func(childComplexity int) int
@@ -747,6 +748,13 @@ func (e *executableSchema) Complexity(typeName, field string, childComplexity in
 
 		return e.complexity.JobsStatistics.ShortJobs(childComplexity), true
 
+	case "JobsStatistics.totalAccHours":
+		if e.complexity.JobsStatistics.TotalAccHours == nil {
+			break
+		}
+
+		return e.complexity.JobsStatistics.TotalAccHours(childComplexity), true
+
 	case "JobsStatistics.totalCoreHours":
 		if e.complexity.JobsStatistics.TotalCoreHours == nil {
 			break
@@ -1786,6 +1794,7 @@ type JobsStatistics {
   totalWalltime: Int! # Sum of the duration of all matched jobs in hours
   totalNodeHours: Int! # Sum of the node hours of all matched jobs
   totalCoreHours: Int! # Sum of the core hours of all matched jobs
+  totalAccHours: Int! # Sum of the gpu hours of all matched jobs
   histDuration: [HistoPoint!]! # value: hour, count: number of jobs with a rounded duration of value
   histNumNodes: [HistoPoint!]! # value: number of nodes, count: number of jobs with that number of nodes
 }
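With the schema extended, a client can request the accelerator-hour total alongside the existing aggregates. Below is a minimal sketch in Go; the endpoint URL and the argument-free jobsStatistics call are assumptions for illustration, not part of this commit.

package main

import (
	"bytes"
	"encoding/json"
	"fmt"
	"net/http"
)

func main() {
	// Field names follow the JobsStatistics type above; the endpoint path is assumed.
	query := `{ jobsStatistics { totalJobs totalWalltime totalNodeHours totalCoreHours totalAccHours } }`

	body, _ := json.Marshal(map[string]string{"query": query})
	resp, err := http.Post("http://localhost:8080/query", "application/json", bytes.NewReader(body))
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()

	var result map[string]interface{}
	if err := json.NewDecoder(resp.Body).Decode(&result); err != nil {
		panic(err)
	}
	fmt.Println(result)
}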
@@ -5122,6 +5131,50 @@ func (ec *executionContext) fieldContext_JobsStatistics_totalCoreHours(ctx conte
 	return fc, nil
 }
 
+func (ec *executionContext) _JobsStatistics_totalAccHours(ctx context.Context, field graphql.CollectedField, obj *model.JobsStatistics) (ret graphql.Marshaler) {
+	fc, err := ec.fieldContext_JobsStatistics_totalAccHours(ctx, field)
+	if err != nil {
+		return graphql.Null
+	}
+	ctx = graphql.WithFieldContext(ctx, fc)
+	defer func() {
+		if r := recover(); r != nil {
+			ec.Error(ctx, ec.Recover(ctx, r))
+			ret = graphql.Null
+		}
+	}()
+	resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
+		ctx = rctx // use context from middleware stack in children
+		return obj.TotalAccHours, nil
+	})
+	if err != nil {
+		ec.Error(ctx, err)
+		return graphql.Null
+	}
+	if resTmp == nil {
+		if !graphql.HasFieldError(ctx, fc) {
+			ec.Errorf(ctx, "must not be null")
+		}
+		return graphql.Null
+	}
+	res := resTmp.(int)
+	fc.Result = res
+	return ec.marshalNInt2int(ctx, field.Selections, res)
+}
+
+func (ec *executionContext) fieldContext_JobsStatistics_totalAccHours(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) {
+	fc = &graphql.FieldContext{
+		Object:     "JobsStatistics",
+		Field:      field,
+		IsMethod:   false,
+		IsResolver: false,
+		Child: func(ctx context.Context, field graphql.CollectedField) (*graphql.FieldContext, error) {
+			return nil, errors.New("field of type Int does not have child fields")
+		},
+	}
+	return fc, nil
+}
+
 func (ec *executionContext) _JobsStatistics_histDuration(ctx context.Context, field graphql.CollectedField, obj *model.JobsStatistics) (ret graphql.Marshaler) {
 	fc, err := ec.fieldContext_JobsStatistics_histDuration(ctx, field)
 	if err != nil {
@@ -6983,6 +7036,8 @@ func (ec *executionContext) fieldContext_Query_jobsStatistics(ctx context.Contex
 				return ec.fieldContext_JobsStatistics_totalNodeHours(ctx, field)
 			case "totalCoreHours":
 				return ec.fieldContext_JobsStatistics_totalCoreHours(ctx, field)
+			case "totalAccHours":
+				return ec.fieldContext_JobsStatistics_totalAccHours(ctx, field)
 			case "histDuration":
 				return ec.fieldContext_JobsStatistics_histDuration(ctx, field)
 			case "histNumNodes":
@@ -12207,6 +12262,13 @@ func (ec *executionContext) _JobsStatistics(ctx context.Context, sel ast.Selecti
 
 			out.Values[i] = ec._JobsStatistics_totalCoreHours(ctx, field, obj)
 
 			if out.Values[i] == graphql.Null {
 				invalids++
 			}
+		case "totalAccHours":
+
+			out.Values[i] = ec._JobsStatistics_totalAccHours(ctx, field, obj)
+
+			if out.Values[i] == graphql.Null {
+				invalids++
+			}
@@ -95,6 +95,7 @@ type JobsStatistics struct {
 	TotalWalltime  int           `json:"totalWalltime"`
 	TotalNodeHours int           `json:"totalNodeHours"`
 	TotalCoreHours int           `json:"totalCoreHours"`
+	TotalAccHours  int           `json:"totalAccHours"`
 	HistDuration   []*HistoPoint `json:"histDuration"`
 	HistNumNodes   []*HistoPoint `json:"histNumNodes"`
 }
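The json tags mirror the GraphQL field names, so the new value serializes as totalAccHours. A small standalone sketch using only the fields visible in this hunk (the real struct has more fields):

package main

import (
	"encoding/json"
	"fmt"
)

// Trimmed copy of the struct above, limited to the fields shown in the hunk.
type JobsStatistics struct {
	TotalWalltime  int `json:"totalWalltime"`
	TotalNodeHours int `json:"totalNodeHours"`
	TotalCoreHours int `json:"totalCoreHours"`
	TotalAccHours  int `json:"totalAccHours"`
}

func main() {
	b, _ := json.Marshal(JobsStatistics{TotalCoreHours: 1024, TotalAccHours: 64})
	fmt.Println(string(b))
	// {"totalWalltime":0,"totalNodeHours":0,"totalCoreHours":1024,"totalAccHours":64}
}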
@@ -61,18 +61,20 @@ func (r *JobRepository) buildStatsQuery(
 	castType := r.getCastType()
 
 	if col != "" {
-		// Scan columns: id, totalJobs, totalWalltime, totalNodeHours, totalCoreHours
+		// Scan columns: id, totalJobs, totalWalltime, totalNodeHours, totalCoreHours, totalAccHours
 		query = sq.Select(col, "COUNT(job.id)",
 			fmt.Sprintf("CAST(ROUND(SUM(job.duration) / 3600) as %s)", castType),
 			fmt.Sprintf("CAST(ROUND(SUM(job.duration * job.num_nodes) / 3600) as %s)", castType),
 			fmt.Sprintf("CAST(ROUND(SUM(job.duration * job.num_hwthreads) / 3600) as %s)", castType),
+			fmt.Sprintf("CAST(ROUND(SUM(job.duration * job.num_acc) / 3600) as %s)", castType),
 		).From("job").GroupBy(col)
 	} else {
-		// Scan columns: totalJobs, totalWalltime, totalNodeHours, totalCoreHours
+		// Scan columns: totalJobs, totalWalltime, totalNodeHours, totalCoreHours, totalAccHours
 		query = sq.Select("COUNT(job.id)",
 			fmt.Sprintf("CAST(ROUND(SUM(job.duration) / 3600) as %s)", castType),
 			fmt.Sprintf("CAST(ROUND(SUM(job.duration * job.num_nodes) / 3600) as %s)", castType),
 			fmt.Sprintf("CAST(ROUND(SUM(job.duration * job.num_hwthreads) / 3600) as %s)", castType),
 			fmt.Sprintf("CAST(ROUND(SUM(job.duration * job.num_acc) / 3600) as %s)", castType),
 		).From("job")
 	}
 
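To make the shape of the generated statement concrete, here is a self-contained sketch of the grouped branch using the same squirrel builder calls; the cast type "BIGINT" and the grouping column "job.user" are illustrative placeholders for whatever getCastType and the caller actually supply.

package main

import (
	"fmt"

	sq "github.com/Masterminds/squirrel"
)

func main() {
	// Placeholders standing in for r.getCastType() and the grouping column.
	castType := "BIGINT"
	col := "job.user"

	query := sq.Select(col, "COUNT(job.id)",
		fmt.Sprintf("CAST(ROUND(SUM(job.duration) / 3600) as %s)", castType),
		fmt.Sprintf("CAST(ROUND(SUM(job.duration * job.num_nodes) / 3600) as %s)", castType),
		fmt.Sprintf("CAST(ROUND(SUM(job.duration * job.num_hwthreads) / 3600) as %s)", castType),
		fmt.Sprintf("CAST(ROUND(SUM(job.duration * job.num_acc) / 3600) as %s)", castType),
	).From("job").GroupBy(col)

	sql, _, _ := query.ToSql()
	fmt.Println(sql)
	// Roughly: SELECT job.user, COUNT(job.id), CAST(ROUND(SUM(job.duration) / 3600) as BIGINT), ...
	//          FROM job GROUP BY job.user
}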
@@ -131,27 +133,40 @@ func (r *JobRepository) JobsStatsGrouped(
 
 	for rows.Next() {
 		var id sql.NullString
-		var jobs, walltime, nodeHours, coreHours sql.NullInt64
-		if err := rows.Scan(&id, &jobs, &walltime, &nodeHours, &coreHours); err != nil {
+		var jobs, walltime, nodeHours, coreHours, accHours sql.NullInt64
+		if err := rows.Scan(&id, &jobs, &walltime, &nodeHours, &coreHours, &accHours); err != nil {
 			log.Warn("Error while scanning rows")
 			return nil, err
 		}
 
 		if id.Valid {
+			var totalCoreHours, totalAccHours int
+
+			if coreHours.Valid {
+				totalCoreHours = int(coreHours.Int64)
+			}
+			if accHours.Valid {
+				totalAccHours = int(accHours.Int64)
+			}
+
 			if col == "job.user" {
 				name := r.getUserName(ctx, id.String)
 				stats = append(stats,
 					&model.JobsStatistics{
-						ID:            id.String,
-						Name:          name,
-						TotalJobs:     int(jobs.Int64),
-						TotalWalltime: int(walltime.Int64)})
+						ID:             id.String,
+						Name:           name,
+						TotalJobs:      int(jobs.Int64),
+						TotalWalltime:  int(walltime.Int64),
+						TotalCoreHours: totalCoreHours,
+						TotalAccHours:  totalAccHours})
			} else {
 				stats = append(stats,
 					&model.JobsStatistics{
-						ID:            id.String,
-						TotalJobs:     int(jobs.Int64),
-						TotalWalltime: int(walltime.Int64)})
+						ID:             id.String,
+						TotalJobs:      int(jobs.Int64),
+						TotalWalltime:  int(walltime.Int64),
+						TotalCoreHours: totalCoreHours,
+						TotalAccHours:  totalAccHours})
 			}
 		}
 	}
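The Valid checks matter because SUM() yields SQL NULL when there are no rows to aggregate or only NULL values to sum, and a NULL scans into an sql.NullInt64 whose Valid flag is false. A minimal sketch of the same guard, using a hypothetical helper name rather than the inline checks from the commit:

package main

import (
	"database/sql"
	"fmt"
)

// nullToInt is a hypothetical helper mirroring the inline Valid checks above:
// an invalid (NULL) aggregate becomes 0 instead of an undefined value.
func nullToInt(v sql.NullInt64) int {
	if v.Valid {
		return int(v.Int64)
	}
	return 0
}

func main() {
	fmt.Println(nullToInt(sql.NullInt64{}))                       // 0, e.g. a NULL accelerator-hour sum
	fmt.Println(nullToInt(sql.NullInt64{Int64: 96, Valid: true})) // 96
}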
@@ -174,17 +189,27 @@ func (r *JobRepository) JobsStats(
 	row := query.RunWith(r.DB).QueryRow()
 	stats := make([]*model.JobsStatistics, 0, 1)
 
-	var jobs, walltime, nodeHours, coreHours sql.NullInt64
-	if err := row.Scan(&jobs, &walltime, &nodeHours, &coreHours); err != nil {
+	var jobs, walltime, nodeHours, coreHours, accHours sql.NullInt64
+	if err := row.Scan(&jobs, &walltime, &nodeHours, &coreHours, &accHours); err != nil {
 		log.Warn("Error while scanning rows")
 		return nil, err
 	}
 
 	if jobs.Valid {
+		var totalCoreHours, totalAccHours int
+
+		if coreHours.Valid {
+			totalCoreHours = int(coreHours.Int64)
+		}
+		if accHours.Valid {
+			totalAccHours = int(accHours.Int64)
+		}
 		stats = append(stats,
 			&model.JobsStatistics{
-				TotalJobs:     int(jobs.Int64),
-				TotalWalltime: int(walltime.Int64)})
+				TotalJobs:      int(jobs.Int64),
+				TotalWalltime:  int(walltime.Int64),
+				TotalCoreHours: totalCoreHours,
+				TotalAccHours:  totalAccHours})
 	}
 
 	log.Infof("Timer JobStatistics %s", time.Since(start))
@@ -11,14 +11,11 @@ import (
 
 func TestBuildJobStatsQuery(t *testing.T) {
 	r := setup(t)
-	q := r.buildJobsStatsQuery(nil, "USER")
+	q := r.buildStatsQuery(nil, "USER")
 
 	sql, _, err := q.ToSql()
 	noErr(t, err)
 
 	fmt.Printf("SQL: %s\n", sql)
 
-	if 1 != 5 {
-		t.Errorf("wrong summary for diagnostic 3\ngot: %d \nwant: 1366", 5)
-	}
 }
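The dropped placeholder check (if 1 != 5 ... want: 1366) always fired and made the test fail unconditionally; the commit removes it and switches the call to the renamed buildStatsQuery. If a stricter check is wanted later, one option is to assert that the new accelerator aggregate appears in the generated SQL. A sketch reusing the existing setup and noErr helpers; the assertion itself is not part of this commit and assumes a strings import in the same test file:

// Hypothetical follow-up test, same package as the test above.
func TestBuildStatsQueryContainsAccHours(t *testing.T) {
	r := setup(t)
	q := r.buildStatsQuery(nil, "USER")

	sql, _, err := q.ToSql()
	noErr(t, err)

	// The builder sums job.duration * job.num_acc, so the column name
	// should show up in the rendered statement.
	if !strings.Contains(sql, "job.num_acc") {
		t.Errorf("expected an aggregate over job.num_acc, got: %s", sql)
	}
}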