Mirror of https://github.com/ClusterCockpit/cc-backend
Del JobName from Job type, jobStats name required

- Regenerated GQL
- Relates to #121
- Return jobStats name as string

parent dbebecfcdb
commit 4e568e60cd
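For orientation, a sketch of what the change means for API clients. The query shape below is an assumption, not part of this commit; only the removed Job.jobName field, the Job.metaData field, and the now non-null JobsStatistics.name are taken from the diffs that follow. The job name is still reachable through the job's metaData map.

package example

// exampleQuery is illustrative only: after this commit a client reads the job name
// from metaData (key "jobName") instead of a dedicated Job.jobName field, and
// jobsStatistics.name is guaranteed to be a string (falling back to the emptyDash
// placeholder when no given name is stored).
const exampleQuery = `
query {
  jobs {
    items {
      jobId
      metaData   # contains the "jobName" key formerly exposed as jobName
    }
  }
  jobsStatistics {
    id
    name         # String! after this commit
  }
}`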
@@ -10,7 +10,6 @@ type Job {
   jobId: Int!
   user: String!
   project: String!
-  jobName: String
   cluster: String!
   subCluster: String!
   startTime: Time!
@@ -286,7 +285,7 @@ type HistoPoint {
 
 type JobsStatistics {
   id: ID! # If `groupBy` was used, ID of the user/project/cluster
-  name: String # if User-Statistics: Given Name of Account (ID) Owner
+  name: String! # if User-Statistics: Given Name of Account (ID) Owner
   totalJobs: Int! # Number of jobs that matched
   shortJobs: Int! # Number of jobs with a duration of less than 2 minutes
   totalWalltime: Int! # Sum of the duration of all matched jobs in hours
@@ -90,7 +90,6 @@ type ComplexityRoot struct {
 		Exclusive        func(childComplexity int) int
 		ID               func(childComplexity int) int
 		JobID            func(childComplexity int) int
-		JobName          func(childComplexity int) int
 		MetaData         func(childComplexity int) int
 		MonitoringStatus func(childComplexity int) int
 		NumAcc           func(childComplexity int) int
@@ -287,8 +286,6 @@ type ClusterResolver interface {
 	Partitions(ctx context.Context, obj *schema.Cluster) ([]string, error)
 }
 type JobResolver interface {
-	JobName(ctx context.Context, obj *schema.Job) (*string, error)
-
 	Tags(ctx context.Context, obj *schema.Job) ([]*schema.Tag, error)
 
 	ConcurrentJobs(ctx context.Context, obj *schema.Job) (*model.JobLinkResultList, error)
@@ -489,13 +486,6 @@ func (e *executableSchema) Complexity(typeName, field string, childComplexity in
 
 		return e.complexity.Job.JobID(childComplexity), true
 
-	case "Job.jobName":
-		if e.complexity.Job.JobName == nil {
-			break
-		}
-
-		return e.complexity.Job.JobName(childComplexity), true
-
 	case "Job.metaData":
 		if e.complexity.Job.MetaData == nil {
 			break
@@ -1498,7 +1488,6 @@ type Job {
   jobId: Int!
   user: String!
   project: String!
-  jobName: String
   cluster: String!
   subCluster: String!
   startTime: Time!
@@ -1774,7 +1763,7 @@ type HistoPoint {
 
 type JobsStatistics {
   id: ID! # If ` + "`" + `groupBy` + "`" + ` was used, ID of the user/project/cluster
-  name: String # if User-Statistics: Given Name of Account (ID) Owner
+  name: String! # if User-Statistics: Given Name of Account (ID) Owner
   totalJobs: Int! # Number of jobs that matched
   shortJobs: Int! # Number of jobs with a duration of less than 2 minutes
   totalWalltime: Int! # Sum of the duration of all matched jobs in hours
@@ -3177,47 +3166,6 @@ func (ec *executionContext) fieldContext_Job_project(ctx context.Context, field
 	return fc, nil
 }
 
-func (ec *executionContext) _Job_jobName(ctx context.Context, field graphql.CollectedField, obj *schema.Job) (ret graphql.Marshaler) {
-	fc, err := ec.fieldContext_Job_jobName(ctx, field)
-	if err != nil {
-		return graphql.Null
-	}
-	ctx = graphql.WithFieldContext(ctx, fc)
-	defer func() {
-		if r := recover(); r != nil {
-			ec.Error(ctx, ec.Recover(ctx, r))
-			ret = graphql.Null
-		}
-	}()
-	resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
-		ctx = rctx // use context from middleware stack in children
-		return ec.resolvers.Job().JobName(rctx, obj)
-	})
-	if err != nil {
-		ec.Error(ctx, err)
-		return graphql.Null
-	}
-	if resTmp == nil {
-		return graphql.Null
-	}
-	res := resTmp.(*string)
-	fc.Result = res
-	return ec.marshalOString2ᚖstring(ctx, field.Selections, res)
-}
-
-func (ec *executionContext) fieldContext_Job_jobName(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) {
-	fc = &graphql.FieldContext{
-		Object:     "Job",
-		Field:      field,
-		IsMethod:   true,
-		IsResolver: true,
-		Child: func(ctx context.Context, field graphql.CollectedField) (*graphql.FieldContext, error) {
-			return nil, errors.New("field of type String does not have child fields")
-		},
-	}
-	return fc, nil
-}
-
 func (ec *executionContext) _Job_cluster(ctx context.Context, field graphql.CollectedField, obj *schema.Job) (ret graphql.Marshaler) {
 	fc, err := ec.fieldContext_Job_cluster(ctx, field)
 	if err != nil {
@@ -4636,8 +4584,6 @@ func (ec *executionContext) fieldContext_JobResultList_items(ctx context.Context
 				return ec.fieldContext_Job_user(ctx, field)
 			case "project":
 				return ec.fieldContext_Job_project(ctx, field)
-			case "jobName":
-				return ec.fieldContext_Job_jobName(ctx, field)
 			case "cluster":
 				return ec.fieldContext_Job_cluster(ctx, field)
 			case "subCluster":
@@ -4871,11 +4817,14 @@ func (ec *executionContext) _JobsStatistics_name(ctx context.Context, field grap
 		return graphql.Null
 	}
 	if resTmp == nil {
+		if !graphql.HasFieldError(ctx, fc) {
+			ec.Errorf(ctx, "must not be null")
+		}
 		return graphql.Null
 	}
-	res := resTmp.(*string)
+	res := resTmp.(string)
 	fc.Result = res
-	return ec.marshalOString2ᚖstring(ctx, field.Selections, res)
+	return ec.marshalNString2string(ctx, field.Selections, res)
 }
 
 func (ec *executionContext) fieldContext_JobsStatistics_name(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) {
@@ -6635,8 +6584,6 @@ func (ec *executionContext) fieldContext_Query_job(ctx context.Context, field gr
 				return ec.fieldContext_Job_user(ctx, field)
 			case "project":
 				return ec.fieldContext_Job_project(ctx, field)
-			case "jobName":
-				return ec.fieldContext_Job_jobName(ctx, field)
 			case "cluster":
 				return ec.fieldContext_Job_cluster(ctx, field)
 			case "subCluster":
@@ -11711,23 +11658,6 @@ func (ec *executionContext) _Job(ctx context.Context, sel ast.SelectionSet, obj
 			if out.Values[i] == graphql.Null {
 				atomic.AddUint32(&invalids, 1)
 			}
-		case "jobName":
-			field := field
-
-			innerFunc := func(ctx context.Context) (res graphql.Marshaler) {
-				defer func() {
-					if r := recover(); r != nil {
-						ec.Error(ctx, ec.Recover(ctx, r))
-					}
-				}()
-				res = ec._Job_jobName(ctx, field, obj)
-				return res
-			}
-
-			out.Concurrently(i, func() graphql.Marshaler {
-				return innerFunc(ctx)
-
-			})
 		case "cluster":
 
 			out.Values[i] = ec._Job_cluster(ctx, field, obj)
@@ -12125,6 +12055,9 @@ func (ec *executionContext) _JobsStatistics(ctx context.Context, sel ast.Selecti
 
 			out.Values[i] = ec._JobsStatistics_name(ctx, field, obj)
 
+			if out.Values[i] == graphql.Null {
+				invalids++
+			}
 		case "totalJobs":
 
 			out.Values[i] = ec._JobsStatistics_totalJobs(ctx, field, obj)
@@ -88,7 +88,7 @@ type JobResultList struct {
 
 type JobsStatistics struct {
 	ID            string `json:"id"`
-	Name          *string `json:"name"`
+	Name          string `json:"name"`
 	TotalJobs     int    `json:"totalJobs"`
 	ShortJobs     int    `json:"shortJobs"`
 	TotalWalltime int    `json:"totalWalltime"`
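The pointer-to-value change above ripples into any code that consumes JobsStatistics. A minimal sketch, assuming the generated model lives at internal/graph/model as elsewhere in the repository (the function itself is illustrative, not part of this commit):

package example

import (
	"fmt"

	"github.com/ClusterCockpit/cc-backend/internal/graph/model"
)

// printStats reads Name directly; before this commit a nil check and a *s.Name
// dereference were required because Name was a *string.
func printStats(stats []*model.JobsStatistics) {
	for _, s := range stats {
		fmt.Printf("%s (%s): %d jobs, %d h walltime\n", s.Name, s.ID, s.TotalJobs, s.TotalWalltime)
	}
}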
@@ -26,11 +26,6 @@ func (r *clusterResolver) Partitions(ctx context.Context, obj *schema.Cluster) (
 	return r.Repo.Partitions(obj.Name)
 }
 
-// JobName is the resolver for the jobName field.
-func (r *jobResolver) JobName(ctx context.Context, obj *schema.Job) (*string, error) {
-	return r.Repo.FetchJobName(obj)
-}
-
 // Tags is the resolver for the tags field.
 func (r *jobResolver) Tags(ctx context.Context, obj *schema.Job) ([]*schema.Tag, error) {
 	return r.Repo.GetTags(&obj.ID)
@@ -38,7 +33,6 @@ func (r *jobResolver) Tags(ctx context.Context, obj *schema.Job) ([]*schema.Tag,
 
 // ConcurrentJobs is the resolver for the concurrentJobs field.
 func (r *jobResolver) ConcurrentJobs(ctx context.Context, obj *schema.Job) (*model.JobLinkResultList, error) {
-
 	exc := int(obj.Exclusive)
 	if exc != 1 {
 		filter := []*model.JobFilter{}
@@ -151,39 +151,6 @@ func scanJobLink(row interface{ Scan(...interface{}) error }) (*model.JobLink, e
 	return jobLink, nil
 }
 
-func (r *JobRepository) FetchJobName(job *schema.Job) (*string, error) {
-	start := time.Now()
-	cachekey := fmt.Sprintf("metadata:%d", job.ID)
-	if cached := r.cache.Get(cachekey, nil); cached != nil {
-		job.MetaData = cached.(map[string]string)
-		if jobName := job.MetaData["jobName"]; jobName != "" {
-			return &jobName, nil
-		}
-	}
-
-	if err := sq.Select("job.meta_data").From("job").Where("job.id = ?", job.ID).
-		RunWith(r.stmtCache).QueryRow().Scan(&job.RawMetaData); err != nil {
-		return nil, err
-	}
-
-	if len(job.RawMetaData) == 0 {
-		return nil, nil
-	}
-
-	if err := json.Unmarshal(job.RawMetaData, &job.MetaData); err != nil {
-		return nil, err
-	}
-
-	r.cache.Put(cachekey, job.MetaData, len(job.RawMetaData), 24*time.Hour)
-	log.Infof("Timer FetchJobName %s", time.Since(start))
-
-	if jobName := job.MetaData["jobName"]; jobName != "" {
-		return &jobName, nil
-	} else {
-		return new(string), nil
-	}
-}
-
 func (r *JobRepository) FetchMetadata(job *schema.Job) (map[string]string, error) {
 	start := time.Now()
 	cachekey := fmt.Sprintf("metadata:%d", job.ID)
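With FetchJobName removed, nothing in the repository hands out the job name directly anymore. A hypothetical helper, not part of this commit (the import paths are assumptions), shows how a caller could still obtain it through FetchMetadata, which the diff above keeps together with its metadata cache:

package example

import (
	"github.com/ClusterCockpit/cc-backend/internal/repository"
	"github.com/ClusterCockpit/cc-backend/pkg/schema"
)

// jobNameFromMetadata reads the job name from the cached metadata map.
// It returns an empty string when no "jobName" key is stored for the job.
func jobNameFromMetadata(r *repository.JobRepository, job *schema.Job) (string, error) {
	meta, err := r.FetchMetadata(job) // kept by this commit, see the diff above
	if err != nil {
		return "", err
	}
	return meta["jobName"], nil
}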
@@ -597,9 +564,18 @@ func (r *JobRepository) FindColumnValue(user *auth.User, searchterm string, tabl
 		query = "%" + searchterm + "%"
 	}
 	if user.HasAnyRole([]auth.Role{auth.RoleAdmin, auth.RoleSupport, auth.RoleManager}) {
-		err := sq.Select(table+"."+selectColumn).Distinct().From(table).
-			Where(table+"."+whereColumn+compareStr, query).
-			RunWith(r.stmtCache).QueryRow().Scan(&result)
+		theQuery := sq.Select(table+"."+selectColumn).Distinct().From(table).
+			Where(table+"."+whereColumn+compareStr, query)
+
+		// theSql, args, theErr := theQuery.ToSql()
+		// if theErr != nil {
+		// 	log.Warn("Error while converting query to sql")
+		// 	return "", err
+		// }
+		// log.Debugf("SQL query (FindColumnValue): `%s`, args: %#v", theSql, args)
+
+		err := theQuery.RunWith(r.stmtCache).QueryRow().Scan(&result)
+
 		if err != nil && err != sql.ErrNoRows {
 			return "", err
 		} else if err == nil {
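The FindColumnValue change above splits query construction from execution so the squirrel builder can be rendered with ToSql() for debug logging before it runs (the logging itself stays commented out in the commit). A standalone sketch of that pattern, with illustrative names and a plain *sql.DB in place of the repository's statement cache:

package example

import (
	"database/sql"
	"log"

	sq "github.com/Masterminds/squirrel"
)

// findOne builds the query as a value first, optionally logs its SQL, then executes it.
func findOne(db *sql.DB, table, selectCol, whereCol, value string) (string, error) {
	theQuery := sq.Select(table+"."+selectCol).Distinct().From(table).
		Where(table+"."+whereCol+" = ?", value)

	// Same idea as the commented-out Debugf above: render the SQL for inspection.
	if qs, args, err := theQuery.ToSql(); err == nil {
		log.Printf("SQL query (findOne): `%s`, args: %#v", qs, args)
	}

	var result string
	err := theQuery.RunWith(db).QueryRow().Scan(&result)
	return result, err
}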
@@ -913,9 +889,9 @@ func (r *JobRepository) JobsStatistics(ctx context.Context,
 			user := auth.GetUser(ctx)
 			name, _ := r.FindColumnValue(user, id, "user", "name", "username", false)
 			if name != "" {
-				stats[id].Name = &name
+				stats[id].Name = name
 			} else {
-				stats[id].Name = &emptyDash
+				stats[id].Name = emptyDash
 			}
 		}
 	}
@@ -47,7 +47,6 @@
 			jobId
 			user
 			project
-			jobName
 			cluster
 			subCluster
 			startTime