Mirror of https://github.com/ClusterCockpit/cc-backend (synced 2024-12-27 05:49:04 +01:00)

Commit 02752f52e4: Merge branch 'master' into refactor-job-repository

@@ -10,7 +10,6 @@ type Job {
   jobId: Int!
   user: String!
   project: String!
-  jobName: String
   cluster: String!
   subCluster: String!
   startTime: Time!

@@ -286,7 +285,7 @@ type HistoPoint {

 type JobsStatistics {
   id: ID! # If `groupBy` was used, ID of the user/project/cluster
-  name: String # if User-Statistics: Given Name of Account (ID) Owner
+  name: String! # if User-Statistics: Given Name of Account (ID) Owner
   totalJobs: Int! # Number of jobs that matched
   shortJobs: Int! # Number of jobs with a duration of less than 2 minutes
   totalWalltime: Int! # Sum of the duration of all matched jobs in hours

@@ -90,7 +90,6 @@ type ComplexityRoot struct {
        Exclusive        func(childComplexity int) int
        ID               func(childComplexity int) int
        JobID            func(childComplexity int) int
-       JobName          func(childComplexity int) int
        MetaData         func(childComplexity int) int
        MonitoringStatus func(childComplexity int) int
        NumAcc           func(childComplexity int) int

@@ -287,8 +286,6 @@ type ClusterResolver interface {
     Partitions(ctx context.Context, obj *schema.Cluster) ([]string, error)
 }
 type JobResolver interface {
-    JobName(ctx context.Context, obj *schema.Job) (*string, error)
-
     Tags(ctx context.Context, obj *schema.Job) ([]*schema.Tag, error)

     ConcurrentJobs(ctx context.Context, obj *schema.Job) (*model.JobLinkResultList, error)

@@ -489,13 +486,6 @@ func (e *executableSchema) Complexity(typeName, field string, childComplexity in

         return e.complexity.Job.JobID(childComplexity), true

-    case "Job.jobName":
-        if e.complexity.Job.JobName == nil {
-            break
-        }
-
-        return e.complexity.Job.JobName(childComplexity), true
-
     case "Job.metaData":
         if e.complexity.Job.MetaData == nil {
             break

@@ -1498,7 +1488,6 @@ type Job {
   jobId: Int!
   user: String!
   project: String!
-  jobName: String
   cluster: String!
   subCluster: String!
   startTime: Time!

@@ -1774,7 +1763,7 @@ type HistoPoint {

 type JobsStatistics {
   id: ID! # If ` + "`" + `groupBy` + "`" + ` was used, ID of the user/project/cluster
-  name: String # if User-Statistics: Given Name of Account (ID) Owner
+  name: String! # if User-Statistics: Given Name of Account (ID) Owner
   totalJobs: Int! # Number of jobs that matched
   shortJobs: Int! # Number of jobs with a duration of less than 2 minutes
   totalWalltime: Int! # Sum of the duration of all matched jobs in hours

@@ -3177,47 +3166,6 @@ func (ec *executionContext) fieldContext_Job_project(ctx context.Context, field
     return fc, nil
 }

-func (ec *executionContext) _Job_jobName(ctx context.Context, field graphql.CollectedField, obj *schema.Job) (ret graphql.Marshaler) {
-    fc, err := ec.fieldContext_Job_jobName(ctx, field)
-    if err != nil {
-        return graphql.Null
-    }
-    ctx = graphql.WithFieldContext(ctx, fc)
-    defer func() {
-        if r := recover(); r != nil {
-            ec.Error(ctx, ec.Recover(ctx, r))
-            ret = graphql.Null
-        }
-    }()
-    resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
-        ctx = rctx // use context from middleware stack in children
-        return ec.resolvers.Job().JobName(rctx, obj)
-    })
-    if err != nil {
-        ec.Error(ctx, err)
-        return graphql.Null
-    }
-    if resTmp == nil {
-        return graphql.Null
-    }
-    res := resTmp.(*string)
-    fc.Result = res
-    return ec.marshalOString2ᚖstring(ctx, field.Selections, res)
-}
-
-func (ec *executionContext) fieldContext_Job_jobName(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) {
-    fc = &graphql.FieldContext{
-        Object:     "Job",
-        Field:      field,
-        IsMethod:   true,
-        IsResolver: true,
-        Child: func(ctx context.Context, field graphql.CollectedField) (*graphql.FieldContext, error) {
-            return nil, errors.New("field of type String does not have child fields")
-        },
-    }
-    return fc, nil
-}
-
 func (ec *executionContext) _Job_cluster(ctx context.Context, field graphql.CollectedField, obj *schema.Job) (ret graphql.Marshaler) {
     fc, err := ec.fieldContext_Job_cluster(ctx, field)
     if err != nil {

@@ -4636,8 +4584,6 @@ func (ec *executionContext) fieldContext_JobResultList_items(ctx context.Context
             return ec.fieldContext_Job_user(ctx, field)
         case "project":
             return ec.fieldContext_Job_project(ctx, field)
-        case "jobName":
-            return ec.fieldContext_Job_jobName(ctx, field)
         case "cluster":
             return ec.fieldContext_Job_cluster(ctx, field)
         case "subCluster":

@@ -4871,11 +4817,14 @@ func (ec *executionContext) _JobsStatistics_name(ctx context.Context, field grap
         return graphql.Null
     }
     if resTmp == nil {
+        if !graphql.HasFieldError(ctx, fc) {
+            ec.Errorf(ctx, "must not be null")
+        }
         return graphql.Null
     }
-    res := resTmp.(*string)
+    res := resTmp.(string)
     fc.Result = res
-    return ec.marshalOString2ᚖstring(ctx, field.Selections, res)
+    return ec.marshalNString2string(ctx, field.Selections, res)
 }

 func (ec *executionContext) fieldContext_JobsStatistics_name(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) {

@@ -6635,8 +6584,6 @@ func (ec *executionContext) fieldContext_Query_job(ctx context.Context, field gr
             return ec.fieldContext_Job_user(ctx, field)
         case "project":
             return ec.fieldContext_Job_project(ctx, field)
-        case "jobName":
-            return ec.fieldContext_Job_jobName(ctx, field)
         case "cluster":
             return ec.fieldContext_Job_cluster(ctx, field)
         case "subCluster":

@@ -11711,23 +11658,6 @@ func (ec *executionContext) _Job(ctx context.Context, sel ast.SelectionSet, obj
             if out.Values[i] == graphql.Null {
                 atomic.AddUint32(&invalids, 1)
             }
-        case "jobName":
-            field := field
-
-            innerFunc := func(ctx context.Context) (res graphql.Marshaler) {
-                defer func() {
-                    if r := recover(); r != nil {
-                        ec.Error(ctx, ec.Recover(ctx, r))
-                    }
-                }()
-                res = ec._Job_jobName(ctx, field, obj)
-                return res
-            }
-
-            out.Concurrently(i, func() graphql.Marshaler {
-                return innerFunc(ctx)
-
-            })
         case "cluster":

             out.Values[i] = ec._Job_cluster(ctx, field, obj)

@@ -12125,6 +12055,9 @@ func (ec *executionContext) _JobsStatistics(ctx context.Context, sel ast.Selecti

             out.Values[i] = ec._JobsStatistics_name(ctx, field, obj)

+            if out.Values[i] == graphql.Null {
+                invalids++
+            }
         case "totalJobs":

             out.Values[i] = ec._JobsStatistics_totalJobs(ctx, field, obj)

@@ -88,7 +88,7 @@ type JobResultList struct {

 type JobsStatistics struct {
     ID string `json:"id"`
-    Name *string `json:"name"`
+    Name string `json:"name"`
     TotalJobs int `json:"totalJobs"`
     ShortJobs int `json:"shortJobs"`
     TotalWalltime int `json:"totalWalltime"`

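Note: with name non-nullable in the schema and a plain string in the generated model, whatever fills model.JobsStatistics now has to assign Name in every row. A minimal sketch of the guard this implies; the lookupName helper, the fallback to the ID, and the model import path are assumptions, not code from this commit.

package statssketch

// Import path assumed from the usual cc-backend layout; adjust if it differs.
import "github.com/ClusterCockpit/cc-backend/internal/graph/model"

// statsRow is a hypothetical helper that builds one statistics row.
// Because Name is now a plain string, it must always hold something.
func statsRow(id string, lookupName func(string) string) *model.JobsStatistics {
	name := lookupName(id)
	if name == "" {
		name = id // assumed fallback so the non-nullable field is never empty
	}
	return &model.JobsStatistics{
		ID:   id,
		Name: name,
	}
}
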
@@ -27,11 +27,6 @@ func (r *clusterResolver) Partitions(ctx context.Context, obj *schema.Cluster) (
     return r.Repo.Partitions(obj.Name)
 }

-// JobName is the resolver for the jobName field.
-func (r *jobResolver) JobName(ctx context.Context, obj *schema.Job) (*string, error) {
-    return r.Repo.FetchJobName(obj)
-}
-
 // Tags is the resolver for the tags field.
 func (r *jobResolver) Tags(ctx context.Context, obj *schema.Job) ([]*schema.Tag, error) {
     return r.Repo.GetTags(&obj.ID)

@@ -123,6 +123,9 @@ func (r *JobRepository) Flush() error {
             return err
         }
     case "mysql":
+        if _, err = r.DB.Exec(`SET FOREIGN_KEY_CHECKS = 0`); err != nil {
+            return err
+        }
         if _, err = r.DB.Exec(`TRUNCATE TABLE jobtag`); err != nil {
             return err
         }

@@ -132,6 +135,9 @@ func (r *JobRepository) Flush() error {
         if _, err = r.DB.Exec(`TRUNCATE TABLE job`); err != nil {
             return err
         }
+        if _, err = r.DB.Exec(`SET FOREIGN_KEY_CHECKS = 1`); err != nil {
+            return err
+        }
     }

     return nil

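Note: read together, the two Flush hunks wrap the MySQL TRUNCATE statements in a FOREIGN_KEY_CHECKS toggle. A minimal standalone sketch of that pattern follows; the table list is limited to the two tables visible in the hunks, so this is a reading aid under stated assumptions, not the literal contents of the repository method.

package flushsketch

import "database/sql"

// flushMySQL mirrors the pattern introduced above: foreign-key checks are
// switched off before truncating and switched back on afterwards.
func flushMySQL(db *sql.DB) error {
	if _, err := db.Exec(`SET FOREIGN_KEY_CHECKS = 0`); err != nil {
		return err
	}
	// Only the tables visible in the hunks are listed; the real Flush also
	// truncates tables in the untouched lines between the two hunks.
	for _, stmt := range []string{`TRUNCATE TABLE jobtag`, `TRUNCATE TABLE job`} {
		if _, err := db.Exec(stmt); err != nil {
			return err
		}
	}
	if _, err := db.Exec(`SET FOREIGN_KEY_CHECKS = 1`); err != nil {
		return err
	}
	return nil
}
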
@@ -148,39 +154,6 @@ func scanJobLink(row interface{ Scan(...interface{}) error }) (*model.JobLink, e
     return jobLink, nil
 }

-func (r *JobRepository) FetchJobName(job *schema.Job) (*string, error) {
-    start := time.Now()
-    cachekey := fmt.Sprintf("metadata:%d", job.ID)
-    if cached := r.cache.Get(cachekey, nil); cached != nil {
-        job.MetaData = cached.(map[string]string)
-        if jobName := job.MetaData["jobName"]; jobName != "" {
-            return &jobName, nil
-        }
-    }
-
-    if err := sq.Select("job.meta_data").From("job").Where("job.id = ?", job.ID).
-        RunWith(r.stmtCache).QueryRow().Scan(&job.RawMetaData); err != nil {
-        return nil, err
-    }
-
-    if len(job.RawMetaData) == 0 {
-        return nil, nil
-    }
-
-    if err := json.Unmarshal(job.RawMetaData, &job.MetaData); err != nil {
-        return nil, err
-    }
-
-    r.cache.Put(cachekey, job.MetaData, len(job.RawMetaData), 24*time.Hour)
-    log.Infof("Timer FetchJobName %s", time.Since(start))
-
-    if jobName := job.MetaData["jobName"]; jobName != "" {
-        return &jobName, nil
-    } else {
-        return new(string), nil
-    }
-}
-
 func (r *JobRepository) FetchMetadata(job *schema.Job) (map[string]string, error) {
     start := time.Now()
     cachekey := fmt.Sprintf("metadata:%d", job.ID)

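Note: with FetchJobName removed, the retained FetchMetadata (its opening lines appear as context above) is the remaining route to the same data, since the deleted code only ever read the "jobName" key of the job's metadata map. A hedged sketch of doing that lookup at the call site; the helper name and the import paths are assumptions based on the repository layout, not code from this commit.

package jobnamesketch

import (
	// Import paths assumed from the cc-backend layout; adjust if they differ.
	"github.com/ClusterCockpit/cc-backend/internal/repository"
	"github.com/ClusterCockpit/cc-backend/pkg/schema"
)

// jobNameOf is a hypothetical helper: it reads the job name through the
// retained FetchMetadata, the same way the removed FetchJobName did.
func jobNameOf(r *repository.JobRepository, job *schema.Job) (string, error) {
	meta, err := r.FetchMetadata(job)
	if err != nil {
		return "", err
	}
	return meta["jobName"], nil // key consulted by the removed implementation
}
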
@@ -594,9 +567,18 @@ func (r *JobRepository) FindColumnValue(user *auth.User, searchterm string, tabl
         query = "%" + searchterm + "%"
     }
     if user.HasAnyRole([]auth.Role{auth.RoleAdmin, auth.RoleSupport, auth.RoleManager}) {
-        err := sq.Select(table+"."+selectColumn).Distinct().From(table).
-            Where(table+"."+whereColumn+compareStr, query).
-            RunWith(r.stmtCache).QueryRow().Scan(&result)
+        theQuery := sq.Select(table+"."+selectColumn).Distinct().From(table).
+            Where(table+"."+whereColumn+compareStr, query)
+
+        // theSql, args, theErr := theQuery.ToSql()
+        // if theErr != nil {
+        //     log.Warn("Error while converting query to sql")
+        //     return "", err
+        // }
+        // log.Debugf("SQL query (FindColumnValue): `%s`, args: %#v", theSql, args)
+
+        err := theQuery.RunWith(r.stmtCache).QueryRow().Scan(&result)
+
         if err != nil && err != sql.ErrNoRows {
             return "", err
         } else if err == nil {

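Note: the point of the FindColumnValue change is that the squirrel builder is now held in a variable (theQuery) before execution, so it can also be rendered with ToSql() for debug logging, as the commented-out lines indicate. A small self-contained sketch of that pattern with github.com/Masterminds/squirrel; the table and column names are made up for illustration, and this is not cc-backend code.

package main

import (
	"fmt"

	sq "github.com/Masterminds/squirrel"
)

func main() {
	// Build the query first instead of executing it in a single chain,
	// so it can be inspected before it runs.
	theQuery := sq.Select("job.project").Distinct().From("job").
		Where("job.project LIKE ?", "%abc%")

	theSql, args, err := theQuery.ToSql()
	if err != nil {
		fmt.Println("error while converting query to sql:", err)
		return
	}
	fmt.Printf("SQL query (FindColumnValue): `%s`, args: %#v\n", theSql, args)

	// In the repository code the same builder value is then executed with
	// theQuery.RunWith(r.stmtCache).QueryRow().Scan(&result).
}
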
@@ -48,13 +48,6 @@ CREATE TABLE IF NOT EXISTS jobtag (
 FOREIGN KEY (job_id) REFERENCES job (id) ON DELETE CASCADE,
 FOREIGN KEY (tag_id) REFERENCES tag (id) ON DELETE CASCADE);

-CREATE TABLE IF NOT EXISTS configuration (
-username varchar(255),
-confkey varchar(255),
-value varchar(255),
-PRIMARY KEY (username, confkey),
-FOREIGN KEY (username) REFERENCES user (username) ON DELETE CASCADE ON UPDATE NO ACTION);
-
 CREATE TABLE IF NOT EXISTS user (
 username varchar(255) PRIMARY KEY NOT NULL,
 password varchar(255) DEFAULT NULL,

@@ -62,3 +55,12 @@ CREATE TABLE IF NOT EXISTS user (
 name varchar(255) DEFAULT NULL,
 roles varchar(255) NOT NULL DEFAULT "[]",
 email varchar(255) DEFAULT NULL);
+
+CREATE TABLE IF NOT EXISTS configuration (
+username varchar(255),
+confkey varchar(255),
+value varchar(255),
+PRIMARY KEY (username, confkey),
+FOREIGN KEY (username) REFERENCES user (username) ON DELETE CASCADE ON UPDATE NO ACTION);
+
+

@@ -0,0 +1,5 @@
+ALTER TABLE job
+MODIFY `partition` VARCHAR(255) NOT NULL,
+MODIFY array_job_id BIGINT NOT NULL,
+MODIFY num_hwthreads INT NOT NULL,
+MODIFY num_acc INT NOT NULL;

@@ -0,0 +1,5 @@
+ALTER TABLE job
+MODIFY `partition` VARCHAR(255),
+MODIFY array_job_id BIGINT,
+MODIFY num_hwthreads INT,
+MODIFY num_acc INT;

@@ -42,13 +42,6 @@ PRIMARY KEY (job_id, tag_id),
 FOREIGN KEY (job_id) REFERENCES job (id) ON DELETE CASCADE,
 FOREIGN KEY (tag_id) REFERENCES tag (id) ON DELETE CASCADE);

-CREATE TABLE IF NOT EXISTS configuration (
-username varchar(255),
-confkey varchar(255),
-value varchar(255),
-PRIMARY KEY (username, confkey),
-FOREIGN KEY (username) REFERENCES user (username) ON DELETE CASCADE ON UPDATE NO ACTION);
-
 CREATE TABLE IF NOT EXISTS user (
 username varchar(255) PRIMARY KEY NOT NULL,
 password varchar(255) DEFAULT NULL,

@@ -56,3 +49,12 @@ ldap tinyint NOT NULL DEFAULT 0, /* col called "ldap" for historic reas
 name varchar(255) DEFAULT NULL,
 roles varchar(255) NOT NULL DEFAULT "[]",
 email varchar(255) DEFAULT NULL);
+
+CREATE TABLE IF NOT EXISTS configuration (
+username varchar(255),
+confkey varchar(255),
+value varchar(255),
+PRIMARY KEY (username, confkey),
+FOREIGN KEY (username) REFERENCES user (username) ON DELETE CASCADE ON UPDATE NO ACTION);
+
+

@@ -25,12 +25,13 @@
         filterPresets.startTime = { from: hourAgo.toISOString(), to: now.toISOString() }
     }

-    let cluster
-    let filters
-    let rooflineMaxY
-    let colWidth
-    let numBins = 50
-    let maxY = -1
+    let cluster;
+    let filterComponent; // see why here: https://stackoverflow.com/questions/58287729/how-can-i-export-a-function-from-a-svelte-component-that-changes-a-value-in-the
+    let jobFilters = [];
+    let rooflineMaxY;
+    let colWidth;
+    let numBins = 50;
+    let maxY = -1;
     const ccconfig = getContext('cc-config')
     const metricConfig = getContext('metrics')

@@ -54,8 +55,8 @@
     $: statsQuery = queryStore({
         client: client,
         query: gql`
-            query($filters: [JobFilter!]!) {
-                stats: jobsStatistics(filter: $filters) {
+            query($jobFilters: [JobFilter!]!) {
+                stats: jobsStatistics(filter: $jobFilters) {
                     totalJobs
                     shortJobs
                     totalWalltime

@@ -67,34 +68,34 @@
                 topUsers: jobsCount(filter: $filters, groupBy: USER, weight: NODE_HOURS, limit: 5) { name, count }
             }
         `,
-        variables: { filters }
+        variables: { jobFilters }
     })

     $: footprintsQuery = queryStore({
         client: client,
         query: gql`
-            query($filters: [JobFilter!]!, $metrics: [String!]!) {
-                footprints: jobsFootprints(filter: $filters, metrics: $metrics) {
+            query($jobFilters: [JobFilter!]!, $metrics: [String!]!) {
+                footprints: jobsFootprints(filter: $jobFilters, metrics: $metrics) {
                     nodehours,
                     metrics { metric, data }
                 }
             }`,
-        variables: { filters, metrics }
+        variables: { jobFilters, metrics }
     })

     $: rooflineQuery = queryStore({
         client: client,
         query: gql`
-            query($filters: [JobFilter!]!, $rows: Int!, $cols: Int!,
+            query($jobFilters: [JobFilter!]!, $rows: Int!, $cols: Int!,
                 $minX: Float!, $minY: Float!, $maxX: Float!, $maxY: Float!) {
-                rooflineHeatmap(filter: $filters, rows: $rows, cols: $cols,
+                rooflineHeatmap(filter: $jobFilters, rows: $rows, cols: $cols,
                     minX: $minX, minY: $minY, maxX: $maxX, maxY: $maxY)
             }
         `,
-        variables: { filters, rows: 50, cols: 50, minX: 0.01, minY: 1., maxX: 1000., maxY }
+        variables: { jobFilters, rows: 50, cols: 50, minX: 0.01, minY: 1., maxX: 1000., maxY }
     })

-    onMount(() => filters.update())
+    onMount(() => filterComponent.update())
 </script>

 <Row>

@@ -115,12 +116,12 @@
     </Col>
     <Col xs="auto">
         <Filters
-            bind:this={filters}
+            bind:this={filterComponent}
             filterPresets={filterPresets}
             disableClusterSelection={true}
             startTimeQuickSelect={true}
             on:update={({ detail }) => {
-                filters = detail.filters;
+                jobFilters = detail.filters;
             }} />
     </Col>
 </Row>

@@ -17,7 +17,7 @@
     export let authlevel
     export let roles

-    let filters = []
+    let filterComponent; // see why here: https://stackoverflow.com/questions/58287729/how-can-i-export-a-function-from-a-svelte-component-that-changes-a-value-in-the
     let jobList, matchedJobs = null
     let sorting = { field: 'startTime', order: 'DESC' }, isSortingOpen = false, isMetricsSelectionOpen = false
     let metrics = filterPresets.cluster

@@ -25,12 +25,10 @@
         : ccconfig.plot_list_selectedMetrics
     let selectedCluster = filterPresets?.cluster ? filterPresets.cluster : null

-    $: selectedCluster = filters[0]?.cluster ? filters[0].cluster.eq : null
-
     // The filterPresets are handled by the Filters component,
     // so we need to wait for it to be ready before we can start a query.
     // This is also why JobList component starts out with a paused query.
-    onMount(() => filters.update())
+    onMount(() => filterComponent.update())
 </script>

 <Row>

@@ -61,15 +59,16 @@
     <Col xs="auto">
         <Filters
             filterPresets={filterPresets}
-            bind:this={filters}
+            bind:this={filterComponent}
             on:update={({ detail }) => {
-                filters = detail.filters
-                jobList.update(detail.filters)}
+                selectedCluster = detail.filters[0]?.cluster ? detail.filters[0].cluster.eq : null
+                jobList.update(detail.filters)
+                }
             } />
     </Col>

     <Col xs="3" style="margin-left: auto;">
-        <UserOrProject bind:authlevel={authlevel} bind:roles={roles} on:update={({ detail }) => filters.update(detail)}/>
+        <UserOrProject bind:authlevel={authlevel} bind:roles={roles} on:update={({ detail }) => filterComponent.update(detail)}/>
     </Col>
     <Col xs="2">
         <Refresher on:reload={() => jobList.refresh()} />

@@ -29,12 +29,17 @@
         "Invalid list type provided!"
     );

+    let filterComponent; // see why here: https://stackoverflow.com/questions/58287729/how-can-i-export-a-function-from-a-svelte-component-that-changes-a-value-in-the
+    let jobFilters = [];
+    let nameFilter = "";
+    let sorting = { field: "totalJobs", direction: "down" };
+
     const client = getContextClient();
     $: stats = queryStore({
         client: client,
         query: gql`
-            query($filters: [JobFilter!]!) {
-                rows: jobsStatistics(filter: $filters, groupBy: ${type}) {
+            query($jobFilters: [JobFilter!]!) {
+                rows: jobsStatistics(filter: $jobFilters, groupBy: ${type}) {
                     id
                     name
                     totalJobs

@@ -42,13 +47,9 @@
                     totalCoreHours
                 }
             }`,
-        variables: { filters }
+        variables: { jobFilters }
     });

-    let filters;
-    let nameFilter = "";
-    let sorting = { field: "totalJobs", direction: "down" };
-
     function changeSorting(event, field) {
         let target = event.target;
         while (target.tagName != "BUTTON") target = target.parentElement;

@@ -73,7 +74,7 @@
         return stats.filter((u) => u.id.includes(nameFilter)).sort(cmp);
     }

-    onMount(() => filters.update());
+    onMount(() => filterComponent.update());
 </script>

 <Row>

@@ -93,12 +94,12 @@
     </Col>
     <Col xs="auto">
         <Filters
-            bind:this={filters}
+            bind:this={filterComponent}
             {filterPresets}
             startTimeQuickSelect={true}
             menuText="Only {type.toLowerCase()}s with jobs that match the filters will show up"
             on:update={({ detail }) => {
-                filters = detail.filters;
+                jobFilters = detail.filters;
             }}
         />
     </Col>

@@ -18,8 +18,9 @@
     export let user
     export let filterPresets

-    let filters = []
-    let jobList
+    let filterComponent; // see why here: https://stackoverflow.com/questions/58287729/how-can-i-export-a-function-from-a-svelte-component-that-changes-a-value-in-the
+    let jobList;
+    let jobFilters = [];
     let sorting = { field: 'startTime', order: 'DESC' }, isSortingOpen = false
     let metrics = ccconfig.plot_list_selectedMetrics, isMetricsSelectionOpen = false
     let w1, w2, histogramHeight = 250

@@ -29,8 +30,8 @@
     $: stats = queryStore({
         client: client,
         query: gql`
-            query($filters: [JobFilter!]!) {
-                jobsStatistics(filter: $filters) {
+            query($jobFilters: [JobFilter!]!) {
+                jobsStatistics(filter: $jobFilters) {
                     totalJobs
                     shortJobs
                     totalWalltime

@@ -38,12 +39,10 @@
                     histDuration { count, value }
                     histNumNodes { count, value }
                 }}`,
-        variables: { filters }
+        variables: { jobFilters }
     })

-    $: selectedCluster = filters[0]?.cluster ? filters[0].cluster.eq : null
-
-    onMount(() => filters.update())
+    onMount(() => filterComponent.update())
 </script>

 <Row>

@@ -74,10 +73,10 @@
                 <Filters
                     filterPresets={filterPresets}
                     startTimeQuickSelect={true}
-                    bind:this={filters}
+                    bind:this={filterComponent}
                     on:update={({ detail }) => {
-                        let jobFilters = [...detail.filters, { user: { eq: user.username } }]
-                        filters = jobFilters
+                        jobFilters = [...detail.filters, { user: { eq: user.username } }]
+                        selectedCluster = jobFilters[0]?.cluster ? jobFilters[0].cluster.eq : null
                         jobList.update(jobFilters)
                     }} />
             </Col>

@@ -47,7 +47,6 @@
             jobId
             user
             project
-            jobName
             cluster
             subCluster
             startTime