diff --git a/api/schema.graphqls b/api/schema.graphqls
index c9aa2a8..b7c16ce 100644
--- a/api/schema.graphqls
+++ b/api/schema.graphqls
@@ -10,7 +10,6 @@ type Job {
jobId: Int!
user: String!
project: String!
- jobName: String
cluster: String!
subCluster: String!
startTime: Time!
@@ -286,7 +285,7 @@ type HistoPoint {
type JobsStatistics {
id: ID! # If `groupBy` was used, ID of the user/project/cluster
- name: String # if User-Statistics: Given Name of Account (ID) Owner
+ name: String! # if User-Statistics: Given Name of Account (ID) Owner
totalJobs: Int! # Number of jobs that matched
shortJobs: Int! # Number of jobs with a duration of less than 2 minutes
totalWalltime: Int! # Sum of the duration of all matched jobs in hours
diff --git a/internal/graph/generated/generated.go b/internal/graph/generated/generated.go
index a791e6c..907526e 100644
--- a/internal/graph/generated/generated.go
+++ b/internal/graph/generated/generated.go
@@ -90,7 +90,6 @@ type ComplexityRoot struct {
Exclusive func(childComplexity int) int
ID func(childComplexity int) int
JobID func(childComplexity int) int
- JobName func(childComplexity int) int
MetaData func(childComplexity int) int
MonitoringStatus func(childComplexity int) int
NumAcc func(childComplexity int) int
@@ -287,8 +286,6 @@ type ClusterResolver interface {
Partitions(ctx context.Context, obj *schema.Cluster) ([]string, error)
}
type JobResolver interface {
- JobName(ctx context.Context, obj *schema.Job) (*string, error)
-
Tags(ctx context.Context, obj *schema.Job) ([]*schema.Tag, error)
ConcurrentJobs(ctx context.Context, obj *schema.Job) (*model.JobLinkResultList, error)
@@ -489,13 +486,6 @@ func (e *executableSchema) Complexity(typeName, field string, childComplexity in
return e.complexity.Job.JobID(childComplexity), true
- case "Job.jobName":
- if e.complexity.Job.JobName == nil {
- break
- }
-
- return e.complexity.Job.JobName(childComplexity), true
-
case "Job.metaData":
if e.complexity.Job.MetaData == nil {
break
@@ -1498,7 +1488,6 @@ type Job {
jobId: Int!
user: String!
project: String!
- jobName: String
cluster: String!
subCluster: String!
startTime: Time!
@@ -1774,7 +1763,7 @@ type HistoPoint {
type JobsStatistics {
id: ID! # If ` + "`" + `groupBy` + "`" + ` was used, ID of the user/project/cluster
- name: String # if User-Statistics: Given Name of Account (ID) Owner
+ name: String! # if User-Statistics: Given Name of Account (ID) Owner
totalJobs: Int! # Number of jobs that matched
shortJobs: Int! # Number of jobs with a duration of less than 2 minutes
totalWalltime: Int! # Sum of the duration of all matched jobs in hours
@@ -3177,47 +3166,6 @@ func (ec *executionContext) fieldContext_Job_project(ctx context.Context, field
return fc, nil
}
-func (ec *executionContext) _Job_jobName(ctx context.Context, field graphql.CollectedField, obj *schema.Job) (ret graphql.Marshaler) {
- fc, err := ec.fieldContext_Job_jobName(ctx, field)
- if err != nil {
- return graphql.Null
- }
- ctx = graphql.WithFieldContext(ctx, fc)
- defer func() {
- if r := recover(); r != nil {
- ec.Error(ctx, ec.Recover(ctx, r))
- ret = graphql.Null
- }
- }()
- resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
- ctx = rctx // use context from middleware stack in children
- return ec.resolvers.Job().JobName(rctx, obj)
- })
- if err != nil {
- ec.Error(ctx, err)
- return graphql.Null
- }
- if resTmp == nil {
- return graphql.Null
- }
- res := resTmp.(*string)
- fc.Result = res
- return ec.marshalOString2áš–string(ctx, field.Selections, res)
-}
-
-func (ec *executionContext) fieldContext_Job_jobName(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) {
- fc = &graphql.FieldContext{
- Object: "Job",
- Field: field,
- IsMethod: true,
- IsResolver: true,
- Child: func(ctx context.Context, field graphql.CollectedField) (*graphql.FieldContext, error) {
- return nil, errors.New("field of type String does not have child fields")
- },
- }
- return fc, nil
-}
-
func (ec *executionContext) _Job_cluster(ctx context.Context, field graphql.CollectedField, obj *schema.Job) (ret graphql.Marshaler) {
fc, err := ec.fieldContext_Job_cluster(ctx, field)
if err != nil {
@@ -4636,8 +4584,6 @@ func (ec *executionContext) fieldContext_JobResultList_items(ctx context.Context
return ec.fieldContext_Job_user(ctx, field)
case "project":
return ec.fieldContext_Job_project(ctx, field)
- case "jobName":
- return ec.fieldContext_Job_jobName(ctx, field)
case "cluster":
return ec.fieldContext_Job_cluster(ctx, field)
case "subCluster":
@@ -4871,11 +4817,14 @@ func (ec *executionContext) _JobsStatistics_name(ctx context.Context, field grap
return graphql.Null
}
if resTmp == nil {
+ if !graphql.HasFieldError(ctx, fc) {
+ ec.Errorf(ctx, "must not be null")
+ }
return graphql.Null
}
- res := resTmp.(*string)
+ res := resTmp.(string)
fc.Result = res
- return ec.marshalOString2áš–string(ctx, field.Selections, res)
+ return ec.marshalNString2string(ctx, field.Selections, res)
}
func (ec *executionContext) fieldContext_JobsStatistics_name(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) {
@@ -6635,8 +6584,6 @@ func (ec *executionContext) fieldContext_Query_job(ctx context.Context, field gr
return ec.fieldContext_Job_user(ctx, field)
case "project":
return ec.fieldContext_Job_project(ctx, field)
- case "jobName":
- return ec.fieldContext_Job_jobName(ctx, field)
case "cluster":
return ec.fieldContext_Job_cluster(ctx, field)
case "subCluster":
@@ -11711,23 +11658,6 @@ func (ec *executionContext) _Job(ctx context.Context, sel ast.SelectionSet, obj
if out.Values[i] == graphql.Null {
atomic.AddUint32(&invalids, 1)
}
- case "jobName":
- field := field
-
- innerFunc := func(ctx context.Context) (res graphql.Marshaler) {
- defer func() {
- if r := recover(); r != nil {
- ec.Error(ctx, ec.Recover(ctx, r))
- }
- }()
- res = ec._Job_jobName(ctx, field, obj)
- return res
- }
-
- out.Concurrently(i, func() graphql.Marshaler {
- return innerFunc(ctx)
-
- })
case "cluster":
out.Values[i] = ec._Job_cluster(ctx, field, obj)
@@ -12125,6 +12055,9 @@ func (ec *executionContext) _JobsStatistics(ctx context.Context, sel ast.Selecti
out.Values[i] = ec._JobsStatistics_name(ctx, field, obj)
+ if out.Values[i] == graphql.Null {
+ invalids++
+ }
case "totalJobs":
out.Values[i] = ec._JobsStatistics_totalJobs(ctx, field, obj)
diff --git a/internal/graph/model/models_gen.go b/internal/graph/model/models_gen.go
index 263db8f..552204e 100644
--- a/internal/graph/model/models_gen.go
+++ b/internal/graph/model/models_gen.go
@@ -88,7 +88,7 @@ type JobResultList struct {
type JobsStatistics struct {
ID string `json:"id"`
- Name *string `json:"name"`
+ Name string `json:"name"`
TotalJobs int `json:"totalJobs"`
ShortJobs int `json:"shortJobs"`
TotalWalltime int `json:"totalWalltime"`
diff --git a/internal/graph/schema.resolvers.go b/internal/graph/schema.resolvers.go
index c881a78..91deb6d 100644
--- a/internal/graph/schema.resolvers.go
+++ b/internal/graph/schema.resolvers.go
@@ -27,11 +27,6 @@ func (r *clusterResolver) Partitions(ctx context.Context, obj *schema.Cluster) (
return r.Repo.Partitions(obj.Name)
}
-// JobName is the resolver for the jobName field.
-func (r *jobResolver) JobName(ctx context.Context, obj *schema.Job) (*string, error) {
- return r.Repo.FetchJobName(obj)
-}
-
// Tags is the resolver for the tags field.
func (r *jobResolver) Tags(ctx context.Context, obj *schema.Job) ([]*schema.Tag, error) {
return r.Repo.GetTags(&obj.ID)
diff --git a/internal/repository/job.go b/internal/repository/job.go
index 1756740..ece960a 100644
--- a/internal/repository/job.go
+++ b/internal/repository/job.go
@@ -123,6 +123,9 @@ func (r *JobRepository) Flush() error {
return err
}
case "mysql":
+ if _, err = r.DB.Exec(`SET FOREIGN_KEY_CHECKS = 0`); err != nil {
+ return err
+ }
if _, err = r.DB.Exec(`TRUNCATE TABLE jobtag`); err != nil {
return err
}
@@ -132,6 +135,9 @@ func (r *JobRepository) Flush() error {
if _, err = r.DB.Exec(`TRUNCATE TABLE job`); err != nil {
return err
}
+ if _, err = r.DB.Exec(`SET FOREIGN_KEY_CHECKS = 1`); err != nil {
+ return err
+ }
}
return nil
@@ -148,39 +154,6 @@ func scanJobLink(row interface{ Scan(...interface{}) error }) (*model.JobLink, e
return jobLink, nil
}
-func (r *JobRepository) FetchJobName(job *schema.Job) (*string, error) {
- start := time.Now()
- cachekey := fmt.Sprintf("metadata:%d", job.ID)
- if cached := r.cache.Get(cachekey, nil); cached != nil {
- job.MetaData = cached.(map[string]string)
- if jobName := job.MetaData["jobName"]; jobName != "" {
- return &jobName, nil
- }
- }
-
- if err := sq.Select("job.meta_data").From("job").Where("job.id = ?", job.ID).
- RunWith(r.stmtCache).QueryRow().Scan(&job.RawMetaData); err != nil {
- return nil, err
- }
-
- if len(job.RawMetaData) == 0 {
- return nil, nil
- }
-
- if err := json.Unmarshal(job.RawMetaData, &job.MetaData); err != nil {
- return nil, err
- }
-
- r.cache.Put(cachekey, job.MetaData, len(job.RawMetaData), 24*time.Hour)
- log.Infof("Timer FetchJobName %s", time.Since(start))
-
- if jobName := job.MetaData["jobName"]; jobName != "" {
- return &jobName, nil
- } else {
- return new(string), nil
- }
-}
-
func (r *JobRepository) FetchMetadata(job *schema.Job) (map[string]string, error) {
start := time.Now()
cachekey := fmt.Sprintf("metadata:%d", job.ID)
@@ -594,9 +567,18 @@ func (r *JobRepository) FindColumnValue(user *auth.User, searchterm string, tabl
query = "%" + searchterm + "%"
}
if user.HasAnyRole([]auth.Role{auth.RoleAdmin, auth.RoleSupport, auth.RoleManager}) {
- err := sq.Select(table+"."+selectColumn).Distinct().From(table).
- Where(table+"."+whereColumn+compareStr, query).
- RunWith(r.stmtCache).QueryRow().Scan(&result)
+ theQuery := sq.Select(table+"."+selectColumn).Distinct().From(table).
+ Where(table+"."+whereColumn+compareStr, query)
+
+ // theSql, args, theErr := theQuery.ToSql()
+ // if theErr != nil {
+ // log.Warn("Error while converting query to sql")
+ // return "", theErr
+ // }
+ // log.Debugf("SQL query (FindColumnValue): `%s`, args: %#v", theSql, args)
+
+ err := theQuery.RunWith(r.stmtCache).QueryRow().Scan(&result)
+
if err != nil && err != sql.ErrNoRows {
return "", err
} else if err == nil {
diff --git a/internal/repository/migrations/mysql/01_init-schema.up.sql b/internal/repository/migrations/mysql/01_init-schema.up.sql
index d3369fc..3a6930c 100644
--- a/internal/repository/migrations/mysql/01_init-schema.up.sql
+++ b/internal/repository/migrations/mysql/01_init-schema.up.sql
@@ -48,13 +48,6 @@ CREATE TABLE IF NOT EXISTS jobtag (
FOREIGN KEY (job_id) REFERENCES job (id) ON DELETE CASCADE,
FOREIGN KEY (tag_id) REFERENCES tag (id) ON DELETE CASCADE);
-CREATE TABLE IF NOT EXISTS configuration (
- username varchar(255),
- confkey varchar(255),
- value varchar(255),
- PRIMARY KEY (username, confkey),
- FOREIGN KEY (username) REFERENCES user (username) ON DELETE CASCADE ON UPDATE NO ACTION);
-
CREATE TABLE IF NOT EXISTS user (
username varchar(255) PRIMARY KEY NOT NULL,
password varchar(255) DEFAULT NULL,
@@ -62,3 +55,12 @@ CREATE TABLE IF NOT EXISTS user (
name varchar(255) DEFAULT NULL,
roles varchar(255) NOT NULL DEFAULT "[]",
email varchar(255) DEFAULT NULL);
+
+CREATE TABLE IF NOT EXISTS configuration (
+ username varchar(255),
+ confkey varchar(255),
+ value varchar(255),
+ PRIMARY KEY (username, confkey),
+ FOREIGN KEY (username) REFERENCES user (username) ON DELETE CASCADE ON UPDATE NO ACTION);
+
+
diff --git a/internal/repository/migrations/mysql/04_alter-table-job.down.sql b/internal/repository/migrations/mysql/04_alter-table-job.down.sql
new file mode 100644
index 0000000..ebc7454
--- /dev/null
+++ b/internal/repository/migrations/mysql/04_alter-table-job.down.sql
@@ -0,0 +1,5 @@
+ALTER TABLE job
+ MODIFY `partition` VARCHAR(255) NOT NULL,
+ MODIFY array_job_id BIGINT NOT NULL,
+ MODIFY num_hwthreads INT NOT NULL,
+ MODIFY num_acc INT NOT NULL;
diff --git a/internal/repository/migrations/mysql/04_alter-table-job.up.sql b/internal/repository/migrations/mysql/04_alter-table-job.up.sql
new file mode 100644
index 0000000..9fe7620
--- /dev/null
+++ b/internal/repository/migrations/mysql/04_alter-table-job.up.sql
@@ -0,0 +1,5 @@
+ALTER TABLE job
+ MODIFY `partition` VARCHAR(255),
+ MODIFY array_job_id BIGINT,
+ MODIFY num_hwthreads INT,
+ MODIFY num_acc INT;
diff --git a/internal/repository/migrations/sqlite3/01_init-schema.up.sql b/internal/repository/migrations/sqlite3/01_init-schema.up.sql
index 646fa69..a3aa4ae 100644
--- a/internal/repository/migrations/sqlite3/01_init-schema.up.sql
+++ b/internal/repository/migrations/sqlite3/01_init-schema.up.sql
@@ -42,13 +42,6 @@ PRIMARY KEY (job_id, tag_id),
FOREIGN KEY (job_id) REFERENCES job (id) ON DELETE CASCADE,
FOREIGN KEY (tag_id) REFERENCES tag (id) ON DELETE CASCADE);
-CREATE TABLE IF NOT EXISTS configuration (
-username varchar(255),
-confkey varchar(255),
-value varchar(255),
-PRIMARY KEY (username, confkey),
-FOREIGN KEY (username) REFERENCES user (username) ON DELETE CASCADE ON UPDATE NO ACTION);
-
CREATE TABLE IF NOT EXISTS user (
username varchar(255) PRIMARY KEY NOT NULL,
password varchar(255) DEFAULT NULL,
@@ -56,3 +49,12 @@ ldap tinyint NOT NULL DEFAULT 0, /* col called "ldap" for historic reas
name varchar(255) DEFAULT NULL,
roles varchar(255) NOT NULL DEFAULT "[]",
email varchar(255) DEFAULT NULL);
+
+CREATE TABLE IF NOT EXISTS configuration (
+username varchar(255),
+confkey varchar(255),
+value varchar(255),
+PRIMARY KEY (username, confkey),
+FOREIGN KEY (username) REFERENCES user (username) ON DELETE CASCADE ON UPDATE NO ACTION);
+
+
diff --git a/web/frontend/src/Analysis.root.svelte b/web/frontend/src/Analysis.root.svelte
index 59492fc..b0dd704 100644
--- a/web/frontend/src/Analysis.root.svelte
+++ b/web/frontend/src/Analysis.root.svelte
@@ -25,12 +25,13 @@
filterPresets.startTime = { from: hourAgo.toISOString(), to: now.toISOString() }
}
- let cluster
- let filters
- let rooflineMaxY
- let colWidth
- let numBins = 50
- let maxY = -1
+ let cluster;
+ let filterComponent; // see why here: https://stackoverflow.com/questions/58287729/how-can-i-export-a-function-from-a-svelte-component-that-changes-a-value-in-the
+ let jobFilters = [];
+ let rooflineMaxY;
+ let colWidth;
+ let numBins = 50;
+ let maxY = -1;
const ccconfig = getContext('cc-config')
const metricConfig = getContext('metrics')
@@ -54,8 +55,8 @@
$: statsQuery = queryStore({
client: client,
query: gql`
- query($filters: [JobFilter!]!) {
- stats: jobsStatistics(filter: $filters) {
+ query($jobFilters: [JobFilter!]!) {
+ stats: jobsStatistics(filter: $jobFilters) {
totalJobs
shortJobs
totalWalltime
@@ -67,34 +68,34 @@
- topUsers: jobsCount(filter: $filters, groupBy: USER, weight: NODE_HOURS, limit: 5) { name, count }
+ topUsers: jobsCount(filter: $jobFilters, groupBy: USER, weight: NODE_HOURS, limit: 5) { name, count }
}
`,
- variables: { filters }
+ variables: { jobFilters }
})
$: footprintsQuery = queryStore({
client: client,
query: gql`
- query($filters: [JobFilter!]!, $metrics: [String!]!) {
- footprints: jobsFootprints(filter: $filters, metrics: $metrics) {
+ query($jobFilters: [JobFilter!]!, $metrics: [String!]!) {
+ footprints: jobsFootprints(filter: $jobFilters, metrics: $metrics) {
nodehours,
metrics { metric, data }
}
}`,
- variables: { filters, metrics }
+ variables: { jobFilters, metrics }
})
$: rooflineQuery = queryStore({
client: client,
query: gql`
- query($filters: [JobFilter!]!, $rows: Int!, $cols: Int!,
+ query($jobFilters: [JobFilter!]!, $rows: Int!, $cols: Int!,
$minX: Float!, $minY: Float!, $maxX: Float!, $maxY: Float!) {
- rooflineHeatmap(filter: $filters, rows: $rows, cols: $cols,
+ rooflineHeatmap(filter: $jobFilters, rows: $rows, cols: $cols,
minX: $minX, minY: $minY, maxX: $maxX, maxY: $maxY)
}
`,
- variables: { filters, rows: 50, cols: 50, minX: 0.01, minY: 1., maxX: 1000., maxY }
+ variables: { jobFilters, rows: 50, cols: 50, minX: 0.01, minY: 1., maxX: 1000., maxY }
})
- onMount(() => filters.update())
+ onMount(() => filterComponent.update())
@@ -115,12 +116,12 @@
{
- filters = detail.filters;
+ jobFilters = detail.filters;
}} />
diff --git a/web/frontend/src/Jobs.root.svelte b/web/frontend/src/Jobs.root.svelte
index b96576b..07094b8 100644
--- a/web/frontend/src/Jobs.root.svelte
+++ b/web/frontend/src/Jobs.root.svelte
@@ -17,7 +17,7 @@
export let authlevel
export let roles
- let filters = []
+ let filterComponent; // see why here: https://stackoverflow.com/questions/58287729/how-can-i-export-a-function-from-a-svelte-component-that-changes-a-value-in-the
let jobList, matchedJobs = null
let sorting = { field: 'startTime', order: 'DESC' }, isSortingOpen = false, isMetricsSelectionOpen = false
let metrics = filterPresets.cluster
@@ -25,12 +25,10 @@
: ccconfig.plot_list_selectedMetrics
let selectedCluster = filterPresets?.cluster ? filterPresets.cluster : null
- $: selectedCluster = filters[0]?.cluster ? filters[0].cluster.eq : null
-
// The filterPresets are handled by the Filters component,
// so we need to wait for it to be ready before we can start a query.
// This is also why JobList component starts out with a paused query.
- onMount(() => filters.update())
+ onMount(() => filterComponent.update())
@@ -61,15 +59,16 @@
{
- filters = detail.filters
- jobList.update(detail.filters)}
+ selectedCluster = detail.filters[0]?.cluster ? detail.filters[0].cluster.eq : null
+ jobList.update(detail.filters)
+ }
} />
- filters.update(detail)}/>
+ filterComponent.update(detail)}/>
jobList.refresh()} />
diff --git a/web/frontend/src/List.root.svelte b/web/frontend/src/List.root.svelte
index 371c743..b219f35 100644
--- a/web/frontend/src/List.root.svelte
+++ b/web/frontend/src/List.root.svelte
@@ -29,12 +29,17 @@
"Invalid list type provided!"
);
+ let filterComponent; // see why here: https://stackoverflow.com/questions/58287729/how-can-i-export-a-function-from-a-svelte-component-that-changes-a-value-in-the
+ let jobFilters = [];
+ let nameFilter = "";
+ let sorting = { field: "totalJobs", direction: "down" };
+
const client = getContextClient();
$: stats = queryStore({
client: client,
query: gql`
- query($filters: [JobFilter!]!) {
- rows: jobsStatistics(filter: $filters, groupBy: ${type}) {
+ query($jobFilters: [JobFilter!]!) {
+ rows: jobsStatistics(filter: $jobFilters, groupBy: ${type}) {
id
name
totalJobs
@@ -42,13 +47,9 @@
totalCoreHours
}
}`,
- variables: { filters }
+ variables: { jobFilters }
});
- let filters;
- let nameFilter = "";
- let sorting = { field: "totalJobs", direction: "down" };
-
function changeSorting(event, field) {
let target = event.target;
while (target.tagName != "BUTTON") target = target.parentElement;
@@ -73,7 +74,7 @@
return stats.filter((u) => u.id.includes(nameFilter)).sort(cmp);
}
- onMount(() => filters.update());
+ onMount(() => filterComponent.update());
@@ -93,12 +94,12 @@
{
- filters = detail.filters;
+ jobFilters = detail.filters;
}}
/>
diff --git a/web/frontend/src/User.root.svelte b/web/frontend/src/User.root.svelte
index 7aa6317..03d6a3d 100644
--- a/web/frontend/src/User.root.svelte
+++ b/web/frontend/src/User.root.svelte
@@ -18,8 +18,9 @@
export let user
export let filterPresets
- let filters = []
- let jobList
+ let filterComponent; // see why here: https://stackoverflow.com/questions/58287729/how-can-i-export-a-function-from-a-svelte-component-that-changes-a-value-in-the
+ let jobList;
+ let jobFilters = [];
let sorting = { field: 'startTime', order: 'DESC' }, isSortingOpen = false
let metrics = ccconfig.plot_list_selectedMetrics, isMetricsSelectionOpen = false
let w1, w2, histogramHeight = 250
@@ -29,8 +30,8 @@
$: stats = queryStore({
client: client,
query: gql`
- query($filters: [JobFilter!]!) {
- jobsStatistics(filter: $filters) {
+ query($jobFilters: [JobFilter!]!) {
+ jobsStatistics(filter: $jobFilters) {
totalJobs
shortJobs
totalWalltime
@@ -38,12 +39,10 @@
histDuration { count, value }
histNumNodes { count, value }
}}`,
- variables: { filters }
+ variables: { jobFilters }
})
- $: selectedCluster = filters[0]?.cluster ? filters[0].cluster.eq : null
-
- onMount(() => filters.update())
+ onMount(() => filterComponent.update())
@@ -74,10 +73,10 @@
{
- let jobFilters = [...detail.filters, { user: { eq: user.username } }]
- filters = jobFilters
+ jobFilters = [...detail.filters, { user: { eq: user.username } }]
+ selectedCluster = jobFilters[0]?.cluster ? jobFilters[0].cluster.eq : null
jobList.update(jobFilters)
}} />
diff --git a/web/frontend/src/joblist/JobList.svelte b/web/frontend/src/joblist/JobList.svelte
index 68e1a76..02caf3f 100644
--- a/web/frontend/src/joblist/JobList.svelte
+++ b/web/frontend/src/joblist/JobList.svelte
@@ -47,7 +47,6 @@
jobId
user
project
- jobName
cluster
subCluster
startTime