From fc1c54a1410544b5e483318c3ff7980c7a8e8e62 Mon Sep 17 00:00:00 2001 From: Christoph Kluge Date: Thu, 21 Nov 2024 14:39:03 +0100 Subject: [PATCH 1/9] fix: use left join to keep unmatched stats query result rows --- internal/repository/stats.go | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/internal/repository/stats.go b/internal/repository/stats.go index ba7a8aa..f5677ad 100644 --- a/internal/repository/stats.go +++ b/internal/repository/stats.go @@ -86,7 +86,7 @@ func (r *JobRepository) buildStatsQuery( fmt.Sprintf(`CAST(ROUND(SUM((CASE WHEN job.job_state = "running" THEN %d - job.start_time ELSE job.duration END) * job.num_hwthreads) / 3600) as %s) as totalCoreHours`, time.Now().Unix(), castType), fmt.Sprintf(`CAST(SUM(job.num_acc) as %s) as totalAccs`, castType), fmt.Sprintf(`CAST(ROUND(SUM((CASE WHEN job.job_state = "running" THEN %d - job.start_time ELSE job.duration END) * job.num_acc) / 3600) as %s) as totalAccHours`, time.Now().Unix(), castType), - ).From("job").Join("user ON user.username = job.user").GroupBy(col) + ).From("job").LeftJoin("user ON user.username = job.user").GroupBy(col) } else { // Scan columns: totalJobs, name, totalWalltime, totalNodes, totalNodeHours, totalCores, totalCoreHours, totalAccs, totalAccHours query = sq.Select("COUNT(job.id)", @@ -226,6 +226,8 @@ func (r *JobRepository) JobsStatsGrouped( TotalAccHours: totalAccHours, }) } else { + log.Debugf(">>>> STATS ID %s", id.String) + log.Debugf(">>>> STATS TOTALNODES %d", totalNodes) stats = append(stats, &model.JobsStatistics{ ID: id.String, From 7f43c88a39db4aa75bb53b45190dfd3ffd1f6293 Mon Sep 17 00:00:00 2001 From: Jan Eitzinger Date: Thu, 21 Nov 2024 14:54:04 +0100 Subject: [PATCH 2/9] Add example config for mariadb backend --- configs/config-mariadb.json | 69 +++++++++++++++++++++++++++++++++++++ 1 file changed, 69 insertions(+) create mode 100644 configs/config-mariadb.json diff --git a/configs/config-mariadb.json b/configs/config-mariadb.json new file 
mode 100644 index 0000000..e068439 --- /dev/null +++ b/configs/config-mariadb.json @@ -0,0 +1,69 @@ +{ + "addr": "127.0.0.1:8080", + "short-running-jobs-duration": 300, + "archive": { + "kind": "file", + "path": "./var/job-archive" + }, + "jwts": { + "max-age": "2000h" + }, + "db-driver": "mysql", + "db": "clustercockpit:demo@tcp(127.0.0.1:3306)/clustercockpit", + "enable-resampling": { + "trigger": 30, + "resolutions": [ + 600, + 300, + 120, + 60 + ] + }, + "emission-constant": 317, + "clusters": [ + { + "name": "fritz", + "metricDataRepository": { + "kind": "cc-metric-store", + "url": "http://localhost:8082", + "token": "" + }, + "filterRanges": { + "numNodes": { + "from": 1, + "to": 64 + }, + "duration": { + "from": 0, + "to": 86400 + }, + "startTime": { + "from": "2022-01-01T00:00:00Z", + "to": null + } + } + }, + { + "name": "alex", + "metricDataRepository": { + "kind": "cc-metric-store", + "url": "http://localhost:8082", + "token": "" + }, + "filterRanges": { + "numNodes": { + "from": 1, + "to": 64 + }, + "duration": { + "from": 0, + "to": 86400 + }, + "startTime": { + "from": "2022-01-01T00:00:00Z", + "to": null + } + } + } + ] +} From 35bd7739c647d79be80fd762b2140a08ae79e7fd Mon Sep 17 00:00:00 2001 From: Jan Eitzinger Date: Thu, 21 Nov 2024 15:02:30 +0100 Subject: [PATCH 3/9] fix: Replace reserved keywords in database schemas Port migration to mariadb --- internal/repository/job.go | 6 +- internal/repository/jobCreate.go | 4 +- internal/repository/jobFind.go | 2 +- internal/repository/jobQuery.go | 10 +- internal/repository/migration.go | 8 ++ .../mysql/08_add-footprint.down.sql | 21 +++ .../migrations/mysql/08_add-footprint.up.sql | 123 ++++++++++++++++++ .../sqlite3/08_add-footprint.up.sql | 75 ++++++----- internal/repository/stats.go | 8 +- internal/repository/user.go | 26 ++-- pkg/schema/job.go | 4 +- 11 files changed, 222 insertions(+), 65 deletions(-) create mode 100644 internal/repository/migrations/mysql/08_add-footprint.down.sql create mode 100644 
internal/repository/migrations/mysql/08_add-footprint.up.sql diff --git a/internal/repository/job.go b/internal/repository/job.go index ab65426..592997e 100644 --- a/internal/repository/job.go +++ b/internal/repository/job.go @@ -51,7 +51,7 @@ func GetJobRepository() *JobRepository { } var jobColumns []string = []string{ - "job.id", "job.job_id", "job.user", "job.project", "job.cluster", "job.subcluster", "job.start_time", "job.partition", "job.array_job_id", + "job.id", "job.job_id", "job.hpc_user", "job.project", "job.cluster", "job.subcluster", "job.start_time", "job.cluster_partition", "job.array_job_id", "job.num_nodes", "job.num_hwthreads", "job.num_acc", "job.exclusive", "job.monitoring_status", "job.smt", "job.job_state", "job.duration", "job.walltime", "job.resources", "job.footprint", "job.energy", } @@ -314,7 +314,7 @@ func (r *JobRepository) FindUserOrProjectOrJobname(user *schema.User, searchterm return "", uresult, "", "" } // Find username by name (like) - nresult, _ := r.FindColumnValue(user, searchterm, "user", "username", "name", true) + nresult, _ := r.FindColumnValue(user, searchterm, "hpc_user", "username", "name", true) if nresult != "" { return "", nresult, "", "" } @@ -400,7 +400,7 @@ func (r *JobRepository) Partitions(cluster string) ([]string, error) { start := time.Now() partitions := r.cache.Get("partitions:"+cluster, func() (interface{}, time.Duration, int) { parts := []string{} - if err = r.DB.Select(&parts, `SELECT DISTINCT job.partition FROM job WHERE job.cluster = ?;`, cluster); err != nil { + if err = r.DB.Select(&parts, `SELECT DISTINCT job.cluster_partition FROM job WHERE job.cluster = ?;`, cluster); err != nil { return nil, 0, 1000 } diff --git a/internal/repository/jobCreate.go b/internal/repository/jobCreate.go index 1b05b52..9e47974 100644 --- a/internal/repository/jobCreate.go +++ b/internal/repository/jobCreate.go @@ -14,10 +14,10 @@ import ( ) const NamedJobInsert string = `INSERT INTO job ( - job_id, user, project, 
cluster, subcluster, ` + "`partition`" + `, array_job_id, num_nodes, num_hwthreads, num_acc, + job_id, hpc_user, project, cluster, subcluster, cluster_partition, array_job_id, num_nodes, num_hwthreads, num_acc, exclusive, monitoring_status, smt, job_state, start_time, duration, walltime, footprint, energy, energy_footprint, resources, meta_data ) VALUES ( - :job_id, :user, :project, :cluster, :subcluster, :partition, :array_job_id, :num_nodes, :num_hwthreads, :num_acc, + :job_id, :hpc_user, :project, :cluster, :subcluster, :cluster_partition, :array_job_id, :num_nodes, :num_hwthreads, :num_acc, :exclusive, :monitoring_status, :smt, :job_state, :start_time, :duration, :walltime, :footprint, :energy, :energy_footprint, :resources, :meta_data );` diff --git a/internal/repository/jobFind.go b/internal/repository/jobFind.go index a383eb6..ff5a936 100644 --- a/internal/repository/jobFind.go +++ b/internal/repository/jobFind.go @@ -136,7 +136,7 @@ func (r *JobRepository) IsJobOwner(jobId int64, startTime int64, user string, cl q := sq.Select("id"). From("job"). Where("job.job_id = ?", jobId). - Where("job.user = ?", user). + Where("job.hpc_user = ?", user). Where("job.cluster = ?", cluster). Where("job.start_time = ?", startTime) diff --git a/internal/repository/jobQuery.go b/internal/repository/jobQuery.go index 5458043..c9812a3 100644 --- a/internal/repository/jobQuery.go +++ b/internal/repository/jobQuery.go @@ -121,13 +121,13 @@ func SecurityCheck(ctx context.Context, query sq.SelectBuilder) (sq.SelectBuilde return query, nil case user.HasRole(schema.RoleManager): // Manager : Add filter for managed projects' jobs only + personal jobs if len(user.Projects) != 0 { - return query.Where(sq.Or{sq.Eq{"job.project": user.Projects}, sq.Eq{"job.user": user.Username}}), nil + return query.Where(sq.Or{sq.Eq{"job.project": user.Projects}, sq.Eq{"job.hpc_user": user.Username}}), nil } else { log.Debugf("Manager-User '%s' has no defined projects to lookup! 
Query only personal jobs ...", user.Username) - return query.Where("job.user = ?", user.Username), nil + return query.Where("job.hpc_user = ?", user.Username), nil } case user.HasRole(schema.RoleUser): // User : Only personal jobs - return query.Where("job.user = ?", user.Username), nil + return query.Where("job.hpc_user = ?", user.Username), nil default: // No known Role, return error var qnil sq.SelectBuilder return qnil, fmt.Errorf("user has no or unknown roles") @@ -147,7 +147,7 @@ func BuildWhereClause(filter *model.JobFilter, query sq.SelectBuilder) sq.Select query = query.Where("job.array_job_id = ?", *filter.ArrayJobID) } if filter.User != nil { - query = buildStringCondition("job.user", filter.User, query) + query = buildStringCondition("job.hpc_user", filter.User, query) } if filter.Project != nil { query = buildStringCondition("job.project", filter.Project, query) @@ -159,7 +159,7 @@ func BuildWhereClause(filter *model.JobFilter, query sq.SelectBuilder) sq.Select query = buildStringCondition("job.cluster", filter.Cluster, query) } if filter.Partition != nil { - query = buildStringCondition("job.partition", filter.Partition, query) + query = buildStringCondition("job.cluster_partition", filter.Partition, query) } if filter.StartTime != nil { query = buildTimeCondition("job.start_time", filter.StartTime, query) diff --git a/internal/repository/migration.go b/internal/repository/migration.go index 970fbc2..d32a624 100644 --- a/internal/repository/migration.go +++ b/internal/repository/migration.go @@ -114,6 +114,14 @@ func MigrateDB(backend string, db string) error { return err } + v, dirty, err := m.Version() + + log.Infof("unsupported database version %d, need %d.\nPlease backup your database file and run cc-backend -migrate-db", v, Version) + + if dirty { + return fmt.Errorf("last migration to version %d has failed, please fix the db manually and force version with -force-db flag", Version) + } + if err := m.Up(); err != nil { if err == 
migrate.ErrNoChange { log.Info("DB already up to date!") diff --git a/internal/repository/migrations/mysql/08_add-footprint.down.sql b/internal/repository/migrations/mysql/08_add-footprint.down.sql new file mode 100644 index 0000000..8c99eb5 --- /dev/null +++ b/internal/repository/migrations/mysql/08_add-footprint.down.sql @@ -0,0 +1,21 @@ +ALTER TABLE job DROP energy; +ALTER TABLE job DROP energy_footprint; +ALTER TABLE job ADD COLUMN flops_any_avg REAL; +ALTER TABLE job ADD COLUMN mem_bw_avg REAL; +ALTER TABLE job ADD COLUMN mem_used_max REAL; +ALTER TABLE job ADD COLUMN load_avg REAL; +ALTER TABLE job ADD COLUMN net_bw_avg REAL; +ALTER TABLE job ADD COLUMN net_data_vol_total REAL; +ALTER TABLE job ADD COLUMN file_bw_avg REAL; +ALTER TABLE job ADD COLUMN file_data_vol_total REAL; + +UPDATE job SET flops_any_avg = json_extract(footprint, '$.flops_any_avg'); +UPDATE job SET mem_bw_avg = json_extract(footprint, '$.mem_bw_avg'); +UPDATE job SET mem_used_max = json_extract(footprint, '$.mem_used_max'); +UPDATE job SET load_avg = json_extract(footprint, '$.cpu_load_avg'); +UPDATE job SET net_bw_avg = json_extract(footprint, '$.net_bw_avg'); +UPDATE job SET net_data_vol_total = json_extract(footprint, '$.net_data_vol_total'); +UPDATE job SET file_bw_avg = json_extract(footprint, '$.file_bw_avg'); +UPDATE job SET file_data_vol_total = json_extract(footprint, '$.file_data_vol_total'); + +ALTER TABLE job DROP footprint; diff --git a/internal/repository/migrations/mysql/08_add-footprint.up.sql b/internal/repository/migrations/mysql/08_add-footprint.up.sql new file mode 100644 index 0000000..207ccf9 --- /dev/null +++ b/internal/repository/migrations/mysql/08_add-footprint.up.sql @@ -0,0 +1,123 @@ +DROP INDEX IF EXISTS job_stats ON job; +DROP INDEX IF EXISTS job_by_user ON job; +DROP INDEX IF EXISTS job_by_starttime ON job; +DROP INDEX IF EXISTS job_by_job_id ON job; +DROP INDEX IF EXISTS job_list ON job; +DROP INDEX IF EXISTS job_list_user ON job; +DROP INDEX IF EXISTS job_list_users ON job; +DROP INDEX IF EXISTS 
job_list_users_start ON job; + +ALTER TABLE job ADD COLUMN energy REAL NOT NULL DEFAULT 0.0; +ALTER TABLE job ADD COLUMN energy_footprint JSON; + +ALTER TABLE job ADD COLUMN footprint JSON; +ALTER TABLE tag ADD COLUMN tag_scope TEXT NOT NULL DEFAULT 'global'; + +-- Do not use reserved keywords anymore +RENAME TABLE `user` TO hpc_user; +ALTER TABLE job RENAME COLUMN `user` TO hpc_user; +ALTER TABLE job RENAME COLUMN `partition` TO cluster_partition; + +ALTER TABLE job MODIFY COLUMN cluster VARCHAR(50); +ALTER TABLE job MODIFY COLUMN hpc_user VARCHAR(50); +ALTER TABLE job MODIFY COLUMN subcluster VARCHAR(50); +ALTER TABLE job MODIFY COLUMN project VARCHAR(50); +ALTER TABLE job MODIFY COLUMN cluster_partition VARCHAR(50); +ALTER TABLE job MODIFY COLUMN job_state VARCHAR(25); + +UPDATE job SET footprint = '{"flops_any_avg": 0.0}'; +UPDATE job SET footprint = json_replace(footprint, '$.flops_any_avg', job.flops_any_avg); +UPDATE job SET footprint = json_insert(footprint, '$.mem_bw_avg', job.mem_bw_avg); +UPDATE job SET footprint = json_insert(footprint, '$.mem_used_max', job.mem_used_max); +UPDATE job SET footprint = json_insert(footprint, '$.cpu_load_avg', job.load_avg); +UPDATE job SET footprint = json_insert(footprint, '$.net_bw_avg', job.net_bw_avg) WHERE job.net_bw_avg != 0; +UPDATE job SET footprint = json_insert(footprint, '$.net_data_vol_total', job.net_data_vol_total) WHERE job.net_data_vol_total != 0; +UPDATE job SET footprint = json_insert(footprint, '$.file_bw_avg', job.file_bw_avg) WHERE job.file_bw_avg != 0; +UPDATE job SET footprint = json_insert(footprint, '$.file_data_vol_total', job.file_data_vol_total) WHERE job.file_data_vol_total != 0; + +ALTER TABLE job DROP flops_any_avg; +ALTER TABLE job DROP mem_bw_avg; +ALTER TABLE job DROP mem_used_max; +ALTER TABLE job DROP load_avg; +ALTER TABLE job DROP net_bw_avg; +ALTER TABLE job DROP net_data_vol_total; +ALTER TABLE job DROP file_bw_avg; +ALTER TABLE job DROP file_data_vol_total; + +-- Indices for: 
Single filters, combined filters, sorting, sorting with filters +-- Cluster Filter +CREATE INDEX IF NOT EXISTS jobs_cluster ON job (cluster); +CREATE INDEX IF NOT EXISTS jobs_cluster_user ON job (cluster, hpc_user); +CREATE INDEX IF NOT EXISTS jobs_cluster_project ON job (cluster, project); +CREATE INDEX IF NOT EXISTS jobs_cluster_subcluster ON job (cluster, subcluster); +-- Cluster Filter Sorting +CREATE INDEX IF NOT EXISTS jobs_cluster_starttime ON job (cluster, start_time); +CREATE INDEX IF NOT EXISTS jobs_cluster_duration ON job (cluster, duration); +CREATE INDEX IF NOT EXISTS jobs_cluster_numnodes ON job (cluster, num_nodes); + +-- Cluster+Partition Filter +CREATE INDEX IF NOT EXISTS jobs_cluster_partition ON job (cluster, cluster_partition); +-- Cluster+Partition Filter Sorting +CREATE INDEX IF NOT EXISTS jobs_cluster_partition_starttime ON job (cluster, cluster_partition, start_time); +CREATE INDEX IF NOT EXISTS jobs_cluster_partition_duration ON job (cluster, cluster_partition, duration); +CREATE INDEX IF NOT EXISTS jobs_cluster_partition_numnodes ON job (cluster, cluster_partition, num_nodes); + +-- Cluster+Partition+Jobstate Filter +CREATE INDEX IF NOT EXISTS jobs_cluster_partition_jobstate ON job (cluster, cluster_partition, job_state); +CREATE INDEX IF NOT EXISTS jobs_cluster_partition_jobstate_user ON job (cluster, cluster_partition, job_state, hpc_user); +CREATE INDEX IF NOT EXISTS jobs_cluster_partition_jobstate_project ON job (cluster, cluster_partition, job_state, project); +-- Cluster+Partition+Jobstate Filter Sorting +CREATE INDEX IF NOT EXISTS jobs_cluster_partition_jobstate_starttime ON job (cluster, cluster_partition, job_state, start_time); +CREATE INDEX IF NOT EXISTS jobs_cluster_partition_jobstate_duration ON job (cluster, cluster_partition, job_state, duration); +CREATE INDEX IF NOT EXISTS jobs_cluster_partition_jobstate_numnodes ON job (cluster, cluster_partition, job_state, num_nodes); + +-- Cluster+JobState Filter +CREATE INDEX IF NOT 
EXISTS jobs_cluster_jobstate ON job (cluster, job_state); +CREATE INDEX IF NOT EXISTS jobs_cluster_jobstate_user ON job (cluster, job_state, hpc_user); +CREATE INDEX IF NOT EXISTS jobs_cluster_jobstate_project ON job (cluster, job_state, project); +-- Cluster+JobState Filter Sorting +CREATE INDEX IF NOT EXISTS jobs_cluster_jobstate_starttime ON job (cluster, job_state, start_time); +CREATE INDEX IF NOT EXISTS jobs_cluster_jobstate_duration ON job (cluster, job_state, duration); +CREATE INDEX IF NOT EXISTS jobs_cluster_jobstate_numnodes ON job (cluster, job_state, num_nodes); + +-- User Filter +CREATE INDEX IF NOT EXISTS jobs_user ON job (hpc_user); +-- User Filter Sorting +CREATE INDEX IF NOT EXISTS jobs_user_starttime ON job (hpc_user, start_time); +CREATE INDEX IF NOT EXISTS jobs_user_duration ON job (hpc_user, duration); +CREATE INDEX IF NOT EXISTS jobs_user_numnodes ON job (hpc_user, num_nodes); + +-- Project Filter +CREATE INDEX IF NOT EXISTS jobs_project ON job (project); +CREATE INDEX IF NOT EXISTS jobs_project_user ON job (project, hpc_user); +-- Project Filter Sorting +CREATE INDEX IF NOT EXISTS jobs_project_starttime ON job (project, start_time); +CREATE INDEX IF NOT EXISTS jobs_project_duration ON job (project, duration); +CREATE INDEX IF NOT EXISTS jobs_project_numnodes ON job (project, num_nodes); + +-- JobState Filter +CREATE INDEX IF NOT EXISTS jobs_jobstate ON job (job_state); +CREATE INDEX IF NOT EXISTS jobs_jobstate_user ON job (job_state, hpc_user); +CREATE INDEX IF NOT EXISTS jobs_jobstate_project ON job (job_state, project); +CREATE INDEX IF NOT EXISTS jobs_jobstate_cluster ON job (job_state, cluster); +-- JobState Filter Sorting +CREATE INDEX IF NOT EXISTS jobs_jobstate_starttime ON job (job_state, start_time); +CREATE INDEX IF NOT EXISTS jobs_jobstate_duration ON job (job_state, duration); +CREATE INDEX IF NOT EXISTS jobs_jobstate_numnodes ON job (job_state, num_nodes); + +-- ArrayJob Filter +CREATE INDEX IF NOT EXISTS 
jobs_arrayjobid_starttime ON job (array_job_id, start_time); +CREATE INDEX IF NOT EXISTS jobs_cluster_arrayjobid_starttime ON job (cluster, array_job_id, start_time); + +-- Sorting without active filters +CREATE INDEX IF NOT EXISTS jobs_starttime ON job (start_time); +CREATE INDEX IF NOT EXISTS jobs_duration ON job (duration); +CREATE INDEX IF NOT EXISTS jobs_numnodes ON job (num_nodes); + +-- Single filters with default starttime sorting +CREATE INDEX IF NOT EXISTS jobs_duration_starttime ON job (duration, start_time); +CREATE INDEX IF NOT EXISTS jobs_numnodes_starttime ON job (num_nodes, start_time); +CREATE INDEX IF NOT EXISTS jobs_numacc_starttime ON job (num_acc, start_time); +CREATE INDEX IF NOT EXISTS jobs_energy_starttime ON job (energy, start_time); + +-- Optimize DB index usage diff --git a/internal/repository/migrations/sqlite3/08_add-footprint.up.sql b/internal/repository/migrations/sqlite3/08_add-footprint.up.sql index 9c9e53e..5c28da9 100644 --- a/internal/repository/migrations/sqlite3/08_add-footprint.up.sql +++ b/internal/repository/migrations/sqlite3/08_add-footprint.up.sql @@ -1,11 +1,11 @@ -DROP INDEX job_stats; -DROP INDEX job_by_user; -DROP INDEX job_by_starttime; -DROP INDEX job_by_job_id; -DROP INDEX job_list; -DROP INDEX job_list_user; -DROP INDEX job_list_users; -DROP INDEX job_list_users_start; +DROP INDEX IF EXISTS job_stats; +DROP INDEX IF EXISTS job_by_user; +DROP INDEX IF EXISTS job_by_starttime; +DROP INDEX IF EXISTS job_by_job_id; +DROP INDEX IF EXISTS job_list; +DROP INDEX IF EXISTS job_list_user; +DROP INDEX IF EXISTS job_list_users; +DROP INDEX IF EXISTS job_list_users_start; ALTER TABLE job ADD COLUMN energy REAL NOT NULL DEFAULT 0.0; ALTER TABLE job ADD COLUMN energy_footprint TEXT DEFAULT NULL; @@ -13,6 +13,11 @@ ALTER TABLE job ADD COLUMN energy_footprint TEXT DEFAULT NULL; ALTER TABLE job ADD COLUMN footprint TEXT DEFAULT NULL; ALTER TABLE tag ADD COLUMN tag_scope TEXT NOT NULL DEFAULT 'global'; +-- Do not use reserved 
keywords anymore +ALTER TABLE "user" RENAME TO hpc_user; +ALTER TABLE job RENAME COLUMN "user" TO hpc_user; +ALTER TABLE job RENAME COLUMN "partition" TO cluster_partition; + UPDATE job SET footprint = '{"flops_any_avg": 0.0}'; UPDATE job SET footprint = json_replace(footprint, '$.flops_any_avg', job.flops_any_avg); UPDATE job SET footprint = json_insert(footprint, '$.mem_bw_avg', job.mem_bw_avg); @@ -35,7 +40,7 @@ ALTER TABLE job DROP file_data_vol_total; -- Indices for: Single filters, combined filters, sorting, sorting with filters -- Cluster Filter CREATE INDEX IF NOT EXISTS jobs_cluster ON job (cluster); -CREATE INDEX IF NOT EXISTS jobs_cluster_user ON job (cluster, user); +CREATE INDEX IF NOT EXISTS jobs_cluster_user ON job (cluster, hpc_user); CREATE INDEX IF NOT EXISTS jobs_cluster_project ON job (cluster, project); CREATE INDEX IF NOT EXISTS jobs_cluster_subcluster ON job (cluster, subcluster); -- Cluster Filter Sorting @@ -47,30 +52,30 @@ CREATE INDEX IF NOT EXISTS jobs_cluster_numacc ON job (cluster, num_acc); CREATE INDEX IF NOT EXISTS jobs_cluster_energy ON job (cluster, energy); -- Cluster+Partition Filter -CREATE INDEX IF NOT EXISTS jobs_cluster_partition ON job (cluster, partition); +CREATE INDEX IF NOT EXISTS jobs_cluster_partition ON job (cluster, cluster_partition); -- Cluster+Partition Filter Sorting -CREATE INDEX IF NOT EXISTS jobs_cluster_partition_starttime ON job (cluster, partition, start_time); -CREATE INDEX IF NOT EXISTS jobs_cluster_partition_duration ON job (cluster, partition, duration); -CREATE INDEX IF NOT EXISTS jobs_cluster_partition_numnodes ON job (cluster, partition, num_nodes); -CREATE INDEX IF NOT EXISTS jobs_cluster_partition_numhwthreads ON job (cluster, partition, num_hwthreads); -CREATE INDEX IF NOT EXISTS jobs_cluster_partition_numacc ON job (cluster, partition, num_acc); -CREATE INDEX IF NOT EXISTS jobs_cluster_partition_energy ON job (cluster, partition, energy); +CREATE INDEX IF NOT EXISTS 
jobs_cluster_partition_starttime ON job (cluster, cluster_partition, start_time); +CREATE INDEX IF NOT EXISTS jobs_cluster_partition_duration ON job (cluster, cluster_partition, duration); +CREATE INDEX IF NOT EXISTS jobs_cluster_partition_numnodes ON job (cluster, cluster_partition, num_nodes); +CREATE INDEX IF NOT EXISTS jobs_cluster_partition_numhwthreads ON job (cluster, cluster_partition, num_hwthreads); +CREATE INDEX IF NOT EXISTS jobs_cluster_partition_numacc ON job (cluster, cluster_partition, num_acc); +CREATE INDEX IF NOT EXISTS jobs_cluster_partition_energy ON job (cluster, cluster_partition, energy); -- Cluster+Partition+Jobstate Filter -CREATE INDEX IF NOT EXISTS jobs_cluster_partition_jobstate ON job (cluster, partition, job_state); -CREATE INDEX IF NOT EXISTS jobs_cluster_partition_jobstate_user ON job (cluster, partition, job_state, user); -CREATE INDEX IF NOT EXISTS jobs_cluster_partition_jobstate_project ON job (cluster, partition, job_state, project); +CREATE INDEX IF NOT EXISTS jobs_cluster_partition_jobstate ON job (cluster, cluster_partition, job_state); +CREATE INDEX IF NOT EXISTS jobs_cluster_partition_jobstate_user ON job (cluster, cluster_partition, job_state, hpc_user); +CREATE INDEX IF NOT EXISTS jobs_cluster_partition_jobstate_project ON job (cluster, cluster_partition, job_state, project); -- Cluster+Partition+Jobstate Filter Sorting -CREATE INDEX IF NOT EXISTS jobs_cluster_partition_jobstate_starttime ON job (cluster, partition, job_state, start_time); -CREATE INDEX IF NOT EXISTS jobs_cluster_partition_jobstate_duration ON job (cluster, partition, job_state, duration); -CREATE INDEX IF NOT EXISTS jobs_cluster_partition_jobstate_numnodes ON job (cluster, partition, job_state, num_nodes); -CREATE INDEX IF NOT EXISTS jobs_cluster_partition_jobstate_numhwthreads ON job (cluster, partition, job_state, num_hwthreads); -CREATE INDEX IF NOT EXISTS jobs_cluster_partition_jobstate_numacc ON job (cluster, partition, job_state, num_acc); -CREATE 
INDEX IF NOT EXISTS jobs_cluster_partition_jobstate_energy ON job (cluster, partition, job_state, energy); +CREATE INDEX IF NOT EXISTS jobs_cluster_partition_jobstate_starttime ON job (cluster, cluster_partition, job_state, start_time); +CREATE INDEX IF NOT EXISTS jobs_cluster_partition_jobstate_duration ON job (cluster, cluster_partition, job_state, duration); +CREATE INDEX IF NOT EXISTS jobs_cluster_partition_jobstate_numnodes ON job (cluster, cluster_partition, job_state, num_nodes); +CREATE INDEX IF NOT EXISTS jobs_cluster_partition_jobstate_numhwthreads ON job (cluster, cluster_partition, job_state, num_hwthreads); +CREATE INDEX IF NOT EXISTS jobs_cluster_partition_jobstate_numacc ON job (cluster, cluster_partition, job_state, num_acc); +CREATE INDEX IF NOT EXISTS jobs_cluster_partition_jobstate_energy ON job (cluster, cluster_partition, job_state, energy); -- Cluster+JobState Filter CREATE INDEX IF NOT EXISTS jobs_cluster_jobstate ON job (cluster, job_state); -CREATE INDEX IF NOT EXISTS jobs_cluster_jobstate_user ON job (cluster, job_state, user); +CREATE INDEX IF NOT EXISTS jobs_cluster_jobstate_user ON job (cluster, job_state, hpc_user); CREATE INDEX IF NOT EXISTS jobs_cluster_jobstate_project ON job (cluster, job_state, project); -- Cluster+JobState Filter Sorting CREATE INDEX IF NOT EXISTS jobs_cluster_jobstate_starttime ON job (cluster, job_state, start_time); @@ -81,18 +86,18 @@ CREATE INDEX IF NOT EXISTS jobs_cluster_jobstate_numacc ON job (cluster, job_sta CREATE INDEX IF NOT EXISTS jobs_cluster_jobstate_energy ON job (cluster, job_state, energy); -- User Filter -CREATE INDEX IF NOT EXISTS jobs_user ON job (user); +CREATE INDEX IF NOT EXISTS jobs_user ON job (hpc_user); -- User Filter Sorting -CREATE INDEX IF NOT EXISTS jobs_user_starttime ON job (user, start_time); -CREATE INDEX IF NOT EXISTS jobs_user_duration ON job (user, duration); -CREATE INDEX IF NOT EXISTS jobs_user_numnodes ON job (user, num_nodes); -CREATE INDEX IF NOT EXISTS 
jobs_user_numhwthreads ON job (user, num_hwthreads); -CREATE INDEX IF NOT EXISTS jobs_user_numacc ON job (user, num_acc); -CREATE INDEX IF NOT EXISTS jobs_user_energy ON job (user, energy); +CREATE INDEX IF NOT EXISTS jobs_user_starttime ON job (hpc_user, start_time); +CREATE INDEX IF NOT EXISTS jobs_user_duration ON job (hpc_user, duration); +CREATE INDEX IF NOT EXISTS jobs_user_numnodes ON job (hpc_user, num_nodes); +CREATE INDEX IF NOT EXISTS jobs_user_numhwthreads ON job (hpc_user, num_hwthreads); +CREATE INDEX IF NOT EXISTS jobs_user_numacc ON job (hpc_user, num_acc); +CREATE INDEX IF NOT EXISTS jobs_user_energy ON job (hpc_user, energy); -- Project Filter CREATE INDEX IF NOT EXISTS jobs_project ON job (project); -CREATE INDEX IF NOT EXISTS jobs_project_user ON job (project, user); +CREATE INDEX IF NOT EXISTS jobs_project_user ON job (project, hpc_user); -- Project Filter Sorting CREATE INDEX IF NOT EXISTS jobs_project_starttime ON job (project, start_time); CREATE INDEX IF NOT EXISTS jobs_project_duration ON job (project, duration); @@ -103,7 +108,7 @@ CREATE INDEX IF NOT EXISTS jobs_project_energy ON job (project, energy); -- JobState Filter CREATE INDEX IF NOT EXISTS jobs_jobstate ON job (job_state); -CREATE INDEX IF NOT EXISTS jobs_jobstate_user ON job (job_state, user); +CREATE INDEX IF NOT EXISTS jobs_jobstate_user ON job (job_state, hpc_user); CREATE INDEX IF NOT EXISTS jobs_jobstate_project ON job (job_state, project); CREATE INDEX IF NOT EXISTS jobs_jobstate_cluster ON job (job_state, cluster); -- JobState Filter Sorting diff --git a/internal/repository/stats.go b/internal/repository/stats.go index ba7a8aa..ffc0e55 100644 --- a/internal/repository/stats.go +++ b/internal/repository/stats.go @@ -22,7 +22,7 @@ import ( // GraphQL validation should make sure that no unkown values can be specified. 
var groupBy2column = map[model.Aggregate]string{ - model.AggregateUser: "job.user", + model.AggregateUser: "job.hpc_user", model.AggregateProject: "job.project", model.AggregateCluster: "job.cluster", } @@ -86,7 +86,7 @@ func (r *JobRepository) buildStatsQuery( fmt.Sprintf(`CAST(ROUND(SUM((CASE WHEN job.job_state = "running" THEN %d - job.start_time ELSE job.duration END) * job.num_hwthreads) / 3600) as %s) as totalCoreHours`, time.Now().Unix(), castType), fmt.Sprintf(`CAST(SUM(job.num_acc) as %s) as totalAccs`, castType), fmt.Sprintf(`CAST(ROUND(SUM((CASE WHEN job.job_state = "running" THEN %d - job.start_time ELSE job.duration END) * job.num_acc) / 3600) as %s) as totalAccHours`, time.Now().Unix(), castType), - ).From("job").Join("user ON user.username = job.user").GroupBy(col) + ).From("job").Join("hpc_user ON hpc_user.username = job.hpc_user").GroupBy(col) } else { // Scan columns: totalJobs, name, totalWalltime, totalNodes, totalNodeHours, totalCores, totalCoreHours, totalAccs, totalAccHours query = sq.Select("COUNT(job.id)", @@ -109,7 +109,7 @@ func (r *JobRepository) buildStatsQuery( // func (r *JobRepository) getUserName(ctx context.Context, id string) string { // user := GetUserFromContext(ctx) -// name, _ := r.FindColumnValue(user, id, "user", "name", "username", false) +// name, _ := r.FindColumnValue(user, id, "hpc_user", "name", "username", false) // if name != "" { // return name // } else { @@ -210,7 +210,7 @@ func (r *JobRepository) JobsStatsGrouped( totalAccHours = int(accHours.Int64) } - if col == "job.user" { + if col == "job.hpc_user" { // name := r.getUserName(ctx, id.String) stats = append(stats, &model.JobsStatistics{ diff --git a/internal/repository/user.go b/internal/repository/user.go index a851b6b..9b7e94e 100644 --- a/internal/repository/user.go +++ b/internal/repository/user.go @@ -46,8 +46,8 @@ func GetUserRepository() *UserRepository { func (r *UserRepository) GetUser(username string) (*schema.User, error) { user := 
&schema.User{Username: username} var hashedPassword, name, rawRoles, email, rawProjects sql.NullString - if err := sq.Select("password", "ldap", "name", "roles", "email", "projects").From("user"). - Where("user.username = ?", username).RunWith(r.DB). + if err := sq.Select("password", "ldap", "name", "roles", "email", "projects").From("hpc_user"). + Where("hpc_user.username = ?", username).RunWith(r.DB). QueryRow().Scan(&hashedPassword, &user.AuthSource, &name, &rawRoles, &email, &rawProjects); err != nil { log.Warnf("Error while querying user '%v' from database", username) return nil, err @@ -73,7 +73,7 @@ func (r *UserRepository) GetUser(username string) (*schema.User, error) { func (r *UserRepository) GetLdapUsernames() ([]string, error) { var users []string - rows, err := r.DB.Query(`SELECT username FROM user WHERE user.ldap = 1`) + rows, err := r.DB.Query(`SELECT username FROM hpc_user WHERE hpc_user.ldap = 1`) if err != nil { log.Warn("Error while querying usernames") return nil, err @@ -121,7 +121,7 @@ func (r *UserRepository) AddUser(user *schema.User) error { vals = append(vals, int(user.AuthSource)) } - if _, err := sq.Insert("user").Columns(cols...).Values(vals...).RunWith(r.DB).Exec(); err != nil { + if _, err := sq.Insert("hpc_user").Columns(cols...).Values(vals...).RunWith(r.DB).Exec(); err != nil { log.Errorf("Error while inserting new user '%v' into DB", user.Username) return err } @@ -134,7 +134,7 @@ func (r *UserRepository) UpdateUser(dbUser *schema.User, user *schema.User) erro // user contains updated info, apply to dbuser // TODO: Discuss updatable fields if dbUser.Name != user.Name { - if _, err := sq.Update("user").Set("name", user.Name).Where("user.username = ?", dbUser.Username).RunWith(r.DB).Exec(); err != nil { + if _, err := sq.Update("hpc_user").Set("name", user.Name).Where("hpc_user.username = ?", dbUser.Username).RunWith(r.DB).Exec(); err != nil { log.Errorf("error while updating name of user '%s'", user.Username) return err } @@ -143,7 
+143,7 @@ func (r *UserRepository) UpdateUser(dbUser *schema.User, user *schema.User) erro // Toggled until greenlit // if dbUser.HasRole(schema.RoleManager) && !reflect.DeepEqual(dbUser.Projects, user.Projects) { // projects, _ := json.Marshal(user.Projects) - // if _, err := sq.Update("user").Set("projects", projects).Where("user.username = ?", dbUser.Username).RunWith(r.DB).Exec(); err != nil { + // if _, err := sq.Update("hpc_user").Set("projects", projects).Where("hpc_user.username = ?", dbUser.Username).RunWith(r.DB).Exec(); err != nil { // return err // } // } @@ -152,7 +152,7 @@ func (r *UserRepository) UpdateUser(dbUser *schema.User, user *schema.User) erro } func (r *UserRepository) DelUser(username string) error { - _, err := r.DB.Exec(`DELETE FROM user WHERE user.username = ?`, username) + _, err := r.DB.Exec(`DELETE FROM hpc_user WHERE hpc_user.username = ?`, username) if err != nil { log.Errorf("Error while deleting user '%s' from DB", username) return err @@ -162,7 +162,7 @@ func (r *UserRepository) DelUser(username string) error { } func (r *UserRepository) ListUsers(specialsOnly bool) ([]*schema.User, error) { - q := sq.Select("username", "name", "email", "roles", "projects").From("user") + q := sq.Select("username", "name", "email", "roles", "projects").From("hpc_user") if specialsOnly { q = q.Where("(roles != '[\"user\"]' AND roles != '[]')") } @@ -223,7 +223,7 @@ func (r *UserRepository) AddRole( } roles, _ := json.Marshal(append(user.Roles, newRole)) - if _, err := sq.Update("user").Set("roles", roles).Where("user.username = ?", username).RunWith(r.DB).Exec(); err != nil { + if _, err := sq.Update("hpc_user").Set("roles", roles).Where("hpc_user.username = ?", username).RunWith(r.DB).Exec(); err != nil { log.Errorf("error while adding new role for user '%s'", user.Username) return err } @@ -259,7 +259,7 @@ func (r *UserRepository) RemoveRole(ctx context.Context, username string, queryr } mroles, _ := json.Marshal(newroles) - if _, err := 
sq.Update("user").Set("roles", mroles).Where("user.username = ?", username).RunWith(r.DB).Exec(); err != nil { + if _, err := sq.Update("hpc_user").Set("roles", mroles).Where("hpc_user.username = ?", username).RunWith(r.DB).Exec(); err != nil { log.Errorf("Error while removing role for user '%s'", user.Username) return err } @@ -285,7 +285,7 @@ func (r *UserRepository) AddProject( } projects, _ := json.Marshal(append(user.Projects, project)) - if _, err := sq.Update("user").Set("projects", projects).Where("user.username = ?", username).RunWith(r.DB).Exec(); err != nil { + if _, err := sq.Update("hpc_user").Set("projects", projects).Where("hpc_user.username = ?", username).RunWith(r.DB).Exec(); err != nil { return err } @@ -323,7 +323,7 @@ func (r *UserRepository) RemoveProject(ctx context.Context, username string, pro } else { result, _ = json.Marshal(newprojects) } - if _, err := sq.Update("user").Set("projects", result).Where("user.username = ?", username).RunWith(r.DB).Exec(); err != nil { + if _, err := sq.Update("hpc_user").Set("projects", result).Where("hpc_user.username = ?", username).RunWith(r.DB).Exec(); err != nil { return err } return nil @@ -355,7 +355,7 @@ func (r *UserRepository) FetchUserInCtx(ctx context.Context, username string) (* user := &model.User{Username: username} var name, email sql.NullString - if err := sq.Select("name", "email").From("user").Where("user.username = ?", username). + if err := sq.Select("name", "email").From("hpc_user").Where("hpc_user.username = ?", username). RunWith(r.DB).QueryRow().Scan(&name, &email); err != nil { if err == sql.ErrNoRows { /* This warning will be logged *often* for non-local users, i.e. 
users mentioned only in job-table or archive, */ diff --git a/pkg/schema/job.go b/pkg/schema/job.go index f5bcc62..5e3110b 100644 --- a/pkg/schema/job.go +++ b/pkg/schema/job.go @@ -18,9 +18,9 @@ import ( type BaseJob struct { Cluster string `json:"cluster" db:"cluster" example:"fritz"` SubCluster string `json:"subCluster" db:"subcluster" example:"main"` - Partition string `json:"partition,omitempty" db:"partition" example:"main"` + Partition string `json:"partition,omitempty" db:"cluster_partition" example:"main"` Project string `json:"project" db:"project" example:"abcd200"` - User string `json:"user" db:"user" example:"abcd100h"` + User string `json:"user" db:"hpc_user" example:"abcd100h"` State JobState `json:"jobState" db:"job_state" example:"completed" enums:"completed,failed,cancelled,stopped,timeout,out_of_memory"` Tags []*Tag `json:"tags,omitempty"` RawEnergyFootprint []byte `json:"-" db:"energy_footprint"` From 311c088d3dda748556a6660519ef6a5d0f9a7cab Mon Sep 17 00:00:00 2001 From: Christoph Kluge Date: Thu, 21 Nov 2024 15:47:09 +0100 Subject: [PATCH 4/9] removes debug logging --- internal/repository/stats.go | 2 -- 1 file changed, 2 deletions(-) diff --git a/internal/repository/stats.go b/internal/repository/stats.go index f5677ad..ebfb1fb 100644 --- a/internal/repository/stats.go +++ b/internal/repository/stats.go @@ -226,8 +226,6 @@ func (r *JobRepository) JobsStatsGrouped( TotalAccHours: totalAccHours, }) } else { - log.Debugf(">>>> STATS ID %s", id.String) - log.Debugf(">>>> STATS TOTALNODES %d", totalNodes) stats = append(stats, &model.JobsStatistics{ ID: id.String, From 17906ec0eb00be5c7b124cb0c3e83fadc5349438 Mon Sep 17 00:00:00 2001 From: Jan Eitzinger Date: Thu, 21 Nov 2024 15:54:46 +0100 Subject: [PATCH 5/9] Add down migrations for documentation --- .../mysql/08_add-footprint.down.sql | 62 ++++++++++++++ .../sqlite3/08_add-footprint.down.sql | 82 +++++++++++++++++++ 2 files changed, 144 insertions(+) diff --git 
a/internal/repository/migrations/mysql/08_add-footprint.down.sql b/internal/repository/migrations/mysql/08_add-footprint.down.sql index 8c99eb5..57f2145 100644 --- a/internal/repository/migrations/mysql/08_add-footprint.down.sql +++ b/internal/repository/migrations/mysql/08_add-footprint.down.sql @@ -19,3 +19,65 @@ UPDATE job SET file_bw_avg = json_extract(footprint, '$.file_bw_avg'); UPDATE job SET file_data_vol_total = json_extract(footprint, '$.file_data_vol_total'); ALTER TABLE job DROP footprint; +-- Do not use reserved keywords anymore +RENAME TABLE hpc_user TO `user`; +ALTER TABLE job RENAME COLUMN hpc_user TO `user`; +ALTER TABLE job RENAME COLUMN cluster_partition TO `partition`; + +DROP INDEX IF EXISTS jobs_cluster; +DROP INDEX IF EXISTS jobs_cluster_user; +DROP INDEX IF EXISTS jobs_cluster_project; +DROP INDEX IF EXISTS jobs_cluster_subcluster; +DROP INDEX IF EXISTS jobs_cluster_starttime; +DROP INDEX IF EXISTS jobs_cluster_duration; +DROP INDEX IF EXISTS jobs_cluster_numnodes; + +DROP INDEX IF EXISTS jobs_cluster_partition; +DROP INDEX IF EXISTS jobs_cluster_partition_starttime; +DROP INDEX IF EXISTS jobs_cluster_partition_duration; +DROP INDEX IF EXISTS jobs_cluster_partition_numnodes; + +DROP INDEX IF EXISTS jobs_cluster_partition_jobstate; +DROP INDEX IF EXISTS jobs_cluster_partition_jobstate_user; +DROP INDEX IF EXISTS jobs_cluster_partition_jobstate_project; +DROP INDEX IF EXISTS jobs_cluster_partition_jobstate_starttime; +DROP INDEX IF EXISTS jobs_cluster_partition_jobstate_duration; +DROP INDEX IF EXISTS jobs_cluster_partition_jobstate_numnodes; + +DROP INDEX IF EXISTS jobs_cluster_jobstate; +DROP INDEX IF EXISTS jobs_cluster_jobstate_user; +DROP INDEX IF EXISTS jobs_cluster_jobstate_project; + +DROP INDEX IF EXISTS jobs_cluster_jobstate_starttime; +DROP INDEX IF EXISTS jobs_cluster_jobstate_duration; +DROP INDEX IF EXISTS jobs_cluster_jobstate_numnodes; + +DROP INDEX IF EXISTS jobs_user; +DROP INDEX IF EXISTS jobs_user_starttime; +DROP INDEX IF 
EXISTS jobs_user_duration; +DROP INDEX IF EXISTS jobs_user_numnodes; + +DROP INDEX IF EXISTS jobs_project; +DROP INDEX IF EXISTS jobs_project_user; +DROP INDEX IF EXISTS jobs_project_starttime; +DROP INDEX IF EXISTS jobs_project_duration; +DROP INDEX IF EXISTS jobs_project_numnodes; + +DROP INDEX IF EXISTS jobs_jobstate; +DROP INDEX IF EXISTS jobs_jobstate_user; +DROP INDEX IF EXISTS jobs_jobstate_project; +DROP INDEX IF EXISTS jobs_jobstate_starttime; +DROP INDEX IF EXISTS jobs_jobstate_duration; +DROP INDEX IF EXISTS jobs_jobstate_numnodes; + +DROP INDEX IF EXISTS jobs_arrayjobid_starttime; +DROP INDEX IF EXISTS jobs_cluster_arrayjobid_starttime; + +DROP INDEX IF EXISTS jobs_starttime; +DROP INDEX IF EXISTS jobs_duration; +DROP INDEX IF EXISTS jobs_numnodes; + +DROP INDEX IF EXISTS jobs_duration_starttime; +DROP INDEX IF EXISTS jobs_numnodes_starttime; +DROP INDEX IF EXISTS jobs_numacc_starttime; +DROP INDEX IF EXISTS jobs_energy_starttime; diff --git a/internal/repository/migrations/sqlite3/08_add-footprint.down.sql b/internal/repository/migrations/sqlite3/08_add-footprint.down.sql index 8c99eb5..cc2d3e9 100644 --- a/internal/repository/migrations/sqlite3/08_add-footprint.down.sql +++ b/internal/repository/migrations/sqlite3/08_add-footprint.down.sql @@ -19,3 +19,85 @@ UPDATE job SET file_bw_avg = json_extract(footprint, '$.file_bw_avg'); UPDATE job SET file_data_vol_total = json_extract(footprint, '$.file_data_vol_total'); ALTER TABLE job DROP footprint; + +DROP INDEX IF EXISTS jobs_cluster; +DROP INDEX IF EXISTS jobs_cluster_user; +DROP INDEX IF EXISTS jobs_cluster_project; +DROP INDEX IF EXISTS jobs_cluster_subcluster; +DROP INDEX IF EXISTS jobs_cluster_starttime; +DROP INDEX IF EXISTS jobs_cluster_duration; +DROP INDEX IF EXISTS jobs_cluster_numnodes; +DROP INDEX IF EXISTS jobs_cluster_numhwthreads; +DROP INDEX IF EXISTS jobs_cluster_numacc; +DROP INDEX IF EXISTS jobs_cluster_energy; + +DROP INDEX IF EXISTS jobs_cluster_partition; +DROP INDEX IF EXISTS 
jobs_cluster_partition_starttime; +DROP INDEX IF EXISTS jobs_cluster_partition_duration; +DROP INDEX IF EXISTS jobs_cluster_partition_numnodes; +DROP INDEX IF EXISTS jobs_cluster_partition_numhwthreads; +DROP INDEX IF EXISTS jobs_cluster_partition_numacc; +DROP INDEX IF EXISTS jobs_cluster_partition_energy; + +DROP INDEX IF EXISTS jobs_cluster_partition_jobstate; +DROP INDEX IF EXISTS jobs_cluster_partition_jobstate_user; +DROP INDEX IF EXISTS jobs_cluster_partition_jobstate_project; +DROP INDEX IF EXISTS jobs_cluster_partition_jobstate_starttime; +DROP INDEX IF EXISTS jobs_cluster_partition_jobstate_duration; +DROP INDEX IF EXISTS jobs_cluster_partition_jobstate_numnodes; +DROP INDEX IF EXISTS jobs_cluster_partition_jobstate_numhwthreads; +DROP INDEX IF EXISTS jobs_cluster_partition_jobstate_numacc; +DROP INDEX IF EXISTS jobs_cluster_partition_jobstate_energy; + +DROP INDEX IF EXISTS jobs_cluster_jobstate; +DROP INDEX IF EXISTS jobs_cluster_jobstate_user; +DROP INDEX IF EXISTS jobs_cluster_jobstate_project; + +DROP INDEX IF EXISTS jobs_cluster_jobstate_starttime; +DROP INDEX IF EXISTS jobs_cluster_jobstate_duration; +DROP INDEX IF EXISTS jobs_cluster_jobstate_numnodes; +DROP INDEX IF EXISTS jobs_cluster_jobstate_numhwthreads; +DROP INDEX IF EXISTS jobs_cluster_jobstate_numacc; +DROP INDEX IF EXISTS jobs_cluster_jobstate_energy; + +DROP INDEX IF EXISTS jobs_user; +DROP INDEX IF EXISTS jobs_user_starttime; +DROP INDEX IF EXISTS jobs_user_duration; +DROP INDEX IF EXISTS jobs_user_numnodes; +DROP INDEX IF EXISTS jobs_user_numhwthreads; +DROP INDEX IF EXISTS jobs_user_numacc; +DROP INDEX IF EXISTS jobs_user_energy; + +DROP INDEX IF EXISTS jobs_project; +DROP INDEX IF EXISTS jobs_project_user; +DROP INDEX IF EXISTS jobs_project_starttime; +DROP INDEX IF EXISTS jobs_project_duration; +DROP INDEX IF EXISTS jobs_project_numnodes; +DROP INDEX IF EXISTS jobs_project_numhwthreads; +DROP INDEX IF EXISTS jobs_project_numacc; +DROP INDEX IF EXISTS jobs_project_energy; + +DROP 
INDEX IF EXISTS jobs_jobstate; +DROP INDEX IF EXISTS jobs_jobstate_user; +DROP INDEX IF EXISTS jobs_jobstate_project; +DROP INDEX IF EXISTS jobs_jobstate_starttime; +DROP INDEX IF EXISTS jobs_jobstate_duration; +DROP INDEX IF EXISTS jobs_jobstate_numnodes; +DROP INDEX IF EXISTS jobs_jobstate_numhwthreads; +DROP INDEX IF EXISTS jobs_jobstate_numacc; + +DROP INDEX IF EXISTS jobs_arrayjobid_starttime; +DROP INDEX IF EXISTS jobs_cluster_arrayjobid_starttime; + +DROP INDEX IF EXISTS jobs_starttime; +DROP INDEX IF EXISTS jobs_duration; +DROP INDEX IF EXISTS jobs_numnodes; +DROP INDEX IF EXISTS jobs_numhwthreads; +DROP INDEX IF EXISTS jobs_numacc; +DROP INDEX IF EXISTS jobs_energy; + +DROP INDEX IF EXISTS jobs_duration_starttime; +DROP INDEX IF EXISTS jobs_numnodes_starttime; +DROP INDEX IF EXISTS jobs_numhwthreads_starttime; +DROP INDEX IF EXISTS jobs_numacc_starttime; +DROP INDEX IF EXISTS jobs_energy_starttime; From a11f165f2adb30ba788990d6b5784871be813090 Mon Sep 17 00:00:00 2001 From: Jan Eitzinger Date: Sun, 24 Nov 2024 07:09:31 +0100 Subject: [PATCH 6/9] Cleanup --- internal/repository/testdata/job.db-shm | Bin 32768 -> 0 bytes internal/repository/testdata/job.db-wal | 0 2 files changed, 0 insertions(+), 0 deletions(-) delete mode 100644 internal/repository/testdata/job.db-shm delete mode 100644 internal/repository/testdata/job.db-wal diff --git a/internal/repository/testdata/job.db-shm b/internal/repository/testdata/job.db-shm deleted file mode 100644 index fe9ac2845eca6fe6da8a63cd096d9cf9e24ece10..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 32768 zcmeIuAr62r3 Date: Sun, 24 Nov 2024 07:41:39 +0100 Subject: [PATCH 7/9] Update test sqlite db --- internal/repository/testdata/job.db | Bin 118784 -> 118784 bytes 1 file changed, 0 insertions(+), 0 deletions(-) diff --git a/internal/repository/testdata/job.db b/internal/repository/testdata/job.db index 
23eba6fb5066702a5bf6c48539e75e69da8cfd53..43ec9d3c7f36c7ea505a96cc1208c4ce7a148eed 100644 GIT binary patch delta 1028 zcmaJy~Gdc6_nYnZCxhHpY0gW!8 zNQTux5QJ+RJU+{=A1ZP?i2T7zCd_y`N(eK8#|)-dd@)baQR6%A4L6%~EvZqpTCP&u zbiEZhY&Mv&%12l?H|^2s&7!|_s!ykKy-rW*^hTdfo!iuj={LHyqgj#?u^L=z+H{b~+&$Sh61!E1YzgYtJhLrn<(HsqTa5=F z;=va^c!)p`G-OFcBK#Ec@N(E-b7wg!c_;goea23(h0H2*nGxydv_v&fZqf>yc;IEw z=9Zv`R4b_qq|CBj?QRKtViRs@*sb*wi1cHJNI(9eh~&b<;lXf!xR-V+cNW3U;z`-m z8RdxrUhU}^+(vs^LnW-ywx(c;@%+!0*rJ005eB4j5%_@;eHp!P%wf6UXs))Qz&sLn zFSSzyYwt(O_##|M6k`7!K#2|D(KZ%g4wdXtOc*72ZjKbzgi`Zo^Q5`h^x8CLvhyj% zH7?4{Y2rO-5NNFXtbCstzO@1<&xjd^k%Ezl5&IewWyTONEEradWDFbD_(J#tU(gkO delta 1852 zcmcgtTTCNW7@mJ-I)_fT9Y_mZ0&ItFxOdC69Vk;Zz;2rCMOj?|BgX9xP*^sl1(w?? z$TdE!Y(jUUqYtq7WMZOGCuSmUA}^bm_#oK?6HIh5`{3(tVxkch&kQ#UM)AeJ>CE46 z&N=^ozWKh>$t5(ogysu!@(6+`+WC_BUYYiVq&&iZZPkK*0k?^-L?O57*n%p@efyX8 zTeeNxedZLySU#|HSzNS6%QQuONS!7&h>djmEYn!XxLc~#M*%plEVB+zGCmRubfl79 zJ>5eC@?cLY8N<+Vylj@Swg!$8V5d?vayy+c6IH)nhWulLayMVj@;G`rl})RVm^|FQ|A1!k4E47N_?(H9+`0d7Z>suB1nw$NSWET%C*gA-C9AT? z*4fx;l!e@7Wia!M@&oJfSkGLFHa183=El=cNBImEF+Wy?x4FTpu7tpyZylBsc&|8y z<$;c?@<4g8rl!J&2fb`72CMR#&8vQQ7iyU*#JKD8)zw-ka2SjPuBF=XJ*&i`E-gce zQp&9v@7|U3NjA+>3^$@7;g5@mS|hCAQfpdHwn9bbipXMu%5~C3xlSM{k5WG8BKZS| zjr1HqNb-cTG@FOan5xh|n@w5Lx63A8^=1(1jR=0B*5@^7Zyzz(^F~7cKyVw`eG%p; zpSPoLD7H(o360O4gcF&CrwhT1?Yd$1%2RS8J}Qes$HR5Cw@)2E4|Z+zX2NKu7!M(|q8m2T2`95QOP94pdE_M8EIL~0uUO0kN@vwcT()L?) 
z8`eN8tv{}XhYXDmhN!Z{*4kKnT(JM2bhG~|T`bqKbo!)TS`QBz5R4Jl?_uoYbOfaN zS7LMCOW~b8?z%HjCt}Ce9C72@B^L;^fR!|g)amzNLYoxE5Cdm% zGgLZVe)vcT2tvqTC+*bOFC6fTQr&+jY-blqZk?OtB*%}A*Bzq$8~c0SvO(#XoU>0!M=at^fc4 From c523e93564dd2e31ddf8c2bff93fdecdb11b6c71 Mon Sep 17 00:00:00 2001 From: Jan Eitzinger Date: Sun, 24 Nov 2024 07:48:30 +0100 Subject: [PATCH 8/9] Update to new db schema --- internal/repository/tags.go | 8 ++------ 1 file changed, 2 insertions(+), 6 deletions(-) diff --git a/internal/repository/tags.go b/internal/repository/tags.go index 48ea9ec..6239495 100644 --- a/internal/repository/tags.go +++ b/internal/repository/tags.go @@ -17,7 +17,6 @@ import ( // Add the tag with id `tagId` to the job with the database id `jobId`. func (r *JobRepository) AddTag(ctx context.Context, job int64, tag int64) ([]*schema.Tag, error) { - j, err := r.FindById(ctx, job) if err != nil { log.Warn("Error while finding job by id") @@ -49,7 +48,6 @@ func (r *JobRepository) AddTag(ctx context.Context, job int64, tag int64) ([]*sc // Removes a tag from a job func (r *JobRepository) RemoveTag(ctx context.Context, job, tag int64) ([]*schema.Tag, error) { - j, err := r.FindById(ctx, job) if err != nil { log.Warn("Error while finding job by id") @@ -81,7 +79,6 @@ func (r *JobRepository) RemoveTag(ctx context.Context, job, tag int64) ([]*schem // CreateTag creates a new tag with the specified type and name and returns its database id. func (r *JobRepository) CreateTag(tagType string, tagName string, tagScope string) (tagId int64, err error) { - // Default to "Global" scope if none defined if tagScope == "" { tagScope = "global" @@ -147,9 +144,9 @@ func (r *JobRepository) CountTags(ctx context.Context) (tags []schema.Tag, count // Unchanged: Needs to be own case still, due to UserRole/NoRole compatibility handling in else case } else if user != nil && user.HasRole(schema.RoleManager) { // MANAGER: Count own jobs plus project's jobs // Build ("project1", "project2", ...) 
list of variable length directly in SQL string - q = q.Where("jt.job_id IN (SELECT id FROM job WHERE job.user = ? OR job.project IN (\""+strings.Join(user.Projects, "\",\"")+"\"))", user.Username) + q = q.Where("jt.job_id IN (SELECT id FROM job WHERE job.hpc_user = ? OR job.project IN (\""+strings.Join(user.Projects, "\",\"")+"\"))", user.Username) } else if user != nil { // USER OR NO ROLE (Compatibility): Only count own jobs - q = q.Where("jt.job_id IN (SELECT id FROM job WHERE job.user = ?)", user.Username) + q = q.Where("jt.job_id IN (SELECT id FROM job WHERE job.hpc_user = ?)", user.Username) } rows, err := q.RunWith(r.stmtCache).Query() @@ -176,7 +173,6 @@ func (r *JobRepository) CountTags(ctx context.Context) (tags []schema.Tag, count // AddTagOrCreate adds the tag with the specified type and name to the job with the database id `jobId`. // If such a tag does not yet exist, it is created. func (r *JobRepository) AddTagOrCreate(ctx context.Context, jobId int64, tagType string, tagName string, tagScope string) (tagId int64, err error) { - // Default to "Global" scope if none defined if tagScope == "" { tagScope = "global" From 0d923cc9206ecd8a363a8eef807d6564aae84f5e Mon Sep 17 00:00:00 2001 From: Jan Eitzinger Date: Sun, 24 Nov 2024 07:49:26 +0100 Subject: [PATCH 9/9] Ignore generated test artefacts --- .gitignore | 2 ++ 1 file changed, 2 insertions(+) diff --git a/.gitignore b/.gitignore index 2f7c206..e23a17b 100644 --- a/.gitignore +++ b/.gitignore @@ -17,3 +17,5 @@ var/job.db-wal dist/ *.db +internal/repository/testdata/job.db-shm +internal/repository/testdata/job.db-wal