From e733688fd03b41ccc6bb6ffac47aa8c1abd69bad Mon Sep 17 00:00:00 2001
From: Christoph Kluge
Date: Mon, 3 Mar 2025 17:54:34 +0100
Subject: [PATCH 01/12] add new subCluster prop to statsTable metric select

---
 web/frontend/src/job/StatsTable.svelte | 1 +
 1 file changed, 1 insertion(+)

diff --git a/web/frontend/src/job/StatsTable.svelte b/web/frontend/src/job/StatsTable.svelte
index 21d9b3b..b6b0f85 100644
--- a/web/frontend/src/job/StatsTable.svelte
+++ b/web/frontend/src/job/StatsTable.svelte
@@ -169,6 +169,7 @@

Date: Thu, 27 Feb 2025 15:11:07 +0100
Subject: [PATCH 02/12] allow /start_job/ with 0 second duration

Apparently it is possible to get this for very short jobs.
---
 internal/api/rest.go | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/internal/api/rest.go b/internal/api/rest.go
index b76da0b..fd2f86d 100644
--- a/internal/api/rest.go
+++ b/internal/api/rest.go
@@ -1008,8 +1008,8 @@ func (api *RestApi) checkAndHandleStopJob(rw http.ResponseWriter, job *schema.Jo
 		return
 	}
 
-	if job == nil || job.StartTime.Unix() >= req.StopTime {
-		handleError(fmt.Errorf("jobId %d (id %d) on %s : stopTime %d must be larger than startTime %d", job.JobID, job.ID, job.Cluster, req.StopTime, job.StartTime.Unix()), http.StatusBadRequest, rw)
+	if job == nil || job.StartTime.Unix() > req.StopTime {
+		handleError(fmt.Errorf("jobId %d (id %d) on %s : stopTime %d must be larger/equal than startTime %d", job.JobID, job.ID, job.Cluster, req.StopTime, job.StartTime.Unix()), http.StatusBadRequest, rw)
 		return
 	}
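The boundary change in PATCH 02 above reduces to one comparison: a stop request is now rejected only if its stopTime lies strictly before the job's startTime, so a zero-second job (stopTime == startTime) passes validation. A minimal sketch of just that condition; rejectsStop is a hypothetical helper name, not code from the patch:

package main

import "fmt"

// rejectsStop mirrors the patched condition in checkAndHandleStopJob:
// only stopTime < startTime is rejected; stopTime == startTime passes.
func rejectsStop(startTime, stopTime int64) bool {
	return startTime > stopTime
}

func main() {
	fmt.Println(rejectsStop(100, 100)) // false: zero-duration job accepted
	fmt.Println(rejectsStop(100, 99))  // true: stop before start still rejected
}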
From 6454576417ca9048435390a6a3c30415d1a15951 Mon Sep 17 00:00:00 2001
From: Michael Panzlaff
Date: Tue, 4 Mar 2025 17:39:38 +0100
Subject: [PATCH 03/12] add node_fail job state

---
 api/swagger.json     | 6 ++++--
 api/swagger.yaml     | 2 ++
 internal/api/docs.go | 6 ++++--
 pkg/schema/job.go    | 4 +++-
 4 files changed, 13 insertions(+), 5 deletions(-)

diff --git a/api/swagger.json b/api/swagger.json
index 51b22c8..9035beb 100644
--- a/api/swagger.json
+++ b/api/swagger.json
@@ -1786,7 +1786,8 @@
                 "stopped",
                 "timeout",
                 "preempted",
-                "out_of_memory"
+                "out_of_memory",
+                "node_fail"
             ],
             "x-enum-varnames": [
                 "JobStateRunning",
                 "JobStateCompleted",
                 "JobStateFailed",
                 "JobStateCancelled",
                 "JobStateStopped",
                 "JobStateTimeout",
                 "JobStatePreempted",
-                "JobStateOutOfMemory"
+                "JobStateOutOfMemory",
+                "JobStateNodeFail"
             ]
         },
         "schema.JobStatistics": {
diff --git a/api/swagger.yaml b/api/swagger.yaml
index f5f0081..20fa031 100644
--- a/api/swagger.yaml
+++ b/api/swagger.yaml
@@ -395,6 +395,7 @@ definitions:
       - timeout
       - preempted
       - out_of_memory
+      - node_fail
       type: string
       x-enum-varnames:
       - JobStateRunning
       - JobStateCompleted
       - JobStateFailed
       - JobStateCancelled
       - JobStateStopped
       - JobStateTimeout
       - JobStatePreempted
       - JobStateOutOfMemory
+      - JobStateNodeFail
   schema.JobStatistics:
     description: Specification for job metric statistics.
     properties:
diff --git a/internal/api/docs.go b/internal/api/docs.go
index 642003f..6f034b4 100644
--- a/internal/api/docs.go
+++ b/internal/api/docs.go
@@ -1792,7 +1792,8 @@ const docTemplate = `{
                 "stopped",
                 "timeout",
                 "preempted",
-                "out_of_memory"
+                "out_of_memory",
+                "node_fail"
             ],
             "x-enum-varnames": [
                 "JobStateRunning",
                 "JobStateCompleted",
                 "JobStateFailed",
                 "JobStateCancelled",
                 "JobStateStopped",
                 "JobStateTimeout",
                 "JobStatePreempted",
-                "JobStateOutOfMemory"
+                "JobStateOutOfMemory",
+                "JobStateNodeFail"
             ]
         },
         "schema.JobStatistics": {
diff --git a/pkg/schema/job.go b/pkg/schema/job.go
index 5e3110b..b6ac44d 100644
--- a/pkg/schema/job.go
+++ b/pkg/schema/job.go
@@ -143,6 +143,7 @@ const (
 	JobStateTimeout     JobState = "timeout"
 	JobStatePreempted   JobState = "preempted"
 	JobStateOutOfMemory JobState = "out_of_memory"
+	JobStateNodeFail    JobState = "node_fail"
 )
 
 func (e *JobState) UnmarshalGQL(v interface{}) error {
@@ -171,5 +172,6 @@ func (e JobState) Valid() bool {
 		e == JobStateStopped ||
 		e == JobStateTimeout ||
 		e == JobStatePreempted ||
-		e == JobStateOutOfMemory
+		e == JobStateOutOfMemory ||
+		e == JobStateNodeFail
 }

From 65d2698af4a104fbd5ff1faf0f462e6a50b6a466 Mon Sep 17 00:00:00 2001
From: Michael Panzlaff
Date: Tue, 4 Mar 2025 17:47:49 +0100
Subject: [PATCH 04/12] add node_fail state to database schema

---
 internal/repository/migrations/mysql/01_init-schema.up.sql | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/internal/repository/migrations/mysql/01_init-schema.up.sql b/internal/repository/migrations/mysql/01_init-schema.up.sql
index 3a6930c..16f7627 100644
--- a/internal/repository/migrations/mysql/01_init-schema.up.sql
+++ b/internal/repository/migrations/mysql/01_init-schema.up.sql
@@ -13,7 +13,7 @@ CREATE TABLE IF NOT EXISTS job (
 walltime  INT NOT NULL DEFAULT 0,
 job_state VARCHAR(255) NOT NULL
 CHECK(job_state IN ('running', 'completed', 'failed', 'cancelled',
-    'stopped', 'timeout', 'preempted', 'out_of_memory')),
+    'stopped', 'timeout', 'preempted', 'out_of_memory', 'node_fail')),
 meta_data TEXT,          -- JSON
 resources TEXT NOT NULL, -- JSON

From d4336b0dcb4e054a39033fc681634c285d08d4d8 Mon Sep 17 00:00:00 2001
From: Michael Panzlaff
Date: Tue, 4 Mar 2025 18:00:02 +0100
Subject: [PATCH 05/12] add missing node_fail to db constraints

---
 .../repository/migrations/sqlite3/04_add-constraints.up.sql | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/internal/repository/migrations/sqlite3/04_add-constraints.up.sql b/internal/repository/migrations/sqlite3/04_add-constraints.up.sql
index 06b1a9b..a6898c3 100644
--- a/internal/repository/migrations/sqlite3/04_add-constraints.up.sql
+++ b/internal/repository/migrations/sqlite3/04_add-constraints.up.sql
@@ -11,7 +11,7 @@ array_job_id BIGINT,
 duration INT NOT NULL,
 walltime INT NOT NULL,
 job_state VARCHAR(255) NOT NULL
-CHECK(job_state IN ('running', 'completed', 'failed', 'cancelled', 'stopped', 'timeout', 'preempted', 'out_of_memory')),
+CHECK(job_state IN ('running', 'completed', 'failed', 'cancelled', 'stopped', 'timeout', 'preempted', 'out_of_memory', 'node_fail')),
 meta_data TEXT,          -- JSON
 resources TEXT NOT NULL, -- JSON
 num_nodes INT NOT NULL,
From 0a3e678329bc7162bffde549a2e85ac69b63e11b Mon Sep 17 00:00:00 2001
From: Michael Panzlaff
Date: Tue, 4 Mar 2025 18:03:01 +0100
Subject: [PATCH 06/12] add more missing node_fail states

---
 api/swagger.json                                  | 6 ++++--
 api/swagger.yaml                                  | 2 ++
 internal/api/docs.go                              | 6 ++++--
 pkg/schema/job.go                                 | 2 +-
 pkg/schema/schemas/job-meta.schema.json           | 1 +
 web/frontend/src/generic/filters/JobStates.svelte | 1 +
 6 files changed, 13 insertions(+), 5 deletions(-)

diff --git a/api/swagger.json b/api/swagger.json
index 9035beb..5cd4a5e 100644
--- a/api/swagger.json
+++ b/api/swagger.json
@@ -1512,7 +1512,8 @@
                     "cancelled",
                     "stopped",
                     "timeout",
-                    "out_of_memory"
+                    "out_of_memory",
+                    "node_fail"
                 ],
                 "allOf": [
                     {
@@ -1670,7 +1671,8 @@
                     "cancelled",
                     "stopped",
                     "timeout",
-                    "out_of_memory"
+                    "out_of_memory",
+                    "node_fail"
                 ],
                 "allOf": [
                     {
diff --git a/api/swagger.yaml b/api/swagger.yaml
index 20fa031..3f188c2 100644
--- a/api/swagger.yaml
+++ b/api/swagger.yaml
@@ -201,6 +201,7 @@ definitions:
         - stopped
         - timeout
         - out_of_memory
+        - node_fail
         example: completed
       metaData:
         additionalProperties:
@@ -314,6 +315,7 @@ definitions:
         - stopped
         - timeout
         - out_of_memory
+        - node_fail
         example: completed
       metaData:
         additionalProperties:
diff --git a/internal/api/docs.go b/internal/api/docs.go
index 6f034b4..99a8a14 100644
--- a/internal/api/docs.go
+++ b/internal/api/docs.go
@@ -1518,7 +1518,8 @@ const docTemplate = `{
                     "cancelled",
                     "stopped",
                     "timeout",
-                    "out_of_memory"
+                    "out_of_memory",
+                    "node_fail"
                 ],
                 "allOf": [
                     {
@@ -1676,7 +1677,8 @@ const docTemplate = `{
                     "cancelled",
                     "stopped",
                     "timeout",
-                    "out_of_memory"
+                    "out_of_memory",
+                    "node_fail"
                 ],
                 "allOf": [
                     {
diff --git a/pkg/schema/job.go b/pkg/schema/job.go
index b6ac44d..7a2d950 100644
--- a/pkg/schema/job.go
+++ b/pkg/schema/job.go
@@ -21,7 +21,7 @@ type BaseJob struct {
 	Partition string   `json:"partition,omitempty" db:"cluster_partition" example:"main"`
 	Project   string   `json:"project" db:"project" example:"abcd200"`
 	User      string   `json:"user" db:"hpc_user" example:"abcd100h"`
-	State     JobState `json:"jobState" db:"job_state" example:"completed" enums:"completed,failed,cancelled,stopped,timeout,out_of_memory"`
+	State     JobState `json:"jobState" db:"job_state" example:"completed" enums:"completed,failed,cancelled,stopped,timeout,out_of_memory,node_fail"`
 	Tags      []*Tag   `json:"tags,omitempty"`
 	RawEnergyFootprint []byte `json:"-" db:"energy_footprint"`
 	RawFootprint       []byte `json:"-" db:"footprint"`
diff --git a/pkg/schema/schemas/job-meta.schema.json b/pkg/schema/schemas/job-meta.schema.json
index db7475c..a12057b 100644
--- a/pkg/schema/schemas/job-meta.schema.json
+++ b/pkg/schema/schemas/job-meta.schema.json
@@ -76,6 +76,7 @@
                 "cancelled",
                 "stopped",
                 "out_of_memory",
+                "node_fail",
                 "timeout"
             ]
         },
diff --git a/web/frontend/src/generic/filters/JobStates.svelte b/web/frontend/src/generic/filters/JobStates.svelte
index d903abc..b9a747d 100644
--- a/web/frontend/src/generic/filters/JobStates.svelte
+++ b/web/frontend/src/generic/filters/JobStates.svelte
@@ -23,6 +23,7 @@
     "timeout",
     "preempted",
     "out_of_memory",
+    "node_fail",
   ];

From a61ff915ac0517261b8ac2be3e3cc3b8e7f40e7c Mon Sep 17 00:00:00 2001
From: Michael Panzlaff
Date: Tue, 4 Mar 2025 18:15:39 +0100
Subject: [PATCH 07/12] Revert "add more missing node_fail states"

This reverts commit 0a3e678329bc7162bffde549a2e85ac69b63e11b.
---
 api/swagger.json                                  | 6 ++----
 api/swagger.yaml                                  | 2 --
 internal/api/docs.go                              | 6 ++----
 pkg/schema/job.go                                 | 2 +-
 pkg/schema/schemas/job-meta.schema.json           | 1 -
 web/frontend/src/generic/filters/JobStates.svelte | 1 -
 6 files changed, 5 insertions(+), 13 deletions(-)

diff --git a/api/swagger.json b/api/swagger.json
index 5cd4a5e..9035beb 100644
--- a/api/swagger.json
+++ b/api/swagger.json
@@ -1512,8 +1512,7 @@
                     "cancelled",
                     "stopped",
                     "timeout",
-                    "out_of_memory",
-                    "node_fail"
+                    "out_of_memory"
                 ],
                 "allOf": [
                     {
@@ -1671,8 +1670,7 @@
                     "cancelled",
                     "stopped",
                     "timeout",
-                    "out_of_memory",
-                    "node_fail"
+                    "out_of_memory"
                 ],
                 "allOf": [
                     {
diff --git a/api/swagger.yaml b/api/swagger.yaml
index 3f188c2..20fa031 100644
--- a/api/swagger.yaml
+++ b/api/swagger.yaml
@@ -201,7 +201,6 @@ definitions:
         - stopped
         - timeout
         - out_of_memory
-        - node_fail
         example: completed
       metaData:
         additionalProperties:
@@ -315,7 +314,6 @@ definitions:
         - stopped
         - timeout
         - out_of_memory
-        - node_fail
         example: completed
       metaData:
         additionalProperties:
diff --git a/internal/api/docs.go b/internal/api/docs.go
index 99a8a14..6f034b4 100644
--- a/internal/api/docs.go
+++ b/internal/api/docs.go
@@ -1518,8 +1518,7 @@ const docTemplate = `{
                     "cancelled",
                     "stopped",
                     "timeout",
-                    "out_of_memory",
-                    "node_fail"
+                    "out_of_memory"
                 ],
                 "allOf": [
                     {
@@ -1677,8 +1676,7 @@ const docTemplate = `{
                     "cancelled",
                     "stopped",
                     "timeout",
-                    "out_of_memory",
-                    "node_fail"
+                    "out_of_memory"
                 ],
                 "allOf": [
                     {
diff --git a/pkg/schema/job.go b/pkg/schema/job.go
index 7a2d950..b6ac44d 100644
--- a/pkg/schema/job.go
+++ b/pkg/schema/job.go
@@ -21,7 +21,7 @@ type BaseJob struct {
 	Partition string   `json:"partition,omitempty" db:"cluster_partition" example:"main"`
 	Project   string   `json:"project" db:"project" example:"abcd200"`
 	User      string   `json:"user" db:"hpc_user" example:"abcd100h"`
-	State     JobState `json:"jobState" db:"job_state" example:"completed" enums:"completed,failed,cancelled,stopped,timeout,out_of_memory,node_fail"`
+	State     JobState `json:"jobState" db:"job_state" example:"completed" enums:"completed,failed,cancelled,stopped,timeout,out_of_memory"`
 	Tags      []*Tag   `json:"tags,omitempty"`
 	RawEnergyFootprint []byte `json:"-" db:"energy_footprint"`
 	RawFootprint       []byte `json:"-" db:"footprint"`
diff --git a/pkg/schema/schemas/job-meta.schema.json b/pkg/schema/schemas/job-meta.schema.json
index a12057b..db7475c 100644
--- a/pkg/schema/schemas/job-meta.schema.json
+++ b/pkg/schema/schemas/job-meta.schema.json
@@ -76,7 +76,6 @@
                 "cancelled",
                 "stopped",
                 "out_of_memory",
-                "node_fail",
                 "timeout"
             ]
         },
diff --git a/web/frontend/src/generic/filters/JobStates.svelte b/web/frontend/src/generic/filters/JobStates.svelte
index b9a747d..d903abc 100644
--- a/web/frontend/src/generic/filters/JobStates.svelte
+++ b/web/frontend/src/generic/filters/JobStates.svelte
@@ -23,7 +23,6 @@
     "timeout",
     "preempted",
     "out_of_memory",
-    "node_fail",
   ];

From aa3fe2b8726634800a36d6dc4153ab6c7c9f93f9 Mon Sep 17 00:00:00 2001
From: Michael Panzlaff
Date: Tue, 4 Mar 2025 18:15:46 +0100
Subject: [PATCH 08/12] Revert "add missing node_fail to db constraints"

This reverts commit d4336b0dcb4e054a39033fc681634c285d08d4d8.
---
 .../repository/migrations/sqlite3/04_add-constraints.up.sql | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/internal/repository/migrations/sqlite3/04_add-constraints.up.sql b/internal/repository/migrations/sqlite3/04_add-constraints.up.sql
index a6898c3..06b1a9b 100644
--- a/internal/repository/migrations/sqlite3/04_add-constraints.up.sql
+++ b/internal/repository/migrations/sqlite3/04_add-constraints.up.sql
@@ -11,7 +11,7 @@ array_job_id BIGINT,
 duration INT NOT NULL,
 walltime INT NOT NULL,
 job_state VARCHAR(255) NOT NULL
-CHECK(job_state IN ('running', 'completed', 'failed', 'cancelled', 'stopped', 'timeout', 'preempted', 'out_of_memory', 'node_fail')),
+CHECK(job_state IN ('running', 'completed', 'failed', 'cancelled', 'stopped', 'timeout', 'preempted', 'out_of_memory')),
 meta_data TEXT,          -- JSON
 resources TEXT NOT NULL, -- JSON
 num_nodes INT NOT NULL,

From bd93b8be8efd2440d3eab6d5c43e9c4e7d4c164b Mon Sep 17 00:00:00 2001
From: Michael Panzlaff
Date: Tue, 4 Mar 2025 18:15:53 +0100
Subject: [PATCH 09/12] Revert "add node_fail state to database schema"

This reverts commit 65d2698af4a104fbd5ff1faf0f462e6a50b6a466.

---
 internal/repository/migrations/mysql/01_init-schema.up.sql | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/internal/repository/migrations/mysql/01_init-schema.up.sql b/internal/repository/migrations/mysql/01_init-schema.up.sql
index 16f7627..3a6930c 100644
--- a/internal/repository/migrations/mysql/01_init-schema.up.sql
+++ b/internal/repository/migrations/mysql/01_init-schema.up.sql
@@ -13,7 +13,7 @@ CREATE TABLE IF NOT EXISTS job (
 walltime  INT NOT NULL DEFAULT 0,
 job_state VARCHAR(255) NOT NULL
 CHECK(job_state IN ('running', 'completed', 'failed', 'cancelled',
-    'stopped', 'timeout', 'preempted', 'out_of_memory', 'node_fail')),
+    'stopped', 'timeout', 'preempted', 'out_of_memory')),
 meta_data TEXT,          -- JSON
 resources TEXT NOT NULL, -- JSON

From 4b2d7068b334c99bca3b77cc6a34371d5cb4416e Mon Sep 17 00:00:00 2001
From: Michael Panzlaff
Date: Tue, 4 Mar 2025 18:16:02 +0100
Subject: [PATCH 10/12] Revert "add node_fail job state"

This reverts commit 6454576417ca9048435390a6a3c30415d1a15951.

---
 api/swagger.json     | 6 ++----
 api/swagger.yaml     | 2 --
 internal/api/docs.go | 6 ++----
 pkg/schema/job.go    | 4 +---
 4 files changed, 5 insertions(+), 13 deletions(-)

diff --git a/api/swagger.json b/api/swagger.json
index 9035beb..51b22c8 100644
--- a/api/swagger.json
+++ b/api/swagger.json
@@ -1786,8 +1786,7 @@
             "stopped",
             "timeout",
             "preempted",
-            "out_of_memory",
-            "node_fail"
+            "out_of_memory"
         ],
         "x-enum-varnames": [
             "JobStateRunning",
             "JobStateCompleted",
             "JobStateFailed",
             "JobStateCancelled",
             "JobStateStopped",
             "JobStateTimeout",
             "JobStatePreempted",
-            "JobStateOutOfMemory",
-            "JobStateNodeFail"
+            "JobStateOutOfMemory"
         ]
     },
     "schema.JobStatistics": {
diff --git a/api/swagger.yaml b/api/swagger.yaml
index 20fa031..f5f0081 100644
--- a/api/swagger.yaml
+++ b/api/swagger.yaml
@@ -395,7 +395,6 @@ definitions:
       - timeout
      - preempted
       - out_of_memory
-      - node_fail
       type: string
       x-enum-varnames:
       - JobStateRunning
       - JobStateCompleted
       - JobStateFailed
       - JobStateCancelled
       - JobStateStopped
       - JobStateTimeout
       - JobStatePreempted
       - JobStateOutOfMemory
-      - JobStateNodeFail
   schema.JobStatistics:
     description: Specification for job metric statistics.
     properties:
diff --git a/internal/api/docs.go b/internal/api/docs.go
index 6f034b4..642003f 100644
--- a/internal/api/docs.go
+++ b/internal/api/docs.go
@@ -1792,8 +1792,7 @@ const docTemplate = `{
             "stopped",
             "timeout",
             "preempted",
-            "out_of_memory",
-            "node_fail"
+            "out_of_memory"
         ],
         "x-enum-varnames": [
             "JobStateRunning",
             "JobStateCompleted",
             "JobStateFailed",
             "JobStateCancelled",
             "JobStateStopped",
             "JobStateTimeout",
             "JobStatePreempted",
-            "JobStateOutOfMemory",
-            "JobStateNodeFail"
+            "JobStateOutOfMemory"
         ]
     },
     "schema.JobStatistics": {
diff --git a/pkg/schema/job.go b/pkg/schema/job.go
index b6ac44d..5e3110b 100644
--- a/pkg/schema/job.go
+++ b/pkg/schema/job.go
@@ -143,7 +143,6 @@ const (
 	JobStateTimeout     JobState = "timeout"
 	JobStatePreempted   JobState = "preempted"
 	JobStateOutOfMemory JobState = "out_of_memory"
-	JobStateNodeFail    JobState = "node_fail"
 )
 
 func (e *JobState) UnmarshalGQL(v interface{}) error {
@@ -172,6 +171,5 @@ func (e JobState) Valid() bool {
 		e == JobStateStopped ||
 		e == JobStateTimeout ||
 		e == JobStatePreempted ||
-		e == JobStateOutOfMemory ||
-		e == JobStateNodeFail
+		e == JobStateOutOfMemory
 }

From 2b56b40e6d2b69d49f666f0753e131f34a13aa83 Mon Sep 17 00:00:00 2001
From: Christoph Kluge
Date: Thu, 6 Mar 2025 12:46:25 +0100
Subject: [PATCH 11/12] Review energyFootprint calculation, fix missing
 numNodes factor, add log

---
 internal/importer/handleImport.go | 24 ++++++++++++++++--------
 internal/importer/initDB.go       | 24 ++++++++++++++++--------
 internal/repository/job.go        | 24 +++++++++++++++---------
 3 files changed, 47 insertions(+), 25 deletions(-)

diff --git a/internal/importer/handleImport.go b/internal/importer/handleImport.go
index 01773a5..623291c 100644
--- a/internal/importer/handleImport.go
+++ b/internal/importer/handleImport.go
@@ -96,27 +96,35 @@ func HandleImportFlag(flag string) error {
 		}
 
 		job.EnergyFootprint = make(map[string]float64)
-		var totalEnergy float64
-		var energy float64
+		// Total Job Energy Outside Loop
+		totalEnergy := 0.0
 		for _, fp := range sc.EnergyFootprint {
+			// Always Init Metric Energy Inside Loop
+			metricEnergy := 0.0
 			if i, err := archive.MetricIndex(sc.MetricConfig, fp); err == nil {
 				// Note: For DB data, calculate and save as kWh
-				// Energy: Power (in Watts) * Time (in Seconds)
 				if sc.MetricConfig[i].Energy == "energy" { // this metric has energy as unit (Joules)
+					log.Warnf("Update EnergyFootprint for Job %d and Metric %s on cluster %s: Set to 'energy' in cluster.json: Not implemented, will return 0.0", job.JobID, job.Cluster, fp)
+					// FIXME: Needs sum as stats type
 				} else if sc.MetricConfig[i].Energy == "power" { // this metric has power as unit (Watt)
-					// Unit: ( W * s ) / 3600 / 1000 = kWh ; Rounded to 2 nearest digits
-					energy = math.Round(((repository.LoadJobStat(&job, fp, "avg")*float64(job.Duration))/3600/1000)*100) / 100
+					// Energy: Power (in Watts) * Time (in Seconds)
+					// Unit: (W * (s / 3600)) / 1000 = kWh
+					// Round 2 Digits: round(Energy * 100) / 100
+					// Here: (All-Node Metric Average * Number of Nodes) * (Job Duration in Seconds / 3600) / 1000
+					// Note: Shared Jobs handled correctly since "Node Average" is based on partial resources, while "numNodes" factor is 1
+					rawEnergy := ((repository.LoadJobStat(&job, fp, "avg") * float64(job.NumNodes)) * (float64(job.Duration) / 3600.0)) / 1000.0
+					metricEnergy = math.Round(rawEnergy*100.0) / 100.0
 				}
 			} else {
 				log.Warnf("Error while collecting energy metric %s for job, DB ID '%v', return '0.0'", fp, job.ID)
 			}
-			job.EnergyFootprint[fp] = energy
-			totalEnergy += energy
+			job.EnergyFootprint[fp] = metricEnergy
+			totalEnergy += metricEnergy
 		}
-		job.Energy = (math.Round(totalEnergy*100) / 100)
+		job.Energy = (math.Round(totalEnergy*100.0) / 100.0)
 		if job.RawEnergyFootprint, err = json.Marshal(job.EnergyFootprint); err != nil {
 			log.Warnf("Error while marshaling energy footprint for job INTO BYTES, DB ID '%v'", job.ID)
 			return err
diff --git a/internal/importer/initDB.go b/internal/importer/initDB.go
index fa2ee6e..9a2ccdf 100644
--- a/internal/importer/initDB.go
+++ b/internal/importer/initDB.go
@@ -93,27 +93,35 @@ func InitDB() error {
 			}
 
 			job.EnergyFootprint = make(map[string]float64)
-			var totalEnergy float64
-			var energy float64
+			// Total Job Energy Outside Loop
+			totalEnergy := 0.0
 			for _, fp := range sc.EnergyFootprint {
+				// Always Init Metric Energy Inside Loop
+				metricEnergy := 0.0
 				if i, err := archive.MetricIndex(sc.MetricConfig, fp); err == nil {
 					// Note: For DB data, calculate and save as kWh
-					// Energy: Power (in Watts) * Time (in Seconds)
 					if sc.MetricConfig[i].Energy == "energy" { // this metric has energy as unit (Joules)
+						log.Warnf("Update EnergyFootprint for Job %d and Metric %s on cluster %s: Set to 'energy' in cluster.json: Not implemented, will return 0.0", jobMeta.JobID, jobMeta.Cluster, fp)
+						// FIXME: Needs sum as stats type
 					} else if sc.MetricConfig[i].Energy == "power" { // this metric has power as unit (Watt)
-						// Unit: ( W * s ) / 3600 / 1000 = kWh ; Rounded to 2 nearest digits
-						energy = math.Round(((repository.LoadJobStat(jobMeta, fp, "avg")*float64(jobMeta.Duration))/3600/1000)*100) / 100
+						// Energy: Power (in Watts) * Time (in Seconds)
+						// Unit: (W * (s / 3600)) / 1000 = kWh
+						// Round 2 Digits: round(Energy * 100) / 100
+						// Here: (All-Node Metric Average * Number of Nodes) * (Job Duration in Seconds / 3600) / 1000
+						// Note: Shared Jobs handled correctly since "Node Average" is based on partial resources, while "numNodes" factor is 1
+						rawEnergy := ((repository.LoadJobStat(jobMeta, fp, "avg") * float64(jobMeta.NumNodes)) * (float64(jobMeta.Duration) / 3600.0)) / 1000.0
+						metricEnergy = math.Round(rawEnergy*100.0) / 100.0
 					}
 				} else {
 					log.Warnf("Error while collecting energy metric %s for job, DB ID '%v', return '0.0'", fp, jobMeta.ID)
 				}
-				job.EnergyFootprint[fp] = energy
-				totalEnergy += energy
+				job.EnergyFootprint[fp] = metricEnergy
+				totalEnergy += metricEnergy
 			}
-			job.Energy = (math.Round(totalEnergy*100) / 100)
+			job.Energy = (math.Round(totalEnergy*100.0) / 100.0)
 			if job.RawEnergyFootprint, err = json.Marshal(job.EnergyFootprint); err != nil {
 				log.Warnf("Error while marshaling energy footprint for job INTO BYTES, DB ID '%v'", jobMeta.ID)
 				return err
diff --git a/internal/repository/job.go b/internal/repository/job.go
index 020c3c2..84de6f7 100644
--- a/internal/repository/job.go
+++ b/internal/repository/job.go
@@ -590,28 +590,34 @@ func (r *JobRepository) UpdateEnergy(
 		return stmt, err
 	}
 	energyFootprint := make(map[string]float64)
-	var totalEnergy float64
-	var energy float64
+	// Total Job Energy Outside Loop
+	totalEnergy := 0.0
 	for _, fp := range sc.EnergyFootprint {
+		// Always Init Metric Energy Inside Loop
+		metricEnergy := 0.0
 		if i, err := archive.MetricIndex(sc.MetricConfig, fp); err == nil {
 			// Note: For DB data, calculate and save as kWh
 			if sc.MetricConfig[i].Energy == "energy" { // this metric has energy as unit (Joules or Wh)
+				log.Warnf("Update EnergyFootprint for Job %d and Metric %s on cluster %s: Set to 'energy' in cluster.json: Not implemented, will return 0.0", jobMeta.JobID, jobMeta.Cluster, fp)
+				// FIXME: Needs sum as stats type
 			} else if sc.MetricConfig[i].Energy == "power" { // this metric has power as unit (Watt)
 				// Energy: Power (in Watts) * Time (in Seconds)
-				// Unit: (( W * s ) / 3600) / 1000 = kWh ; Rounded to 2 nearest digits: (Energy * 100) / 100
-				// Here: All-Node Metric Average * Number of Nodes * Job Runtime
 				// Note: Shared Jobs handled correctly since "Node Average" is based on partial resources, while "numNodes" factor is 1
-				metricNodeSum := LoadJobStat(jobMeta, fp, "avg") * float64(jobMeta.NumNodes) * float64(jobMeta.Duration)
-				energy = math.Round(((metricNodeSum/3600)/1000)*100) / 100
+				// Unit: (W * (s / 3600)) / 1000 = kWh
+				// Round 2 Digits: round(Energy * 100) / 100
+				// Here: (All-Node Metric Average * Number of Nodes) * (Job Duration in Seconds / 3600) / 1000
+				rawEnergy := ((LoadJobStat(jobMeta, fp, "avg") * float64(jobMeta.NumNodes)) * (float64(jobMeta.Duration) / 3600.0)) / 1000.0
+				metricEnergy = math.Round(rawEnergy*100.0) / 100.0
 			}
 		} else {
 			log.Warnf("Error while collecting energy metric %s for job, DB ID '%v', return '0.0'", fp, jobMeta.ID)
 		}
-		energyFootprint[fp] = energy
-		totalEnergy += energy
+		energyFootprint[fp] = metricEnergy
+		totalEnergy += metricEnergy
+
+		// log.Infof("Metric %s Average %f -> %f kWh | Job %d Total -> %f kWh", fp, LoadJobStat(jobMeta, fp, "avg"), energy, jobMeta.JobID, totalEnergy)
 	}
 
 	var rawFootprint []byte
@@ -620,7 +626,7 @@ func (r *JobRepository) UpdateEnergy(
 		return stmt, err
 	}
 
-	return stmt.Set("energy_footprint", string(rawFootprint)).Set("energy", (math.Round(totalEnergy*100) / 100)), nil
+	return stmt.Set("energy_footprint", string(rawFootprint)).Set("energy", (math.Round(totalEnergy*100.0) / 100.0)), nil
 }
 
 func (r *JobRepository) UpdateFootprint(
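The per-metric conversion that PATCH 11 above applies in all three places is the same formula: kWh = (all-node average power in W * number of nodes) * (duration in s / 3600) / 1000, rounded to two digits. A minimal standalone sketch of that conversion, with hypothetical helper and parameter names that are not code from the patch:

package main

import (
	"fmt"
	"math"
)

// metricEnergyKWh: node-average power times number of nodes gives total
// watts; times hours, divided by 1000, gives kWh; rounded to two decimal
// places as the patch does before storing the value.
func metricEnergyKWh(avgPowerWatts float64, numNodes, durationSeconds int32) float64 {
	rawEnergy := (avgPowerWatts * float64(numNodes)) * (float64(durationSeconds) / 3600.0) / 1000.0
	return math.Round(rawEnergy*100.0) / 100.0
}

func main() {
	// 250 W node average on 4 nodes for a 2 h job:
	// 250 * 4 = 1000 W; 1000 W * 2 h = 2000 Wh = 2.0 kWh
	fmt.Println(metricEnergyKWh(250.0, 4, 7200))
}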
From d0af933b350d3e50cc64c648b5fbfd2fd4d1a0cf Mon Sep 17 00:00:00 2001
From: Christoph Kluge
Date: Thu, 6 Mar 2025 15:39:15 +0100
Subject: [PATCH 12/12] feat: add subCluster level frontend keys for metric
 selections

- applies to jobView and nodeList

---
 web/frontend/src/Job.root.svelte              | 25 +++++++----
 web/frontend/src/Jobs.root.svelte             |  2 +-
 web/frontend/src/Systems.root.svelte          | 10 +++--
 web/frontend/src/User.root.svelte             |  2 +-
 .../src/generic/select/MetricSelection.svelte | 42 ++++++++++++-------
 web/frontend/src/job/StatsTable.svelte        |  7 ++--
 web/frontend/src/systems/NodeList.svelte      | 16 +++----
 7 files changed, 65 insertions(+), 39 deletions(-)

diff --git a/web/frontend/src/Job.root.svelte b/web/frontend/src/Job.root.svelte
index f2df916..6980230 100644
--- a/web/frontend/src/Job.root.svelte
+++ b/web/frontend/src/Job.root.svelte
@@ -128,15 +128,24 @@
     if (!job) return;
 
     const pendingMetrics = [
-      ...(ccconfig[`job_view_selectedMetrics:${job.cluster}`] ||
-        $initq.data.globalMetrics.reduce((names, gm) => {
-          if (gm.availability.find((av) => av.cluster === job.cluster && av.subClusters.includes(job.subCluster))) {
-            names.push(gm.name);
-          }
-          return names;
-        }, [])
+      ...(
+        (
+          ccconfig[`job_view_selectedMetrics:${job.cluster}:${job.subCluster}`] ||
+          ccconfig[`job_view_selectedMetrics:${job.cluster}`]
+        ) ||
+        $initq.data.globalMetrics
+          .reduce((names, gm) => {
+            if (gm.availability.find((av) => av.cluster === job.cluster && av.subClusters.includes(job.subCluster))) {
+              names.push(gm.name);
+            }
+            return names;
+          }, [])
       ),
-      ...(ccconfig[`job_view_nodestats_selectedMetrics:${job.cluster}`] ||
+      ...(
+        (
+          ccconfig[`job_view_nodestats_selectedMetrics:${job.cluster}:${job.subCluster}`] ||
+          ccconfig[`job_view_nodestats_selectedMetrics:${job.cluster}`]
+        ) ||
         ccconfig[`job_view_nodestats_selectedMetrics`]
       ),
     ];
diff --git a/web/frontend/src/Jobs.root.svelte b/web/frontend/src/Jobs.root.svelte
index df928d0..7faa8b8 100644
--- a/web/frontend/src/Jobs.root.svelte
+++ b/web/frontend/src/Jobs.root.svelte
@@ -137,5 +137,5 @@
   bind:metrics
   bind:isOpen={isMetricsSelectionOpen}
   bind:showFootprint
-  footprintSelect={true}
+  footprintSelect
 />
diff --git a/web/frontend/src/Systems.root.svelte b/web/frontend/src/Systems.root.svelte
index 8089bbe..1589cac 100644
--- a/web/frontend/src/Systems.root.svelte
+++ b/web/frontend/src/Systems.root.svelte
@@ -29,8 +29,8 @@
   import Refresher from "./generic/helper/Refresher.svelte";
 
   export let displayType;
-  export let cluster;
-  export let subCluster = "";
+  export let cluster = null;
+  export let subCluster = null;
   export let from = null;
   export let to = null;
@@ -60,7 +60,10 @@
   let hostnameFilter = "";
   let pendingHostnameFilter = "";
   let selectedMetric = ccconfig.system_view_selectedMetric || "";
-  let selectedMetrics = ccconfig[`node_list_selectedMetrics:${cluster}`] || [ccconfig.system_view_selectedMetric];
+  let selectedMetrics = (
+    ccconfig[`node_list_selectedMetrics:${cluster}:${subCluster}`] ||
+    ccconfig[`node_list_selectedMetrics:${cluster}`]
+  ) || [ccconfig.system_view_selectedMetric];
   let isMetricsSelectionOpen = false;
 
   /*
@@ -191,6 +194,7 @@

             av.cluster === cluster)) allMetrics.add(gm.name);
       } else {
         if (gm.availability.find((av) => av.cluster === cluster && av.subClusters.includes(subCluster))) allMetrics.add(gm.name);
@@ -67,7 +67,7 @@
   function printAvailability(metric, cluster) {
     const avail = globalMetrics.find((gm) => gm.name === metric)?.availability
-    if (cluster == null) {
+    if (!cluster) {
       return avail.map((av) => av.cluster).join(',')
     } else {
       return avail.find((av) => av.cluster === cluster).subClusters.join(',')
@@ -112,10 +112,17 @@
     metrics = newMetricsOrder.filter((m) => unorderedMetrics.includes(m));
     isOpen = false;
 
-    showFootprint = !!pendingShowFootprint;
+    let configKey;
+    if (cluster && subCluster) {
+      configKey = `${configName}:${cluster}:${subCluster}`;
+    } else if (cluster && !subCluster) {
+      configKey = `${configName}:${cluster}`;
+    } else {
+      configKey = `${configName}`;
+    }
 
     updateConfigurationMutation({
-      name: cluster == null ? configName : `${configName}:${cluster}`,
+      name: configKey,
       value: JSON.stringify(metrics),
     }).subscribe((res) => {
       if (res.fetching === false && res.error) {
@@ -123,17 +130,20 @@
       }
     });
 
-    updateConfigurationMutation({
-      name:
-        cluster == null
-          ? "plot_list_showFootprint"
-          : `plot_list_showFootprint:${cluster}`,
-      value: JSON.stringify(showFootprint),
-    }).subscribe((res) => {
-      if (res.fetching === false && res.error) {
-        throw res.error;
-      }
-    });
+    if (footprintSelect) {
+      showFootprint = !!pendingShowFootprint;
+      updateConfigurationMutation({
+        name:
+          !cluster
+            ? "plot_list_showFootprint"
+            : `plot_list_showFootprint:${cluster}`,
+        value: JSON.stringify(showFootprint),
+      }).subscribe((res) => {
+        if (res.fetching === false && res.error) {
+          throw res.error;
+        }
+      });
+    };
 
     dispatch('update-metrics', metrics);
   }
diff --git a/web/frontend/src/job/StatsTable.svelte b/web/frontend/src/job/StatsTable.svelte
index b6b0f85..c8f12f2 100644
--- a/web/frontend/src/job/StatsTable.svelte
+++ b/web/frontend/src/job/StatsTable.svelte
@@ -37,9 +37,10 @@
     sorting = {},
     isMetricSelectionOpen = false,
     availableMetrics = new Set(),
-    selectedMetrics =
-      getContext("cc-config")[`job_view_nodestats_selectedMetrics:${job.cluster}`] ||
-      getContext("cc-config")["job_view_nodestats_selectedMetrics"];
+    selectedMetrics = (
+      getContext("cc-config")[`job_view_nodestats_selectedMetrics:${job.cluster}:${job.subCluster}`] ||
+      getContext("cc-config")[`job_view_nodestats_selectedMetrics:${job.cluster}`]
+    ) || getContext("cc-config")["job_view_nodestats_selectedMetrics"];
 
   for (let metric of sortedJobMetrics) {
     // Not Exclusive or Multi-Node: get maxScope directly (mostly: node)
diff --git a/web/frontend/src/systems/NodeList.svelte b/web/frontend/src/systems/NodeList.svelte
index ad64a1f..ca22d57 100644
--- a/web/frontend/src/systems/NodeList.svelte
+++ b/web/frontend/src/systems/NodeList.svelte
@@ -217,13 +217,15 @@
-
-          Loading nodes {nodes.length + 1} to
-          { matchedNodes
-            ? `${(nodes.length + paging.itemsPerPage) > matchedNodes ? matchedNodes : (nodes.length + paging.itemsPerPage)} of ${matchedNodes} total`
-            : (nodes.length + paging.itemsPerPage)
-          }
-
+  {#if !usePaging}
+
+            Loading nodes {nodes.length + 1} to
+            { matchedNodes
+              ? `${(nodes.length + paging.itemsPerPage) > matchedNodes ? matchedNodes : (nodes.length + paging.itemsPerPage)} of ${matchedNodes} total`
+              : (nodes.length + paging.itemsPerPage)
+            }
+
+  {/if}
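The key scheme PATCH 12 above introduces resolves metric selections from the most specific configuration key to the least specific one: first `<name>:<cluster>:<subCluster>`, then `<name>:<cluster>`, then the plain name. A sketch of the configKey branching from MetricSelection.svelte, transliterated to Go purely for illustration (hypothetical function name; example cluster names):

package main

import "fmt"

// selectedMetricsKey mirrors the configKey branches in MetricSelection.svelte:
// cluster and subCluster set -> per-subcluster key; only cluster set ->
// per-cluster key; otherwise the bare configuration name.
func selectedMetricsKey(configName, cluster, subCluster string) string {
	switch {
	case cluster != "" && subCluster != "":
		return fmt.Sprintf("%s:%s:%s", configName, cluster, subCluster)
	case cluster != "":
		return fmt.Sprintf("%s:%s", configName, cluster)
	default:
		return configName
	}
}

func main() {
	fmt.Println(selectedMetricsKey("job_view_selectedMetrics", "fritz", "main")) // job_view_selectedMetrics:fritz:main
	fmt.Println(selectedMetricsKey("job_view_selectedMetrics", "fritz", ""))     // job_view_selectedMetrics:fritz
	fmt.Println(selectedMetricsKey("job_view_selectedMetrics", "", ""))          // job_view_selectedMetrics
}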