From d9f9c8aaf51f2a1b46d30c7392fec0094972fee2 Mon Sep 17 00:00:00 2001 From: Christoph Kluge Date: Fri, 3 Nov 2023 17:09:16 +0100 Subject: [PATCH 01/93] fix: retrigger gql api at manual refresh - solves #221 --- web/frontend/src/joblist/JobList.svelte | 2 +- web/frontend/src/joblist/Row.svelte | 7 ++++--- 2 files changed, 5 insertions(+), 4 deletions(-) diff --git a/web/frontend/src/joblist/JobList.svelte b/web/frontend/src/joblist/JobList.svelte index 02caf3f..2484f29 100644 --- a/web/frontend/src/joblist/JobList.svelte +++ b/web/frontend/src/joblist/JobList.svelte @@ -89,7 +89,7 @@ // Force refresh list with existing unchanged variables (== usually would not trigger reactivity) export function refresh() { - queryStore({ + jobs = queryStore({ client: client, query: query, variables: { paging, sorting, filter }, diff --git a/web/frontend/src/joblist/Row.svelte b/web/frontend/src/joblist/Row.svelte index 2117b91..6573b57 100644 --- a/web/frontend/src/joblist/Row.svelte +++ b/web/frontend/src/joblist/Row.svelte @@ -64,11 +64,12 @@ variables: { id, metrics, scopes } }); - function refresh() { - queryStore({ + export function refresh() { + metricsQuery = queryStore({ client: client, query: query, - variables: { id, metrics, scopes } + variables: { id, metrics, scopes }, + // requestPolicy: 'network-only' // use default cache-first for refresh }); } From bf64fc5213729673eff19a24f5c694b4419bfe27 Mon Sep 17 00:00:00 2001 From: Christoph Kluge Date: Mon, 13 Nov 2023 13:43:44 +0100 Subject: [PATCH 02/93] Add completed state indicator --- web/frontend/src/joblist/JobInfo.svelte | 18 ++++++++++++------ 1 file changed, 12 insertions(+), 6 deletions(-) diff --git a/web/frontend/src/joblist/JobInfo.svelte b/web/frontend/src/joblist/JobInfo.svelte index 83841c6..b7ca32a 100644 --- a/web/frontend/src/joblist/JobInfo.svelte +++ b/web/frontend/src/joblist/JobInfo.svelte @@ -28,6 +28,17 @@ return `${hours}:${('0' + minutes).slice(-2)}:${('0' + seconds).slice(-2)}`; } + function getStateColor(state) { + switch (state) { + case 'running': + return 'success' + case 'completed': + return 'primary' + default: + return 'danger' + } + } +
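Patch 01 above fixes the stale manual refresh by re-assigning the result of queryStore() instead of merely calling it: Svelte reactivity is driven by assignment, so the assignment swaps out the store the template subscribes to and the query runs again even though the variables are unchanged. A minimal standalone sketch of that pattern, assuming a placeholder query and an `id` prop that are not part of the patch:

```js
import { queryStore, gql, getContextClient } from '@urql/svelte'

export let id // job id passed in by the parent component (assumed)

const client = getContextClient()
const query = gql`query ($id: ID!) { job(id: $id) { id state } }` // placeholder query

// The template subscribes to this store via $jobs
let jobs = queryStore({ client, query, variables: { id } })

export function refresh() {
    // Re-assign instead of only calling queryStore(): the assignment replaces
    // the subscribed store, so the query is executed again with the same
    // variables (subject to the client's requestPolicy).
    jobs = queryStore({ client, query, variables: { id } })
}
```

The commented-out requestPolicy line in patch 01's Row.svelte hints at the alternative: 'network-only' would force a network round trip on every refresh, while the default cache-first policy is kept here.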
@@ -86,12 +97,7 @@
         <p>
             Start: {(new Date(job.startTime)).toLocaleString()}
             <br/>
-            Duration: {formatDuration(job.duration)}
-            {#if job.state == 'running'}
-                <Badge color="success">running</Badge>
-            {:else if job.state != 'completed'}
-                <Badge color="danger">{job.state}</Badge>
-            {/if}
+            Duration: {formatDuration(job.duration)}
+            <Badge color={getStateColor(job.state)}>{job.state}</Badge>
             {#if job.walltime}
                 <br/>
Walltime: {formatDuration(job.walltime)} From 84d6b4835360e7435a79df5918b944be73ae9ab2 Mon Sep 17 00:00:00 2001 From: Christoph Kluge Date: Wed, 15 Nov 2023 15:03:58 +0100 Subject: [PATCH 03/93] Fix: default values and new option for time filter --- web/frontend/src/filters/Duration.svelte | 99 ++++++++++++++++++----- web/frontend/src/filters/Filters.svelte | 24 +++++- web/frontend/src/filters/StartTime.svelte | 36 ++++----- 3 files changed, 120 insertions(+), 39 deletions(-) diff --git a/web/frontend/src/filters/Duration.svelte b/web/frontend/src/filters/Duration.svelte index b482b9c..ca2ce45 100644 --- a/web/frontend/src/filters/Duration.svelte +++ b/web/frontend/src/filters/Duration.svelte @@ -1,18 +1,23 @@ (isOpen = !isOpen)}> - Select Start Time + Select Job Duration -

Between

+

Duration more than

- +
h
@@ -46,7 +56,49 @@
- + +
+
m
+
+
+ + +
+ +

Duration less than

+ + +
+ +
+
h
+
+
+ + +
+ +
+
m
+
+
+ +
+
+ +

Duration between

+ + +
+ +
+
h
+
+
+ + +
+
m
@@ -57,7 +109,7 @@
- +
h
@@ -65,7 +117,7 @@
- +
m
@@ -77,19 +129,30 @@ - + }}>Reset Values + diff --git a/web/frontend/src/filters/Filters.svelte b/web/frontend/src/filters/Filters.svelte index 38d7e7a..49eaed6 100644 --- a/web/frontend/src/filters/Filters.svelte +++ b/web/frontend/src/filters/Filters.svelte @@ -41,7 +41,7 @@ states: filterPresets.states || filterPresets.state ? [filterPresets.state].flat() : allJobStates, startTime: filterPresets.startTime || { from: null, to: null }, tags: filterPresets.tags || [], - duration: filterPresets.duration || { from: null, to: null }, + duration: filterPresets.duration || { lessThan: null, moreThan: null, from: null, to: null }, jobId: filterPresets.jobId || '', arrayJobId: filterPresets.arrayJobId || null, user: filterPresets.user || '', @@ -88,6 +88,10 @@ items.push({ tags: filters.tags }) if (filters.duration.from || filters.duration.to) items.push({ duration: { from: filters.duration.from, to: filters.duration.to } }) + if (filters.duration.lessThan) + items.push({ duration: { from: 0, to: filters.duration.lessThan } }) + if (filters.duration.moreThan) + items.push({ duration: { from: filters.duration.moreThan, to: 604800 } }) // 7 days to include special jobs with long runtimes if (filters.jobId) items.push({ jobId: { [filters.jobIdMatch]: filters.jobId } }) if (filters.arrayJobId != null) @@ -144,6 +148,10 @@ opts.push(`tag=${tag}`) if (filters.duration.from && filters.duration.to) opts.push(`duration=${filters.duration.from}-${filters.duration.to}`) + if (filters.duration.lessThan) + opts.push(`duration=0-${filters.duration.lessThan}`) + if (filters.duration.moreThan) + opts.push(`duration=${filters.duration.moreThan}-604800`) if (filters.numNodes.from && filters.numNodes.to) opts.push(`numNodes=${filters.numNodes.from}-${filters.numNodes.to}`) if (filters.numAccelerators.from && filters.numAccelerators.to) @@ -267,6 +275,18 @@ {/if} + {#if filters.duration.lessThan} + (isDurationOpen = true)}> + Duration less than {Math.floor(filters.duration.lessThan / 3600)}h:{Math.floor(filters.duration.lessThan % 3600 / 60)}m + + {/if} + + {#if filters.duration.moreThan} + (isDurationOpen = true)}> + Duration more than {Math.floor(filters.duration.moreThan / 3600)}h:{Math.floor(filters.duration.moreThan % 3600 / 60)}m + + {/if} + {#if filters.tags.length != 0} (isTagsOpen = true)}> {#each filters.tags as tagId} @@ -325,6 +345,8 @@ update()} /> diff --git a/web/frontend/src/filters/StartTime.svelte b/web/frontend/src/filters/StartTime.svelte index c89851d..59f8513 100644 --- a/web/frontend/src/filters/StartTime.svelte +++ b/web/frontend/src/filters/StartTime.svelte @@ -1,5 +1,6 @@ @@ -73,7 +69,7 @@ on:click={() => { isOpen = false from = toRFC3339(pendingFrom) - to = toRFC3339(pendingTo, 59) + to = toRFC3339(pendingTo, '59') dispatch('update', { from, to }) }}> Close & Apply From 9689f95ea11dd910b293392a1373d0c683965338 Mon Sep 17 00:00:00 2001 From: Christoph Kluge Date: Thu, 16 Nov 2023 12:49:20 +0100 Subject: [PATCH 04/93] Initial implementaion --- web/frontend/src/Job.root.svelte | 16 ++- web/frontend/src/JobFootprint.svelte | 172 +++++++++++++++++++++++++++ 2 files changed, 187 insertions(+), 1 deletion(-) create mode 100644 web/frontend/src/JobFootprint.svelte diff --git a/web/frontend/src/Job.root.svelte b/web/frontend/src/Job.root.svelte index 93c5873..3d80916 100644 --- a/web/frontend/src/Job.root.svelte +++ b/web/frontend/src/Job.root.svelte @@ -27,6 +27,7 @@ import TagManagement from "./TagManagement.svelte"; import MetricSelection from "./MetricSelection.svelte"; import StatsTable from 
"./StatsTable.svelte"; + import JobFootprint from "./JobFootprint.svelte"; import { getContext } from "svelte"; export let dbid; @@ -132,7 +133,9 @@ let plots = {}, jobTags, - statsTable; + statsTable, + jobFootprint; + $: document.title = $initq.fetching ? "Loading..." : $initq.error @@ -200,6 +203,17 @@ {/if} + {#if $jobMetrics.data} + {#key $jobMetrics.data} + + + + {/key} + {/if} {#if $jobMetrics.data && $initq.data} {#if $initq.data.job.concurrentJobs != null && $initq.data.job.concurrentJobs.items.length != 0} {#if authlevel > roles.manager} diff --git a/web/frontend/src/JobFootprint.svelte b/web/frontend/src/JobFootprint.svelte new file mode 100644 index 0000000..748d6a4 --- /dev/null +++ b/web/frontend/src/JobFootprint.svelte @@ -0,0 +1,172 @@ + + +
+ +
+ + + + From a2c99fb56d0068cf123f3ad9996213eeae944a09 Mon Sep 17 00:00:00 2001 From: Christoph Kluge Date: Thu, 16 Nov 2023 15:07:17 +0100 Subject: [PATCH 05/93] Add colors based on thresholds --- web/frontend/src/JobFootprint.svelte | 50 ++++++++++++++++++---------- 1 file changed, 32 insertions(+), 18 deletions(-) diff --git a/web/frontend/src/JobFootprint.svelte b/web/frontend/src/JobFootprint.svelte index 748d6a4..5cd3b04 100644 --- a/web/frontend/src/JobFootprint.svelte +++ b/web/frontend/src/JobFootprint.svelte @@ -29,7 +29,7 @@ export let jobMetrics export let size = 200 - export let displayLegend = true + export let displayLegend = false const footprintMetrics = ['mem_used', 'mem_bw','flops_any', 'cpu_load', 'acc_utilization'] // missing: energy , move to central config before deployment @@ -56,7 +56,7 @@ console.log("MVs", meanVals) - const footprintLabels = meanVals.map((mv) => [mv.name, mv.name+' Threshold']) + const footprintLabels = meanVals.map((mv) => [mv.name, 'Threshold']) const footprintData = meanVals.map((mv) => { const metricConfig = footprintMetricConfigs.find((fmc) => fmc.name === mv.name) @@ -65,35 +65,49 @@ const levelCaution = metricConfig.caution - mv.avg const levelAlert = metricConfig.alert - mv.avg - if (levelAlert > 0) { - return [mv.avg, levelAlert] - } else if (levelCaution > 0) { - return [mv.avg, levelCaution] - } else if (levelNormal > 0) { - return [mv.avg, levelNormal] - } else { - return [mv.avg, levelPeak] + if (mv.name !== 'mem_used') { // Alert if usage is low, peak is high good usage + if (levelAlert > 0) { + return {data: [mv.avg, levelAlert], color: ['hsl(0, 100%, 60%)', '#AAA']} // 'hsl(0, 100%, 35%)' + } else if (levelCaution > 0) { + return {data: [mv.avg, levelCaution], color: ['hsl(56, 100%, 50%)', '#AAA']} // '#d5b60a' + } else if (levelNormal > 0) { + return {data: [mv.avg, levelNormal], color: ['hsl(100, 100%, 60%)', '#AAA']} // 'hsl(100, 100%, 35%)' + } else { + return {data: [mv.avg, levelPeak], color: ['hsl(180, 100%, 60%)', '#AAA']} // 'hsl(180, 100%, 35%)' + } + } else { // Inverse Logic: Alert if usage is high, Peak is bad and limits execution + if (levelPeak > 0 && (levelAlert <= 0 && levelCaution <= 0 && levelNormal <= 0)) { + return {data: [mv.avg, levelPeak], color: ['#7F00FF', '#AAA']} // '#5D3FD3' + } else if (levelAlert > 0 && (levelCaution <= 0 && levelNormal <= 0)) { + return {data: [mv.avg, levelAlert], color: ['hsl(0, 100%, 60%)', '#AAA']} // 'hsl(0, 100%, 35%)' + } else if (levelCaution > 0 && levelNormal <= 0) { + return {data: [mv.avg, levelCaution], color: ['hsl(56, 100%, 50%)', '#AAA']} // '#d5b60a' + } else { + return {data: [mv.avg, levelNormal], color: ['hsl(100, 100%, 60%)', '#AAA']} // 'hsl(100, 100%, 35%)' + } } }) + console.log("FPD", footprintData) + $: data = { labels: footprintLabels.flat(), datasets: [ { - backgroundColor: ['#AAA', '#777'], - data: footprintData[0] + backgroundColor: footprintData[0].color, + data: footprintData[0].data }, { - backgroundColor: ['hsl(0, 100%, 60%)', 'hsl(0, 100%, 35%)'], - data: footprintData[1] + backgroundColor: footprintData[1].color, + data: footprintData[1].data }, { - backgroundColor: ['hsl(100, 100%, 60%)', 'hsl(100, 100%, 35%)'], - data: footprintData[2] + backgroundColor: footprintData[2].color, + data: footprintData[2].data }, { - backgroundColor: ['hsl(180, 100%, 60%)', 'hsl(180, 100%, 35%)'], - data: footprintData[3] + backgroundColor: footprintData[3].color, + data: footprintData[3].data } ] } From 8bc43baf2c7c66915be94dc4e41680cbcd36933a Mon Sep 17 00:00:00 
2001 From: Christoph Kluge Date: Thu, 16 Nov 2023 16:45:29 +0100 Subject: [PATCH 06/93] Fix units and labels --- web/frontend/src/JobFootprint.svelte | 39 +++++++++++++++++++++------- 1 file changed, 30 insertions(+), 9 deletions(-) diff --git a/web/frontend/src/JobFootprint.svelte b/web/frontend/src/JobFootprint.svelte index 5cd3b04..5454e59 100644 --- a/web/frontend/src/JobFootprint.svelte +++ b/web/frontend/src/JobFootprint.svelte @@ -31,7 +31,7 @@ export let size = 200 export let displayLegend = false - const footprintMetrics = ['mem_used', 'mem_bw','flops_any', 'cpu_load', 'acc_utilization'] // missing: energy , move to central config before deployment + const footprintMetrics = ['mem_used', 'mem_bw','flops_any', 'cpu_load'] // 'acc_utilization' / missing: energy , move to central config before deployment const footprintMetricConfigs = footprintMetrics.map((fm) => { return getContext('metrics')(job.cluster, fm) @@ -47,16 +47,25 @@ const meanVals = footprintMetrics.map((fm) => { let jm = jobMetrics.find((jm) => jm.name === fm) + let mv = null if (jm?.metric?.statisticsSeries) { - return {name: jm.name, scope: jm.scope, avg: round(mean(jm.metric.statisticsSeries.mean), 2)} + mv = {name: jm.name, scope: jm.scope, avg: round(mean(jm.metric.statisticsSeries.mean), 2)} } else if (jm?.metric?.series[0]) { - return {name: jm.name, scope: jm.scope, avg: jm.metric.series[0].statistics.avg} + mv = {name: jm.name, scope: jm.scope, avg: jm.metric.series[0].statistics.avg} } + + if (jm?.metric?.unit?.base) { + return {...mv, unit: jm.metric.unit.prefix + jm.metric.unit.base} + } else { + return {...mv, unit: ''} + } + }).filter( Boolean ) console.log("MVs", meanVals) - const footprintLabels = meanVals.map((mv) => [mv.name, 'Threshold']) + const footprintLabels = meanVals.map((mv) => [mv.name, 'Threshold']).flat() + const footprintUnits = meanVals.map((mv) => [mv.unit, mv.unit]).flat() const footprintData = meanVals.map((mv) => { const metricConfig = footprintMetricConfigs.find((fmc) => fmc.name === mv.name) @@ -72,11 +81,15 @@ return {data: [mv.avg, levelCaution], color: ['hsl(56, 100%, 50%)', '#AAA']} // '#d5b60a' } else if (levelNormal > 0) { return {data: [mv.avg, levelNormal], color: ['hsl(100, 100%, 60%)', '#AAA']} // 'hsl(100, 100%, 35%)' - } else { + } else if (levelPeak > 0) { return {data: [mv.avg, levelPeak], color: ['hsl(180, 100%, 60%)', '#AAA']} // 'hsl(180, 100%, 35%)' + } else { // If avg greater than configured peak: render negative diff as zero + return {data: [mv.avg, 0], color: ['hsl(180, 100%, 60%)', '#AAA']} // 'hsl(180, 100%, 35%)' } } else { // Inverse Logic: Alert if usage is high, Peak is bad and limits execution - if (levelPeak > 0 && (levelAlert <= 0 && levelCaution <= 0 && levelNormal <= 0)) { + if (levelPeak <= 0 && levelAlert <= 0 && levelCaution <= 0 && levelNormal <= 0) { // If avg greater than configured peak: render negative diff as zero + return {data: [mv.avg, 0], color: ['#7F00FF', '#AAA']} // '#5D3FD3' + } else if (levelPeak > 0 && (levelAlert <= 0 && levelCaution <= 0 && levelNormal <= 0)) { return {data: [mv.avg, levelPeak], color: ['#7F00FF', '#AAA']} // '#5D3FD3' } else if (levelAlert > 0 && (levelCaution <= 0 && levelNormal <= 0)) { return {data: [mv.avg, levelAlert], color: ['hsl(0, 100%, 60%)', '#AAA']} // 'hsl(0, 100%, 35%)' @@ -91,7 +104,7 @@ console.log("FPD", footprintData) $: data = { - labels: footprintLabels.flat(), + labels: footprintLabels, datasets: [ { backgroundColor: footprintData[0].color, @@ -157,11 +170,19 @@ callbacks: { label: 
function(context) { const labelIndex = (context.datasetIndex * 2) + context.dataIndex; - return context.chart.data.labels[labelIndex] + ': ' + context.formattedValue; + if (context.chart.data.labels[labelIndex] === 'Threshold') { + return ' -' + context.formattedValue + ' ' + footprintUnits[labelIndex] + } else { + return ' ' + context.formattedValue + ' ' + footprintUnits[labelIndex] + } }, title: function(context) { const labelIndex = (context[0].datasetIndex * 2) + context[0].dataIndex; - return context[0].chart.data.labels[labelIndex]; + if (context[0].chart.data.labels[labelIndex] === 'Threshold') { + return 'Until ' + context[0].chart.data.labels[labelIndex] + } else { + return 'Average ' + context[0].chart.data.labels[labelIndex] + } } } } From 5acd9ece7fdfd9971430b4020808e7e81c79cb84 Mon Sep 17 00:00:00 2001 From: Christoph Kluge Date: Thu, 16 Nov 2023 18:31:45 +0100 Subject: [PATCH 07/93] Adds messages to footprint --- web/frontend/src/JobFootprint.svelte | 46 ++++++++++++++++++++++------ 1 file changed, 36 insertions(+), 10 deletions(-) diff --git a/web/frontend/src/JobFootprint.svelte b/web/frontend/src/JobFootprint.svelte index 5454e59..fa6b982 100644 --- a/web/frontend/src/JobFootprint.svelte +++ b/web/frontend/src/JobFootprint.svelte @@ -76,31 +76,45 @@ if (mv.name !== 'mem_used') { // Alert if usage is low, peak is high good usage if (levelAlert > 0) { - return {data: [mv.avg, levelAlert], color: ['hsl(0, 100%, 60%)', '#AAA']} // 'hsl(0, 100%, 35%)' + return {data: [mv.avg, levelAlert], color: ['hsl(0, 100%, 60%)', '#AAA'], valueMessage: 'Metric strongly below recommended level!', thresholdMessage: 'Difference towards caution threshold', impact: 2} // 'hsl(0, 100%, 35%)' } else if (levelCaution > 0) { - return {data: [mv.avg, levelCaution], color: ['hsl(56, 100%, 50%)', '#AAA']} // '#d5b60a' + return {data: [mv.avg, levelCaution], color: ['hsl(56, 100%, 50%)', '#AAA'], valueMessage: 'Metric below recommended level!', thresholdMessage: 'Difference towards normal threshold', impact: 1} // '#d5b60a' } else if (levelNormal > 0) { - return {data: [mv.avg, levelNormal], color: ['hsl(100, 100%, 60%)', '#AAA']} // 'hsl(100, 100%, 35%)' + return {data: [mv.avg, levelNormal], color: ['hsl(100, 100%, 60%)', '#AAA'], valueMessage: 'Metric within recommended level!', thresholdMessage: 'Difference towards peak threshold', impact: 0} // 'hsl(100, 100%, 35%)' } else if (levelPeak > 0) { - return {data: [mv.avg, levelPeak], color: ['hsl(180, 100%, 60%)', '#AAA']} // 'hsl(180, 100%, 35%)' + return {data: [mv.avg, levelPeak], color: ['hsl(180, 100%, 60%)', '#AAA'], valueMessage: 'Metric above recommended level!', thresholdMessage: 'Difference towards maximum', impact: 0} // 'hsl(180, 100%, 35%)' } else { // If avg greater than configured peak: render negative diff as zero - return {data: [mv.avg, 0], color: ['hsl(180, 100%, 60%)', '#AAA']} // 'hsl(180, 100%, 35%)' + return {data: [mv.avg, 0], color: ['hsl(180, 100%, 60%)', '#AAA'], valueMessage: 'Metric above recommended level!', thresholdMessage: 'Maximum reached!', impact: 0} // 'hsl(180, 100%, 35%)' } } else { // Inverse Logic: Alert if usage is high, Peak is bad and limits execution if (levelPeak <= 0 && levelAlert <= 0 && levelCaution <= 0 && levelNormal <= 0) { // If avg greater than configured peak: render negative diff as zero - return {data: [mv.avg, 0], color: ['#7F00FF', '#AAA']} // '#5D3FD3' + return {data: [mv.avg, 0], color: ['#7F00FF', '#AAA'], valueMessage: 'Memory usage at maximum!', thresholdMessage: 'Maximum reached!', 
impact: 4} // '#5D3FD3' } else if (levelPeak > 0 && (levelAlert <= 0 && levelCaution <= 0 && levelNormal <= 0)) { - return {data: [mv.avg, levelPeak], color: ['#7F00FF', '#AAA']} // '#5D3FD3' + return {data: [mv.avg, levelPeak], color: ['#7F00FF', '#AAA'], valueMessage: 'Memory usage extremely above recommended level!', thresholdMessage: 'Difference towards maximum', impact: 2} // '#5D3FD3' } else if (levelAlert > 0 && (levelCaution <= 0 && levelNormal <= 0)) { - return {data: [mv.avg, levelAlert], color: ['hsl(0, 100%, 60%)', '#AAA']} // 'hsl(0, 100%, 35%)' + return {data: [mv.avg, levelAlert], color: ['hsl(0, 100%, 60%)', '#AAA'], valueMessage: 'Memory usage strongly above recommended level!', thresholdMessage: 'Difference towards peak threshold', impact: 2} // 'hsl(0, 100%, 35%)' } else if (levelCaution > 0 && levelNormal <= 0) { - return {data: [mv.avg, levelCaution], color: ['hsl(56, 100%, 50%)', '#AAA']} // '#d5b60a' + return {data: [mv.avg, levelCaution], color: ['hsl(56, 100%, 50%)', '#AAA'], valueMessage: 'Memory usage above recommended level!', thresholdMessage: 'Difference towards alert threshold', impact: 1} // '#d5b60a' } else { - return {data: [mv.avg, levelNormal], color: ['hsl(100, 100%, 60%)', '#AAA']} // 'hsl(100, 100%, 35%)' + return {data: [mv.avg, levelNormal], color: ['hsl(100, 100%, 60%)', '#AAA'], valueMessage: 'Memory usage within recommended level!', thresholdMessage: 'Difference towards caution threshold', impact: 0} // 'hsl(100, 100%, 35%)' } } }) + const footprintMessages = footprintData.map((fpd) => [fpd.valueMessage, fpd.thresholdMessage]).flat() + const footprintResultSum = footprintData.map((fpd) => fpd.impact).reduce((accumulator, currentValue) => { return accumulator + currentValue }, 0) + let footprintResult = '' + + if (footprintResultSum <= 1) { + footprintResult = 'good.' + } else if (footprintResultSum > 1 && footprintResultSum <= 3) { + footprintResult = 'well.' + } else if (footprintResultSum > 3 && footprintResultSum <= 5) { + footprintResult = 'acceptable.' + } else { + footprintResult = 'bad.' + } + console.log("FPD", footprintData) $: data = { @@ -183,6 +197,14 @@ } else { return 'Average ' + context[0].chart.data.labels[labelIndex] } + }, + footer: function(context) { + const labelIndex = (context[0].datasetIndex * 2) + context[0].dataIndex; + if (context[0].chart.data.labels[labelIndex] === 'Threshold') { + return footprintMessages[labelIndex] + } else { + return footprintMessages[labelIndex] + } } } } @@ -194,6 +216,10 @@
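The labelIndex arithmetic in the tooltip callbacks above is the usual Chart.js pattern for multi-series pies: every footprint metric becomes its own two-segment dataset of [average, remaining headroom], and the flattened labels array carries two entries per dataset, so a (datasetIndex, dataIndex) pair addresses labels[datasetIndex * 2 + dataIndex]. A condensed, standalone sketch of that mapping, with made-up values:

```js
// Two metrics -> two datasets; labels are flattened in the same order.
const labels = ['mem_used', 'Threshold', 'flops_any', 'Threshold']
const datasets = [
    { data: [14.2, 3.8] }, // mem_used: [avg, distance to next threshold]
    { data: [420, 80] }    // flops_any: [avg, distance to peak]
]

// Mirrors the callbacks: (datasetIndex * 2) + dataIndex into the flat label list.
function labelFor(datasetIndex, dataIndex) {
    return labels[datasetIndex * 2 + dataIndex]
}

console.log(labelFor(1, 0)) // 'flops_any' -> shown as 'Average flops_any'
console.log(labelFor(1, 1)) // 'Threshold' -> shown as 'Until Threshold'
```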
+
+ Overall Job Performance:  Your job {job.state === 'running' ? 'performs' : 'performed'} {footprintResult} +
+ + From 8d409eed0f6e77c5d77f253fbade1a98098cee52 Mon Sep 17 00:00:00 2001 From: Christoph Kluge Date: Mon, 20 Nov 2023 17:53:12 +0100 Subject: [PATCH 12/93] Footprint in jobList as selectable --- web/frontend/src/Job.root.svelte | 11 +++++++--- web/frontend/src/JobFootprintBars.svelte | 9 +++++--- web/frontend/src/Jobs.root.svelte | 8 +++++--- web/frontend/src/MetricSelection.svelte | 8 ++++++++ web/frontend/src/joblist/JobList.svelte | 12 ++++++++++- web/frontend/src/joblist/Row.svelte | 26 ++++++++++++++++++++++++ 6 files changed, 64 insertions(+), 10 deletions(-) diff --git a/web/frontend/src/Job.root.svelte b/web/frontend/src/Job.root.svelte index 3d80916..da09841 100644 --- a/web/frontend/src/Job.root.svelte +++ b/web/frontend/src/Job.root.svelte @@ -27,7 +27,7 @@ import TagManagement from "./TagManagement.svelte"; import MetricSelection from "./MetricSelection.svelte"; import StatsTable from "./StatsTable.svelte"; - import JobFootprint from "./JobFootprint.svelte"; + import JobFootprintBars from "./JobFootprintBars.svelte"; import { getContext } from "svelte"; export let dbid; @@ -135,7 +135,7 @@ jobTags, statsTable, jobFootprint; - + $: document.title = $initq.fetching ? "Loading..." : $initq.error @@ -206,7 +206,12 @@ {#if $jobMetrics.data} {#key $jobMetrics.data} - --> + footprintMetrics.includes(jm.name))) + // console.log('JMs', jobMetrics.filter((jm) => footprintMetrics.includes(jm.name))) const footprintMetricConfigs = footprintMetrics.map((fm) => { return getContext('metrics')(job.cluster, fm) }).filter( Boolean ) // Filter only "truthy" vals, see: https://stackoverflow.com/questions/28607451/removing-undefined-values-from-array - console.log("FMCs", footprintMetricConfigs) + // console.log("FMCs", footprintMetricConfigs) // const footprintMetricThresholds = footprintMetricConfigs.map((fmc) => { // Only required if scopes smaller than node required // return {name: fmc.name, ...findThresholds(fmc, 'node', job?.subCluster ? job.subCluster : '')} // Merge 2 objects @@ -149,16 +150,18 @@ } }).filter( Boolean ) - console.log("FPD", footprintData) + // console.log("FPD", footprintData) + {#if view === 'job'} Core Metrics Footprint + {/if} {#each footprintData as fpd}
diff --git a/web/frontend/src/Jobs.root.svelte b/web/frontend/src/Jobs.root.svelte index 07094b8..ffad9df 100644 --- a/web/frontend/src/Jobs.root.svelte +++ b/web/frontend/src/Jobs.root.svelte @@ -19,7 +19,7 @@ let filterComponent; // see why here: https://stackoverflow.com/questions/58287729/how-can-i-export-a-function-from-a-svelte-component-that-changes-a-value-in-the let jobList, matchedJobs = null - let sorting = { field: 'startTime', order: 'DESC' }, isSortingOpen = false, isMetricsSelectionOpen = false + let sorting = { field: 'startTime', order: 'DESC' }, isSortingOpen = false, isMetricsSelectionOpen = false, showFootprint let metrics = filterPresets.cluster ? ccconfig[`plot_list_selectedMetrics:${filterPresets.cluster}`] || ccconfig.plot_list_selectedMetrics : ccconfig.plot_list_selectedMetrics @@ -81,7 +81,8 @@ bind:metrics={metrics} bind:sorting={sorting} bind:matchedJobs={matchedJobs} - bind:this={jobList} /> + bind:this={jobList} + bind:showFootprint={showFootprint} /> @@ -93,4 +94,5 @@ bind:cluster={selectedCluster} configName="plot_list_selectedMetrics" bind:metrics={metrics} - bind:isOpen={isMetricsSelectionOpen} /> + bind:isOpen={isMetricsSelectionOpen} + bind:showFootprint={showFootprint}/> diff --git a/web/frontend/src/MetricSelection.svelte b/web/frontend/src/MetricSelection.svelte index 59fe263..63101d4 100644 --- a/web/frontend/src/MetricSelection.svelte +++ b/web/frontend/src/MetricSelection.svelte @@ -17,12 +17,14 @@ export let configName export let allMetrics = null export let cluster = null + export let showFootprint const clusters = getContext('clusters'), onInit = getContext('on-init') let newMetricsOrder = [] let unorderedMetrics = [...metrics] + let pendingShowFootprint = showFootprint || false onInit(() => { if (allMetrics == null) allMetrics = new Set() @@ -90,6 +92,8 @@ metrics = newMetricsOrder.filter(m => unorderedMetrics.includes(m)) isOpen = false + showFootprint = pendingShowFootprint ? true : false + updateConfigurationMutation({ name: cluster == null ? configName : `${configName}:${cluster}`, value: JSON.stringify(metrics) @@ -121,6 +125,10 @@ +
+        <li class="list-group-item">
+            <input type="checkbox" bind:checked={pendingShowFootprint}> Show Footprint
+        </li>
    {#each newMetricsOrder as metric, index (metric)}
  • Job Info + {#if showFootprint} + + Job Footprint + + {/if} {#each metrics as metric (metric)} {:else if $jobs.data && $initialized} {#each $jobs.data.jobs.items as job (job)} - + {:else} diff --git a/web/frontend/src/joblist/Row.svelte b/web/frontend/src/joblist/Row.svelte index 6573b57..bae86d8 100644 --- a/web/frontend/src/joblist/Row.svelte +++ b/web/frontend/src/joblist/Row.svelte @@ -14,16 +14,23 @@ import { Card, Spinner } from "sveltestrap"; import MetricPlot from "../plots/MetricPlot.svelte"; import JobInfo from "./JobInfo.svelte"; + import JobFootprint from "../JobFootprint.svelte"; + import JobFootprintBars from "../JobFootprintBars.svelte"; import { maxScope, checkMetricDisabled } from "../utils.js"; export let job; export let metrics; export let plotWidth; export let plotHeight = 275; + export let showFootprint; let { id } = job; let scopes = [job.numNodes == 1 ? "core" : "node"]; + function distinct(value, index, array) { + return array.indexOf(value) === index; + } + const cluster = getContext("clusters").find((c) => c.name == job.cluster); const metricConfig = getContext("metrics"); // Get all MetricConfs which include subCluster-specific settings for this job const client = getContextClient(); @@ -64,6 +71,10 @@ variables: { id, metrics, scopes } }); + $: if (showFootprint) { + metrics = ['cpu_load', 'flops_any', 'mem_used', 'mem_bw', ...metrics].filter(distinct) + } + export function refresh() { metricsQuery = queryStore({ client: client, @@ -122,6 +133,21 @@ {:else} + {#if showFootprint} + + + + + {/if} {#each sortAndSelectScope($metricsQuery.data.jobMetrics) as metric, i (metric || i)} From f8f900151af502b73ad5454f12160a9fc13936cc Mon Sep 17 00:00:00 2001 From: Christoph Kluge Date: Mon, 20 Nov 2023 18:08:33 +0100 Subject: [PATCH 13/93] Fix width, spacing, render --- web/frontend/src/JobFootprintBars.svelte | 5 ++--- web/frontend/src/joblist/JobList.svelte | 17 ++++++++++++----- web/frontend/src/joblist/Row.svelte | 14 +++++++++----- 3 files changed, 23 insertions(+), 13 deletions(-) diff --git a/web/frontend/src/JobFootprintBars.svelte b/web/frontend/src/JobFootprintBars.svelte index d5ba081..36818ef 100644 --- a/web/frontend/src/JobFootprintBars.svelte +++ b/web/frontend/src/JobFootprintBars.svelte @@ -18,8 +18,7 @@ export let job export let jobMetrics export let view = 'job' - - // export let size = 200 + export let width = 200 const footprintMetrics = ['cpu_load', 'flops_any', 'mem_used', 'mem_bw'] // 'acc_utilization' / missing: energy , move to central config before deployment @@ -154,7 +153,7 @@ - + {#if view === 'job'} diff --git a/web/frontend/src/joblist/JobList.svelte b/web/frontend/src/joblist/JobList.svelte index e6acaf8..698b9ca 100644 --- a/web/frontend/src/joblist/JobList.svelte +++ b/web/frontend/src/joblist/JobList.svelte @@ -28,7 +28,7 @@ export let sorting = { field: "startTime", order: "DESC" }; export let matchedJobs = 0; export let metrics = ccconfig.plot_list_selectedMetrics; - export let showFootprint; + export let showFootprint = false; let itemsPerPage = ccconfig.plot_list_jobsPerPage; let page = 1; @@ -135,12 +135,19 @@ }) }; + let plotWidth = null; let tableWidth = null; let jobInfoColumnWidth = 250; - $: plotWidth = Math.floor( - (tableWidth - jobInfoColumnWidth) / metrics.length - 10 - ); + $: if (showFootprint) { + plotWidth = Math.floor( + (tableWidth - jobInfoColumnWidth) / (metrics.length + 1) - 10 + ) + } else { + plotWidth = Math.floor( + (tableWidth - jobInfoColumnWidth) / metrics.length - 10 + ) + } let headerPaddingTop = 0; 
stickyHeader( @@ -165,7 +172,7 @@ Job Footprint diff --git a/web/frontend/src/joblist/Row.svelte b/web/frontend/src/joblist/Row.svelte index bae86d8..61d8cb6 100644 --- a/web/frontend/src/joblist/Row.svelte +++ b/web/frontend/src/joblist/Row.svelte @@ -35,8 +35,8 @@ const metricConfig = getContext("metrics"); // Get all MetricConfs which include subCluster-specific settings for this job const client = getContextClient(); const query = gql` - query ($id: ID!, $metrics: [String!]!, $scopes: [MetricScope!]!) { - jobMetrics(id: $id, metrics: $metrics, scopes: $scopes) { + query ($id: ID!, $queryMetrics: [String!]!, $scopes: [MetricScope!]!) { + jobMetrics(id: $id, metrics: $queryMetrics, scopes: $scopes) { name scope metric { @@ -68,18 +68,21 @@ $: metricsQuery = queryStore({ client: client, query: query, - variables: { id, metrics, scopes } + variables: { id, queryMetrics, scopes } }); + let queryMetrics = null $: if (showFootprint) { - metrics = ['cpu_load', 'flops_any', 'mem_used', 'mem_bw', ...metrics].filter(distinct) + queryMetrics = ['cpu_load', 'flops_any', 'mem_used', 'mem_bw', ...metrics].filter(distinct) + } else { + queryMetrics = [...metrics] } export function refresh() { metricsQuery = queryStore({ client: client, query: query, - variables: { id, metrics, scopes }, + variables: { id, queryMetrics, scopes }, // requestPolicy: 'network-only' // use default cache-first for refresh }); } @@ -144,6 +147,7 @@ From dc860f8fd903c2a0d8f52434ac1af0d5b50aa877 Mon Sep 17 00:00:00 2001 From: Christoph Kluge Date: Tue, 21 Nov 2023 10:27:16 +0100 Subject: [PATCH 14/93] Handle artifacts, fix single node footprint flops --- web/frontend/src/JobFootprintBars.svelte | 82 +++++++++++++++++++++--- web/frontend/src/joblist/Row.svelte | 2 + 2 files changed, 74 insertions(+), 10 deletions(-) diff --git a/web/frontend/src/JobFootprintBars.svelte b/web/frontend/src/JobFootprintBars.svelte index 36818ef..2948613 100644 --- a/web/frontend/src/JobFootprintBars.svelte +++ b/web/frontend/src/JobFootprintBars.svelte @@ -1,9 +1,6 @@ - + {#if view === 'job'} @@ -172,6 +226,10 @@ {:else if fpd.impact === 2} + {:else if fpd.impact === -1} + + {:else if fpd.impact === -2} + {/if} {#if fpd.impact === 4} @@ -184,6 +242,10 @@ {:else if fpd.impact === 0} + {:else if fpd.impact === -1} + + {:else if fpd.impact === -2} + {/if}
  • diff --git a/web/frontend/src/joblist/Row.svelte b/web/frontend/src/joblist/Row.svelte index 61d8cb6..359f263 100644 --- a/web/frontend/src/joblist/Row.svelte +++ b/web/frontend/src/joblist/Row.svelte @@ -74,8 +74,10 @@ let queryMetrics = null $: if (showFootprint) { queryMetrics = ['cpu_load', 'flops_any', 'mem_used', 'mem_bw', ...metrics].filter(distinct) + scopes = ["node"] } else { queryMetrics = [...metrics] + scopes = [job.numNodes == 1 ? "core" : "node"] } export function refresh() { From f342a65aba12d591cbb6f1cb60aa6c09999ae1c5 Mon Sep 17 00:00:00 2001 From: Christoph Kluge Date: Tue, 21 Nov 2023 15:38:28 +0100 Subject: [PATCH 15/93] Adds persistance to showfootprint selection --- web/frontend/src/Jobs.root.svelte | 5 ++++- web/frontend/src/MetricSelection.svelte | 14 ++++++++++++-- web/frontend/src/joblist/JobList.svelte | 2 +- 3 files changed, 17 insertions(+), 4 deletions(-) diff --git a/web/frontend/src/Jobs.root.svelte b/web/frontend/src/Jobs.root.svelte index ffad9df..2f2f9dc 100644 --- a/web/frontend/src/Jobs.root.svelte +++ b/web/frontend/src/Jobs.root.svelte @@ -19,10 +19,13 @@ let filterComponent; // see why here: https://stackoverflow.com/questions/58287729/how-can-i-export-a-function-from-a-svelte-component-that-changes-a-value-in-the let jobList, matchedJobs = null - let sorting = { field: 'startTime', order: 'DESC' }, isSortingOpen = false, isMetricsSelectionOpen = false, showFootprint + let sorting = { field: 'startTime', order: 'DESC' }, isSortingOpen = false, isMetricsSelectionOpen = false let metrics = filterPresets.cluster ? ccconfig[`plot_list_selectedMetrics:${filterPresets.cluster}`] || ccconfig.plot_list_selectedMetrics : ccconfig.plot_list_selectedMetrics + let showFootprint = filterPresets.cluster + ? !!ccconfig[`plot_list_showFootprint:${filterPresets.cluster}`] + : !!ccconfig.plot_list_showFootprint let selectedCluster = filterPresets?.cluster ? filterPresets.cluster : null // The filterPresets are handled by the Filters component, diff --git a/web/frontend/src/MetricSelection.svelte b/web/frontend/src/MetricSelection.svelte index 63101d4..5b54ba8 100644 --- a/web/frontend/src/MetricSelection.svelte +++ b/web/frontend/src/MetricSelection.svelte @@ -24,7 +24,7 @@ let newMetricsOrder = [] let unorderedMetrics = [...metrics] - let pendingShowFootprint = showFootprint || false + let pendingShowFootprint = !!showFootprint onInit(() => { if (allMetrics == null) allMetrics = new Set() @@ -92,7 +92,7 @@ metrics = newMetricsOrder.filter(m => unorderedMetrics.includes(m)) isOpen = false - showFootprint = pendingShowFootprint ? true : false + showFootprint = !!pendingShowFootprint updateConfigurationMutation({ name: cluster == null ? configName : `${configName}:${cluster}`, @@ -103,6 +103,16 @@ // console.log('Error on subscription: ' + res.error) } }) + + updateConfigurationMutation({ + name: cluster == null ? 
'plot_list_showFootprint' : `plot_list_showFootprint:${cluster}`, + value: JSON.stringify(showFootprint) + }).subscribe(res => { + if (res.fetching === false && res.error) { + console.log('Error on footprint subscription: ' + res.error) + throw res.error + } + }) } diff --git a/web/frontend/src/joblist/JobList.svelte b/web/frontend/src/joblist/JobList.svelte index 698b9ca..8036361 100644 --- a/web/frontend/src/joblist/JobList.svelte +++ b/web/frontend/src/joblist/JobList.svelte @@ -28,7 +28,7 @@ export let sorting = { field: "startTime", order: "DESC" }; export let matchedJobs = 0; export let metrics = ccconfig.plot_list_selectedMetrics; - export let showFootprint = false; + export let showFootprint; let itemsPerPage = ccconfig.plot_list_jobsPerPage; let page = 1; From 6b78b4e12bc4280f9380e512413fee4b34ea256d Mon Sep 17 00:00:00 2001 From: Christoph Kluge Date: Tue, 21 Nov 2023 15:38:57 +0100 Subject: [PATCH 16/93] Adds message display in jobView --- web/frontend/src/JobFootprintBars.svelte | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/web/frontend/src/JobFootprintBars.svelte b/web/frontend/src/JobFootprintBars.svelte index 2948613..c21e2b2 100644 --- a/web/frontend/src/JobFootprintBars.svelte +++ b/web/frontend/src/JobFootprintBars.svelte @@ -262,6 +262,14 @@ />
    {/each} + {#if job?.metaData?.message} +
    +
    +
    + Note: {job.metaData.message} +
    +
    + {/if}
    From 709880ff5a074dede35ef5c9e051e4fbf0dbc3c9 Mon Sep 17 00:00:00 2001 From: Christoph Kluge Date: Wed, 22 Nov 2023 10:53:18 +0100 Subject: [PATCH 17/93] Use html tag for metadata message - remove old footprint version based on chartjs pie --- web/frontend/src/Job.root.svelte | 9 +- web/frontend/src/JobFootprint.svelte | 447 +++++++++++------------ web/frontend/src/JobFootprintBars.svelte | 281 -------------- web/frontend/src/joblist/Row.svelte | 9 +- 4 files changed, 223 insertions(+), 523 deletions(-) delete mode 100644 web/frontend/src/JobFootprintBars.svelte diff --git a/web/frontend/src/Job.root.svelte b/web/frontend/src/Job.root.svelte index da09841..7bd40f8 100644 --- a/web/frontend/src/Job.root.svelte +++ b/web/frontend/src/Job.root.svelte @@ -27,7 +27,7 @@ import TagManagement from "./TagManagement.svelte"; import MetricSelection from "./MetricSelection.svelte"; import StatsTable from "./StatsTable.svelte"; - import JobFootprintBars from "./JobFootprintBars.svelte"; + import JobFootprint from "./JobFootprint.svelte"; import { getContext } from "svelte"; export let dbid; @@ -206,12 +206,7 @@ {#if $jobMetrics.data} {#key $jobMetrics.data} - - import { getContext } from 'svelte' - // import { Button, Table, InputGroup, InputGroupText, Icon } from 'sveltestrap' + import { + Card, + CardHeader, + CardTitle, + CardBody, + Progress, + Icon, + } from "sveltestrap"; import { mean, round } from 'mathjs' // import { findThresholds } from './plots/MetricPlot.svelte' - // import { formatNumber } from './units.js' - - import { Pie } from 'svelte-chartjs'; - import { - Chart as ChartJS, - Title, - Tooltip, - Legend, - Filler, - ArcElement, - CategoryScale - } from 'chart.js'; - - ChartJS.register( - Title, - Tooltip, - Legend, - Filler, - ArcElement, - CategoryScale - ); + // import { formatNumber, scaleNumbers } from './units.js' export let job export let jobMetrics + export let view = 'job' + export let width = 'auto' - export let size = 200 - export let displayLegend = false + // console.log('CLUSTER', job.cluster) - const footprintMetrics = ['mem_used', 'mem_bw','flops_any', 'cpu_load'] // 'acc_utilization' / missing: energy , move to central config before deployment + const footprintMetrics = ['cpu_load', 'flops_any', 'mem_used', 'mem_bw'] // 'acc_utilization' / missing: energy , move to central config before deployment - console.log('JMs', jobMetrics.filter((jm) => footprintMetrics.includes(jm.name))) + // console.log('JMs', jobMetrics.filter((jm) => footprintMetrics.includes(jm.name))) const footprintMetricConfigs = footprintMetrics.map((fm) => { return getContext('metrics')(job.cluster, fm) }).filter( Boolean ) // Filter only "truthy" vals, see: https://stackoverflow.com/questions/28607451/removing-undefined-values-from-array - console.log("FMCs", footprintMetricConfigs) + // console.log("FMCs", footprintMetricConfigs) // const footprintMetricThresholds = footprintMetricConfigs.map((fmc) => { // Only required if scopes smaller than node required // return {name: fmc.name, ...findThresholds(fmc, 'node', job?.subCluster ? job.subCluster : '')} // Merge 2 objects @@ -47,239 +35,244 @@ // console.log("FMTs", footprintMetricThresholds) - const meanVals = footprintMetrics.map((fm) => { - let jm = jobMetrics.find((jm) => jm.name === fm && jm.scope === 'node') // Only Node Scope + const footprintData = footprintMetrics.map((fm) => { + const jm = jobMetrics.find((jm) => jm.name === fm && jm.scope === 'node') + // ... 
get Mean let mv = null if (jm?.metric?.statisticsSeries) { - mv = {name: jm.name, avg: round(mean(jm.metric.statisticsSeries.mean), 2)} + mv = round(mean(jm.metric.statisticsSeries.mean), 2) } else if (jm?.metric?.series[0]) { - mv = {name: jm.name, avg: jm.metric.series[0].statistics.avg} + mv = jm.metric.series[0].statistics.avg } - + // ... get Unit + let unit = null if (jm?.metric?.unit?.base) { - return {...mv, unit: jm.metric.unit.prefix + jm.metric.unit.base} + unit = jm.metric.unit.prefix + jm.metric.unit.base } else { - return {...mv, unit: ''} + unit = '' } - - }).filter( Boolean ) - - console.log("MVs", meanVals) - - const footprintData = meanVals.map((mv) => { - const metricConfig = footprintMetricConfigs.find((fmc) => fmc.name === mv.name) - const levelPeak = metricConfig.peak - mv.avg - const levelNormal = metricConfig.normal - mv.avg - const levelCaution = metricConfig.caution - mv.avg - const levelAlert = metricConfig.alert - mv.avg - - if (mv.name !== 'mem_used') { // Alert if usage is low, peak is high good usage + // From MetricConfig: Scope only for scaling -> Not of interest here + const metricConfig = footprintMetricConfigs.find((fmc) => fmc.name === fm) + // ... get Thresholds + const levelPeak = fm === 'flops_any' ? round((metricConfig.peak * 0.85), 0) - mv : metricConfig.peak - mv // Scale flops_any down + const levelNormal = metricConfig.normal - mv + const levelCaution = metricConfig.caution - mv + const levelAlert = metricConfig.alert - mv + // Collect + if (fm !== 'mem_used') { // Alert if usage is low, peak as maxmimum possible (scaled down for flops_any) if (levelAlert > 0) { return { - data: [mv.avg, levelAlert], - color: ['hsl(0, 100%, 60%)', '#AAA'], - messages: ['Metric strongly below recommended levels!', 'Difference towards acceptable performace'], - impact: 2 - } // 'hsl(0, 100%, 35%)' + name: fm, + unit: unit, + avg: mv, + max: fm === 'flops_any' ? round((metricConfig.peak * 0.85), 0) : metricConfig.peak, + color: 'danger', + message: 'Metric strongly below common levels!', + impact: 3 + } } else if (levelCaution > 0) { return { - data: [mv.avg, levelCaution], - color: ['hsl(56, 100%, 50%)', '#AAA'], - messages: ['Metric below recommended levels', 'Difference towards normal performance'], - impact: 1 - } // '#d5b60a' + name: fm, + unit: unit, + avg: mv, + max: fm === 'flops_any' ? round((metricConfig.peak * 0.85), 0) : metricConfig.peak, + color: 'warning', + message: 'Metric below common levels', + impact: 2 + } } else if (levelNormal > 0) { return { - data: [mv.avg, levelNormal], - color: ['hsl(100, 100%, 60%)', '#AAA'], - messages: ['Metric within recommended levels', 'Difference towards optimal performance'], - impact: 0 - } // 'hsl(100, 100%, 35%)' + name: fm, + unit: unit, + avg: mv, + max: fm === 'flops_any' ? round((metricConfig.peak * 0.85), 0) : metricConfig.peak, + color: 'success', + message: 'Metric within common levels', + impact: 1 + } } else if (levelPeak > 0) { return { - data: [mv.avg, levelPeak], - color: ['hsl(180, 100%, 60%)', '#AAA'], - messages: ['Metric performs better than recommended levels', 'Difference towards maximum capacity'], // "Perfomrs optimal"? + name: fm, + unit: unit, + avg: mv, + max: fm === 'flops_any' ? 
round((metricConfig.peak * 0.85), 0) : metricConfig.peak, + color: 'info', + message: 'Metric performs better than common levels', impact: 0 - } // 'hsl(180, 100%, 35%)' - } else { // If avg greater than configured peak: render negative diff as zero - return { - data: [mv.avg, 0], - color: ['hsl(180, 100%, 60%)', '#AAA'], - messages: ['Metric performs at maximum capacity', 'Maximum reached'], - impact: 0 - } // 'hsl(180, 100%, 35%)' + } + } else { // Possible artifacts - <5% Margin OK, >5% warning, > 50% danger + const checkData = { + name: fm, + unit: unit, + avg: mv, + max: fm === 'flops_any' ? round((metricConfig.peak * 0.85), 0) : metricConfig.peak + } + + if (checkData.avg >= (1.5 * checkData.max)) { + return { + ...checkData, + color: 'danger', + message: 'Metric average at least 50% above common peak value: Check data for artifacts!', + impact: -2 + } + } else if (checkData.avg >= (1.05 * checkData.max)) { + return { + ...checkData, + color: 'warning', + message: 'Metric average at least 5% above common peak value: Check data for artifacts', + impact: -1 + } + } else { + return { + ...checkData, + color: 'info', + message: 'Metric performs better than common levels', + impact: 0 + } + } } } else { // Inverse Logic: Alert if usage is high, Peak is bad and limits execution - if (levelPeak <= 0 && levelAlert <= 0 && levelCaution <= 0 && levelNormal <= 0) { // If avg greater than configured peak: render negative diff as zero + if (levelPeak <= 0 && levelAlert <= 0 && levelCaution <= 0 && levelNormal <= 0) { // Possible artifacts - <5% Margin OK, >5% warning, > 50% danger + const checkData = { + name: fm, + unit: unit, + avg: mv, + max: metricConfig.peak + } + if (checkData.avg >= (1.5 * checkData.max)) { + return { + ...checkData, + color: 'danger', + message: 'Memory usage at least 50% above possible maximum value: Check data for artifacts!', + impact: -2 + } + } else if (checkData.avg >= (1.05 * checkData.max)) { + return { + ...checkData, + color: 'warning', + message: 'Memory usage at least 5% above possible maximum value: Check data for artifacts!', + impact: -1 + } + } else { + return { + ...checkData, + color: 'danger', + message: 'Memory usage extremely above common levels!', + impact: 4 + } + } + } else if (levelAlert <= 0 && levelCaution <= 0 && levelNormal <= 0) { return { - data: [mv.avg, 0], - color: ['#7F00FF', '#AAA'], - messages: ['Memory usage at maximum capacity!', 'Maximum reached'], + name: fm, + unit: unit, + avg: mv, + max: metricConfig.peak, + color: 'danger', + message: 'Memory usage extremely above common levels!', impact: 4 - } // '#5D3FD3' - } else if (levelPeak > 0 && (levelAlert <= 0 && levelCaution <= 0 && levelNormal <= 0)) { - return { - data: [mv.avg, levelPeak], - color: ['#7F00FF', '#AAA'], - messages: ['Memory usage extremely above recommended levels!', 'Difference towards maximum memory capacity'], - impact: 2 - } // '#5D3FD3' + } } else if (levelAlert > 0 && (levelCaution <= 0 && levelNormal <= 0)) { return { - data: [mv.avg, levelAlert], - color: ['hsl(0, 100%, 60%)', '#AAA'], - messages: ['Memory usage strongly above recommended levels!', 'Difference towards highly alerting memory usage'], - impact: 2 - } // 'hsl(0, 100%, 35%)' + name: fm, + unit: unit, + avg: mv, + max: metricConfig.peak, + color: 'danger', + message: 'Memory usage strongly above common levels!', + impact: 3 + } } else if (levelCaution > 0 && levelNormal <= 0) { return { - data: [mv.avg, levelCaution], - color: ['hsl(56, 100%, 50%)', '#AAA'], - messages: ['Memory usage above 
recommended levels', 'Difference towards alerting memory usage'], - impact: 1 - } // '#d5b60a' + name: fm, + unit: unit, + avg: mv, + max: metricConfig.peak, + color: 'warning', + message: 'Memory usage above common levels', + impact: 2 + } } else { return { - data: [mv.avg, levelNormal], - color: ['hsl(100, 100%, 60%)', '#AAA'], - messages: ['Memory usage within recommended levels', 'Difference towards increased memory usage'], - impact: 0 - } // 'hsl(100, 100%, 35%)' - } - } - }) - - console.log("FPD", footprintData) - - // Collect data for chartjs - const footprintLabels = meanVals.map((mv) => [mv.name, 'Threshold']).flat() - const footprintUnits = meanVals.map((mv) => [mv.unit, mv.unit]).flat() - const footprintMessages = footprintData.map((fpd) => fpd.messages).flat() - const footprintResultSum = footprintData.map((fpd) => fpd.impact).reduce((accumulator, currentValue) => { return accumulator + currentValue }, 0) - let footprintResult = '' - - if (footprintResultSum <= 1) { - footprintResult = 'good' - } else if (footprintResultSum > 1 && footprintResultSum <= 3) { - footprintResult = 'well' - } else if (footprintResultSum > 3 && footprintResultSum <= 5) { - footprintResult = 'acceptable' - } else { - footprintResult = 'badly' - } - - $: data = { - labels: footprintLabels, - datasets: [ - { - backgroundColor: footprintData[0].color, - data: footprintData[0].data - }, - { - backgroundColor: footprintData[1].color, - data: footprintData[1].data - }, - { - backgroundColor: footprintData[2].color, - data: footprintData[2].data - }, - { - backgroundColor: footprintData[3].color, - data: footprintData[3].data - } - ] - } - - const options = { - maintainAspectRatio: false, - animation: false, - plugins: { - legend: { - display: displayLegend, - labels: { // see: https://www.chartjs.org/docs/latest/samples/other-charts/multi-series-pie.html - generateLabels: function(chart) { - // Get the default label list - const original = ChartJS.overrides.pie.plugins.legend.labels.generateLabels; - const labelsOriginal = original.call(this, chart); - - // Build an array of colors used in the datasets of the chart - let datasetColors = chart.data.datasets.map(function(e) { - return e.backgroundColor; - }); - datasetColors = datasetColors.flat(); - - // Modify the color and hide state of each label - labelsOriginal.forEach(label => { - // There are twice as many labels as there are datasets. 
This converts the label index into the corresponding dataset index - label.datasetIndex = (label.index - label.index % 2) / 2; - - // The hidden state must match the dataset's hidden state - label.hidden = !chart.isDatasetVisible(label.datasetIndex); - - // Change the color to match the dataset - label.fillStyle = datasetColors[label.index]; - }); - - return labelsOriginal; - } - }, - onClick: function(mouseEvent, legendItem, legend) { - // toggle the visibility of the dataset from what it currently is - legend.chart.getDatasetMeta( - legendItem.datasetIndex - ).hidden = legend.chart.isDatasetVisible(legendItem.datasetIndex); - legend.chart.update(); - } - }, - tooltip: { - callbacks: { - label: function(context) { - const labelIndex = (context.datasetIndex * 2) + context.dataIndex; - if (context.chart.data.labels[labelIndex] === 'Threshold') { - return ' -' + context.formattedValue + ' ' + footprintUnits[labelIndex] - } else { - return ' ' + context.formattedValue + ' ' + footprintUnits[labelIndex] - } - }, - title: function(context) { - const labelIndex = (context[0].datasetIndex * 2) + context[0].dataIndex; - if (context[0].chart.data.labels[labelIndex] === 'Threshold') { - return 'Until ' + context[0].chart.data.labels[labelIndex] - } else { - return 'Average ' + context[0].chart.data.labels[labelIndex] - } - }, - footer: function(context) { - const labelIndex = (context[0].datasetIndex * 2) + context[0].dataIndex; - if (context[0].chart.data.labels[labelIndex] === 'Threshold') { - return footprintMessages[labelIndex] - } else { - return footprintMessages[labelIndex] - } - } + name: fm, + unit: unit, + avg: mv, + max: metricConfig.peak, + color: 'success', + message: 'Memory usage within common levels', + impact: 1 } } } - } + }).filter( Boolean ) + + // console.log("FPD", footprintData) -
    - -
    -
    - Overall Job Performance:  Your job {job.state === 'running' ? 'performs' : 'performed'} {footprintResult}. -
    - + + {#if view === 'job'} + + + Core Metrics Footprint + + + {/if} + + {#each footprintData as fpd} +
    +
    {fpd.name}
    +
    +
    + + {#if fpd.impact === 3} + + {:else if fpd.impact === 2} + + {:else if fpd.impact === -1} + + {:else if fpd.impact === -2} + + {/if} + + {#if fpd.impact === 4} + + {:else if fpd.impact === 3} + + {:else if fpd.impact === 2} + + {:else if fpd.impact === 1} + + {:else if fpd.impact === 0} + + {:else if fpd.impact === -1} + + {:else if fpd.impact === -2} + + {/if} +
    +
    + + {fpd.avg} / {fpd.max} {fpd.unit} +
    +
    +
    +
    + +
    + {/each} + {#if job?.metaData?.message} +
    + {@html job.metaData.message} + {/if} +
    +
    - - diff --git a/web/frontend/src/JobFootprintBars.svelte b/web/frontend/src/JobFootprintBars.svelte deleted file mode 100644 index c21e2b2..0000000 --- a/web/frontend/src/JobFootprintBars.svelte +++ /dev/null @@ -1,281 +0,0 @@ - - - - {#if view === 'job'} - - - Core Metrics Footprint - - - {/if} - - {#each footprintData as fpd} -
    -
    {fpd.name}
    -
    -
    - - {#if fpd.impact === 3} - - {:else if fpd.impact === 2} - - {:else if fpd.impact === -1} - - {:else if fpd.impact === -2} - - {/if} - - {#if fpd.impact === 4} - - {:else if fpd.impact === 3} - - {:else if fpd.impact === 2} - - {:else if fpd.impact === 1} - - {:else if fpd.impact === 0} - - {:else if fpd.impact === -1} - - {:else if fpd.impact === -2} - - {/if} -
    -
    - - {fpd.avg} / {fpd.max} {fpd.unit} -
    -
    -
    -
    - -
    - {/each} - {#if job?.metaData?.message} -
    -
    -
    - Note: {job.metaData.message} -
    -
    - {/if} -
    -
    - - - diff --git a/web/frontend/src/joblist/Row.svelte b/web/frontend/src/joblist/Row.svelte index 359f263..3ecfc51 100644 --- a/web/frontend/src/joblist/Row.svelte +++ b/web/frontend/src/joblist/Row.svelte @@ -15,7 +15,6 @@ import MetricPlot from "../plots/MetricPlot.svelte"; import JobInfo from "./JobInfo.svelte"; import JobFootprint from "../JobFootprint.svelte"; - import JobFootprintBars from "../JobFootprintBars.svelte"; import { maxScope, checkMetricDisabled } from "../utils.js"; export let job; @@ -139,14 +138,8 @@ {:else} {#if showFootprint} - - Date: Wed, 22 Nov 2023 12:12:36 +0100 Subject: [PATCH 18/93] Switch from title to sveltestrap tooltip --- web/frontend/src/JobFootprint.svelte | 20 +++++++++++--------- 1 file changed, 11 insertions(+), 9 deletions(-) diff --git a/web/frontend/src/JobFootprint.svelte b/web/frontend/src/JobFootprint.svelte index 9a06d2f..069e3d8 100644 --- a/web/frontend/src/JobFootprint.svelte +++ b/web/frontend/src/JobFootprint.svelte @@ -7,6 +7,7 @@ CardBody, Progress, Icon, + Tooltip } from "sveltestrap"; import { mean, round } from 'mathjs' // import { findThresholds } from './plots/MetricPlot.svelte' @@ -17,17 +18,17 @@ export let view = 'job' export let width = 'auto' - // console.log('CLUSTER', job.cluster) + console.log('CLUSTER', job.cluster) const footprintMetrics = ['cpu_load', 'flops_any', 'mem_used', 'mem_bw'] // 'acc_utilization' / missing: energy , move to central config before deployment - // console.log('JMs', jobMetrics.filter((jm) => footprintMetrics.includes(jm.name))) + console.log('JMs', jobMetrics.filter((jm) => footprintMetrics.includes(jm.name))) const footprintMetricConfigs = footprintMetrics.map((fm) => { return getContext('metrics')(job.cluster, fm) }).filter( Boolean ) // Filter only "truthy" vals, see: https://stackoverflow.com/questions/28607451/removing-undefined-values-from-array - // console.log("FMCs", footprintMetricConfigs) + console.log("FMCs", footprintMetricConfigs) // const footprintMetricThresholds = footprintMetricConfigs.map((fmc) => { // Only required if scopes smaller than node required // return {name: fmc.name, ...findThresholds(fmc, 'node', job?.subCluster ? job.subCluster : '')} // Merge 2 objects @@ -205,7 +206,7 @@ } }).filter( Boolean ) - // console.log("FPD", footprintData) + console.log("FPD", footprintData) @@ -218,10 +219,10 @@ {/if} - {#each footprintData as fpd} + {#each footprintData as fpd, index}
    -
    {fpd.name}
    -
    +
     {fpd.name}
    +
    {#if fpd.impact === 3} @@ -252,11 +253,12 @@
    - {fpd.avg} / {fpd.max} {fpd.unit} + {fpd.avg} / {fpd.max} {fpd.unit}  
    + {fpd.message}
    -
    +
    Date: Thu, 23 Nov 2023 12:15:35 +0100 Subject: [PATCH 19/93] Add threshold scaling based on used resources - required for shared jobs --- web/frontend/src/JobFootprint.svelte | 135 +++++++++++++++++++++------ 1 file changed, 104 insertions(+), 31 deletions(-) diff --git a/web/frontend/src/JobFootprint.svelte b/web/frontend/src/JobFootprint.svelte index 069e3d8..5b5e3b1 100644 --- a/web/frontend/src/JobFootprint.svelte +++ b/web/frontend/src/JobFootprint.svelte @@ -10,7 +10,6 @@ Tooltip } from "sveltestrap"; import { mean, round } from 'mathjs' - // import { findThresholds } from './plots/MetricPlot.svelte' // import { formatNumber, scaleNumbers } from './units.js' export let job @@ -18,9 +17,29 @@ export let view = 'job' export let width = 'auto' - console.log('CLUSTER', job.cluster) + const isAcceleratedJob = (job.numAcc !== 0) + const isSharedJob = (job.exclusive !== 1) - const footprintMetrics = ['cpu_load', 'flops_any', 'mem_used', 'mem_bw'] // 'acc_utilization' / missing: energy , move to central config before deployment + // console.log('JOB', job) + console.log('ACCELERATED?', isAcceleratedJob) + console.log('SHARED?', isSharedJob) + + const clusters = getContext('clusters') + const subclusterConfig = clusters.find((c) => c.name == job.cluster).subClusters.find((sc) => sc.name == job.subCluster) + + console.log('SCC', subclusterConfig) + + /* NOTES: + - 'mem_allocated' für shared jobs (noch todo / nicht in den jobdaten enthalten bisher) + > For now: 'acc_util' gegen 'mem_used' für alex + - Energy Metric Missiing, muss eingebaut werden + - Diese Config in config.json? + - Erste 5 / letzte 5 pts für avg auslassen? (Wenn minimallänge erreicht?) // Peak limited => Hier eigentlich nicht mein Proble, Ich zeige nur daten an die geliefert werden + */ + + const footprintMetrics = isAcceleratedJob ? + ['cpu_load', 'flops_any', 'acc_utilization', 'mem_bw'] : + ['cpu_load', 'flops_any', 'mem_used', 'mem_bw'] console.log('JMs', jobMetrics.filter((jm) => footprintMetrics.includes(jm.name))) @@ -30,20 +49,20 @@ console.log("FMCs", footprintMetricConfigs) - // const footprintMetricThresholds = footprintMetricConfigs.map((fmc) => { // Only required if scopes smaller than node required - // return {name: fmc.name, ...findThresholds(fmc, 'node', job?.subCluster ? job.subCluster : '')} // Merge 2 objects - // }).filter( Boolean ) + const footprintMetricThresholds = footprintMetricConfigs.map((fmc) => { + return {name: fmc.name, ...findJobThresholds(fmc, job, subclusterConfig)} + }).filter( Boolean ) - // console.log("FMTs", footprintMetricThresholds) + console.log("FMTs", footprintMetricThresholds) const footprintData = footprintMetrics.map((fm) => { const jm = jobMetrics.find((jm) => jm.name === fm && jm.scope === 'node') // ... get Mean let mv = null if (jm?.metric?.statisticsSeries) { - mv = round(mean(jm.metric.statisticsSeries.mean), 2) + mv = round(mean(jm.metric.statisticsSeries.mean), 2) // see above } else if (jm?.metric?.series[0]) { - mv = jm.metric.series[0].statistics.avg + mv = jm.metric.series[0].statistics.avg // see above } // ... get Unit let unit = null @@ -52,13 +71,12 @@ } else { unit = '' } - // From MetricConfig: Scope only for scaling -> Not of interest here - const metricConfig = footprintMetricConfigs.find((fmc) => fmc.name === fm) - // ... get Thresholds - const levelPeak = fm === 'flops_any' ? 
round((metricConfig.peak * 0.85), 0) - mv : metricConfig.peak - mv // Scale flops_any down - const levelNormal = metricConfig.normal - mv - const levelCaution = metricConfig.caution - mv - const levelAlert = metricConfig.alert - mv + // Get Threshold Limits from scaled Thresholds per Metric + const scaledThresholds = footprintMetricThresholds.find((fmc) => fmc.name === fm) + const levelPeak = fm === 'flops_any' ? round((scaledThresholds.peak * 0.85), 0) - mv : scaledThresholds.peak - mv // Scale flops_any down + const levelNormal = scaledThresholds.normal - mv + const levelCaution = scaledThresholds.caution - mv + const levelAlert = scaledThresholds.alert - mv // Collect if (fm !== 'mem_used') { // Alert if usage is low, peak as maxmimum possible (scaled down for flops_any) if (levelAlert > 0) { @@ -66,7 +84,7 @@ name: fm, unit: unit, avg: mv, - max: fm === 'flops_any' ? round((metricConfig.peak * 0.85), 0) : metricConfig.peak, + max: fm === 'flops_any' ? round((scaledThresholds.peak * 0.85), 0) : scaledThresholds.peak, color: 'danger', message: 'Metric strongly below common levels!', impact: 3 @@ -76,7 +94,7 @@ name: fm, unit: unit, avg: mv, - max: fm === 'flops_any' ? round((metricConfig.peak * 0.85), 0) : metricConfig.peak, + max: fm === 'flops_any' ? round((scaledThresholds.peak * 0.85), 0) : scaledThresholds.peak, color: 'warning', message: 'Metric below common levels', impact: 2 @@ -86,7 +104,7 @@ name: fm, unit: unit, avg: mv, - max: fm === 'flops_any' ? round((metricConfig.peak * 0.85), 0) : metricConfig.peak, + max: fm === 'flops_any' ? round((scaledThresholds.peak * 0.85), 0) : scaledThresholds.peak, color: 'success', message: 'Metric within common levels', impact: 1 @@ -96,7 +114,7 @@ name: fm, unit: unit, avg: mv, - max: fm === 'flops_any' ? round((metricConfig.peak * 0.85), 0) : metricConfig.peak, + max: fm === 'flops_any' ? round((scaledThresholds.peak * 0.85), 0) : scaledThresholds.peak, color: 'info', message: 'Metric performs better than common levels', impact: 0 @@ -106,20 +124,20 @@ name: fm, unit: unit, avg: mv, - max: fm === 'flops_any' ? round((metricConfig.peak * 0.85), 0) : metricConfig.peak + max: fm === 'flops_any' ? 
round((scaledThresholds.peak * 0.85), 0) : scaledThresholds.peak } if (checkData.avg >= (1.5 * checkData.max)) { return { ...checkData, - color: 'danger', + color: 'secondary', message: 'Metric average at least 50% above common peak value: Check data for artifacts!', impact: -2 } } else if (checkData.avg >= (1.05 * checkData.max)) { return { ...checkData, - color: 'warning', + color: 'secondary', message: 'Metric average at least 5% above common peak value: Check data for artifacts', impact: -1 } @@ -138,19 +156,19 @@ name: fm, unit: unit, avg: mv, - max: metricConfig.peak + max: scaledThresholds.peak } if (checkData.avg >= (1.5 * checkData.max)) { return { ...checkData, - color: 'danger', + color: 'secondary', message: 'Memory usage at least 50% above possible maximum value: Check data for artifacts!', impact: -2 } } else if (checkData.avg >= (1.05 * checkData.max)) { return { ...checkData, - color: 'warning', + color: 'secondary', message: 'Memory usage at least 5% above possible maximum value: Check data for artifacts!', impact: -1 } @@ -167,7 +185,7 @@ name: fm, unit: unit, avg: mv, - max: metricConfig.peak, + max: scaledThresholds.peak, color: 'danger', message: 'Memory usage extremely above common levels!', impact: 4 @@ -177,7 +195,7 @@ name: fm, unit: unit, avg: mv, - max: metricConfig.peak, + max: scaledThresholds.peak, color: 'danger', message: 'Memory usage strongly above common levels!', impact: 3 @@ -187,7 +205,7 @@ name: fm, unit: unit, avg: mv, - max: metricConfig.peak, + max: scaledThresholds.peak, color: 'warning', message: 'Memory usage above common levels', impact: 2 @@ -197,7 +215,7 @@ name: fm, unit: unit, avg: mv, - max: metricConfig.peak, + max: scaledThresholds.peak, color: 'success', message: 'Memory usage within common levels', impact: 1 @@ -210,11 +228,66 @@ + + {#if view === 'job'} - Core Metrics Footprint + Core Metrics Footprint {isSharedJob ? '(Scaled)' : ''} {/if} From 4e375ff32bd7c024cb9d3515696f54deaa258f80 Mon Sep 17 00:00:00 2001 From: Christoph Kluge Date: Fri, 24 Nov 2023 10:36:22 +0100 Subject: [PATCH 20/93] Handle accelerated and shared jobs --- web/frontend/src/JobFootprint.svelte | 87 +++++++++++++++++----------- web/frontend/src/joblist/Row.svelte | 2 +- 2 files changed, 53 insertions(+), 36 deletions(-) diff --git a/web/frontend/src/JobFootprint.svelte b/web/frontend/src/JobFootprint.svelte index 5b5e3b1..cf3e227 100644 --- a/web/frontend/src/JobFootprint.svelte +++ b/web/frontend/src/JobFootprint.svelte @@ -20,7 +20,7 @@ const isAcceleratedJob = (job.numAcc !== 0) const isSharedJob = (job.exclusive !== 1) - // console.log('JOB', job) + console.log('JOB', job) console.log('ACCELERATED?', isAcceleratedJob) console.log('SHARED?', isSharedJob) @@ -34,12 +34,15 @@ > For now: 'acc_util' gegen 'mem_used' für alex - Energy Metric Missiing, muss eingebaut werden - Diese Config in config.json? - - Erste 5 / letzte 5 pts für avg auslassen? (Wenn minimallänge erreicht?) // Peak limited => Hier eigentlich nicht mein Proble, Ich zeige nur daten an die geliefert werden */ - const footprintMetrics = isAcceleratedJob ? - ['cpu_load', 'flops_any', 'acc_utilization', 'mem_bw'] : - ['cpu_load', 'flops_any', 'mem_used', 'mem_bw'] + const footprintMetrics = isAcceleratedJob + ? isSharedJob + ? ['cpu_load', 'flops_any', 'acc_utilization'] + : ['cpu_load', 'flops_any', 'acc_utilization', 'mem_bw'] + : isSharedJob + ? 
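// (Summary of this nested ternary, for readability:
//    accelerated & shared    -> cpu_load, flops_any, acc_utilization
//    accelerated & exclusive -> cpu_load, flops_any, acc_utilization, mem_bw
//    cpu-only    & shared    -> cpu_load, flops_any, mem_used
//    cpu-only    & exclusive -> cpu_load, flops_any, mem_used, mem_bw)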
['cpu_load', 'flops_any', 'mem_used'] + : ['cpu_load', 'flops_any', 'mem_used', 'mem_bw'] console.log('JMs', jobMetrics.filter((jm) => footprintMetrics.includes(jm.name))) @@ -60,9 +63,12 @@ // ... get Mean let mv = null if (jm?.metric?.statisticsSeries) { - mv = round(mean(jm.metric.statisticsSeries.mean), 2) // see above - } else if (jm?.metric?.series[0]) { - mv = jm.metric.series[0].statistics.avg // see above + mv = round(mean(jm.metric.statisticsSeries.mean), 2) + } else if (jm?.metric?.series?.length > 1) { + const avgs = jm.metric.series.map(jms => jms.statistics.avg) + mv = round(mean(avgs), 2) + } else { + mv = jm.metric.series[0].statistics.avg } // ... get Unit let unit = null @@ -238,15 +244,11 @@ return null } - if (job.numHWThreads == subClusterConfig.topology.node.length || // Job uses all available HWTs of one node - job.numAcc == subClusterConfig.topology.accelerators.length || // Job uses all available GPUs of one node - metricConfig.aggregation == 'avg' ){ // Metric uses "average" aggregation method - - console.log('Job uses all available Resources of one node OR uses "average" aggregation method, use unscaled thresholds') - - let subclusterThresholds = metricConfig.subClusters.find(sc => sc.name == subClusterConfig.name) + let subclusterThresholds = metricConfig.subClusters.find(sc => sc.name == subClusterConfig.name) + if (job.exclusive === 1) { // Exclusive: Use as defined + console.log('Job is exclusive: Use as defined') if (subclusterThresholds) { - console.log('subClusterThresholds found, use subCluster specific thresholds:', subclusterThresholds) + console.log('subClusterThresholds found: use subCluster specific thresholds', subclusterThresholds) return { peak: subclusterThresholds.peak, normal: subclusterThresholds.normal, @@ -254,32 +256,47 @@ alert: subclusterThresholds.alert } } - return { peak: metricConfig.peak, normal: metricConfig.normal, caution: metricConfig.caution, alert: metricConfig.alert } - } + } else { // Shared + if (metricConfig.aggregation === 'avg' ){ + console.log('metric uses "average" aggregation method: use unscaled thresholds except if cpu_load') + if (subclusterThresholds) { + console.log('subClusterThresholds found: use subCluster specific thresholds', subclusterThresholds) + console.log('PEAK/NORMAL USED', metricConfig.name === 'cpu_load' ? job.numHWThreads : subclusterThresholds.peak) + return { // If 'cpu_load': Peak/Normal === #HWThreads, keep other thresholds + peak: metricConfig.name === 'cpu_load' ? job.numHWThreads : subclusterThresholds.peak, + normal: metricConfig.name === 'cpu_load' ? job.numHWThreads : subclusterThresholds.normal, + caution: subclusterThresholds.caution, + alert: subclusterThresholds.alert + } + } + console.log('PEAK/NORMAL USED', metricConfig.name === 'cpu_load' ? job.numHWThreads : metricConfig.peak) + return { + peak: metricConfig.name === 'cpu_load' ? job.numHWThreads : metricConfig.peak, + normal: metricConfig.name === 'cpu_load' ? job.numHWThreads : metricConfig.normal, + caution: metricConfig.caution, + alert: metricConfig.alert + } + } else if (metricConfig.aggregation === 'sum' ){ + const jobFraction = job.numHWThreads / subClusterConfig.topology.node.length + console.log('Fraction', jobFraction) - if (metricConfig.aggregation != 'sum') { - console.warn('Missing or unkown aggregation mode (sum/avg) for metric:', metricConfig) - return null - } - - /* Adapt based on numAccs? 
*/ - const jobFraction = job.numHWThreads / subClusterConfig.topology.node.length - //const fractionAcc = job.numAcc / subClusterConfig.topology.accelerators.length - - console.log('Fraction', jobFraction) - - return { - peak: round((metricConfig.peak * jobFraction), 0), - normal: round((metricConfig.normal * jobFraction), 0), - caution: round((metricConfig.caution * jobFraction), 0), - alert: round((metricConfig.alert * jobFraction), 0) - } + return { + peak: round((metricConfig.peak * jobFraction), 0), + normal: round((metricConfig.normal * jobFraction), 0), + caution: round((metricConfig.caution * jobFraction), 0), + alert: round((metricConfig.alert * jobFraction), 0) + } + } else { + console.warn('Missing or unkown aggregation mode (sum/avg) for metric:', metricConfig) + return null + } + } // Other job.exclusive cases? } diff --git a/web/frontend/src/joblist/Row.svelte b/web/frontend/src/joblist/Row.svelte index 3ecfc51..71bc805 100644 --- a/web/frontend/src/joblist/Row.svelte +++ b/web/frontend/src/joblist/Row.svelte @@ -72,7 +72,7 @@ let queryMetrics = null $: if (showFootprint) { - queryMetrics = ['cpu_load', 'flops_any', 'mem_used', 'mem_bw', ...metrics].filter(distinct) + queryMetrics = ['cpu_load', 'flops_any', 'mem_used', 'mem_bw', 'acc_utilization', ...metrics].filter(distinct) scopes = ["node"] } else { queryMetrics = [...metrics] From e34623b1ceeae2124ffe4f0ffddf83dfad943b50 Mon Sep 17 00:00:00 2001 From: Christoph Kluge Date: Fri, 24 Nov 2023 15:11:38 +0100 Subject: [PATCH 21/93] Add db average stats to gql, use in footprint --- api/schema.graphqls | 5 + internal/graph/generated/generated.go | 226 ++++++++++++++++++++++++ internal/repository/job.go | 5 +- pkg/schema/job.go | 8 +- tools/archive-migration/job.go | 8 +- web/frontend/src/Job.root.svelte | 3 +- web/frontend/src/JobFootprint.svelte | 21 ++- web/frontend/src/joblist/JobList.svelte | 3 + 8 files changed, 264 insertions(+), 15 deletions(-) diff --git a/api/schema.graphqls b/api/schema.graphqls index 69e32e2..01eabc2 100644 --- a/api/schema.graphqls +++ b/api/schema.graphqls @@ -28,6 +28,11 @@ type Job { resources: [Resource!]! 
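# The Float fields added in this hunk (memUsedMax, flopsAnyAvg,
# memBwAvg, loadAvg) mirror columns already stored on the job
# table, so footprints can render without loading full metric data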
concurrentJobs: JobLinkResultList + memUsedMax: Float + flopsAnyAvg: Float + memBwAvg: Float + loadAvg: Float + metaData: Any userData: User } diff --git a/internal/graph/generated/generated.go b/internal/graph/generated/generated.go index f29e2a0..6778e76 100644 --- a/internal/graph/generated/generated.go +++ b/internal/graph/generated/generated.go @@ -88,8 +88,12 @@ type ComplexityRoot struct { ConcurrentJobs func(childComplexity int) int Duration func(childComplexity int) int Exclusive func(childComplexity int) int + FlopsAnyAvg func(childComplexity int) int ID func(childComplexity int) int JobID func(childComplexity int) int + LoadAvg func(childComplexity int) int + MemBwAvg func(childComplexity int) int + MemUsedMax func(childComplexity int) int MetaData func(childComplexity int) int MonitoringStatus func(childComplexity int) int NumAcc func(childComplexity int) int @@ -303,6 +307,7 @@ type JobResolver interface { Tags(ctx context.Context, obj *schema.Job) ([]*schema.Tag, error) ConcurrentJobs(ctx context.Context, obj *schema.Job) (*model.JobLinkResultList, error) + MetaData(ctx context.Context, obj *schema.Job) (interface{}, error) UserData(ctx context.Context, obj *schema.Job) (*model.User, error) } @@ -485,6 +490,13 @@ func (e *executableSchema) Complexity(typeName, field string, childComplexity in return e.complexity.Job.Exclusive(childComplexity), true + case "Job.flopsAnyAvg": + if e.complexity.Job.FlopsAnyAvg == nil { + break + } + + return e.complexity.Job.FlopsAnyAvg(childComplexity), true + case "Job.id": if e.complexity.Job.ID == nil { break @@ -499,6 +511,27 @@ func (e *executableSchema) Complexity(typeName, field string, childComplexity in return e.complexity.Job.JobID(childComplexity), true + case "Job.loadAvg": + if e.complexity.Job.LoadAvg == nil { + break + } + + return e.complexity.Job.LoadAvg(childComplexity), true + + case "Job.memBwAvg": + if e.complexity.Job.MemBwAvg == nil { + break + } + + return e.complexity.Job.MemBwAvg(childComplexity), true + + case "Job.memUsedMax": + if e.complexity.Job.MemUsedMax == nil { + break + } + + return e.complexity.Job.MemUsedMax(childComplexity), true + case "Job.metaData": if e.complexity.Job.MetaData == nil { break @@ -1628,6 +1661,11 @@ type Job { resources: [Resource!]! 
concurrentJobs: JobLinkResultList + memUsedMax: Float + flopsAnyAvg: Float + memBwAvg: Float + loadAvg: Float + metaData: Any userData: User } @@ -4054,6 +4092,170 @@ func (ec *executionContext) fieldContext_Job_concurrentJobs(ctx context.Context, return fc, nil } +func (ec *executionContext) _Job_memUsedMax(ctx context.Context, field graphql.CollectedField, obj *schema.Job) (ret graphql.Marshaler) { + fc, err := ec.fieldContext_Job_memUsedMax(ctx, field) + if err != nil { + return graphql.Null + } + ctx = graphql.WithFieldContext(ctx, fc) + defer func() { + if r := recover(); r != nil { + ec.Error(ctx, ec.Recover(ctx, r)) + ret = graphql.Null + } + }() + resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) { + ctx = rctx // use context from middleware stack in children + return obj.MemUsedMax, nil + }) + if err != nil { + ec.Error(ctx, err) + return graphql.Null + } + if resTmp == nil { + return graphql.Null + } + res := resTmp.(float64) + fc.Result = res + return ec.marshalOFloat2float64(ctx, field.Selections, res) +} + +func (ec *executionContext) fieldContext_Job_memUsedMax(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) { + fc = &graphql.FieldContext{ + Object: "Job", + Field: field, + IsMethod: false, + IsResolver: false, + Child: func(ctx context.Context, field graphql.CollectedField) (*graphql.FieldContext, error) { + return nil, errors.New("field of type Float does not have child fields") + }, + } + return fc, nil +} + +func (ec *executionContext) _Job_flopsAnyAvg(ctx context.Context, field graphql.CollectedField, obj *schema.Job) (ret graphql.Marshaler) { + fc, err := ec.fieldContext_Job_flopsAnyAvg(ctx, field) + if err != nil { + return graphql.Null + } + ctx = graphql.WithFieldContext(ctx, fc) + defer func() { + if r := recover(); r != nil { + ec.Error(ctx, ec.Recover(ctx, r)) + ret = graphql.Null + } + }() + resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) { + ctx = rctx // use context from middleware stack in children + return obj.FlopsAnyAvg, nil + }) + if err != nil { + ec.Error(ctx, err) + return graphql.Null + } + if resTmp == nil { + return graphql.Null + } + res := resTmp.(float64) + fc.Result = res + return ec.marshalOFloat2float64(ctx, field.Selections, res) +} + +func (ec *executionContext) fieldContext_Job_flopsAnyAvg(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) { + fc = &graphql.FieldContext{ + Object: "Job", + Field: field, + IsMethod: false, + IsResolver: false, + Child: func(ctx context.Context, field graphql.CollectedField) (*graphql.FieldContext, error) { + return nil, errors.New("field of type Float does not have child fields") + }, + } + return fc, nil +} + +func (ec *executionContext) _Job_memBwAvg(ctx context.Context, field graphql.CollectedField, obj *schema.Job) (ret graphql.Marshaler) { + fc, err := ec.fieldContext_Job_memBwAvg(ctx, field) + if err != nil { + return graphql.Null + } + ctx = graphql.WithFieldContext(ctx, fc) + defer func() { + if r := recover(); r != nil { + ec.Error(ctx, ec.Recover(ctx, r)) + ret = graphql.Null + } + }() + resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) { + ctx = rctx // use context from middleware stack in children + return obj.MemBwAvg, nil + }) + if err != nil { + ec.Error(ctx, err) + return graphql.Null + } + if resTmp == nil { + return graphql.Null + } + res := resTmp.(float64) + fc.Result = res + return 
ec.marshalOFloat2float64(ctx, field.Selections, res) +} + +func (ec *executionContext) fieldContext_Job_memBwAvg(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) { + fc = &graphql.FieldContext{ + Object: "Job", + Field: field, + IsMethod: false, + IsResolver: false, + Child: func(ctx context.Context, field graphql.CollectedField) (*graphql.FieldContext, error) { + return nil, errors.New("field of type Float does not have child fields") + }, + } + return fc, nil +} + +func (ec *executionContext) _Job_loadAvg(ctx context.Context, field graphql.CollectedField, obj *schema.Job) (ret graphql.Marshaler) { + fc, err := ec.fieldContext_Job_loadAvg(ctx, field) + if err != nil { + return graphql.Null + } + ctx = graphql.WithFieldContext(ctx, fc) + defer func() { + if r := recover(); r != nil { + ec.Error(ctx, ec.Recover(ctx, r)) + ret = graphql.Null + } + }() + resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) { + ctx = rctx // use context from middleware stack in children + return obj.LoadAvg, nil + }) + if err != nil { + ec.Error(ctx, err) + return graphql.Null + } + if resTmp == nil { + return graphql.Null + } + res := resTmp.(float64) + fc.Result = res + return ec.marshalOFloat2float64(ctx, field.Selections, res) +} + +func (ec *executionContext) fieldContext_Job_loadAvg(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) { + fc = &graphql.FieldContext{ + Object: "Job", + Field: field, + IsMethod: false, + IsResolver: false, + Child: func(ctx context.Context, field graphql.CollectedField) (*graphql.FieldContext, error) { + return nil, errors.New("field of type Float does not have child fields") + }, + } + return fc, nil +} + func (ec *executionContext) _Job_metaData(ctx context.Context, field graphql.CollectedField, obj *schema.Job) (ret graphql.Marshaler) { fc, err := ec.fieldContext_Job_metaData(ctx, field) if err != nil { @@ -4778,6 +4980,14 @@ func (ec *executionContext) fieldContext_JobResultList_items(ctx context.Context return ec.fieldContext_Job_resources(ctx, field) case "concurrentJobs": return ec.fieldContext_Job_concurrentJobs(ctx, field) + case "memUsedMax": + return ec.fieldContext_Job_memUsedMax(ctx, field) + case "flopsAnyAvg": + return ec.fieldContext_Job_flopsAnyAvg(ctx, field) + case "memBwAvg": + return ec.fieldContext_Job_memBwAvg(ctx, field) + case "loadAvg": + return ec.fieldContext_Job_loadAvg(ctx, field) case "metaData": return ec.fieldContext_Job_metaData(ctx, field) case "userData": @@ -7152,6 +7362,14 @@ func (ec *executionContext) fieldContext_Query_job(ctx context.Context, field gr return ec.fieldContext_Job_resources(ctx, field) case "concurrentJobs": return ec.fieldContext_Job_concurrentJobs(ctx, field) + case "memUsedMax": + return ec.fieldContext_Job_memUsedMax(ctx, field) + case "flopsAnyAvg": + return ec.fieldContext_Job_flopsAnyAvg(ctx, field) + case "memBwAvg": + return ec.fieldContext_Job_memBwAvg(ctx, field) + case "loadAvg": + return ec.fieldContext_Job_loadAvg(ctx, field) case "metaData": return ec.fieldContext_Job_metaData(ctx, field) case "userData": @@ -12504,6 +12722,14 @@ func (ec *executionContext) _Job(ctx context.Context, sel ast.SelectionSet, obj } out.Concurrently(i, func(ctx context.Context) graphql.Marshaler { return innerFunc(ctx, out) }) + case "memUsedMax": + out.Values[i] = ec._Job_memUsedMax(ctx, field, obj) + case "flopsAnyAvg": + out.Values[i] = ec._Job_flopsAnyAvg(ctx, field, obj) + case "memBwAvg": + out.Values[i] = 
ec._Job_memBwAvg(ctx, field, obj) + case "loadAvg": + out.Values[i] = ec._Job_loadAvg(ctx, field, obj) case "metaData": field := field diff --git a/internal/repository/job.go b/internal/repository/job.go index 76834d1..e1a997a 100644 --- a/internal/repository/job.go +++ b/internal/repository/job.go @@ -60,7 +60,7 @@ func GetJobRepository() *JobRepository { var jobColumns []string = []string{ "job.id", "job.job_id", "job.user", "job.project", "job.cluster", "job.subcluster", "job.start_time", "job.partition", "job.array_job_id", "job.num_nodes", "job.num_hwthreads", "job.num_acc", "job.exclusive", "job.monitoring_status", "job.smt", "job.job_state", - "job.duration", "job.walltime", "job.resources", // "job.meta_data", + "job.duration", "job.walltime", "job.resources", "job.mem_used_max", "job.flops_any_avg", "job.mem_bw_avg", "job.load_avg", // "job.meta_data", } func scanJob(row interface{ Scan(...interface{}) error }) (*schema.Job, error) { @@ -68,7 +68,7 @@ func scanJob(row interface{ Scan(...interface{}) error }) (*schema.Job, error) { if err := row.Scan( &job.ID, &job.JobID, &job.User, &job.Project, &job.Cluster, &job.SubCluster, &job.StartTimeUnix, &job.Partition, &job.ArrayJobId, &job.NumNodes, &job.NumHWThreads, &job.NumAcc, &job.Exclusive, &job.MonitoringStatus, &job.SMT, &job.State, - &job.Duration, &job.Walltime, &job.RawResources /*&job.RawMetaData*/); err != nil { + &job.Duration, &job.Walltime, &job.RawResources, &job.MemUsedMax, &job.FlopsAnyAvg, &job.MemBwAvg, &job.LoadAvg /*&job.RawMetaData*/); err != nil { log.Warnf("Error while scanning rows (Job): %v", err) return nil, err } @@ -483,6 +483,7 @@ func (r *JobRepository) MarkArchived( case "mem_bw": stmt = stmt.Set("mem_bw_avg", stats.Avg) case "load": + stmt = stmt.Set("load_avg", stats.Avg) case "cpu_load": stmt = stmt.Set("load_avg", stats.Avg) case "net_bw": diff --git a/pkg/schema/job.go b/pkg/schema/job.go index ed3a8b6..90bf2cb 100644 --- a/pkg/schema/job.go +++ b/pkg/schema/job.go @@ -54,10 +54,10 @@ type Job struct { BaseJob StartTimeUnix int64 `json:"-" db:"start_time" example:"1649723812"` // Start epoch time stamp in seconds StartTime time.Time `json:"startTime"` // Start time as 'time.Time' data type - MemUsedMax float64 `json:"-" db:"mem_used_max"` // MemUsedMax as Float64 - FlopsAnyAvg float64 `json:"-" db:"flops_any_avg"` // FlopsAnyAvg as Float64 - MemBwAvg float64 `json:"-" db:"mem_bw_avg"` // MemBwAvg as Float64 - LoadAvg float64 `json:"-" db:"load_avg"` // LoadAvg as Float64 + MemUsedMax float64 `json:"memUsedMax" db:"mem_used_max"` // MemUsedMax as Float64 + FlopsAnyAvg float64 `json:"flopsAnyAvg" db:"flops_any_avg"` // FlopsAnyAvg as Float64 + MemBwAvg float64 `json:"memBwAvg" db:"mem_bw_avg"` // MemBwAvg as Float64 + LoadAvg float64 `json:"loadAvg" db:"load_avg"` // LoadAvg as Float64 NetBwAvg float64 `json:"-" db:"net_bw_avg"` // NetBwAvg as Float64 NetDataVolTotal float64 `json:"-" db:"net_data_vol_total"` // NetDataVolTotal as Float64 FileBwAvg float64 `json:"-" db:"file_bw_avg"` // FileBwAvg as Float64 diff --git a/tools/archive-migration/job.go b/tools/archive-migration/job.go index cd54d6c..0dff4b4 100644 --- a/tools/archive-migration/job.go +++ b/tools/archive-migration/job.go @@ -52,10 +52,10 @@ type Job struct { BaseJob StartTimeUnix int64 `json:"-" db:"start_time" example:"1649723812"` // Start epoch time stamp in seconds StartTime time.Time `json:"startTime"` // Start time as 'time.Time' data type - MemUsedMax float64 `json:"-" db:"mem_used_max"` // MemUsedMax as Float64 - FlopsAnyAvg 
float64 `json:"-" db:"flops_any_avg"` // FlopsAnyAvg as Float64 - MemBwAvg float64 `json:"-" db:"mem_bw_avg"` // MemBwAvg as Float64 - LoadAvg float64 `json:"-" db:"load_avg"` // LoadAvg as Float64 + MemUsedMax float64 `json:"memUsedMax" db:"mem_used_max"` // MemUsedMax as Float64 + FlopsAnyAvg float64 `json:"flopsAnyAvg" db:"flops_any_avg"` // FlopsAnyAvg as Float64 + MemBwAvg float64 `json:"memBwAvg" db:"mem_bw_avg"` // MemBwAvg as Float64 + LoadAvg float64 `json:"loadAvg" db:"load_avg"` // LoadAvg as Float64 NetBwAvg float64 `json:"-" db:"net_bw_avg"` // NetBwAvg as Float64 NetDataVolTotal float64 `json:"-" db:"net_data_vol_total"` // NetDataVolTotal as Float64 FileBwAvg float64 `json:"-" db:"file_bw_avg"` // FileBwAvg as Float64 diff --git a/web/frontend/src/Job.root.svelte b/web/frontend/src/Job.root.svelte index 7bd40f8..1b66e33 100644 --- a/web/frontend/src/Job.root.svelte +++ b/web/frontend/src/Job.root.svelte @@ -47,7 +47,8 @@ resources { hostname, hwthreads, accelerators }, metaData, userData { name, email }, - concurrentJobs { items { id, jobId }, count, listQuery } + concurrentJobs { items { id, jobId }, count, listQuery }, + flopsAnyAvg, memBwAvg, loadAvg } `); diff --git a/web/frontend/src/JobFootprint.svelte b/web/frontend/src/JobFootprint.svelte index cf3e227..4313030 100644 --- a/web/frontend/src/JobFootprint.svelte +++ b/web/frontend/src/JobFootprint.svelte @@ -31,9 +31,9 @@ /* NOTES: - 'mem_allocated' für shared jobs (noch todo / nicht in den jobdaten enthalten bisher) - > For now: 'acc_util' gegen 'mem_used' für alex + > For now: 'acc_util' gegen 'mem_used' für alex: Mem bw für shared weggefallen: dann wieder vier bars - Energy Metric Missiing, muss eingebaut werden - - Diese Config in config.json? + - footprintMetrics Config in config.json? */ const footprintMetrics = isAcceleratedJob @@ -60,9 +60,15 @@ const footprintData = footprintMetrics.map((fm) => { const jm = jobMetrics.find((jm) => jm.name === fm && jm.scope === 'node') - // ... get Mean + // ... get Mean: Primarily use backend sourced avgs from job.*, secondarily calculate/read from metricdata let mv = null - if (jm?.metric?.statisticsSeries) { + if (fm === 'cpu_load' && job.loadAvg !== 0) { + mv = round(job.loadAvg, 2) + } else if (fm === 'flops_any' && job.flopsAnyAvg !== 0) { + mv = round(job.flopsAnyAvg, 2) + } else if (fm === 'mem_bw' && job.memBwAvg !== 0) { + mv = round(job.memBwAvg, 2) + } else if (jm?.metric?.statisticsSeries) { mv = round(mean(jm.metric.statisticsSeries.mean), 2) } else if (jm?.metric?.series?.length > 1) { const avgs = jm.metric.series.map(jms => jms.statistics.avg) @@ -356,6 +362,13 @@ />
    {/each} + {#if job?.metaData?.message}
    {@html job.metaData.message} diff --git a/web/frontend/src/joblist/JobList.svelte b/web/frontend/src/joblist/JobList.svelte index 8036361..5f8d89b 100644 --- a/web/frontend/src/joblist/JobList.svelte +++ b/web/frontend/src/joblist/JobList.svelte @@ -74,6 +74,9 @@ name } metaData + flopsAnyAvg + memBwAvg + loadAvg } count } From b8213ef6bea754ba6ff79875c86a8435679683c2 Mon Sep 17 00:00:00 2001 From: Christoph Kluge Date: Fri, 24 Nov 2023 17:22:06 +0100 Subject: [PATCH 22/93] Remove logs, reduce code --- web/frontend/src/JobFootprint.svelte | 246 +++++++++------------------ 1 file changed, 80 insertions(+), 166 deletions(-) diff --git a/web/frontend/src/JobFootprint.svelte b/web/frontend/src/JobFootprint.svelte index 4313030..20b03d6 100644 --- a/web/frontend/src/JobFootprint.svelte +++ b/web/frontend/src/JobFootprint.svelte @@ -10,57 +10,25 @@ Tooltip } from "sveltestrap"; import { mean, round } from 'mathjs' - // import { formatNumber, scaleNumbers } from './units.js' export let job export let jobMetrics export let view = 'job' export let width = 'auto' - const isAcceleratedJob = (job.numAcc !== 0) - const isSharedJob = (job.exclusive !== 1) - - console.log('JOB', job) - console.log('ACCELERATED?', isAcceleratedJob) - console.log('SHARED?', isSharedJob) - - const clusters = getContext('clusters') + const clusters = getContext('clusters') const subclusterConfig = clusters.find((c) => c.name == job.cluster).subClusters.find((sc) => sc.name == job.subCluster) - console.log('SCC', subclusterConfig) - - /* NOTES: - - 'mem_allocated' für shared jobs (noch todo / nicht in den jobdaten enthalten bisher) - > For now: 'acc_util' gegen 'mem_used' für alex: Mem bw für shared weggefallen: dann wieder vier bars - - Energy Metric Missiing, muss eingebaut werden - - footprintMetrics Config in config.json? - */ - - const footprintMetrics = isAcceleratedJob - ? isSharedJob + const footprintMetrics = (job.numAcc !== 0) + ? (job.exclusive !== 1) ? ['cpu_load', 'flops_any', 'acc_utilization'] : ['cpu_load', 'flops_any', 'acc_utilization', 'mem_bw'] - : isSharedJob + : (job.exclusive !== 1) ? ['cpu_load', 'flops_any', 'mem_used'] : ['cpu_load', 'flops_any', 'mem_used', 'mem_bw'] - console.log('JMs', jobMetrics.filter((jm) => footprintMetrics.includes(jm.name))) - - const footprintMetricConfigs = footprintMetrics.map((fm) => { - return getContext('metrics')(job.cluster, fm) - }).filter( Boolean ) // Filter only "truthy" vals, see: https://stackoverflow.com/questions/28607451/removing-undefined-values-from-array - - console.log("FMCs", footprintMetricConfigs) - - const footprintMetricThresholds = footprintMetricConfigs.map((fmc) => { - return {name: fmc.name, ...findJobThresholds(fmc, job, subclusterConfig)} - }).filter( Boolean ) - - console.log("FMTs", footprintMetricThresholds) - const footprintData = footprintMetrics.map((fm) => { - const jm = jobMetrics.find((jm) => jm.name === fm && jm.scope === 'node') - // ... 
get Mean: Primarily use backend sourced avgs from job.*, secondarily calculate/read from metricdata + // Mean: Primarily use backend sourced avgs from job.*, secondarily calculate/read from metricdata let mv = null if (fm === 'cpu_load' && job.loadAvg !== 0) { mv = round(job.loadAvg, 2) @@ -68,94 +36,90 @@ mv = round(job.flopsAnyAvg, 2) } else if (fm === 'mem_bw' && job.memBwAvg !== 0) { mv = round(job.memBwAvg, 2) - } else if (jm?.metric?.statisticsSeries) { - mv = round(mean(jm.metric.statisticsSeries.mean), 2) - } else if (jm?.metric?.series?.length > 1) { - const avgs = jm.metric.series.map(jms => jms.statistics.avg) - mv = round(mean(avgs), 2) - } else { - mv = jm.metric.series[0].statistics.avg + } else { // Calculate from jobMetrics + const jm = jobMetrics.find((jm) => jm.name === fm && jm.scope === 'node') + if (jm?.metric?.statisticsSeries) { + mv = round(mean(jm.metric.statisticsSeries.mean), 2) + } else if (jm?.metric?.series?.length > 1) { + const avgs = jm.metric.series.map(jms => jms.statistics.avg) + mv = round(mean(avgs), 2) + } else { + mv = jm.metric.series[0].statistics.avg + } } - // ... get Unit + + // Unit + const fmc = getContext('metrics')(job.cluster, fm) let unit = null - if (jm?.metric?.unit?.base) { - unit = jm.metric.unit.prefix + jm.metric.unit.base + if (fmc?.unit?.base) { + unit = fmc.unit.prefix + fmc.unit.base } else { unit = '' } - // Get Threshold Limits from scaled Thresholds per Metric - const scaledThresholds = footprintMetricThresholds.find((fmc) => fmc.name === fm) - const levelPeak = fm === 'flops_any' ? round((scaledThresholds.peak * 0.85), 0) - mv : scaledThresholds.peak - mv // Scale flops_any down - const levelNormal = scaledThresholds.normal - mv - const levelCaution = scaledThresholds.caution - mv - const levelAlert = scaledThresholds.alert - mv + + // Threshold / -Differences + const fmt = findJobThresholds(job, fmc, subclusterConfig) + const levelPeak = fm === 'flops_any' ? round((fmt.peak * 0.85), 0) - mv : fmt.peak - mv // Scale flops_any down + const levelNormal = fmt.normal - mv + const levelCaution = fmt.caution - mv + const levelAlert = fmt.alert - mv + + // Define basic data + const fmBase = { + name: fm, + unit: unit, + avg: mv, + max: fm === 'flops_any' ? round((fmt.peak * 0.85), 0) : fmt.peak + } + // Collect if (fm !== 'mem_used') { // Alert if usage is low, peak as maxmimum possible (scaled down for flops_any) if (levelAlert > 0) { return { - name: fm, - unit: unit, - avg: mv, - max: fm === 'flops_any' ? round((scaledThresholds.peak * 0.85), 0) : scaledThresholds.peak, + ...fmBase, color: 'danger', message: 'Metric strongly below common levels!', impact: 3 } } else if (levelCaution > 0) { return { - name: fm, - unit: unit, - avg: mv, - max: fm === 'flops_any' ? round((scaledThresholds.peak * 0.85), 0) : scaledThresholds.peak, + ...fmBase, color: 'warning', message: 'Metric below common levels', impact: 2 } } else if (levelNormal > 0) { return { - name: fm, - unit: unit, - avg: mv, - max: fm === 'flops_any' ? round((scaledThresholds.peak * 0.85), 0) : scaledThresholds.peak, + ...fmBase, color: 'success', message: 'Metric within common levels', impact: 1 } } else if (levelPeak > 0) { return { - name: fm, - unit: unit, - avg: mv, - max: fm === 'flops_any' ? 
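// (The 0.85 factor recurs for flops_any throughout: its thresholds
//  are judged against a reduced peak, presumably a sustainable
//  rather than theoretical maximum; an assumption, the patches do
//  not state the reason.)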
round((scaledThresholds.peak * 0.85), 0) : scaledThresholds.peak, + ...fmBase, color: 'info', message: 'Metric performs better than common levels', impact: 0 } } else { // Possible artifacts - <5% Margin OK, >5% warning, > 50% danger - const checkData = { - name: fm, - unit: unit, - avg: mv, - max: fm === 'flops_any' ? round((scaledThresholds.peak * 0.85), 0) : scaledThresholds.peak - } - - if (checkData.avg >= (1.5 * checkData.max)) { + if (fmBase.avg >= (1.5 * fmBase.max)) { return { - ...checkData, + ...fmBase, color: 'secondary', message: 'Metric average at least 50% above common peak value: Check data for artifacts!', impact: -2 } - } else if (checkData.avg >= (1.05 * checkData.max)) { + } else if (fmBase.avg >= (1.05 * fmBase.max)) { return { - ...checkData, + ...fmBase, color: 'secondary', message: 'Metric average at least 5% above common peak value: Check data for artifacts', impact: -1 } } else { return { - ...checkData, + ...fmBase, color: 'info', message: 'Metric performs better than common levels', impact: 0 @@ -164,29 +128,23 @@ } } else { // Inverse Logic: Alert if usage is high, Peak is bad and limits execution if (levelPeak <= 0 && levelAlert <= 0 && levelCaution <= 0 && levelNormal <= 0) { // Possible artifacts - <5% Margin OK, >5% warning, > 50% danger - const checkData = { - name: fm, - unit: unit, - avg: mv, - max: scaledThresholds.peak - } - if (checkData.avg >= (1.5 * checkData.max)) { + if (fmBase.avg >= (1.5 * fmBase.max)) { return { - ...checkData, + ...fmBase, color: 'secondary', message: 'Memory usage at least 50% above possible maximum value: Check data for artifacts!', impact: -2 } - } else if (checkData.avg >= (1.05 * checkData.max)) { + } else if (fmBase.avg >= (1.05 * fmBase.max)) { return { - ...checkData, + ...fmBase, color: 'secondary', message: 'Memory usage at least 5% above possible maximum value: Check data for artifacts!', impact: -1 } } else { return { - ...checkData, + ...fmBase, color: 'danger', message: 'Memory usage extremely above common levels!', impact: 4 @@ -194,109 +152,72 @@ } } else if (levelAlert <= 0 && levelCaution <= 0 && levelNormal <= 0) { return { - name: fm, - unit: unit, - avg: mv, - max: scaledThresholds.peak, + ...fmBase, color: 'danger', message: 'Memory usage extremely above common levels!', impact: 4 } } else if (levelAlert > 0 && (levelCaution <= 0 && levelNormal <= 0)) { return { - name: fm, - unit: unit, - avg: mv, - max: scaledThresholds.peak, + ...fmBase, color: 'danger', message: 'Memory usage strongly above common levels!', impact: 3 } } else if (levelCaution > 0 && levelNormal <= 0) { return { - name: fm, - unit: unit, - avg: mv, - max: scaledThresholds.peak, + ...fmBase, color: 'warning', message: 'Memory usage above common levels', impact: 2 } } else { return { - name: fm, - unit: unit, - avg: mv, - max: scaledThresholds.peak, + ...fmBase, color: 'success', message: 'Memory usage within common levels', impact: 1 } } } - }).filter( Boolean ) - - console.log("FPD", footprintData) - + }) + + + + (isHistogramConfigOpen = !isHistogramConfigOpen)}> + + Select metrics presented in histograms + + + + + {#each availableMetrics as metric (metric)} + + updateConfiguration({ + name: cluster ? 
`user_view_histogramMetrics:${cluster}` : 'user_view_histogramMetrics', + value: metricsInHistograms + })} /> + + {metric} + + {/each} + + + + + + diff --git a/web/frontend/src/Status.root.svelte b/web/frontend/src/Status.root.svelte index fffbfde..563978d 100644 --- a/web/frontend/src/Status.root.svelte +++ b/web/frontend/src/Status.root.svelte @@ -63,6 +63,8 @@ option.key == ccconfig.status_view_selectedTopUserCategory ); + let metricsInHistograms = ccconfig[`status_view_histogramMetrics:${cluster}`] || ccconfig.status_view_histogramMetrics + const client = getContextClient(); $: mainQuery = queryStore({ client: client, diff --git a/web/frontend/src/User.root.svelte b/web/frontend/src/User.root.svelte index 3871f60..34c5615 100644 --- a/web/frontend/src/User.root.svelte +++ b/web/frontend/src/User.root.svelte @@ -1,7 +1,7 @@ - - - (isHistogramConfigOpen = !isHistogramConfigOpen)}> + (isOpen = !isOpen)}> Select metrics presented in histograms - {#each availableMetrics as metric (metric)} - updateConfiguration({ - name: cluster ? `user_view_histogramMetrics:${cluster}` : 'user_view_histogramMetrics', - value: metricsInHistograms - })} /> - + {metric} {/each} - + diff --git a/web/frontend/src/User.root.svelte b/web/frontend/src/User.root.svelte index 16f22d6..e216aa6 100644 --- a/web/frontend/src/User.root.svelte +++ b/web/frontend/src/User.root.svelte @@ -25,9 +25,10 @@ let jobFilters = []; let sorting = { field: 'startTime', order: 'DESC' }, isSortingOpen = false let metrics = ccconfig.plot_list_selectedMetrics, isMetricsSelectionOpen = false - let w1, w2, histogramHeight = 250 + let w1, w2, histogramHeight = 250, isHistogramSelectionOpen = false let selectedCluster = filterPresets?.cluster ? filterPresets.cluster : null - let metricsInHistograms = ccconfig[`user_view_histogramMetrics:${selectedCluster}`] || ccconfig.user_view_histogramMetrics || [] + + $: metricsInHistograms = selectedCluster ? ccconfig[`user_view_histogramMetrics:${selectedCluster}`] : (ccconfig.user_view_histogramMetrics || []) const client = getContextClient(); $: stats = queryStore({ @@ -73,9 +74,11 @@ Metrics - + {/key} @@ -219,4 +222,9 @@ bind:cluster={selectedCluster} configName="plot_list_selectedMetrics" bind:metrics={metrics} - bind:isOpen={isMetricsSelectionOpen} /> \ No newline at end of file + bind:isOpen={isMetricsSelectionOpen} /> + + \ No newline at end of file From 1185737eaa1b48713bae45556dc77b79e9675454 Mon Sep 17 00:00:00 2001 From: Christoph Kluge Date: Fri, 8 Dec 2023 12:03:04 +0100 Subject: [PATCH 36/93] Add metrics to histoselect, add userfilters - edit struct to make only count return required --- api/schema.graphqls | 6 +- internal/graph/generated/generated.go | 150 +++++++++------------ internal/graph/model/models_gen.go | 8 +- internal/repository/query.go | 2 +- internal/repository/stats.go | 78 +++++++---- web/frontend/src/HistogramSelection.svelte | 2 +- web/frontend/src/User.root.svelte | 2 +- web/frontend/src/utils.js | 18 +-- 8 files changed, 136 insertions(+), 130 deletions(-) diff --git a/api/schema.graphqls b/api/schema.graphqls index 21a9ad2..8a43a54 100644 --- a/api/schema.graphqls +++ b/api/schema.graphqls @@ -293,10 +293,10 @@ type MetricHistoPoints { } type MetricHistoPoint { - min: Int! - max: Int! + bin: Int count: Int! - bin: Int! 
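# Only count stays non-null: sqlite can return empty edge bins,
# so bin/min/max become optional (see models_gen.go below)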
+ min: Int + max: Int } type JobsStatistics { diff --git a/internal/graph/generated/generated.go b/internal/graph/generated/generated.go index f3d4f8a..12d829a 100644 --- a/internal/graph/generated/generated.go +++ b/internal/graph/generated/generated.go @@ -1969,10 +1969,10 @@ type MetricHistoPoints { } type MetricHistoPoint { - min: Int! - max: Int! + bin: Int count: Int! - bin: Int! + min: Int + max: Int } type JobsStatistics { @@ -6336,8 +6336,8 @@ func (ec *executionContext) fieldContext_MetricFootprints_data(ctx context.Conte return fc, nil } -func (ec *executionContext) _MetricHistoPoint_min(ctx context.Context, field graphql.CollectedField, obj *model.MetricHistoPoint) (ret graphql.Marshaler) { - fc, err := ec.fieldContext_MetricHistoPoint_min(ctx, field) +func (ec *executionContext) _MetricHistoPoint_bin(ctx context.Context, field graphql.CollectedField, obj *model.MetricHistoPoint) (ret graphql.Marshaler) { + fc, err := ec.fieldContext_MetricHistoPoint_bin(ctx, field) if err != nil { return graphql.Null } @@ -6350,68 +6350,21 @@ func (ec *executionContext) _MetricHistoPoint_min(ctx context.Context, field gra }() resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) { ctx = rctx // use context from middleware stack in children - return obj.Min, nil + return obj.Bin, nil }) if err != nil { ec.Error(ctx, err) return graphql.Null } if resTmp == nil { - if !graphql.HasFieldError(ctx, fc) { - ec.Errorf(ctx, "must not be null") - } return graphql.Null } - res := resTmp.(int) + res := resTmp.(*int) fc.Result = res - return ec.marshalNInt2int(ctx, field.Selections, res) + return ec.marshalOInt2ᚖint(ctx, field.Selections, res) } -func (ec *executionContext) fieldContext_MetricHistoPoint_min(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) { - fc = &graphql.FieldContext{ - Object: "MetricHistoPoint", - Field: field, - IsMethod: false, - IsResolver: false, - Child: func(ctx context.Context, field graphql.CollectedField) (*graphql.FieldContext, error) { - return nil, errors.New("field of type Int does not have child fields") - }, - } - return fc, nil -} - -func (ec *executionContext) _MetricHistoPoint_max(ctx context.Context, field graphql.CollectedField, obj *model.MetricHistoPoint) (ret graphql.Marshaler) { - fc, err := ec.fieldContext_MetricHistoPoint_max(ctx, field) - if err != nil { - return graphql.Null - } - ctx = graphql.WithFieldContext(ctx, fc) - defer func() { - if r := recover(); r != nil { - ec.Error(ctx, ec.Recover(ctx, r)) - ret = graphql.Null - } - }() - resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) { - ctx = rctx // use context from middleware stack in children - return obj.Max, nil - }) - if err != nil { - ec.Error(ctx, err) - return graphql.Null - } - if resTmp == nil { - if !graphql.HasFieldError(ctx, fc) { - ec.Errorf(ctx, "must not be null") - } - return graphql.Null - } - res := resTmp.(int) - fc.Result = res - return ec.marshalNInt2int(ctx, field.Selections, res) -} - -func (ec *executionContext) fieldContext_MetricHistoPoint_max(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) { +func (ec *executionContext) fieldContext_MetricHistoPoint_bin(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) { fc = &graphql.FieldContext{ Object: "MetricHistoPoint", Field: field, @@ -6468,8 +6421,8 @@ func (ec *executionContext) fieldContext_MetricHistoPoint_count(ctx context.Cont return fc, 
nil } -func (ec *executionContext) _MetricHistoPoint_bin(ctx context.Context, field graphql.CollectedField, obj *model.MetricHistoPoint) (ret graphql.Marshaler) { - fc, err := ec.fieldContext_MetricHistoPoint_bin(ctx, field) +func (ec *executionContext) _MetricHistoPoint_min(ctx context.Context, field graphql.CollectedField, obj *model.MetricHistoPoint) (ret graphql.Marshaler) { + fc, err := ec.fieldContext_MetricHistoPoint_min(ctx, field) if err != nil { return graphql.Null } @@ -6482,24 +6435,62 @@ func (ec *executionContext) _MetricHistoPoint_bin(ctx context.Context, field gra }() resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) { ctx = rctx // use context from middleware stack in children - return obj.Bin, nil + return obj.Min, nil }) if err != nil { ec.Error(ctx, err) return graphql.Null } if resTmp == nil { - if !graphql.HasFieldError(ctx, fc) { - ec.Errorf(ctx, "must not be null") - } return graphql.Null } - res := resTmp.(int) + res := resTmp.(*int) fc.Result = res - return ec.marshalNInt2int(ctx, field.Selections, res) + return ec.marshalOInt2ᚖint(ctx, field.Selections, res) } -func (ec *executionContext) fieldContext_MetricHistoPoint_bin(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) { +func (ec *executionContext) fieldContext_MetricHistoPoint_min(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) { + fc = &graphql.FieldContext{ + Object: "MetricHistoPoint", + Field: field, + IsMethod: false, + IsResolver: false, + Child: func(ctx context.Context, field graphql.CollectedField) (*graphql.FieldContext, error) { + return nil, errors.New("field of type Int does not have child fields") + }, + } + return fc, nil +} + +func (ec *executionContext) _MetricHistoPoint_max(ctx context.Context, field graphql.CollectedField, obj *model.MetricHistoPoint) (ret graphql.Marshaler) { + fc, err := ec.fieldContext_MetricHistoPoint_max(ctx, field) + if err != nil { + return graphql.Null + } + ctx = graphql.WithFieldContext(ctx, fc) + defer func() { + if r := recover(); r != nil { + ec.Error(ctx, ec.Recover(ctx, r)) + ret = graphql.Null + } + }() + resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) { + ctx = rctx // use context from middleware stack in children + return obj.Max, nil + }) + if err != nil { + ec.Error(ctx, err) + return graphql.Null + } + if resTmp == nil { + return graphql.Null + } + res := resTmp.(*int) + fc.Result = res + return ec.marshalOInt2ᚖint(ctx, field.Selections, res) +} + +func (ec *executionContext) fieldContext_MetricHistoPoint_max(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) { fc = &graphql.FieldContext{ Object: "MetricHistoPoint", Field: field, @@ -6636,14 +6627,14 @@ func (ec *executionContext) fieldContext_MetricHistoPoints_data(ctx context.Cont IsResolver: false, Child: func(ctx context.Context, field graphql.CollectedField) (*graphql.FieldContext, error) { switch field.Name { + case "bin": + return ec.fieldContext_MetricHistoPoint_bin(ctx, field) + case "count": + return ec.fieldContext_MetricHistoPoint_count(ctx, field) case "min": return ec.fieldContext_MetricHistoPoint_min(ctx, field) case "max": return ec.fieldContext_MetricHistoPoint_max(ctx, field) - case "count": - return ec.fieldContext_MetricHistoPoint_count(ctx, field) - case "bin": - return ec.fieldContext_MetricHistoPoint_bin(ctx, field) } return nil, fmt.Errorf("no field named %q was found under type 
MetricHistoPoint", field.Name) }, @@ -13542,26 +13533,17 @@ func (ec *executionContext) _MetricHistoPoint(ctx context.Context, sel ast.Selec switch field.Name { case "__typename": out.Values[i] = graphql.MarshalString("MetricHistoPoint") - case "min": - out.Values[i] = ec._MetricHistoPoint_min(ctx, field, obj) - if out.Values[i] == graphql.Null { - out.Invalids++ - } - case "max": - out.Values[i] = ec._MetricHistoPoint_max(ctx, field, obj) - if out.Values[i] == graphql.Null { - out.Invalids++ - } + case "bin": + out.Values[i] = ec._MetricHistoPoint_bin(ctx, field, obj) case "count": out.Values[i] = ec._MetricHistoPoint_count(ctx, field, obj) if out.Values[i] == graphql.Null { out.Invalids++ } - case "bin": - out.Values[i] = ec._MetricHistoPoint_bin(ctx, field, obj) - if out.Values[i] == graphql.Null { - out.Invalids++ - } + case "min": + out.Values[i] = ec._MetricHistoPoint_min(ctx, field, obj) + case "max": + out.Values[i] = ec._MetricHistoPoint_max(ctx, field, obj) default: panic("unknown field " + strconv.Quote(field.Name)) } diff --git a/internal/graph/model/models_gen.go b/internal/graph/model/models_gen.go index eb35bda..7b8ebd2 100644 --- a/internal/graph/model/models_gen.go +++ b/internal/graph/model/models_gen.go @@ -110,10 +110,10 @@ type MetricFootprints struct { } type MetricHistoPoint struct { - Min int `json:"min"` - Max int `json:"max"` - Count int `json:"count"` - Bin int `json:"bin"` + Bin *int `json:"bin,omitempty"` + Count int `json:"count"` + Min *int `json:"min,omitempty"` + Max *int `json:"max,omitempty"` } type MetricHistoPoints struct { diff --git a/internal/repository/query.go b/internal/repository/query.go index 84b8048..317302b 100644 --- a/internal/repository/query.go +++ b/internal/repository/query.go @@ -96,7 +96,7 @@ func SecurityCheck(ctx context.Context, query sq.SelectBuilder) (sq.SelectBuilde user := GetUserFromContext(ctx) if user == nil { var qnil sq.SelectBuilder - return qnil, fmt.Errorf("user context is nil!") + return qnil, fmt.Errorf("user context is nil") } else if user.HasAnyRole([]schema.Role{schema.RoleAdmin, schema.RoleSupport, schema.RoleApi}) { // Admin & Co. 
: All jobs return query, nil } else if user.HasRole(schema.RoleManager) { // Manager : Add filter for managed projects' jobs only + personal jobs diff --git a/internal/repository/stats.go b/internal/repository/stats.go index bd870a4..ab70427 100644 --- a/internal/repository/stats.go +++ b/internal/repository/stats.go @@ -460,13 +460,8 @@ func (r *JobRepository) AddMetricHistograms( stat *model.JobsStatistics) (*model.JobsStatistics, error) { start := time.Now() - for i, m := range metrics { - // DEBUG - fmt.Println(i, m) - var err error - var metricHisto *model.MetricHistoPoints - - metricHisto, err = r.jobsMetricStatisticsHistogram(ctx, m, filter) + for _, m := range metrics { + metricHisto, err := r.jobsMetricStatisticsHistogram(ctx, m, filter) if err != nil { log.Warnf("Error while loading job metric statistics histogram: %s", m) continue @@ -529,6 +524,12 @@ func (r *JobRepository) jobsMetricStatisticsHistogram( dbMetric = "flops_any_avg" case "mem_bw": dbMetric = "mem_bw_avg" + case "mem_used": + dbMetric = "mem_used_max" + case "net_bw": + dbMetric = "net_bw_avg" + case "file_bw": + dbMetric = "file_bw_avg" default: return nil, fmt.Errorf("%s not implemented", metric) } @@ -562,46 +563,67 @@ func (r *JobRepository) jobsMetricStatisticsHistogram( } } + // log.Debugf("Metric %s: DB %s, Peak %f, Unit %s", metric, dbMetric, peak, unit) // Make bins, see https://jereze.com/code/sql-histogram/ - // Diffs: - // CAST(X AS INTEGER) instead of floor(X), used also for for Min , Max selection - // renamed to bin for simplicity and model struct - // Ditched rename from job to data, as it conflicts with security check afterwards - start := time.Now() - prepQuery := sq.Select( - fmt.Sprintf(`CAST(min(job.%s) as INTEGER) as min`, dbMetric), - fmt.Sprintf(`CAST(max(job.%s) as INTEGER) as max`, dbMetric), - fmt.Sprintf(`count(job.%s) as count`, dbMetric), - fmt.Sprintf(`CAST((case when job.%s = value.max then value.max*0.999999999 else job.%s end - value.min) / (value.max - value.min) * 10 as INTEGER) +1 as bin`, dbMetric, dbMetric)) - prepQuery = prepQuery.From("job") - prepQuery = prepQuery.CrossJoin(fmt.Sprintf(`(select max(%s) as max, min(%s) as min from job where %s is not null and %s < %f) as value`, dbMetric, dbMetric, dbMetric, dbMetric, peak)) - prepQuery = prepQuery.Where(fmt.Sprintf(`job.%s is not null and job.%s < %f`, dbMetric, dbMetric, peak)) - query, qerr := SecurityCheck(ctx, prepQuery) + start := time.Now() + + crossJoinQuery := sq.Select( + fmt.Sprintf(`max(%s) as max`, dbMetric), + fmt.Sprintf(`min(%s) as min`, dbMetric), + ).From("job").Where( + fmt.Sprintf(`%s is not null`, dbMetric), + ).Where( + fmt.Sprintf(`%s <= %f`, dbMetric, peak), + ) + + crossJoinQuery, cjqerr := SecurityCheck(ctx, crossJoinQuery) + if cjqerr != nil { + return nil, cjqerr + } + + crossJoinQuerySql, _, sqlerr := crossJoinQuery.ToSql() + if sqlerr != nil { + return nil, sqlerr + } + + bins := 10 + binQuery := fmt.Sprintf(`CAST( (case when job.%s = value.max then value.max*0.999999999 else job.%s end - value.min) / (value.max - value.min) * %d as INTEGER )`, dbMetric, dbMetric, bins) + + mainQuery := sq.Select( + fmt.Sprintf(`%s + 1 as bin`, binQuery), + fmt.Sprintf(`count(job.%s) as count`, dbMetric), + fmt.Sprintf(`CAST(((value.max / %d) * (%s )) as INTEGER ) as min`, bins, binQuery), + fmt.Sprintf(`CAST(((value.max / %d) * (%s + 1 )) as INTEGER ) as max`, bins, binQuery), + ).From("job").CrossJoin( + fmt.Sprintf(`(%s) as value`, crossJoinQuerySql), + ).Where(fmt.Sprintf(`job.%s is not null and job.%s <= 
%f`, dbMetric, dbMetric, peak)) + + mainQuery, qerr := SecurityCheck(ctx, mainQuery) if qerr != nil { return nil, qerr } for _, f := range filters { - query = BuildWhereClause(f, query) + mainQuery = BuildWhereClause(f, mainQuery) } // Finalize query with Grouping and Ordering - query = query.GroupBy("bin").OrderBy("bin") + mainQuery = mainQuery.GroupBy("bin").OrderBy("bin") - rows, err := query.RunWith(r.DB).Query() + rows, err := mainQuery.RunWith(r.DB).Query() if err != nil { - log.Errorf("Error while running query: %s", err) + log.Errorf("Error while running mainQuery: %s", err) return nil, err } points := make([]*model.MetricHistoPoint, 0) for rows.Next() { point := model.MetricHistoPoint{} - if err := rows.Scan(&point.Min, &point.Max, &point.Count, &point.Bin); err != nil { - log.Warn("Error while scanning rows") - return nil, err + if err := rows.Scan(&point.Bin, &point.Count, &point.Min, &point.Max); err != nil { + log.Warnf("Error while scanning rows for %s", metric) + return nil, err // Totally bricks cc-backend if returned and if all metrics requested? } points = append(points, &point) diff --git a/web/frontend/src/HistogramSelection.svelte b/web/frontend/src/HistogramSelection.svelte index afef8c7..142f678 100644 --- a/web/frontend/src/HistogramSelection.svelte +++ b/web/frontend/src/HistogramSelection.svelte @@ -4,10 +4,10 @@ import { gql, getContextClient , mutationStore } from '@urql/svelte' export let cluster - export let availableMetrics = ['cpu_load', 'flops_any', 'mem_bw'] export let metricsInHistograms export let isOpen + let availableMetrics = ['cpu_load', 'flops_any', 'mem_used', 'mem_bw', 'net_bw', 'file_bw'] let pendingMetrics = [...metricsInHistograms] // Copy const client = getContextClient() diff --git a/web/frontend/src/User.root.svelte b/web/frontend/src/User.root.svelte index e216aa6..a26c1aa 100644 --- a/web/frontend/src/User.root.svelte +++ b/web/frontend/src/User.root.svelte @@ -44,7 +44,7 @@ histNumNodes { count, value } histMetrics { metric, unit, data { min, max, count, bin } } }}`, - variables: { jobFilters, metricsInHistograms} + variables: { jobFilters, metricsInHistograms } }) onMount(() => filterComponent.update()) diff --git a/web/frontend/src/utils.js b/web/frontend/src/utils.js index 537ad3f..794a23a 100644 --- a/web/frontend/src/utils.js +++ b/web/frontend/src/utils.js @@ -316,16 +316,18 @@ export function checkMetricDisabled(m, c, s) { //[m]etric, [c]luster, [s]ubclust } export function convert2uplot(canvasData) { - // initial use: Canvas Histogram Data to Uplot + // Prep: Uplot Data Structure let uplotData = [[],[]] // [X, Y1, Y2, ...] + // MetricHisto Only: Check if 1st bin not-null -> Set 0-Value bin for scaling + // Else: Only Single 0-Value bin returned -> No reset required + if (canvasData[0]?.bin) { + uplotData[0].push(0) + uplotData[1].push(0) + } + // Iterate canvasData.forEach( cd => { - if (cd.bin) { // MetricHisto Datafromat - // Force Zero Entry for scaling - if (uplotData[0].length == 0) { - uplotData[0].push(0) - uplotData[1].push(0) - } - uplotData[0].push(cd.max) + if (Object.keys(cd).length == 4) { // MetricHisto Datafromat + uplotData[0].push(cd?.max ? 
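// (MetricHisto points carry exactly four keys: bin, count, min, max;
//  the bin's upper edge becomes the x value, lining bars up with the
//  zero bin prepended above)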
cd.max : 0) uplotData[1].push(cd.count) } else { // Default uplotData[0].push(cd.value) From ee4097a2ddcbd3459da03724777a8de57970eb75 Mon Sep 17 00:00:00 2001 From: Christoph Kluge Date: Mon, 11 Dec 2023 13:55:56 +0100 Subject: [PATCH 37/93] Add missing filters to crossjoinquery --- internal/repository/stats.go | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/internal/repository/stats.go b/internal/repository/stats.go index ab70427..b5813b9 100644 --- a/internal/repository/stats.go +++ b/internal/repository/stats.go @@ -578,10 +578,15 @@ func (r *JobRepository) jobsMetricStatisticsHistogram( ) crossJoinQuery, cjqerr := SecurityCheck(ctx, crossJoinQuery) + if cjqerr != nil { return nil, cjqerr } + for _, f := range filters { + crossJoinQuery = BuildWhereClause(f, crossJoinQuery) + } + crossJoinQuerySql, _, sqlerr := crossJoinQuery.ToSql() if sqlerr != nil { return nil, sqlerr From 119637cb9bc8e857a1cc2ca0d08ee1c385a7e99c Mon Sep 17 00:00:00 2001 From: Christoph Kluge Date: Tue, 12 Dec 2023 15:07:23 +0100 Subject: [PATCH 38/93] Fix using crossjoin arguments not used --- internal/repository/stats.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/internal/repository/stats.go b/internal/repository/stats.go index b5813b9..3ac0490 100644 --- a/internal/repository/stats.go +++ b/internal/repository/stats.go @@ -587,7 +587,7 @@ func (r *JobRepository) jobsMetricStatisticsHistogram( crossJoinQuery = BuildWhereClause(f, crossJoinQuery) } - crossJoinQuerySql, _, sqlerr := crossJoinQuery.ToSql() + crossJoinQuerySql, crossJoinQueryArgs, sqlerr := crossJoinQuery.ToSql() if sqlerr != nil { return nil, sqlerr } @@ -601,7 +601,7 @@ func (r *JobRepository) jobsMetricStatisticsHistogram( fmt.Sprintf(`CAST(((value.max / %d) * (%s )) as INTEGER ) as min`, bins, binQuery), fmt.Sprintf(`CAST(((value.max / %d) * (%s + 1 )) as INTEGER ) as max`, bins, binQuery), ).From("job").CrossJoin( - fmt.Sprintf(`(%s) as value`, crossJoinQuerySql), + fmt.Sprintf(`(%s) as value`, crossJoinQuerySql), crossJoinQueryArgs..., ).Where(fmt.Sprintf(`job.%s is not null and job.%s <= %f`, dbMetric, dbMetric, peak)) mainQuery, qerr := SecurityCheck(ctx, mainQuery) From ee6d286cd78b16309e237b50e68d52d3db9a0965 Mon Sep 17 00:00:00 2001 From: Christoph Kluge Date: Tue, 12 Dec 2023 15:42:14 +0100 Subject: [PATCH 39/93] Small corrections --- web/frontend/src/HistogramSelection.svelte | 5 ++--- web/frontend/src/User.root.svelte | 2 +- 2 files changed, 3 insertions(+), 4 deletions(-) diff --git a/web/frontend/src/HistogramSelection.svelte b/web/frontend/src/HistogramSelection.svelte index 142f678..00f558a 100644 --- a/web/frontend/src/HistogramSelection.svelte +++ b/web/frontend/src/HistogramSelection.svelte @@ -35,13 +35,11 @@ function closeAndApply() { metricsInHistograms = [...pendingMetrics] // Set for parent - + isOpen = !isOpen updateConfiguration({ name: cluster ? 
`user_view_histogramMetrics:${cluster}` : 'user_view_histogramMetrics', value: metricsInHistograms }) - - isOpen = false } @@ -62,5 +60,6 @@ + diff --git a/web/frontend/src/User.root.svelte b/web/frontend/src/User.root.svelte index a26c1aa..5d9c597 100644 --- a/web/frontend/src/User.root.svelte +++ b/web/frontend/src/User.root.svelte @@ -224,7 +224,7 @@ bind:metrics={metrics} bind:isOpen={isMetricsSelectionOpen} /> - \ No newline at end of file From 07073e290a6f02e00929a285fe0080a76e51e2eb Mon Sep 17 00:00:00 2001 From: Christoph Kluge Date: Tue, 12 Dec 2023 16:46:03 +0100 Subject: [PATCH 40/93] feat: add selectable histograms to status view --- internal/repository/stats.go | 113 ++++++++++++++++++++++++++++ web/frontend/src/Status.root.svelte | 59 ++++++++++++++- 2 files changed, 169 insertions(+), 3 deletions(-) diff --git a/internal/repository/stats.go b/internal/repository/stats.go index 3ac0490..4d7be08 100644 --- a/internal/repository/stats.go +++ b/internal/repository/stats.go @@ -8,10 +8,12 @@ import ( "context" "database/sql" "fmt" + "math" "time" "github.com/ClusterCockpit/cc-backend/internal/config" "github.com/ClusterCockpit/cc-backend/internal/graph/model" + "github.com/ClusterCockpit/cc-backend/internal/metricdata" "github.com/ClusterCockpit/cc-backend/pkg/archive" "github.com/ClusterCockpit/cc-backend/pkg/log" "github.com/ClusterCockpit/cc-backend/pkg/schema" @@ -460,6 +462,18 @@ func (r *JobRepository) AddMetricHistograms( stat *model.JobsStatistics) (*model.JobsStatistics, error) { start := time.Now() + // Running Jobs Only: First query jobdata from sqlite, then query data and make bins + for _, f := range filter { + if f.State != nil { + if len(f.State) == 1 && f.State[0] == "running" { + stat.HistMetrics = r.runningJobsMetricStatisticsHistogram(ctx, metrics, filter) + log.Debugf("Timer AddMetricHistograms %s", time.Since(start)) + return stat, nil + } + } + } + + // All other cases: Query and make bins in sqlite directly for _, m := range metrics { metricHisto, err := r.jobsMetricStatisticsHistogram(ctx, m, filter) if err != nil { @@ -639,3 +653,102 @@ func (r *JobRepository) jobsMetricStatisticsHistogram( log.Debugf("Timer jobsStatisticsHistogram %s", time.Since(start)) return &result, nil } + +func (r *JobRepository) runningJobsMetricStatisticsHistogram( + ctx context.Context, + metrics []string, + filters []*model.JobFilter) []*model.MetricHistoPoints { + + // Get Jobs + jobs, err := r.QueryJobs(ctx, filters, &model.PageRequest{Page: 1, ItemsPerPage: 500 + 1}, nil) + if err != nil { + log.Errorf("Error while querying jobs for footprint: %s", err) + return nil + } + if len(jobs) > 500 { + log.Errorf("too many jobs matched (max: %d)", 500) + return nil + } + + // Get AVGs from metric repo + avgs := make([][]schema.Float, len(metrics)) + for i := range avgs { + avgs[i] = make([]schema.Float, 0, len(jobs)) + } + + for _, job := range jobs { + if job.MonitoringStatus == schema.MonitoringStatusDisabled || job.MonitoringStatus == schema.MonitoringStatusArchivingFailed { + continue + } + + if err := metricdata.LoadAverages(job, metrics, avgs, ctx); err != nil { + log.Errorf("Error while loading averages for histogram: %s", err) + return nil + } + } + + // Iterate metrics to fill endresult + data := make([]*model.MetricHistoPoints, 0) + for idx, metric := range metrics { + // Get specific Peak or largest Peak + var metricConfig *schema.MetricConfig + var peak float64 = 0.0 + var unit string = "" + + for _, f := range filters { + if f.Cluster != nil { + metricConfig = 
archive.GetMetricConfig(*f.Cluster.Eq, metric) + peak = metricConfig.Peak + unit = metricConfig.Unit.Prefix + metricConfig.Unit.Base + log.Debugf("Cluster %s filter found with peak %f for %s", *f.Cluster.Eq, peak, metric) + } + } + + if peak == 0.0 { + for _, c := range archive.Clusters { + for _, m := range c.MetricConfig { + if m.Name == metric { + if m.Peak > peak { + peak = m.Peak + } + if unit == "" { + unit = m.Unit.Prefix + m.Unit.Base + } + } + } + } + } + + // Make and fill bins + bins := 10.0 + peakBin := peak / bins + + points := make([]*model.MetricHistoPoint, 0) + for b := 0; b < 10; b++ { + count := 0 + bindex := b + 1 + bmin := math.Round(peakBin * float64(b)) + bmax := math.Round(peakBin * (float64(b) + 1.0)) + + // Iterate AVG values for indexed metric and count for bins + for _, val := range avgs[idx] { + if float64(val) >= bmin && float64(val) < bmax { + count += 1 + } + } + + bminint := int(bmin) + bmaxint := int(bmax) + + // Append Bin to Metric Result Array + point := model.MetricHistoPoint{Bin: &bindex, Count: count, Min: &bminint, Max: &bmaxint} + points = append(points, &point) + } + + // Append Metric Result Array to final results array + result := model.MetricHistoPoints{Metric: metric, Unit: unit, Data: points} + data = append(data, &result) + } + + return data +} diff --git a/web/frontend/src/Status.root.svelte b/web/frontend/src/Status.root.svelte index 563978d..95fc98c 100644 --- a/web/frontend/src/Status.root.svelte +++ b/web/frontend/src/Status.root.svelte @@ -15,6 +15,7 @@ Table, Progress, Icon, + Button } from "sveltestrap"; import { init, convert2uplot, transformPerNodeDataForRoofline } from "./utils.js"; import { scaleNumbers } from "./units.js"; @@ -24,6 +25,8 @@ getContextClient, mutationStore, } from "@urql/svelte"; + import PlotTable from './PlotTable.svelte' + import HistogramSelection from './HistogramSelection.svelte' const { query: initq } = init(); const ccconfig = getContext("cc-config"); @@ -63,7 +66,8 @@ option.key == ccconfig.status_view_selectedTopUserCategory ); - let metricsInHistograms = ccconfig[`status_view_histogramMetrics:${cluster}`] || ccconfig.status_view_histogramMetrics + let isHistogramSelectionOpen = false + $: metricsInHistograms = cluster ? ccconfig[`user_view_histogramMetrics:${cluster}`] : (ccconfig.user_view_histogramMetrics || []) const client = getContextClient(); $: mainQuery = queryStore({ @@ -75,6 +79,7 @@ $metrics: [String!] $from: Time! $to: Time! + $metricsInHistograms: [String!] ) { nodeMetrics( cluster: $cluster @@ -100,7 +105,7 @@ } } - stats: jobsStatistics(filter: $filter) { + stats: jobsStatistics(filter: $filter, metrics: $metricsInHistograms) { histDuration { count value @@ -117,6 +122,16 @@ count value } + histMetrics { + metric + unit + data { + min + max + count + bin + } + } } allocatedNodes(cluster: $cluster) { @@ -131,6 +146,7 @@ from: from.toISOString(), to: to.toISOString(), filter: [{ state: ["running"] }, { cluster: { eq: cluster } }], + metricsInHistograms: metricsInHistograms }, }); @@ -313,7 +329,7 @@
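A note on the binning above: the new runningJobsMetricStatisticsHistogram helper bins plain job averages on the Go side instead of in SQLite, cutting the range [0, peak] into ten equal-width buckets. The following standalone sketch reproduces just that arithmetic; the sample averages, the peak of 100, and the function name binAverages are illustrative assumptions, not part of the patch:

    package main

    import (
    	"fmt"
    	"math"
    )

    // binAverages mirrors the fixed-width binning used for running jobs:
    // the value range [0, peak] is cut into `bins` equal buckets and every
    // job average is counted into the bucket whose [bmin, bmax) contains it.
    func binAverages(avgs []float64, peak float64, bins int) []int {
    	counts := make([]int, bins)
    	binWidth := peak / float64(bins)
    	for _, avg := range avgs {
    		for b := 0; b < bins; b++ {
    			bmin := math.Round(binWidth * float64(b))
    			bmax := math.Round(binWidth * (float64(b) + 1.0))
    			if avg >= bmin && avg < bmax {
    				counts[b]++
    				break
    			}
    		}
    	}
    	return counts
    }

    func main() {
    	// Made-up averages and peak, for illustration only.
    	avgs := []float64{12.5, 48.0, 51.2, 97.3}
    	fmt.Println(binAverages(avgs, 100.0, 10)) // [0 1 0 0 1 1 0 0 0 1]
    	// Note: like the patch's [bmin, bmax) test, a value exactly equal
    	// to peak falls into no bucket.
    }

Rounding the bin edges keeps them consistent with the integer min/max fields that MetricHistoPoint exposes over GraphQL.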

    Current utilization of cluster "{cluster}"

    - + {#if $initq.fetching || $mainQuery.fetching} {:else if $initq.error} @@ -323,6 +339,13 @@ {/if} + + + { @@ -668,4 +691,34 @@ {/key} +
    + {#if metricsInHistograms} + + + {#key $mainQuery.data.stats[0].histMetrics} + + + + + {/key} + + + {/if} {/if} + + From b829a5aafeb3998c663e2afdf1b51286a8b19fa7 Mon Sep 17 00:00:00 2001 From: Christoph Kluge Date: Wed, 13 Dec 2023 11:58:14 +0100 Subject: [PATCH 41/93] Improve binned data histogram legends --- web/frontend/src/Analysis.root.svelte | 5 +++-- web/frontend/src/Status.root.svelte | 5 +++-- web/frontend/src/User.root.svelte | 5 +++-- web/frontend/src/plots/Histogram.svelte | 9 +++++++++ web/frontend/src/utils.js | 6 ------ 5 files changed, 18 insertions(+), 12 deletions(-) diff --git a/web/frontend/src/Analysis.root.svelte b/web/frontend/src/Analysis.root.svelte index aa4ae37..163d511 100644 --- a/web/frontend/src/Analysis.root.svelte +++ b/web/frontend/src/Analysis.root.svelte @@ -389,9 +389,10 @@ diff --git a/web/frontend/src/User.root.svelte b/web/frontend/src/User.root.svelte index 5d9c597..ad08bc6 100644 --- a/web/frontend/src/User.root.svelte +++ b/web/frontend/src/User.root.svelte @@ -192,9 +192,10 @@ diff --git a/web/frontend/src/plots/Histogram.svelte b/web/frontend/src/plots/Histogram.svelte index d3e1aaa..499ea4f 100644 --- a/web/frontend/src/plots/Histogram.svelte +++ b/web/frontend/src/plots/Histogram.svelte @@ -11,6 +11,7 @@ import { Card } from 'sveltestrap' export let data + export let usesBins = false export let width = 500 export let height = 300 export let title = '' @@ -160,6 +161,14 @@ series: [ { label: xunit !== '' ? xunit : null, + value: (u, ts, sidx, didx) => { + if (usesBins) { + const min = u.data[sidx][didx - 1] ? u.data[sidx][didx - 1] : 0 + const max = u.data[sidx][didx] + ts = min + ' - ' + max // narrow spaces + } + return ts + } }, Object.assign({ label: yunit !== '' ? yunit : null, diff --git a/web/frontend/src/utils.js b/web/frontend/src/utils.js index 794a23a..5346208 100644 --- a/web/frontend/src/utils.js +++ b/web/frontend/src/utils.js @@ -318,12 +318,6 @@ export function checkMetricDisabled(m, c, s) { //[m]etric, [c]luster, [s]ubclust export function convert2uplot(canvasData) { // Prep: Uplot Data Structure let uplotData = [[],[]] // [X, Y1, Y2, ...] 
-    // MetricHisto Only: Check if 1st bin not-null -> Set 0-Value bin for scaling
-    // Else: Only Single 0-Value bin returned -> No reset required
-    if (canvasData[0]?.bin) {
-        uplotData[0].push(0)
-        uplotData[1].push(0)
-    }
     // Iterate
     canvasData.forEach( cd => {
         if (Object.keys(cd).length == 4) { // MetricHisto Dataformat

From 6818d1de62d3f289a16a64a97f957a238bceb82a Mon Sep 17 00:00:00 2001
From: Christoph Kluge
Date: Wed, 7 Feb 2024 13:26:13 +0100
Subject: [PATCH 42/93] Resolve pull request comments

---
 tools/archive-migration/job.go       |   8 +-
 web/frontend/src/JobFootprint.svelte | 187 +++++++++------------------
 2 files changed, 64 insertions(+), 131 deletions(-)

diff --git a/tools/archive-migration/job.go b/tools/archive-migration/job.go
index 0dff4b4..cd54d6c 100644
--- a/tools/archive-migration/job.go
+++ b/tools/archive-migration/job.go
@@ -52,10 +52,10 @@ type Job struct {
 	BaseJob
 	StartTimeUnix int64     `json:"-" db:"start_time" example:"1649723812"` // Start epoch time stamp in seconds
 	StartTime     time.Time `json:"startTime"`                              // Start time as 'time.Time' data type
-	MemUsedMax      float64 `json:"memUsedMax" db:"mem_used_max"`   // MemUsedMax as Float64
-	FlopsAnyAvg     float64 `json:"flopsAnyAvg" db:"flops_any_avg"` // FlopsAnyAvg as Float64
-	MemBwAvg        float64 `json:"memBwAvg" db:"mem_bw_avg"`       // MemBwAvg as Float64
-	LoadAvg         float64 `json:"loadAvg" db:"load_avg"`          // LoadAvg as Float64
+	MemUsedMax      float64 `json:"-" db:"mem_used_max"`            // MemUsedMax as Float64
+	FlopsAnyAvg     float64 `json:"-" db:"flops_any_avg"`           // FlopsAnyAvg as Float64
+	MemBwAvg        float64 `json:"-" db:"mem_bw_avg"`              // MemBwAvg as Float64
+	LoadAvg         float64 `json:"-" db:"load_avg"`                // LoadAvg as Float64
 	NetBwAvg        float64 `json:"-" db:"net_bw_avg"`              // NetBwAvg as Float64
 	NetDataVolTotal float64 `json:"-" db:"net_data_vol_total"`      // NetDataVolTotal as Float64
 	FileBwAvg       float64 `json:"-" db:"file_bw_avg"`             // FileBwAvg as Float64
diff --git a/web/frontend/src/JobFootprint.svelte b/web/frontend/src/JobFootprint.svelte
index 20b03d6..04b03a3 100644
--- a/web/frontend/src/JobFootprint.svelte
+++ b/web/frontend/src/JobFootprint.svelte
@@ -50,137 +50,78 @@
         // Unit
         const fmc = getContext('metrics')(job.cluster, fm)
-        let unit = null
-        if (fmc?.unit?.base) {
-            unit = fmc.unit.prefix + fmc.unit.base
-        } else {
-            unit = ''
-        }
+        let unit = ''
+        if (fmc?.unit?.base) unit = fmc.unit.prefix + fmc.unit.base

         // Threshold / -Differences
         const fmt = findJobThresholds(job, fmc, subclusterConfig)
-        const levelPeak = fm === 'flops_any' ? round((fmt.peak * 0.85), 0) - mv : fmt.peak - mv // Scale flops_any down
-        const levelNormal = fmt.normal - mv
-        const levelCaution = fmt.caution - mv
-        const levelAlert = fmt.alert - mv
+        if (fm === 'flops_any') fmt.peak = round((fmt.peak * 0.85), 0)

         // Define basic data
         const fmBase = {
             name: fm,
             unit: unit,
             avg: mv,
-            max: fm === 'flops_any' ? round((fmt.peak * 0.85), 0) : fmt.peak
+            max: fmt.peak
         }

-        // Collect
-        if (fm !== 'mem_used') { // Alert if usage is low, peak as maxmimum possible (scaled down for flops_any)
-            if (levelAlert > 0) {
-                return {
-                    ...fmBase,
-                    color: 'danger',
-                    message: 'Metric strongly below common levels!',
-                    impact: 3
-                }
-            } else if (levelCaution > 0) {
-                return {
-                    ...fmBase,
-                    color: 'warning',
-                    message: 'Metric below common levels',
-                    impact: 2
-                }
-            } else if (levelNormal > 0) {
-                return {
-                    ...fmBase,
-                    color: 'success',
-                    message: 'Metric within common levels',
-                    impact: 1
-                }
-            } else if (levelPeak > 0) {
-                return {
-                    ...fmBase,
-                    color: 'info',
-                    message: 'Metric performs better than common levels',
-                    impact: 0
-                }
-            } else { // Possible artifacts - <5% Margin OK, >5% warning, > 50% danger
-                if (fmBase.avg >= (1.5 * fmBase.max)) {
-                    return {
-                        ...fmBase,
-                        color: 'secondary',
-                        message: 'Metric average at least 50% above common peak value: Check data for artifacts!',
-                        impact: -2
-                    }
-                } else if (fmBase.avg >= (1.05 * fmBase.max)) {
-                    return {
-                        ...fmBase,
-                        color: 'secondary',
-                        message: 'Metric average at least 5% above common peak value: Check data for artifacts',
-                        impact: -1
-                    }
-                } else {
-                    return {
-                        ...fmBase,
-                        color: 'info',
-                        message: 'Metric performs better than common levels',
-                        impact: 0
-                    }
-                }
-            }
-        } else { // Inverse Logic: Alert if usage is high, Peak is bad and limits execution
-            if (levelPeak <= 0 && levelAlert <= 0 && levelCaution <= 0 && levelNormal <= 0) { // Possible artifacts - <5% Margin OK, >5% warning, > 50% danger
-                if (fmBase.avg >= (1.5 * fmBase.max)) {
-                    return {
-                        ...fmBase,
-                        color: 'secondary',
-                        message: 'Memory usage at least 50% above possible maximum value: Check data for artifacts!',
-                        impact: -2
-                    }
-                } else if (fmBase.avg >= (1.05 * fmBase.max)) {
-                    return {
-                        ...fmBase,
-                        color: 'secondary',
-                        message: 'Memory usage at least 5% above possible maximum value: Check data for artifacts!',
-                        impact: -1
-                    }
-                } else {
-                    return {
-                        ...fmBase,
-                        color: 'danger',
-                        message: 'Memory usage extremely above common levels!',
-                        impact: 4
-                    }
-                }
-            } else if (levelAlert <= 0 && levelCaution <= 0 && levelNormal <= 0) {
-                return {
-                    ...fmBase,
-                    color: 'danger',
-                    message: 'Memory usage extremely above common levels!',
-                    impact: 4
-                }
-            } else if (levelAlert > 0 && (levelCaution <= 0 && levelNormal <= 0)) {
-                return {
-                    ...fmBase,
-                    color: 'danger',
-                    message: 'Memory usage strongly above common levels!',
-                    impact: 3
-                }
-            } else if (levelCaution > 0 && levelNormal <= 0) {
-                return {
-                    ...fmBase,
-                    color: 'warning',
-                    message: 'Memory usage above common levels',
-                    impact: 2
-                }
-            } else {
-                return {
-                    ...fmBase,
-                    color: 'success',
-                    message: 'Memory usage within common levels',
-                    impact: 1
-                }
-            }
-        }
+        if (evalFootprint(fm, mv, fmt, 'alert')) {
+            return {
+                ...fmBase,
+                color: 'danger',
+                message: `Metric average way ${fm === 'mem_used' ? 'above' : 'below' } expected normal thresholds.`,
+                impact: 3
+            }
+        } else if (evalFootprint(fm, mv, fmt, 'caution')) {
+            return {
+                ...fmBase,
+                color: 'warning',
+                message: `Metric average ${fm === 'mem_used' ? 'above' : 'below' } expected normal thresholds.`,
+                impact: 2
+            }
+        } else if (evalFootprint(fm, mv, fmt, 'normal')) {
+            return {
+                ...fmBase,
+                color: 'success',
+                message: 'Metric average within expected thresholds.',
+                impact: 1
+            }
+        } else if (evalFootprint(fm, mv, fmt, 'peak')) {
+            return {
+                ...fmBase,
+                color: 'info',
+                message: 'Metric average above expected normal thresholds: Check for artifacts recommended.',
+                impact: 0
+            }
+        } else {
+            return {
+                ...fmBase,
+                color: 'secondary',
+                message: 'Metric average above expected peak threshold: Check for artifacts!',
+                impact: -1
+            }
+        }
     })
+
+    function evalFootprint(metric, mean, thresholds, level) {
+        // mem_used has inverse logic regarding threshold levels
+        switch (level) {
+            case 'peak':
+                if (metric === 'mem_used') return (mean <= thresholds.peak && mean > thresholds.alert)
+                else return (mean <= thresholds.peak && mean > thresholds.normal)
+            case 'alert':
+                if (metric === 'mem_used') return (mean <= thresholds.alert && mean > thresholds.caution)
+                else return (mean <= thresholds.alert && mean > 0)
+            case 'caution':
+                if (metric === 'mem_used') return (mean <= thresholds.caution && mean > thresholds.normal)
+                else return (mean <= thresholds.caution && mean > thresholds.alert)
+            case 'normal':
+                if (metric === 'mem_used') return (mean <= thresholds.normal && mean > 0)
+                else return (mean <= thresholds.normal && mean > thresholds.caution)
+            default:
+                return false
+        }
+    }
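The rewritten evalFootprint above collapses the old if/else cascade into four ordered threshold bands (alert, caution, normal, peak), with the comparison direction inverted for mem_used, where a high average is the problem. A rough standalone Go rendering of the same banding; the bands struct and the sample values are illustrative assumptions, not values from any cluster config:

    package main

    import "fmt"

    // bands holds the per-metric thresholds; for most metrics they are
    // ordered alert < caution < normal < peak (low usage is bad), while
    // for mem_used the interpretation flips (high usage is bad).
    type bands struct{ alert, caution, normal, peak float64 }

    // evalBand mirrors evalFootprint: it reports whether a job's mean
    // falls into the requested band, inverting the tests for mem_used.
    func evalBand(metric string, mean float64, b bands, level string) bool {
    	memUsed := metric == "mem_used"
    	switch level {
    	case "peak":
    		if memUsed {
    			return mean <= b.peak && mean > b.alert
    		}
    		return mean <= b.peak && mean > b.normal
    	case "alert":
    		if memUsed {
    			return mean <= b.alert && mean > b.caution
    		}
    		return mean <= b.alert && mean > 0
    	case "caution":
    		if memUsed {
    			return mean <= b.caution && mean > b.normal
    		}
    		return mean <= b.caution && mean > b.alert
    	case "normal":
    		if memUsed {
    			return mean <= b.normal && mean > 0
    		}
    		return mean <= b.normal && mean > b.caution
    	}
    	return false
    }

    func main() {
    	// Illustrative thresholds; real values come from the cluster's metricConfig.
    	flops := bands{alert: 10, caution: 30, normal: 60, peak: 100}
    	for _, level := range []string{"alert", "caution", "normal", "peak"} {
    		if evalBand("flops_any", 42.0, flops, level) {
    			fmt.Println("flops_any avg 42.0 =>", level) // prints "normal"
    			break
    		}
    	}
    }

Checking the bands in the fixed order alert, caution, normal, peak reproduces the component's first-match-wins behaviour, with the secondary "above peak" card as the fall-through case.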
    +
    {#if $statsQuery.error} - - - {$statsQuery.error.message} - - + + + {$statsQuery.error.message} + + {:else if $statsQuery.data} - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    Total Jobs{$statsQuery.data.stats[0].totalJobs}
    Short Jobs{$statsQuery.data.stats[0].shortJobs}
    Total Walltime{$statsQuery.data.stats[0].totalWalltime}
    Total Node Hours{$statsQuery.data.stats[0].totalNodeHours}
    Total Core Hours{$statsQuery.data.stats[0].totalCoreHours}
    Total Accelerator Hours{$statsQuery.data.stats[0].totalAccHours}
    - - -
    -
    Top - + {#each groupOptions as option} + + {/each} + +
    + {#key $topQuery.data} + {#if $topQuery.fetching} + + {:else if $topQuery.error} + {$topQuery.error.message} + {:else} + t[sortSelection.key], + )} + entities={$topQuery.data.topList.map((t) => t.id)} + /> + {/if} + {/key} +
    + + + {#key $topQuery.data} + {#if $topQuery.fetching} + + {:else if $topQuery.error} + {$topQuery.error.message} + {:else} + + + + + - {#key $topQuery.data} - {#if $topQuery.fetching} - - {:else if $topQuery.error} - {$topQuery.error.message} + + + {#each $topQuery.data.topList as te, i} + + + {#if groupSelection.key == "user"} + {:else} -
    Legend{groupSelection.label} + - - {#key $topQuery.data} - {#if $topQuery.fetching} - - {:else if $topQuery.error} - {$topQuery.error.message} - {:else} - t[sortSelection.key])} - entities={$topQuery.data.topList.map((t) => t.id)} - /> - {/if} - {/key} - - -
    {te.id}
    - - - - - - {#each $topQuery.data.topList as te, i} - - - {#if groupSelection.key == 'user'} - - {:else} - - {/if} - - - {/each} -
    Legend{groupSelection.label} - -
    {te.id}{te.id}{te[sortSelection.key]}
    + {te.id} {/if} - {/key} - -
    - - - {#if $rooflineQuery.fetching} - - {:else if $rooflineQuery.error} - {$rooflineQuery.error.message} - {:else if $rooflineQuery.data && cluster} -
    - {#key $rooflineQuery.data} - - {/key} -
    - {/if} - - -
    - {#key $statsQuery.data.stats[0].histDuration} - - {/key} -
    - - -
    - {#key $statsQuery.data.stats[0].histNumCores} - - {/key} -
    - -
    + {te[sortSelection.key]} + + {/each} + + {/if} + {/key} + + + + + {#if $rooflineQuery.fetching} + + {:else if $rooflineQuery.error} + {$rooflineQuery.error.message} + {:else if $rooflineQuery.data && cluster} +
    + {#key $rooflineQuery.data} + + {/key} +
    + {/if} + + +
    + {#key $statsQuery.data.stats[0].histDuration} + + {/key} +
    + + +
    + {#key $statsQuery.data.stats[0].histNumCores} + + {/key} +
    + +
    {/if} -
    +
    {#if $footprintsQuery.error} - - - {$footprintsQuery.error.message} - - + + + {$footprintsQuery.error.message} + + {:else if $footprintsQuery.data && $initq.data} - - - - These histograms show the distribution of the averages of all jobs matching the filters. Each job/average is weighted by its node hours by default - (Accelerator hours for native accelerator scope metrics, coreHours for native core scope metrics). - Note that some metrics could be disabled for specific subclusters as per metricConfig and thus could affect shown average values. - -
    - -
    - - - ({ metric, ...binsFromFootprint( - $footprintsQuery.data.footprints.timeWeights, - metricConfig(cluster.name, metric)?.scope, - $footprintsQuery.data.footprints.metrics.find(f => f.metric == metric).data, numBins) }))} - itemsPerRow={ccconfig.plot_view_plotsPerRow}> - - - - - -
    - - - - Each circle represents one job. The size of a circle is proportional to its node hours. Darker circles mean multiple jobs have the same averages for the respective metrics. - Note that some metrics could be disabled for specific subclusters as per metricConfig and thus could affect shown average values. - -
    - -
    - - - ({ - m1, f1: $footprintsQuery.data.footprints.metrics.find(f => f.metric == m1).data, - m2, f2: $footprintsQuery.data.footprints.metrics.find(f => f.metric == m2).data }))} - itemsPerRow={ccconfig.plot_view_plotsPerRow}> - - - - - + + + + These histograms show the distribution of the averages of all jobs + matching the filters. Each job/average is weighted by its node hours by + default (Accelerator hours for native accelerator scope metrics, + coreHours for native core scope metrics). Note that some metrics could + be disabled for specific subclusters as per metricConfig and thus could + affect shown average values. + +
    + +
    + + + ({ + metric, + ...binsFromFootprint( + $footprintsQuery.data.footprints.timeWeights, + metricConfig(cluster.name, metric)?.scope, + $footprintsQuery.data.footprints.metrics.find( + (f) => f.metric == metric, + ).data, + numBins, + ), + }))} + itemsPerRow={ccconfig.plot_view_plotsPerRow} + > + + + + +
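The explanation above is the key to reading these histograms: each job contributes its weight (node hours by default; the real code in binsFromFootprint picks accelerator or core hours depending on the metric's scope) to its bin rather than a flat count of one, so a single long, wide job can outweigh many small ones. A minimal Go sketch of such a weighted histogram, assuming equal-width bins over [0, max); the jobAvg type and the sample weights are made up for illustration:

    package main

    import "fmt"

    type jobAvg struct {
    	avg       float64 // job's average for one metric
    	nodeHours float64 // weight: duration * number of nodes
    }

    // weightedHist bins job averages into `bins` equal buckets over
    // [0, max), accumulating node hours instead of simple counts.
    func weightedHist(jobs []jobAvg, max float64, bins int) []float64 {
    	hist := make([]float64, bins)
    	width := max / float64(bins)
    	for _, j := range jobs {
    		b := int(j.avg / width)
    		if b < 0 || b >= bins {
    			continue // out-of-range averages are dropped
    		}
    		hist[b] += j.nodeHours
    	}
    	return hist
    }

    func main() {
    	// Two small jobs and one large job sharing a bin: the large job dominates.
    	jobs := []jobAvg{{avg: 12, nodeHours: 2}, {avg: 55, nodeHours: 3}, {avg: 58, nodeHours: 40}}
    	fmt.Println(weightedHist(jobs, 100, 10)) // [0 2 0 0 0 43 0 0 0 0]
    }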
    + + + + Each circle represents one job. The size of a circle is proportional to + its node hours. Darker circles mean multiple jobs have the same averages + for the respective metrics. Note that some metrics could be disabled for + specific subclusters as per metricConfig and thus could affect shown + average values. + +
    + +
    + + + ({ + m1, + f1: $footprintsQuery.data.footprints.metrics.find( + (f) => f.metric == m1, + ).data, + m2, + f2: $footprintsQuery.data.footprints.metrics.find( + (f) => f.metric == m2, + ).data, + }))} + itemsPerRow={ccconfig.plot_view_plotsPerRow} + > + + + + {/if} diff --git a/web/frontend/src/Config.root.svelte b/web/frontend/src/Config.root.svelte index 6df579f..ddd714f 100644 --- a/web/frontend/src/Config.root.svelte +++ b/web/frontend/src/Config.root.svelte @@ -1,31 +1,30 @@ {#if isAdmin == true} - + - Admin Options + Admin Options - - + + {/if} - - Plotting Options - - + + Plotting Options + + diff --git a/web/frontend/src/Header.svelte b/web/frontend/src/Header.svelte index 03c8cd0..cc96dd0 100644 --- a/web/frontend/src/Header.svelte +++ b/web/frontend/src/Header.svelte @@ -1,178 +1,169 @@ - - ClusterCockpit Logo - - (isOpen = !isOpen)} /> - (isOpen = detail.isOpen)} - > - - - + + ClusterCockpit Logo + + (isOpen = !isOpen)} /> + (isOpen = detail.isOpen)} + > + + + diff --git a/web/frontend/src/HistogramSelection.svelte b/web/frontend/src/HistogramSelection.svelte index 9856742..39b1872 100644 --- a/web/frontend/src/HistogramSelection.svelte +++ b/web/frontend/src/HistogramSelection.svelte @@ -1,65 +1,73 @@ - (isOpen = !isOpen)}> - - Select metrics presented in histograms - - - - {#each availableMetrics as metric (metric)} - - - {metric} - - {/each} - - - - - - + (isOpen = !isOpen)}> + Select metrics presented in histograms + + + {#each availableMetrics as metric (metric)} + + + {metric} + + {/each} + + + + + + diff --git a/web/frontend/src/Job.root.svelte b/web/frontend/src/Job.root.svelte index 42cfee8..2020e1d 100644 --- a/web/frontend/src/Job.root.svelte +++ b/web/frontend/src/Job.root.svelte @@ -1,43 +1,50 @@ - - {#if $initq.error} - {$initq.error.message} - {:else if $initq.data} - - {:else} - - {/if} - - {#if $jobMetrics.data} - {#key $jobMetrics.data} - - - - {/key} - {/if} - {#if $jobMetrics.data && $initq.data} - {#if $initq.data.job.concurrentJobs != null && $initq.data.job.concurrentJobs.items.length != 0} - {#if authlevel > roles.manager} - -
    - Concurrent Jobs -
    -
      -
    • - See All -
    • - {#each $initq.data.job.concurrentJobs.items as pjob, index} -
    • - {pjob.jobId} -
    • - {/each} -
    - - {:else} - -
    - {$initq.data.job.concurrentJobs.items.length} Concurrent - Jobs -
    -

    - Number of shared jobs on the same node with overlapping - runtimes. -

    - - {/if} - {/if} - - - - - c.name == $initq.data.job.cluster) - .subClusters.find( - (sc) => sc.name == $initq.data.job.subCluster - )} - data={ - transformDataForRoofline ( - $jobMetrics.data.jobMetrics.find((m) => m.name == "flops_any" && m.scope == "node").metric, - $jobMetrics.data.jobMetrics.find((m) => m.name == "mem_bw" && m.scope == "node").metric - ) - } - /> - + + {#if $initq.error} + {$initq.error.message} + {:else if $initq.data} + {:else} - - + {/if} + + {#if $jobMetrics.data} + {#key $jobMetrics.data} + + + + {/key} + {/if} + {#if $jobMetrics.data && $initq.data} + {#if $initq.data.job.concurrentJobs != null && $initq.data.job.concurrentJobs.items.length != 0} + {#if authlevel > roles.manager} + +
    + Concurrent Jobs +
    +
      +
    • + See All +
    • + {#each $initq.data.job.concurrentJobs.items as pjob, index} +
    • + {pjob.jobId} +
    • + {/each} +
    + + {:else} + +
    + {$initq.data.job.concurrentJobs.items.length} Concurrent Jobs +
    +

    + Number of shared jobs on the same node with overlapping runtimes. +

    + + {/if} + {/if} + + + + + c.name == $initq.data.job.cluster) + .subClusters.find((sc) => sc.name == $initq.data.job.subCluster)} + data={transformDataForRoofline( + $jobMetrics.data.jobMetrics.find( + (m) => m.name == "flops_any" && m.scope == "node", + ).metric, + $jobMetrics.data.jobMetrics.find( + (m) => m.name == "mem_bw" && m.scope == "node", + ).metric, + )} + /> + + {:else} + + + {/if}
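transformDataForRoofline above pairs the node-scope flops_any and mem_bw series into roofline points; the underlying relation is arithmetic intensity I = FLOPS / memory bandwidth on the x-axis against the achieved FLOP rate on the y-axis. A small Go sketch of that pairing under the assumption of synchronous samples; the rooflinePoint type and the sample numbers are illustrative, not the GraphQL types:

    package main

    import "fmt"

    type rooflinePoint struct {
    	intensity float64 // FLOP/byte: flops / memory bandwidth
    	flops     float64 // achieved FLOP rate
    }

    // toRoofline pairs synchronous flops and memory-bandwidth samples
    // into (intensity, performance) points; mismatched or zero-bandwidth
    // samples are skipped to avoid division by zero.
    func toRoofline(flops, memBw []float64) []rooflinePoint {
    	n := len(flops)
    	if len(memBw) < n {
    		n = len(memBw)
    	}
    	points := make([]rooflinePoint, 0, n)
    	for i := 0; i < n; i++ {
    		if memBw[i] <= 0 {
    			continue
    		}
    		points = append(points, rooflinePoint{intensity: flops[i] / memBw[i], flops: flops[i]})
    	}
    	return points
    }

    func main() {
    	// Illustrative samples: GF/s and GB/s for one node over three timesteps.
    	fmt.Println(toRoofline([]float64{120, 200, 90}, []float64{60, 40, 0}))
    	// => [{2 120} {5 200}] — the zero-bandwidth sample is dropped
    }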
    - - {#if $initq.data} - - {/if} - - - {#if $initq.data} - - {/if} - - - - {#if $jobMetrics.error} - {#if $initq.data.job.monitoringStatus == 0 || $initq.data.job.monitoringStatus == 2} - Not monitored or archiving failed -
    - {/if} - {$jobMetrics.error.message} - {:else if $jobMetrics.fetching} - - {:else if $jobMetrics.data && $initq.data} - - {#if item.data} - - statsTable.moreLoaded(detail)} - job={$initq.data.job} - metricName={item.metric} - rawData={item.data.map((x) => x.metric)} - scopes={item.data.map((x) => x.scope)} - {width} - isShared={$initq.data.job.exclusive != 1} - resources={$initq.data.job.resources} - /> - {:else} - No dataset returned for {item.metric} - {/if} - + + {#if $jobMetrics.error} + {#if $initq.data.job.monitoringStatus == 0 || $initq.data.job.monitoringStatus == 2} + Not monitored or archiving failed +
    + {/if} + {$jobMetrics.error.message} + {:else if $jobMetrics.fetching} + + {:else if $jobMetrics.data && $initq.data} + + {#if item.data} + statsTable.moreLoaded(detail)} + job={$initq.data.job} + metricName={item.metric} + rawData={item.data.map((x) => x.metric)} + scopes={item.data.map((x) => x.scope)} + {width} + isShared={$initq.data.job.exclusive != 1} + resources={$initq.data.job.resources} + /> + {:else} + No dataset returned for {item.metric} {/if} - + + {/if} +
    - - {#if $initq.data} - - {#if somethingMissing} - -
    - - - Missing Metrics/Reseources - - - {#if missingMetrics.length > 0} -

    - No data at all is available for the - metrics: {missingMetrics.join(", ")} -

    - {/if} - {#if missingHosts.length > 0} -

    - Some metrics are missing for the - following hosts: -

    -
      - {#each missingHosts as missing} -
    • - {missing.hostname}: {missing.metrics.join( - ", " - )} -
    • - {/each} -
    - {/if} -
    -
    -
    -
    - {/if} - - {#if $jobMetrics.data} - {#key $jobMetrics.data} - - {/key} - {/if} - - -
    - {#if $initq.data.job.metaData?.jobScript} -
    {$initq.data.job.metaData?.jobScript}
    - {:else} - No job script available - {/if} -
    -
    - -
    - {#if $initq.data.job.metaData?.slurmInfo} -
    {$initq.data.job.metaData?.slurmInfo}
    - {:else} - No additional slurm information available - {/if} -
    -
    -
    + + {#if $initq.data} + + {#if somethingMissing} + +
+ + + Missing Metrics/Resources + + {#if missingMetrics.length > 0} +

    + No data at all is available for the metrics: {missingMetrics.join( + ", ", + )} +

    + {/if} + {#if missingHosts.length > 0} +

    Some metrics are missing for the following hosts:

    +
      + {#each missingHosts as missing} +
    • + {missing.hostname}: {missing.metrics.join(", ")} +
    • + {/each} +
    + {/if} +
    +
    +
    +
    {/if} - + + {#if $jobMetrics.data} + {#key $jobMetrics.data} + + {/key} + {/if} + + +
    + {#if $initq.data.job.metaData?.jobScript} +
    {$initq.data.job.metaData?.jobScript}
    + {:else} + No job script available + {/if} +
    +
    + +
    + {#if $initq.data.job.metaData?.slurmInfo} +
    {$initq.data.job.metaData?.slurmInfo}
    + {:else} + No additional slurm information available + {/if} +
    +
    +
    + {/if} +
    {#if $initq.data} - + {/if} diff --git a/web/frontend/src/JobFootprint.svelte b/web/frontend/src/JobFootprint.svelte index 30034c0..e3a3ff7 100644 --- a/web/frontend/src/JobFootprint.svelte +++ b/web/frontend/src/JobFootprint.svelte @@ -1,232 +1,263 @@ - - - {#if view === 'job'} + {#if view === "job"} - - Core Metrics Footprint - + + Core Metrics Footprint + + {/if} + + {#each footprintData as fpd, index} +
    +
     {fpd.name}
    + +
    +
    + + {#if fpd.impact === 3 || fpd.impact === -1} + + {:else if fpd.impact === 2} + + {/if} + + {#if fpd.impact === 3} + + {:else if fpd.impact === 2} + + {:else if fpd.impact === 1} + + {:else if fpd.impact === 0} + + {:else if fpd.impact === -1} + + {/if} +
    +
    + + {fpd.avg} / {fpd.max} + {fpd.unit}   +
    +
    + {fpd.message} +
    +
    + +
    + {/each} + {#if job?.metaData?.message} +
    + {@html job.metaData.message} {/if} - - {#each footprintData as fpd, index} -
    -
     {fpd.name}
    -
    -
    - - {#if fpd.impact === 3 || fpd.impact === -1} - - {:else if fpd.impact === 2} - - {/if} - - {#if fpd.impact === 3} - - {:else if fpd.impact === 2} - - {:else if fpd.impact === 1} - - {:else if fpd.impact === 0} - - {:else if fpd.impact === -1} - - {/if} -
    -
    - - {fpd.avg} / {fpd.max} {fpd.unit}   -
    -
    - {fpd.message} -
    -
    - -
    - {/each} - {#if job?.metaData?.message} -
    - {@html job.metaData.message} - {/if} -
    +
    diff --git a/web/frontend/src/Jobs.root.svelte b/web/frontend/src/Jobs.root.svelte index d7cbf37..204a4e3 100644 --- a/web/frontend/src/Jobs.root.svelte +++ b/web/frontend/src/Jobs.root.svelte @@ -1,102 +1,121 @@ - {#if $initq.fetching} - - - - {:else if $initq.error} - - {$initq.error.message} - - {/if} + {#if $initq.fetching} + + + + {:else if $initq.error} + + {$initq.error.message} + + {/if} - - - - - - - { - selectedCluster = detail.filters[0]?.cluster ? detail.filters[0].cluster.eq : null - jobList.update(detail.filters) - } - } /> - + + + + + + + { + selectedCluster = detail.filters[0]?.cluster + ? detail.filters[0].cluster.eq + : null; + jobList.update(detail.filters); + }} + /> + - - filterComponent.update(detail)}/> - - - jobList.refresh()} /> - + + filterComponent.update(detail)} + /> + + + jobList.refresh()} /> + -
    +
    - - - + + + - + + bind:cluster={selectedCluster} + configName="plot_list_selectedMetrics" + bind:metrics + bind:isOpen={isMetricsSelectionOpen} + bind:showFootprint + view="list" +/> diff --git a/web/frontend/src/List.root.svelte b/web/frontend/src/List.root.svelte index c004736..bc1ac6f 100644 --- a/web/frontend/src/List.root.svelte +++ b/web/frontend/src/List.root.svelte @@ -2,52 +2,58 @@ @component List of users or projects --> - - - - - - - - { - jobFilters = detail.filters; - }} - /> - + + + + + + + + { + jobFilters = detail.filters; + }} + /> + - + + + + {#if type == "USER"} + + {/if} + + + + + + + + {#if $stats.fetching} + + + + {:else if $stats.error} + + + + {:else if $stats.data} + {#each sort($stats.data.rows, sorting, nameFilter) as row (row.id)} - + - {/if} - - - - - - - - {#if $stats.fetching} - - - - {:else if $stats.error} - - - - {:else if $stats.data} - {#each sort($stats.data.rows, sorting, nameFilter) as row (row.id)} - - - {#if type == "USER"} - - {/if} - - - - - + {scrambleNames ? scramble(row.id) : row.id} + {:else if type == "PROJECT"} + {scrambleNames ? scramble(row.id) : row.id} {:else} - - - - {/each} - {/if} - + {row.id} + {/if} + + {#if type == "USER"} + + {/if} + + + + + + {:else} + + + + {/each} + {/if} +
    + {{ + USER: "Username", + PROJECT: "Project Name", + }[type]} + + + Name + + + Total Jobs + + + Total Walltime + + + Total Core Hours + + + Total Accelerator Hours + +
    {$stats.error.message}
    - {({ - USER: "Username", - PROJECT: "Project Name", - })[type]} - - {#if type == "USER"} - - Name - - - Total Jobs - - - Total Walltime - - - Total Core Hours - - - Total Accelerator Hours - -
    {$stats.error.message}
    - {#if type == "USER"} - {scrambleNames ? scramble(row.id) : row.id} - {:else if type == "PROJECT"} - {scrambleNames ? scramble(row.id) : row.id} - {:else} - {row.id} - {/if} - {scrambleNames ? scramble(row?.name?row.name:"-") : row?.name?row.name:"-"}{row.totalJobs}{row.totalWalltime}{row.totalCoreHours}{row.totalAccHours}
    No {type.toLowerCase()}s/jobs found
    {scrambleNames + ? scramble(row?.name ? row.name : "-") + : row?.name + ? row.name + : "-"}{row.totalJobs}{row.totalWalltime}{row.totalCoreHours}{row.totalAccHours}
    No {type.toLowerCase()}s/jobs found
    diff --git a/web/frontend/src/Metric.svelte b/web/frontend/src/Metric.svelte index 8ff0a58..6022ffb 100644 --- a/web/frontend/src/Metric.svelte +++ b/web/frontend/src/Metric.svelte @@ -1,95 +1,118 @@ + - - {metricName} ({(metricConfig?.unit?.prefix ? metricConfig.unit.prefix : '') + - (metricConfig?.unit?.base ? metricConfig.unit.base : '')}) - - - {#if job.resources.length > 1} - + + {metricName} ({(metricConfig?.unit?.prefix + ? metricConfig.unit.prefix + : "") + (metricConfig?.unit?.base ? metricConfig.unit.base : "")}) + + + {#if job.resources.length > 1} + + {/if} {#key series} - {#if fetching == true} - - {:else if error != null} - {error.message} - {:else if series != null} - - {/if} + {#if fetching == true} + + {:else if error != null} + {error.message} + {:else if series != null} + + {/if} {/key} diff --git a/web/frontend/src/MetricSelection.svelte b/web/frontend/src/MetricSelection.svelte index 5f55cb7..689abef 100644 --- a/web/frontend/src/MetricSelection.svelte +++ b/web/frontend/src/MetricSelection.svelte @@ -8,181 +8,206 @@ --> - - - (isOpen = !isOpen)}> - - Configure columns (Metric availability shown) - - - - {#if view === 'list'} -
  • - Show Footprint -
  • -
    - {/if} - {#each newMetricsOrder as metric, index (metric)} -
  • columnsDragStart(event, index)} - on:drop|preventDefault={event => columnsDrag(event, index)} - on:dragenter={() => columnHovering = index} - class:is-active={columnHovering === index}> - {#if unorderedMetrics.includes(metric)} - - {:else} - - {/if} - {metric} - - {cluster == null ? - clusters // No single cluster specified: List Clusters with Metric - .filter(c => c.metricConfig.find(m => m.name == metric) != null) - .map(c => c.name).join(', ') : - clusters // Single cluster requested: List Subclusters with do not have metric remove flag - .filter(c => c.name == cluster) - .filter(c => c.metricConfig.find(m => m.name == metric) != null) - .map(function(c) { - let scNames = c.subClusters.map(sc => sc.name) - scNames.forEach(function(scName){ - let met = c.metricConfig.find(m => m.name == metric) - let msc = met.subClusters.find(msc => msc.name == scName) - if (msc != null) { - if (msc.remove == true) { - scNames = scNames.filter(scn => scn != msc.name) - } - } - }) - return scNames - }) - .join(', ')} - -
  • - {/each} -
    -
    - - - + (isOpen = !isOpen)}> + Configure columns (Metric availability shown) + + + {#if view === "list"} +
  • + Show Footprint +
  • +
    + {/if} + {#each newMetricsOrder as metric, index (metric)} +
  • columnsDragStart(event, index)} + on:drop|preventDefault={(event) => columnsDrag(event, index)} + on:dragenter={() => (columnHovering = index)} + class:is-active={columnHovering === index} + > + {#if unorderedMetrics.includes(metric)} + + {:else} + + {/if} + {metric} + + {cluster == null + ? clusters // No single cluster specified: List Clusters with Metric + .filter( + (c) => c.metricConfig.find((m) => m.name == metric) != null, + ) + .map((c) => c.name) + .join(", ") + : clusters // Single cluster requested: List Subclusters with do not have metric remove flag + .filter((c) => c.name == cluster) + .filter( + (c) => c.metricConfig.find((m) => m.name == metric) != null, + ) + .map(function (c) { + let scNames = c.subClusters.map((sc) => sc.name); + scNames.forEach(function (scName) { + let met = c.metricConfig.find((m) => m.name == metric); + let msc = met.subClusters.find( + (msc) => msc.name == scName, + ); + if (msc != null) { + if (msc.remove == true) { + scNames = scNames.filter((scn) => scn != msc.name); + } + } + }); + return scNames; + }) + .join(", ")} + +
  • + {/each} +
    +
    + + +
    + + diff --git a/web/frontend/src/NavbarLinks.svelte b/web/frontend/src/NavbarLinks.svelte index 6861da5..24ecddf 100644 --- a/web/frontend/src/NavbarLinks.svelte +++ b/web/frontend/src/NavbarLinks.svelte @@ -1,39 +1,38 @@ {#each links as item} - {#if !item.perCluster} - {item.title} - {:else} - - - - {item.title} - - - {#each clusters as cluster} - - {cluster.name} - - {/each} - - - {/if} + {#if !item.perCluster} + {item.title} + {:else} + + + + {item.title} + + + {#each clusters as cluster} + + {cluster.name} + + {/each} + + + {/if} {/each} diff --git a/web/frontend/src/NavbarTools.svelte b/web/frontend/src/NavbarTools.svelte index f6ded90..f44b4e9 100644 --- a/web/frontend/src/NavbarTools.svelte +++ b/web/frontend/src/NavbarTools.svelte @@ -1,143 +1,153 @@ diff --git a/web/frontend/src/Node.root.svelte b/web/frontend/src/Node.root.svelte index b23c71e..0a5a75e 100644 --- a/web/frontend/src/Node.root.svelte +++ b/web/frontend/src/Node.root.svelte @@ -1,238 +1,230 @@ - {#if $initq.error} - {$initq.error.message} - {:else if $initq.fetching} + {#if $initq.error} + {$initq.error.message} + {:else if $initq.fetching} + + {:else} + + + + {hostname} ({cluster}) + + + + {#if $nodeJobsData.fetching} - {:else} - - - - {hostname} ({cluster}) - - - - {#if $nodeJobsData.fetching} - - {:else if $nodeJobsData.data} - Currently running jobs on this node: {$nodeJobsData.data.jobs - .count} - [ - View in Job List ] - {:else} - No currently running jobs. - {/if} - - - { - const diff = Date.now() - to - from = new Date(from.getTime() + diff) - to = new Date(to.getTime() + diff) - }} /> - - - - - {/if} + {:else if $nodeJobsData.data} + Currently running jobs on this node: {$nodeJobsData.data.jobs.count} + [ + View in Job List ] + {:else} + No currently running jobs. + {/if} + + + { + const diff = Date.now() - to; + from = new Date(from.getTime() + diff); + to = new Date(to.getTime() + diff); + }} + /> + + + + + {/if}
    - - {#if $nodeMetricsData.error} - {$nodeMetricsData.error.message} - {:else if $nodeMetricsData.fetching || $initq.fetching} - + + {#if $nodeMetricsData.error} + {$nodeMetricsData.error.message} + {:else if $nodeMetricsData.fetching || $initq.fetching} + + {:else} + ({ + ...m, + disabled: checkMetricDisabled( + m.name, + cluster, + $nodeMetricsData.data.nodeMetrics[0].subCluster, + ), + })) + .sort((a, b) => a.name.localeCompare(b.name))} + > +

    + {item.name} + {metricUnits[item.name]} +

    + {#if item.disabled === false && item.metric} + c.name == cluster)} + subCluster={$nodeMetricsData.data.nodeMetrics[0].subCluster} + series={item.metric.series} + resources={[{ hostname: hostname }]} + forNode={true} + /> + {:else if item.disabled === true && item.metric} + Metric disabled for subcluster {item.name}:{$nodeMetricsData.data.nodeMetrics[0] + .subCluster} {:else} - ({ - ...m, - disabled: checkMetricDisabled( - m.name, - cluster, - $nodeMetricsData.data.nodeMetrics[0].subCluster - ), - })) - .sort((a, b) => a.name.localeCompare(b.name))} - > -

    - {item.name} - {metricUnits[item.name]} -

    - {#if item.disabled === false && item.metric} - c.name == cluster)} - subCluster={$nodeMetricsData.data.nodeMetrics[0] - .subCluster} - series={item.metric.series} - resources={[{hostname: hostname}]} - forNode={true} - /> - {:else if item.disabled === true && item.metric} - Metric disabled for subcluster {item.name}:{$nodeMetricsData.data.nodeMetrics[0] - .subCluster} - {:else} - No dataset returned for {item.name} - {/if} -
    + No dataset returned for {item.name} {/if} - +
    + {/if} +
    diff --git a/web/frontend/src/PlotSelection.svelte b/web/frontend/src/PlotSelection.svelte index 449de64..b4cf58b 100644 --- a/web/frontend/src/PlotSelection.svelte +++ b/web/frontend/src/PlotSelection.svelte @@ -1,139 +1,163 @@ - - - (isHistogramConfigOpen = !isHistogramConfigOpen)}> - - Select metrics presented in histograms - - - - {#each availableMetrics as metric (metric)} - - updateConfiguration({ - name: 'analysis_view_histogramMetrics', - value: metricsInHistograms - })} /> + (isHistogramConfigOpen = !isHistogramConfigOpen)} +> + Select metrics presented in histograms + + + {#each availableMetrics as metric (metric)} + + + updateConfiguration({ + name: "analysis_view_histogramMetrics", + value: metricsInHistograms, + })} + /> - {metric} - - {/each} - - - - - + {metric} + + {/each} + + + + + - (isScatterPlotConfigOpen = !isScatterPlotConfigOpen)}> - - Select metric pairs presented in scatter plots - - - - {#each metricsInScatterplots as pair} - - {pair[0]} / {pair[1]} + (isScatterPlotConfigOpen = !isScatterPlotConfigOpen)} +> + Select metric pairs presented in scatter plots + + + {#each metricsInScatterplots as pair} + + {pair[0]} / {pair[1]} - - - {/each} - + + + {/each} + -
    +
    - - - - - - -
    - - - + + + + + + + + +
    diff --git a/web/frontend/src/StatsTable.svelte b/web/frontend/src/StatsTable.svelte index e1d0c02..3a9d84d 100644 --- a/web/frontend/src/StatsTable.svelte +++ b/web/frontend/src/StatsTable.svelte @@ -1,139 +1,154 @@ - - - - {#each selectedMetrics as metric} - - {/each} - - - - {#each selectedMetrics as metric} - {#if selectedScopes[metric] != 'node'} - - {/if} - {#each ['min', 'avg', 'max'] as stat} - - {/each} - {/each} - - - - {#each hosts as host (host)} - - - {#each selectedMetrics as metric (metric)} - - {/each} - + + + + {#each selectedMetrics as metric} + + {/each} + + + + {#each selectedMetrics as metric} + {#if selectedScopes[metric] != "node"} + + {/if} + {#each ["min", "avg", "max"] as stat} + {/each} - + {/each} + + + + {#each hosts as host (host)} + + + {#each selectedMetrics as metric (metric)} + + {/each} + + {/each} +
    - - - - - {metric} - - - -
    NodeId sortBy(metric, stat)}> - {stat} - {#if selectedScopes[metric] == 'node'} - - {/if} -
    {host}
    + + + + + {metric} + + + +
    NodeId sortBy(metric, stat)}> + {stat} + {#if selectedScopes[metric] == "node"} + + {/if} +
    {host}
    -
    +
    + cluster={job.cluster} + configName="job_view_nodestats_selectedMetrics" + allMetrics={new Set(allMetrics)} + bind:metrics={selectedMetrics} + bind:isOpen={isMetricSelectionOpen} +/> diff --git a/web/frontend/src/StatsTableEntry.svelte b/web/frontend/src/StatsTableEntry.svelte index 5e497d4..99cde21 100644 --- a/web/frontend/src/StatsTableEntry.svelte +++ b/web/frontend/src/StatsTableEntry.svelte @@ -1,82 +1,86 @@ {#if series == null || series.length == 0} - No data -{:else if series.length == 1 && scope == 'node'} - - {series[0].statistics.min} - - - {series[0].statistics.avg} - - - {series[0].statistics.max} - + No data +{:else if series.length == 1 && scope == "node"} + + {series[0].statistics.min} + + + {series[0].statistics.avg} + + + {series[0].statistics.max} + {:else} - - - - {#each ['id', 'min', 'avg', 'max'] as field} - - {/each} - - {#each series as s, i} - - - - - - - {/each} -
    sortByField(field)}> - Sort - -
    {s.id ?? i}{s.statistics.min}{s.statistics.avg}{s.statistics.max}
    - + + + + {#each ["id", "min", "avg", "max"] as field} + + {/each} + + {#each series as s, i} + + + + + + + {/each} +
    sortByField(field)}> + Sort + +
    {s.id ?? i}{s.statistics.min}{s.statistics.avg}{s.statistics.max}
    + {/if} diff --git a/web/frontend/src/Status.root.svelte b/web/frontend/src/Status.root.svelte index d0d7ba1..b5ccec0 100644 --- a/web/frontend/src/Status.root.svelte +++ b/web/frontend/src/Status.root.svelte @@ -1,366 +1,358 @@ - -

    Current utilization of cluster "{cluster}"

    - - - {#if $initq.fetching || $mainQuery.fetching} - - {:else if $initq.error} - {$initq.error.message} - {:else} - - {/if} - - - - - - { - from = new Date(Date.now() - 5 * 60 * 1000); - to = new Date(Date.now()); - }} - /> - + +

    Current utilization of cluster "{cluster}"

    + + + {#if $initq.fetching || $mainQuery.fetching} + + {:else if $initq.error} + {$initq.error.message} + {:else} + + {/if} + + + + + + { + from = new Date(Date.now() - 5 * 60 * 1000); + to = new Date(Date.now()); + }} + /> +
    {#if $mainQuery.error} - - - {$mainQuery.error.message} - - + + + {$mainQuery.error.message} + + {/if}
    @@ -368,358 +360,318 @@ {#if $initq.data && $mainQuery.data} - {#each $initq.data.clusters.find((c) => c.name == cluster).subClusters as subCluster, i} - - - - - SubCluster "{subCluster.name}" - - - - - - - - - - - - - - - - - - -
    Allocated Nodes
    - -
    {allocatedNodes[subCluster.name]} / {subCluster.numberOfNodes} - Nodes
    Flop Rate (Any)
    - -
    - {scaleNumbers( - flopRate[subCluster.name], - subCluster.flopRateSimd.value * - subCluster.numberOfNodes, - flopRateUnitPrefix[subCluster.name] - )}{flopRateUnitBase[subCluster.name]} [Max] -
    MemBw Rate
    - -
    - {scaleNumbers( - memBwRate[subCluster.name], - subCluster.memoryBandwidth.value * - subCluster.numberOfNodes, - memBwRateUnitPrefix[subCluster.name] - )}{memBwRateUnitBase[subCluster.name]} [Max] -
    -
    -
    - - -
    - {#key $mainQuery.data.nodeMetrics} - data.subCluster == subCluster.name - ) - ) - } - /> - {/key} -
    - -
    - {/each} + {#each $initq.data.clusters.find((c) => c.name == cluster).subClusters as subCluster, i} + + + + + SubCluster "{subCluster.name}" + + + + + + + + + + + + + + + + + + +
    Allocated Nodes
    + +
    {allocatedNodes[subCluster.name]} / {subCluster.numberOfNodes} + Nodes
    Flop Rate (Any)
    + +
    + {scaleNumbers( + flopRate[subCluster.name], + subCluster.flopRateSimd.value * subCluster.numberOfNodes, + flopRateUnitPrefix[subCluster.name], + )}{flopRateUnitBase[subCluster.name]} [Max] +
    MemBw Rate
    + +
    + {scaleNumbers( + memBwRate[subCluster.name], + subCluster.memoryBandwidth.value * subCluster.numberOfNodes, + memBwRateUnitPrefix[subCluster.name], + )}{memBwRateUnitBase[subCluster.name]} [Max] +
    +
    +
    + + +
    + {#key $mainQuery.data.nodeMetrics} + data.subCluster == subCluster.name, + ), + )} + /> + {/key} +
    + +
    + {/each} -
    +
    - + + + +
    +

    + Top Users on {cluster.charAt(0).toUpperCase() + cluster.slice(1)} +

    + {#key $topUserQuery.data} + {#if $topUserQuery.fetching} + + {:else if $topUserQuery.error} + {$topUserQuery.error.message} + {:else} + tu[topUserSelection.key], + )} + entities={$topUserQuery.data.topUser.map((tu) => tu.id)} + /> + {/if} + {/key} +
    + + + {#key $topUserQuery.data} + {#if $topUserQuery.fetching} + + {:else if $topUserQuery.error} + {$topUserQuery.error.message} + {:else} + + + + + + + {#each $topUserQuery.data.topUser as tu, i} + + + + + + {/each} +
    LegendUser NameNumber of + +
    {tu.id}{tu[topUserSelection.key]}
    + {/if} + {/key} + + +

    + Top Projects on {cluster.charAt(0).toUpperCase() + cluster.slice(1)} +

    + {#key $topProjectQuery.data} + {#if $topProjectQuery.fetching} + + {:else if $topProjectQuery.error} + {$topProjectQuery.error.message} + {:else} + tp[topProjectSelection.key], + )} + entities={$topProjectQuery.data.topProjects.map((tp) => tp.id)} + /> + {/if} + {/key} + + + {#key $topProjectQuery.data} + {#if $topProjectQuery.fetching} + + {:else if $topProjectQuery.error} + {$topProjectQuery.error.message} + {:else} + + + + + + + {#each $topProjectQuery.data.topProjects as tp, i} + + + + + + {/each} +
    LegendProject CodeNumber of + +
    {tp.id}{tp[topProjectSelection.key]}
    + {/if} + {/key} + +
    +
    + + +
    + {#key $mainQuery.data.stats} + + {/key} +
    + + + {#key $mainQuery.data.stats} + + {/key} + +
    + + +
    + {#key $mainQuery.data.stats} + + {/key} +
    + + + {#key $mainQuery.data.stats} + + {/key} + +
    +
    + {#if metricsInHistograms} - -
    -

    - Top Users on {cluster.charAt(0).toUpperCase() + - cluster.slice(1)} -

    - {#key $topUserQuery.data} - {#if $topUserQuery.fetching} - - {:else if $topUserQuery.error} - {$topUserQuery.error.message} - {:else} - tu[topUserSelection.key] - )} - entities={$topUserQuery.data.topUser.map( - (tu) => tu.id - )} - /> - {/if} - {/key} -
    - - - {#key $topUserQuery.data} - {#if $topUserQuery.fetching} - - {:else if $topUserQuery.error} - {$topUserQuery.error.message} - {:else} - - - - - - - {#each $topUserQuery.data.topUser as tu, i} - - - - - - {/each} -
    LegendUser NameNumber of - -
    {tu.id}{tu[topUserSelection.key]}
    - {/if} - {/key} - - -

    - Top Projects on {cluster.charAt(0).toUpperCase() + - cluster.slice(1)} -

    - {#key $topProjectQuery.data} - {#if $topProjectQuery.fetching} - - {:else if $topProjectQuery.error} - {$topProjectQuery.error.message} - {:else} - tp[topProjectSelection.key] - )} - entities={$topProjectQuery.data.topProjects.map( - (tp) => tp.id - )} - /> - {/if} - {/key} - - - {#key $topProjectQuery.data} - {#if $topProjectQuery.fetching} - - {:else if $topProjectQuery.error} - {$topProjectQuery.error.message} - {:else} - - - - - - - {#each $topProjectQuery.data.topProjects as tp, i} - - - - - - {/each} -
    LegendProject CodeNumber of - -
    {tp.id}{tp[topProjectSelection.key]}
    - {/if} - {/key} - + + {#key $mainQuery.data.stats[0].histMetrics} + + + + {/key} +
    -
    - - -
    - {#key $mainQuery.data.stats} - - {/key} -
    - - - {#key $mainQuery.data.stats} - - {/key} - -
    - - -
    - {#key $mainQuery.data.stats} - - {/key} -
    - - - {#key $mainQuery.data.stats} - - {/key} - -
    -
    - {#if metricsInHistograms} - - - {#key $mainQuery.data.stats[0].histMetrics} - - - - - {/key} - - - {/if} + {/if} {/if} + bind:cluster + bind:metricsInHistograms + bind:isOpen={isHistogramSelectionOpen} +/> diff --git a/web/frontend/src/Systems.root.svelte b/web/frontend/src/Systems.root.svelte index d881236..4a7f633 100644 --- a/web/frontend/src/Systems.root.svelte +++ b/web/frontend/src/Systems.root.svelte @@ -1,159 +1,218 @@ - {#if $initq.error} - {$initq.error.message} - {:else if $initq.fetching} - - {:else} - - { - const diff = Date.now() - to - from = new Date(from.getTime() + diff) - to = new Date(to.getTime() + diff) - }} /> - - - - - - - - Metric - - - - - - - Find Node - - - - {/if} - -
    - + {#if $initq.error} + {$initq.error.message} + {:else if $initq.fetching} + + {:else} - {#if $nodesQuery.error} - {$nodesQuery.error.message} - {:else if $nodesQuery.fetching || $initq.fetching} - - {:else} - h.host.includes(hostnameFilter) && h.metrics.some(m => m.name == selectedMetric && m.scope == 'node')) - .map(h => ({ - host: h.host, - subCluster: h.subCluster, - data: h.metrics.find(m => m.name == selectedMetric && m.scope == 'node'), - disabled: checkMetricDisabled(selectedMetric, cluster, h.subCluster) - })) - .sort((a, b) => a.host.localeCompare(b.host)) - }> - -

    {item.host} ({item.subCluster})

    - {#if item.disabled === false && item.data} - c.name == cluster)} - subCluster={item.subCluster} - resources={[{hostname: item.host}]} - forNode={true}/> - {:else if item.disabled === true && item.data} - Metric disabled for subcluster {selectedMetric}:{item.subCluster} - {:else} - No dataset returned for {selectedMetric} - {/if} -
    - {/if} + { + const diff = Date.now() - to; + from = new Date(from.getTime() + diff); + to = new Date(to.getTime() + diff); + }} + /> + + + + + + + Metric + + + + + + + Find Node + + + + {/if} +
    +
    + + + {#if $nodesQuery.error} + {$nodesQuery.error.message} + {:else if $nodesQuery.fetching || $initq.fetching} + + {:else} + + h.host.includes(hostnameFilter) && + h.metrics.some( + (m) => m.name == selectedMetric && m.scope == "node", + ), + ) + .map((h) => ({ + host: h.host, + subCluster: h.subCluster, + data: h.metrics.find( + (m) => m.name == selectedMetric && m.scope == "node", + ), + disabled: checkMetricDisabled( + selectedMetric, + cluster, + h.subCluster, + ), + })) + .sort((a, b) => a.host.localeCompare(b.host))} + > +

    + {item.host} ({item.subCluster}) +

    + {#if item.disabled === false && item.data} + c.name == cluster)} + subCluster={item.subCluster} + resources={[{ hostname: item.host }]} + forNode={true} + /> + {:else if item.disabled === true && item.data} + Metric disabled for subcluster {selectedMetric}:{item.subCluster} + {:else} + No dataset returned for {selectedMetric} + {/if} +
    + {/if} +
    - diff --git a/web/frontend/src/TagManagement.svelte b/web/frontend/src/TagManagement.svelte index 6ab4752..e9fb9e9 100644 --- a/web/frontend/src/TagManagement.svelte +++ b/web/frontend/src/TagManagement.svelte @@ -1,190 +1,234 @@ - - (isOpen = !isOpen)}> - - Manage Tags - {#if pendingChange !== false} - - {:else} - - {/if} - - - + + Manage Tags + {#if pendingChange !== false} + + {:else} + + {/if} + + + -
    +
    - - Search using "type: name". If no tag matches your search, - a button for creating a new one will appear. - + + Search using "type: name". If no tag matches your search, a + button for creating a new one will appear. + -
      - {#each allTagsFiltered as tag} - - +
        + {#each allTagsFiltered as tag} + + - - {#if pendingChange === tag.id} - - {:else if job.tags.find(t => t.id == tag.id)} - - {:else} - - {/if} - - + + {#if pendingChange === tag.id} + + {:else if job.tags.find((t) => t.id == tag.id)} + {:else} - - No tags matching - - {/each} -
      -
      - {#if newTagType && newTagName && isNewTag(newTagType, newTagName)} - - {:else if allTagsFiltered.length == 0} - Search Term is not a valid Tag (type: name) - {/if} - - - - + + {/if} + +
      + {:else} + + No tags matching + + {/each} +
    +
    + {#if newTagType && newTagName && isNewTag(newTagType, newTagName)} + + {:else if allTagsFiltered.length == 0} + Search Term is not a valid Tag (type: name) + {/if} +
    + + +
    + + diff --git a/web/frontend/src/User.root.svelte b/web/frontend/src/User.root.svelte index f368851..c60ea20 100644 --- a/web/frontend/src/User.root.svelte +++ b/web/frontend/src/User.root.svelte @@ -1,237 +1,276 @@ - {#if $initq.fetching} - - - - {:else if $initq.error} - - {$initq.error.message} - - {/if} - + {#if $initq.fetching} + + + + {:else if $initq.error} - + {$initq.error.message} + + {/if} - + + - - - - { - jobFilters = [...detail.filters, { user: { eq: user.username } }] - selectedCluster = jobFilters[0]?.cluster ? jobFilters[0].cluster.eq : null - jobList.update(jobFilters) - }} /> - - - jobList.refresh()} /> - + + + + + + { + jobFilters = [...detail.filters, { user: { eq: user.username } }]; + selectedCluster = jobFilters[0]?.cluster + ? jobFilters[0].cluster.eq + : null; + jobList.update(jobFilters); + }} + /> + + + jobList.refresh()} /> + -
    +
    - {#if $stats.error} - - {$stats.error.message} - - {:else if !$stats.data} - - - - {:else} - - - - - - - - {#if user.name} - - - - - {/if} - {#if user.email} - - - - - {/if} - - - - - - - - - - - - - - - - - -
    Username{scrambleNames ? scramble(user.username) : user.username}
    Name{scrambleNames ? scramble(user.name) : user.name}
    Email{user.email}
    Total Jobs{$stats.data.jobsStatistics[0].totalJobs}
    Short Jobs{$stats.data.jobsStatistics[0].shortJobs}
    Total Walltime{$stats.data.jobsStatistics[0].totalWalltime}
    Total Core Hours{$stats.data.jobsStatistics[0].totalCoreHours}
    - -
    - {#key $stats.data.jobsStatistics[0].histDuration} - - {/key} -
    -
    - {#key $stats.data.jobsStatistics[0].histNumNodes} - - {/key} -
    - {/if} + {#if $stats.error} + + {$stats.error.message} + + {:else if !$stats.data} + + + + {:else} + + + + + + + + {#if user.name} + + + + + {/if} + {#if user.email} + + + + + {/if} + + + + + + + + + + + + + + + + + +
Username: {scrambleNames ? scramble(user.username) : user.username}
Name: {scrambleNames ? scramble(user.name) : user.name}
Email: {user.email}
Total Jobs: {$stats.data.jobsStatistics[0].totalJobs}
Short Jobs: {$stats.data.jobsStatistics[0].shortJobs}
Total Walltime: {$stats.data.jobsStatistics[0].totalWalltime}
Total Core Hours: {$stats.data.jobsStatistics[0].totalCoreHours}
    + +
    + {#key $stats.data.jobsStatistics[0].histDuration} + + {/key} +
    +
    + {#key $stats.data.jobsStatistics[0].histNumNodes} + + {/key} +
    + {/if}
    {#if metricsInHistograms} - - {#if $stats.error} - - {$stats.error.message} - - {:else if !$stats.data} - - - - {:else} - - {#key $stats.data.jobsStatistics[0].histMetrics} - - - - - {/key} - - {/if} - + + {#if $stats.error} + + {$stats.error.message} + + {:else if !$stats.data} + + + + {:else} + + {#key $stats.data.jobsStatistics[0].histMetrics} + + + + {/key} + + {/if} + {/if} -
    +
    - - - + + + - + + + - - + bind:cluster={selectedCluster} + bind:metricsInHistograms + bind:isOpen={isHistogramSelectionOpen} +/> diff --git a/web/frontend/src/Zoom.svelte b/web/frontend/src/Zoom.svelte index ae842fc..c5f73c1 100644 --- a/web/frontend/src/Zoom.svelte +++ b/web/frontend/src/Zoom.svelte @@ -1,60 +1,65 @@
    - - - - - - Window Size: - - - ({windowSize}%) - - - - Window Position: - - - + + + + + + Window Size: + + + ({windowSize}%) + + + + Window Position: + + +
    diff --git a/web/frontend/src/config/AdminSettings.svelte b/web/frontend/src/config/AdminSettings.svelte index 97c5b17..26e1d0f 100644 --- a/web/frontend/src/config/AdminSettings.svelte +++ b/web/frontend/src/config/AdminSettings.svelte @@ -1,54 +1,53 @@ - - - - - - - - - - - - - - - - + + + + + + + + + + + + + + + + diff --git a/web/frontend/src/config/PlotSettings.svelte b/web/frontend/src/config/PlotSettings.svelte index 36326bd..20a7f2e 100644 --- a/web/frontend/src/config/PlotSettings.svelte +++ b/web/frontend/src/config/PlotSettings.svelte @@ -1,171 +1,498 @@ - - - -
    handleSettingSubmit('#line-width-form', 'lw')}> - - -
    Line Width
    - - {#if displayMessage && message.target == 'lw'} -
    - - Update: {message.msg} - -
    - {/if} -
    - -
    - - -
    Width of the lines in the timeseries plots.
    + + + + + handleSettingSubmit("#line-width-form", "lw")} + > + + +
    Line Width
    + + {#if displayMessage && message.target == "lw"} +
    + + Update: {message.msg} +
    - - -
    + {/if} + + +
    + + +
    + Width of the lines in the timeseries plots. +
    +
    + + + - - -
    handleSettingSubmit('#plots-per-row-form', 'ppr')}> - - -
    Plots per Row
    - {#if displayMessage && message.target == 'ppr'}
    Update: {message.msg}
    {/if} -
    - -
    - - -
    How many plots to show next to each other on pages such as /monitoring/job/, /monitoring/system/...
    -
    - -
    -
    + + +
    + handleSettingSubmit("#plots-per-row-form", "ppr")} + > + + +
    Plots per Row
    + {#if displayMessage && message.target == "ppr"}
    + Update: {message.msg} +
    {/if} +
    + +
    + + +
    + How many plots to show next to each other on pages such as + /monitoring/job/, /monitoring/system/... +
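Every form in this component posts through the same handleSettingSubmit helper referenced by the on:submit handlers. A minimal sketch of that pattern, with the endpoint response handling assumed for illustration:

// Sketch: POST the form to its own action URL, then surface the outcome in
// the matching "Update: {message.msg}" alert. message/displayMessage are
// component state; the response shape is an assumption.
async function handleSettingSubmit(selector, target) {
  const form = document.querySelector(selector);
  try {
    const res = await fetch(form.action, { method: "POST", body: new FormData(form) });
    message = res.ok
      ? { target, msg: "Success" }
      : { target, msg: `${res.status} ${res.statusText}` };
  } catch (err) {
    console.error(err);
    message = { target, msg: "Failed" };
  }
  displayMessage = true; // read by the {#if displayMessage && message.target == target} blocks
}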
    +
    + +
    +
    - - -
    handleSettingSubmit('#backgrounds-form', 'bg')}> - - -
    Colored Backgrounds
    - {#if displayMessage && message.target == 'bg'}
    Update: {message.msg}
    {/if} -
    - -
    -
    - {#if config.plot_general_colorBackground} - - {:else} - - {/if} - -
    -
    - {#if config.plot_general_colorBackground} - - {:else} - - {/if} - -
    -
    - -
    -
    + + +
    + handleSettingSubmit("#backgrounds-form", "bg")} + > + + +
    Colored Backgrounds
    + {#if displayMessage && message.target == "bg"}
    + Update: {message.msg} +
    {/if} +
    + +
    +
    + {#if config.plot_general_colorBackground} + + {:else} + + {/if} + +
    +
    + {#if config.plot_general_colorBackground} + + {:else} + + {/if} + +
    +
    + +
    +
    - - -
    - - -
    Color Scheme for Timeseries Plots
    - {#if displayMessage && message.target == 'cs'}
    Update: {message.msg}
    {/if} -
    - - - - {#each Object.entries(colorschemes) as [name, rgbrow]} - - - - - - {/each} - -
    {name} - {#if rgbrow.join(',') == config.plot_general_colorscheme} - handleSettingSubmit("#colorscheme-form", "cs")}/> - {:else} - handleSettingSubmit("#colorscheme-form", "cs")}/> - {/if} - - {#each rgbrow as rgb} - - {/each} -
    -
    -
    + + +
    + + +
    Color Scheme for Timeseries Plots
    + {#if displayMessage && message.target == "cs"}
    + Update: {message.msg} +
    {/if} +
    + + + + {#each Object.entries(colorschemes) as [name, rgbrow]} + + + + + + {/each} + +
    {name} + {#if rgbrow.join(",") == config.plot_general_colorscheme} + + handleSettingSubmit("#colorscheme-form", "cs")} + /> + {:else} + + handleSettingSubmit("#colorscheme-form", "cs")} + /> + {/if} + + {#each rgbrow as rgb} + + {/each} +
    +
    +
    diff --git a/web/frontend/src/config/admin/AddUser.svelte b/web/frontend/src/config/admin/AddUser.svelte index 2712e17..43f08de 100644 --- a/web/frontend/src/config/admin/AddUser.svelte +++ b/web/frontend/src/config/admin/AddUser.svelte @@ -1,103 +1,156 @@ -
    - Create User -
    - - -
    Must be unique.
    -
    -
    - - -
    Only API users are allowed to have a blank password. Users with a blank password can only authenticate via Tokens.
    -
    -
    - - -
Only Manager users can have a project. Allows inspecting jobs and users of the given project.
    -
    -
    - - -
    Optional, can be blank.
    -
    -
    - - -
    Optional, can be blank.
    -
    + + Create User +
    + + +
    Must be unique.
    +
    +
    + + +
+ Only API users are allowed to have a blank + password. Users with a blank password can only authenticate via Tokens.
    +
    +
    + + +
+ Only Manager users can have a project. Allows inspecting jobs and users + of the given project.
    +
    +
    + + +
    Optional, can be blank.
    +
    +
    + + +
    Optional, can be blank.
    +
    - -
    -

    Role:

    - {#each roles as role, i} - {#if i == 0} -
    - - -
    - {:else if i == 1} -
    - - -
    - {:else} -
    - - -
    - {/if} - {/each} -
    -

    - - {#if displayMessage}

    {message.msg}
    {/if} -

    -
    +
    +

    Role:

    + {#each roles as role, i} + {#if i == 0} +
    + + +
    + {:else if i == 1} +
    + + +
    + {:else} +
    + + +
    + {/if} + {/each} +
    +

    + + {#if displayMessage}

    + {message.msg} +
    {/if} +

    +
    diff --git a/web/frontend/src/config/admin/EditProject.svelte b/web/frontend/src/config/admin/EditProject.svelte index 857f7db..a4a8d75 100644 --- a/web/frontend/src/config/admin/EditProject.svelte +++ b/web/frontend/src/config/admin/EditProject.svelte @@ -1,97 +1,129 @@ - - Edit Project Managed By User (Manager Only) -
    - - - - - - -
    -

    - {#if displayMessage}Update: {message.msg}{/if} -

    -
    + + Edit Project Managed By User (Manager Only) +
    + + + + + + +
    +

    + {#if displayMessage}Update: {message.msg}{/if} +

    +
    diff --git a/web/frontend/src/config/admin/EditRole.svelte b/web/frontend/src/config/admin/EditRole.svelte index ca14699..f201f38 100644 --- a/web/frontend/src/config/admin/EditRole.svelte +++ b/web/frontend/src/config/admin/EditRole.svelte @@ -1,104 +1,131 @@ - - Edit User Roles -
    - - - - - - -
    -

    - {#if displayMessage}Update: {message.msg}{/if} -

    -
    + + Edit User Roles +
    + + + + + + +
    +

    + {#if displayMessage}Update: {message.msg}{/if} +

    +
    diff --git a/web/frontend/src/config/admin/Options.svelte b/web/frontend/src/config/admin/Options.svelte index 44f9650..8ad3c44 100644 --- a/web/frontend/src/config/admin/Options.svelte +++ b/web/frontend/src/config/admin/Options.svelte @@ -1,29 +1,34 @@ - - Scramble Names / Presentation Mode - - Active? - + + Scramble Names / Presentation Mode + + Active? + diff --git a/web/frontend/src/config/admin/ShowUsers.svelte b/web/frontend/src/config/admin/ShowUsers.svelte index 439bebb..be9b146 100644 --- a/web/frontend/src/config/admin/ShowUsers.svelte +++ b/web/frontend/src/config/admin/ShowUsers.svelte @@ -1,68 +1,87 @@ - - Special Users -

    - Not created by an LDAP sync and/or having a role other than user - -

    -
    - - - - - - - - - - - - - - {#each userList as user} - - - - - {:else} - - - - {/each} - -
Username Name Project(s) Email Roles JWT Delete
    -
    Loading...
    -
    -
    -
    + + Special Users +

    + Not created by an LDAP sync and/or having a role other than user + +

    +
    + + + + + + + + + + + + + + {#each userList as user} + + + + + {:else} + + + + {/each} + +
Username Name Project(s) Email Roles JWT Delete
    +
    + Loading... +
    +
    +
    +
    diff --git a/web/frontend/src/config/admin/ShowUsersRow.svelte b/web/frontend/src/config/admin/ShowUsersRow.svelte index 34b2240..9845241 100644 --- a/web/frontend/src/config/admin/ShowUsersRow.svelte +++ b/web/frontend/src/config/admin/ShowUsersRow.svelte @@ -1,28 +1,32 @@ {user.username} {user.name} {user.projects} {user.email} -{user.roles.join(', ')} +{user.roles.join(", ")} - {#if ! jwt} - - {:else} - - {/if} + {#if !jwt} + + {:else} + + {/if} diff --git a/web/frontend/src/filters/Cluster.svelte b/web/frontend/src/filters/Cluster.svelte index 2740b74..9c82321 100644 --- a/web/frontend/src/filters/Cluster.svelte +++ b/web/frontend/src/filters/Cluster.svelte @@ -1,77 +1,95 @@ - (isOpen = !isOpen)}> - - Select Cluster & Slurm Partition - - - {#if $initialized} -

    Cluster

    - - (pendingCluster = null, pendingPartition = null)}> - Any Cluster - - {#each clusters as cluster} - (pendingCluster = cluster.name, pendingPartition = null)}> - {cluster.name} - - {/each} - - {/if} - {#if $initialized && pendingCluster != null} -
    -

Partition

    - - (pendingPartition = null)}> - Any Partition - - {#each clusters.find(c => c.name == pendingCluster).partitions as partition} - (pendingPartition = partition)}> - {partition} - - {/each} - - {/if} -
    - - - - - + (isOpen = !isOpen)}> + Select Cluster & Slurm Partition + + {#if $initialized} +

    Cluster

    + + ((pendingCluster = null), (pendingPartition = null))} + > + Any Cluster + + {#each clusters as cluster} + ( + (pendingCluster = cluster.name), (pendingPartition = null) + )} + > + {cluster.name} + + {/each} + + {/if} + {#if $initialized && pendingCluster != null} +
    +

Partition

    + + (pendingPartition = null)} + > + Any Partition + + {#each clusters.find((c) => c.name == pendingCluster).partitions as partition} + (pendingPartition = partition)} + > + {partition} + + {/each} + + {/if} +
    + + + + +
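The two dropdowns above are dependent: choosing a cluster clears any pending partition, because partitions are defined per cluster. The inline handlers reduce to this sketch:

// Sketch of the dropdown handlers seen above; partitionsOf is an
// illustrative helper, the component inlines the find() call instead.
function selectCluster(name) {
  pendingCluster = name;
  pendingPartition = null; // partition list depends on the cluster
}

function partitionsOf(name) {
  return clusters.find((c) => c.name == name).partitions;
}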
    diff --git a/web/frontend/src/filters/Duration.svelte b/web/frontend/src/filters/Duration.svelte index ca2ce45..132ce05 100644 --- a/web/frontend/src/filters/Duration.svelte +++ b/web/frontend/src/filters/Duration.svelte @@ -1,158 +1,244 @@ - (isOpen = !isOpen)}> - - Select Job Duration - - -

    Duration more than

    - - -
    - -
    -
    h
    -
    -
    - - -
    - -
    -
    m
    -
    -
    - -
    -
    + (isOpen = !isOpen)}> + Select Job Duration + +

    Duration more than

    + + +
    + +
    +
    h
    +
    +
    + + +
    + +
    +
    m
    +
    +
    + +
    +
    -

    Duration less than

    - - -
    - -
    -
    h
    -
    -
    - - -
    - -
    -
    m
    -
    -
    - -
    -
    +

    Duration less than

    + + +
    + +
    +
    h
    +
    +
    + + +
    + +
    +
    m
    +
    +
    + +
    +
    -

    Duration between

    - - -
    - -
    -
    h
    -
    -
    - - -
    - -
    -
    m
    -
    -
    - -
    -

    and

    - - -
    - -
    -
    h
    -
    -
    - - -
    - -
    -
    m
    -
    -
    - -
    -
    - - - - - - +

    Duration between

    + + +
    + +
    +
    h
    +
    +
    + + +
    + +
    +
    m
    +
    +
    + +
    +

    and

    + + +
    + +
    +
    h
    +
    +
    + + +
    + +
    +
    m
    +
    +
    + +
    +
    + + + + + +
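All three modes above collect hours and minutes separately before they end up as a single seconds value on the duration filter. A small sketch of that conversion (the helper name is an assumption, only the arithmetic is fixed):

// Sketch: filters work in seconds; the chips in Filters.svelte below render
// them back via Math.floor(secs / 3600) and Math.floor((secs % 3600) / 60).
const hoursAndMinsToSecs = (hours, mins) => hours * 3600 + mins * 60;

// e.g. "Duration more than" 6h 30m:
// hoursAndMinsToSecs(6, 30) === 23400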
    diff --git a/web/frontend/src/filters/Filters.svelte b/web/frontend/src/filters/Filters.svelte index 49eaed6..8e7a8ef 100644 --- a/web/frontend/src/filters/Filters.svelte +++ b/web/frontend/src/filters/Filters.svelte @@ -10,373 +10,418 @@ - void update(additionalFilters: Object?): Triggers an update --> - - - - - Filters - - - - Manage Filters - - {#if menuText} - {menuText} - - {/if} - (isClusterOpen = true)}> - Cluster/Partition - - (isJobStatesOpen = true)}> - Job States - - (isStartTimeOpen = true)}> - Start Time - - (isDurationOpen = true)}> - Duration - - (isTagsOpen = true)}> - Tags - - (isResourcesOpen = true)}> - Resources - - (isStatsOpen = true)}> - (isStatsOpen = true)}/> Statistics - - {#if startTimeQuickSelect} - - Start Time Qick Selection - {#each [ - { text: 'Last 6hrs', url: 'last6h', seconds: 6*60*60 }, - // { text: 'Last 12hrs', seconds: 12*60*60 }, - { text: 'Last 24hrs', url: 'last24h', seconds: 24*60*60 }, - // { text: 'Last 48hrs', seconds: 48*60*60 }, - { text: 'Last 7 days', url: 'last7d', seconds: 7*24*60*60 }, - { text: 'Last 30 days', url: 'last30d', seconds: 30*24*60*60 } - ] as {text, url, seconds}} - { - filters.startTime.from = (new Date(Date.now() - seconds * 1000)).toISOString() - filters.startTime.to = (new Date(Date.now())).toISOString() - filters.startTime.text = text, - filters.startTime.url = url - update() - }}> - {text} - - {/each} - {/if} - - - - - {#if filters.cluster} - (isClusterOpen = true)}> - {filters.cluster} - {#if filters.partition} - ({filters.partition}) - {/if} - + + + + + Filters + + + Manage Filters + {#if menuText} + {menuText} + {/if} + (isClusterOpen = true)}> + Cluster/Partition + + (isJobStatesOpen = true)}> + Job States + + (isStartTimeOpen = true)}> + Start Time + + (isDurationOpen = true)}> + Duration + + (isTagsOpen = true)}> + Tags + + (isResourcesOpen = true)}> + Resources + + (isStatsOpen = true)}> + (isStatsOpen = true)} /> Statistics + + {#if startTimeQuickSelect} + + Start Time Qick Selection + {#each [{ text: "Last 6hrs", url: "last6h", seconds: 6 * 60 * 60 }, { text: "Last 24hrs", url: "last24h", seconds: 24 * 60 * 60 }, { text: "Last 7 days", url: "last7d", seconds: 7 * 24 * 60 * 60 }, { text: "Last 30 days", url: "last30d", seconds: 30 * 24 * 60 * 60 }] as { text, url, seconds }} + { + filters.startTime.from = new Date( + Date.now() - seconds * 1000, + ).toISOString(); + filters.startTime.to = new Date(Date.now()).toISOString(); + (filters.startTime.text = text), (filters.startTime.url = url); + update(); + }} + > + + {text} + + {/each} + {/if} + + + + + {#if filters.cluster} + (isClusterOpen = true)}> + {filters.cluster} + {#if filters.partition} + ({filters.partition}) + {/if} + + {/if} - {#if filters.states.length != allJobStates.length} - (isJobStatesOpen = true)}> - {filters.states.join(', ')} - - {/if} + {#if filters.states.length != allJobStates.length} + (isJobStatesOpen = true)}> + {filters.states.join(", ")} + + {/if} - {#if filters.startTime.from || filters.startTime.to} - (isStartTimeOpen = true)}> - {#if filters.startTime.text} - {filters.startTime.text} - {:else} - {new Date(filters.startTime.from).toLocaleString()} - {new Date(filters.startTime.to).toLocaleString()} - {/if} - + {#if filters.startTime.from || filters.startTime.to} + (isStartTimeOpen = true)}> + {#if filters.startTime.text} + {filters.startTime.text} + {:else} + {new Date(filters.startTime.from).toLocaleString()} - {new Date( + filters.startTime.to, + ).toLocaleString()} {/if} + + {/if} - {#if filters.duration.from || 
filters.duration.to} - (isDurationOpen = true)}> - {Math.floor(filters.duration.from / 3600)}h:{Math.floor(filters.duration.from % 3600 / 60)}m - - - {Math.floor(filters.duration.to / 3600)}h:{Math.floor(filters.duration.to % 3600 / 60)}m - - {/if} + {#if filters.duration.from || filters.duration.to} + (isDurationOpen = true)}> + {Math.floor(filters.duration.from / 3600)}h:{Math.floor( + (filters.duration.from % 3600) / 60, + )}m - + {Math.floor(filters.duration.to / 3600)}h:{Math.floor( + (filters.duration.to % 3600) / 60, + )}m + + {/if} - {#if filters.duration.lessThan} - (isDurationOpen = true)}> - Duration less than {Math.floor(filters.duration.lessThan / 3600)}h:{Math.floor(filters.duration.lessThan % 3600 / 60)}m - - {/if} + {#if filters.duration.lessThan} + (isDurationOpen = true)}> + Duration less than {Math.floor( + filters.duration.lessThan / 3600, + )}h:{Math.floor((filters.duration.lessThan % 3600) / 60)}m + + {/if} - {#if filters.duration.moreThan} - (isDurationOpen = true)}> - Duration more than {Math.floor(filters.duration.moreThan / 3600)}h:{Math.floor(filters.duration.moreThan % 3600 / 60)}m - - {/if} + {#if filters.duration.moreThan} + (isDurationOpen = true)}> + Duration more than {Math.floor( + filters.duration.moreThan / 3600, + )}h:{Math.floor((filters.duration.moreThan % 3600) / 60)}m + + {/if} - {#if filters.tags.length != 0} - (isTagsOpen = true)}> - {#each filters.tags as tagId} - - {/each} - - {/if} + {#if filters.tags.length != 0} + (isTagsOpen = true)}> + {#each filters.tags as tagId} + + {/each} + + {/if} - {#if filters.numNodes.from != null || filters.numNodes.to != null || - filters.numHWThreads.from != null || filters.numHWThreads.to != null || - filters.numAccelerators.from != null || filters.numAccelerators.to != null } - (isResourcesOpen = true)}> - {#if isNodesModified } Nodes: {filters.numNodes.from} - {filters.numNodes.to} {/if} - {#if isNodesModified && isHwthreadsModified }, {/if} - {#if isHwthreadsModified } HWThreads: {filters.numHWThreads.from} - {filters.numHWThreads.to} {/if} - {#if (isNodesModified || isHwthreadsModified) && isAccsModified }, {/if} - {#if isAccsModified } Accelerators: {filters.numAccelerators.from} - {filters.numAccelerators.to} {/if} - + {#if filters.numNodes.from != null || filters.numNodes.to != null || filters.numHWThreads.from != null || filters.numHWThreads.to != null || filters.numAccelerators.from != null || filters.numAccelerators.to != null} + (isResourcesOpen = true)}> + {#if isNodesModified} + Nodes: {filters.numNodes.from} - {filters.numNodes.to} {/if} + {#if isNodesModified && isHwthreadsModified}, + {/if} + {#if isHwthreadsModified} + HWThreads: {filters.numHWThreads.from} - {filters.numHWThreads.to} + {/if} + {#if (isNodesModified || isHwthreadsModified) && isAccsModified}, + {/if} + {#if isAccsModified} + Accelerators: {filters.numAccelerators.from} - {filters + .numAccelerators.to} + {/if} + + {/if} - {#if filters.node != null } - (isResourcesOpen = true)}> - Node: {filters.node} - - {/if} + {#if filters.node != null} + (isResourcesOpen = true)}> + Node: {filters.node} + + {/if} - {#if filters.stats.length > 0} - (isStatsOpen = true)}> - {filters.stats.map(stat => `${stat.text}: ${stat.from} - ${stat.to}`).join(', ')} - - {/if} - + {#if filters.stats.length > 0} + (isStatsOpen = true)}> + {filters.stats + .map((stat) => `${stat.text}: ${stat.from} - ${stat.to}`) + .join(", ")} + + {/if} + update()} /> + {disableClusterSelection} + bind:isOpen={isClusterOpen} + bind:cluster={filters.cluster} + 
bind:partition={filters.partition} + on:update={() => update()} +/> update()} /> + bind:isOpen={isJobStatesOpen} + bind:states={filters.states} + on:update={() => update()} +/> { - delete filters.startTime['text'] - delete filters.startTime['url'] - update() - }} /> + bind:isOpen={isStartTimeOpen} + bind:from={filters.startTime.from} + bind:to={filters.startTime.to} + on:update={() => { + delete filters.startTime["text"]; + delete filters.startTime["url"]; + update(); + }} +/> update()} /> + bind:isOpen={isDurationOpen} + bind:lessThan={filters.duration.lessThan} + bind:moreThan={filters.duration.moreThan} + bind:from={filters.duration.from} + bind:to={filters.duration.to} + on:update={() => update()} +/> update()} /> + bind:isOpen={isTagsOpen} + bind:tags={filters.tags} + on:update={() => update()} +/> - update()} /> + update()} +/> - update()} /> + update()} +/> diff --git a/web/frontend/src/filters/InfoBox.svelte b/web/frontend/src/filters/InfoBox.svelte index 58fc8a5..8fe75ab 100644 --- a/web/frontend/src/filters/InfoBox.svelte +++ b/web/frontend/src/filters/InfoBox.svelte @@ -1,11 +1,11 @@ - diff --git a/web/frontend/src/filters/JobStates.svelte b/web/frontend/src/filters/JobStates.svelte index 4e5db2e..e22144f 100644 --- a/web/frontend/src/filters/JobStates.svelte +++ b/web/frontend/src/filters/JobStates.svelte @@ -1,47 +1,76 @@ + - (isOpen = !isOpen)}> - - Select Job States - - - - {#each allJobStates as state} - - - {state} - - {/each} - - - - - - - + (isOpen = !isOpen)}> + Select Job States + + + {#each allJobStates as state} + + + {state} + + {/each} + + + + + + + diff --git a/web/frontend/src/filters/Resources.svelte b/web/frontend/src/filters/Resources.svelte index be5995a..01f1c57 100644 --- a/web/frontend/src/filters/Resources.svelte +++ b/web/frontend/src/filters/Resources.svelte @@ -1,145 +1,242 @@ - (isOpen = !isOpen)}> - - Select number of utilized Resources - - -
    Named Node
    - -
    Number of Nodes
    - { - pendingNumNodes = { from: detail[0], to: detail[1] } - isNodesModified = true - }} - min={minNumNodes} max={maxNumNodes} - firstSlider={pendingNumNodes.from} secondSlider={pendingNumNodes.to} - inputFieldFrom={pendingNumNodes.from} inputFieldTo={pendingNumNodes.to}/> -
    Number of HWThreads (Use for Single-Node Jobs)
    - { - pendingNumHWThreads = { from: detail[0], to: detail[1] } - isHwthreadsModified = true - }} - min={minNumHWThreads} max={maxNumHWThreads} - firstSlider={pendingNumHWThreads.from} secondSlider={pendingNumHWThreads.to} - inputFieldFrom={pendingNumHWThreads.from} inputFieldTo={pendingNumHWThreads.to}/> - {#if maxNumAccelerators != null && maxNumAccelerators > 1} -
    Number of Accelerators
    - { - pendingNumAccelerators = { from: detail[0], to: detail[1] } - isAccsModified = true - }} - min={minNumAccelerators} max={maxNumAccelerators} - firstSlider={pendingNumAccelerators.from} secondSlider={pendingNumAccelerators.to} - inputFieldFrom={pendingNumAccelerators.from} inputFieldTo={pendingNumAccelerators.to}/> - {/if} -
    - - - - - + (isOpen = !isOpen)}> + Select number of utilized Resources + +
    Named Node
    + +
    Number of Nodes
    + { + pendingNumNodes = { from: detail[0], to: detail[1] }; + isNodesModified = true; + }} + min={minNumNodes} + max={maxNumNodes} + firstSlider={pendingNumNodes.from} + secondSlider={pendingNumNodes.to} + inputFieldFrom={pendingNumNodes.from} + inputFieldTo={pendingNumNodes.to} + /> +
    + Number of HWThreads (Use for Single-Node Jobs) +
    + { + pendingNumHWThreads = { from: detail[0], to: detail[1] }; + isHwthreadsModified = true; + }} + min={minNumHWThreads} + max={maxNumHWThreads} + firstSlider={pendingNumHWThreads.from} + secondSlider={pendingNumHWThreads.to} + inputFieldFrom={pendingNumHWThreads.from} + inputFieldTo={pendingNumHWThreads.to} + /> + {#if maxNumAccelerators != null && maxNumAccelerators > 1} +
    Number of Accelerators
    + { + pendingNumAccelerators = { from: detail[0], to: detail[1] }; + isAccsModified = true; + }} + min={minNumAccelerators} + max={maxNumAccelerators} + firstSlider={pendingNumAccelerators.from} + secondSlider={pendingNumAccelerators.to} + inputFieldFrom={pendingNumAccelerators.from} + inputFieldTo={pendingNumAccelerators.to} + /> + {/if} +
    + + + + +
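The sliders above only stage values; a flag records which ranges the user actually touched so untouched defaults do not turn into filters. One change handler, condensed:

// Sketch of the changed-handler wired to the nodes slider above; the
// isNodesModified flag is later checked when the filter chips are built.
function onNodesChanged(detail) {
  pendingNumNodes = { from: detail[0], to: detail[1] };
  isNodesModified = true;
}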
    diff --git a/web/frontend/src/filters/StartTime.svelte b/web/frontend/src/filters/StartTime.svelte index 59f8513..1759b6e 100644 --- a/web/frontend/src/filters/StartTime.svelte +++ b/web/frontend/src/filters/StartTime.svelte @@ -1,86 +1,121 @@ - (isOpen = !isOpen)}> - - Select Start Time - - -

    From

    - - - - - - - - -

    To

    - - - - - - - - -
    - - - - - + (isOpen = !isOpen)}> + Select Start Time + +

    From

    + + + + + + + + +

    To

    + + + + + + + + +
    + + + + +
    diff --git a/web/frontend/src/filters/Stats.svelte b/web/frontend/src/filters/Stats.svelte index cf559da..ee80a4b 100644 --- a/web/frontend/src/filters/Stats.svelte +++ b/web/frontend/src/filters/Stats.svelte @@ -1,115 +1,137 @@ - (isOpen = !isOpen)}> - - Filter based on statistics (of non-running jobs) - - - {#each statistics as stat} -

    {stat.text}

    - (stat.from = detail[0], stat.to = detail[1], stat.enabled = true)} - min={0} max={stat.peak} - firstSlider={stat.from} secondSlider={stat.to} - inputFieldFrom={stat.from} inputFieldTo={stat.to}/> - {/each} -
    - - - - - + (isOpen = !isOpen)}> + Filter based on statistics (of non-running jobs) + + {#each statistics as stat} +

    {stat.text}

    + ( + (stat.from = detail[0]), (stat.to = detail[1]), (stat.enabled = true) + )} + min={0} + max={stat.peak} + firstSlider={stat.from} + secondSlider={stat.to} + inputFieldFrom={stat.from} + inputFieldTo={stat.to} + /> + {/each} +
    + + + + +
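A statistic only becomes part of the filter once its slider was moved, which is what the (stat.enabled = true) assignment above records. Mapping the enabled entries onto filter items could look like this (sketch, the field key is an assumption):

// Sketch: each enabled statistic contributes one from/to range.
const statItems = statistics
  .filter((stat) => stat.enabled)
  .map((stat) => ({ field: stat.field, from: stat.from, to: stat.to }));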
    diff --git a/web/frontend/src/filters/Tags.svelte b/web/frontend/src/filters/Tags.svelte index b5a145a..06153ed 100644 --- a/web/frontend/src/filters/Tags.svelte +++ b/web/frontend/src/filters/Tags.svelte @@ -1,67 +1,89 @@ - (isOpen = !isOpen)}> - - Select Tags - - - -
    - - {#if $initialized} - {#each fuzzySearchTags(searchTerm, allTags) as tag (tag)} - - {#if pendingTags.includes(tag.id)} - - {:else} - - {/if} - - - - {:else} - No Tags - {/each} + (isOpen = !isOpen)}> + Select Tags + + +
    + + {#if $initialized} + {#each fuzzySearchTags(searchTerm, allTags) as tag (tag)} + + {#if pendingTags.includes(tag.id)} + + {:else} + {/if} - -
    - - - - - + + + + {:else} + No Tags + {/each} + {/if} +
    +
    + + + + +
    diff --git a/web/frontend/src/filters/TimeSelection.svelte b/web/frontend/src/filters/TimeSelection.svelte index c715b9c..f9c230b 100644 --- a/web/frontend/src/filters/TimeSelection.svelte +++ b/web/frontend/src/filters/TimeSelection.svelte @@ -1,81 +1,96 @@ - - - - {#if timeRange == -1} - from - updateExplicitTimeRange('from', event)}> - to - updateExplicitTimeRange('to', event)}> + + {#if timeRange == -1} + from + updateExplicitTimeRange("from", event)} + > + to + updateExplicitTimeRange("to", event)} + > + {/if} diff --git a/web/frontend/src/filters/UserOrProject.svelte b/web/frontend/src/filters/UserOrProject.svelte index 8235863..983192c 100644 --- a/web/frontend/src/filters/UserOrProject.svelte +++ b/web/frontend/src/filters/UserOrProject.svelte @@ -1,75 +1,84 @@ {#if authlevel >= roles.manager} - - - termChanged()} on:keyup={(event) => termChanged(event.key == 'Enter' ? 0 : throttle)} - placeholder={mode == 'user' ? 'filter username...' : 'filter project...'} /> - + + + termChanged()} + on:keyup={(event) => termChanged(event.key == "Enter" ? 0 : throttle)} + placeholder={mode == "user" ? "filter username..." : "filter project..."} + /> + {:else} - - - termChanged()} on:keyup={(event) => termChanged(event.key == 'Enter' ? 0 : throttle)} placeholder='filter project...' - /> - + + + termChanged()} + on:keyup={(event) => termChanged(event.key == "Enter" ? 0 : throttle)} + placeholder="filter project..." + /> + {/if} diff --git a/web/frontend/src/joblist/JobInfo.svelte b/web/frontend/src/joblist/JobInfo.svelte index b7ca32a..a30d058 100644 --- a/web/frontend/src/joblist/JobInfo.svelte +++ b/web/frontend/src/joblist/JobInfo.svelte @@ -6,115 +6,138 @@ - jobTags: Defaults to job.tags, usefull for dynamically updating the tags. --> +
    -

    - {job.jobId} ({job.cluster}) - {#if job.metaData?.jobName} -
    - {#if job.metaData?.jobName.length <= 25} -

    {job.metaData.jobName}
    - {:else} -
    {job.metaData.jobName}
    - {/if} - {/if} - {#if job.arrayJobId} - Array Job: #{job.arrayJobId} - {/if} -

    +

    + {job.jobId} + ({job.cluster}) + {#if job.metaData?.jobName} +
    + {#if job.metaData?.jobName.length <= 25} +

    {job.metaData.jobName}
    + {:else} +
    + {job.metaData.jobName} +
    + {/if} + {/if} + {#if job.arrayJobId} + Array Job: #{job.arrayJobId} + {/if} +

    -

    - - - {scrambleNames ? scramble(job.user) : job.user} - - {#if job.userData && job.userData.name} - ({scrambleNames ? scramble(job.userData.name) : job.userData.name}) - {/if} - {#if job.project && job.project != 'no project'} -
    - - - {scrambleNames ? scramble(job.project) : job.project} - - {/if} -

    +

    + + + {scrambleNames ? scramble(job.user) : job.user} + + {#if job.userData && job.userData.name} + ({scrambleNames ? scramble(job.userData.name) : job.userData.name}) + {/if} + {#if job.project && job.project != "no project"} +
    + + + {scrambleNames ? scramble(job.project) : job.project} + + {/if} +

    -

    - {#if job.numNodes == 1} - {job.resources[0].hostname} - {:else} - {job.numNodes} - {/if} - - {#if job.exclusive != 1} - (shared) - {/if} - {#if job.numAcc > 0} - , {job.numAcc} - {/if} - {#if job.numHWThreads > 0} - , {job.numHWThreads} - {/if} -
    - {job.subCluster} -

    +

    + {#if job.numNodes == 1} + {job.resources[0].hostname} + {:else} + {job.numNodes} + {/if} + + {#if job.exclusive != 1} + (shared) + {/if} + {#if job.numAcc > 0} + , {job.numAcc} + {/if} + {#if job.numHWThreads > 0} + , {job.numHWThreads} + {/if} +
    + {job.subCluster} +

    -

    - Start: {(new Date(job.startTime)).toLocaleString()} -
    - Duration: {formatDuration(job.duration)} {job.state} - {#if job.walltime} -
    - Walltime: {formatDuration(job.walltime)} - {/if} -

    +

    + Start: {new Date(job.startTime).toLocaleString()} +
    + Duration: {formatDuration(job.duration)} + {job.state} + {#if job.walltime} +
    + Walltime: {formatDuration(job.walltime)} + {/if} +

    -

    - {#each jobTags as tag} - - {/each} -

    +

    + {#each jobTags as tag} + + {/each} +

    diff --git a/web/frontend/src/joblist/JobList.svelte b/web/frontend/src/joblist/JobList.svelte index 5f8d89b..3efe069 100644 --- a/web/frontend/src/joblist/JobList.svelte +++ b/web/frontend/src/joblist/JobList.svelte @@ -9,284 +9,275 @@ - update(filters?: [JobFilter]) --> -
    - - - - - {#if showFootprint} - - {/if} - {#each metrics as metric (metric)} - - {/each} - - - - {#if $jobs.error} - - - - {:else if $jobs.fetching || !$jobs.data} - - - - {:else if $jobs.data && $initialized} - {#each $jobs.data.jobs.items as job (job)} - - {:else} - - - - {/each} - {/if} - -
    - Job Info - - Job Footprint - - {metric} - {#if $initialized} - ({clusters - .map((cluster) => - cluster.metricConfig.find( - (m) => m.name == metric - ) - ) - .filter((m) => m != null) - .map( - (m) => - (m.unit?.prefix - ? m.unit?.prefix - : "") + - (m.unit?.base ? m.unit?.base : "") - ) // Build unitStr - .reduce( - (arr, unitStr) => - arr.includes(unitStr) - ? arr - : [...arr, unitStr], - [] - ) // w/o this, output would be [unitStr, unitStr] - .join(", ")}) - {/if} -
    -

    {$jobs.error.message}

    -
    - -
    - No jobs found -
    -
    +
    + + + + + {#if showFootprint} + + {/if} + {#each metrics as metric (metric)} + + {/each} + + + + {#if $jobs.error} + + + + {:else if $jobs.fetching || !$jobs.data} + + + + {:else if $jobs.data && $initialized} + {#each $jobs.data.jobs.items as job (job)} + + {:else} + + + + {/each} + {/if} + +
    + Job Info + + Job Footprint + + {metric} + {#if $initialized} + ({clusters + .map((cluster) => + cluster.metricConfig.find((m) => m.name == metric), + ) + .filter((m) => m != null) + .map( + (m) => + (m.unit?.prefix ? m.unit?.prefix : "") + + (m.unit?.base ? m.unit?.base : ""), + ) // Build unitStr + .reduce( + (arr, unitStr) => + arr.includes(unitStr) ? arr : [...arr, unitStr], + [], + ) // w/o this, output would be [unitStr, unitStr] + .join(", ")}) + {/if} +
    +

    {$jobs.error.message}

    +
    + +
    No jobs found
    +
    { - if (detail.itemsPerPage != itemsPerPage) { - updateConfiguration( - detail.itemsPerPage.toString(), - detail.page - ) - } else { - paging = { itemsPerPage: detail.itemsPerPage, page: detail.page } - } - }} + bind:page + {itemsPerPage} + itemText="Jobs" + totalItems={matchedJobs} + on:update={({ detail }) => { + if (detail.itemsPerPage != itemsPerPage) { + updateConfiguration(detail.itemsPerPage.toString(), detail.page); + } else { + paging = { itemsPerPage: detail.itemsPerPage, page: detail.page }; + } + }} /> diff --git a/web/frontend/src/joblist/Refresher.svelte b/web/frontend/src/joblist/Refresher.svelte index 2587711..635ffbe 100644 --- a/web/frontend/src/joblist/Refresher.svelte +++ b/web/frontend/src/joblist/Refresher.svelte @@ -5,39 +5,46 @@ - 'reload': When fired, the parent component shoud refresh its contents --> - - - \ No newline at end of file + + + + diff --git a/web/frontend/src/joblist/Row.svelte b/web/frontend/src/joblist/Row.svelte index 4d9013c..41caa53 100644 --- a/web/frontend/src/joblist/Row.svelte +++ b/web/frontend/src/joblist/Row.svelte @@ -9,168 +9,189 @@ --> - - + + + + {#if job.monitoringStatus == 0 || job.monitoringStatus == 2} + + Not monitored or archiving failed - {#if job.monitoringStatus == 0 || job.monitoringStatus == 2} - - Not monitored or archiving failed - - {:else if $metricsQuery.fetching} - - - - {:else if $metricsQuery.error} - - - {$metricsQuery.error.message.length > 500 - ? $metricsQuery.error.message.substring(0, 499) + "..." - : $metricsQuery.error.message} - - - {:else} - {#if showFootprint} - - - - {/if} - {#each sortAndSelectScope($metricsQuery.data.jobMetrics) as metric, i (metric || i)} - - - {#if metric.disabled == false && metric.data} - - {:else if metric.disabled == true && metric.data} - Metric disabled for subcluster {metric.data.name}:{job.subCluster} - {:else} - No dataset returned - {/if} - - {/each} + {:else if $metricsQuery.fetching} + + + + {:else if $metricsQuery.error} + + + {$metricsQuery.error.message.length > 500 + ? $metricsQuery.error.message.substring(0, 499) + "..." + : $metricsQuery.error.message} + + + {:else} + {#if showFootprint} + + + {/if} + {#each sortAndSelectScope($metricsQuery.data.jobMetrics) as metric, i (metric || i)} + + + {#if metric.disabled == false && metric.data} + + {:else if metric.disabled == true && metric.data} + Metric disabled for subcluster {metric.data.name}:{job.subCluster} + {:else} + No dataset returned + {/if} + + {/each} + {/if} diff --git a/web/frontend/src/joblist/SortSelection.svelte b/web/frontend/src/joblist/SortSelection.svelte index 5941964..2cc8615 100644 --- a/web/frontend/src/joblist/SortSelection.svelte +++ b/web/frontend/src/joblist/SortSelection.svelte @@ -7,65 +7,94 @@ --> - { isOpen = !isOpen }}> - - Sort rows - - - - {#each sortableColumns as col, i (col)} - - + sortableColumns[i] = { ...sortableColumns[i] }; + activeColumnIdx = i; + sortableColumns = [...sortableColumns]; + sorting = { field: col.field, order: col.order }; + }} + > + + - {col.text} - - {/each} - - - - - + {col.text} + + {/each} + + + + + \ No newline at end of file + .sort { + border: none; + margin: 0; + padding: 0; + background: 0 0; + transition: all 70ms; + } + + diff --git a/web/frontend/src/plots/Histogram.svelte b/web/frontend/src/plots/Histogram.svelte index 499ea4f..8300384 100644 --- a/web/frontend/src/plots/Histogram.svelte +++ b/web/frontend/src/plots/Histogram.svelte @@ -5,221 +5,222 @@ --> {#if data.length > 0} -
    +
    {:else} - Cannot render histogram: No data! + Cannot render histogram: No data! {/if} - - diff --git a/web/frontend/src/plots/MetricPlot.svelte b/web/frontend/src/plots/MetricPlot.svelte index 7bd264c..af8f22a 100644 --- a/web/frontend/src/plots/MetricPlot.svelte +++ b/web/frontend/src/plots/MetricPlot.svelte @@ -1,3 +1,120 @@ + + - {#if series[0].data.length > 0} -
    +
    {:else} - Cannot render plot: No series data returned for {metric} + Cannot render plot: No series data returned for {metric} {/if} diff --git a/web/frontend/src/plots/Roofline.svelte b/web/frontend/src/plots/Roofline.svelte index d4ed5f6..1e47f6f 100644 --- a/web/frontend/src/plots/Roofline.svelte +++ b/web/frontend/src/plots/Roofline.svelte @@ -1,254 +1,339 @@ {#if data != null} -
    +
    {:else} - Cannot render roofline: No data! -{/if} \ No newline at end of file + Cannot render roofline: No data! +{/if} + From f761900a3ebd2c5f181ea751099ceb9895bdb4d2 Mon Sep 17 00:00:00 2001 From: Jan Eitzinger Date: Wed, 13 Mar 2024 09:37:12 +0100 Subject: [PATCH 65/93] Add initial code for oidc authentication support --- go.mod | 16 +++++----- go.sum | 19 +++++++++++ internal/auth/auth.go | 19 ++++++----- internal/auth/oidc.go | 73 +++++++++++++++++++++++++++++++++++++++++++ 4 files changed, 110 insertions(+), 17 deletions(-) create mode 100644 internal/auth/oidc.go diff --git a/go.mod b/go.mod index facfa00..4de12ec 100644 --- a/go.mod +++ b/go.mod @@ -25,7 +25,7 @@ require ( github.com/swaggo/http-swagger v1.3.3 github.com/swaggo/swag v1.16.2 github.com/vektah/gqlparser/v2 v2.5.10 - golang.org/x/crypto v0.16.0 + golang.org/x/crypto v0.21.0 golang.org/x/exp v0.0.0-20230510235704-dd950f8aeaea ) @@ -37,15 +37,17 @@ require ( github.com/beorn7/perks v1.0.1 // indirect github.com/cespare/xxhash/v2 v2.2.0 // indirect github.com/containerd/containerd v1.6.18 // indirect + github.com/coreos/go-oidc/v3 v3.9.0 // indirect github.com/cpuguy83/go-md2man/v2 v2.0.3 // indirect github.com/deepmap/oapi-codegen v1.12.4 // indirect github.com/felixge/httpsnoop v1.0.3 // indirect github.com/go-asn1-ber/asn1-ber v1.5.4 // indirect + github.com/go-jose/go-jose/v3 v3.0.1 // indirect github.com/go-openapi/jsonpointer v0.20.0 // indirect github.com/go-openapi/jsonreference v0.20.2 // indirect github.com/go-openapi/spec v0.20.9 // indirect github.com/go-openapi/swag v0.22.4 // indirect - github.com/golang/protobuf v1.5.2 // indirect + github.com/golang/protobuf v1.5.3 // indirect github.com/google/uuid v1.4.0 // indirect github.com/gorilla/securecookie v1.1.1 // indirect github.com/gorilla/websocket v1.5.0 // indirect @@ -76,13 +78,13 @@ require ( github.com/xrash/smetrics v0.0.0-20201216005158-039620a65673 // indirect go.uber.org/atomic v1.10.0 // indirect golang.org/x/mod v0.14.0 // indirect - golang.org/x/net v0.19.0 // indirect - golang.org/x/oauth2 v0.5.0 // indirect - golang.org/x/sys v0.15.0 // indirect + golang.org/x/net v0.22.0 // indirect + golang.org/x/oauth2 v0.18.0 // indirect + golang.org/x/sys v0.18.0 // indirect golang.org/x/text v0.14.0 // indirect golang.org/x/tools v0.16.0 // indirect - google.golang.org/appengine v1.6.7 // indirect - google.golang.org/protobuf v1.30.0 // indirect + google.golang.org/appengine v1.6.8 // indirect + google.golang.org/protobuf v1.31.0 // indirect gopkg.in/yaml.v2 v2.4.0 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect sigs.k8s.io/yaml v1.4.0 // indirect diff --git a/go.sum b/go.sum index abd7f71..2658092 100644 --- a/go.sum +++ b/go.sum @@ -339,6 +339,8 @@ github.com/coreos/go-iptables v0.4.5/go.mod h1:/mVI274lEDI2ns62jHCDnCyBF9Iwsmeka github.com/coreos/go-iptables v0.5.0/go.mod h1:/mVI274lEDI2ns62jHCDnCyBF9Iwsmekav8Dbxlm1MU= github.com/coreos/go-iptables v0.6.0/go.mod h1:Qe8Bv2Xik5FyTXwgIbLAnv2sWSBmvWdFETJConOQ//Q= github.com/coreos/go-oidc v2.1.0+incompatible/go.mod h1:CgnwVTmzoESiwO9qyAFEMiHoZ1nMCKZlZ9V6mm3/LKc= +github.com/coreos/go-oidc/v3 v3.9.0 h1:0J/ogVOd4y8P0f0xUh8l9t07xRP/d8tccvjHl2dcsSo= +github.com/coreos/go-oidc/v3 v3.9.0/go.mod h1:rTKz2PYwftcrtoCzV5g5kvfJoWcm0Mk8AF8y1iAQro4= github.com/coreos/go-semver v0.2.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk= github.com/coreos/go-semver v0.3.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk= github.com/coreos/go-systemd v0.0.0-20161114122254-48702e0da86b/go.mod 
h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= @@ -451,6 +453,8 @@ github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9 github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= github.com/go-ini/ini v1.25.4/go.mod h1:ByCAeIL28uOIIG0E3PJtZPDL8WnHpFKFOtgjp+3Ies8= +github.com/go-jose/go-jose/v3 v3.0.1 h1:pWmKFVtt+Jl0vBZTIpz/eAKwsm6LkIxDVVbFHKkchhA= +github.com/go-jose/go-jose/v3 v3.0.1/go.mod h1:RNkWWRld676jZEYoV3+XK8L2ZnNSvIsxFMht0mSX+u8= github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= github.com/go-kit/log v0.1.0/go.mod h1:zbhenjAZHb184qTLMA9ZjW7ThYL0H2mk7Q6pNt4vbaY= @@ -588,6 +592,8 @@ github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaS github.com/golang/protobuf v1.5.1/go.mod h1:DopwsBzvsk0Fs44TXzsVbJyPhcCPeIwnvohx4u74HPM= github.com/golang/protobuf v1.5.2 h1:ROPKBNFfQgOUMifHyP+KYbvpjbdoFNs+aK7DXlji0Tw= github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= +github.com/golang/protobuf v1.5.3 h1:KhyjKVUg7Usr/dYsdSqoFveMYd5ko72D+zANwlG1mmg= +github.com/golang/protobuf v1.5.3/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= github.com/golang/snappy v0.0.0-20170215233205-553a64147049/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= github.com/golang/snappy v0.0.1/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= github.com/golang/snappy v0.0.3/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= @@ -1288,6 +1294,8 @@ golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5y golang.org/x/crypto v0.0.0-20220622213112-05595931fe9d/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= golang.org/x/crypto v0.16.0 h1:mMMrFzRSCF0GvB7Ne27XVtVAaXLrPmgPC7/v0tkwHaY= golang.org/x/crypto v0.16.0/go.mod h1:gCAAfMLgwOJRpTjQ2zCCt2OcSfYMTeZVSRtQlPC7Nq4= +golang.org/x/crypto v0.21.0 h1:X31++rzVUdKhX5sWmSOFZxx8UW/ldWx55cbf08iNAMA= +golang.org/x/crypto v0.21.0/go.mod h1:0BP7YvVV9gBbVKyeTG0Gyn+gZm94bibOW5BjDEYAOMs= golang.org/x/exp v0.0.0-20180321215751-8460e604b9de/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20180807140117-3d87b88a115f/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= @@ -1411,6 +1419,8 @@ golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug golang.org/x/net v0.2.0/go.mod h1:KqCZLdyyvdV855qA2rE3GC2aiw5xGR5TEjj8smXukLY= golang.org/x/net v0.19.0 h1:zTwKpTd2XuCqf8huc7Fo2iSy+4RHPd10s4KzeTnVr1c= golang.org/x/net v0.19.0/go.mod h1:CfAk/cbD4CthTvqiEl8NpboMuiuOYsAr/7NOjZJtv1U= +golang.org/x/net v0.22.0 h1:9sGLhx7iRIHEiX0oAJ3MRZMUCElJgy7Br1nO+AMN3Tc= +golang.org/x/net v0.22.0/go.mod h1:JKghWKKOSdJwpW2GEx0Ja7fmaKnMsbu+MWVZTokSYmg= golang.org/x/oauth2 v0.0.0-20180227000427-d7d64896b5ff/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20181106182150-f42d05182288/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= @@ -1431,6 +1441,8 @@ golang.org/x/oauth2 v0.0.0-20210819190943-2bc19b11175f/go.mod h1:KelEdhl1UZF7XfJ golang.org/x/oauth2 
v0.0.0-20211104180415-d3ed0bb246c8/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= golang.org/x/oauth2 v0.5.0 h1:HuArIo48skDwlrvM3sEdHXElYslAMsf3KwRkkW4MC4s= golang.org/x/oauth2 v0.5.0/go.mod h1:9/XBHVqLaWO3/BRHs5jbpYCnOZVjj5V0ndyaAM7KB4I= +golang.org/x/oauth2 v0.18.0 h1:09qnuIAgzdx1XplqJvW6CQqMCtGZykZWcXzPMPUusvI= +golang.org/x/oauth2 v0.18.0/go.mod h1:Wf7knwG0MPoWIMMBgFlEaSUDaKskp0dCfrlJRJXbBi8= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -1570,6 +1582,8 @@ golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.2.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.15.0 h1:h48lPFYpsTvQJZF4EKyI4aLHaev3CxivZmv7yZig9pc= golang.org/x/sys v0.15.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.18.0 h1:DBdB3niSjOA/O0blCZBqDefyWNYveAYMNF1Wum0DYQ4= +golang.org/x/sys v0.18.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210220032956-6a3ed077a48d/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= @@ -1585,6 +1599,7 @@ golang.org/x/text v0.3.4/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= +golang.org/x/text v0.3.8/go.mod h1:E6s5w1FMmriuDzIBO73fBruAKo1PCIq6d2Q6DHfQ8WQ= golang.org/x/text v0.4.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= golang.org/x/text v0.14.0 h1:ScX5w1eTa3QqT8oi6+ziP7dTV1S2+ALU0bI+0zXKWiQ= golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU= @@ -1732,6 +1747,8 @@ google.golang.org/appengine v1.6.5/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCID google.golang.org/appengine v1.6.6/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= google.golang.org/appengine v1.6.7 h1:FZR1q0exgwxzPzp/aF+VccGrSfxfPpkBqjIIEq3ru6c= google.golang.org/appengine v1.6.7/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= +google.golang.org/appengine v1.6.8 h1:IhEN5q69dyKagZPYMSdIjS2HqprW324FRQZJcGqPAsM= +google.golang.org/appengine v1.6.8/go.mod h1:1jJ3jBArFh5pcgW8gCtRJnepW8FzD1V44FJffLiz/Ds= google.golang.org/cloud v0.0.0-20151119220103-975617b05ea8/go.mod h1:0H1ncTHf11KCFhTc/+EFRbzSCOZx+VUbRMk55Yv5MYk= google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= @@ -1854,6 +1871,8 @@ google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQ google.golang.org/protobuf v1.27.1/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= google.golang.org/protobuf v1.30.0 h1:kPPoIgf3TsEvrm0PFe15JQ+570QVxYzEvvHqChK+cng= google.golang.org/protobuf v1.30.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= +google.golang.org/protobuf v1.31.0 h1:g0LDEJHgrBl9N9r17Ru3sqWhkIx2NB67okBHPwC7hs8= 
+google.golang.org/protobuf v1.31.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= gopkg.in/airbrake/gobrake.v2 v2.0.9/go.mod h1:/h5ZAUhDkGaJfjzjKLSjv6zCL6O0LLBxU4K+aSYdM/U= gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= diff --git a/internal/auth/auth.go b/internal/auth/auth.go index e8f0db4..fe3edad 100644 --- a/internal/auth/auth.go +++ b/internal/auth/auth.go @@ -27,18 +27,18 @@ type Authenticator interface { } type Authentication struct { - sessionStore *sessions.CookieStore - SessionMaxAge time.Duration - - authenticators []Authenticator + sessionStore *sessions.CookieStore LdapAuth *LdapAuthenticator JwtAuth *JWTAuthenticator LocalAuth *LocalAuthenticator + authenticators []Authenticator + SessionMaxAge time.Duration } func (auth *Authentication) AuthViaSession( rw http.ResponseWriter, - r *http.Request) (*schema.User, error) { + r *http.Request, +) (*schema.User, error) { session, err := auth.sessionStore.Get(r, "session") if err != nil { log.Error("Error while getting session store") @@ -131,8 +131,8 @@ func Init() (*Authentication, error) { func (auth *Authentication) Login( onsuccess http.Handler, - onfailure func(rw http.ResponseWriter, r *http.Request, loginErr error)) http.Handler { - + onfailure func(rw http.ResponseWriter, r *http.Request, loginErr error), +) http.Handler { return http.HandlerFunc(func(rw http.ResponseWriter, r *http.Request) { username := r.FormValue("username") var dbUser *schema.User @@ -193,10 +193,9 @@ func (auth *Authentication) Login( func (auth *Authentication) Auth( onsuccess http.Handler, - onfailure func(rw http.ResponseWriter, r *http.Request, authErr error)) http.Handler { - + onfailure func(rw http.ResponseWriter, r *http.Request, authErr error), +) http.Handler { return http.HandlerFunc(func(rw http.ResponseWriter, r *http.Request) { - user, err := auth.JwtAuth.AuthViaJWT(rw, r) if err != nil { log.Infof("authentication failed: %s", err.Error()) diff --git a/internal/auth/oidc.go b/internal/auth/oidc.go new file mode 100644 index 0000000..480b212 --- /dev/null +++ b/internal/auth/oidc.go @@ -0,0 +1,73 @@ +// Copyright (C) 2022 NHR@FAU, University Erlangen-Nuremberg. +// All rights reserved. +// Use of this source code is governed by a MIT-style +// license that can be found in the LICENSE file. 
+package auth + +import ( + "context" + "log" + "net/http" + + "github.com/coreos/go-oidc/v3/oidc" + "github.com/gorilla/mux" + "golang.org/x/oauth2" +) + +type OIDC struct { + client *oauth2.Config + provider *oidc.Provider + state string + codeVerifier string +} + +func (oa *OIDC) Init(r *mux.Router) error { + oa.client = &oauth2.Config{ + ClientID: "YOUR_CLIENT_ID", + ClientSecret: "YOUR_CLIENT_SECRET", + Endpoint: oauth2.Endpoint{ + AuthURL: "https://provider.com/o/oauth2/auth", + TokenURL: "https://provider.com/o/oauth2/token", + }, + } + + provider, err := oidc.NewProvider(context.Background(), "https://provider") + if err != nil { + log.Fatal(err) + } + + oa.provider = provider + + r.HandleFunc("/oidc-login", oa.OAuth2Login) + r.HandleFunc("/oidc-callback", oa.OAuth2Callback) + + return nil +} + +func (oa *OIDC) OAuth2Callback(rw http.ResponseWriter, r *http.Request) { + _ = r.ParseForm() + state := r.Form.Get("state") + if state != oa.state { + http.Error(rw, "State invalid", http.StatusBadRequest) + return + } + code := r.Form.Get("code") + if code == "" { + http.Error(rw, "Code not found", http.StatusBadRequest) + return + } + token, err := oa.client.Exchange(context.Background(), code, oauth2.VerifierOption(oa.codeVerifier)) + if err != nil { + http.Error(rw, err.Error(), http.StatusInternalServerError) + return + } +} + +func (oa *OIDC) OAuth2Login(rw http.ResponseWriter, r *http.Request) { + // use PKCE to protect against CSRF attacks + oa.codeVerifier = oauth2.GenerateVerifier() + + // Redirect user to consent page to ask for permission + url := oa.client.AuthCodeURL("state", oauth2.AccessTypeOffline, oauth2.S256ChallengeOption(oa.codeVerifier)) + http.Redirect(rw, r, url, http.StatusFound) +} From e92e727279b8cf006047b87031c733f46abc6438 Mon Sep 17 00:00:00 2001 From: Jan Eitzinger Date: Wed, 13 Mar 2024 17:09:36 +0100 Subject: [PATCH 66/93] Extend oidc auth provider --- internal/auth/ldap.go | 11 +++--- internal/auth/oidc.go | 84 ++++++++++++++++++++++++++++++++----------- 2 files changed, 69 insertions(+), 26 deletions(-) diff --git a/internal/auth/ldap.go b/internal/auth/ldap.go index b800ca7..d9888ca 100644 --- a/internal/auth/ldap.go +++ b/internal/auth/ldap.go @@ -21,7 +21,7 @@ import ( type LdapAuthenticator struct { syncPassword string - UserAttr string + UserAttr string } var _ Authenticator = (*LdapAuthenticator)(nil) @@ -74,8 +74,8 @@ func (la *LdapAuthenticator) CanLogin( user *schema.User, username string, rw http.ResponseWriter, - r *http.Request) (*schema.User, bool) { - + r *http.Request, +) (*schema.User, bool) { lc := config.Keys.LdapConfig if user != nil { @@ -138,8 +138,8 @@ func (la *LdapAuthenticator) CanLogin( func (la *LdapAuthenticator) Login( user *schema.User, rw http.ResponseWriter, - r *http.Request) (*schema.User, error) { - + r *http.Request, +) (*schema.User, error) { l, err := la.getLdapConnection(false) if err != nil { log.Warn("Error while getting ldap connection") @@ -238,7 +238,6 @@ func (la *LdapAuthenticator) Sync() error { } func (la *LdapAuthenticator) getLdapConnection(admin bool) (*ldap.Conn, error) { - lc := config.Keys.LdapConfig conn, err := ldap.DialURL(lc.Url) if err != nil { diff --git a/internal/auth/oidc.go b/internal/auth/oidc.go index 480b212..cfcf5b6 100644 --- a/internal/auth/oidc.go +++ b/internal/auth/oidc.go @@ -6,38 +6,59 @@ package auth import ( "context" + "crypto/rand" + "encoding/base64" + "io" "log" "net/http" + "strings" + "time" + "github.com/ClusterCockpit/cc-backend/internal/config" 
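+// Summary of the flow implemented in this file: OAuth2Login generates a
+// PKCE code verifier and redirects the user to the provider's consent
+// page; OAuth2Callback compares the returned state against the stored one
+// and only then exchanges the authorization code, together with the
+// verifier, for a token.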
"github.com/coreos/go-oidc/v3/oidc" "github.com/gorilla/mux" "golang.org/x/oauth2" ) type OIDC struct { - client *oauth2.Config - provider *oidc.Provider - state string - codeVerifier string + client *oauth2.Config + provider *oidc.Provider +} + +func randString(nByte int) (string, error) { + b := make([]byte, nByte) + if _, err := io.ReadFull(rand.Reader, b); err != nil { + return "", err + } + return base64.RawURLEncoding.EncodeToString(b), nil +} + +func setCallbackCookie(w http.ResponseWriter, r *http.Request, name, value string) { + c := &http.Cookie{ + Name: name, + Value: value, + MaxAge: int(time.Hour.Seconds()), + Secure: r.TLS != nil, + HttpOnly: true, + } + http.SetCookie(w, c) } func (oa *OIDC) Init(r *mux.Router) error { - oa.client = &oauth2.Config{ - ClientID: "YOUR_CLIENT_ID", - ClientSecret: "YOUR_CLIENT_SECRET", - Endpoint: oauth2.Endpoint{ - AuthURL: "https://provider.com/o/oauth2/auth", - TokenURL: "https://provider.com/o/oauth2/token", - }, - } - provider, err := oidc.NewProvider(context.Background(), "https://provider") if err != nil { log.Fatal(err) } - oa.provider = provider + oa.client = &oauth2.Config{ + ClientID: "YOUR_CLIENT_ID", + ClientSecret: "YOUR_CLIENT_SECRET", + Endpoint: provider.Endpoint(), + RedirectURL: "https://" + config.Keys.Addr + "/oidc-callback", + Scopes: []string{oidc.ScopeOpenID, "profile", "email"}, + } + r.HandleFunc("/oidc-login", oa.OAuth2Login) r.HandleFunc("/oidc-callback", oa.OAuth2Callback) @@ -45,9 +66,18 @@ func (oa *OIDC) Init(r *mux.Router) error { } func (oa *OIDC) OAuth2Callback(rw http.ResponseWriter, r *http.Request) { + c, err := r.Cookie("state") + if err != nil { + http.Error(rw, "state not found", http.StatusBadRequest) + return + } + + str := strings.Split(c.Value, " ") + state := str[0] + codeVerifier := str[1] + _ = r.ParseForm() - state := r.Form.Get("state") - if state != oa.state { + if r.Form.Get("state") != state { http.Error(rw, "State invalid", http.StatusBadRequest) return } @@ -56,18 +86,32 @@ func (oa *OIDC) OAuth2Callback(rw http.ResponseWriter, r *http.Request) { http.Error(rw, "Code not found", http.StatusBadRequest) return } - token, err := oa.client.Exchange(context.Background(), code, oauth2.VerifierOption(oa.codeVerifier)) + token, err := oa.client.Exchange(context.Background(), code, oauth2.VerifierOption(codeVerifier)) if err != nil { - http.Error(rw, err.Error(), http.StatusInternalServerError) + http.Error(rw, "Failed to exchange token: "+err.Error(), http.StatusInternalServerError) + return + } + + userInfo, err := oa.provider.UserInfo(context.Background(), oauth2.StaticTokenSource(token)) + if err != nil { + http.Error(rw, "Failed to get userinfo: "+err.Error(), http.StatusInternalServerError) return } } func (oa *OIDC) OAuth2Login(rw http.ResponseWriter, r *http.Request) { + state, err := randString(16) + if err != nil { + http.Error(rw, "Internal error", http.StatusInternalServerError) + return + } + // use PKCE to protect against CSRF attacks - oa.codeVerifier = oauth2.GenerateVerifier() + codeVerifier := oauth2.GenerateVerifier() + + setCallbackCookie(rw, r, "state", strings.Join([]string{state, codeVerifier}, " ")) // Redirect user to consent page to ask for permission - url := oa.client.AuthCodeURL("state", oauth2.AccessTypeOffline, oauth2.S256ChallengeOption(oa.codeVerifier)) + url := oa.client.AuthCodeURL(state, oauth2.AccessTypeOffline, oauth2.S256ChallengeOption(codeVerifier)) http.Redirect(rw, r, url, http.StatusFound) } From ce792426e6cfc2877eaa3797eb8c042f81c9810f Mon Sep 17 00:00:00 
2001 From: Michael Schwarz Date: Thu, 14 Mar 2024 09:15:15 +0100 Subject: [PATCH 67/93] Disable foreign key check while updating database --- internal/repository/migrations/mysql/07_fix-tag-id.down.sql | 2 ++ internal/repository/migrations/mysql/07_fix-tag-id.up.sql | 2 ++ 2 files changed, 4 insertions(+) diff --git a/internal/repository/migrations/mysql/07_fix-tag-id.down.sql b/internal/repository/migrations/mysql/07_fix-tag-id.down.sql index 4172f4e..9f9959a 100644 --- a/internal/repository/migrations/mysql/07_fix-tag-id.down.sql +++ b/internal/repository/migrations/mysql/07_fix-tag-id.down.sql @@ -1 +1,3 @@ +SET FOREIGN_KEY_CHECKS = 0; ALTER TABLE tag MODIFY id INTEGER; +SET FOREIGN_KEY_CHECKS = 1; diff --git a/internal/repository/migrations/mysql/07_fix-tag-id.up.sql b/internal/repository/migrations/mysql/07_fix-tag-id.up.sql index f8d805f..1abc4b3 100644 --- a/internal/repository/migrations/mysql/07_fix-tag-id.up.sql +++ b/internal/repository/migrations/mysql/07_fix-tag-id.up.sql @@ -1 +1,3 @@ +SET FOREIGN_KEY_CHECKS = 0; ALTER TABLE tag MODIFY id INTEGER AUTO_INCREMENT; +SET FOREIGN_KEY_CHECKS = 1; From 58415ab5c31fa9778d4a6c0c7d4b57707af015c1 Mon Sep 17 00:00:00 2001 From: Christoph Kluge Date: Thu, 14 Mar 2024 10:35:14 +0100 Subject: [PATCH 68/93] Adapt for accs in shared threshold s --- web/frontend/src/JobFootprint.svelte | 20 +++++++++++++------- web/frontend/src/joblist/Row.svelte | 3 ++- web/frontend/src/plots/MetricPlot.svelte | 19 ++++++++++++------- 3 files changed, 27 insertions(+), 15 deletions(-) diff --git a/web/frontend/src/JobFootprint.svelte b/web/frontend/src/JobFootprint.svelte index 30034c0..06597de 100644 --- a/web/frontend/src/JobFootprint.svelte +++ b/web/frontend/src/JobFootprint.svelte @@ -20,12 +20,12 @@ const subclusterConfig = clusters.find((c) => c.name == job.cluster).subClusters.find((sc) => sc.name == job.subCluster) const footprintMetrics = (job.numAcc !== 0) - ? (job.exclusive !== 1) - ? ['cpu_load', 'flops_any', 'acc_utilization'] - : ['cpu_load', 'flops_any', 'acc_utilization', 'mem_bw'] - : (job.exclusive !== 1) - ? ['cpu_load', 'flops_any', 'mem_used'] - : ['cpu_load', 'flops_any', 'mem_used', 'mem_bw'] + ? (job.exclusive !== 1) // GPU + ? ['acc_utilization', 'acc_mem_used', 'nv_sm_clock', 'nv_mem_util'] // Shared + : ['acc_utilization', 'acc_mem_used', 'nv_sm_clock', 'nv_mem_util'] // Exclusive + : (job.exclusive !== 1) // CPU only + ? 
From 58415ab5c31fa9778d4a6c0c7d4b57707af015c1 Mon Sep 17 00:00:00 2001
From: Christoph Kluge
Date: Thu, 14 Mar 2024 10:35:14 +0100
Subject: [PATCH 68/93] Adapt for accs in shared thresholds

---
 web/frontend/src/JobFootprint.svelte     | 20 +++++++++++++-------
 web/frontend/src/joblist/Row.svelte      |  3 ++-
 web/frontend/src/plots/MetricPlot.svelte | 19 ++++++++++++-------
 3 files changed, 27 insertions(+), 15 deletions(-)

diff --git a/web/frontend/src/JobFootprint.svelte b/web/frontend/src/JobFootprint.svelte
index 30034c0..06597de 100644
--- a/web/frontend/src/JobFootprint.svelte
+++ b/web/frontend/src/JobFootprint.svelte
@@ -20,12 +20,12 @@
     const subclusterConfig = clusters.find((c) => c.name == job.cluster).subClusters.find((sc) => sc.name == job.subCluster)

     const footprintMetrics = (job.numAcc !== 0)
-        ? (job.exclusive !== 1)
-            ? ['cpu_load', 'flops_any', 'acc_utilization']
-            : ['cpu_load', 'flops_any', 'acc_utilization', 'mem_bw']
-        : (job.exclusive !== 1)
-            ? ['cpu_load', 'flops_any', 'mem_used']
-            : ['cpu_load', 'flops_any', 'mem_used', 'mem_bw']
+        ? (job.exclusive !== 1) // GPU
+            ? ['acc_utilization', 'acc_mem_used', 'nv_sm_clock', 'nv_mem_util'] // Shared
+            : ['acc_utilization', 'acc_mem_used', 'nv_sm_clock', 'nv_mem_util'] // Exclusive
+        : (job.exclusive !== 1) // CPU only
+            ? ['flops_any', 'mem_used'] // Shared
+            : ['cpu_load', 'flops_any', 'mem_used', 'mem_bw'] // Exclusive

     const footprintData = footprintMetrics.map((fm) => {
         // Mean: Primarily use backend sourced avgs from job.*, secondarily calculate/read from metricdata
@@ -155,7 +155,13 @@
         } else if (metricConfig.aggregation === 'avg' ){
             return defaultThresholds
         } else if (metricConfig.aggregation === 'sum' ){
-            const jobFraction = job.numHWThreads / subClusterConfig.topology.node.length
+            let jobFraction = 0.0
+            if (job.numAcc > 0) {
+                jobFraction = job.numAcc / subClusterConfig.topology.accelerators.length
+            } else if (job.numHWThreads > 0) {
+                jobFraction = job.numHWThreads / subClusterConfig.topology.node.length
+            }
+
             return {
                 peak: round((defaultThresholds.peak * jobFraction), 0),
                 normal: round((defaultThresholds.normal * jobFraction), 0),
                 caution: round((defaultThresholds.caution * jobFraction), 0),
                 alert: round((defaultThresholds.alert * jobFraction), 0),
diff --git a/web/frontend/src/joblist/Row.svelte b/web/frontend/src/joblist/Row.svelte
index 4d9013c..c5bd515 100644
--- a/web/frontend/src/joblist/Row.svelte
+++ b/web/frontend/src/joblist/Row.svelte
@@ -163,7 +163,8 @@
                     subCluster={job.subCluster}
                     isShared={(job.exclusive != 1)}
                     resources={job.resources}
-                    hwthreads={job.numHWThreads}
+                    numhwthreads={job.numHWThreads}
+                    numaccs={job.numAcc}
                 />
             {:else if metric.disabled == true && metric.data}
                 Metric disabled for subcluster {metric.data.name}:{job.subCluster}
diff --git a/web/frontend/src/plots/MetricPlot.svelte b/web/frontend/src/plots/MetricPlot.svelte
index 7bd264c..e7ae5b1 100644
--- a/web/frontend/src/plots/MetricPlot.svelte
+++ b/web/frontend/src/plots/MetricPlot.svelte
@@ -39,7 +39,8 @@
     export let subCluster
     export let isShared = false
     export let forNode = false
-    export let hwthreads = 0
+    export let numhwthreads = 0
+    export let numaccs = 0

     if (useStatsSeries == null)
         useStatsSeries = statisticsSeries != null
@@ -54,7 +55,7 @@
     const lineWidth = clusterCockpitConfig.plot_general_lineWidth / window.devicePixelRatio
     const lineColors = clusterCockpitConfig.plot_general_colorscheme
     const backgroundColors = { normal: 'rgba(255, 255, 255, 1.0)', caution: 'rgba(255, 128, 0, 0.3)', alert: 'rgba(255, 0, 0, 0.3)' }
-    const thresholds = findThresholds(metricConfig, scope, typeof subCluster == 'string' ? cluster.subClusters.find(sc => sc.name == subCluster) : subCluster, isShared, hwthreads)
+    const thresholds = findThresholds(metricConfig, scope, typeof subCluster == 'string' ? cluster.subClusters.find(sc => sc.name == subCluster) : subCluster, isShared, numhwthreads, numaccs)

     // converts the legend into a simple tooltip
     function legendAsTooltipPlugin({ className, style = { backgroundColor:"rgba(255, 249, 196, 0.92)", color: "black" } } = {}) {

From e347659db4d19237a45c56686a3a0da69021ed3d Mon Sep 17 00:00:00 2001
From: Christoph Kluge
Date: Thu, 14 Mar 2024 11:09:18 +0100
Subject: [PATCH 69/93] moved module context script

---
 web/frontend/src/JobFootprint.svelte | 44 ----------------------------
 1 file changed, 44 deletions(-)

diff --git a/web/frontend/src/JobFootprint.svelte b/web/frontend/src/JobFootprint.svelte
index 64e20c1..126ab7b 100644
--- a/web/frontend/src/JobFootprint.svelte
+++ b/web/frontend/src/JobFootprint.svelte
@@ -196,50 +196,6 @@
 }
-
-
 {#if view === "job"}
From 82f5257cf1d66ff4834c916f990fee4939d4ce9b Mon Sep 17 00:00:00 2001
From: Christoph Kluge
Date: Thu, 14 Mar 2024 14:24:54 +0100
Subject: [PATCH 70/93] fix merge bugs

---
 web/frontend/src/plots/MetricPlot.svelte | 9 ++++++---
 1 file changed, 6 insertions(+), 3 deletions(-)

diff --git a/web/frontend/src/plots/MetricPlot.svelte b/web/frontend/src/plots/MetricPlot.svelte
index 9e193cb..bd44675 100644
--- a/web/frontend/src/plots/MetricPlot.svelte
+++ b/web/frontend/src/plots/MetricPlot.svelte
@@ -32,7 +32,8 @@
     scope,
     subCluster,
     isShared,
-    hwthreads,
+    numhwthreads,
+    numaccs
 ) {
     // console.log('NAME ' + metricConfig.name + ' / SCOPE ' + scope + ' / SUBCLUSTER ' + subCluster.name)
     if (!metricConfig || !scope || !subCluster) {
@@ -157,7 +158,8 @@
   export let subCluster;
   export let isShared = false;
   export let forNode = false;
-  export let hwthreads = 0;
+  export let numhwthreads = 0;
+  export let numaccs = 0;

   if (useStatsSeries == null) useStatsSeries = statisticsSeries != null;
@@ -182,7 +184,8 @@
       ? cluster.subClusters.find((sc) => sc.name == subCluster)
       : subCluster,
     isShared,
-    hwthreads,
+    numhwthreads,
+    numaccs
   );

   // converts the legend into a simple tooltip

From 849b7e038d3ce4fb1ba4d583e2792f34ebec8024 Mon Sep 17 00:00:00 2001
From: Christoph Kluge
Date: Thu, 14 Mar 2024 15:14:19 +0100
Subject: [PATCH 71/93] Fix: make footprint display configurable app-wide

- note: requires full ui-defaults object in config
---
 internal/config/config.go        | 1 +
 web/frontend/src/Job.root.svelte | 6 +++++-
 2 files changed, 6 insertions(+), 1 deletion(-)

diff --git a/internal/config/config.go b/internal/config/config.go
index 253951c..76fd62a 100644
--- a/internal/config/config.go
+++ b/internal/config/config.go
@@ -31,6 +31,7 @@ var Keys schema.ProgramConfig = schema.ProgramConfig{
 		"job_view_nodestats_selectedMetrics": []string{"flops_any", "mem_bw", "mem_used"},
 		"job_view_polarPlotMetrics":          []string{"flops_any", "mem_bw", "mem_used"},
 		"job_view_selectedMetrics":           []string{"flops_any", "mem_bw", "mem_used"},
+		"job_view_showFootprint":             true,
 		"plot_general_colorBackground":       true,
 		"plot_general_colorscheme":           []string{"#00bfff", "#0000ff", "#ff00ff", "#ff0000", "#ff8000", "#ffff00", "#80ff00"},
 		"plot_general_lineWidth":             3,
diff --git a/web/frontend/src/Job.root.svelte b/web/frontend/src/Job.root.svelte
index 2020e1d..758cef9 100644
--- a/web/frontend/src/Job.root.svelte
+++ b/web/frontend/src/Job.root.svelte
@@ -65,6 +65,7 @@
   let isMetricsSelectionOpen = false,
     selectedMetrics = [],
+    showFootprint = true,
     isFetched = new Set();
   const [jobMetrics, startFetching] = fetchMetricsStore();
   getContext("on-init")(() => {
@@ -77,6 +78,9 @@
       .find((c) => c.name == job.cluster)
       .metricConfig.map((mc) => mc.name);

+    showFootprint =
+      ccconfig[`job_view_showFootprint`]
+
     let toFetch = new Set([
       "flops_any",
       "mem_bw",
@@ -209,7 +213,7 @@
     {/if}
   </Col>
-  {#if $jobMetrics.data}
+  {#if $jobMetrics.data && showFootprint}
     {#key $jobMetrics.data}

Date: Mon, 18 Mar 2024 18:57:15 +0100
Subject: [PATCH 72/93] Fix: Simplify footprint logic, fix aggregated sum
 values

---
 web/frontend/src/JobFootprint.svelte | 108 ++++++++++++++-------
 1 file changed, 55 insertions(+), 53 deletions(-)

diff --git a/web/frontend/src/JobFootprint.svelte b/web/frontend/src/JobFootprint.svelte
index 126ab7b..8ed8089 100644
--- a/web/frontend/src/JobFootprint.svelte
+++ b/web/frontend/src/JobFootprint.svelte
@@ -23,38 +23,25 @@
             : metricConfig.alert,
     };

-    if (job.exclusive === 1) {
-        // Exclusive: Use as defined
+    // Job_Exclusivity does not matter, only aggregation
+    if (metricConfig.aggregation === "avg") {
         return defaultThresholds;
+    } else if (metricConfig.aggregation === "sum") {
+        const jobFraction =
+            job.numHWThreads / subClusterConfig.topology.node.length;
+        return {
+            peak: round(defaultThresholds.peak * jobFraction, 0),
+            normal: round(defaultThresholds.normal * jobFraction, 0),
+            caution: round(defaultThresholds.caution * jobFraction, 0),
+            alert: round(defaultThresholds.alert * jobFraction, 0),
+        };
     } else {
-        // Shared: Handle specifically
-        if (metricConfig.name === "cpu_load") {
-            // Special: Avg Aggregation BUT scaled based on #hwthreads
-            return {
-                peak: job.numHWThreads,
-                normal: job.numHWThreads,
-                caution: defaultThresholds.caution,
-                alert: defaultThresholds.alert,
-            };
-        } else if (metricConfig.aggregation === "avg") {
-            return defaultThresholds;
-        } else if (metricConfig.aggregation === "sum") {
-            const jobFraction =
-                job.numHWThreads / subClusterConfig.topology.node.length;
-            return {
-                peak: round(defaultThresholds.peak * jobFraction, 0),
-                normal: round(defaultThresholds.normal * jobFraction, 0),
-                caution: round(defaultThresholds.caution * jobFraction, 0),
-                alert: round(defaultThresholds.alert * jobFraction, 0),
-            };
-        } else {
-            console.warn(
-                "Missing or unkown aggregation mode (sum/avg) for metric:",
-                metricConfig,
-            );
-            return null;
-        }
-    } // Other job.exclusive cases?
+        console.warn(
+            "Missing or unknown aggregation mode (sum/avg) for metric:",
+            metricConfig,
+        );
+        return defaultThresholds;
+    }
 }
@@ -91,29 +78,6 @@
             : ["cpu_load", "flops_any", "mem_used", "mem_bw"]; // Exclusive

     const footprintData = footprintMetrics.map((fm) => {
-        // Mean: Primarily use backend sourced avgs from job.*, secondarily calculate/read from metricdata
-        let mv = null;
-        if (fm === "cpu_load" && job.loadAvg !== 0) {
-            mv = round(job.loadAvg, 2);
-        } else if (fm === "flops_any" && job.flopsAnyAvg !== 0) {
-            mv = round(job.flopsAnyAvg, 2);
-        } else if (fm === "mem_bw" && job.memBwAvg !== 0) {
-            mv = round(job.memBwAvg, 2);
-        } else {
-            // Calculate from jobMetrics
-            const jm = jobMetrics.find((jm) => jm.name === fm && jm.scope === "node");
-            if (jm?.metric?.statisticsSeries) {
-                mv = round(mean(jm.metric.statisticsSeries.mean), 2);
-            } else if (jm?.metric?.series?.length > 1) {
-                const avgs = jm.metric.series.map((jms) => jms.statistics.avg);
-                mv = round(mean(avgs), 2);
-            } else if (jm?.metric?.series) {
-                mv = round(jm.metric.series[0].statistics.avg, 2);
-            } else {
-                mv = 0.0;
-            }
-        }
-
         // Unit
         const fmc = getContext("metrics")(job.cluster, fm);
         let unit = "";
@@ -123,6 +87,44 @@
         const fmt = findJobThresholds(job, fmc, subclusterConfig);
         if (fm === "flops_any") fmt.peak = round(fmt.peak * 0.85, 0);

+        // Value: Primarily use backend sourced avgs from job.*, secondarily calculate/read from metricdata
+        // Exclusivity does not matter
+        let mv = 0.0;
+        if (fmc.aggregation === "avg") {
+            if (fm === "cpu_load" && job.loadAvg !== 0) {
+                mv = round(job.loadAvg, 2);
+            } else if (fm === "flops_any" && job.flopsAnyAvg !== 0) {
+                mv = round(job.flopsAnyAvg, 2);
+            } else if (fm === "mem_bw" && job.memBwAvg !== 0) {
+                mv = round(job.memBwAvg, 2);
+            } else {
+                // Calculate Avg from jobMetrics
+                const jm = jobMetrics.find((jm) => jm.name === fm && jm.scope === "node");
+                if (jm?.metric?.statisticsSeries) {
+                    mv = round(mean(jm.metric.statisticsSeries.mean), 2);
+                } else if (jm?.metric?.series?.length > 1) {
+                    const avgs = jm.metric.series.map((jms) => jms.statistics.avg);
+                    mv = round(mean(avgs), 2);
+                } else if (jm?.metric?.series) {
+                    mv = round(jm.metric.series[0].statistics.avg, 2);
+                }
+            }
+        } else if (fmc.aggregation === "sum") {
+            // Calculate Sum from jobMetrics: Sum all node averages
+            const jm = jobMetrics.find((jm) => jm.name === fm && jm.scope === "node");
+            if (jm?.metric?.series?.length > 1) { // More than 1 node
+                const avgs = jm.metric.series.map((jms) => jms.statistics.avg);
+                mv = round(avgs.reduce((a, b) => a + b, 0));
+            } else if (jm?.metric?.series) {
+                mv = round(jm.metric.series[0].statistics.avg, 2);
+            }
+        } else {
+            console.warn(
+                "Missing or unknown aggregation mode (sum/avg) for metric:",
+                metricConfig,
+            );
+        }
+
+        // Define basic data
         const fmBase = {
             name: fm,
From 0005469101722a79158ade60c2bccb2c70f2c7c1 Mon Sep 17 00:00:00 2001
From: Jan Eitzinger
Date: Tue, 19 Mar 2024 16:16:02 +0100
Subject: [PATCH 73/93] Refactor

---
 internal/repository/job.go | 10 ++++------
 1 file changed, 4 insertions(+), 6 deletions(-)

diff --git a/internal/repository/job.go b/internal/repository/job.go
index 0de871a..329c0ba 100644
--- a/internal/repository/job.go
+++ b/internal/repository/job.go
@@ -30,13 +30,11 @@ var (
 )

 type JobRepository struct {
-	DB     *sqlx.DB
-	driver string
-
-	stmtCache *sq.StmtCache
-	cache     *lrucache.Cache
-
+	DB             *sqlx.DB
+	stmtCache      *sq.StmtCache
+	cache          *lrucache.Cache
 	archiveChannel chan *schema.Job
+	driver         string
 	archivePending sync.WaitGroup
 }

From c65694b36c79c550370e7561fa37d1a8b8169a17 Mon Sep 17 00:00:00 2001
From: Jan Eitzinger
Date: Tue, 19 Mar 2024 16:18:43 +0100
Subject: [PATCH 74/93] Add tags and metadata to job queries. Fix query
 parameter handling. Fixes #248

---
 internal/api/rest.go | 39 +++++++++++++++++++++++++++++++--------
 1 file changed, 31 insertions(+), 8 deletions(-)

diff --git a/internal/api/rest.go b/internal/api/rest.go
index 807e7ae..dadabcd 100644
--- a/internal/api/rest.go
+++ b/internal/api/rest.go
@@ -114,12 +114,11 @@ type UpdateUserApiResponse struct {

 // StopJobApiRequest model
 type StopJobApiRequest struct {
-	// Stop Time of job as epoch
+	JobId     *int64          `json:"jobId" example:"123000"`
+	Cluster   *string         `json:"cluster" example:"fritz"`
+	StartTime *int64          `json:"startTime" example:"1649723812"`
+	State     schema.JobState `json:"jobState" validate:"required" example:"completed"`
 	StopTime  int64           `json:"stopTime" validate:"required" example:"1649763839"`
-	State     schema.JobState `json:"jobState" validate:"required" example:"completed"` // Final job state
-	JobId     *int64          `json:"jobId" example:"123000"`                           // Cluster Job ID of job
-	Cluster   *string         `json:"cluster" example:"fritz"`                          // Cluster of job
-	StartTime *int64          `json:"startTime" example:"1649723812"`                   // Start Time of job as epoch
 }

 // DeleteJobApiRequest model
@@ -176,9 +175,9 @@ type GetCompleteJobApiResponse struct {
 }

 type JobMetricWithName struct {
+	Metric *schema.JobMetric  `json:"metric"`
 	Name   string             `json:"name"`
 	Scope  schema.MetricScope `json:"scope"`
-	Metric *schema.JobMetric  `json:"metric"`
 }

 type ApiReturnedUser struct {
@@ -482,6 +481,18 @@ func (api *RestApi) getCompleteJobById(rw http.ResponseWriter, r *http.Request)
 		return
 	}

+	job.Tags, err = api.JobRepository.GetTags(&job.ID)
+	if err != nil {
+		handleError(err, http.StatusInternalServerError, rw)
+		return
+
+	}
+	if _, err = api.JobRepository.FetchMetadata(job); err != nil {
+
+		handleError(err, http.StatusInternalServerError, rw)
+		return
+	}
+
 	var scopes []schema.MetricScope

 	if job.NumNodes == 1 {
@@ -492,7 +503,7 @@ func (api *RestApi) getCompleteJobById(rw http.ResponseWriter, r *http.Request)

 	var data schema.JobData

-	if r.URL.Query().Has("all-metrics") {
+	if r.URL.Query().Get("all-metrics") == "true" {
 		data, err = metricdata.LoadData(job, nil, scopes, r.Context())
 		if err != nil {
 			log.Warn("Error while loading job data")
@@ -564,6 +575,18 @@ func (api *RestApi) getJobById(rw http.ResponseWriter, r *http.Request) {
 		return
 	}

+	job.Tags, err = api.JobRepository.GetTags(&job.ID)
+	if err != nil {
+		handleError(err, http.StatusInternalServerError, rw)
+		return
+
+	}
+	if _, err = api.JobRepository.FetchMetadata(job); err != nil {
+
+		handleError(err, http.StatusInternalServerError, rw)
+		return
+	}
+
 	var metrics GetJobApiRequest
 	if err = decode(r.Body, &metrics); err != nil {
 		http.Error(rw, err.Error(), http.StatusBadRequest)
 		return
 	}
@@ -1218,7 +1241,7 @@ func (api *RestApi) createUser(rw http.ResponseWriter, r *http.Request) {
 		return
 	}

-	rw.Write([]byte(fmt.Sprintf("User %v successfully created!\n", username)))
+	fmt.Fprintf(rw, "User %v successfully created!\n", username)
 }

 // deleteUser godoc
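Two separate things happen in this patch: single-job REST responses are enriched with tags and metadata, and the `all-metrics` flag is now compared against its value instead of merely checked for presence. The latter is the #248 fix, and the difference is easy to demonstrate with `net/url` alone (the URL is a placeholder):

    package main

    import (
        "fmt"
        "net/url"
    )

    func main() {
        // A client sending an explicit "false" previously still enabled the flag.
        u, _ := url.Parse("https://cc.example.org/api/jobs/123?all-metrics=false")
        q := u.Query()

        fmt.Println(q.Has("all-metrics"))           // true  -> old check fires anyway
        fmt.Println(q.Get("all-metrics") == "true") // false -> new check honors the value
    }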
From 1e5f2944cf76137d72a77ddf72ee45afebc2e3bd Mon Sep 17 00:00:00 2001
From: Jan Eitzinger
Date: Thu, 21 Mar 2024 22:02:59 +0100
Subject: [PATCH 75/93] Upgrade dependencies. Port to jwt-auth v5.

---
 go.mod                            | 38 ++++++------
 go.sum                            | 93 ++++++++++++-----------------
 internal/auth/jwt.go              | 12 ++--
 internal/auth/jwtCookieSession.go | 15 +++--
 internal/auth/jwtSession.go       | 14 ++---
 5 files changed, 81 insertions(+), 91 deletions(-)

diff --git a/go.mod b/go.mod
index facfa00..4c57c15 100644
--- a/go.mod
+++ b/go.mod
@@ -3,13 +3,13 @@ module github.com/ClusterCockpit/cc-backend
 go 1.18

 require (
-	github.com/99designs/gqlgen v0.17.40
+	github.com/99designs/gqlgen v0.17.45
 	github.com/ClusterCockpit/cc-units v0.4.0
 	github.com/Masterminds/squirrel v1.5.3
 	github.com/go-co-op/gocron v1.25.0
 	github.com/go-ldap/ldap/v3 v3.4.4
 	github.com/go-sql-driver/mysql v1.7.0
-	github.com/golang-jwt/jwt/v4 v4.5.0
+	github.com/golang-jwt/jwt/v5 v5.2.1
 	github.com/golang-migrate/migrate/v4 v4.15.2
 	github.com/google/gops v0.3.27
 	github.com/gorilla/handlers v1.5.1
@@ -23,9 +23,9 @@ require (
 	github.com/qustavo/sqlhooks/v2 v2.1.0
 	github.com/santhosh-tekuri/jsonschema/v5 v5.2.0
 	github.com/swaggo/http-swagger v1.3.3
-	github.com/swaggo/swag v1.16.2
-	github.com/vektah/gqlparser/v2 v2.5.10
-	golang.org/x/crypto v0.16.0
+	github.com/swaggo/swag v1.16.3
+	github.com/vektah/gqlparser/v2 v2.5.11
+	golang.org/x/crypto v0.21.0
 	golang.org/x/exp v0.0.0-20230510235704-dd950f8aeaea
 )

@@ -37,21 +37,21 @@ require (
 	github.com/beorn7/perks v1.0.1 // indirect
 	github.com/cespare/xxhash/v2 v2.2.0 // indirect
 	github.com/containerd/containerd v1.6.18 // indirect
-	github.com/cpuguy83/go-md2man/v2 v2.0.3 // indirect
+	github.com/cpuguy83/go-md2man/v2 v2.0.4 // indirect
 	github.com/deepmap/oapi-codegen v1.12.4 // indirect
 	github.com/felixge/httpsnoop v1.0.3 // indirect
 	github.com/go-asn1-ber/asn1-ber v1.5.4 // indirect
-	github.com/go-openapi/jsonpointer v0.20.0 // indirect
-	github.com/go-openapi/jsonreference v0.20.2 // indirect
-	github.com/go-openapi/spec v0.20.9 // indirect
-	github.com/go-openapi/swag v0.22.4 // indirect
+	github.com/go-openapi/jsonpointer v0.21.0 // indirect
+	github.com/go-openapi/jsonreference v0.21.0 // indirect
+	github.com/go-openapi/spec v0.21.0 // indirect
+	github.com/go-openapi/swag v0.23.0 // indirect
 	github.com/golang/protobuf v1.5.2 // indirect
-	github.com/google/uuid v1.4.0 // indirect
+	github.com/google/uuid v1.6.0 // indirect
 	github.com/gorilla/securecookie v1.1.1 // indirect
 	github.com/gorilla/websocket v1.5.0 // indirect
 	github.com/hashicorp/errwrap v1.1.0 // indirect
 	github.com/hashicorp/go-multierror v1.1.1 // indirect
-	github.com/hashicorp/golang-lru/v2 v2.0.3 // indirect
+	github.com/hashicorp/golang-lru/v2 v2.0.7 // indirect
 	github.com/influxdata/line-protocol v0.0.0-20210922203350-b1ad95c89adf // indirect
 	github.com/josharian/intern v1.0.0 // indirect
 	github.com/jpillora/backoff v1.0.0 // indirect
@@ -72,17 +72,17 @@ require (
 	github.com/russross/blackfriday/v2 v2.1.0 // indirect
 	github.com/sosodev/duration v1.2.0 // indirect
 	github.com/swaggo/files v1.0.0 // indirect
-	github.com/urfave/cli/v2 v2.25.7 // indirect
-	github.com/xrash/smetrics v0.0.0-20201216005158-039620a65673 // indirect
+	github.com/urfave/cli/v2 v2.27.1 // indirect
+	github.com/xrash/smetrics v0.0.0-20240312152122-5f08fbb34913 // indirect
 	go.uber.org/atomic v1.10.0 // indirect
-	golang.org/x/mod v0.14.0 // indirect
-	golang.org/x/net v0.19.0 // indirect
+	golang.org/x/mod v0.16.0 // indirect
+	golang.org/x/net v0.22.0 // indirect
 	golang.org/x/oauth2 v0.5.0 // indirect
-
golang.org/x/sys v0.15.0 // indirect + golang.org/x/sys v0.18.0 // indirect golang.org/x/text v0.14.0 // indirect - golang.org/x/tools v0.16.0 // indirect + golang.org/x/tools v0.19.0 // indirect google.golang.org/appengine v1.6.7 // indirect - google.golang.org/protobuf v1.30.0 // indirect + google.golang.org/protobuf v1.33.0 // indirect gopkg.in/yaml.v2 v2.4.0 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect sigs.k8s.io/yaml v1.4.0 // indirect diff --git a/go.sum b/go.sum index abd7f71..86b2d68 100644 --- a/go.sum +++ b/go.sum @@ -50,8 +50,8 @@ cloud.google.com/go/storage v1.8.0/go.mod h1:Wv1Oy7z6Yz3DshWRJFhqM/UCfaWIRTdp0RX cloud.google.com/go/storage v1.10.0/go.mod h1:FLPqc6j+Ki4BU591ie1oL6qBQGu2Bl/tZ9ullr3+Kg0= dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU= gioui.org v0.0.0-20210308172011-57750fc8a0a6/go.mod h1:RSH6KIUZ0p2xy5zHDxgAM4zumjgTw83q2ge/PI+yyw8= -github.com/99designs/gqlgen v0.17.40 h1:/l8JcEVQ93wqIfmH9VS1jsAkwm6eAF1NwQn3N+SDqBY= -github.com/99designs/gqlgen v0.17.40/go.mod h1:b62q1USk82GYIVjC60h02YguAZLqYZtvWml8KkhJps4= +github.com/99designs/gqlgen v0.17.45 h1:bH0AH67vIJo8JKNKPJP+pOPpQhZeuVRQLf53dKIpDik= +github.com/99designs/gqlgen v0.17.45/go.mod h1:Bas0XQ+Jiu/Xm5E33jC8sES3G+iC2esHBMXcq0fUPs0= github.com/AdaLogics/go-fuzz-headers v0.0.0-20210715213245-6c3934b029d8/go.mod h1:CzsSbkDixRphAF5hS6wbMKq0eI6ccJRb7/A0M6JBnwg= github.com/Azure/azure-pipeline-go v0.2.3/go.mod h1:x841ezTBIMG6O3lAcl8ATHnsOPVl2bqk7S3ta6S6u4k= github.com/Azure/azure-sdk-for-go v16.2.1+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc= @@ -113,6 +113,7 @@ github.com/Microsoft/hcsshim/test v0.0.0-20210227013316-43a75bb4edd3/go.mod h1:m github.com/NYTimes/gziphandler v0.0.0-20170623195520-56545f4a5d46/go.mod h1:3wb06e3pkSAbeQ52E9H9iFoQsEEwGN64994WTCIhntQ= github.com/NYTimes/gziphandler v1.1.1/go.mod h1:n/CVRwUEOgIxrgPvAQhUUr9oeUtvrhMomdKFjzJNB0c= github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU= +github.com/PuerkitoBio/goquery v1.9.1 h1:mTL6XjbJTZdpfL+Gwl5U2h1l9yEkJjhmlTeV9VPW7UI= github.com/PuerkitoBio/purell v1.0.0/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0= github.com/PuerkitoBio/purell v1.1.1/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0= github.com/PuerkitoBio/urlesc v0.0.0-20160726150825-5bd2802263f2/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE= @@ -130,6 +131,7 @@ github.com/alecthomas/units v0.0.0-20190924025748-f65c72e2690d/go.mod h1:rBZYJk5 github.com/alexflint/go-filemutex v0.0.0-20171022225611-72bdc8eae2ae/go.mod h1:CgnQgUtFrFz9mxFNtED3jI5tLDjKlOM+oUF/sTk6ps0= github.com/alexflint/go-filemutex v1.1.0/go.mod h1:7P4iRhttt/nUvUOrYIhcpMzv2G6CY9UnI16Z+UJqRyk= github.com/andreyvit/diff v0.0.0-20170406064948-c7f18ee00883 h1:bvNMNQO63//z+xNgfBlViaCIJKLlCJ6/fmUseuG0wVQ= +github.com/andybalholm/cascadia v1.3.2 h1:3Xi6Dw5lHF15JtdcmAHD3i1+T8plmv7BQ/nsViSLyss= github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY= github.com/apache/arrow/go/arrow v0.0.0-20210818145353-234c94e4ce64/go.mod h1:2qMFB56yOP3KzkB3PbYZ4AlUFg3a88F67TIx5lB/WwY= github.com/apache/arrow/go/arrow v0.0.0-20211013220434-5962184e7a30/go.mod h1:Q7yQnSMnLvcXlZ8RV+jwz/6y1rQTqbX6C82SndT52Zs= @@ -352,8 +354,8 @@ github.com/coreos/pkg v0.0.0-20160727233714-3ac0863d7acf/go.mod h1:E3G3o1h8I7cfc github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA= github.com/cpuguy83/go-md2man/v2 
v2.0.0-20190314233015-f79a8a8ca69d/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU= github.com/cpuguy83/go-md2man/v2 v2.0.0/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU= -github.com/cpuguy83/go-md2man/v2 v2.0.3 h1:qMCsGGgs+MAzDFyp9LpAe1Lqy/fY/qCovCm0qnXZOBM= -github.com/cpuguy83/go-md2man/v2 v2.0.3/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o= +github.com/cpuguy83/go-md2man/v2 v2.0.4 h1:wfIWP927BUkWJb2NmU/kNDYIBTh/ziUX91+lVfRxZq4= +github.com/cpuguy83/go-md2man/v2 v2.0.4/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o= github.com/creack/pty v1.1.7/go.mod h1:lj5s0c3V2DBrqTV7llrYr5NG6My20zk30Fl46Y7DoTY= github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= github.com/creack/pty v1.1.11/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= @@ -472,28 +474,24 @@ github.com/go-openapi/jsonpointer v0.0.0-20160704185906-46af16f9f7b1/go.mod h1:+ github.com/go-openapi/jsonpointer v0.19.2/go.mod h1:3akKfEdA7DF1sugOqz1dVQHBcuDBPKZGEoHC/NkiQRg= github.com/go-openapi/jsonpointer v0.19.3/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg= github.com/go-openapi/jsonpointer v0.19.5/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg= -github.com/go-openapi/jsonpointer v0.19.6/go.mod h1:osyAmYz/mB/C3I+WsTTSgw1ONzaLJoLCyoi6/zppojs= -github.com/go-openapi/jsonpointer v0.20.0 h1:ESKJdU9ASRfaPNOPRx12IUyA1vn3R9GiE3KYD14BXdQ= -github.com/go-openapi/jsonpointer v0.20.0/go.mod h1:6PGzBjjIIumbLYysB73Klnms1mwnU4G3YHOECG3CedA= +github.com/go-openapi/jsonpointer v0.21.0 h1:YgdVicSA9vH5RiHs9TZW5oyafXZFc6+2Vc1rr/O9oNQ= +github.com/go-openapi/jsonpointer v0.21.0/go.mod h1:IUyH9l/+uyhIYQ/PXVA41Rexl+kOkAPDdXEYns6fzUY= github.com/go-openapi/jsonreference v0.0.0-20160704190145-13c6e3589ad9/go.mod h1:W3Z9FmVs9qj+KR4zFKmDPGiLdk1D9Rlm7cyMvf57TTg= github.com/go-openapi/jsonreference v0.19.2/go.mod h1:jMjeRr2HHw6nAVajTXJ4eiUwohSTlpa0o73RUL1owJc= github.com/go-openapi/jsonreference v0.19.3/go.mod h1:rjx6GuL8TTa9VaixXglHmQmIL98+wF9xc8zWvFonSJ8= github.com/go-openapi/jsonreference v0.19.5/go.mod h1:RdybgQwPxbL4UEjuAruzK1x3nE69AqPYEJeo/TWfEeg= -github.com/go-openapi/jsonreference v0.20.0/go.mod h1:Ag74Ico3lPc+zR+qjn4XBUmXymS4zJbYVCZmcgkasdo= -github.com/go-openapi/jsonreference v0.20.2 h1:3sVjiK66+uXK/6oQ8xgcRKcFgQ5KXa2KvnJRumpMGbE= -github.com/go-openapi/jsonreference v0.20.2/go.mod h1:Bl1zwGIM8/wsvqjsOQLJ/SH+En5Ap4rVB5KVcIDZG2k= +github.com/go-openapi/jsonreference v0.21.0 h1:Rs+Y7hSXT83Jacb7kFyjn4ijOuVGSvOdF2+tg1TRrwQ= +github.com/go-openapi/jsonreference v0.21.0/go.mod h1:LmZmgsrTkVg9LG4EaHeY8cBDslNPMo06cago5JNLkm4= github.com/go-openapi/spec v0.0.0-20160808142527-6aced65f8501/go.mod h1:J8+jY1nAiCcj+friV/PDoE1/3eeccG9LYBs0tYvLOWc= github.com/go-openapi/spec v0.19.3/go.mod h1:FpwSN1ksY1eteniUU7X0N/BgJ7a4WvBFVA8Lj9mJglo= -github.com/go-openapi/spec v0.20.9 h1:xnlYNQAwKd2VQRRfwTEI0DcK+2cbuvI/0c7jx3gA8/8= -github.com/go-openapi/spec v0.20.9/go.mod h1:2OpW+JddWPrpXSCIX8eOx7lZ5iyuWj3RYR6VaaBKcWA= +github.com/go-openapi/spec v0.21.0 h1:LTVzPc3p/RzRnkQqLRndbAzjY0d0BCL72A6j3CdL9ZY= +github.com/go-openapi/spec v0.21.0/go.mod h1:78u6VdPw81XU44qEWGhtr982gJ5BWg2c0I5XwVMotYk= github.com/go-openapi/swag v0.0.0-20160704191624-1d0bd113de87/go.mod h1:DXUve3Dpr1UfpPtxFw+EFuQ41HhCWZfha5jSVRG7C7I= github.com/go-openapi/swag v0.19.2/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk= github.com/go-openapi/swag v0.19.5/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk= github.com/go-openapi/swag v0.19.14/go.mod h1:QYRuS/SOXUCsnplDa677K7+DxSOj6IPNl/eQntq43wQ= 
-github.com/go-openapi/swag v0.19.15/go.mod h1:QYRuS/SOXUCsnplDa677K7+DxSOj6IPNl/eQntq43wQ= -github.com/go-openapi/swag v0.22.3/go.mod h1:UzaqsxGiab7freDnrUUra0MwWfN/q7tE4j+VcZ0yl14= -github.com/go-openapi/swag v0.22.4 h1:QLMzNJnMGPRNDCbySlcj1x01tzU8/9LTTL9hZZZogBU= -github.com/go-openapi/swag v0.22.4/go.mod h1:UzaqsxGiab7freDnrUUra0MwWfN/q7tE4j+VcZ0yl14= +github.com/go-openapi/swag v0.23.0 h1:vsEVJDUo2hPJ2tu0/Xc+4noaxyEffXNIs3cOULZ+GrE= +github.com/go-openapi/swag v0.23.0/go.mod h1:esZ8ITTYEsH1V2trKHjAN8Ai7xHb8RV+YSZ577vPjgQ= github.com/go-sql-driver/mysql v1.4.0/go.mod h1:zAC/RDZ24gD3HViQzih4MyKcchzm+sOG5ZlKdlhCg5w= github.com/go-sql-driver/mysql v1.4.1/go.mod h1:zAC/RDZ24gD3HViQzih4MyKcchzm+sOG5ZlKdlhCg5w= github.com/go-sql-driver/mysql v1.5.0/go.mod h1:DCzpHaOWr8IXmIStZouvnhqoel9Qv2LBy8hT2VhHyBg= @@ -547,8 +545,8 @@ github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= github.com/golang-jwt/jwt/v4 v4.0.0/go.mod h1:/xlHOz8bRuivTWchD4jCa+NbatV+wEUSzwAxVc6locg= github.com/golang-jwt/jwt/v4 v4.1.0/go.mod h1:/xlHOz8bRuivTWchD4jCa+NbatV+wEUSzwAxVc6locg= -github.com/golang-jwt/jwt/v4 v4.5.0 h1:7cYmW1XlMY7h7ii7UhUyChSgS5wUJEnm9uZVTGqOWzg= -github.com/golang-jwt/jwt/v4 v4.5.0/go.mod h1:m21LjoU+eqJr34lmDMbreY2eSTRJ1cv77w39/MY0Ch0= +github.com/golang-jwt/jwt/v5 v5.2.1 h1:OuVbFODueb089Lh128TAcimifWaLhJwVflnrgM17wHk= +github.com/golang-jwt/jwt/v5 v5.2.1/go.mod h1:pqrtFR0X4osieyHYxtmOUWsAWrfe1Q5UVIyoH402zdk= github.com/golang-migrate/migrate/v4 v4.15.2 h1:vU+M05vs6jWHKDdmE1Ecwj0BznygFc4QsdRe2E/L7kc= github.com/golang-migrate/migrate/v4 v4.15.2/go.mod h1:f2toGLkYqD3JH+Todi4aZ2ZdbeUNx4sIwiOK96rE9Lw= github.com/golang-sql/civil v0.0.0-20190719163853-cb61b32ac6fe/go.mod h1:8vg3r2VgvsThLBIFL93Qb5yWzgyZWhEmBwUJWevAkK0= @@ -643,8 +641,8 @@ github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+ github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/uuid v1.2.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/uuid v1.3.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= -github.com/google/uuid v1.4.0 h1:MtMxsa51/r9yyhkyLsVeVt0B+BGQZzpQiTQ4eHZ8bc4= -github.com/google/uuid v1.4.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= +github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg= github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk= github.com/googleapis/gax-go/v2 v2.1.0/go.mod h1:Q3nei7sK6ybPYH7twZdmQpAd1MKb7pfu6SK+H1/DsU0= @@ -701,8 +699,8 @@ github.com/hashicorp/go-uuid v1.0.1/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/b github.com/hashicorp/go.net v0.0.1/go.mod h1:hjKkEWcCURg++eb33jQU7oqQcI9XDCnUzHA0oac0k90= github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= -github.com/hashicorp/golang-lru/v2 v2.0.3 h1:kmRrRLlInXvng0SmLxmQpQkpbYAvcXm7NPDrgxJa9mE= -github.com/hashicorp/golang-lru/v2 v2.0.3/go.mod h1:QeFd9opnmA6QUJc5vARoKUSoFhyfM2/ZepoAG6RGpeM= +github.com/hashicorp/golang-lru/v2 v2.0.7 h1:a+bsQ5rvGLjzHuww6tVxozPZFVghXaHOwFs4luLUK2k= +github.com/hashicorp/golang-lru/v2 v2.0.7/go.mod 
h1:QeFd9opnmA6QUJc5vARoKUSoFhyfM2/ZepoAG6RGpeM= github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ= github.com/hashicorp/logutils v1.0.0/go.mod h1:QIAnNjmIWmVIIkWDTG1z5v++HQmx9WQRO+LraFDTW64= github.com/hashicorp/mdns v1.0.0/go.mod h1:tL+uN++7HEJ6SQLQ2/p+z2pH24WQKWjBPkE0mNTz8vQ= @@ -1065,7 +1063,7 @@ github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6L github.com/rogpeppe/go-internal v1.1.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= github.com/rogpeppe/go-internal v1.2.2/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= -github.com/rogpeppe/go-internal v1.8.1 h1:geMPLpDpQOgVyCg5z5GoRwLHepNdb71NXb67XFkP+Eg= +github.com/rogpeppe/go-internal v1.11.0 h1:cWPaGQEPrBb5/AsnsZesgZZ9yb1OQ+GOISoDNXVBh4M= github.com/rs/xid v1.2.1/go.mod h1:+uKXf+4Djp6Md1KODXJxgGQPKngRmWyn10oCKFzNHOQ= github.com/rs/zerolog v1.13.0/go.mod h1:YbFCdg8HfsridGWAh22vktObvhZbQsZXe4/zB0OKkWU= github.com/rs/zerolog v1.15.0/go.mod h1:xYTKnLHcpfU2225ny5qZjxnj9NvkumZYjJHlAThCjNc= @@ -1132,8 +1130,6 @@ github.com/stretchr/objx v0.0.0-20180129172003-8a3f7159479f/go.mod h1:HFkY916IF+ github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.2.0/go.mod h1:qt09Ya8vawLte6SNmTgCsAVtYtaKzEcn8ATUoHMkEqE= -github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= -github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo= github.com/stretchr/testify v0.0.0-20180303142811-b89eecf5ca5d/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= @@ -1141,18 +1137,15 @@ github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81P github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA= github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= -github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.7.2/go.mod h1:R6va5+xMeoiuVRoj+gSkQ7d3FALtqAAGI1FQKckRals= -github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= -github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= -github.com/stretchr/testify v1.8.4 h1:CcVxjf3Q8PM0mHUKJCdn+eZZtm5yQwehR5yeSVQQcUk= +github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsTg= github.com/subosito/gotenv v1.2.0/go.mod h1:N0PQaV/YGNqwC0u51sEeR/aUtSLEXKX9iv69rRypqCw= github.com/swaggo/files v1.0.0 h1:1gGXVIeUFCS/dta17rnP0iOpr6CXFwKD7EO5ID233e4= github.com/swaggo/files v1.0.0/go.mod h1:N59U6URJLyU1PQgFqPM7wXLMhJx7QAolnvfQkqO13kc= github.com/swaggo/http-swagger v1.3.3 h1:Hu5Z0L9ssyBLofaama21iYaF2VbWyA8jdohaaCGpHsc= github.com/swaggo/http-swagger v1.3.3/go.mod h1:sE+4PjD89IxMPm77FnkDz0sdO+p5lbXzrVWT6OTVVGo= -github.com/swaggo/swag v1.16.2 h1:28Pp+8DkQoV+HLzLx8RGJZXNGKbFqnuvSbAAtoxiY04= -github.com/swaggo/swag v1.16.2/go.mod h1:6YzXnDcpr0767iOejs318CwYkCQqyGer6BizOg03f+E= +github.com/swaggo/swag v1.16.3 h1:PnCYjPCah8FK4I26l2F/KQ4yz3sILcVUN3cTlBFA9Pg= 
+github.com/swaggo/swag v1.16.3/go.mod h1:DImHIuOFXKpMFAQjcC7FG4m3Dg4+QuUgUzJmKjI/gRk= github.com/syndtr/gocapability v0.0.0-20170704070218-db04d3cc01c8/go.mod h1:hkRG7XYTFWNJGYcbNJQlaLq0fg1yr4J4t/NcTQtrfww= github.com/syndtr/gocapability v0.0.0-20180916011248-d98352740cb2/go.mod h1:hkRG7XYTFWNJGYcbNJQlaLq0fg1yr4J4t/NcTQtrfww= github.com/syndtr/gocapability v0.0.0-20200815063812-42c35b437635/go.mod h1:hkRG7XYTFWNJGYcbNJQlaLq0fg1yr4J4t/NcTQtrfww= @@ -1167,10 +1160,10 @@ github.com/urfave/cli v0.0.0-20171014202726-7bc6a0acffa5/go.mod h1:70zkFmudgCuE/ github.com/urfave/cli v1.20.0/go.mod h1:70zkFmudgCuE/ngEzBv17Jvp/497gISqfk5gWijbERA= github.com/urfave/cli v1.22.1/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0= github.com/urfave/cli v1.22.2/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0= -github.com/urfave/cli/v2 v2.25.7 h1:VAzn5oq403l5pHjc4OhD54+XGO9cdKVL/7lDjF+iKUs= -github.com/urfave/cli/v2 v2.25.7/go.mod h1:8qnjx1vcq5s2/wpsqoZFndg2CE5tNFyrTvS6SinrnYQ= -github.com/vektah/gqlparser/v2 v2.5.10 h1:6zSM4azXC9u4Nxy5YmdmGu4uKamfwsdKTwp5zsEealU= -github.com/vektah/gqlparser/v2 v2.5.10/go.mod h1:1rCcfwB2ekJofmluGWXMSEnPMZgbxzwj6FaZ/4OT8Cc= +github.com/urfave/cli/v2 v2.27.1 h1:8xSQ6szndafKVRmfyeUMxkNUJQMjL1F2zmsZ+qHpfho= +github.com/urfave/cli/v2 v2.27.1/go.mod h1:8qnjx1vcq5s2/wpsqoZFndg2CE5tNFyrTvS6SinrnYQ= +github.com/vektah/gqlparser/v2 v2.5.11 h1:JJxLtXIoN7+3x6MBdtIP59TP1RANnY7pXOaDnADQSf8= +github.com/vektah/gqlparser/v2 v2.5.11/go.mod h1:1rCcfwB2ekJofmluGWXMSEnPMZgbxzwj6FaZ/4OT8Cc= github.com/vishvananda/netlink v0.0.0-20181108222139-023a6dafdcdf/go.mod h1:+SR5DhBJrl6ZM7CoCKvpw5BKroDKQ+PJqOg65H/2ktk= github.com/vishvananda/netlink v1.1.0/go.mod h1:cTgwzPIzzgDAYoQrMm0EdrjRUBkTqKYppBueQtXaqoE= github.com/vishvananda/netlink v1.1.1-0.20201029203352-d40f9887b852/go.mod h1:twkDnbuQxJYemMlGd4JFIcuhgX83tXhKS2B/PRMpOho= @@ -1190,8 +1183,8 @@ github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415/go.mod h1: github.com/xeipuuv/gojsonschema v0.0.0-20180618132009-1d523034197f/go.mod h1:5yf86TLmAcydyeJq5YvxkGPE2fm/u4myDekKRoLuqhs= github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU= github.com/xordataexchange/crypt v0.0.3-0.20170626215501-b2862e3d0a77/go.mod h1:aYKd//L2LvnjZzWKhF00oedf4jCCReLcmhLdhm1A27Q= -github.com/xrash/smetrics v0.0.0-20201216005158-039620a65673 h1:bAn7/zixMGCfxrRTfdpNzjtPYqr8smhKouy9mxVdGPU= -github.com/xrash/smetrics v0.0.0-20201216005158-039620a65673/go.mod h1:N3UwUGtsrSj3ccvlPHLoLsHnpR27oXr4ZE984MbSER8= +github.com/xrash/smetrics v0.0.0-20240312152122-5f08fbb34913 h1:+qGGcbkzsfDQNPPe9UDgpxAWQrhbbBXOYJFQDq/dtJw= +github.com/xrash/smetrics v0.0.0-20240312152122-5f08fbb34913/go.mod h1:4aEEwZQutDLsQv2Deui4iYQ6DWTxR14g6m8Wv88+Xqk= github.com/youmark/pkcs8 v0.0.0-20181117223130-1be2e3e5546d/go.mod h1:rHwXgn7JulP+udvsHwJoVG1YGAP6VLg4y9I5dyZdqmA= github.com/yuin/goldmark v1.1.25/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= @@ -1286,8 +1279,8 @@ golang.org/x/crypto v0.0.0-20210322153248-0c34fe9e7dc2/go.mod h1:T9bdIzuCu7OtxOm golang.org/x/crypto v0.0.0-20210817164053-32db794688a5/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= golang.org/x/crypto v0.0.0-20220622213112-05595931fe9d/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= -golang.org/x/crypto v0.16.0 
h1:mMMrFzRSCF0GvB7Ne27XVtVAaXLrPmgPC7/v0tkwHaY= -golang.org/x/crypto v0.16.0/go.mod h1:gCAAfMLgwOJRpTjQ2zCCt2OcSfYMTeZVSRtQlPC7Nq4= +golang.org/x/crypto v0.21.0 h1:X31++rzVUdKhX5sWmSOFZxx8UW/ldWx55cbf08iNAMA= +golang.org/x/crypto v0.21.0/go.mod h1:0BP7YvVV9gBbVKyeTG0Gyn+gZm94bibOW5BjDEYAOMs= golang.org/x/exp v0.0.0-20180321215751-8460e604b9de/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20180807140117-3d87b88a115f/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= @@ -1338,8 +1331,8 @@ golang.org/x/mod v0.4.1/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.5.0/go.mod h1:5OXOZSfqPIIbmVBIIKWRFfZjPR0E5r58TLhUjH0a2Ro= golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= -golang.org/x/mod v0.14.0 h1:dGoOF9QVLYng8IHTm7BAyWqCqSheQ5pYWGhzW00YJr0= -golang.org/x/mod v0.14.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= +golang.org/x/mod v0.16.0 h1:QX4fJ0Rr5cPQCF7O9lh9Se4pmwfwskqZfq5moyldzic= +golang.org/x/mod v0.16.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= golang.org/x/net v0.0.0-20180218175443-cbe0f9307d01/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= @@ -1409,8 +1402,8 @@ golang.org/x/net v0.0.0-20220111093109-d55c255bac03/go.mod h1:9nx3DQGgdP8bBQD5qx golang.org/x/net v0.0.0-20220225172249-27dd8689420f/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= golang.org/x/net v0.2.0/go.mod h1:KqCZLdyyvdV855qA2rE3GC2aiw5xGR5TEjj8smXukLY= -golang.org/x/net v0.19.0 h1:zTwKpTd2XuCqf8huc7Fo2iSy+4RHPd10s4KzeTnVr1c= -golang.org/x/net v0.19.0/go.mod h1:CfAk/cbD4CthTvqiEl8NpboMuiuOYsAr/7NOjZJtv1U= +golang.org/x/net v0.22.0 h1:9sGLhx7iRIHEiX0oAJ3MRZMUCElJgy7Br1nO+AMN3Tc= +golang.org/x/net v0.22.0/go.mod h1:JKghWKKOSdJwpW2GEx0Ja7fmaKnMsbu+MWVZTokSYmg= golang.org/x/oauth2 v0.0.0-20180227000427-d7d64896b5ff/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20181106182150-f42d05182288/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= @@ -1444,7 +1437,7 @@ golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJ golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.5.0 h1:60k92dhOjHxJkrqnwsfl8KuaHbn/5dl0lUPUklKo3qE= +golang.org/x/sync v0.6.0 h1:5BMeUDZ7vkXGfEr1x9B4bRcTH4lpkTkpdh0T/J+qjbQ= golang.org/x/sys v0.0.0-20180224232135-f6cff0780e54/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= @@ 
-1568,8 +1561,8 @@ golang.org/x/sys v0.0.0-20220317061510-51cd9980dadf/go.mod h1:oPkhp1MJrh7nUepCBc
 golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
 golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
 golang.org/x/sys v0.2.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/sys v0.15.0 h1:h48lPFYpsTvQJZF4EKyI4aLHaev3CxivZmv7yZig9pc=
-golang.org/x/sys v0.15.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
+golang.org/x/sys v0.18.0 h1:DBdB3niSjOA/O0blCZBqDefyWNYveAYMNF1Wum0DYQ4=
+golang.org/x/sys v0.18.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
 golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw=
 golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
 golang.org/x/term v0.0.0-20210220032956-6a3ed077a48d/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
@@ -1676,8 +1669,8 @@ golang.org/x/tools v0.1.3/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=
 golang.org/x/tools v0.1.4/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=
 golang.org/x/tools v0.1.5/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=
 golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc=
-golang.org/x/tools v0.16.0 h1:GO788SKMRunPIBCXiQyo2AaexLstOrVhuAL5YwsckQM=
-golang.org/x/tools v0.16.0/go.mod h1:kYVVN6I1mBNoB1OX+noeBjbRk4IUEPa7JJ+TJMEooJ0=
+golang.org/x/tools v0.19.0 h1:tfGCXNR1OsFG+sVdLAitlpjAvD/I6dHDKnYrpEZUHkw=
+golang.org/x/tools v0.19.0/go.mod h1:qoJWxmGSIBmAeriMx19ogtrEPrGtDbPK634QFIcLAhc=
 golang.org/x/xerrors v0.0.0-20190410155217-1f06c39b4373/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
 golang.org/x/xerrors v0.0.0-20190513163551-3ee3066db522/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
 golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
@@ -1852,8 +1845,8 @@ google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlba
 google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw=
 google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc=
 google.golang.org/protobuf v1.27.1/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc=
-google.golang.org/protobuf v1.30.0 h1:kPPoIgf3TsEvrm0PFe15JQ+570QVxYzEvvHqChK+cng=
-google.golang.org/protobuf v1.30.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I=
+google.golang.org/protobuf v1.33.0 h1:uNO2rsAINq/JlFpSdYEKIZ0uKD/R9cpdv0T+yoGwGmI=
+google.golang.org/protobuf v1.33.0/go.mod h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHhKbcUYpos=
 gopkg.in/airbrake/gobrake.v2 v2.0.9/go.mod h1:/h5ZAUhDkGaJfjzjKLSjv6zCL6O0LLBxU4K+aSYdM/U=
 gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw=
 gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
diff --git a/internal/auth/jwt.go b/internal/auth/jwt.go
index 83bfee3..c1af752 100644
--- a/internal/auth/jwt.go
+++ b/internal/auth/jwt.go
@@ -17,7 +17,7 @@ import (
 	"github.com/ClusterCockpit/cc-backend/internal/repository"
 	"github.com/ClusterCockpit/cc-backend/pkg/log"
 	"github.com/ClusterCockpit/cc-backend/pkg/schema"
-	"github.com/golang-jwt/jwt/v4"
+	"github.com/golang-jwt/jwt/v5"
 )

 type JWTAuthenticator struct {
@@ -49,8 +49,8 @@ func (ja *JWTAuthenticator) Init() error {

 func (ja *JWTAuthenticator) AuthViaJWT(
 	rw http.ResponseWriter,
-	r *http.Request) (*schema.User, error) {
-
+	r *http.Request,
+) (*schema.User, error) {
 	rawtoken := r.Header.Get("X-Auth-Token")
 	if rawtoken == "" {
 		rawtoken = r.Header.Get("Authorization")
@@ -73,9 +73,9 @@ func (ja *JWTAuthenticator) AuthViaJWT(
 		log.Warn("Error while parsing JWT token")
 		return nil, err
 	}
-	if err := token.Claims.Valid(); err != nil {
+	if !token.Valid {
 		log.Warn("jwt token claims are not valid")
-		return nil, err
+		return nil, errors.New("jwt token claims are not valid")
 	}

 	// Token is valid, extract payload
@@ -88,7 +88,6 @@ func (ja *JWTAuthenticator) AuthViaJWT(
 	if config.Keys.JwtConfig.ValidateUser {
 		ur := repository.GetUserRepository()
 		user, err := ur.GetUser(sub)
-
 		// Deny any logins for unknown usernames
 		if err != nil {
 			log.Warn("Could not find user from JWT in internal database.")
@@ -117,7 +116,6 @@ func (ja *JWTAuthenticator) AuthViaJWT(

 // Generate a new JWT that can be used for authentication
 func (ja *JWTAuthenticator) ProvideJWT(user *schema.User) (string, error) {
-
 	if ja.privateKey == nil {
 		return "", errors.New("environment variable 'JWT_PRIVATE_KEY' not set")
 	}
diff --git a/internal/auth/jwtCookieSession.go b/internal/auth/jwtCookieSession.go
index 3748836..01f5746 100644
--- a/internal/auth/jwtCookieSession.go
+++ b/internal/auth/jwtCookieSession.go
@@ -17,7 +17,7 @@ import (
 	"github.com/ClusterCockpit/cc-backend/internal/repository"
 	"github.com/ClusterCockpit/cc-backend/pkg/log"
 	"github.com/ClusterCockpit/cc-backend/pkg/schema"
-	"github.com/golang-jwt/jwt/v4"
+	"github.com/golang-jwt/jwt/v5"
 )

 type JWTCookieSessionAuthenticator struct {
@@ -90,8 +90,8 @@ func (ja *JWTCookieSessionAuthenticator) CanLogin(
 	user *schema.User,
 	username string,
 	rw http.ResponseWriter,
-	r *http.Request) (*schema.User, bool) {
-
+	r *http.Request,
+) (*schema.User, bool) {
 	jc := config.Keys.JwtConfig
 	cookieName := ""
 	if jc.CookieName != "" {
@@ -113,8 +113,8 @@ func (ja *JWTCookieSessionAuthenticator) CanLogin(
 func (ja *JWTCookieSessionAuthenticator) Login(
 	user *schema.User,
 	rw http.ResponseWriter,
-	r *http.Request) (*schema.User, error) {
-
+	r *http.Request,
+) (*schema.User, error) {
 	jc := config.Keys.JwtConfig
 	jwtCookie, err := r.Cookie(jc.CookieName)
 	var rawtoken string
@@ -144,10 +144,9 @@ func (ja *JWTCookieSessionAuthenticator) Login(
 		return nil, err
 	}

-	// Check token validity and extract paypload
-	if err := token.Claims.Valid(); err != nil {
+	if !token.Valid {
 		log.Warn("jwt token claims are not valid")
-		return nil, err
+		return nil, errors.New("jwt token claims are not valid")
 	}

 	claims := token.Claims.(jwt.MapClaims)
diff --git a/internal/auth/jwtSession.go b/internal/auth/jwtSession.go
index 286bb82..541e31e 100644
--- a/internal/auth/jwtSession.go
+++ b/internal/auth/jwtSession.go
@@ -17,7 +17,7 @@ import (
 	"github.com/ClusterCockpit/cc-backend/internal/repository"
 	"github.com/ClusterCockpit/cc-backend/pkg/log"
 	"github.com/ClusterCockpit/cc-backend/pkg/schema"
-	"github.com/golang-jwt/jwt/v4"
+	"github.com/golang-jwt/jwt/v5"
 )

 type JWTSessionAuthenticator struct {
@@ -44,8 +44,8 @@ func (ja *JWTSessionAuthenticator) CanLogin(
 	user *schema.User,
 	username string,
 	rw http.ResponseWriter,
-	r *http.Request) (*schema.User, bool) {
-
+	r *http.Request,
+) (*schema.User, bool) {
 	return user, r.Header.Get("Authorization") != "" ||
 		r.URL.Query().Get("login-token") != ""
 }
@@ -53,8 +53,8 @@ func (ja *JWTSessionAuthenticator) CanLogin(
 func (ja *JWTSessionAuthenticator) Login(
 	user *schema.User,
 	rw http.ResponseWriter,
-	r *http.Request) (*schema.User, error) {
-
+	r *http.Request,
+) (*schema.User, error) {
 	rawtoken := strings.TrimPrefix(r.Header.Get("Authorization"), "Bearer ")
 	if rawtoken == "" {
 		rawtoken = r.URL.Query().Get("login-token")
@@ -71,9 +71,9 @@ func (ja *JWTSessionAuthenticator) Login(
 		return nil, err
 	}

-	if err = token.Claims.Valid(); err != nil {
+	if !token.Valid {
 		log.Warn("jwt token claims are not valid")
-		return nil, err
+		return nil, errors.New("jwt token claims are not valid")
 	}

 	claims := token.Claims.(jwt.MapClaims)
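The jwt v4 to v5 port is mostly mechanical, but one semantic detail matters: v5 drops the `Claims.Valid()` method and instead validates the registered claims (expiry, not-before, and so on) inside `Parse`, reporting the outcome through the returned error and the `token.Valid` flag, which is exactly what the three authenticators now check. A compact sketch of the v5 idiom (key and token values are placeholders):

    package main

    import (
        "errors"
        "fmt"

        "github.com/golang-jwt/jwt/v5"
    )

    func check(rawtoken string, key []byte) error {
        token, err := jwt.Parse(rawtoken, func(t *jwt.Token) (interface{}, error) {
            return key, nil
        })
        if err != nil {
            // v5 folds claim failures into err, e.g. jwt.ErrTokenExpired
            if errors.Is(err, jwt.ErrTokenExpired) {
                return fmt.Errorf("session expired: %w", err)
            }
            return err
        }
        if !token.Valid { // the flag the patched code relies on
            return errors.New("jwt token claims are not valid")
        }
        return nil
    }

    func main() { fmt.Println(check("not-a-token", []byte("placeholder"))) }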
From 83c38e74db7488dd69ad5427be1968a5482da288 Mon Sep 17 00:00:00 2001
From: Jan Eitzinger
Date: Fri, 22 Mar 2024 08:59:35 +0100
Subject: [PATCH 76/93] Refactoring: Reduce byte size of structs

---
 internal/metricdata/cc-metric-store.go | 42 ++++++++++++--------------
 1 file changed, 20 insertions(+), 22 deletions(-)

diff --git a/internal/metricdata/cc-metric-store.go b/internal/metricdata/cc-metric-store.go
index 4874975..e011974 100644
--- a/internal/metricdata/cc-metric-store.go
+++ b/internal/metricdata/cc-metric-store.go
@@ -32,32 +32,32 @@ type CCMetricStoreConfig struct {
 }

 type CCMetricStore struct {
+	here2there    map[string]string
+	there2here    map[string]string
+	client        http.Client
 	jwt           string
 	url           string
 	queryEndpoint string
-	client        http.Client
-	here2there    map[string]string
-	there2here    map[string]string
 }

 type ApiQueryRequest struct {
 	Cluster     string     `json:"cluster"`
+	Queries     []ApiQuery `json:"queries"`
+	ForAllNodes []string   `json:"for-all-nodes"`
 	From        int64      `json:"from"`
 	To          int64      `json:"to"`
 	WithStats   bool       `json:"with-stats"`
 	WithData    bool       `json:"with-data"`
-	Queries     []ApiQuery `json:"queries"`
-	ForAllNodes []string   `json:"for-all-nodes"`
 }

 type ApiQuery struct {
+	Type       *string  `json:"type,omitempty"`
+	SubType    *string  `json:"subtype,omitempty"`
 	Metric     string   `json:"metric"`
 	Hostname   string   `json:"host"`
-	Aggregate  bool     `json:"aggreg"`
-	Type       *string  `json:"type,omitempty"`
 	TypeIds    []string `json:"type-ids,omitempty"`
-	SubType    *string  `json:"subtype,omitempty"`
 	SubTypeIds []string `json:"subtype-ids,omitempty"`
+	Aggregate  bool     `json:"aggreg"`
 }

 type ApiQueryResponse struct {
@@ -67,16 +67,15 @@ type ApiQueryResponse struct {

 type ApiMetricData struct {
 	Error *string        `json:"error"`
+	Data  []schema.Float `json:"data"`
 	From  int64          `json:"from"`
 	To    int64          `json:"to"`
-	Data  []schema.Float `json:"data"`
 	Avg   schema.Float   `json:"avg"`
 	Min   schema.Float   `json:"min"`
 	Max   schema.Float   `json:"max"`
 }

 func (ccms *CCMetricStore) Init(rawConfig json.RawMessage) error {
-
 	var config CCMetricStoreConfig
 	if err := json.Unmarshal(rawConfig, &config); err != nil {
 		log.Warn("Error while unmarshaling raw json config")
@@ -122,8 +121,8 @@ func (ccms *CCMetricStore) toLocalName(metric string) string {

 func (ccms *CCMetricStore) doRequest(
 	ctx context.Context,
-	body *ApiQueryRequest) (*ApiQueryResponse, error) {
-
+	body *ApiQueryRequest,
+) (*ApiQueryResponse, error) {
 	buf := &bytes.Buffer{}
 	if err := json.NewEncoder(buf).Encode(body); err != nil {
 		log.Warn("Error while encoding request body")
@@ -162,8 +161,8 @@ func (ccms *CCMetricStore) LoadData(
 	job *schema.Job,
 	metrics []string,
 	scopes []schema.MetricScope,
-	ctx context.Context) (schema.JobData, error) {
-
+	ctx context.Context,
+) (schema.JobData, error) {
 	queries, assignedScope, err := ccms.buildQueries(job, metrics, scopes)
 	if err != nil {
 		log.Warn("Error while building queries")
@@ -186,7 +185,7 @@ func (ccms *CCMetricStore) LoadData(
 	}

 	var errors []string
-	var jobData schema.JobData = make(schema.JobData)
+	jobData := make(schema.JobData)
 	for i, row := range resBody.Results {
 		query := req.Queries[i]
 		metric := ccms.toLocalName(query.Metric)
@@ -267,8 +266,8 @@ var (
 func (ccms *CCMetricStore) buildQueries(
 	job *schema.Job,
 	metrics []string,
-	scopes []schema.MetricScope) ([]ApiQuery, []schema.MetricScope, error) {
-
+	scopes []schema.MetricScope,
+) ([]ApiQuery, []schema.MetricScope, error) {
 	queries := make([]ApiQuery, 0, len(metrics)*len(scopes)*len(job.Resources))
 	assignedScope := []schema.MetricScope{}

@@ -504,8 +503,8 @@ func (ccms *CCMetricStore) buildQueries(
 func (ccms *CCMetricStore) LoadStats(
 	job *schema.Job,
 	metrics []string,
-	ctx context.Context) (map[string]map[string]schema.MetricStatistics, error) {
-
+	ctx context.Context,
+) (map[string]map[string]schema.MetricStatistics, error) {
 	queries, _, err := ccms.buildQueries(job, metrics, []schema.MetricScope{schema.MetricScopeNode}) // #166 Add scope here for analysis view accelerator normalization?
 	if err != nil {
 		log.Warn("Error while building query")
@@ -566,8 +565,8 @@ func (ccms *CCMetricStore) LoadNodeData(
 	metrics, nodes []string,
 	scopes []schema.MetricScope,
 	from, to time.Time,
-	ctx context.Context) (map[string]map[string][]*schema.JobMetric, error) {
-
+	ctx context.Context,
+) (map[string]map[string][]*schema.JobMetric, error) {
 	req := ApiQueryRequest{
 		Cluster: cluster,
 		From:    from.Unix(),
@@ -652,7 +651,6 @@
 }

 func intToStringSlice(is []int) []string {
-
 	ss := make([]string, len(is))
 	for i, x := range is {
 		ss[i] = strconv.Itoa(x)
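The reordering above is not cosmetic: Go aligns each field to its size, so a struct that alternates between wide fields (pointers, strings, slices) and narrow ones (bools, small ints) pays padding bytes between them. Grouping the wide fields first and the narrow ones last removes the holes. An illustration with made-up types (not the cc-backend structs):

    package main

    import (
        "fmt"
        "unsafe"
    )

    type Wasteful struct {
        a bool  // 1 byte + 7 bytes padding before b
        b int64 // 8 bytes
        c bool  // 1 byte + 7 bytes trailing padding
    }

    type Packed struct {
        b int64 // 8 bytes
        a bool  // 1 byte
        c bool  // 1 byte + 6 bytes trailing padding
    }

    func main() {
        fmt.Println(unsafe.Sizeof(Wasteful{})) // 24 on 64-bit platforms
        fmt.Println(unsafe.Sizeof(Packed{}))   // 16
    }

The `fieldalignment` analyzer from golang.org/x/tools reports such layouts and suggests the packed order automatically.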
From 5c79f440559ce0c45e7164ce9a1d859ec049fa94 Mon Sep 17 00:00:00 2001
From: Jan Eitzinger
Date: Fri, 22 Mar 2024 08:59:59 +0100
Subject: [PATCH 77/93] Clarify functionality of gen-keypair tool

---
 tools/gen-keypair/main.go | 5 +++--
 1 file changed, 3 insertions(+), 2 deletions(-)

diff --git a/tools/gen-keypair/main.go b/tools/gen-keypair/main.go
index f8c66fe..ff9c5c3 100644
--- a/tools/gen-keypair/main.go
+++ b/tools/gen-keypair/main.go
@@ -1,4 +1,4 @@
-// Copyright (C) 2022 NHR@FAU, University Erlangen-Nuremberg.
+// Copyright (C) NHR@FAU, University Erlangen-Nuremberg.
 // All rights reserved.
 // Use of this source code is governed by a MIT-style
 // license that can be found in the LICENSE file.
@@ -20,7 +20,8 @@ func main() {
 		os.Exit(1)
 	}

-	fmt.Fprintf(os.Stdout, "JWT_PUBLIC_KEY=%#v\nJWT_PRIVATE_KEY=%#v\n",
+	fmt.Fprintf(os.Stdout, "ED25519 PUBLIC_KEY=%#v\nED25519 PRIVATE_KEY=%#v\n",
 		base64.StdEncoding.EncodeToString(pub),
 		base64.StdEncoding.EncodeToString(priv))
+	fmt.Println("This is NO JWT token. You can generate JWT tokens with cc-backend. Use this keypair for signing and validation of JWT tokens in ClusterCockpit.")
 }

From 03895f9e454b2487a6190713eed6758155feeda0 Mon Sep 17 00:00:00 2001
From: Jan Eitzinger
Date: Fri, 22 Mar 2024 09:41:18 +0100
Subject: [PATCH 78/93] Swag dependency needs at least Go 1.20

---
 .github/workflows/test.yml | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/.github/workflows/test.yml b/.github/workflows/test.yml
index db99fb2..6c2fc9b 100644
--- a/.github/workflows/test.yml
+++ b/.github/workflows/test.yml
@@ -7,7 +7,7 @@ jobs:
     - name: Install Go
       uses: actions/setup-go@v4
       with:
-        go-version: 1.19.x
+        go-version: 1.20.x
     - name: Checkout code
       uses: actions/checkout@v3
    - name: Build, Vet & Test

From 5a177c952db4eb602461483264a96dba7e8251dd Mon Sep 17 00:00:00 2001
From: Christoph Kluge
Date: Fri, 22 Mar 2024 16:10:30 +0100
Subject: [PATCH 79/93] fix: multiple accs with identical label, cloned data
 for single acc

- GPU id label in job view statistic table is always the same on multi GPU jobs #239
- Multiple accelerators listed in plot despite using only one #241
---
 internal/metricdata/cc-metric-store.go | 9 +++++++--
 1 file changed, 7 insertions(+), 2 deletions(-)

diff --git a/internal/metricdata/cc-metric-store.go b/internal/metricdata/cc-metric-store.go
index 4874975..4e69e47 100644
--- a/internal/metricdata/cc-metric-store.go
+++ b/internal/metricdata/cc-metric-store.go
@@ -206,7 +206,7 @@ func (ccms *CCMetricStore) LoadData(
 			jobData[metric][scope] = jobMetric
 		}

-		for _, res := range row {
+		for ndx, res := range row {
 			if res.Error != nil {
 				/* Build list for "partial errors", if any */
 				errors = append(errors, fmt.Sprintf("failed to fetch '%s' from host '%s': %s", query.Metric, query.Hostname, *res.Error))
 				continue
 			}

 			id := (*string)(nil)
 			if query.Type != nil {
 				id = new(string)
-				*id = query.TypeIds[0]
+				*id = query.TypeIds[ndx]
 			}

 			if res.Avg.IsNaN() || res.Min.IsNaN() || res.Max.IsNaN() {
@@ -313,6 +313,11 @@ func (ccms *CCMetricStore) buildQueries(

 			// Accelerator -> Accelerator (Use "accelerator" scope if requested scope is lower than node)
 			if nativeScope == schema.MetricScopeAccelerator && scope.LT(schema.MetricScopeNode) {
+				if scope != schema.MetricScopeAccelerator {
+					// Skip all other caught cases
+					continue
+				}
+
 				queries = append(queries, ApiQuery{
 					Metric:   remoteName,
 					Hostname: host.Hostname,
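The first hunk is the actual #239/#241 fix: a single `ApiQuery` can address several accelerators via `TypeIds`, and cc-metric-store answers with one result per id in the same order, so the label has to be taken per result index rather than always from `TypeIds[0]`. Reduced to its core (simplified stand-in types, not the full API):

    package main

    import "fmt"

    type ApiQuery struct {
        Metric  string
        TypeIds []string
    }

    type result struct{ avg float64 }

    func main() {
        q := ApiQuery{Metric: "acc_utilization", TypeIds: []string{"0", "1", "2"}}
        row := []result{{0.9}, {0.4}, {0.7}} // one entry per TypeId, same order

        for ndx, res := range row {
            id := q.TypeIds[ndx] // was TypeIds[0]: every GPU series got the label "0"
            fmt.Printf("GPU %s: avg %.2f\n", id, res.avg)
        }
    }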
From b9b452f043397cab1ee3a4839f754bedf0e25701 Mon Sep 17 00:00:00 2001
From: Christoph Kluge
Date: Tue, 26 Mar 2024 15:56:07 +0100
Subject: [PATCH 80/93] feat: prototype infinite scroll implementation

---
 api/schema.graphqls                     |   1 +
 internal/config/config.go               |   1 +
 internal/graph/generated/generated.go   | 142 +++++++++++-------------
 internal/graph/model/models_gen.go      |  15 ++-
 internal/graph/schema.resolvers.go      |  17 ++-
 web/frontend/src/joblist/JobList.svelte |  87 ++++++++++-----
 6 files changed, 152 insertions(+), 111 deletions(-)

diff --git a/api/schema.graphqls b/api/schema.graphqls
index aa6aea2..73140b9 100644
--- a/api/schema.graphqls
+++ b/api/schema.graphqls
@@ -278,6 +278,7 @@ type JobResultList {
   offset: Int
   limit: Int
   count: Int
+  hasNextPage: Boolean!
 }

 type JobLinkResultList {

diff --git a/internal/config/config.go b/internal/config/config.go
index 76fd62a..60c7da3 100644
--- a/internal/config/config.go
+++ b/internal/config/config.go
@@ -32,6 +32,7 @@ var Keys schema.ProgramConfig = schema.ProgramConfig{
 		"job_view_polarPlotMetrics":    []string{"flops_any", "mem_bw", "mem_used"},
 		"job_view_selectedMetrics":    []string{"flops_any", "mem_bw", "mem_used"},
 		"job_view_showFootprint":      true,
+		"job_list_usePaging":          true,
 		"plot_general_colorBackground": true,
 		"plot_general_colorscheme":    []string{"#00bfff", "#0000ff", "#ff00ff", "#ff0000", "#ff8000", "#ffff00", "#80ff00"},
 		"plot_general_lineWidth":      3,
diff --git a/internal/graph/generated/generated.go b/internal/graph/generated/generated.go
index d84f043..1ea41f9 100644
--- a/internal/graph/generated/generated.go
+++ b/internal/graph/generated/generated.go
@@ -139,10 +139,11 @@
 	}

 	JobResultList struct {
-		Count  func(childComplexity int) int
-		Items  func(childComplexity int) int
-		Limit  func(childComplexity int) int
-		Offset func(childComplexity int) int
+		Count       func(childComplexity int) int
+		HasNextPage func(childComplexity int) int
+		Items       func(childComplexity int) int
+		Limit       func(childComplexity int) int
+		Offset      func(childComplexity int) int
 	}

 	JobsStatistics struct {
@@ -755,6 +756,13 @@ func (e *executableSchema) Complexity(typeName, field string, childComplexity in

 		return e.complexity.JobResultList.Count(childComplexity), true

+	case "JobResultList.hasNextPage":
+		if e.complexity.JobResultList.HasNextPage == nil {
+			break
+		}
+
+		return e.complexity.JobResultList.HasNextPage(childComplexity), true
+
 	case "JobResultList.items":
 		if e.complexity.JobResultList.Items == nil {
 			break
 		}
@@ -1987,6 +1995,7 @@ type JobResultList {
   offset: Int
   limit: Int
   count: Int
+  hasNextPage: Boolean!
} type JobLinkResultList { @@ -5221,6 +5230,50 @@ func (ec *executionContext) fieldContext_JobResultList_count(ctx context.Context return fc, nil } +func (ec *executionContext) _JobResultList_hasNextPage(ctx context.Context, field graphql.CollectedField, obj *model.JobResultList) (ret graphql.Marshaler) { + fc, err := ec.fieldContext_JobResultList_hasNextPage(ctx, field) + if err != nil { + return graphql.Null + } + ctx = graphql.WithFieldContext(ctx, fc) + defer func() { + if r := recover(); r != nil { + ec.Error(ctx, ec.Recover(ctx, r)) + ret = graphql.Null + } + }() + resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) { + ctx = rctx // use context from middleware stack in children + return obj.HasNextPage, nil + }) + if err != nil { + ec.Error(ctx, err) + return graphql.Null + } + if resTmp == nil { + if !graphql.HasFieldError(ctx, fc) { + ec.Errorf(ctx, "must not be null") + } + return graphql.Null + } + res := resTmp.(bool) + fc.Result = res + return ec.marshalNBoolean2bool(ctx, field.Selections, res) +} + +func (ec *executionContext) fieldContext_JobResultList_hasNextPage(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) { + fc = &graphql.FieldContext{ + Object: "JobResultList", + Field: field, + IsMethod: false, + IsResolver: false, + Child: func(ctx context.Context, field graphql.CollectedField) (*graphql.FieldContext, error) { + return nil, errors.New("field of type Boolean does not have child fields") + }, + } + return fc, nil +} + func (ec *executionContext) _JobsStatistics_id(ctx context.Context, field graphql.CollectedField, obj *model.JobsStatistics) (ret graphql.Marshaler) { fc, err := ec.fieldContext_JobsStatistics_id(ctx, field) if err != nil { @@ -8017,6 +8070,8 @@ func (ec *executionContext) fieldContext_Query_jobs(ctx context.Context, field g return ec.fieldContext_JobResultList_limit(ctx, field) case "count": return ec.fieldContext_JobResultList_count(ctx, field) + case "hasNextPage": + return ec.fieldContext_JobResultList_hasNextPage(ctx, field) } return nil, fmt.Errorf("no field named %q was found under type JobResultList", field.Name) }, @@ -12226,8 +12281,6 @@ func (ec *executionContext) unmarshalInputFloatRange(ctx context.Context, obj in } switch k { case "from": - var err error - ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("from")) data, err := ec.unmarshalNFloat2float64(ctx, v) if err != nil { @@ -12235,8 +12288,6 @@ func (ec *executionContext) unmarshalInputFloatRange(ctx context.Context, obj in } it.From = data case "to": - var err error - ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("to")) data, err := ec.unmarshalNFloat2float64(ctx, v) if err != nil { @@ -12264,8 +12315,6 @@ func (ec *executionContext) unmarshalInputIntRange(ctx context.Context, obj inte } switch k { case "from": - var err error - ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("from")) data, err := ec.unmarshalNInt2int(ctx, v) if err != nil { @@ -12273,8 +12322,6 @@ func (ec *executionContext) unmarshalInputIntRange(ctx context.Context, obj inte } it.From = data case "to": - var err error - ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("to")) data, err := ec.unmarshalNInt2int(ctx, v) if err != nil { @@ -12302,8 +12349,6 @@ func (ec *executionContext) unmarshalInputJobFilter(ctx context.Context, obj int } switch k { case "tags": - var err error - ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("tags")) data, err := 
ec.unmarshalOID2ᚕstringᚄ(ctx, v) if err != nil { @@ -12311,8 +12356,6 @@ func (ec *executionContext) unmarshalInputJobFilter(ctx context.Context, obj int } it.Tags = data case "jobId": - var err error - ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("jobId")) data, err := ec.unmarshalOStringInput2ᚖgithubᚗcomᚋClusterCockpitᚋccᚑbackendᚋinternalᚋgraphᚋmodelᚐStringInput(ctx, v) if err != nil { @@ -12320,8 +12363,6 @@ func (ec *executionContext) unmarshalInputJobFilter(ctx context.Context, obj int } it.JobID = data case "arrayJobId": - var err error - ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("arrayJobId")) data, err := ec.unmarshalOInt2ᚖint(ctx, v) if err != nil { @@ -12329,8 +12370,6 @@ func (ec *executionContext) unmarshalInputJobFilter(ctx context.Context, obj int } it.ArrayJobID = data case "user": - var err error - ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("user")) data, err := ec.unmarshalOStringInput2ᚖgithubᚗcomᚋClusterCockpitᚋccᚑbackendᚋinternalᚋgraphᚋmodelᚐStringInput(ctx, v) if err != nil { @@ -12338,8 +12377,6 @@ func (ec *executionContext) unmarshalInputJobFilter(ctx context.Context, obj int } it.User = data case "project": - var err error - ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("project")) data, err := ec.unmarshalOStringInput2ᚖgithubᚗcomᚋClusterCockpitᚋccᚑbackendᚋinternalᚋgraphᚋmodelᚐStringInput(ctx, v) if err != nil { @@ -12347,8 +12384,6 @@ func (ec *executionContext) unmarshalInputJobFilter(ctx context.Context, obj int } it.Project = data case "jobName": - var err error - ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("jobName")) data, err := ec.unmarshalOStringInput2ᚖgithubᚗcomᚋClusterCockpitᚋccᚑbackendᚋinternalᚋgraphᚋmodelᚐStringInput(ctx, v) if err != nil { @@ -12356,8 +12391,6 @@ func (ec *executionContext) unmarshalInputJobFilter(ctx context.Context, obj int } it.JobName = data case "cluster": - var err error - ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("cluster")) data, err := ec.unmarshalOStringInput2ᚖgithubᚗcomᚋClusterCockpitᚋccᚑbackendᚋinternalᚋgraphᚋmodelᚐStringInput(ctx, v) if err != nil { @@ -12365,8 +12398,6 @@ func (ec *executionContext) unmarshalInputJobFilter(ctx context.Context, obj int } it.Cluster = data case "partition": - var err error - ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("partition")) data, err := ec.unmarshalOStringInput2ᚖgithubᚗcomᚋClusterCockpitᚋccᚑbackendᚋinternalᚋgraphᚋmodelᚐStringInput(ctx, v) if err != nil { @@ -12374,8 +12405,6 @@ func (ec *executionContext) unmarshalInputJobFilter(ctx context.Context, obj int } it.Partition = data case "duration": - var err error - ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("duration")) data, err := ec.unmarshalOIntRange2ᚖgithubᚗcomᚋClusterCockpitᚋccᚑbackendᚋpkgᚋschemaᚐIntRange(ctx, v) if err != nil { @@ -12383,8 +12412,6 @@ func (ec *executionContext) unmarshalInputJobFilter(ctx context.Context, obj int } it.Duration = data case "minRunningFor": - var err error - ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("minRunningFor")) data, err := ec.unmarshalOInt2ᚖint(ctx, v) if err != nil { @@ -12392,8 +12419,6 @@ func (ec *executionContext) unmarshalInputJobFilter(ctx context.Context, obj int } it.MinRunningFor = data case "numNodes": - var err error - ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("numNodes")) data, err := ec.unmarshalOIntRange2ᚖgithubᚗcomᚋClusterCockpitᚋccᚑbackendᚋpkgᚋschemaᚐIntRange(ctx, v) if err != nil { @@ -12401,8 
+12426,6 @@ func (ec *executionContext) unmarshalInputJobFilter(ctx context.Context, obj int } it.NumNodes = data case "numAccelerators": - var err error - ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("numAccelerators")) data, err := ec.unmarshalOIntRange2ᚖgithubᚗcomᚋClusterCockpitᚋccᚑbackendᚋpkgᚋschemaᚐIntRange(ctx, v) if err != nil { @@ -12410,8 +12433,6 @@ func (ec *executionContext) unmarshalInputJobFilter(ctx context.Context, obj int } it.NumAccelerators = data case "numHWThreads": - var err error - ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("numHWThreads")) data, err := ec.unmarshalOIntRange2ᚖgithubᚗcomᚋClusterCockpitᚋccᚑbackendᚋpkgᚋschemaᚐIntRange(ctx, v) if err != nil { @@ -12419,8 +12440,6 @@ func (ec *executionContext) unmarshalInputJobFilter(ctx context.Context, obj int } it.NumHWThreads = data case "startTime": - var err error - ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("startTime")) data, err := ec.unmarshalOTimeRange2ᚖgithubᚗcomᚋClusterCockpitᚋccᚑbackendᚋpkgᚋschemaᚐTimeRange(ctx, v) if err != nil { @@ -12428,8 +12447,6 @@ func (ec *executionContext) unmarshalInputJobFilter(ctx context.Context, obj int } it.StartTime = data case "state": - var err error - ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("state")) data, err := ec.unmarshalOJobState2ᚕgithubᚗcomᚋClusterCockpitᚋccᚑbackendᚋpkgᚋschemaᚐJobStateᚄ(ctx, v) if err != nil { @@ -12437,8 +12454,6 @@ func (ec *executionContext) unmarshalInputJobFilter(ctx context.Context, obj int } it.State = data case "flopsAnyAvg": - var err error - ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("flopsAnyAvg")) data, err := ec.unmarshalOFloatRange2ᚖgithubᚗcomᚋClusterCockpitᚋccᚑbackendᚋinternalᚋgraphᚋmodelᚐFloatRange(ctx, v) if err != nil { @@ -12446,8 +12461,6 @@ func (ec *executionContext) unmarshalInputJobFilter(ctx context.Context, obj int } it.FlopsAnyAvg = data case "memBwAvg": - var err error - ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("memBwAvg")) data, err := ec.unmarshalOFloatRange2ᚖgithubᚗcomᚋClusterCockpitᚋccᚑbackendᚋinternalᚋgraphᚋmodelᚐFloatRange(ctx, v) if err != nil { @@ -12455,8 +12468,6 @@ func (ec *executionContext) unmarshalInputJobFilter(ctx context.Context, obj int } it.MemBwAvg = data case "loadAvg": - var err error - ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("loadAvg")) data, err := ec.unmarshalOFloatRange2ᚖgithubᚗcomᚋClusterCockpitᚋccᚑbackendᚋinternalᚋgraphᚋmodelᚐFloatRange(ctx, v) if err != nil { @@ -12464,8 +12475,6 @@ func (ec *executionContext) unmarshalInputJobFilter(ctx context.Context, obj int } it.LoadAvg = data case "memUsedMax": - var err error - ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("memUsedMax")) data, err := ec.unmarshalOFloatRange2ᚖgithubᚗcomᚋClusterCockpitᚋccᚑbackendᚋinternalᚋgraphᚋmodelᚐFloatRange(ctx, v) if err != nil { @@ -12473,8 +12482,6 @@ func (ec *executionContext) unmarshalInputJobFilter(ctx context.Context, obj int } it.MemUsedMax = data case "exclusive": - var err error - ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("exclusive")) data, err := ec.unmarshalOInt2ᚖint(ctx, v) if err != nil { @@ -12482,8 +12489,6 @@ func (ec *executionContext) unmarshalInputJobFilter(ctx context.Context, obj int } it.Exclusive = data case "node": - var err error - ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("node")) data, err := ec.unmarshalOStringInput2ᚖgithubᚗcomᚋClusterCockpitᚋccᚑbackendᚋinternalᚋgraphᚋmodelᚐStringInput(ctx, v) if err != 
nil { @@ -12515,8 +12520,6 @@ func (ec *executionContext) unmarshalInputOrderByInput(ctx context.Context, obj } switch k { case "field": - var err error - ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("field")) data, err := ec.unmarshalNString2string(ctx, v) if err != nil { @@ -12524,8 +12527,6 @@ func (ec *executionContext) unmarshalInputOrderByInput(ctx context.Context, obj } it.Field = data case "order": - var err error - ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("order")) data, err := ec.unmarshalNSortDirectionEnum2githubᚗcomᚋClusterCockpitᚋccᚑbackendᚋinternalᚋgraphᚋmodelᚐSortDirectionEnum(ctx, v) if err != nil { @@ -12553,8 +12554,6 @@ func (ec *executionContext) unmarshalInputPageRequest(ctx context.Context, obj i } switch k { case "itemsPerPage": - var err error - ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("itemsPerPage")) data, err := ec.unmarshalNInt2int(ctx, v) if err != nil { @@ -12562,8 +12561,6 @@ func (ec *executionContext) unmarshalInputPageRequest(ctx context.Context, obj i } it.ItemsPerPage = data case "page": - var err error - ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("page")) data, err := ec.unmarshalNInt2int(ctx, v) if err != nil { @@ -12591,8 +12588,6 @@ func (ec *executionContext) unmarshalInputStringInput(ctx context.Context, obj i } switch k { case "eq": - var err error - ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("eq")) data, err := ec.unmarshalOString2ᚖstring(ctx, v) if err != nil { @@ -12600,8 +12595,6 @@ func (ec *executionContext) unmarshalInputStringInput(ctx context.Context, obj i } it.Eq = data case "neq": - var err error - ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("neq")) data, err := ec.unmarshalOString2ᚖstring(ctx, v) if err != nil { @@ -12609,8 +12602,6 @@ func (ec *executionContext) unmarshalInputStringInput(ctx context.Context, obj i } it.Neq = data case "contains": - var err error - ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("contains")) data, err := ec.unmarshalOString2ᚖstring(ctx, v) if err != nil { @@ -12618,8 +12609,6 @@ func (ec *executionContext) unmarshalInputStringInput(ctx context.Context, obj i } it.Contains = data case "startsWith": - var err error - ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("startsWith")) data, err := ec.unmarshalOString2ᚖstring(ctx, v) if err != nil { @@ -12627,8 +12616,6 @@ func (ec *executionContext) unmarshalInputStringInput(ctx context.Context, obj i } it.StartsWith = data case "endsWith": - var err error - ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("endsWith")) data, err := ec.unmarshalOString2ᚖstring(ctx, v) if err != nil { @@ -12636,8 +12623,6 @@ func (ec *executionContext) unmarshalInputStringInput(ctx context.Context, obj i } it.EndsWith = data case "in": - var err error - ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("in")) data, err := ec.unmarshalOString2ᚕstringᚄ(ctx, v) if err != nil { @@ -12665,8 +12650,6 @@ func (ec *executionContext) unmarshalInputTimeRange(ctx context.Context, obj int } switch k { case "from": - var err error - ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("from")) data, err := ec.unmarshalOTime2ᚖtimeᚐTime(ctx, v) if err != nil { @@ -12674,8 +12657,6 @@ func (ec *executionContext) unmarshalInputTimeRange(ctx context.Context, obj int } it.From = data case "to": - var err error - ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("to")) data, err := ec.unmarshalOTime2ᚖtimeᚐTime(ctx, v) if 
err != nil {
@@ -13481,6 +13462,11 @@ func (ec *executionContext) _JobResultList(ctx context.Context, sel ast.Selectio
 			out.Values[i] = ec._JobResultList_limit(ctx, field, obj)
 		case "count":
 			out.Values[i] = ec._JobResultList_count(ctx, field, obj)
+		case "hasNextPage":
+			out.Values[i] = ec._JobResultList_hasNextPage(ctx, field, obj)
+			if out.Values[i] == graphql.Null {
+				out.Invalids++
+			}
 		default:
 			panic("unknown field " + strconv.Quote(field.Name))
 		}
diff --git a/internal/graph/model/models_gen.go b/internal/graph/model/models_gen.go
index 7b8ebd2..7aa2764 100644
--- a/internal/graph/model/models_gen.go
+++ b/internal/graph/model/models_gen.go
@@ -78,10 +78,11 @@ type JobMetricWithName struct {
 }

 type JobResultList struct {
-	Items  []*schema.Job `json:"items"`
-	Offset *int          `json:"offset,omitempty"`
-	Limit  *int          `json:"limit,omitempty"`
-	Count  *int          `json:"count,omitempty"`
+	Items       []*schema.Job `json:"items"`
+	Offset      *int          `json:"offset,omitempty"`
+	Limit       *int          `json:"limit,omitempty"`
+	Count       *int          `json:"count,omitempty"`
+	HasNextPage bool          `json:"hasNextPage"`
 }

 type JobsStatistics struct {
@@ -122,6 +123,9 @@ type MetricHistoPoints struct {
 	Data   []*MetricHistoPoint `json:"data,omitempty"`
 }

+type Mutation struct {
+}
+
 type NodeMetrics struct {
 	Host       string `json:"host"`
 	SubCluster string `json:"subCluster"`
@@ -138,6 +142,9 @@ type PageRequest struct {
 	Page         int `json:"page"`
 }

+type Query struct {
+}
+
 type StringInput struct {
 	Eq  *string `json:"eq,omitempty"`
 	Neq *string `json:"neq,omitempty"`
diff --git a/internal/graph/schema.resolvers.go b/internal/graph/schema.resolvers.go
index 82bf026..c20cf1e 100644
--- a/internal/graph/schema.resolvers.go
+++ b/internal/graph/schema.resolvers.go
@@ -2,7 +2,7 @@ package graph

 // This file will be automatically regenerated based on the schema, any resolver implementations
 // will be copied through when generating and any unknown code will be moved to the end.
-// Code generated by github.com/99designs/gqlgen version v0.17.40
+// Code generated by github.com/99designs/gqlgen version v0.17.45

 import (
 	"context"
@@ -234,13 +234,26 @@ func (r *queryResolver) Jobs(ctx context.Context, filter []*model.JobFilter, pag
 		return nil, err
 	}

+	hasNextPage := false
+	nextPage := page
+	nextPage.Page += 1
+
+	nextJobs, err := r.Repo.QueryJobs(ctx, filter, nextPage, order)
+	if err != nil {
+		log.Warn("Error while querying next jobs")
+		return nil, err
+	}
+	if len(nextJobs) > 0 {
+		hasNextPage = true
+	}
+
 	count, err := r.Repo.CountJobs(ctx, filter)
 	if err != nil {
 		log.Warn("Error while counting jobs")
 		return nil, err
 	}

-	return &model.JobResultList{Items: jobs, Count: &count}, nil
+	return &model.JobResultList{Items: jobs, Count: &count, HasNextPage: hasNextPage}, nil
 }

 // JobsStatistics is the resolver for the jobsStatistics field.
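Note: the resolver derives hasNextPage by a look-ahead, fetching the page after the requested one and reporting whether it is non-empty. Since page arrives as a pointer (*model.PageRequest), nextPage := page aliases the caller's request, so nextPage.Page += 1 also bumps page.Page; that is harmless here because page is not read again, but a value copy sidesteps the surprise. A condensed sketch of the idea with stand-in types (queryJobs is hypothetical, standing in for r.Repo.QueryJobs):

package main

import "fmt"

// PageRequest mirrors the GraphQL pagination input (itemsPerPage, page).
type PageRequest struct {
	ItemsPerPage int
	Page         int
}

// queryJobs returns at most page.ItemsPerPage items of a total collection,
// starting at the requested 1-based page.
func queryJobs(total int, page PageRequest) []int {
	start := (page.Page - 1) * page.ItemsPerPage
	jobs := []int{}
	for i := start; i < total && i < start+page.ItemsPerPage; i++ {
		jobs = append(jobs, i)
	}
	return jobs
}

// hasNextPage implements the look-ahead: fetch the following page and
// report whether it is non-empty. Copying the struct by value keeps the
// caller's page request untouched.
func hasNextPage(total int, page PageRequest) bool {
	nextPage := page // value copy, not a pointer alias
	nextPage.Page += 1
	return len(queryJobs(total, nextPage)) > 0
}

func main() {
	page := PageRequest{ItemsPerPage: 10, Page: 5}
	fmt.Println(hasNextPage(42, page)) // false: page 6 would start at item 50
	fmt.Println(hasNextPage(77, page)) // true: items 50..59 exist
}

A variant that avoids the second round trip to the database would fetch itemsPerPage + 1 rows in the first query and report hasNextPage whenever the extra row shows up.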
diff --git a/web/frontend/src/joblist/JobList.svelte b/web/frontend/src/joblist/JobList.svelte
index 3efe069..6311a66 100644
--- a/web/frontend/src/joblist/JobList.svelte
+++ b/web/frontend/src/joblist/JobList.svelte
@@ -30,7 +30,8 @@
     export let metrics = ccconfig.plot_list_selectedMetrics;
     export let showFootprint;

-    let itemsPerPage = ccconfig.plot_list_jobsPerPage;
+    let usePaging = !!ccconfig.job_list_usePaging
+    let itemsPerPage = usePaging ? ccconfig.plot_list_jobsPerPage : 10;
     let page = 1;
     let paging = { itemsPerPage, page };
     let filter = [];
@@ -79,21 +80,27 @@
                 loadAvg
               }
               count
+              hasNextPage
             }
           }
         `;

-    $: jobs = queryStore({
+    $: jobsStore = queryStore({
         client: client,
         query: query,
         variables: { paging, sorting, filter },
     });

-    $: matchedJobs = $jobs.data != null ? $jobs.data.jobs.count : 0;
+    let jobs = []
+    $: if ($initialized && $jobsStore.data) {
+        jobs = [...$jobsStore.data.jobs.items]
+    }
+
+    $: matchedJobs = $jobsStore.data != null ? $jobsStore.data.jobs.count : 0;

     // Force refresh list with existing unchanged variables (== usually would not trigger reactivity)
     export function refresh() {
-        jobs = queryStore({
+        jobsStore = queryStore({
             client: client,
             query: query,
             variables: { paging, sorting, filter },
@@ -132,6 +139,7 @@
             value: value,
         }).subscribe((res) => {
             if (res.fetching === false && !res.error) {
+                jobs = [] // Empty List
                 paging = { itemsPerPage: value, page: page }; // Trigger reload of jobList
             } else if (res.fetching === false && res.error) {
                 throw res.error;
@@ -140,6 +148,25 @@
         });
     }

+    window.addEventListener('scroll', () => {
+        let {
+            scrollTop,
+            scrollHeight,
+            clientHeight
+        } = document.documentElement;
+
+        if (scrollTop + clientHeight >= scrollHeight && !usePaging && $jobsStore.data != null && $jobsStore.data.jobs.hasNextPage) {
+            fetchMore()
+        }
+    });
+
+    let scrollMultiplier = 1
+    function fetchMore() {
+        let pendingPaging = { ...paging }
+        scrollMultiplier += 1
+        pendingPaging.itemsPerPage = itemsPerPage * scrollMultiplier
+        paging = pendingPaging
+    }
+
     let plotWidth = null;
     let tableWidth = null;
     let jobInfoColumnWidth = 250;
@@ -212,22 +239,16 @@
-            {#if $jobs.error}
+            {#if $jobsStore.error}

-            {$jobs.error.message}

+            {$jobsStore.error.message}

    - {:else if $jobs.fetching || !$jobs.data} - - - - - - {:else if $jobs.data && $initialized} - {#each $jobs.data.jobs.items as job (job)} + {:else} + {#each jobs as job (job)} {:else} @@ -235,24 +256,36 @@ {/each} {/if} + {#if $jobsStore.fetching || !$jobsStore.data} + + +
    + +
    + + + {/if}
    - { - if (detail.itemsPerPage != itemsPerPage) { - updateConfiguration(detail.itemsPerPage.toString(), detail.page); - } else { - paging = { itemsPerPage: detail.itemsPerPage, page: detail.page }; - } - }} -/> +{#if usePaging} + { + if (detail.itemsPerPage != itemsPerPage) { + updateConfiguration(detail.itemsPerPage.toString(), detail.page); + } else { + jobs = [] + paging = { itemsPerPage: detail.itemsPerPage, page: detail.page }; + } + }} + /> +{/if}
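Note: with hasNextPage exposed in JobResultList, any GraphQL client can page through the jobs query until exhaustion, just as the scroll handler above keeps widening its request while the flag stays true. A minimal consumer sketch in Go (the endpoint path, port, page size, and the absence of JWT authentication are deployment assumptions, not confirmed by this series):

package main

import (
	"bytes"
	"encoding/json"
	"fmt"
	"net/http"
)

// jobsResponse models only the response fields this sketch reads.
type jobsResponse struct {
	Data struct {
		Jobs struct {
			Items []struct {
				JobID int64 `json:"jobId"`
			} `json:"items"`
			Count       int  `json:"count"`
			HasNextPage bool `json:"hasNextPage"`
		} `json:"jobs"`
	} `json:"data"`
}

const query = `query($paging: PageRequest) {
  jobs(page: $paging) { items { jobId } count hasNextPage }
}`

func main() {
	page := 1
	for {
		body, _ := json.Marshal(map[string]any{
			"query": query,
			"variables": map[string]any{
				"paging": map[string]int{"itemsPerPage": 50, "page": page},
			},
		})
		// Endpoint and auth are deployment-specific; adjust as needed.
		resp, err := http.Post("http://localhost:8080/query", "application/json", bytes.NewReader(body))
		if err != nil {
			panic(err)
		}
		var r jobsResponse
		if err := json.NewDecoder(resp.Body).Decode(&r); err != nil {
			panic(err)
		}
		resp.Body.Close()

		fmt.Printf("page %d: %d jobs (of %d total)\n", page, len(r.Data.Jobs.Items), r.Data.Jobs.Count)
		if !r.Data.Jobs.HasNextPage {
			break // the server-side look-ahead says this was the last page
		}
		page++
	}
}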